From: Jens Axboe <[email protected]>
To: [email protected]
Cc: Jens Axboe <[email protected]>
Subject: [PATCH 1/5] io_uring: encapsulate extraneous wait flags into a separate struct
Date: Fri, 16 Aug 2024 14:38:12 -0600
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>

Rather than needing to pass in 2 or 3 separate arguments, add a struct
to encapsulate the timeout and sigset_t parts of waiting. This is in
preparation for adding another argument for waiting.

Signed-off-by: Jens Axboe <[email protected]>
---
io_uring/io_uring.c | 45 ++++++++++++++++++++++++---------------------
1 file changed, 24 insertions(+), 21 deletions(-)
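
For context on the userspace side this parses: when IORING_ENTER_EXT_ARG
is set, argp points at a struct io_uring_getevents_arg and argsz must be
sizeof() that struct, which io_get_ext_arg() below verifies before reading
the embedded sigmask and timespec pointers out of it. A minimal sketch of
a caller (wait_one_cqe() and ring_fd are hypothetical, the ring is assumed
to be set up elsewhere, error handling elided):

#include <linux/io_uring.h>
#include <linux/time_types.h>
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Wait for one completion, with a 1 second timeout and a temporary
 * signal mask, both passed through the extended argument rather than
 * as separate syscall arguments. ring_fd is an io_uring fd that is
 * assumed to have been set up elsewhere. */
static int wait_one_cqe(int ring_fd, const sigset_t *mask)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring_getevents_arg arg = {
		.sigmask	= (__u64)(unsigned long) mask,
		.sigmask_sz	= _NSIG / 8,
		.ts		= (__u64)(unsigned long) &ts,
	};

	/* with EXT_ARG, the size argument describes arg, not the sigmask */
	return syscall(__NR_io_uring_enter, ring_fd, 0, 1,
		       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
		       &arg, sizeof(arg));
}

Without IORING_ENTER_EXT_ARG, those same two syscall arguments carry the
sigset_t pointer and its size directly, which is the legacy path that the
!(flags & IORING_ENTER_EXT_ARG) branch below preserves.
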
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 5e75672525df..f90a4490908b 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2384,13 +2384,18 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
return ret;
}
+struct ext_arg {
+ size_t argsz;
+ struct __kernel_timespec __user *ts;
+ const sigset_t __user *sig;
+};
+
/*
* Wait until events become available, if we don't already have some. The
* application must reap them itself, as they reside on the shared cq ring.
*/
static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
- const sigset_t __user *sig, size_t sigsz,
- struct __kernel_timespec __user *uts)
+ struct ext_arg *ext_arg)
{
struct io_wait_queue iowq;
struct io_rings *rings = ctx->rings;
@@ -2416,10 +2421,10 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
iowq.timeout = KTIME_MAX;
- if (uts) {
+ if (ext_arg->ts) {
struct timespec64 ts;
- if (get_timespec64(&ts, uts))
+ if (get_timespec64(&ts, ext_arg->ts))
return -EFAULT;
iowq.timeout = timespec64_to_ktime(ts);
@@ -2427,14 +2432,14 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
iowq.timeout = ktime_add(iowq.timeout, io_get_time(ctx));
}
- if (sig) {
+ if (ext_arg->sig) {
#ifdef CONFIG_COMPAT
if (in_compat_syscall())
- ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
- sigsz);
+ ret = set_compat_user_sigmask((const compat_sigset_t __user *)ext_arg->sig,
+ ext_arg->argsz);
else
#endif
- ret = set_user_sigmask(sig, sigsz);
+ ret = set_user_sigmask(ext_arg->sig, ext_arg->argsz);
if (ret)
return ret;
@@ -3113,9 +3118,8 @@ static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t a
return 0;
}
-static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
- struct __kernel_timespec __user **ts,
- const sigset_t __user **sig)
+static int io_get_ext_arg(unsigned flags, const void __user *argp,
+ struct ext_arg *ext_arg)
{
struct io_uring_getevents_arg arg;
@@ -3124,8 +3128,8 @@ static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz
* is just a pointer to the sigset_t.
*/
if (!(flags & IORING_ENTER_EXT_ARG)) {
- *sig = (const sigset_t __user *) argp;
- *ts = NULL;
+ ext_arg->sig = (const sigset_t __user *) argp;
+ ext_arg->ts = NULL;
return 0;
}
@@ -3133,15 +3137,15 @@ static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz
* EXT_ARG is set - ensure we agree on the size of it and copy in our
* timespec and sigset_t pointers if good.
*/
- if (*argsz != sizeof(arg))
+ if (ext_arg->argsz != sizeof(arg))
return -EINVAL;
if (copy_from_user(&arg, argp, sizeof(arg)))
return -EFAULT;
if (arg.pad)
return -EINVAL;
- *sig = u64_to_user_ptr(arg.sigmask);
- *argsz = arg.sigmask_sz;
- *ts = u64_to_user_ptr(arg.ts);
+ ext_arg->sig = u64_to_user_ptr(arg.sigmask);
+ ext_arg->argsz = arg.sigmask_sz;
+ ext_arg->ts = u64_to_user_ptr(arg.ts);
return 0;
}
@@ -3247,15 +3251,14 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
}
mutex_unlock(&ctx->uring_lock);
} else {
- const sigset_t __user *sig;
- struct __kernel_timespec __user *ts;
+ struct ext_arg ext_arg = { .argsz = argsz };
- ret2 = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
+ ret2 = io_get_ext_arg(flags, argp, &ext_arg);
if (likely(!ret2)) {
min_complete = min(min_complete,
ctx->cq_entries);
ret2 = io_cqring_wait(ctx, min_complete, flags,
- sig, argsz, ts);
+ &ext_arg);
}
}
--
2.43.0