From: Jens Axboe <[email protected]>
To: [email protected]
Cc: Jens Axboe <[email protected]>
Subject: [PATCH 11/13] io_uring: read opcode and user_data from SQE exactly once
Date: Wed, 18 Dec 2019 10:18:33 -0700
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>
If we defer a request, we can't read the opcode from the SQE again, as the
application may have rewritten the shared SQE by then. Ensure that the
user_data and opcode fields are stable. For the user_data we already have a
place for it; for the opcode we can fill a one byte hole and store that as
well. Assign both when we originally read the SQE in io_get_sqring(). Any
code that uses sqe->opcode or sqe->user_data is switched to req->opcode and
req->user_data.
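A minimal sketch of the pattern this enforces (op_valid() and dispatch()
are illustrative placeholders, not actual io_uring helpers). The SQ ring is
shared memory, so userspace can rewrite an SQE after submission, and two
separate READ_ONCE() calls may observe two different opcodes:

	/* Racy: validation and dispatch may see different values if
	 * userspace rewrites the shared SQE in between the two reads.
	 */
	if (!op_valid(READ_ONCE(sqe->opcode)))
		return -EINVAL;
	dispatch(READ_ONCE(sqe->opcode));

	/* Stable: read the shared field exactly once, then make every
	 * later decision from the private copy in the request.
	 */
	req->opcode = READ_ONCE(sqe->opcode);
	if (!op_valid(req->opcode))
		return -EINVAL;
	dispatch(req->opcode);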
Signed-off-by: Jens Axboe <[email protected]>
---
fs/io_uring.c | 45 ++++++++++++++++++++-------------------------
1 file changed, 20 insertions(+), 25 deletions(-)
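As an aside on the one byte hole: in the struct io_kiocb hunk below, the
new u8 sits in what was previously padding after the three bools, so the
struct does not grow. A simplified sketch of just the relevant members
(offsets shown relative to has_user, assuming typical 64-bit alignment;
the real struct has more fields before and after these):

	struct io_kiocb_sketch {
		bool has_user;			/* relative offset 0 */
		bool in_async;			/* relative offset 1 */
		bool needs_fixed_file;		/* relative offset 2 */
		u8 opcode;			/* relative offset 3: the former hole */
		struct io_ring_ctx *ctx;	/* relative offset 8: alignment unchanged */
	};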
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c269229e45ef..9f10f5a9da8d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -384,6 +384,7 @@ struct io_kiocb {
bool has_user;
bool in_async;
bool needs_fixed_file;
+ u8 opcode;
struct io_ring_ctx *ctx;
union {
@@ -597,12 +598,10 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
}
}
-static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
+static inline bool io_req_needs_user(struct io_kiocb *req)
{
- u8 opcode = READ_ONCE(sqe->opcode);
-
- return !(opcode == IORING_OP_READ_FIXED ||
- opcode == IORING_OP_WRITE_FIXED);
+ return !(req->opcode == IORING_OP_READ_FIXED ||
+ req->opcode == IORING_OP_WRITE_FIXED);
}
static inline bool io_prep_async_work(struct io_kiocb *req,
@@ -611,7 +610,7 @@ static inline bool io_prep_async_work(struct io_kiocb *req,
bool do_hashed = false;
if (req->sqe) {
- switch (req->sqe->opcode) {
+ switch (req->opcode) {
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
/* only regular files should be hashed for writes */
@@ -634,7 +633,7 @@ static inline bool io_prep_async_work(struct io_kiocb *req,
req->work.flags |= IO_WQ_WORK_UNBOUND;
break;
}
- if (io_sqe_needs_user(req->sqe))
+ if (io_req_needs_user(req))
req->work.flags |= IO_WQ_WORK_NEEDS_USER;
}
@@ -1005,7 +1004,7 @@ static void io_fail_links(struct io_kiocb *req)
trace_io_uring_fail_link(req, link);
if ((req->flags & REQ_F_LINK_TIMEOUT) &&
- link->sqe->opcode == IORING_OP_LINK_TIMEOUT) {
+ link->opcode == IORING_OP_LINK_TIMEOUT) {
io_link_cancel_timeout(link);
} else {
io_cqring_fill_event(link, -ECANCELED);
@@ -1648,7 +1647,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
* for that purpose and instead let the caller pass in the read/write
* flag.
*/
- opcode = READ_ONCE(sqe->opcode);
+ opcode = req->opcode;
if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
*iovec = NULL;
return io_import_fixed(req->ctx, rw, sqe, iter);
@@ -3080,7 +3079,7 @@ static int io_req_defer_prep(struct io_kiocb *req)
struct iov_iter iter;
ssize_t ret;
- switch (io->sqe.opcode) {
+ switch (req->opcode) {
case IORING_OP_READV:
case IORING_OP_READ_FIXED:
/* ensure prep does right import */
@@ -3179,11 +3178,10 @@ __attribute__((nonnull))
static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock)
{
- int ret, opcode;
struct io_ring_ctx *ctx = req->ctx;
+ int ret;
- opcode = READ_ONCE(req->sqe->opcode);
- switch (opcode) {
+ switch (req->opcode) {
case IORING_OP_NOP:
ret = io_nop(req);
break;
@@ -3320,11 +3318,9 @@ static bool io_req_op_valid(int op)
return op >= IORING_OP_NOP && op < IORING_OP_LAST;
}
-static int io_op_needs_file(const struct io_uring_sqe *sqe)
+static int io_req_needs_file(struct io_kiocb *req)
{
- int op = READ_ONCE(sqe->opcode);
-
- switch (op) {
+ switch (req->opcode) {
case IORING_OP_NOP:
case IORING_OP_POLL_REMOVE:
case IORING_OP_TIMEOUT:
@@ -3333,7 +3329,7 @@ static int io_op_needs_file(const struct io_uring_sqe *sqe)
case IORING_OP_LINK_TIMEOUT:
return 0;
default:
- if (io_req_op_valid(op))
+ if (io_req_op_valid(req->opcode))
return 1;
return -EINVAL;
}
@@ -3360,7 +3356,7 @@ static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req)
if (flags & IOSQE_IO_DRAIN)
req->flags |= REQ_F_IO_DRAIN;
- ret = io_op_needs_file(req->sqe);
+ ret = io_req_needs_file(req);
if (ret <= 0)
return ret;
@@ -3480,7 +3476,7 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
link_list);
- if (!nxt || nxt->sqe->opcode != IORING_OP_LINK_TIMEOUT)
+ if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
return NULL;
req->flags |= REQ_F_LINK_TIMEOUT;
@@ -3582,8 +3578,6 @@ static bool io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
struct io_ring_ctx *ctx = req->ctx;
int ret;
- req->user_data = req->sqe->user_data;
-
/* enforce forwards compatibility on users */
if (unlikely(req->sqe->flags & ~SQE_VALID_FLAGS)) {
ret = -EINVAL;
@@ -3715,6 +3709,8 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req)
*/
req->sequence = ctx->cached_sq_head;
req->sqe = &ctx->sq_sqes[head];
+ req->opcode = READ_ONCE(req->sqe->opcode);
+ req->user_data = READ_ONCE(req->sqe->user_data);
ctx->cached_sq_head++;
return true;
}
@@ -3760,7 +3756,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
break;
}
- if (io_sqe_needs_user(req->sqe) && !*mm) {
+ if (io_req_needs_user(req) && !*mm) {
mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
if (!mm_fault) {
use_mm(ctx->sqo_mm);
@@ -3776,8 +3772,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
req->has_user = *mm != NULL;
req->in_async = async;
req->needs_fixed_file = async;
- trace_io_uring_submit_sqe(ctx, req->sqe->user_data,
- true, async);
+ trace_io_uring_submit_sqe(ctx, req->user_data, true, async);
if (!io_submit_sqe(req, statep, &link))
break;
/*
--
2.24.1
Thread overview: 14+ messages
2019-12-18 17:18 [PATCHSET v3] io_uring fixes for 5.5-rc3 Jens Axboe
2019-12-18 17:18 ` [PATCH 01/13] io_uring: fix stale comment and a few typos Jens Axboe
2019-12-18 17:18 ` [PATCH 02/13] io_uring: fix sporadic -EFAULT from IORING_OP_RECVMSG Jens Axboe
2019-12-18 17:18 ` [PATCH 03/13] io-wq: re-add io_wq_current_is_worker() Jens Axboe
2019-12-18 17:18 ` [PATCH 04/13] io_uring: fix pre-prepped issue with force_nonblock == true Jens Axboe
2019-12-18 17:18 ` [PATCH 05/13] io_uring: remove 'sqe' parameter to the OP helpers that take it Jens Axboe
2019-12-18 17:18 ` [PATCH 06/13] io_uring: any deferred command must have stable sqe data Jens Axboe
2019-12-18 17:18 ` [PATCH 07/13] io_uring: make HARDLINK imply LINK Jens Axboe
2019-12-18 17:18 ` [PATCH 08/13] io_uring: make IORING_POLL_ADD and IORING_POLL_REMOVE deferrable Jens Axboe
2019-12-18 17:18 ` [PATCH 09/13] io_uring: make IORING_OP_CANCEL_ASYNC deferrable Jens Axboe
2019-12-18 17:18 ` [PATCH 10/13] io_uring: make IORING_OP_TIMEOUT_REMOVE deferrable Jens Axboe
2019-12-18 17:18 ` [PATCH 11/13] io_uring: read opcode and user_data from SQE exactly once Jens Axboe [this message]
2019-12-18 17:18 ` [PATCH 12/13] io_uring: warn about unhandled opcode Jens Axboe
2019-12-18 17:18 ` [PATCH 13/13] io_uring: don't wait when under-submitting Jens Axboe