From: Pavel Begunkov <[email protected]>
To: Jens Axboe <[email protected]>,
[email protected], [email protected]
Subject: [PATCH v2 3/3] io_uring: use inlined struct sqe_submit
Date: Thu, 7 Nov 2019 01:00:32 +0300
Message-ID: <1b1e5571b9fc0ad13fa4480a91671c348ee6be10.1573077364.git.asml.silence@gmail.com>
In-Reply-To: <[email protected]>
req->submit is always up-to-date; use it directly instead of passing a
separate struct sqe_submit pointer through the call chain.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 87 +++++++++++++++++++++++++--------------------------
1 file changed, 43 insertions(+), 44 deletions(-)
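
Note (below the "---", so git-am drops it): a minimal sketch of the shape
of this change, for orientation before the diff. It is illustrative only,
not the kernel code as-is; sqe_submit is abridged to the members this
series actually touches, and io_kiocb to the one field that matters here
(embedded into the request by patch 2/3):

struct sqe_submit {
	const struct io_uring_sqe	*sqe;
	bool				has_user;
	bool				in_async;
	bool				needs_fixed_file;
	/* remaining members elided */
};

struct io_kiocb {
	/* ... */
	struct sqe_submit		submit;	/* valid for the req's lifetime */
};

/*
 * Before: helpers took the submit info as an extra parameter, e.g.
 *
 *	static int io_prep_rw(struct io_kiocb *req,
 *			      const struct sqe_submit *s,
 *			      bool force_nonblock);
 *
 * After: they read the copy embedded in the request instead.
 */
static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
{
	const struct io_uring_sqe *sqe = req->submit.sqe;
	/* ... set up the kiocb from sqe ... */
}

With the parameter gone there is no second pointer that can drift out of
sync with the request; req->submit is the single source of truth.
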
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f863304e451a..886b9cd96181 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1155,10 +1155,9 @@ static bool io_file_supports_async(struct file *file)
return false;
}
-static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
- bool force_nonblock)
+static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
{
- const struct io_uring_sqe *sqe = s->sqe;
+ const struct io_uring_sqe *sqe = req->submit.sqe;
struct io_ring_ctx *ctx = req->ctx;
struct kiocb *kiocb = &req->rw;
unsigned ioprio;
@@ -1406,8 +1405,8 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
return ret;
}
-static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
- struct io_kiocb **nxt, bool force_nonblock)
+static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct kiocb *kiocb = &req->rw;
@@ -1416,7 +1415,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
size_t iov_count;
ssize_t read_size, ret;
- ret = io_prep_rw(req, s, force_nonblock);
+ ret = io_prep_rw(req, force_nonblock);
if (ret)
return ret;
file = kiocb->ki_filp;
@@ -1424,7 +1423,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
if (unlikely(!(file->f_mode & FMODE_READ)))
return -EBADF;
- ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
+ ret = io_import_iovec(req->ctx, READ, &req->submit, &iovec, &iter);
if (ret < 0)
return ret;
@@ -1456,7 +1455,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
ret2 = -EAGAIN;
/* Catch -EAGAIN return for forced non-blocking submission */
if (!force_nonblock || ret2 != -EAGAIN)
- kiocb_done(kiocb, ret2, nxt, s->in_async);
+ kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
else
ret = -EAGAIN;
}
@@ -1464,8 +1463,8 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
return ret;
}
-static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
- struct io_kiocb **nxt, bool force_nonblock)
+static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct kiocb *kiocb = &req->rw;
@@ -1474,7 +1473,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
size_t iov_count;
ssize_t ret;
- ret = io_prep_rw(req, s, force_nonblock);
+ ret = io_prep_rw(req, force_nonblock);
if (ret)
return ret;
@@ -1482,7 +1481,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
if (unlikely(!(file->f_mode & FMODE_WRITE)))
return -EBADF;
- ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
+ ret = io_import_iovec(req->ctx, WRITE, &req->submit, &iovec, &iter);
if (ret < 0)
return ret;
@@ -1519,7 +1518,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
else
ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
if (!force_nonblock || ret2 != -EAGAIN)
- kiocb_done(kiocb, ret2, nxt, s->in_async);
+ kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
else
ret = -EAGAIN;
}
@@ -2188,9 +2187,9 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
return 0;
}
-static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
+static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
+ const struct io_uring_sqe *sqe = req->submit.sqe;
struct io_uring_sqe *sqe_copy;
if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list))
@@ -2217,10 +2216,10 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
}
static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct sqe_submit *s, struct io_kiocb **nxt,
- bool force_nonblock)
+ struct io_kiocb **nxt, bool force_nonblock)
{
int ret, opcode;
+ struct sqe_submit *s = &req->submit;
req->user_data = READ_ONCE(s->sqe->user_data);
@@ -2232,18 +2231,18 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
case IORING_OP_READV:
if (unlikely(s->sqe->buf_index))
return -EINVAL;
- ret = io_read(req, s, nxt, force_nonblock);
+ ret = io_read(req, nxt, force_nonblock);
break;
case IORING_OP_WRITEV:
if (unlikely(s->sqe->buf_index))
return -EINVAL;
- ret = io_write(req, s, nxt, force_nonblock);
+ ret = io_write(req, nxt, force_nonblock);
break;
case IORING_OP_READ_FIXED:
- ret = io_read(req, s, nxt, force_nonblock);
+ ret = io_read(req, nxt, force_nonblock);
break;
case IORING_OP_WRITE_FIXED:
- ret = io_write(req, s, nxt, force_nonblock);
+ ret = io_write(req, nxt, force_nonblock);
break;
case IORING_OP_FSYNC:
ret = io_fsync(req, s->sqe, nxt, force_nonblock);
@@ -2318,7 +2317,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
s->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
s->in_async = true;
do {
- ret = __io_submit_sqe(ctx, req, s, &nxt, false);
+ ret = __io_submit_sqe(ctx, req, &nxt, false);
/*
* We can get EAGAIN for polled IO even though we're
* forcing a sync submission from here, since we can't
@@ -2372,9 +2371,10 @@ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
return table->files[index & IORING_FILE_TABLE_MASK];
}
-static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
+static int io_req_set_file(struct io_ring_ctx *ctx,
struct io_submit_state *state, struct io_kiocb *req)
{
+ struct sqe_submit *s = &req->submit;
unsigned flags;
int fd;
@@ -2438,12 +2438,11 @@ static int io_grab_files(struct io_ring_ctx *ctx, struct io_kiocb *req)
return ret;
}
-static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct sqe_submit *s)
+static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
int ret;
- ret = __io_submit_sqe(ctx, req, s, NULL, true);
+ ret = __io_submit_sqe(ctx, req, NULL, true);
/*
* We async punt it if the file wasn't marked NOWAIT, or if the file
@@ -2451,6 +2450,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
*/
if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
(req->flags & REQ_F_MUST_PUNT))) {
+ struct sqe_submit *s = &req->submit;
struct io_uring_sqe *sqe_copy;
sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
@@ -2488,31 +2488,30 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
return ret;
}
-static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct sqe_submit *s)
+static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
int ret;
- ret = io_req_defer(ctx, req, s->sqe);
+ ret = io_req_defer(ctx, req);
if (ret) {
if (ret != -EIOCBQUEUED) {
+ io_cqring_add_event(ctx, req->submit.sqe->user_data, ret);
io_free_req(req, NULL);
- io_cqring_add_event(ctx, s->sqe->user_data, ret);
}
return 0;
}
- return __io_queue_sqe(ctx, req, s);
+ return __io_queue_sqe(ctx, req);
}
static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct sqe_submit *s, struct io_kiocb *shadow)
+ struct io_kiocb *shadow)
{
int ret;
int need_submit = false;
if (!shadow)
- return io_queue_sqe(ctx, req, s);
+ return io_queue_sqe(ctx, req);
/*
* Mark the first IO in link list as DRAIN, let all the following
@@ -2520,12 +2519,12 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
* list.
*/
req->flags |= REQ_F_IO_DRAIN;
- ret = io_req_defer(ctx, req, s->sqe);
+ ret = io_req_defer(ctx, req);
if (ret) {
if (ret != -EIOCBQUEUED) {
+ io_cqring_add_event(ctx, req->submit.sqe->user_data, ret);
io_free_req(req, NULL);
__io_free_req(shadow);
- io_cqring_add_event(ctx, s->sqe->user_data, ret);
return 0;
}
} else {
@@ -2543,7 +2542,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
spin_unlock_irq(&ctx->completion_lock);
if (need_submit)
- return __io_queue_sqe(ctx, req, s);
+ return __io_queue_sqe(ctx, req);
return 0;
}
@@ -2551,10 +2550,10 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct sqe_submit *s, struct io_submit_state *state,
- struct io_kiocb **link)
+ struct io_submit_state *state, struct io_kiocb **link)
{
struct io_uring_sqe *sqe_copy;
+ struct sqe_submit *s = &req->submit;
int ret;
/* enforce forwards compatibility on users */
@@ -2563,11 +2562,11 @@ static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
goto err_req;
}
- ret = io_req_set_file(ctx, s, state, req);
+ ret = io_req_set_file(ctx, state, req);
if (unlikely(ret)) {
err_req:
- io_free_req(req, NULL);
io_cqring_add_event(ctx, s->sqe->user_data, ret);
+ io_free_req(req, NULL);
return;
}
@@ -2598,7 +2597,7 @@ static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
INIT_LIST_HEAD(&req->link_list);
*link = req;
} else {
- io_queue_sqe(ctx, req, s);
+ io_queue_sqe(ctx, req);
}
}
@@ -2739,7 +2738,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
req->submit.needs_fixed_file = async;
trace_io_uring_submit_sqe(ctx, req->submit.sqe->user_data,
true, async);
- io_submit_sqe(ctx, req, &req->submit, statep, &link);
+ io_submit_sqe(ctx, req, statep, &link);
submitted++;
/*
@@ -2747,14 +2746,14 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
* that's the end of the chain. Submit the previous link.
*/
if (!(sqe_flags & IOSQE_IO_LINK) && link) {
- io_queue_link_head(ctx, link, &link->submit, shadow_req);
+ io_queue_link_head(ctx, link, shadow_req);
link = NULL;
shadow_req = NULL;
}
}
if (link)
- io_queue_link_head(ctx, link, &link->submit, shadow_req);
+ io_queue_link_head(ctx, link, shadow_req);
if (statep)
io_submit_state_end(&state);
--
2.23.0