* [PATCH 01/12] io_uring: avoid taking ctx refs for task-cancel
From: Pavel Begunkov @ 2021-02-28 22:35 UTC (permalink / raw)
To: Jens Axboe, io-uring
Don't bother to take a ctx ref for io_req_task_cancel(): it takes
uring_lock before putting the request, and the context is guaranteed to
stay alive until the unlock happens.
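
For illustration, a minimal userspace sketch of the same lifetime rule
(names are made up, this is not kernel code): an object whose teardown
must take the same lock cannot be freed while we hold that lock, so the
critical section needs no extra reference.

    #include <pthread.h>

    struct ctx {
            pthread_mutex_t uring_lock;
            /* ... rest of the context ... */
    };

    static void task_cancel(struct ctx *ctx, int res)
    {
            /* ctx stays alive for as long as uring_lock is held,
             * because freeing it also has to take uring_lock first */
            pthread_mutex_lock(&ctx->uring_lock);
            /* fail/complete the request with 'res' here */
            pthread_mutex_unlock(&ctx->uring_lock);
            /* no ref put: we never took one */
    }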
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 42b675939582..ad2ddbd22d62 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1960,10 +1960,10 @@ static void io_req_task_cancel(struct callback_head *cb)
struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
struct io_ring_ctx *ctx = req->ctx;
+ /* ctx is guaranteed to stay alive while we hold uring_lock */
mutex_lock(&ctx->uring_lock);
__io_req_task_cancel(req, req->result);
mutex_unlock(&ctx->uring_lock);
- percpu_ref_put(&ctx->refs);
}
static void __io_req_task_submit(struct io_kiocb *req)
@@ -1994,14 +1994,12 @@ static void io_req_task_queue(struct io_kiocb *req)
ret = io_req_task_work_add(req);
if (unlikely(ret)) {
req->result = -ECANCELED;
- percpu_ref_get(&req->ctx->refs);
io_req_task_work_add_fallback(req, io_req_task_cancel);
}
}
static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
{
- percpu_ref_get(&req->ctx->refs);
req->result = ret;
req->task_work.func = io_req_task_cancel;
--
2.24.0
* [PATCH 02/12] io_uring: reuse io_req_task_queue_fail()
From: Pavel Begunkov @ 2021-02-28 22:35 UTC (permalink / raw)
To: Jens Axboe, io-uring
Use io_req_task_queue_fail() on the failure path of io_req_task_queue().
The path is unlikely to be hit, so the extra overhead doesn't matter,
and it keeps the req->result invariant maintained in a single function.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 18 +++++++-----------
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index ad2ddbd22d62..528ab1666eb5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1986,25 +1986,21 @@ static void io_req_task_submit(struct callback_head *cb)
__io_req_task_submit(req);
}
-static void io_req_task_queue(struct io_kiocb *req)
+static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
{
- int ret;
+ req->result = ret;
+ req->task_work.func = io_req_task_cancel;
- req->task_work.func = io_req_task_submit;
- ret = io_req_task_work_add(req);
- if (unlikely(ret)) {
- req->result = -ECANCELED;
+ if (unlikely(io_req_task_work_add(req)))
io_req_task_work_add_fallback(req, io_req_task_cancel);
- }
}
-static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
+static void io_req_task_queue(struct io_kiocb *req)
{
- req->result = ret;
- req->task_work.func = io_req_task_cancel;
+ req->task_work.func = io_req_task_submit;
if (unlikely(io_req_task_work_add(req)))
- io_req_task_work_add_fallback(req, io_req_task_cancel);
+ io_req_task_queue_fail(req, -ECANCELED);
}
static inline void io_queue_next(struct io_kiocb *req)
--
2.24.0
* [PATCH 03/12] io_uring: further deduplicate file slot selection
From: Pavel Begunkov @ 2021-02-28 22:35 UTC (permalink / raw)
To: Jens Axboe, io-uring
io_fixed_file_slot() and io_file_from_index() behave pretty similarly;
deduplicate them by calling one from the other.
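
For reference, a tiny userspace sketch of the two-level lookup the
shared helper does (the shift/mask constants here are illustrative, not
the kernel's):

    #include <stdio.h>

    #define FILE_TABLE_SHIFT 9
    #define FILE_TABLE_MASK  ((1U << FILE_TABLE_SHIFT) - 1)

    struct table { void *files[1 << FILE_TABLE_SHIFT]; };

    /* upper bits pick the table, lower bits pick the slot inside it */
    static void **fixed_file_slot(struct table *tables, unsigned int i)
    {
            return &tables[i >> FILE_TABLE_SHIFT].files[i & FILE_TABLE_MASK];
    }

    static void *file_from_index(struct table *tables, unsigned int i)
    {
            return *fixed_file_slot(tables, i); /* reuses the slot helper */
    }

    int main(void)
    {
            static struct table tables[4];
            unsigned int i = 600;

            *fixed_file_slot(tables, i) = (void *)0x1;
            printf("index %u -> table %u slot %u, file %p\n",
                   i, i >> FILE_TABLE_SHIFT, i & FILE_TABLE_MASK,
                   file_from_index(tables, i));
            return 0;
    }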
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 23 ++++++++++-------------
1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 528ab1666eb5..e3c36c1dcfad 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6080,13 +6080,19 @@ static void io_wq_submit_work(struct io_wq_work *work)
}
}
-static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
- int index)
+static inline struct file **io_fixed_file_slot(struct fixed_rsrc_data *file_data,
+ unsigned i)
{
struct fixed_rsrc_table *table;
- table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
- return table->files[index & IORING_FILE_TABLE_MASK];
+ table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
+ return &table->files[i & IORING_FILE_TABLE_MASK];
+}
+
+static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
+ int index)
+{
+ return *io_fixed_file_slot(ctx->file_data, index);
}
static struct file *io_file_get(struct io_submit_state *state,
@@ -7397,15 +7403,6 @@ static void io_rsrc_put_work(struct work_struct *work)
}
}
-static struct file **io_fixed_file_slot(struct fixed_rsrc_data *file_data,
- unsigned i)
-{
- struct fixed_rsrc_table *table;
-
- table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
- return &table->files[i & IORING_FILE_TABLE_MASK];
-}
-
static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
{
struct fixed_rsrc_ref_node *ref_node;
--
2.24.0
* [PATCH 04/12] io_uring: add a helper failing not issued requests
From: Pavel Begunkov @ 2021-02-28 22:35 UTC (permalink / raw)
To: Jens Axboe, io-uring
Add a simple helper that posts a CQE, marks the request for link
failure, and puts both the submission and completion references.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 29 ++++++++++++++---------------
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e3c36c1dcfad..75ff9e577592 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1543,8 +1543,8 @@ static void io_cqring_fill_event(struct io_kiocb *req, long res)
__io_cqring_fill_event(req, res, 0);
}
-static inline void io_req_complete_post(struct io_kiocb *req, long res,
- unsigned int cflags)
+static void io_req_complete_post(struct io_kiocb *req, long res,
+ unsigned int cflags)
{
struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
@@ -1597,6 +1597,13 @@ static inline void io_req_complete(struct io_kiocb *req, long res)
__io_req_complete(req, 0, res, 0);
}
+static void io_req_complete_failed(struct io_kiocb *req, long res)
+{
+ req_set_fail_links(req);
+ io_put_req(req);
+ io_req_complete_post(req, res, 0);
+}
+
static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
{
struct io_submit_state *state = &ctx->submit_state;
@@ -6223,9 +6230,7 @@ static void __io_queue_sqe(struct io_kiocb *req)
io_put_req(req);
}
} else {
- req_set_fail_links(req);
- io_put_req(req);
- io_req_complete(req, ret);
+ io_req_complete_failed(req, ret);
}
if (linked_timeout)
io_queue_linked_timeout(linked_timeout);
@@ -6239,9 +6244,7 @@ static void io_queue_sqe(struct io_kiocb *req)
if (ret) {
if (ret != -EIOCBQUEUED) {
fail_req:
- req_set_fail_links(req);
- io_put_req(req);
- io_req_complete(req, ret);
+ io_req_complete_failed(req, ret);
}
} else if (req->flags & REQ_F_FORCE_ASYNC) {
ret = io_req_defer_prep(req);
@@ -6352,13 +6355,11 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
ret = io_init_req(ctx, req, sqe);
if (unlikely(ret)) {
fail_req:
- io_put_req(req);
- io_req_complete(req, ret);
+ io_req_complete_failed(req, ret);
if (link->head) {
/* fail even hard links since we don't submit */
link->head->flags |= REQ_F_FAIL_LINK;
- io_put_req(link->head);
- io_req_complete(link->head, -ECANCELED);
+ io_req_complete_failed(link->head, -ECANCELED);
link->head = NULL;
}
return ret;
@@ -8601,9 +8602,7 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
while (!list_empty(&list)) {
de = list_first_entry(&list, struct io_defer_entry, list);
list_del_init(&de->list);
- req_set_fail_links(de->req);
- io_put_req(de->req);
- io_req_complete(de->req, -ECANCELED);
+ io_req_complete_failed(de->req, -ECANCELED);
kfree(de);
}
}
--
2.24.0
* [PATCH 05/12] io_uring: refactor provide/remove buffer locking
From: Pavel Begunkov @ 2021-02-28 22:35 UTC (permalink / raw)
To: Jens Axboe, io-uring
Always complete the request while holding the mutex instead of that
strange dance with conditional lock/complete ordering.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 23 ++++++-----------------
1 file changed, 6 insertions(+), 17 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 75ff9e577592..c40c7fb7fc2e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3893,14 +3893,9 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
if (ret < 0)
req_set_fail_links(req);
- /* need to hold the lock to complete IOPOLL requests */
- if (ctx->flags & IORING_SETUP_IOPOLL) {
- __io_req_complete(req, issue_flags, ret, 0);
- io_ring_submit_unlock(ctx, !force_nonblock);
- } else {
- io_ring_submit_unlock(ctx, !force_nonblock);
- __io_req_complete(req, issue_flags, ret, 0);
- }
+ /* complete before unlock, IOPOLL may need the lock */
+ __io_req_complete(req, issue_flags, ret, 0);
+ io_ring_submit_unlock(ctx, !force_nonblock);
return 0;
}
@@ -3987,15 +3982,9 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
out:
if (ret < 0)
req_set_fail_links(req);
-
- /* need to hold the lock to complete IOPOLL requests */
- if (ctx->flags & IORING_SETUP_IOPOLL) {
- __io_req_complete(req, issue_flags, ret, 0);
- io_ring_submit_unlock(ctx, !force_nonblock);
- } else {
- io_ring_submit_unlock(ctx, !force_nonblock);
- __io_req_complete(req, issue_flags, ret, 0);
- }
+ /* complete before unlock, IOPOLL may need the lock */
+ __io_req_complete(req, issue_flags, ret, 0);
+ io_ring_submit_unlock(ctx, !force_nonblock);
return 0;
}
--
2.24.0
* [PATCH 06/12] io_uring: don't restrict issue_flags for io_openat
From: Pavel Begunkov @ 2021-02-28 22:35 UTC (permalink / raw)
To: Jens Axboe, io-uring
45d189c606292 ("io_uring: replace force_nonblock with flags") did
something strange for io_openat(), stripping all issue_flags except
IO_URING_F_NONBLOCK. Not a bug for now, but it's better to just forward
the flags unchanged.
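
A quick userspace illustration of why the masking is lossy (flag values
are made up): only the NONBLOCK bit survives, any other issue flag is
silently dropped, while plain forwarding keeps them all.

    #include <stdio.h>

    #define F_NONBLOCK          (1U << 0)
    #define F_OTHER_ISSUE_FLAG  (1U << 1)  /* stand-in for any other flag */

    int main(void)
    {
            unsigned int issue_flags = F_NONBLOCK | F_OTHER_ISSUE_FLAG;

            printf("masked:    %#x\n", issue_flags & F_NONBLOCK); /* 0x1 */
            printf("forwarded: %#x\n", issue_flags);              /* 0x3 */
            return 0;
    }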
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c40c7fb7fc2e..5b1b43c091c8 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3826,7 +3826,7 @@ static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
- return io_openat2(req, issue_flags & IO_URING_F_NONBLOCK);
+ return io_openat2(req, issue_flags);
}
static int io_remove_buffers_prep(struct io_kiocb *req,
--
2.24.0
* [PATCH 07/12] io_uring: use better types for cflags
From: Pavel Begunkov @ 2021-02-28 22:35 UTC (permalink / raw)
To: Jens Axboe, io-uring
__io_cqring_fill_event() takes cflags as a long only to squeeze it into
a u32 in the CQE, while all users pass an int or unsigned. Replace it
with unsigned int and store it as a u32 in struct io_completion to
match the CQE.
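
A small userspace sketch of the point (the struct here only mirrors the
64-bit user_data / 32-bit res / 32-bit flags shape of a CQE and is not
the real definition): the value ends up as 32 bits either way, so
unsigned int loses nothing over long.

    #include <stdint.h>
    #include <stdio.h>

    struct cqe {
            uint64_t user_data;
            int32_t  res;
            uint32_t flags;  /* cflags land here, always 32 bits */
    };

    int main(void)
    {
            long cflags = 0x1234;  /* old: passed around as long */
            struct cqe cqe = { .flags = (uint32_t)cflags };

            printf("stored cflags: %#x, field size: %zu bytes\n",
                   cqe.flags, sizeof(cqe.flags));
            return 0;
    }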
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5b1b43c091c8..049a8fbd7792 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -646,7 +646,7 @@ struct io_unlink {
struct io_completion {
struct file *file;
struct list_head list;
- int cflags;
+ u32 cflags;
};
struct io_async_connect {
@@ -1498,7 +1498,8 @@ static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
}
}
-static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
+static void __io_cqring_fill_event(struct io_kiocb *req, long res,
+ unsigned int cflags)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_uring_cqe *cqe;
--
2.24.0
* [PATCH 08/12] io_uring: refactor out send/recv async setup
From: Pavel Begunkov @ 2021-02-28 22:35 UTC (permalink / raw)
To: Jens Axboe, io-uring
IORING_OP_[SEND,RECV] don't need async setup, nor will they ever get
into io_req_prep_async(). Remove them from io_req_prep_async() and drop
the needs_async_data checks from the related setup functions.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 6 ------
1 file changed, 6 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 049a8fbd7792..6fb2baf8bd26 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4281,8 +4281,6 @@ static int io_sendmsg_prep_async(struct io_kiocb *req)
{
int ret;
- if (!io_op_defs[req->opcode].needs_async_data)
- return 0;
ret = io_sendmsg_copy_hdr(req, req->async_data);
if (!ret)
req->flags |= REQ_F_NEED_CLEANUP;
@@ -4502,8 +4500,6 @@ static int io_recvmsg_prep_async(struct io_kiocb *req)
{
int ret;
- if (!io_op_defs[req->opcode].needs_async_data)
- return 0;
ret = io_recvmsg_copy_hdr(req, req->async_data);
if (!ret)
req->flags |= REQ_F_NEED_CLEANUP;
@@ -5758,10 +5754,8 @@ static int io_req_prep_async(struct io_kiocb *req)
case IORING_OP_WRITE:
return io_rw_prep_async(req, WRITE);
case IORING_OP_SENDMSG:
- case IORING_OP_SEND:
return io_sendmsg_prep_async(req);
case IORING_OP_RECVMSG:
- case IORING_OP_RECV:
return io_recvmsg_prep_async(req);
case IORING_OP_CONNECT:
return io_connect_prep_async(req);
--
2.24.0
* [PATCH 09/12] io_uring: untie alloc_async_data and needs_async_data
From: Pavel Begunkov @ 2021-02-28 22:35 UTC (permalink / raw)
To: Jens Axboe, io-uring
All opcode handlers know perfectly well whether they need async data or
not and can skip testing needs_async_data. The exception is the generic
rw path, but it already tests the flag by hand anyway. So let the
callers check the flag and make io_alloc_async_data() allocate
unconditionally.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 14 +++-----------
1 file changed, 3 insertions(+), 11 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6fb2baf8bd26..bfc795e8258f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3062,21 +3062,13 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
}
}
-static inline int __io_alloc_async_data(struct io_kiocb *req)
+static inline int io_alloc_async_data(struct io_kiocb *req)
{
WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
return req->async_data == NULL;
}
-static int io_alloc_async_data(struct io_kiocb *req)
-{
- if (!io_op_defs[req->opcode].needs_async_data)
- return 0;
-
- return __io_alloc_async_data(req);
-}
-
static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
const struct iovec *fast_iov,
struct iov_iter *iter, bool force)
@@ -3084,7 +3076,7 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
if (!force && !io_op_defs[req->opcode].needs_async_data)
return 0;
if (!req->async_data) {
- if (__io_alloc_async_data(req)) {
+ if (io_alloc_async_data(req)) {
kfree(iovec);
return -ENOMEM;
}
@@ -5770,7 +5762,7 @@ static int io_req_defer_prep(struct io_kiocb *req)
/* some opcodes init it during the inital prep */
if (req->async_data)
return 0;
- if (__io_alloc_async_data(req))
+ if (io_alloc_async_data(req))
return -EAGAIN;
return io_req_prep_async(req);
}
--
2.24.0
* [PATCH 10/12] io_uring: rethink def->needs_async_data
From: Pavel Begunkov @ 2021-02-28 22:35 UTC (permalink / raw)
To: Jens Axboe, io-uring
needs_async_data controls the allocation of async_data and is used in
two cases: 1) when async setup requires it (by io_req_prep_async() or
the handlers themselves), and 2) when an op always needs additional
space to operate, as timeouts do.

Opcode preps already don't bother with the second case and do the
allocation unconditionally, so restrict needs_async_data to the first
case only and rename it to needs_async_setup.
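
A rough sketch of the distinction (illustrative userspace code, not the
kernel's op table): async_size says how much per-request async state an
op may use, while needs_async_setup says whether that state must be
prepared before the request can be punted.

    #include <stdio.h>

    struct op_def {
            unsigned needs_async_setup : 1; /* prep async data before punting */
            unsigned short async_size;      /* bytes of async data, if any */
    };

    /* two representative ops: a vectored read and a timeout */
    static const struct op_def defs[] = {
            { .needs_async_setup = 1, .async_size = 64 }, /* READV-like   */
            { .needs_async_setup = 0, .async_size = 32 }, /* TIMEOUT-like */
    };

    int main(void)
    {
            for (unsigned int i = 0; i < 2; i++)
                    printf("op %u: setup-before-punt=%u, async_size=%u\n",
                           i, defs[i].needs_async_setup, defs[i].async_size);
            return 0;
    }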
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 29 +++++++++++------------------
1 file changed, 11 insertions(+), 18 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index bfc795e8258f..c9ab7ee5d500 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -833,8 +833,8 @@ struct io_op_def {
unsigned pollout : 1;
/* op supports buffer selection */
unsigned buffer_select : 1;
- /* must always have async data allocated */
- unsigned needs_async_data : 1;
+ /* do prep async if is going to be punted */
+ unsigned needs_async_setup : 1;
/* should block plug */
unsigned plug : 1;
/* size of async data needed, if any */
@@ -848,7 +848,7 @@ static const struct io_op_def io_op_defs[] = {
.unbound_nonreg_file = 1,
.pollin = 1,
.buffer_select = 1,
- .needs_async_data = 1,
+ .needs_async_setup = 1,
.plug = 1,
.async_size = sizeof(struct io_async_rw),
},
@@ -857,7 +857,7 @@ static const struct io_op_def io_op_defs[] = {
.hash_reg_file = 1,
.unbound_nonreg_file = 1,
.pollout = 1,
- .needs_async_data = 1,
+ .needs_async_setup = 1,
.plug = 1,
.async_size = sizeof(struct io_async_rw),
},
@@ -891,7 +891,7 @@ static const struct io_op_def io_op_defs[] = {
.needs_file = 1,
.unbound_nonreg_file = 1,
.pollout = 1,
- .needs_async_data = 1,
+ .needs_async_setup = 1,
.async_size = sizeof(struct io_async_msghdr),
},
[IORING_OP_RECVMSG] = {
@@ -899,11 +899,10 @@ static const struct io_op_def io_op_defs[] = {
.unbound_nonreg_file = 1,
.pollin = 1,
.buffer_select = 1,
- .needs_async_data = 1,
+ .needs_async_setup = 1,
.async_size = sizeof(struct io_async_msghdr),
},
[IORING_OP_TIMEOUT] = {
- .needs_async_data = 1,
.async_size = sizeof(struct io_timeout_data),
},
[IORING_OP_TIMEOUT_REMOVE] = {
@@ -916,14 +915,13 @@ static const struct io_op_def io_op_defs[] = {
},
[IORING_OP_ASYNC_CANCEL] = {},
[IORING_OP_LINK_TIMEOUT] = {
- .needs_async_data = 1,
.async_size = sizeof(struct io_timeout_data),
},
[IORING_OP_CONNECT] = {
.needs_file = 1,
.unbound_nonreg_file = 1,
.pollout = 1,
- .needs_async_data = 1,
+ .needs_async_setup = 1,
.async_size = sizeof(struct io_async_connect),
},
[IORING_OP_FALLOCATE] = {
@@ -3073,7 +3071,7 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
const struct iovec *fast_iov,
struct iov_iter *iter, bool force)
{
- if (!force && !io_op_defs[req->opcode].needs_async_data)
+ if (!force && !io_op_defs[req->opcode].needs_async_setup)
return 0;
if (!req->async_data) {
if (io_alloc_async_data(req)) {
@@ -5738,12 +5736,8 @@ static int io_req_prep_async(struct io_kiocb *req)
{
switch (req->opcode) {
case IORING_OP_READV:
- case IORING_OP_READ_FIXED:
- case IORING_OP_READ:
return io_rw_prep_async(req, READ);
case IORING_OP_WRITEV:
- case IORING_OP_WRITE_FIXED:
- case IORING_OP_WRITE:
return io_rw_prep_async(req, WRITE);
case IORING_OP_SENDMSG:
return io_sendmsg_prep_async(req);
@@ -5757,11 +5751,10 @@ static int io_req_prep_async(struct io_kiocb *req)
static int io_req_defer_prep(struct io_kiocb *req)
{
- if (!io_op_defs[req->opcode].needs_async_data)
- return 0;
- /* some opcodes init it during the inital prep */
- if (req->async_data)
+ if (!io_op_defs[req->opcode].needs_async_setup)
return 0;
+ if (WARN_ON_ONCE(req->async_data))
+ return -EFAULT;
if (io_alloc_async_data(req))
return -EAGAIN;
return io_req_prep_async(req);
--
2.24.0
* [PATCH 11/12] io_uring: merge defer_prep() and prep_async()
From: Pavel Begunkov @ 2021-02-28 22:35 UTC (permalink / raw)
To: Jens Axboe, io-uring
Merge the two functions and rename in favour of the second one, as it
conveys the meaning better.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 28 +++++++++++++---------------
1 file changed, 13 insertions(+), 15 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c9ab7ee5d500..0bdaf5105d11 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5734,6 +5734,13 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static int io_req_prep_async(struct io_kiocb *req)
{
+ if (!io_op_defs[req->opcode].needs_async_setup)
+ return 0;
+ if (WARN_ON_ONCE(req->async_data))
+ return -EFAULT;
+ if (io_alloc_async_data(req))
+ return -EAGAIN;
+
switch (req->opcode) {
case IORING_OP_READV:
return io_rw_prep_async(req, READ);
@@ -5746,18 +5753,9 @@ static int io_req_prep_async(struct io_kiocb *req)
case IORING_OP_CONNECT:
return io_connect_prep_async(req);
}
- return 0;
-}
-
-static int io_req_defer_prep(struct io_kiocb *req)
-{
- if (!io_op_defs[req->opcode].needs_async_setup)
- return 0;
- if (WARN_ON_ONCE(req->async_data))
- return -EFAULT;
- if (io_alloc_async_data(req))
- return -EAGAIN;
- return io_req_prep_async(req);
+ printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
+ req->opcode);
+ return -EFAULT;
}
static u32 io_get_sequence(struct io_kiocb *req)
@@ -5790,7 +5788,7 @@ static int io_req_defer(struct io_kiocb *req)
if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
return 0;
- ret = io_req_defer_prep(req);
+ ret = io_req_prep_async(req);
if (ret)
return ret;
io_prep_async_link(req);
@@ -6216,7 +6214,7 @@ static void io_queue_sqe(struct io_kiocb *req)
io_req_complete_failed(req, ret);
}
} else if (req->flags & REQ_F_FORCE_ASYNC) {
- ret = io_req_defer_prep(req);
+ ret = io_req_prep_async(req);
if (unlikely(ret))
goto fail_req;
io_queue_async_work(req);
@@ -6362,7 +6360,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
head->flags |= REQ_F_IO_DRAIN;
ctx->drain_next = 1;
}
- ret = io_req_defer_prep(req);
+ ret = io_req_prep_async(req);
if (unlikely(ret))
goto fail_req;
trace_io_uring_link(ctx, req, head);
--
2.24.0
* [PATCH 12/12] io_uring: simplify io_resubmit_prep()
From: Pavel Begunkov @ 2021-02-28 22:35 UTC (permalink / raw)
To: Jens Axboe, io-uring
If not for the async_data NULL check, io_resubmit_prep() is already an
rw-specific version of io_req_prep_async(), only slower because 1) it
always goes through io_import_iovec() even when the following
io_setup_async_rw() may discard the result, and 2) instead of
initialising the iovec/iter in place it does it on-stack and then
copies them with io_setup_async_rw(). Reuse io_req_prep_async() instead.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 37 +++----------------------------------
1 file changed, 3 insertions(+), 34 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0bdaf5105d11..61697acf3717 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1014,14 +1014,10 @@ static struct file *io_file_get(struct io_submit_state *state,
static void __io_queue_sqe(struct io_kiocb *req);
static void io_rsrc_put_work(struct work_struct *work);
-static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
- struct iov_iter *iter, bool needs_lock);
-static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
- const struct iovec *fast_iov,
- struct iov_iter *iter, bool force);
static void io_req_task_queue(struct io_kiocb *req);
static void io_submit_flush_completions(struct io_comp_state *cs,
struct io_ring_ctx *ctx);
+static int io_req_prep_async(struct io_kiocb *req);
static struct kmem_cache *req_cachep;
@@ -2404,35 +2400,8 @@ static void kiocb_end_write(struct io_kiocb *req)
#ifdef CONFIG_BLOCK
static bool io_resubmit_prep(struct io_kiocb *req)
{
- struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
- int rw, ret;
- struct iov_iter iter;
-
- /* already prepared */
- if (req->async_data)
- return true;
-
- switch (req->opcode) {
- case IORING_OP_READV:
- case IORING_OP_READ_FIXED:
- case IORING_OP_READ:
- rw = READ;
- break;
- case IORING_OP_WRITEV:
- case IORING_OP_WRITE_FIXED:
- case IORING_OP_WRITE:
- rw = WRITE;
- break;
- default:
- printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
- req->opcode);
- return false;
- }
-
- ret = io_import_iovec(rw, req, &iovec, &iter, false);
- if (ret < 0)
- return false;
- return !io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
+ /* either already prepared or successfully done */
+ return req->async_data || !io_req_prep_async(req);
}
#endif
--
2.24.0
* Re: [PATCH RESEND for-next 00/12] 5.13 first batch
From: Jens Axboe @ 2021-03-01 23:53 UTC (permalink / raw)
To: Pavel Begunkov, io-uring
On 2/28/21 3:35 PM, Pavel Begunkov wrote:
> 1-7 are just random stuff
> 8-12 are further cleanups for around prep_async
>
> based on io_uring-worker.v4, but should apply fine to for-next
>
> Pavel Begunkov (12):
> io_uring: avoid taking ctx refs for task-cancel
> io_uring: reuse io_req_task_queue_fail()
> io_uring: further deduplicate file slot selection
> io_uring: add a helper failing not issued requests
> io_uring: refactor provide/remove buffer locking
> io_uring: don't restrict issue_flags for io_openat
> io_uring: use better types for cflags
> io_uring: refactor out send/recv async setup
> io_uring: untie alloc_async_data and needs_async_data
> io_uring: rethink def->needs_async_data
> io_uring: merge defer_prep() and prep_async()
> io_uring: simplify io_resubmit_prep()
>
> fs/io_uring.c | 210 ++++++++++++++++----------------------------------
> 1 file changed, 68 insertions(+), 142 deletions(-)
>
Thanks, I've queued this up for 5.13 - I'll most likely rebase this
branch a few times going forward, until we have 5.12 fully settled.
--
Jens Axboe