* [PATCH 01/16] io_uring: don't take ctx refs in task_work handler
From: Pavel Begunkov @ 2021-03-19 17:22 UTC
To: Jens Axboe, io-uring
__tctx_task_work() guarantees that ctx won't be killed while running
task_works, so we can remove the now-unnecessary ctx pinning for
internally armed polling.
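The pinning being removed is the usual percpu_ref pattern around the
callback, roughly (simplified from the hunks below):

    percpu_ref_get(&req->ctx->refs);   /* pin ctx before arming the task_work */
    ...
    /* later, in the task_work callback */
    percpu_ref_put(&ctx->refs);        /* unpin when done */

Since the task_work runner already keeps the ctx alive for the duration
of the handlers, this get/put pair is pure overhead.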
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 5 -----
1 file changed, 5 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5aa71304f25e..8ef8809b851f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4864,7 +4864,6 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
req->result = mask;
req->task_work.func = func;
- percpu_ref_get(&req->ctx->refs);
/*
* If this fails, then the task is exiting. When a task exits, the
@@ -4961,8 +4960,6 @@ static void io_poll_task_func(struct callback_head *cb)
if (nxt)
__io_req_task_submit(nxt);
}
-
- percpu_ref_put(&ctx->refs);
}
static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
@@ -5069,7 +5066,6 @@ static void io_async_task_func(struct callback_head *cb)
if (io_poll_rewait(req, &apoll->poll)) {
spin_unlock_irq(&ctx->completion_lock);
- percpu_ref_put(&ctx->refs);
return;
}
@@ -5085,7 +5081,6 @@ static void io_async_task_func(struct callback_head *cb)
else
__io_req_task_cancel(req, -ECANCELED);
- percpu_ref_put(&ctx->refs);
kfree(apoll->double_poll);
kfree(apoll);
}
--
2.24.0
* [PATCH 02/16] io_uring: optimise io_uring_enter()
From: Pavel Begunkov @ 2021-03-19 17:22 UTC
To: Jens Axboe, io-uring
Add unlikely() annotations because my compiler pretty much mispredicts
every first check, and apart from jumping around in the fast path, it
also generates extra instructions, like setting the ret value in advance.
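For reference, unlikely() is just a branch-layout hint to the compiler,
roughly (see include/linux/compiler.h):

    #define unlikely(x) __builtin_expect(!!(x), 0)

so annotating the error checks lets the compiler keep the success path
as straight-line code and push the error handling out of line.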
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8ef8809b851f..439f95111b18 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -9138,31 +9138,31 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
size_t, argsz)
{
struct io_ring_ctx *ctx;
- long ret = -EBADF;
int submitted = 0;
struct fd f;
+ long ret;
io_run_task_work();
- if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
- IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG))
+ if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
+ IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG)))
return -EINVAL;
f = fdget(fd);
- if (!f.file)
+ if (unlikely(!f.file))
return -EBADF;
ret = -EOPNOTSUPP;
- if (f.file->f_op != &io_uring_fops)
+ if (unlikely(f.file->f_op != &io_uring_fops))
goto out_fput;
ret = -ENXIO;
ctx = f.file->private_data;
- if (!percpu_ref_tryget(&ctx->refs))
+ if (unlikely(!percpu_ref_tryget(&ctx->refs)))
goto out_fput;
ret = -EBADFD;
- if (ctx->flags & IORING_SETUP_R_DISABLED)
+ if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
goto out;
/*
--
2.24.0
* [PATCH 03/16] io_uring: optimise tctx node checks/alloc
From: Pavel Begunkov @ 2021-03-19 17:22 UTC
To: Jens Axboe, io-uring
First of all, we need to set tctx->sqpoll only when we add a new entry
into ->xa, so move it out of the hot path. Also extract a hot path for
io_uring_add_task_file() as an inline helper.
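With that, the hot path shrinks to a single comparison in the inline
helper added below:

    static inline int io_uring_add_task_file(struct io_ring_ctx *ctx)
    {
        struct io_uring_task *tctx = current->io_uring;

        if (likely(tctx && tctx->last == ctx))
            return 0;
        return __io_uring_add_task_file(ctx);
    }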
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 53 ++++++++++++++++++++++++++++-----------------------
1 file changed, 29 insertions(+), 24 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 439f95111b18..e00ac529df0e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8803,10 +8803,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
}
}
-/*
- * Note that this task has used io_uring. We use it for cancelation purposes.
- */
-static int io_uring_add_task_file(struct io_ring_ctx *ctx)
+static int __io_uring_add_task_file(struct io_ring_ctx *ctx)
{
struct io_uring_task *tctx = current->io_uring;
struct io_tctx_node *node;
@@ -8818,32 +8815,40 @@ static int io_uring_add_task_file(struct io_ring_ctx *ctx)
return ret;
tctx = current->io_uring;
}
- if (tctx->last != ctx) {
- void *old = xa_load(&tctx->xa, (unsigned long)ctx);
-
- if (!old) {
- node = kmalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
- node->ctx = ctx;
- node->task = current;
-
- ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
- node, GFP_KERNEL));
- if (ret) {
- kfree(node);
- return ret;
- }
+ if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
+ node = kmalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+ node->ctx = ctx;
+ node->task = current;
- mutex_lock(&ctx->uring_lock);
- list_add(&node->ctx_node, &ctx->tctx_list);
- mutex_unlock(&ctx->uring_lock);
+ ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
+ node, GFP_KERNEL));
+ if (ret) {
+ kfree(node);
+ return ret;
}
- tctx->last = ctx;
+
+ mutex_lock(&ctx->uring_lock);
+ list_add(&node->ctx_node, &ctx->tctx_list);
+ mutex_unlock(&ctx->uring_lock);
}
+ tctx->last = ctx;
return 0;
}
+/*
+ * Note that this task has used io_uring. We use it for cancelation purposes.
+ */
+static inline int io_uring_add_task_file(struct io_ring_ctx *ctx)
+{
+ struct io_uring_task *tctx = current->io_uring;
+
+ if (likely(tctx && tctx->last == ctx))
+ return 0;
+ return __io_uring_add_task_file(ctx);
+}
+
/*
* Remove this io_uring_file -> task mapping.
*/
--
2.24.0
* [PATCH 04/16] io_uring: keep io_req_free_batch() call locality
From: Pavel Begunkov @ 2021-03-19 17:22 UTC
To: Jens Axboe, io-uring
Don't do a function call (io_dismantle_req()) in the middle; place it
near the other function calls instead, otherwise it may lead to
excessive register spilling.
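In other words, the ordering becomes roughly:

    io_queue_next(req);
    io_dismantle_req(req);        /* both calls back to back */

    /* only cheap field accesses from here on */
    if (req->task != rb->task) {
        ...
    }

so fewer values have to be kept live (and potentially spilled) across
the io_dismantle_req() call.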
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e00ac529df0e..e8be345c81ff 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2132,6 +2132,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
struct io_submit_state *state)
{
io_queue_next(req);
+ io_dismantle_req(req);
if (req->task != rb->task) {
if (rb->task)
@@ -2142,7 +2143,6 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
rb->task_refs++;
rb->ctx_refs++;
- io_dismantle_req(req);
if (state->free_reqs != ARRAY_SIZE(state->reqs))
state->reqs[state->free_reqs++] = req;
else
--
2.24.0
* [PATCH 05/16] io_uring: inline __io_queue_linked_timeout()
From: Pavel Begunkov @ 2021-03-19 17:22 UTC
To: Jens Axboe, io-uring
Inline __io_queue_linked_timeout(); we don't need it as a separate
function.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 15 ++++-----------
1 file changed, 4 insertions(+), 11 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e8be345c81ff..a5e5c8da1081 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1026,7 +1026,6 @@ static void io_dismantle_req(struct io_kiocb *req);
static void io_put_task(struct task_struct *task, int nr);
static void io_queue_next(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
-static void __io_queue_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
struct io_uring_rsrc_update *ip,
@@ -6272,8 +6271,11 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-static void __io_queue_linked_timeout(struct io_kiocb *req)
+static void io_queue_linked_timeout(struct io_kiocb *req)
{
+ struct io_ring_ctx *ctx = req->ctx;
+
+ spin_lock_irq(&ctx->completion_lock);
/*
* If the back reference is NULL, then our linked request finished
* before we got a chance to setup the timer
@@ -6285,16 +6287,7 @@ static void __io_queue_linked_timeout(struct io_kiocb *req)
hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
data->mode);
}
-}
-
-static void io_queue_linked_timeout(struct io_kiocb *req)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- spin_lock_irq(&ctx->completion_lock);
- __io_queue_linked_timeout(req);
spin_unlock_irq(&ctx->completion_lock);
-
/* drop submission reference */
io_put_req(req);
}
--
2.24.0
* [PATCH 06/16] io_uring: optimise success case of __io_queue_sqe
From: Pavel Begunkov @ 2021-03-19 17:22 UTC
To: Jens Axboe, io-uring
Move the successfully-issued request case up by doing that check first.
It's not much of a difference, but it generates slightly better code
for me.
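The resulting structure tests the common outcome first, roughly:

    if (likely(!ret)) {
        /* issued inline: complete or drop the submission reference */
    } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
        /* arm poll or punt to async workers */
    } else {
        io_req_complete_failed(req, ret);
    }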
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index a5e5c8da1081..c29f96e3111d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6317,15 +6317,7 @@ static void __io_queue_sqe(struct io_kiocb *req)
* We async punt it if the file wasn't marked NOWAIT, or if the file
* doesn't support non-blocking read/write attempts
*/
- if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
- if (!io_arm_poll_handler(req)) {
- /*
- * Queued up for async execution, worker will release
- * submit reference when the iocb is actually submitted.
- */
- io_queue_async_work(req);
- }
- } else if (likely(!ret)) {
+ if (likely(!ret)) {
/* drop submission reference */
if (req->flags & REQ_F_COMPLETE_INLINE) {
struct io_ring_ctx *ctx = req->ctx;
@@ -6337,6 +6329,14 @@ static void __io_queue_sqe(struct io_kiocb *req)
} else {
io_put_req(req);
}
+ } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
+ if (!io_arm_poll_handler(req)) {
+ /*
+ * Queued up for async execution, worker will release
+ * submit reference when the iocb is actually submitted.
+ */
+ io_queue_async_work(req);
+ }
} else {
io_req_complete_failed(req, ret);
}
--
2.24.0
* [PATCH 07/16] io_uring: refactor io_flush_cached_reqs()
From: Pavel Begunkov @ 2021-03-19 17:22 UTC
To: Jens Axboe, io-uring
Emphasize that the return value of io_flush_cached_reqs() depends on
the number of requests in the cache. It looks nicer and might save
analysis tools from false negatives.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c29f96e3111d..e4c92498a0af 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1647,11 +1647,12 @@ static void io_req_complete_failed(struct io_kiocb *req, long res)
io_req_complete_post(req, res, 0);
}
+/* Returns true IFF there are requests in the cache */
static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
{
struct io_submit_state *state = &ctx->submit_state;
struct io_comp_state *cs = &state->comp;
- struct io_kiocb *req = NULL;
+ int nr;
/*
* If we have more than a batch's worth of requests in our IRQ side
@@ -1665,16 +1666,19 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
spin_unlock_irq(&ctx->completion_lock);
}
+ nr = state->free_reqs;
while (!list_empty(&cs->free_list)) {
- req = list_first_entry(&cs->free_list, struct io_kiocb,
- compl.list);
+ struct io_kiocb *req = list_first_entry(&cs->free_list,
+ struct io_kiocb, compl.list);
+
list_del(&req->compl.list);
- state->reqs[state->free_reqs++] = req;
- if (state->free_reqs == ARRAY_SIZE(state->reqs))
+ state->reqs[nr++] = req;
+ if (nr == ARRAY_SIZE(state->reqs))
break;
}
- return req != NULL;
+ state->free_reqs = nr;
+ return nr != 0;
}
static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
--
2.24.0
* [PATCH 08/16] io_uring: refactor rsrc refnode allocation
From: Pavel Begunkov @ 2021-03-19 17:22 UTC
To: Jens Axboe, io-uring
There are two problems:
1) We always allocate refnodes in advance and free them if they haven't
been used. It's expensive, taking two allocations, one of which is
percpu. And it may be pretty common to not actually use them.
2) The current API of allocating a refnode and setting some of its
fields is error prone; we don't ever want to have a file node running
the fixed buffer callback...
Solve both with a pre-init/get API. Pre-init just leaves the node for
later if it's not used, and for get (i.e. io_rsrc_refnode_get()) you
need to explicitly pass all the arguments setting the callbacks etc.,
so it's more resilient.
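A typical user of the new API then looks roughly like the
__io_sqe_files_update() hunk below:

    err = io_rsrc_refnode_prealloc(ctx);    /* no-op if a backup node is already cached */
    if (err)
        return err;
    ...
    if (needs_switch) {
        percpu_ref_kill(&data->node->refs);
        ref_node = io_rsrc_refnode_get(ctx, data, io_ring_file_put);
        io_sqe_rsrc_set_node(ctx, data, ref_node);
    }

i.e. the callback (io_ring_file_put here) is bound explicitly at get
time instead of being smeared over separate field assignments.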
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 58 ++++++++++++++++++++++++++++++++++-----------------
1 file changed, 39 insertions(+), 19 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e4c92498a0af..6655246287f3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -443,6 +443,7 @@ struct io_ring_ctx {
struct llist_head rsrc_put_llist;
struct list_head rsrc_ref_list;
spinlock_t rsrc_ref_lock;
+ struct fixed_rsrc_ref_node *rsrc_backup_node;
struct io_restriction restrictions;
@@ -7021,12 +7022,36 @@ static void io_sqe_rsrc_kill_node(struct io_ring_ctx *ctx, struct fixed_rsrc_dat
percpu_ref_kill(&ref_node->refs);
}
+static int io_rsrc_refnode_prealloc(struct io_ring_ctx *ctx)
+{
+ if (ctx->rsrc_backup_node)
+ return 0;
+ ctx->rsrc_backup_node = alloc_fixed_rsrc_ref_node(ctx);
+ return ctx->rsrc_backup_node ? 0 : -ENOMEM;
+}
+
+static struct fixed_rsrc_ref_node *
+io_rsrc_refnode_get(struct io_ring_ctx *ctx,
+ struct fixed_rsrc_data *rsrc_data,
+ void (*rsrc_put)(struct io_ring_ctx *ctx,
+ struct io_rsrc_put *prsrc))
+{
+ struct fixed_rsrc_ref_node *node = ctx->rsrc_backup_node;
+
+ WARN_ON_ONCE(!node);
+
+ ctx->rsrc_backup_node = NULL;
+ node->rsrc_data = rsrc_data;
+ node->rsrc_put = rsrc_put;
+ return node;
+}
+
static int io_rsrc_ref_quiesce(struct fixed_rsrc_data *data,
struct io_ring_ctx *ctx,
void (*rsrc_put)(struct io_ring_ctx *ctx,
struct io_rsrc_put *prsrc))
{
- struct fixed_rsrc_ref_node *backup_node;
+ struct fixed_rsrc_ref_node *node;
int ret;
if (data->quiesce)
@@ -7034,13 +7059,9 @@ static int io_rsrc_ref_quiesce(struct fixed_rsrc_data *data,
data->quiesce = true;
do {
- ret = -ENOMEM;
- backup_node = alloc_fixed_rsrc_ref_node(ctx);
- if (!backup_node)
+ ret = io_rsrc_refnode_prealloc(ctx);
+ if (ret)
break;
- backup_node->rsrc_data = data;
- backup_node->rsrc_put = rsrc_put;
-
io_sqe_rsrc_kill_node(ctx, data);
percpu_ref_kill(&data->refs);
flush_delayed_work(&ctx->rsrc_put_work);
@@ -7050,17 +7071,16 @@ static int io_rsrc_ref_quiesce(struct fixed_rsrc_data *data,
break;
percpu_ref_resurrect(&data->refs);
- io_sqe_rsrc_set_node(ctx, data, backup_node);
- backup_node = NULL;
+ node = io_rsrc_refnode_get(ctx, data, rsrc_put);
+ io_sqe_rsrc_set_node(ctx, data, node);
reinit_completion(&data->done);
+
mutex_unlock(&ctx->uring_lock);
ret = io_run_task_work_sig();
mutex_lock(&ctx->uring_lock);
} while (ret >= 0);
data->quiesce = false;
- if (backup_node)
- destroy_fixed_rsrc_ref_node(backup_node);
return ret;
}
@@ -7711,11 +7731,9 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
return -EOVERFLOW;
if (done > ctx->nr_user_files)
return -EINVAL;
-
- ref_node = alloc_fixed_rsrc_ref_node(ctx);
- if (!ref_node)
- return -ENOMEM;
- init_fixed_file_ref_node(ctx, ref_node);
+ err = io_rsrc_refnode_prealloc(ctx);
+ if (err)
+ return err;
fds = u64_to_user_ptr(up->data);
for (done = 0; done < nr_args; done++) {
@@ -7768,10 +7786,9 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
if (needs_switch) {
percpu_ref_kill(&data->node->refs);
+ ref_node = io_rsrc_refnode_get(ctx, data, io_ring_file_put);
io_sqe_rsrc_set_node(ctx, data, ref_node);
- } else
- destroy_fixed_rsrc_ref_node(ref_node);
-
+ }
return done ? done : err;
}
@@ -8447,6 +8464,9 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_eventfd_unregister(ctx);
io_destroy_buffers(ctx);
+ if (ctx->rsrc_backup_node)
+ destroy_fixed_rsrc_ref_node(ctx->rsrc_backup_node);
+
#if defined(CONFIG_UNIX)
if (ctx->ring_sock) {
ctx->ring_sock->file = NULL; /* so that iput() is called */
--
2.24.0
* [PATCH 09/16] io_uring: inline io_put_req and friends
From: Pavel Begunkov @ 2021-03-19 17:22 UTC
To: Jens Axboe, io-uring
One big omission is that io_put_req() hasn't been marked inline, and at
least gcc 9 doesn't inline it, not to mention that it's really hot and
an extra function call is intolerable, especially when it doesn't put
the final ref.
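With the inline hint, a non-final put boils down to a single reference
decrement at the call site:

    static inline void io_put_req(struct io_kiocb *req)
    {
        if (req_ref_put_and_test(req))
            io_free_req(req);
    }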
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6655246287f3..10d0e3c6537c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2186,7 +2186,7 @@ static void io_submit_flush_completions(struct io_comp_state *cs,
* Drop reference to request, return next in chain (if there is one) if this
* was the last reference to this request.
*/
-static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
+static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
{
struct io_kiocb *nxt = NULL;
@@ -2197,7 +2197,7 @@ static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
return nxt;
}
-static void io_put_req(struct io_kiocb *req)
+static inline void io_put_req(struct io_kiocb *req)
{
if (req_ref_put_and_test(req))
io_free_req(req);
--
2.24.0
* [PATCH 10/16] io_uring: refactor io_free_req_deferred()
From: Pavel Begunkov @ 2021-03-19 17:22 UTC
To: Jens Axboe, io-uring
We don't care about the ret value in io_free_req_deferred(), so make
the code a bit more concise.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 10d0e3c6537c..d081ef54fb02 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2212,11 +2212,8 @@ static void io_put_req_deferred_cb(struct callback_head *cb)
static void io_free_req_deferred(struct io_kiocb *req)
{
- int ret;
-
req->task_work.func = io_put_req_deferred_cb;
- ret = io_req_task_work_add(req);
- if (unlikely(ret))
+ if (unlikely(io_req_task_work_add(req)))
io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
}
--
2.24.0
* [PATCH 11/16] io_uring: add helper flushing locked_free_list
From: Pavel Begunkov @ 2021-03-19 17:22 UTC
To: Jens Axboe, io-uring
Add a new helper, io_flush_cached_locked_reqs(), that splices
locked_free_list into free_list and does it right, taking care of all
the synchronisation and invariant re-initialisation.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index d081ef54fb02..6a5d712245f7 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1648,6 +1648,15 @@ static void io_req_complete_failed(struct io_kiocb *req, long res)
io_req_complete_post(req, res, 0);
}
+static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
+ struct io_comp_state *cs)
+{
+ spin_lock_irq(&ctx->completion_lock);
+ list_splice_init(&cs->locked_free_list, &cs->free_list);
+ cs->locked_free_nr = 0;
+ spin_unlock_irq(&ctx->completion_lock);
+}
+
/* Returns true IFF there are requests in the cache */
static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
{
@@ -1660,12 +1669,8 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
* locked cache, grab the lock and move them over to our submission
* side cache.
*/
- if (READ_ONCE(cs->locked_free_nr) > IO_COMPL_BATCH) {
- spin_lock_irq(&ctx->completion_lock);
- list_splice_init(&cs->locked_free_list, &cs->free_list);
- cs->locked_free_nr = 0;
- spin_unlock_irq(&ctx->completion_lock);
- }
+ if (READ_ONCE(cs->locked_free_nr) > IO_COMPL_BATCH)
+ io_flush_cached_locked_reqs(ctx, cs);
nr = state->free_reqs;
while (!list_empty(&cs->free_list)) {
@@ -8425,13 +8430,8 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
submit_state->free_reqs = 0;
}
- spin_lock_irq(&ctx->completion_lock);
- list_splice_init(&cs->locked_free_list, &cs->free_list);
- cs->locked_free_nr = 0;
- spin_unlock_irq(&ctx->completion_lock);
-
+ io_flush_cached_locked_reqs(ctx, cs);
io_req_cache_free(&cs->free_list, NULL);
-
mutex_unlock(&ctx->uring_lock);
}
--
2.24.0
* [PATCH 12/16] io_uring: remove __io_req_task_cancel()
From: Pavel Begunkov @ 2021-03-19 17:22 UTC
To: Jens Axboe, io-uring
Both io_req_complete_failed() and __io_req_task_cancel() do the same
thing: set the failure flag, put both req refs and emit a CQE. The
former is a bit more advanced as it puts the req back into the req
cache, so make it take over from __io_req_task_cancel() and remove the
latter.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 28 +++-------------------------
1 file changed, 3 insertions(+), 25 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6a5d712245f7..e46e4d5c3676 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1022,7 +1022,6 @@ static bool io_rw_reissue(struct io_kiocb *req);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req, int nr);
-static void io_double_put_req(struct io_kiocb *req);
static void io_dismantle_req(struct io_kiocb *req);
static void io_put_task(struct task_struct *task, int nr);
static void io_queue_next(struct io_kiocb *req);
@@ -2039,20 +2038,6 @@ static void io_req_task_work_add_fallback(struct io_kiocb *req,
io_task_work_add_head(&req->ctx->exit_task_work, &req->task_work);
}
-static void __io_req_task_cancel(struct io_kiocb *req, int error)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- spin_lock_irq(&ctx->completion_lock);
- io_cqring_fill_event(req, error);
- io_commit_cqring(ctx);
- spin_unlock_irq(&ctx->completion_lock);
-
- io_cqring_ev_posted(ctx);
- req_set_fail_links(req);
- io_double_put_req(req);
-}
-
static void io_req_task_cancel(struct callback_head *cb)
{
struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
@@ -2060,7 +2045,7 @@ static void io_req_task_cancel(struct callback_head *cb)
/* ctx is guaranteed to stay alive while we hold uring_lock */
mutex_lock(&ctx->uring_lock);
- __io_req_task_cancel(req, req->result);
+ io_req_complete_failed(req, req->result);
mutex_unlock(&ctx->uring_lock);
}
@@ -2073,7 +2058,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
if (!(current->flags & PF_EXITING) && !current->in_execve)
__io_queue_sqe(req);
else
- __io_req_task_cancel(req, -EFAULT);
+ io_req_complete_failed(req, -EFAULT);
mutex_unlock(&ctx->uring_lock);
}
@@ -2228,13 +2213,6 @@ static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
io_free_req_deferred(req);
}
-static void io_double_put_req(struct io_kiocb *req)
-{
- /* drop both submit and complete references */
- if (req_ref_sub_and_test(req, 2))
- io_free_req(req);
-}
-
static unsigned io_cqring_events(struct io_ring_ctx *ctx)
{
/* See comment at the top of this file */
@@ -5085,7 +5063,7 @@ static void io_async_task_func(struct callback_head *cb)
if (!READ_ONCE(apoll->poll.canceled))
__io_req_task_submit(req);
else
- __io_req_task_cancel(req, -ECANCELED);
+ io_req_complete_failed(req, -ECANCELED);
kfree(apoll->double_poll);
kfree(apoll);
--
2.24.0
* [PATCH 13/16] io_uring: inline io_clean_op()'s fast path
From: Pavel Begunkov @ 2021-03-19 17:22 UTC
To: Jens Axboe, io-uring
Inline the io_clean_op() fast-path check at the call sites, keeping
__io_clean_op() but renaming it to io_clean_op(). This will be used in
the following patches.
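The fast-path check is now open-coded at the call sites:

    if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
        io_clean_op(req);

so the common no-cleanup case costs one flag test and no function call.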
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 21 +++++++++------------
1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e46e4d5c3676..afc08ec2bc6e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1030,7 +1030,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
struct io_uring_rsrc_update *ip,
unsigned nr_args);
-static void __io_clean_op(struct io_kiocb *req);
+static void io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_submit_state *state,
struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req);
@@ -1061,12 +1061,6 @@ EXPORT_SYMBOL(io_uring_get_socket);
#define io_for_each_link(pos, head) \
for (pos = (head); pos; pos = pos->link)
-static inline void io_clean_op(struct io_kiocb *req)
-{
- if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
- __io_clean_op(req);
-}
-
static inline void io_set_resource_node(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -1564,7 +1558,9 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res,
set_bit(0, &ctx->cq_check_overflow);
ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
}
- io_clean_op(req);
+ if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
+ io_clean_op(req);
+
req->result = res;
req->compl.cflags = cflags;
req_ref_get(req);
@@ -1620,7 +1616,8 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
static void io_req_complete_state(struct io_kiocb *req, long res,
unsigned int cflags)
{
- io_clean_op(req);
+ if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
+ io_clean_op(req);
req->result = res;
req->compl.cflags = cflags;
req->flags |= REQ_F_COMPLETE_INLINE;
@@ -1728,8 +1725,8 @@ static inline void io_put_file(struct io_kiocb *req, struct file *file,
static void io_dismantle_req(struct io_kiocb *req)
{
- io_clean_op(req);
-
+ if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
+ io_clean_op(req);
if (req->async_data)
kfree(req->async_data);
if (req->file)
@@ -5927,7 +5924,7 @@ static int io_req_defer(struct io_kiocb *req)
return -EIOCBQUEUED;
}
-static void __io_clean_op(struct io_kiocb *req)
+static void io_clean_op(struct io_kiocb *req)
{
if (req->flags & REQ_F_BUFFER_SELECTED) {
switch (req->opcode) {
--
2.24.0
* [PATCH 14/16] io_uring: optimise io_dismantle_req() fast path
From: Pavel Begunkov @ 2021-03-19 17:22 UTC
To: Jens Axboe, io-uring
Reshuffle the io_dismantle_req() checks to put most of the slow path
stuff under a single if.
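The cleanup ends up grouped roughly as:

    if (flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
                 REQ_F_INFLIGHT)) {
        io_clean_op(req);
        if (req->flags & REQ_F_INFLIGHT) {
            /* take inflight_lock and unlink the request */
        }
    }

so a request with none of those flags set skips the whole block with
one test.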
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 34 +++++++++++++++++++---------------
1 file changed, 19 insertions(+), 15 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index afc08ec2bc6e..b3484cedf1f1 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1725,28 +1725,32 @@ static inline void io_put_file(struct io_kiocb *req, struct file *file,
static void io_dismantle_req(struct io_kiocb *req)
{
- if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
- io_clean_op(req);
- if (req->async_data)
- kfree(req->async_data);
+ unsigned int flags = req->flags;
+
if (req->file)
- io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
+ io_put_file(req, req->file, (flags & REQ_F_FIXED_FILE));
+ if (flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
+ REQ_F_INFLIGHT)) {
+ io_clean_op(req);
+
+ if (req->flags & REQ_F_INFLIGHT) {
+ struct io_ring_ctx *ctx = req->ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->inflight_lock, flags);
+ list_del(&req->inflight_entry);
+ spin_unlock_irqrestore(&ctx->inflight_lock, flags);
+ req->flags &= ~REQ_F_INFLIGHT;
+ }
+ }
if (req->fixed_rsrc_refs)
percpu_ref_put(req->fixed_rsrc_refs);
+ if (req->async_data)
+ kfree(req->async_data);
if (req->work.creds) {
put_cred(req->work.creds);
req->work.creds = NULL;
}
-
- if (req->flags & REQ_F_INFLIGHT) {
- struct io_ring_ctx *ctx = req->ctx;
- unsigned long flags;
-
- spin_lock_irqsave(&ctx->inflight_lock, flags);
- list_del(&req->inflight_entry);
- spin_unlock_irqrestore(&ctx->inflight_lock, flags);
- req->flags &= ~REQ_F_INFLIGHT;
- }
}
/* must to be called somewhat shortly after putting a request */
--
2.24.0
* [PATCH 15/16] io_uring: abolish old io_put_file()
From: Pavel Begunkov @ 2021-03-19 17:22 UTC
To: Jens Axboe, io-uring
io_put_file() doesn't do a good job at generating good code. Inline it,
so we can check REQ_F_FIXED_FILE first, prioritising the FIXED_FILE
case over requests without files and saving a memory load in that case.
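Callers then do the flag test themselves, e.g.:

    if (!(flags & REQ_F_FIXED_FILE))
        io_put_file(req->file);

which checks the already-loaded flags before ever touching req->file.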
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index b3484cedf1f1..d7b4cbe2ac3a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1716,10 +1716,9 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
return state->reqs[state->free_reqs];
}
-static inline void io_put_file(struct io_kiocb *req, struct file *file,
- bool fixed)
+static inline void io_put_file(struct file *file)
{
- if (!fixed)
+ if (file)
fput(file);
}
@@ -1727,8 +1726,8 @@ static void io_dismantle_req(struct io_kiocb *req)
{
unsigned int flags = req->flags;
- if (req->file)
- io_put_file(req, req->file, (flags & REQ_F_FIXED_FILE));
+ if (!(flags & REQ_F_FIXED_FILE))
+ io_put_file(req->file);
if (flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
REQ_F_INFLIGHT)) {
io_clean_op(req);
@@ -3647,7 +3646,8 @@ static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
if (sp->len)
ret = do_tee(in, out, sp->len, flags);
- io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
+ if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
+ io_put_file(in);
req->flags &= ~REQ_F_NEED_CLEANUP;
if (ret != sp->len)
@@ -3683,7 +3683,8 @@ static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
if (sp->len)
ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
- io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
+ if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
+ io_put_file(in);
req->flags &= ~REQ_F_NEED_CLEANUP;
if (ret != sp->len)
@@ -5967,8 +5968,8 @@ static void io_clean_op(struct io_kiocb *req)
}
case IORING_OP_SPLICE:
case IORING_OP_TEE:
- io_put_file(req, req->splice.file_in,
- (req->splice.flags & SPLICE_F_FD_IN_FIXED));
+ if (!(req->splice.flags & SPLICE_F_FD_IN_FIXED))
+ io_put_file(req->splice.file_in);
break;
case IORING_OP_OPENAT:
case IORING_OP_OPENAT2:
--
2.24.0
* [PATCH 16/16] io_uring: optimise io_req_task_work_add()
From: Pavel Begunkov @ 2021-03-19 17:22 UTC
To: Jens Axboe, io-uring
Inline io_task_work_add() into io_req_task_work_add(). They both work
with a request, so keeping them separate doesn't make things much
clearer, but merging them allows optimisation. Apart from small wins
like not reading req->ctx or not calculating @notify in the hot path,
i.e. with tctx->task_state set, it avoids doing wake_up_process() for
every single add, doing it only after task_work_add() has actually been
done.
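Schematically (simplified), the queueing side becomes:

    if (unlikely(tsk->flags & PF_EXITING))
        return -ESRCH;
    ...
    if (test_and_set_bit(0, &tctx->task_state))
        return 0;               /* already scheduled, the work will be picked up */

    notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
    if (!task_work_add(tsk, &tctx->task_work, notify)) {
        wake_up_process(tsk);   /* only after the work is really queued */
        return 0;
    }
    /* slow path: task_work_add() failed, unlink the node again */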
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 50 ++++++++++++++++++--------------------------------
1 file changed, 18 insertions(+), 32 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index d7b4cbe2ac3a..3548b2e60ba5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1931,13 +1931,17 @@ static void tctx_task_work(struct callback_head *cb)
cond_resched();
}
-static int io_task_work_add(struct task_struct *tsk, struct io_kiocb *req,
- enum task_work_notify_mode notify)
+static int io_req_task_work_add(struct io_kiocb *req)
{
+ struct task_struct *tsk = req->task;
struct io_uring_task *tctx = tsk->io_uring;
+ enum task_work_notify_mode notify;
struct io_wq_work_node *node, *prev;
unsigned long flags;
- int ret;
+ int ret = 0;
+
+ if (unlikely(tsk->flags & PF_EXITING))
+ return -ESRCH;
WARN_ON_ONCE(!tctx);
@@ -1950,14 +1954,23 @@ static int io_task_work_add(struct task_struct *tsk, struct io_kiocb *req,
test_and_set_bit(0, &tctx->task_state))
return 0;
- if (!task_work_add(tsk, &tctx->task_work, notify))
+ /*
+ * SQPOLL kernel thread doesn't need notification, just a wakeup. For
+ * all other cases, use TWA_SIGNAL unconditionally to ensure we're
+ * processing task_work. There's no reliable way to tell if TWA_RESUME
+ * will do the job.
+ */
+ notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
+
+ if (!task_work_add(tsk, &tctx->task_work, notify)) {
+ wake_up_process(tsk);
return 0;
+ }
/*
* Slow path - we failed, find and delete work. if the work is not
* in the list, it got run and we're fine.
*/
- ret = 0;
spin_lock_irqsave(&tctx->task_lock, flags);
wq_list_for_each(node, prev, &tctx->task_list) {
if (&req->io_task_work.node == node) {
@@ -1971,33 +1984,6 @@ static int io_task_work_add(struct task_struct *tsk, struct io_kiocb *req,
return ret;
}
-static int io_req_task_work_add(struct io_kiocb *req)
-{
- struct task_struct *tsk = req->task;
- struct io_ring_ctx *ctx = req->ctx;
- enum task_work_notify_mode notify;
- int ret;
-
- if (tsk->flags & PF_EXITING)
- return -ESRCH;
-
- /*
- * SQPOLL kernel thread doesn't need notification, just a wakeup. For
- * all other cases, use TWA_SIGNAL unconditionally to ensure we're
- * processing task_work. There's no reliable way to tell if TWA_RESUME
- * will do the job.
- */
- notify = TWA_NONE;
- if (!(ctx->flags & IORING_SETUP_SQPOLL))
- notify = TWA_SIGNAL;
-
- ret = io_task_work_add(tsk, req, notify);
- if (!ret)
- wake_up_process(tsk);
-
- return ret;
-}
-
static bool io_run_task_work_head(struct callback_head **work_head)
{
struct callback_head *work, *next;
--
2.24.0
* Re: [PATCH 00/16] random 5.13 bits
From: Jens Axboe @ 2021-03-19 18:59 UTC
To: Pavel Begunkov, io-uring
On 3/19/21 11:22 AM, Pavel Begunkov wrote:
> Random cleanups / small optimisations, should be fairly easy.
>
> Pavel Begunkov (16):
> io_uring: don't take ctx refs in task_work handler
> io_uring: optimise io_uring_enter()
> io_uring: optimise tctx node checks/alloc
> io_uring: keep io_req_free_batch() call locality
> io_uring: inline __io_queue_linked_timeout()
> io_uring: optimise success case of __io_queue_sqe
> io_uring: refactor io_flush_cached_reqs()
> io_uring: refactor rsrc refnode allocation
> io_uring: inline io_put_req and friends
> io_uring: refactor io_free_req_deferred()
> io_uring: add helper flushing locked_free_list
> io_uring: remove __io_req_task_cancel()
> io_uring: inline io_clean_op()'s fast path
> io_uring: optimise io_dismantle_req() fast path
> io_uring: abolish old io_put_file()
> io_uring: optimise io_req_task_work_add()
>
> fs/io_uring.c | 358 ++++++++++++++++++++++++--------------------------
> 1 file changed, 169 insertions(+), 189 deletions(-)
Thanks, added. All look pretty straightforward to me.
--
Jens Axboe