* [PATCH 01/11] io_uring: don't take ctx refs in task_work handler
In reply to: [PATCH 5.13 00/11] a second batch for 5.13
From: Pavel Begunkov @ 2021-03-04 18:52 UTC
To: Jens Axboe, io-uring
__tctx_task_work() guarantees that the ctx won't be killed while
task_works are running, so we can remove the now-unnecessary ctx
pinning for internally armed polling.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 5 -----
1 file changed, 5 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3cfc50320923..36d0bc506be4 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4780,7 +4780,6 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
req->result = mask;
req->task_work.func = func;
- percpu_ref_get(&req->ctx->refs);
/*
* If this fails, then the task is exiting. When a task exits, the
@@ -4877,8 +4876,6 @@ static void io_poll_task_func(struct callback_head *cb)
if (nxt)
__io_req_task_submit(nxt);
}
-
- percpu_ref_put(&ctx->refs);
}
static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
@@ -4985,7 +4982,6 @@ static void io_async_task_func(struct callback_head *cb)
if (io_poll_rewait(req, &apoll->poll)) {
spin_unlock_irq(&ctx->completion_lock);
- percpu_ref_put(&ctx->refs);
return;
}
@@ -5001,7 +4997,6 @@ static void io_async_task_func(struct callback_head *cb)
else
__io_req_task_cancel(req, -ECANCELED);
- percpu_ref_put(&ctx->refs);
kfree(apoll->double_poll);
kfree(apoll);
}
--
2.24.0
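For context, the get/put pair deleted above is the usual percpu_ref
pinning idiom: take a reference before queueing deferred work so the
object can't be torn down before the callback runs. A minimal sketch of
that idiom, with hypothetical my_* names standing in for the io_uring
structures:

#include <linux/percpu-refcount.h>

struct my_ctx { struct percpu_ref refs; };
struct my_req { struct my_ctx *ctx; };

extern void queue_deferred(struct my_req *req);	/* hypothetical */

static void my_arm(struct my_req *req)
{
	percpu_ref_get(&req->ctx->refs);	/* pin ctx until the callback runs */
	queue_deferred(req);
}

static void my_callback(struct my_req *req)
{
	/* ... do the deferred work ... */
	percpu_ref_put(&req->ctx->refs);	/* unpin */
}

The patch can drop this pair because __tctx_task_work() already keeps
the ctx alive for the whole task_work run, making the per-request pin
redundant.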
* [PATCH 02/11] io_uring: optimise io_uring_enter()
From: Pavel Begunkov @ 2021-03-04 18:52 UTC
To: Jens Axboe, io-uring
Add unlikely() annotations: my compiler mispredicts pretty much every
one of these first checks, and apart from jumping around in the fast
path, it also generates extra instructions, like setting the ret value
in advance.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 36d0bc506be4..9175ab937e34 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -9001,31 +9001,31 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
size_t, argsz)
{
struct io_ring_ctx *ctx;
- long ret = -EBADF;
int submitted = 0;
struct fd f;
+ long ret;
io_run_task_work();
- if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
- IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG))
+ if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
+ IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG)))
return -EINVAL;
f = fdget(fd);
- if (!f.file)
+ if (unlikely(!f.file))
return -EBADF;
ret = -EOPNOTSUPP;
- if (f.file->f_op != &io_uring_fops)
+ if (unlikely(f.file->f_op != &io_uring_fops))
goto out_fput;
ret = -ENXIO;
ctx = f.file->private_data;
- if (!percpu_ref_tryget(&ctx->refs))
+ if (unlikely(!percpu_ref_tryget(&ctx->refs)))
goto out_fput;
ret = -EBADFD;
- if (ctx->flags & IORING_SETUP_R_DISABLED)
+ if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
goto out;
/*
--
2.24.0
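For reference, unlikely() expands to __builtin_expect(!!(cond), 0),
which tells gcc/clang to treat the branch as cold: the error paths get
laid out out of line and the fast path runs as straight-line code. A
standalone plain-C sketch with an illustrative function:

#define unlikely(x)	__builtin_expect(!!(x), 0)

int my_enter(unsigned int flags, const void *file)
{
	/* cold: invalid flag bits */
	if (unlikely(flags & ~0x0fu))
		return -22;		/* -EINVAL */
	/* cold: no file for the fd */
	if (unlikely(!file))
		return -9;		/* -EBADF */
	/* hot path: falls through with no taken branches */
	return 0;
}

Deferring the ret initialisation (the `long ret = -EBADF` removal in
the hunk above) works towards the same end: with the early checks
marked cold, the compiler no longer pre-computes an error value on the
fast path.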
* [PATCH 03/11] io_uring: move setting tctx->sqpoll from hot path
From: Pavel Begunkov @ 2021-03-04 18:52 UTC
To: Jens Axboe, io-uring
We need to set tctx->sqpoll only when adding a new entry into ->xa,
so move it out of the hot path.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 9175ab937e34..869e564ce713 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8709,18 +8709,19 @@ static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file)
fput(file);
return ret;
}
+
+ /*
+ * This is race safe in that the task itself is doing
+ * this, hence it cannot be going through the exit/cancel
+ * paths at the same time. This cannot be modified while
+ * exit/cancel is running.
+ */
+ if (ctx->flags & IORING_SETUP_SQPOLL)
+ tctx->sqpoll = true;
}
tctx->last = file;
}
- /*
- * This is race safe in that the task itself is doing this, hence it
- * cannot be going through the exit/cancel paths at the same time.
- * This cannot be modified while exit/cancel is running.
- */
- if (!tctx->sqpoll && (ctx->flags & IORING_SETUP_SQPOLL))
- tctx->sqpoll = true;
-
return 0;
}
--
2.24.0
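The underlying idea: the store (and the tctx->sqpoll load feeding the
old `!tctx->sqpoll &&` short-circuit) only matters when a new xa entry
is created, so it belongs inside that rare branch rather than on every
call. A generic sketch with hypothetical names:

#include <stdbool.h>

struct my_tctx { bool sqpoll; void *last; };

void my_add_file(struct my_tctx *tctx, void *file, bool sqpoll,
		 bool is_new)
{
	if (is_new) {
		/* slow path: first time this file is registered */
		if (sqpoll)
			tctx->sqpoll = true;
	}
	/* hot path no longer touches the sqpoll flag at all */
	tctx->last = file;
}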
* [PATCH 04/11] io_uring: inline io_clean_op() fast path
From: Pavel Begunkov @ 2021-03-04 18:52 UTC
To: Jens Axboe, io-uring
Inline io_clean_op(), keeping __io_clean_op() but renaming it to
io_clean_op(). This will be used in the following patches.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 21 +++++++++------------
1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 869e564ce713..d50d0e98639b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1000,7 +1000,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
struct io_uring_rsrc_update *ip,
unsigned nr_args);
-static void __io_clean_op(struct io_kiocb *req);
+static void io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_submit_state *state,
struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req);
@@ -1031,12 +1031,6 @@ EXPORT_SYMBOL(io_uring_get_socket);
#define io_for_each_link(pos, head) \
for (pos = (head); pos; pos = pos->link)
-static inline void io_clean_op(struct io_kiocb *req)
-{
- if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
- __io_clean_op(req);
-}
-
static inline void io_set_resource_node(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -1527,7 +1521,9 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res,
set_bit(0, &ctx->cq_check_overflow);
ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
}
- io_clean_op(req);
+ if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
+ io_clean_op(req);
+
req->result = res;
req->compl.cflags = cflags;
req_ref_get(req);
@@ -1574,7 +1570,8 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
static void io_req_complete_state(struct io_kiocb *req, long res,
unsigned int cflags)
{
- io_clean_op(req);
+ if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
+ io_clean_op(req);
req->result = res;
req->compl.cflags = cflags;
req->flags |= REQ_F_COMPLETE_INLINE;
@@ -1673,8 +1670,8 @@ static inline void io_put_file(struct io_kiocb *req, struct file *file,
static void io_dismantle_req(struct io_kiocb *req)
{
- io_clean_op(req);
-
+ if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
+ io_clean_op(req);
if (req->async_data)
kfree(req->async_data);
if (req->file)
@@ -5812,7 +5809,7 @@ static int io_req_defer(struct io_kiocb *req)
return -EIOCBQUEUED;
}
-static void __io_clean_op(struct io_kiocb *req)
+static void io_clean_op(struct io_kiocb *req)
{
if (req->flags & REQ_F_BUFFER_SELECTED) {
switch (req->opcode) {
--
2.24.0
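The shape of the change: the cheap flag test stays inline at every call
site, and the (renamed) function is only entered on the slow path. A
generic before/after sketch, with illustrative flag values:

#define F_NEED_CLEANUP		0x1u
#define F_BUFFER_SELECTED	0x2u

struct my_req { unsigned int flags; };

/* out of line: only reached when cleanup state actually exists */
static void my_clean_op(struct my_req *req)
{
	/* ... free selected buffers, undo prep state ... */
	req->flags &= ~(F_NEED_CLEANUP | F_BUFFER_SELECTED);
}

static void my_complete(struct my_req *req)
{
	/* inline fast path: one test-and-branch, no function call */
	if (req->flags & (F_NEED_CLEANUP | F_BUFFER_SELECTED))
		my_clean_op(req);
	/* ... rest of completion ... */
}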
* [PATCH 05/11] io_uring: optimise io_dismantle_req() fast path
From: Pavel Begunkov @ 2021-03-04 18:52 UTC
To: Jens Axboe, io-uring
Put the REQ_F_INFLIGHT check together with the slow path
REQ_F_NEED_CLEANUP/etc. checks. Also, don't reload req->flags twice;
cache it in a local variable.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 32 ++++++++++++++++++--------------
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index d50d0e98639b..c4ebdf1f759f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1670,24 +1670,28 @@ static inline void io_put_file(struct io_kiocb *req, struct file *file,
static void io_dismantle_req(struct io_kiocb *req)
{
- if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
- io_clean_op(req);
- if (req->async_data)
- kfree(req->async_data);
+ unsigned int flags = req->flags;
+
if (req->file)
- io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
- if (req->fixed_rsrc_refs)
- percpu_ref_put(req->fixed_rsrc_refs);
+ io_put_file(req, req->file, (flags & REQ_F_FIXED_FILE));
+ if (flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
+ REQ_F_INFLIGHT)) {
+ io_clean_op(req);
- if (req->flags & REQ_F_INFLIGHT) {
- struct io_ring_ctx *ctx = req->ctx;
- unsigned long flags;
+ if (req->flags & REQ_F_INFLIGHT) {
+ struct io_ring_ctx *ctx = req->ctx;
+ unsigned long flags;
- spin_lock_irqsave(&ctx->inflight_lock, flags);
- list_del(&req->inflight_entry);
- spin_unlock_irqrestore(&ctx->inflight_lock, flags);
- req->flags &= ~REQ_F_INFLIGHT;
+ spin_lock_irqsave(&ctx->inflight_lock, flags);
+ list_del(&req->inflight_entry);
+ spin_unlock_irqrestore(&ctx->inflight_lock, flags);
+ req->flags &= ~REQ_F_INFLIGHT;
+ }
}
+ if (req->fixed_rsrc_refs)
+ percpu_ref_put(req->fixed_rsrc_refs);
+ if (req->async_data)
+ kfree(req->async_data);
}
/* must to be called somewhat shortly after putting a request */
--
2.24.0
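Two things are going on above, sketched generically below: the flags
word is read once into a local (a call like io_put_file() could, as far
as the compiler knows, modify req->flags, forcing a reload), and all
the rarely-set flags funnel through a single test so the common path
costs one branch. Names and the mask value are illustrative:

struct my_req { unsigned int flags; void *file; };

extern void my_put_file(void *file);		/* hypothetical */
extern void my_clean_op(struct my_req *req);	/* hypothetical */

#define F_SLOW_MASK	0x7u	/* NEED_CLEANUP | BUFFER_SELECTED | INFLIGHT */

static void my_dismantle(struct my_req *req)
{
	unsigned int flags = req->flags;  /* single load, stays in a register */

	if (req->file)
		my_put_file(req->file);
	/* no reload of req->flags needed after the call above */
	if (flags & F_SLOW_MASK)
		my_clean_op(req);	/* one branch guards all slow cases */
}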
* [PATCH 06/11] io_uring: abolish old io_put_file()
From: Pavel Begunkov @ 2021-03-04 18:52 UTC
To: Jens Axboe, io-uring
io_put_file() doesn't do a good job of generating good code. Inline the
fixed-file check into the callers, so we can test REQ_F_FIXED_FILE
first, prioritising the fixed-file case over requests without files and
saving a memory load in that case.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c4ebdf1f759f..94b080c3cc65 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1661,10 +1661,9 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
return state->reqs[state->free_reqs];
}
-static inline void io_put_file(struct io_kiocb *req, struct file *file,
- bool fixed)
+static inline void io_put_file(struct file *file)
{
- if (!fixed)
+ if (file)
fput(file);
}
@@ -1672,8 +1671,8 @@ static void io_dismantle_req(struct io_kiocb *req)
{
unsigned int flags = req->flags;
- if (req->file)
- io_put_file(req, req->file, (flags & REQ_F_FIXED_FILE));
+ if (!(flags & REQ_F_FIXED_FILE))
+ io_put_file(req->file);
if (flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
REQ_F_INFLIGHT)) {
io_clean_op(req);
@@ -3572,7 +3571,8 @@ static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
if (sp->len)
ret = do_tee(in, out, sp->len, flags);
- io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
+ if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
+ io_put_file(in);
req->flags &= ~REQ_F_NEED_CLEANUP;
if (ret != sp->len)
@@ -3608,7 +3608,8 @@ static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
if (sp->len)
ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
- io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
+ if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
+ io_put_file(in);
req->flags &= ~REQ_F_NEED_CLEANUP;
if (ret != sp->len)
@@ -5852,8 +5853,8 @@ static void io_clean_op(struct io_kiocb *req)
}
case IORING_OP_SPLICE:
case IORING_OP_TEE:
- io_put_file(req, req->splice.file_in,
- (req->splice.flags & SPLICE_F_FD_IN_FIXED));
+ if (!(req->splice.flags & SPLICE_F_FD_IN_FIXED))
+ io_put_file(req->splice.file_in);
break;
case IORING_OP_OPENAT:
case IORING_OP_OPENAT2:
--
2.24.0
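The "memory load" being saved: previously every request dereferenced
req->file just to decide whether to branch, even though the flags word
is already in a register. Testing REQ_F_FIXED_FILE first lets
fixed-file requests skip the pointer load entirely, and the NULL check
moves into the helper. A sketch with hypothetical names:

#define F_FIXED_FILE	0x4u

struct my_req { unsigned int flags; void *file; };

extern void my_fput(void *file);	/* hypothetical stand-in for fput() */

static inline void my_put_file(void *file)
{
	if (file)		/* requests without a file pass NULL */
		my_fput(file);
}

static void my_dismantle(struct my_req *req)
{
	/* flags are already cached in a register; fixed files never
	 * touch req->file at all */
	if (!(req->flags & F_FIXED_FILE))
		my_put_file(req->file);
}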
* [PATCH 07/11] io_uring: keep io_req_free_batch() call locality
From: Pavel Begunkov @ 2021-03-04 18:52 UTC
To: Jens Axboe, io-uring
Don't make a function call (io_dismantle_req()) in the middle of
io_req_free_batch(); place it near the other function calls instead,
otherwise it may lead to excessive register spilling.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 94b080c3cc65..9ebc447456ab 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2052,6 +2052,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
struct io_submit_state *state)
{
io_queue_next(req);
+ io_dismantle_req(req);
if (req->task != rb->task) {
if (rb->task)
@@ -2062,7 +2063,6 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
rb->task_refs++;
rb->ctx_refs++;
- io_dismantle_req(req);
if (state->free_reqs != ARRAY_SIZE(state->reqs))
state->reqs[state->free_reqs++] = req;
else
--
2.24.0
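The register-spilling point, in miniature: every value that must
survive a function call either gets spilled to the stack or parked in a
callee-saved register the callee then has to save and restore. Grouping
the calls at the top shrinks the set of values live across them. A
sketch with hypothetical names:

struct my_batch { long task_refs, ctx_refs; };

extern void my_dismantle(void *req);	/* hypothetical */

/* call in the middle: the cached counters are live across it */
static void batch_bad(struct my_batch *rb, void *req)
{
	long t = rb->task_refs, c = rb->ctx_refs;

	my_dismantle(req);	/* t and c must be spilled around this */
	rb->task_refs = t + 1;
	rb->ctx_refs = c + 1;
}

/* call first: only 'rb' itself stays live across it */
static void batch_good(struct my_batch *rb, void *req)
{
	my_dismantle(req);
	rb->task_refs++;
	rb->ctx_refs++;
}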
* [PATCH 08/11] io_uring: set req->work closer to all other fields
From: Pavel Begunkov @ 2021-03-04 18:52 UTC
To: Jens Axboe, io-uring
Keep the req->work init close to where all the other fields are set;
it's in io_init_req() anyway.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 9ebc447456ab..da5d8d962bff 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6271,6 +6271,9 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
atomic_set(&req->refs, 2);
req->task = current;
req->result = 0;
+ req->work.list.next = NULL;
+ req->work.flags = 0;
+ req->work.personality = READ_ONCE(sqe->personality);
/* enforce forwards compatibility on users */
if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
@@ -6288,9 +6291,6 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
!io_op_defs[req->opcode].buffer_select)
return -EOPNOTSUPP;
- req->work.list.next = NULL;
- req->work.flags = 0;
- req->work.personality = READ_ONCE(sqe->personality);
state = &ctx->submit_state;
/*
--
2.24.0
* [PATCH 09/11] io_uring: inline __io_queue_linked_timeout()
From: Pavel Begunkov @ 2021-03-04 18:52 UTC
To: Jens Axboe, io-uring
Inline __io_queue_linked_timeout(); we don't need it as a separate
helper.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 15 ++++-----------
1 file changed, 4 insertions(+), 11 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index da5d8d962bff..c05579ac7bb7 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -995,7 +995,6 @@ static void io_dismantle_req(struct io_kiocb *req);
static void io_put_task(struct task_struct *task, int nr);
static void io_queue_next(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
-static void __io_queue_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
struct io_uring_rsrc_update *ip,
@@ -6126,8 +6125,11 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-static void __io_queue_linked_timeout(struct io_kiocb *req)
+static void io_queue_linked_timeout(struct io_kiocb *req)
{
+ struct io_ring_ctx *ctx = req->ctx;
+
+ spin_lock_irq(&ctx->completion_lock);
/*
* If the back reference is NULL, then our linked request finished
* before we got a chance to setup the timer
@@ -6139,16 +6141,7 @@ static void __io_queue_linked_timeout(struct io_kiocb *req)
hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
data->mode);
}
-}
-
-static void io_queue_linked_timeout(struct io_kiocb *req)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- spin_lock_irq(&ctx->completion_lock);
- __io_queue_linked_timeout(req);
spin_unlock_irq(&ctx->completion_lock);
-
/* drop submission reference */
io_put_req(req);
}
--
2.24.0
* [PATCH 10/11] io_uring: optimise success case of __io_queue_sqe
From: Pavel Begunkov @ 2021-03-04 18:52 UTC
To: Jens Axboe, io-uring
Move the successfully-issued-request case up by doing that check first.
It's not much of a difference, but it generates slightly better code
for me.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c05579ac7bb7..75395cc84c39 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6171,15 +6171,7 @@ static void __io_queue_sqe(struct io_kiocb *req)
* We async punt it if the file wasn't marked NOWAIT, or if the file
* doesn't support non-blocking read/write attempts
*/
- if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
- if (!io_arm_poll_handler(req)) {
- /*
- * Queued up for async execution, worker will release
- * submit reference when the iocb is actually submitted.
- */
- io_queue_async_work(req);
- }
- } else if (likely(!ret)) {
+ if (likely(!ret)) {
/* drop submission reference */
if (req->flags & REQ_F_COMPLETE_INLINE) {
struct io_ring_ctx *ctx = req->ctx;
@@ -6191,6 +6183,14 @@ static void __io_queue_sqe(struct io_kiocb *req)
} else {
io_put_req(req);
}
+ } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
+ if (!io_arm_poll_handler(req)) {
+ /*
+ * Queued up for async execution, worker will release
+ * submit reference when the iocb is actually submitted.
+ */
+ io_queue_async_work(req);
+ }
} else {
io_req_complete_failed(req, ret);
}
--
2.24.0
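The idea generalises: test the overwhelmingly common outcome first, so
the generated code falls straight through on success and the rare cases
branch out of line. A condensed sketch with illustrative errno values
and names:

#define likely(x)	__builtin_expect(!!(x), 1)

#define F_NOWAIT	0x1u

int my_issue_result(int ret, unsigned int flags)
{
	if (likely(!ret))
		return 0;			/* success: straight-line */
	if (ret == -11 && !(flags & F_NOWAIT))	/* -EAGAIN */
		return 1;			/* punt to async */
	return -1;				/* hard failure */
}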
* [PATCH 11/11] io_uring: refactor io_flush_cached_reqs()
From: Pavel Begunkov @ 2021-03-04 18:52 UTC
To: Jens Axboe, io-uring
Emphasise that the return value of io_flush_cached_reqs() depends on
the number of requests in the cache. It looks nicer and might save
tools from false-negative analyses.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 75395cc84c39..202a3b862722 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1597,11 +1597,12 @@ static void io_req_complete_failed(struct io_kiocb *req, long res)
io_req_complete_post(req, res, 0);
}
+/* Returns true IFF there are requests in the cache */
static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
{
struct io_submit_state *state = &ctx->submit_state;
struct io_comp_state *cs = &state->comp;
- struct io_kiocb *req = NULL;
+ int nr;
/*
* If we have more than a batch's worth of requests in our IRQ side
@@ -1615,16 +1616,19 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
spin_unlock_irq(&ctx->completion_lock);
}
+ nr = state->free_reqs;
while (!list_empty(&cs->free_list)) {
- req = list_first_entry(&cs->free_list, struct io_kiocb,
- compl.list);
+ struct io_kiocb *req = list_first_entry(&cs->free_list,
+ struct io_kiocb, compl.list);
+
list_del(&req->compl.list);
- state->reqs[state->free_reqs++] = req;
- if (state->free_reqs == ARRAY_SIZE(state->reqs))
+ state->reqs[nr++] = req;
+ if (nr == ARRAY_SIZE(state->reqs))
break;
}
- return req != NULL;
+ state->free_reqs = nr;
+ return nr != 0;
}
static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
--
2.24.0
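The refactoring pattern: derive the boolean result from the count the
function actually maintains, rather than from a pointer that happens to
be non-NULL only if the loop body ran. A generic sketch:

#include <stdbool.h>

/* returns true iff anything ended up in dst */
static bool my_refill(int *dst, int cap, const int *src, int n)
{
	int nr = 0;

	while (nr < cap && nr < n) {
		dst[nr] = src[nr];
		nr++;
	}
	return nr != 0;		/* tied directly to the cache contents */
}

To an analyser (and a reader), `nr != 0` states the postcondition
directly; the old `req != NULL` only implied it.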