From: Pavel Begunkov <[email protected]>
To: [email protected]
Cc: Jens Axboe <[email protected]>, [email protected]
Subject: [PATCH 05/16] io_uring: optimise extra io_get_cqe null check
Date: Tue, 15 Aug 2023 18:31:34 +0100
Message-ID: <c3816ff286b95714efa5fb8cdde2b01d28202cb1.1692119257.git.asml.silence@gmail.com>
In-Reply-To: <[email protected]>
If the cached cqe check passes in io_get_cqe*() it already means that
the cqe we return is valid and non-zero, however the compiler is unable
to optimise null checks like in io_fill_cqe_req().
Do a bit of trickery, return success/fail boolean from io_get_cqe*()
and store cqe in the cqe parameter. That makes it do the right thing,
erasing the check together with the introduced indirection.
Signed-off-by: Pavel Begunkov <[email protected]>
---
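For illustration only, not part of the patch: below is a minimal,
self-contained sketch of the pattern the change relies on. All names in
it (struct ring, get_cqe_ptr, get_cqe_bool, fill_cqe_old, fill_cqe_new)
are invented stand-ins for the io_uring helpers. The idea is that with
the boolean form, a successful return already implies the out-parameter
was written, so the caller's branch on the boolean subsumes the NULL
test and the compiler has no extra check left to emit.

#include <stdbool.h>
#include <stddef.h>

struct cqe {
	unsigned long long user_data;
};

struct ring {
	struct cqe *cached;
	struct cqe *sentinel;
};

/* Old shape: return the next entry, or NULL when the cache is exhausted. */
static inline struct cqe *get_cqe_ptr(struct ring *r)
{
	if (r->cached >= r->sentinel)
		return NULL;
	return r->cached++;
}

/* New shape: report success/failure and hand the entry back via *ret. */
static inline bool get_cqe_bool(struct ring *r, struct cqe **ret)
{
	if (r->cached >= r->sentinel)
		return false;
	*ret = r->cached++;
	return true;
}

/* Old-style caller: the NULL test is a branch of its own that the
 * compiler cannot always prove redundant. */
static bool fill_cqe_old(struct ring *r, unsigned long long data)
{
	struct cqe *cqe = get_cqe_ptr(r);

	if (!cqe)
		return false;
	cqe->user_data = data;
	return true;
}

/* New-style caller: branching on the boolean is enough; on success the
 * pointer is known to have been written, so no separate check remains. */
static bool fill_cqe_new(struct ring *r, unsigned long long data)
{
	struct cqe *cqe;

	if (!get_cqe_bool(r, &cqe))
		return false;
	cqe->user_data = data;
	return true;
}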
io_uring/io_uring.c | 7 +++----
io_uring/io_uring.h | 20 +++++++++-----------
2 files changed, 12 insertions(+), 15 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 623d41755714..e5378dc7aa19 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -683,10 +683,10 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)

io_cq_lock(ctx);
while (!list_empty(&ctx->cq_overflow_list)) {
- struct io_uring_cqe *cqe = io_get_cqe_overflow(ctx, true);
+ struct io_uring_cqe *cqe;
struct io_overflow_cqe *ocqe;

- if (!cqe)
+ if (!io_get_cqe_overflow(ctx, &cqe, true))
break;
ocqe = list_first_entry(&ctx->cq_overflow_list,
struct io_overflow_cqe, list);
@@ -862,8 +862,7 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
* submission (by quite a lot). Increment the overflow count in
* the ring.
*/
- cqe = io_get_cqe(ctx);
- if (likely(cqe)) {
+ if (likely(io_get_cqe(ctx, &cqe))) {
trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);

WRITE_ONCE(cqe->user_data, user_data);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 9c80d20fe18f..2960e35b32a5 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -109,28 +109,27 @@ static inline void io_req_task_work_add(struct io_kiocb *req)
#define io_for_each_link(pos, head) \
for (pos = (head); pos; pos = pos->link)

-static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
- bool overflow)
+static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
+ struct io_uring_cqe **ret,
+ bool overflow)
{
- struct io_uring_cqe *cqe;
-
io_lockdep_assert_cq_locked(ctx);

if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
- return NULL;
+ return false;
}
- cqe = ctx->cqe_cached;
+ *ret = ctx->cqe_cached;
ctx->cached_cq_tail++;
ctx->cqe_cached++;
if (ctx->flags & IORING_SETUP_CQE32)
ctx->cqe_cached++;
- return cqe;
+ return true;
}

-static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
+static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
- return io_get_cqe_overflow(ctx, false);
+ return io_get_cqe_overflow(ctx, ret, false);
}

static inline bool io_fill_cqe_req(struct io_ring_ctx *ctx, struct io_kiocb *req)
@@ -142,8 +141,7 @@ static inline bool io_fill_cqe_req(struct io_ring_ctx *ctx, struct io_kiocb *req
* submission (by quite a lot). Increment the overflow count in
* the ring.
*/
- cqe = io_get_cqe(ctx);
- if (unlikely(!cqe))
+ if (unlikely(!io_get_cqe(ctx, &cqe)))
return false;

if (trace_io_uring_complete_enabled())

--
2.41.0
Thread overview: 23+ messages
2023-08-15 17:31 [RFC 00/16] caching and SQ/CQ optimisations Pavel Begunkov
2023-08-15 17:31 ` [PATCH 01/16] io_uring: improve cqe !tracing hot path Pavel Begunkov
2023-08-15 17:31 ` [PATCH 02/16] io_uring: cqe init hardening Pavel Begunkov
2023-08-19 15:03 ` Jens Axboe
2023-08-24 16:28 ` Pavel Begunkov
2023-08-24 16:49 ` Jens Axboe
2023-08-15 17:31 ` [PATCH 03/16] io_uring: simplify big_cqe handling Pavel Begunkov
2023-08-15 17:31 ` [PATCH 04/16] io_uring: refactor __io_get_cqe() Pavel Begunkov
2023-08-15 17:31 ` [PATCH 05/16] io_uring: optimise extra io_get_cqe null check Pavel Begunkov [this message]
2023-08-15 17:31 ` [PATCH 06/16] io_uring: reorder cqring_flush and wakeups Pavel Begunkov
2023-08-15 17:31 ` [PATCH 07/16] io_uring: merge iopoll and normal completion paths Pavel Begunkov
2023-08-15 17:31 ` [PATCH 08/16] io_uring: compact SQ/CQ heads/tails Pavel Begunkov
2023-08-19 15:05 ` Jens Axboe
2023-08-24 16:29 ` Pavel Begunkov
2023-08-15 17:31 ` [PATCH 09/16] io_uring: add option to remove SQ indirection Pavel Begunkov
2023-08-19 15:06 ` Jens Axboe
2023-08-15 17:31 ` [PATCH 10/16] io_uring: static_key for !IORING_SETUP_NO_SQARRAY Pavel Begunkov
2023-08-15 17:31 ` [PATCH 11/16] io_uring: move non aligned field to the end Pavel Begunkov
2023-08-15 17:31 ` [PATCH 12/16] io_uring: banish non-hot data to end of io_ring_ctx Pavel Begunkov
2023-08-15 17:31 ` [PATCH 13/16] io_uring: separate task_work/waiting cache line Pavel Begunkov
2023-08-15 17:31 ` [PATCH 14/16] io_uring: move multishot cqe cache in ctx Pavel Begunkov
2023-08-15 17:31 ` [PATCH 15/16] io_uring: move iopoll ctx fields around Pavel Begunkov
2023-08-15 17:31 ` [PATCH 16/16] io_uring: force inline io_fill_cqe_req Pavel Begunkov