From: Pavel Begunkov <[email protected]>
To: [email protected]
Cc: Jens Axboe <[email protected]>, [email protected]
Subject: [PATCH 03/16] io_uring: simplify big_cqe handling
Date: Tue, 15 Aug 2023 18:31:32 +0100
Message-ID: <5dcfd5797c3788d0228ac0d6bc3c154a4e382ee9.1692119257.git.asml.silence@gmail.com>
In-Reply-To: <[email protected]>

Don't keep the big_cqe bits of a request in a union with hash_node;
find a separate space for them. It's a bit safer, and if we always keep
them initialised, we can also get rid of the ugly REQ_F_CQE32_INIT
handling.

Signed-off-by: Pavel Begunkov <[email protected]>
---
 include/linux/io_uring_types.h | 16 ++++++----------
 io_uring/io_uring.c            |  8 +++-----
 io_uring/io_uring.h            | 15 +++------------
 io_uring/uring_cmd.c           |  5 ++---
 4 files changed, 14 insertions(+), 30 deletions(-)
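
[Editor's note, not part of the patch: a minimal standalone sketch of
the idea, in userspace C with simplified, hypothetical names
(req_sketch, set_cqe32_extra, fill_big_cqe). big_cqe becomes a plain,
always-initialised member instead of sharing a union with hash_node, so
producers store the extra CQE32 fields unconditionally and consumers
copy-and-clear them without checking a REQ_F_CQE32_INIT flag.]

#include <stdint.h>
#include <string.h>

struct big_cqe {
        uint64_t extra1;
        uint64_t extra2;
};

struct req_sketch {
        /* other fields, including hash_node, elided */
        struct big_cqe big_cqe;         /* zeroed at request init */
};

/* Producer side: store the extra CQE32 fields unconditionally,
 * no REQ_F_CQE32_INIT flag to set any more. */
static void set_cqe32_extra(struct req_sketch *req,
                            uint64_t extra1, uint64_t extra2)
{
        req->big_cqe.extra1 = extra1;
        req->big_cqe.extra2 = extra2;
}

/* Consumer side: copy out and re-zero, mirroring what the new
 * io_fill_cqe_req() and io_req_cqe_overflow() paths do. */
static void fill_big_cqe(uint64_t out[2], struct req_sketch *req)
{
        out[0] = req->big_cqe.extra1;
        out[1] = req->big_cqe.extra2;
        memset(&req->big_cqe, 0, sizeof(req->big_cqe));
}
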
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index f04ce513fadb..9795eda529f7 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -409,7 +409,6 @@ enum {
REQ_F_SINGLE_POLL_BIT,
REQ_F_DOUBLE_POLL_BIT,
REQ_F_PARTIAL_IO_BIT,
- REQ_F_CQE32_INIT_BIT,
REQ_F_APOLL_MULTISHOT_BIT,
REQ_F_CLEAR_POLLIN_BIT,
REQ_F_HASH_LOCKED_BIT,
@@ -479,8 +478,6 @@ enum {
REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT),
/* fast poll multishot mode */
REQ_F_APOLL_MULTISHOT = BIT(REQ_F_APOLL_MULTISHOT_BIT),
- /* ->extra1 and ->extra2 are initialised */
- REQ_F_CQE32_INIT = BIT(REQ_F_CQE32_INIT_BIT),
/* recvmsg special flag, clear EPOLLIN */
REQ_F_CLEAR_POLLIN = BIT(REQ_F_CLEAR_POLLIN_BIT),
/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
@@ -579,13 +576,7 @@ struct io_kiocb {
struct io_task_work io_task_work;
unsigned nr_tw;
/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
- union {
- struct hlist_node hash_node;
- struct {
- u64 extra1;
- u64 extra2;
- };
- };
+ struct hlist_node hash_node;
/* internal polling, see IORING_FEAT_FAST_POLL */
struct async_poll *apoll;
/* opcode allocated if it needs to store data for async defer */
@@ -595,6 +586,11 @@ struct io_kiocb {
/* custom credentials, valid IFF REQ_F_CREDS is set */
const struct cred *creds;
struct io_wq_work work;
+
+ struct {
+ u64 extra1;
+ u64 extra2;
+ } big_cqe;
};
struct io_overflow_cqe {
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 4d27655be3a6..20b46e64cc07 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -807,13 +807,10 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
void io_req_cqe_overflow(struct io_kiocb *req)
{
- if (!(req->flags & REQ_F_CQE32_INIT)) {
- req->extra1 = 0;
- req->extra2 = 0;
- }
io_cqring_event_overflow(req->ctx, req->cqe.user_data,
req->cqe.res, req->cqe.flags,
- req->extra1, req->extra2);
+ req->big_cqe.extra1, req->big_cqe.extra2);
+ memset(&req->big_cqe, 0, sizeof(req->big_cqe));
}
/*
@@ -1057,6 +1054,7 @@ static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
req->async_data = NULL;
/* not necessary, but safer to zero */
memset(&req->cqe, 0, sizeof(req->cqe));
+ memset(&req->big_cqe, 0, sizeof(req->big_cqe));
}
static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 465598223386..9b5dfb6ef484 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -148,21 +148,12 @@ static inline bool io_fill_cqe_req(struct io_ring_ctx *ctx, struct io_kiocb *req
if (trace_io_uring_complete_enabled())
trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
req->cqe.res, req->cqe.flags,
- (req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
- (req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);
+ req->big_cqe.extra1, req->big_cqe.extra2);
memcpy(cqe, &req->cqe, sizeof(*cqe));
-
if (ctx->flags & IORING_SETUP_CQE32) {
- u64 extra1 = 0, extra2 = 0;
-
- if (req->flags & REQ_F_CQE32_INIT) {
- extra1 = req->extra1;
- extra2 = req->extra2;
- }
-
- WRITE_ONCE(cqe->big_cqe[0], extra1);
- WRITE_ONCE(cqe->big_cqe[1], extra2);
+ memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
+ memset(&req->big_cqe, 0, sizeof(req->big_cqe));
}
return true;
}
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 8e7a03c1b20e..537795fddc87 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -43,9 +43,8 @@ EXPORT_SYMBOL_GPL(io_uring_cmd_do_in_task_lazy);
static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
u64 extra1, u64 extra2)
{
- req->extra1 = extra1;
- req->extra2 = extra2;
- req->flags |= REQ_F_CQE32_INIT;
+ req->big_cqe.extra1 = extra1;
+ req->big_cqe.extra2 = extra2;
}
/*
--
2.41.0