* [PATCH 5.19 1/6] io_uring: get rid of __io_fill_cqe{32}_req()
From: Pavel Begunkov @ 2022-06-15 10:23 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
There are too many CQE filling helpers. Kill __io_fill_cqe{32}_req(),
use __io_fill_cqe{32}_req_filled() instead, and then rename the latter
to take over the old names. It'll simplify the fixes in the following
patches.
Signed-off-by: Pavel Begunkov <[email protected]>
---
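A minimal sketch of the resulting calling convention, condensed from
the diff below (tracing and overflow handling elided):

        /* callers stash the result in the request first... */
        req->cqe.res = res;
        req->cqe.flags = cflags;
        /* ...then one helper fills the ring CQE from req->cqe */
        __io_fill_cqe_req(req->ctx, req);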
fs/io_uring.c | 70 ++++++++++++++++-----------------------------------
1 file changed, 21 insertions(+), 49 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 1b0b6099e717..654c2f897497 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2464,8 +2464,8 @@ static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
}
-static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
+static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
+ struct io_kiocb *req)
{
struct io_uring_cqe *cqe;
@@ -2486,8 +2486,8 @@ static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
req->cqe.res, req->cqe.flags, 0, 0);
}
-static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
+static inline bool __io_fill_cqe32_req(struct io_ring_ctx *ctx,
+ struct io_kiocb *req)
{
struct io_uring_cqe *cqe;
u64 extra1 = req->extra1;
@@ -2513,44 +2513,6 @@ static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx,
req->cqe.flags, extra1, extra2);
}
-static inline bool __io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
-{
- trace_io_uring_complete(req->ctx, req, req->cqe.user_data, res, cflags, 0, 0);
- return __io_fill_cqe(req->ctx, req->cqe.user_data, res, cflags);
-}
-
-static inline void __io_fill_cqe32_req(struct io_kiocb *req, s32 res, u32 cflags,
- u64 extra1, u64 extra2)
-{
- struct io_ring_ctx *ctx = req->ctx;
- struct io_uring_cqe *cqe;
-
- if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_CQE32)))
- return;
- if (req->flags & REQ_F_CQE_SKIP)
- return;
-
- trace_io_uring_complete(ctx, req, req->cqe.user_data, res, cflags,
- extra1, extra2);
-
- /*
- * If we can't get a cq entry, userspace overflowed the
- * submission (by quite a lot). Increment the overflow count in
- * the ring.
- */
- cqe = io_get_cqe(ctx);
- if (likely(cqe)) {
- WRITE_ONCE(cqe->user_data, req->cqe.user_data);
- WRITE_ONCE(cqe->res, res);
- WRITE_ONCE(cqe->flags, cflags);
- WRITE_ONCE(cqe->big_cqe[0], extra1);
- WRITE_ONCE(cqe->big_cqe[1], extra2);
- return;
- }
-
- io_cqring_event_overflow(ctx, req->cqe.user_data, res, cflags, extra1, extra2);
-}
-
static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
s32 res, u32 cflags)
{
@@ -2593,16 +2555,24 @@ static void __io_req_complete_put(struct io_kiocb *req)
static void __io_req_complete_post(struct io_kiocb *req, s32 res,
u32 cflags)
{
- if (!(req->flags & REQ_F_CQE_SKIP))
- __io_fill_cqe_req(req, res, cflags);
+ if (!(req->flags & REQ_F_CQE_SKIP)) {
+ req->cqe.res = res;
+ req->cqe.flags = cflags;
+ __io_fill_cqe_req(req->ctx, req);
+ }
__io_req_complete_put(req);
}
static void __io_req_complete_post32(struct io_kiocb *req, s32 res,
u32 cflags, u64 extra1, u64 extra2)
{
- if (!(req->flags & REQ_F_CQE_SKIP))
- __io_fill_cqe32_req(req, res, cflags, extra1, extra2);
+ if (!(req->flags & REQ_F_CQE_SKIP)) {
+ req->cqe.res = res;
+ req->cqe.flags = cflags;
+ req->extra1 = extra1;
+ req->extra2 = extra2;
+ __io_fill_cqe32_req(req->ctx, req);
+ }
__io_req_complete_put(req);
}
@@ -3207,9 +3177,9 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
if (!(req->flags & REQ_F_CQE_SKIP)) {
if (!(ctx->flags & IORING_SETUP_CQE32))
- __io_fill_cqe_req_filled(ctx, req);
+ __io_fill_cqe_req(ctx, req);
else
- __io_fill_cqe32_req_filled(ctx, req);
+ __io_fill_cqe32_req(ctx, req);
}
}
@@ -3329,7 +3299,9 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
nr_events++;
if (unlikely(req->flags & REQ_F_CQE_SKIP))
continue;
- __io_fill_cqe_req(req, req->cqe.res, io_put_kbuf(req, 0));
+
+ req->cqe.flags = io_put_kbuf(req, 0);
+ __io_fill_cqe_req(req->ctx, req);
}
if (unlikely(!nr_events))
--
2.36.1
* [PATCH 5.19 2/6] io_uring: unite fill_cqe and the 32B version
From: Pavel Begunkov @ 2022-06-15 10:23 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
We want just one function that handles both normal CQEs and 32B CQEs.
Combine __io_fill_cqe_req() and __io_fill_cqe32_req(). It's still not
entirely correct yet, but it saves us from cases where we fill a CQE of
the wrong size.
Fixes: 76c68fbf1a1f9 ("io_uring: enable CQE32")
Signed-off-by: Pavel Begunkov <[email protected]>
---
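The payoff at the call sites: __io_submit_flush_completions() no longer
picks a sized variant, condensed from the diff below:

        /* one helper now handles both 16B and 32B rings */
        if (!(req->flags & REQ_F_CQE_SKIP))
                __io_fill_cqe_req(ctx, req);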
fs/io_uring.c | 61 +++++++++++++++++++++++++++++++++++----------------
1 file changed, 42 insertions(+), 19 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 654c2f897497..eb858cf92af9 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2469,21 +2469,48 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
{
struct io_uring_cqe *cqe;
- trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
- req->cqe.res, req->cqe.flags, 0, 0);
+ if (!(ctx->flags & IORING_SETUP_CQE32)) {
+ trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
+ req->cqe.res, req->cqe.flags, 0, 0);
- /*
- * If we can't get a cq entry, userspace overflowed the
- * submission (by quite a lot). Increment the overflow count in
- * the ring.
- */
- cqe = io_get_cqe(ctx);
- if (likely(cqe)) {
- memcpy(cqe, &req->cqe, sizeof(*cqe));
- return true;
+ /*
+ * If we can't get a cq entry, userspace overflowed the
+ * submission (by quite a lot). Increment the overflow count in
+ * the ring.
+ */
+ cqe = io_get_cqe(ctx);
+ if (likely(cqe)) {
+ memcpy(cqe, &req->cqe, sizeof(*cqe));
+ return true;
+ }
+
+ return io_cqring_event_overflow(ctx, req->cqe.user_data,
+ req->cqe.res, req->cqe.flags,
+ 0, 0);
+ } else {
+ u64 extra1 = req->extra1;
+ u64 extra2 = req->extra2;
+
+ trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
+ req->cqe.res, req->cqe.flags, extra1, extra2);
+
+ /*
+ * If we can't get a cq entry, userspace overflowed the
+ * submission (by quite a lot). Increment the overflow count in
+ * the ring.
+ */
+ cqe = io_get_cqe(ctx);
+ if (likely(cqe)) {
+ memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe));
+ WRITE_ONCE(cqe->big_cqe[0], extra1);
+ WRITE_ONCE(cqe->big_cqe[1], extra2);
+ return true;
+ }
+
+ return io_cqring_event_overflow(ctx, req->cqe.user_data,
+ req->cqe.res, req->cqe.flags,
+ extra1, extra2);
}
- return io_cqring_event_overflow(ctx, req->cqe.user_data,
- req->cqe.res, req->cqe.flags, 0, 0);
}
static inline bool __io_fill_cqe32_req(struct io_ring_ctx *ctx,
@@ -3175,12 +3202,8 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
struct io_kiocb *req = container_of(node, struct io_kiocb,
comp_list);
- if (!(req->flags & REQ_F_CQE_SKIP)) {
- if (!(ctx->flags & IORING_SETUP_CQE32))
- __io_fill_cqe_req(ctx, req);
- else
- __io_fill_cqe32_req(ctx, req);
- }
+ if (!(req->flags & REQ_F_CQE_SKIP))
+ __io_fill_cqe_req(ctx, req);
}
io_commit_cqring(ctx);
--
2.36.1
* [PATCH 5.19 3/6] io_uring: fill extra big cqe fields from req
From: Pavel Begunkov @ 2022-06-15 10:23 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
The only user of the io_req_complete32()-like functions is cmd
requests. Instead of keeping the whole complete32 family around, remove
them and pass the extras via req->extra{1,2}, the fields already added
for inline completions. When __io_fill_cqe_req() finds the CQE32 option
enabled, it'll use those fields to fill a 32B CQE.
Signed-off-by: Pavel Begunkov <[email protected]>
---
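The end state of io_uring_cmd_done(), condensed from the diff below:
the CQE32 extras are stashed on the request and the common completion
path does the rest:

        if (req->ctx->flags & IORING_SETUP_CQE32)
                io_req_set_cqe32_extra(req, res2, 0);
        io_req_complete(req, ret);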
fs/io_uring.c | 78 +++++++--------------------------------------------
1 file changed, 10 insertions(+), 68 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index eb858cf92af9..10901db93f7e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2513,33 +2513,6 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
}
}
-static inline bool __io_fill_cqe32_req(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
-{
- struct io_uring_cqe *cqe;
- u64 extra1 = req->extra1;
- u64 extra2 = req->extra2;
-
- trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
- req->cqe.res, req->cqe.flags, extra1, extra2);
-
- /*
- * If we can't get a cq entry, userspace overflowed the
- * submission (by quite a lot). Increment the overflow count in
- * the ring.
- */
- cqe = io_get_cqe(ctx);
- if (likely(cqe)) {
- memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe));
- cqe->big_cqe[0] = extra1;
- cqe->big_cqe[1] = extra2;
- return true;
- }
-
- return io_cqring_event_overflow(ctx, req->cqe.user_data, req->cqe.res,
- req->cqe.flags, extra1, extra2);
-}
-
static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
s32 res, u32 cflags)
{
@@ -2590,19 +2563,6 @@ static void __io_req_complete_post(struct io_kiocb *req, s32 res,
__io_req_complete_put(req);
}
-static void __io_req_complete_post32(struct io_kiocb *req, s32 res,
- u32 cflags, u64 extra1, u64 extra2)
-{
- if (!(req->flags & REQ_F_CQE_SKIP)) {
- req->cqe.res = res;
- req->cqe.flags = cflags;
- req->extra1 = extra1;
- req->extra2 = extra2;
- __io_fill_cqe32_req(req->ctx, req);
- }
- __io_req_complete_put(req);
-}
-
static void io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -2614,18 +2574,6 @@ static void io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags)
io_cqring_ev_posted(ctx);
}
-static void io_req_complete_post32(struct io_kiocb *req, s32 res,
- u32 cflags, u64 extra1, u64 extra2)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- spin_lock(&ctx->completion_lock);
- __io_req_complete_post32(req, res, cflags, extra1, extra2);
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
-}
-
static inline void io_req_complete_state(struct io_kiocb *req, s32 res,
u32 cflags)
{
@@ -2643,19 +2591,6 @@ static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
io_req_complete_post(req, res, cflags);
}
-static inline void __io_req_complete32(struct io_kiocb *req,
- unsigned int issue_flags, s32 res,
- u32 cflags, u64 extra1, u64 extra2)
-{
- if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
- io_req_complete_state(req, res, cflags);
- req->extra1 = extra1;
- req->extra2 = extra2;
- } else {
- io_req_complete_post32(req, res, cflags, extra1, extra2);
- }
-}
-
static inline void io_req_complete(struct io_kiocb *req, s32 res)
{
if (res < 0)
@@ -5079,6 +5014,13 @@ void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
}
EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task);
+static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
+ u64 extra1, u64 extra2)
+{
+ req->extra1 = extra1;
+ req->extra2 = extra2;
+}
+
/*
* Called by consumers of io_uring_cmd, if they originally returned
* -EIOCBQUEUED upon receiving the command.
@@ -5089,10 +5031,10 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
if (ret < 0)
req_set_fail(req);
+
if (req->ctx->flags & IORING_SETUP_CQE32)
- __io_req_complete32(req, 0, ret, 0, res2, 0);
- else
- io_req_complete(req, ret);
+ io_req_set_cqe32_extra(req, res2, 0);
+ io_req_complete(req, ret);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
--
2.36.1
* [PATCH 5.19 4/6] io_uring: fix ->extra{1,2} misuse
From: Pavel Begunkov @ 2022-06-15 10:23 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
We don't really know the state of the req->extra{1,2} fields in
__io_fill_cqe_req(): if an opcode handler is not aware of the CQE32
option, it never sets them up properly. Track the initialisation state
of those fields with a request flag.
Fixes: 76c68fbf1a1f9 ("io_uring: enable CQE32")
Signed-off-by: Pavel Begunkov <[email protected]>
---
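Condensed from the diff below, the pattern is a classic valid-flag: the
producer marks the fields initialised, and the consumer falls back to
zeroes otherwise:

        /* producer side, io_req_set_cqe32_extra() */
        req->extra1 = extra1;
        req->extra2 = extra2;
        req->flags |= REQ_F_CQE32_INIT;

        /* consumer side, the CQE32 branch of __io_fill_cqe_req() */
        u64 extra1 = 0, extra2 = 0;

        if (req->flags & REQ_F_CQE32_INIT) {
                extra1 = req->extra1;
                extra2 = req->extra2;
        }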
fs/io_uring.c | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 10901db93f7e..808b7f4ace0b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -844,6 +844,7 @@ enum {
REQ_F_SINGLE_POLL_BIT,
REQ_F_DOUBLE_POLL_BIT,
REQ_F_PARTIAL_IO_BIT,
+ REQ_F_CQE32_INIT_BIT,
REQ_F_APOLL_MULTISHOT_BIT,
/* keep async read/write and isreg together and in order */
REQ_F_SUPPORT_NOWAIT_BIT,
@@ -913,6 +914,8 @@ enum {
REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT),
/* fast poll multishot mode */
REQ_F_APOLL_MULTISHOT = BIT(REQ_F_APOLL_MULTISHOT_BIT),
+ /* ->extra1 and ->extra2 are initialised */
+ REQ_F_CQE32_INIT = BIT(REQ_F_CQE32_INIT_BIT),
};
struct async_poll {
@@ -2488,8 +2491,12 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
req->cqe.res, req->cqe.flags,
0, 0);
} else {
- u64 extra1 = req->extra1;
- u64 extra2 = req->extra2;
+ u64 extra1 = 0, extra2 = 0;
+
+ if (req->flags & REQ_F_CQE32_INIT) {
+ extra1 = req->extra1;
+ extra2 = req->extra2;
+ }
trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
req->cqe.res, req->cqe.flags, extra1, extra2);
@@ -5019,6 +5026,7 @@ static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
{
req->extra1 = extra1;
req->extra2 = extra2;
+ req->flags |= REQ_F_CQE32_INIT;
}
/*
--
2.36.1
* [PATCH 5.19 5/6] io_uring: inline __io_fill_cqe()
From: Pavel Begunkov @ 2022-06-15 10:23 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
In preparation for the following patch, inline __io_fill_cqe(); it has
only one user.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 37 ++++++++++++++++---------------------
1 file changed, 16 insertions(+), 21 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 808b7f4ace0b..792e9c95d217 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2447,26 +2447,6 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
return true;
}
-static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
- s32 res, u32 cflags)
-{
- struct io_uring_cqe *cqe;
-
- /*
- * If we can't get a cq entry, userspace overflowed the
- * submission (by quite a lot). Increment the overflow count in
- * the ring.
- */
- cqe = io_get_cqe(ctx);
- if (likely(cqe)) {
- WRITE_ONCE(cqe->user_data, user_data);
- WRITE_ONCE(cqe->res, res);
- WRITE_ONCE(cqe->flags, cflags);
- return true;
- }
- return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
-}
-
static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
struct io_kiocb *req)
{
@@ -2523,9 +2503,24 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
s32 res, u32 cflags)
{
+ struct io_uring_cqe *cqe;
+
ctx->cq_extra++;
trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);
- return __io_fill_cqe(ctx, user_data, res, cflags);
+
+ /*
+ * If we can't get a cq entry, userspace overflowed the
+ * submission (by quite a lot). Increment the overflow count in
+ * the ring.
+ */
+ cqe = io_get_cqe(ctx);
+ if (likely(cqe)) {
+ WRITE_ONCE(cqe->user_data, user_data);
+ WRITE_ONCE(cqe->res, res);
+ WRITE_ONCE(cqe->flags, cflags);
+ return true;
+ }
+ return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
}
static void __io_req_complete_put(struct io_kiocb *req)
--
2.36.1
* [PATCH 5.19 6/6] io_uring: make io_fill_cqe_aux to honour CQE32
From: Pavel Begunkov @ 2022-06-15 10:23 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
Don't let io_fill_cqe_aux() post 16B CQEs on CQE32 rings; neither the
kernel nor userspace expects that to happen.
Fixes: 76c68fbf1a1f9 ("io_uring: enable CQE32")
Signed-off-by: Pavel Begunkov <[email protected]>
---
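For context on why the zeroing matters: on a CQE32 ring every slot is
32B, so consumers read the extra fields unconditionally. A hypothetical
userspace reader, given a struct io_uring_cqe *cqe from the CQ ring,
purely for illustration:

        /* struct io_uring_cqe ends in big_cqe[] on CQE32 rings */
        uint64_t extra1 = cqe->big_cqe[0];  /* stale data before this patch */
        uint64_t extra2 = cqe->big_cqe[1];  /* if the CQE came via the aux path */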
fs/io_uring.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 792e9c95d217..5d479428d8e5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2518,6 +2518,11 @@ static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
WRITE_ONCE(cqe->user_data, user_data);
WRITE_ONCE(cqe->res, res);
WRITE_ONCE(cqe->flags, cflags);
+
+ if (ctx->flags & IORING_SETUP_CQE32) {
+ WRITE_ONCE(cqe->big_cqe[0], 0);
+ WRITE_ONCE(cqe->big_cqe[1], 0);
+ }
return true;
}
return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
--
2.36.1
* Re: [PATCH 5.19 0/6] CQE32 fixes
From: Jens Axboe @ 2022-06-15 12:03 UTC
To: Pavel Begunkov, io-uring
On 6/15/22 4:23 AM, Pavel Begunkov wrote:
> Several fixes for IORING_SETUP_CQE32
>
> Pavel Begunkov (6):
> io_uring: get rid of __io_fill_cqe{32}_req()
> io_uring: unite fill_cqe and the 32B version
> io_uring: fill extra big cqe fields from req
> io_uring: fix ->extra{1,2} misuse
> io_uring: inline __io_fill_cqe()
> io_uring: make io_fill_cqe_aux to honour CQE32
>
> fs/io_uring.c | 209 +++++++++++++++++++-------------------------------
> 1 file changed, 77 insertions(+), 132 deletions(-)
Looks good to me, thanks a lot for doing this work. One minor thing
that I'd like to change, though it can wait until 5.20, is the
completion spots where we pass in both ctx and req. It would be cleaner
to just pass in req; 2 out of 3 spots pass (req->ctx, req) anyway.
--
Jens Axboe
* Re: [PATCH 5.19 0/6] CQE32 fixes
From: Pavel Begunkov @ 2022-06-15 12:21 UTC
To: Jens Axboe, io-uring
On 6/15/22 13:03, Jens Axboe wrote:
> On 6/15/22 4:23 AM, Pavel Begunkov wrote:
>> Several fixes for IORING_SETUP_CQE32
>>
>> Pavel Begunkov (6):
>> io_uring: get rid of __io_fill_cqe{32}_req()
>> io_uring: unite fill_cqe and the 32B version
>> io_uring: fill extra big cqe fields from req
>> io_uring: fix ->extra{1,2} misuse
>> io_uring: inline __io_fill_cqe()
>> io_uring: make io_fill_cqe_aux to honour CQE32
>>
>> fs/io_uring.c | 209 +++++++++++++++++++-------------------------------
>> 1 file changed, 77 insertions(+), 132 deletions(-)
>
> Looks good to me, thanks a lot for doing this work. One minor thing that
> I'd like to change, but can wait until 5.20, is the completion spots
> where we pass in both ctx and req. Would be cleaner just to pass in req,
> and 2 out of 3 spots always do (req->ctx, req) anyway.
That's because __io_submit_flush_completions() should already have ctx
in a register, and we care about its performance. We can add a helper
if that's an eyesore, along the lines of the sketch below.
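Purely hypothetical and not part of this series, just to illustrate the
shape:

        /* thin wrapper so call sites only pass the request */
        static inline bool io_fill_cqe_req(struct io_kiocb *req)
        {
                return __io_fill_cqe_req(req->ctx, req);
        }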
--
Pavel Begunkov
* Re: [PATCH 5.19 0/6] CQE32 fixes
From: Jens Axboe @ 2022-06-15 12:24 UTC
To: Pavel Begunkov, io-uring
On 6/15/22 6:21 AM, Pavel Begunkov wrote:
> On 6/15/22 13:03, Jens Axboe wrote:
>> On 6/15/22 4:23 AM, Pavel Begunkov wrote:
>>> Several fixes for IORING_SETUP_CQE32
>>>
>>> Pavel Begunkov (6):
>>> io_uring: get rid of __io_fill_cqe{32}_req()
>>> io_uring: unite fill_cqe and the 32B version
>>> io_uring: fill extra big cqe fields from req
>>> io_uring: fix ->extra{1,2} misuse
>>> io_uring: inline __io_fill_cqe()
>>> io_uring: make io_fill_cqe_aux to honour CQE32
>>>
>>> fs/io_uring.c | 209 +++++++++++++++++++-------------------------------
>>> 1 file changed, 77 insertions(+), 132 deletions(-)
>>
>> Looks good to me, thanks a lot for doing this work. One minor thing that
>> I'd like to change, but can wait until 5.20, is the completion spots
>> where we pass in both ctx and req. Would be cleaner just to pass in req,
>> and 2 out of 3 spots always do (req->ctx, req) anyway.
>
> That's because __io_submit_flush_completions() should already have
> ctx in a register and we care about its performance. We can add
> a helper if that's an eyesore.
Yeah, I realize that; it just bothers the eye. Not sure it really
matters, as we're going to pull that io_kiocb cacheline hot anyway.
We can just leave it for now, it's not a big deal.
--
Jens Axboe
* Re: [PATCH 5.19 0/6] CQE32 fixes
From: Jens Axboe @ 2022-06-15 21:23 UTC
To: io-uring, asml.silence
On Wed, 15 Jun 2022 11:23:01 +0100, Pavel Begunkov wrote:
> Several fixes for IORING_SETUP_CQE32
>
> Pavel Begunkov (6):
> io_uring: get rid of __io_fill_cqe{32}_req()
> io_uring: unite fill_cqe and the 32B version
> io_uring: fill extra big cqe fields from req
> io_uring: fix ->extra{1,2} misuse
> io_uring: inline __io_fill_cqe()
> io_uring: make io_fill_cqe_aux to honour CQE32
>
> [...]
Applied, thanks!
[1/6] io_uring: get rid of __io_fill_cqe{32}_req()
(no commit info)
[2/6] io_uring: unite fill_cqe and the 32B version
(no commit info)
[3/6] io_uring: fill extra big cqe fields from req
(no commit info)
[4/6] io_uring: fix ->extra{1,2} misuse
(no commit info)
[5/6] io_uring: inline __io_fill_cqe()
(no commit info)
[6/6] io_uring: make io_fill_cqe_aux to honour CQE32
(no commit info)
Best regards,
--
Jens Axboe