* [PATCH for-next 1/6] io_uring: don't expose io_fill_cqe_aux()
2022-06-17 8:47 [PATCH for-next 0/6] clean up __io_fill_cqe_req() Pavel Begunkov
@ 2022-06-17 8:48 ` Pavel Begunkov
2022-06-17 8:48 ` [PATCH for-next 2/6] io_uring: don't inline __io_get_cqe() Pavel Begunkov
` (5 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Pavel Begunkov @ 2022-06-17 8:48 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
Deduplicate some code by adding a helper that fills an aux CQE and takes care
of the locking, the CQ ring commit and the completion notification.
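A rough before/after sketch of a call site (illustration only, not part of the
patch; the real conversions are in the hunks below):

    /* before: each caller open-codes locking, commit and notification */
    spin_lock(&ctx->completion_lock);
    filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
    io_commit_cqring(ctx);
    spin_unlock(&ctx->completion_lock);
    if (filled)
            io_cqring_ev_posted(ctx);

    /* after: the new helper hides all of that behind one call */
    filled = io_post_aux_cqe(ctx, user_data, res, cflags);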
Signed-off-by: Pavel Begunkov <[email protected]>
---
io_uring/io_uring.c | 18 ++++++++++++++++--
io_uring/io_uring.h | 3 +--
io_uring/msg_ring.c | 11 +----------
io_uring/net.c | 20 +++++---------------
io_uring/poll.c | 24 ++++++++----------------
io_uring/rsrc.c | 14 +++++---------
6 files changed, 36 insertions(+), 54 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 80c433995347..7ffb8422e7d0 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -673,8 +673,8 @@ bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, s32 res,
return true;
}
-bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
- u32 cflags)
+static bool io_fill_cqe_aux(struct io_ring_ctx *ctx,
+ u64 user_data, s32 res, u32 cflags)
{
struct io_uring_cqe *cqe;
@@ -701,6 +701,20 @@ bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
}
+bool io_post_aux_cqe(struct io_ring_ctx *ctx,
+ u64 user_data, s32 res, u32 cflags)
+{
+ bool filled;
+
+ spin_lock(&ctx->completion_lock);
+ filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
+ io_commit_cqring(ctx);
+ spin_unlock(&ctx->completion_lock);
+ if (filled)
+ io_cqring_ev_posted(ctx);
+ return filled;
+}
+
static void __io_req_complete_put(struct io_kiocb *req)
{
/*
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 16e46b09253a..ce6538c9aed3 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -241,8 +241,7 @@ void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
-bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
- u32 cflags);
+bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
void io_cqring_ev_posted(struct io_ring_ctx *ctx);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 1f2de3534932..b02be2349652 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -33,7 +33,6 @@ int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_msg *msg = io_kiocb_to_cmd(req);
struct io_ring_ctx *target_ctx;
- bool filled;
int ret;
ret = -EBADFD;
@@ -42,16 +41,8 @@ int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
ret = -EOVERFLOW;
target_ctx = req->file->private_data;
-
- spin_lock(&target_ctx->completion_lock);
- filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len, 0);
- io_commit_cqring(target_ctx);
- spin_unlock(&target_ctx->completion_lock);
-
- if (filled) {
- io_cqring_ev_posted(target_ctx);
+ if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
ret = 0;
- }
done:
if (ret < 0)
diff --git a/io_uring/net.c b/io_uring/net.c
index cd931dae1313..4481deda8607 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -647,22 +647,12 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
io_req_set_res(req, ret, 0);
return IOU_OK;
}
- if (ret >= 0) {
- bool filled;
-
- spin_lock(&ctx->completion_lock);
- filled = io_fill_cqe_aux(ctx, req->cqe.user_data, ret,
- IORING_CQE_F_MORE);
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- if (filled) {
- io_cqring_ev_posted(ctx);
- goto retry;
- }
- ret = -ECANCELED;
- }
- return ret;
+ if (ret < 0)
+ return ret;
+ if (io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE))
+ goto retry;
+ return -ECANCELED;
}
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 7f245f5617f6..d4bfc6d945cf 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -213,23 +213,15 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
__poll_t mask = mangle_poll(req->cqe.res &
req->apoll_events);
- bool filled;
-
- spin_lock(&ctx->completion_lock);
- filled = io_fill_cqe_aux(ctx, req->cqe.user_data,
- mask, IORING_CQE_F_MORE);
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- if (filled) {
- io_cqring_ev_posted(ctx);
- continue;
- }
- return -ECANCELED;
- }
- ret = io_poll_issue(req, locked);
- if (ret)
- return ret;
+ if (!io_post_aux_cqe(ctx, req->cqe.user_data,
+ mask, IORING_CQE_F_MORE))
+ return -ECANCELED;
+ } else {
+ ret = io_poll_issue(req, locked);
+ if (ret)
+ return ret;
+ }
/*
* Release all references, retry if someone tried to restart
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 2f893e3f5c15..c10c512aa71b 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -173,17 +173,13 @@ static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
list_del(&prsrc->list);
if (prsrc->tag) {
- if (ctx->flags & IORING_SETUP_IOPOLL)
+ if (ctx->flags & IORING_SETUP_IOPOLL) {
mutex_lock(&ctx->uring_lock);
-
- spin_lock(&ctx->completion_lock);
- io_fill_cqe_aux(ctx, prsrc->tag, 0, 0);
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
-
- if (ctx->flags & IORING_SETUP_IOPOLL)
+ io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
mutex_unlock(&ctx->uring_lock);
+ } else {
+ io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
+ }
}
rsrc_data->do_put(ctx, prsrc);
--
2.36.1
* [PATCH for-next 2/6] io_uring: don't inline __io_get_cqe()
2022-06-17 8:47 [PATCH for-next 0/6] clean up __io_fill_cqe_req() Pavel Begunkov
2022-06-17 8:48 ` [PATCH for-next 1/6] io_uring: don't expose io_fill_cqe_aux() Pavel Begunkov
@ 2022-06-17 8:48 ` Pavel Begunkov
2022-06-17 8:48 ` [PATCH for-next 3/6] io_uring: introduce io_req_cqe_overflow() Pavel Begunkov
` (4 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Pavel Begunkov @ 2022-06-17 8:48 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
__io_get_cqe() is not as hot as io_get_cqe(), so there is no need to inline
it; moving it out of line sheds ~500B from the binary.
Signed-off-by: Pavel Begunkov <[email protected]>
---
io_uring/io_uring.c | 35 +++++++++++++++++++++++++++++++++++
io_uring/io_uring.h | 36 +-----------------------------------
2 files changed, 36 insertions(+), 35 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 7ffb8422e7d0..a3b1339335c5 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -165,6 +165,11 @@ static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
__io_submit_flush_completions(ctx);
}
+static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
+{
+ return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
+}
+
static bool io_match_linked(struct io_kiocb *head)
{
struct io_kiocb *req;
@@ -673,6 +678,36 @@ bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, s32 res,
return true;
}
+/*
+ * writes to the cq entry need to come after reading head; the
+ * control dependency is enough as we're using WRITE_ONCE to
+ * fill the cq entry
+ */
+struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
+{
+ struct io_rings *rings = ctx->rings;
+ unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
+ unsigned int shift = 0;
+ unsigned int free, queued, len;
+
+ if (ctx->flags & IORING_SETUP_CQE32)
+ shift = 1;
+
+ /* userspace may cheat modifying the tail, be safe and do min */
+ queued = min(__io_cqring_events(ctx), ctx->cq_entries);
+ free = ctx->cq_entries - queued;
+ /* we need a contiguous range, limit based on the current array offset */
+ len = min(free, ctx->cq_entries - off);
+ if (!len)
+ return NULL;
+
+ ctx->cached_cq_tail++;
+ ctx->cqe_cached = &rings->cqes[off];
+ ctx->cqe_sentinel = ctx->cqe_cached + len;
+ ctx->cqe_cached++;
+ return &rings->cqes[off << shift];
+}
+
static bool io_fill_cqe_aux(struct io_ring_ctx *ctx,
u64 user_data, s32 res, u32 cflags)
{
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index ce6538c9aed3..51032a494aec 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -16,44 +16,10 @@ enum {
IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,
};
+struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, s32 res,
u32 cflags, u64 extra1, u64 extra2);
-static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
-{
- return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
-}
-
-/*
- * writes to the cq entry need to come after reading head; the
- * control dependency is enough as we're using WRITE_ONCE to
- * fill the cq entry
- */
-static inline struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
-{
- struct io_rings *rings = ctx->rings;
- unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
- unsigned int shift = 0;
- unsigned int free, queued, len;
-
- if (ctx->flags & IORING_SETUP_CQE32)
- shift = 1;
-
- /* userspace may cheat modifying the tail, be safe and do min */
- queued = min(__io_cqring_events(ctx), ctx->cq_entries);
- free = ctx->cq_entries - queued;
- /* we need a contiguous range, limit based on the current array offset */
- len = min(free, ctx->cq_entries - off);
- if (!len)
- return NULL;
-
- ctx->cached_cq_tail++;
- ctx->cqe_cached = &rings->cqes[off];
- ctx->cqe_sentinel = ctx->cqe_cached + len;
- ctx->cqe_cached++;
- return &rings->cqes[off << shift];
-}
-
static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
--
2.36.1
* [PATCH for-next 3/6] io_uring: introduce io_req_cqe_overflow()
2022-06-17 8:47 [PATCH for-next 0/6] clean up __io_fill_cqe_req() Pavel Begunkov
2022-06-17 8:48 ` [PATCH for-next 1/6] io_uring: don't expose io_fill_cqe_aux() Pavel Begunkov
2022-06-17 8:48 ` [PATCH for-next 2/6] io_uring: don't inline __io_get_cqe() Pavel Begunkov
@ 2022-06-17 8:48 ` Pavel Begunkov
2022-06-17 8:48 ` [PATCH for-next 4/6] io_uring: deduplicate __io_fill_cqe_req tracing Pavel Begunkov
` (3 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Pavel Begunkov @ 2022-06-17 8:48 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
__io_fill_cqe_req() is hot and inlined, so we want it to be as small as
possible. Add io_req_cqe_overflow(), which takes only a request and does all
of the overflow accounting, and use it to replace the two calls to the
six-argument io_cqring_event_overflow().
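For illustration, the overflow slow path in __io_fill_cqe_req() shrinks from
two six-argument calls to one request-based call (sketch only; the actual
change is in the hunks below):

    /* before: CQE32 and non-CQE32 branches each spell out all six arguments */
    return io_cqring_event_overflow(ctx, req->cqe.user_data,
                                    req->cqe.res, req->cqe.flags,
                                    extra1, extra2);

    /* after: a single out-of-line helper digs everything out of the request */
    return io_req_cqe_overflow(req);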
Signed-off-by: Pavel Begunkov <[email protected]>
---
io_uring/io_uring.c | 15 +++++++++++++--
io_uring/io_uring.h | 12 ++----------
2 files changed, 15 insertions(+), 12 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index a3b1339335c5..263d7e4f1b41 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -640,8 +640,8 @@ static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
}
}
-bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, s32 res,
- u32 cflags, u64 extra1, u64 extra2)
+static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
+ s32 res, u32 cflags, u64 extra1, u64 extra2)
{
struct io_overflow_cqe *ocqe;
size_t ocq_size = sizeof(struct io_overflow_cqe);
@@ -678,6 +678,17 @@ bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, s32 res,
return true;
}
+bool io_req_cqe_overflow(struct io_kiocb *req)
+{
+ if (!(req->flags & REQ_F_CQE32_INIT)) {
+ req->extra1 = 0;
+ req->extra2 = 0;
+ }
+ return io_cqring_event_overflow(req->ctx, req->cqe.user_data,
+ req->cqe.res, req->cqe.flags,
+ req->extra1, req->extra2);
+}
+
/*
* writes to the cq entry need to come after reading head; the
* control dependency is enough as we're using WRITE_ONCE to
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 51032a494aec..668fff18d3cc 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -17,8 +17,7 @@ enum {
};
struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
-bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, s32 res,
- u32 cflags, u64 extra1, u64 extra2);
+bool io_req_cqe_overflow(struct io_kiocb *req);
static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
@@ -58,10 +57,6 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
memcpy(cqe, &req->cqe, sizeof(*cqe));
return true;
}
-
- return io_cqring_event_overflow(ctx, req->cqe.user_data,
- req->cqe.res, req->cqe.flags,
- 0, 0);
} else {
u64 extra1 = 0, extra2 = 0;
@@ -85,11 +80,8 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
WRITE_ONCE(cqe->big_cqe[1], extra2);
return true;
}
-
- return io_cqring_event_overflow(ctx, req->cqe.user_data,
- req->cqe.res, req->cqe.flags,
- extra1, extra2);
}
+ return io_req_cqe_overflow(req);
}
static inline void req_set_fail(struct io_kiocb *req)
--
2.36.1
* [PATCH for-next 4/6] io_uring: deduplicate __io_fill_cqe_req tracing
2022-06-17 8:47 [PATCH for-next 0/6] clean up __io_fill_cqe_req() Pavel Begunkov
` (2 preceding siblings ...)
2022-06-17 8:48 ` [PATCH for-next 3/6] io_uring: introduce io_req_cqe_overflow() Pavel Begunkov
@ 2022-06-17 8:48 ` Pavel Begunkov
2022-06-17 8:48 ` [PATCH for-next 5/6] io_uring: deduplicate io_get_cqe() calls Pavel Begunkov
` (2 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Pavel Begunkov @ 2022-06-17 8:48 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
Deduplicate two trace_io_uring_complete() calls in __io_fill_cqe_req().
Signed-off-by: Pavel Begunkov <[email protected]>
---
io_uring/io_uring.h | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 668fff18d3cc..4134b206c33c 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -43,10 +43,12 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
{
struct io_uring_cqe *cqe;
- if (!(ctx->flags & IORING_SETUP_CQE32)) {
- trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
- req->cqe.res, req->cqe.flags, 0, 0);
+ trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
+ req->cqe.res, req->cqe.flags,
+ (req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
+ (req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);
+ if (!(ctx->flags & IORING_SETUP_CQE32)) {
/*
* If we can't get a cq entry, userspace overflowed the
* submission (by quite a lot). Increment the overflow count in
@@ -65,9 +67,6 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
extra2 = req->extra2;
}
- trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
- req->cqe.res, req->cqe.flags, extra1, extra2);
-
/*
* If we can't get a cq entry, userspace overflowed the
* submission (by quite a lot). Increment the overflow count in
--
2.36.1
* [PATCH for-next 5/6] io_uring: deduplicate io_get_cqe() calls
2022-06-17 8:47 [PATCH for-next 0/6] clean up __io_fill_cqe_req() Pavel Begunkov
` (3 preceding siblings ...)
2022-06-17 8:48 ` [PATCH for-next 4/6] io_uring: deduplicate __io_fill_cqe_req tracing Pavel Begunkov
@ 2022-06-17 8:48 ` Pavel Begunkov
2022-06-17 8:48 ` [PATCH for-next 6/6] io_uring: change ->cqe_cached invariant for CQE32 Pavel Begunkov
2022-06-17 13:35 ` [PATCH for-next 0/6] clean up __io_fill_cqe_req() Jens Axboe
6 siblings, 0 replies; 8+ messages in thread
From: Pavel Begunkov @ 2022-06-17 8:48 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
Deduplicate calls to io_get_cqe() from __io_fill_cqe_req().
Signed-off-by: Pavel Begunkov <[email protected]>
---
io_uring/io_uring.h | 38 +++++++++++++-------------------------
1 file changed, 13 insertions(+), 25 deletions(-)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 4134b206c33c..cd29d91c2175 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -47,19 +47,17 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
req->cqe.res, req->cqe.flags,
(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);
+ /*
+ * If we can't get a cq entry, userspace overflowed the
+ * submission (by quite a lot). Increment the overflow count in
+ * the ring.
+ */
+ cqe = io_get_cqe(ctx);
+ if (unlikely(!cqe))
+ return io_req_cqe_overflow(req);
+ memcpy(cqe, &req->cqe, sizeof(*cqe));
- if (!(ctx->flags & IORING_SETUP_CQE32)) {
- /*
- * If we can't get a cq entry, userspace overflowed the
- * submission (by quite a lot). Increment the overflow count in
- * the ring.
- */
- cqe = io_get_cqe(ctx);
- if (likely(cqe)) {
- memcpy(cqe, &req->cqe, sizeof(*cqe));
- return true;
- }
- } else {
+ if (ctx->flags & IORING_SETUP_CQE32) {
u64 extra1 = 0, extra2 = 0;
if (req->flags & REQ_F_CQE32_INIT) {
@@ -67,20 +65,10 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
extra2 = req->extra2;
}
- /*
- * If we can't get a cq entry, userspace overflowed the
- * submission (by quite a lot). Increment the overflow count in
- * the ring.
- */
- cqe = io_get_cqe(ctx);
- if (likely(cqe)) {
- memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe));
- WRITE_ONCE(cqe->big_cqe[0], extra1);
- WRITE_ONCE(cqe->big_cqe[1], extra2);
- return true;
- }
+ WRITE_ONCE(cqe->big_cqe[0], extra1);
+ WRITE_ONCE(cqe->big_cqe[1], extra2);
}
- return io_req_cqe_overflow(req);
+ return true;
}
static inline void req_set_fail(struct io_kiocb *req)
--
2.36.1
* [PATCH for-next 6/6] io_uring: change ->cqe_cached invariant for CQE32
2022-06-17 8:47 [PATCH for-next 0/6] clean up __io_fill_cqe_req() Pavel Begunkov
` (4 preceding siblings ...)
2022-06-17 8:48 ` [PATCH for-next 5/6] io_uring: deduplicate io_get_cqe() calls Pavel Begunkov
@ 2022-06-17 8:48 ` Pavel Begunkov
2022-06-17 13:35 ` [PATCH for-next 0/6] clean up __io_fill_cqe_req() Jens Axboe
6 siblings, 0 replies; 8+ messages in thread
From: Pavel Begunkov @ 2022-06-17 8:48 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
With IORING_SETUP_CQE32, ->cqe_cached doesn't store a real CQE address but
rather an implicit offset into the cqes array. Store the real cqe pointer
instead and increment it accordingly when CQE32 is enabled.
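Put differently, a CQE32 completion occupies two consecutive struct
io_uring_cqe slots, so the cached pointer is now bumped by two real entries
per posted CQE. A sketch of the new invariant (illustration only, mirroring
the hunks below):

    cqe = ctx->cqe_cached;          /* a real CQE address now, no implicit offset */
    ctx->cached_cq_tail++;
    ctx->cqe_cached++;
    if (ctx->flags & IORING_SETUP_CQE32)
            ctx->cqe_cached++;      /* big CQEs take two slots in rings->cqes[] */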
Signed-off-by: Pavel Begunkov <[email protected]>
---
io_uring/io_uring.c | 15 ++++++++++-----
io_uring/io_uring.h | 8 ++------
2 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 263d7e4f1b41..11b4b5040020 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -698,11 +698,8 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
{
struct io_rings *rings = ctx->rings;
unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
- unsigned int shift = 0;
unsigned int free, queued, len;
- if (ctx->flags & IORING_SETUP_CQE32)
- shift = 1;
/* userspace may cheat modifying the tail, be safe and do min */
queued = min(__io_cqring_events(ctx), ctx->cq_entries);
@@ -712,11 +709,19 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
if (!len)
return NULL;
- ctx->cached_cq_tail++;
+ if (ctx->flags & IORING_SETUP_CQE32) {
+ off <<= 1;
+ len <<= 1;
+ }
+
ctx->cqe_cached = &rings->cqes[off];
ctx->cqe_sentinel = ctx->cqe_cached + len;
+
+ ctx->cached_cq_tail++;
ctx->cqe_cached++;
- return &rings->cqes[off << shift];
+ if (ctx->flags & IORING_SETUP_CQE32)
+ ctx->cqe_cached++;
+ return &rings->cqes[off];
}
static bool io_fill_cqe_aux(struct io_ring_ctx *ctx,
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index cd29d91c2175..f1b3e765495b 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -24,14 +24,10 @@ static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
struct io_uring_cqe *cqe = ctx->cqe_cached;
- if (ctx->flags & IORING_SETUP_CQE32) {
- unsigned int off = ctx->cqe_cached - ctx->rings->cqes;
-
- cqe += off;
- }
-
ctx->cached_cq_tail++;
ctx->cqe_cached++;
+ if (ctx->flags & IORING_SETUP_CQE32)
+ ctx->cqe_cached++;
return cqe;
}
--
2.36.1
* Re: [PATCH for-next 0/6] clean up __io_fill_cqe_req()
2022-06-17 8:47 [PATCH for-next 0/6] clean up __io_fill_cqe_req() Pavel Begunkov
` (5 preceding siblings ...)
2022-06-17 8:48 ` [PATCH for-next 6/6] io_uring: change ->cqe_cached invariant for CQE32 Pavel Begunkov
@ 2022-06-17 13:35 ` Jens Axboe
6 siblings, 0 replies; 8+ messages in thread
From: Jens Axboe @ 2022-06-17 13:35 UTC
To: io-uring, asml.silence
On Fri, 17 Jun 2022 09:47:59 +0100, Pavel Begunkov wrote:
> Clean up __io_fill_cqe_req() after recent changes
>
> Pavel Begunkov (6):
> io_uring: don't expose io_fill_cqe_aux()
> io_uring: don't inline __io_get_cqe()
> io_uring: introduce io_req_cqe_overflow()
> io_uring: deduplicate __io_fill_cqe_req tracing
> io_uring: deduplicate io_get_cqe() calls
> io_uring: change ->cqe_cached invariant for CQE32
>
> [...]
Applied, thanks!
[1/6] io_uring: don't expose io_fill_cqe_aux()
(no commit info)
[2/6] io_uring: don't inline __io_get_cqe()
(no commit info)
[3/6] io_uring: introduce io_req_cqe_overflow()
(no commit info)
[4/6] io_uring: deduplicate __io_fill_cqe_req tracing
(no commit info)
[5/6] io_uring: deduplicate io_get_cqe() calls
(no commit info)
[6/6] io_uring: change ->cqe_cached invariant for CQE32
(no commit info)
Best regards,
--
Jens Axboe