From: Pavel Begunkov <[email protected]>
To: [email protected]
Cc: Jens Axboe <[email protected]>, [email protected]
Subject: [PATCH for-next 6/7] io_uring: introduce locking helpers for CQE posting
Date: Sun, 19 Jun 2022 12:26:09 +0100
Message-ID: <693e461561af1ce9ccacfee9c28ff0c54e31e84f.1655637157.git.asml.silence@gmail.com>
In-Reply-To: <[email protected]>
spin_lock(&ctx->completion_lock);
/* post CQEs */
io_commit_cqring(ctx);
spin_unlock(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
We have many places repeating this sequence, and the three-function
unlock section is not ideal from a maintenance perspective; it also
makes it harder to add new locking/sync tricks.

Introduce two helpers: io_cq_lock(), which is simple and only grabs
->completion_lock, and io_cq_unlock_post(), which encapsulates the
three-call section.
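
For illustration, a sketch of the resulting call-site pattern (it just
mirrors the hunks below, it is not an additional change):

	io_cq_lock(ctx);
	/* post CQEs */
	io_cq_unlock_post(ctx);

The completion flush path in __io_submit_flush_completions() uses the
inlined __io_cq_unlock_post() variant instead.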
Signed-off-by: Pavel Begunkov <[email protected]>
---
io_uring/io_uring.c | 57 +++++++++++++++++++++------------------------
io_uring/io_uring.h | 9 ++++++-
io_uring/timeout.c | 6 ++---
3 files changed, 36 insertions(+), 36 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 57aef092ef38..cff046b0734b 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -527,7 +527,7 @@ void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
io_eventfd_signal(ctx);
}
-void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
ctx->has_evfd))
@@ -536,6 +536,19 @@ void io_cqring_ev_posted(struct io_ring_ctx *ctx)
io_cqring_wake(ctx);
}
+static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
+ __releases(ctx->completion_lock)
+{
+ io_commit_cqring(ctx);
+ spin_unlock(&ctx->completion_lock);
+ io_cqring_ev_posted(ctx);
+}
+
+void io_cq_unlock_post(struct io_ring_ctx *ctx)
+{
+ __io_cq_unlock_post(ctx);
+}
+
/* Returns true if there are no backlogged entries after the flush */
static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
@@ -548,7 +561,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
if (ctx->flags & IORING_SETUP_CQE32)
cqe_size <<= 1;
- spin_lock(&ctx->completion_lock);
+ io_cq_lock(ctx);
while (!list_empty(&ctx->cq_overflow_list)) {
struct io_uring_cqe *cqe = io_get_cqe(ctx);
struct io_overflow_cqe *ocqe;
@@ -572,9 +585,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
}
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
+ io_cq_unlock_post(ctx);
return all_flushed;
}
@@ -760,11 +771,9 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx,
{
bool filled;
- spin_lock(&ctx->completion_lock);
+ io_cq_lock(ctx);
filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
+ io_cq_unlock_post(ctx);
return filled;
}
@@ -810,11 +819,9 @@ void io_req_complete_post(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- spin_lock(&ctx->completion_lock);
+ io_cq_lock(ctx);
__io_req_complete_post(req);
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
+ io_cq_unlock_post(ctx);
}
inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
@@ -946,11 +953,9 @@ static void __io_req_find_next_prep(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- spin_lock(&ctx->completion_lock);
+ io_cq_lock(ctx);
io_disarm_next(req);
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
+ io_cq_unlock_post(ctx);
}
static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
@@ -984,13 +989,6 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
percpu_ref_put(&ctx->refs);
}
-static inline void ctx_commit_and_unlock(struct io_ring_ctx *ctx)
-{
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
-}
-
static void handle_prev_tw_list(struct io_wq_work_node *node,
struct io_ring_ctx **ctx, bool *uring_locked)
{
@@ -1006,7 +1004,7 @@ static void handle_prev_tw_list(struct io_wq_work_node *node,
if (req->ctx != *ctx) {
if (unlikely(!*uring_locked && *ctx))
- ctx_commit_and_unlock(*ctx);
+ io_cq_unlock_post(*ctx);
ctx_flush_and_put(*ctx, uring_locked);
*ctx = req->ctx;
@@ -1014,7 +1012,7 @@ static void handle_prev_tw_list(struct io_wq_work_node *node,
*uring_locked = mutex_trylock(&(*ctx)->uring_lock);
percpu_ref_get(&(*ctx)->refs);
if (unlikely(!*uring_locked))
- spin_lock(&(*ctx)->completion_lock);
+ io_cq_lock(*ctx);
}
if (likely(*uring_locked)) {
req->io_task_work.func(req, uring_locked);
@@ -1026,7 +1024,7 @@ static void handle_prev_tw_list(struct io_wq_work_node *node,
} while (node);
if (unlikely(!*uring_locked))
- ctx_commit_and_unlock(*ctx);
+ io_cq_unlock_post(*ctx);
}
static void handle_tw_list(struct io_wq_work_node *node,
@@ -1261,10 +1259,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
if (!(req->flags & REQ_F_CQE_SKIP))
__io_fill_cqe_req(ctx, req);
}
-
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
+ __io_cq_unlock_post(ctx);
io_free_batch_list(ctx, state->compl_reqs.first);
INIT_WQ_LIST(&state->compl_reqs);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 7feef8c36db7..bb8367908472 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -24,7 +24,6 @@ void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
-void io_cqring_ev_posted(struct io_ring_ctx *ctx);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);
@@ -66,6 +65,14 @@ bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
#define io_for_each_link(pos, head) \
for (pos = (head); pos; pos = pos->link)
+static inline void io_cq_lock(struct io_ring_ctx *ctx)
+ __acquires(ctx->completion_lock)
+{
+ spin_lock(&ctx->completion_lock);
+}
+
+void io_cq_unlock_post(struct io_ring_ctx *ctx);
+
static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 4938c1cdcbcd..3c331b723332 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -615,7 +615,7 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
struct io_timeout *timeout, *tmp;
int canceled = 0;
- spin_lock(&ctx->completion_lock);
+ io_cq_lock(ctx);
spin_lock_irq(&ctx->timeout_lock);
list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
struct io_kiocb *req = cmd_to_io_kiocb(timeout);
@@ -626,8 +626,6 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
}
}
spin_unlock_irq(&ctx->timeout_lock);
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
+ io_cq_unlock_post(ctx);
return canceled != 0;
}
--
2.36.1