* [PATCH] io_uring: kill dead code in io_req_complete_post
@ 2024-03-29 15:47 Ming Lei
From: Ming Lei @ 2024-03-29 15:47 UTC (permalink / raw)
To: Jens Axboe, io-uring; +Cc: Ming Lei, Pavel Begunkov
Since commit 8f6c829491fe ("io_uring: remove struct io_tw_state::locked"),
io_req_complete_post() is only called from io-wq submit work, where a
request reference is guaranteed to be held, so the reference cannot drop
to zero inside io_req_complete_post().
Kill the dead code; meanwhile, add a req_ref_put() helper to drop the
reference.
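For reference, the reference is pinned on the io-wq issue path roughly as
follows (a sketch of the existing io_wq_submit_work() logic, not part of
this patch):

	/*
	 * One reference will be dropped by ->io_wq_free_work() after
	 * returning to io-wq, so the reference held across
	 * io_req_complete_post() can never be the last one.
	 */
	if (!(req->flags & REQ_F_REFCOUNT))
		__io_req_set_refcount(req, 2);
	else
		req_ref_get(req);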
Cc: Pavel Begunkov <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
---
io_uring/io_uring.c | 37 ++-----------------------------------
io_uring/refs.h | 7 +++++++
2 files changed, 9 insertions(+), 35 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 104899522bc5..ac2e5da4558a 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -929,7 +929,6 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
- struct io_rsrc_node *rsrc_node = NULL;
/*
* Handle special CQ sync cases via task_work. DEFER_TASKRUN requires
@@ -946,42 +945,10 @@ static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
if (!io_fill_cqe_req(ctx, req))
io_req_cqe_overflow(req);
}
-
- /*
- * If we're the last reference to this request, add to our locked
- * free_list cache.
- */
- if (req_ref_put_and_test(req)) {
- if (req->flags & IO_REQ_LINK_FLAGS) {
- if (req->flags & IO_DISARM_MASK)
- io_disarm_next(req);
- if (req->link) {
- io_req_task_queue(req->link);
- req->link = NULL;
- }
- }
- io_put_kbuf_comp(req);
- if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
- io_clean_op(req);
- io_put_file(req);
-
- rsrc_node = req->rsrc_node;
- /*
- * Selected buffer deallocation in io_clean_op() assumes that
- * we don't hold ->completion_lock. Clean them here to avoid
- * deadlocks.
- */
- io_put_task_remote(req->task);
- wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
- ctx->locked_free_nr++;
- }
io_cq_unlock_post(ctx);
- if (rsrc_node) {
- io_ring_submit_lock(ctx, issue_flags);
- io_put_rsrc_node(ctx, rsrc_node);
- io_ring_submit_unlock(ctx, issue_flags);
- }
+	/* called from io-wq submit work only; the ref won't drop to zero */
+ req_ref_put(req);
}
void io_req_defer_failed(struct io_kiocb *req, s32 res)
diff --git a/io_uring/refs.h b/io_uring/refs.h
index 1336de3f2a30..63982ead9f7d 100644
--- a/io_uring/refs.h
+++ b/io_uring/refs.h
@@ -33,6 +33,13 @@ static inline void req_ref_get(struct io_kiocb *req)
atomic_inc(&req->refs);
}
+static inline void req_ref_put(struct io_kiocb *req)
+{
+ WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
+ WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
+ atomic_dec(&req->refs);
+}
+
static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
{
if (!(req->flags & REQ_F_REFCOUNT)) {
--
2.41.0
* Re: [PATCH] io_uring: kill dead code in io_req_complete_post
From: Pavel Begunkov @ 2024-04-02 18:40 UTC (permalink / raw)
To: Ming Lei, Jens Axboe, io-uring
On 3/29/24 15:47, Ming Lei wrote:
> Since commit 8f6c829491fe ("io_uring: remove struct io_tw_state::locked"),
> io_req_complete_post() is only called from io-wq submit work, where a
> request reference is guaranteed to be held, so the reference cannot drop
> to zero inside io_req_complete_post().
>
> Kill the dead code; meanwhile, add a req_ref_put() helper to drop the
> reference.
Interesting... a nice cleanup. The assumption is a bit too implicit for
my taste, but it should be just fine if we add

	if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_IOWQ)))
		return;

at the beginning of io_req_complete_post(); it's a slow path.
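For the record, io-wq should be the only issuer passing that flag; if
memory serves, io_wq_submit_work() sets up its issue flags like so
(worth double-checking against your tree):

	/* io-wq always issues unlocked and marks itself as such */
	unsigned int issue_flags = IO_URING_F_UNLOCKED | IO_URING_F_IOWQ;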
And with this change:
Reviewed-by: Pavel Begunkov <[email protected]>
--
Pavel Begunkov
* Re: [PATCH] io_uring: kill dead code in io_req_complete_post
From: Jens Axboe @ 2024-04-02 19:59 UTC (permalink / raw)
To: Pavel Begunkov, Ming Lei, io-uring
On 4/2/24 12:40 PM, Pavel Begunkov wrote:
>
> Interesting... a nice cleanup. The assumption is a bit too implicit for
> my taste, but it should be just fine if we add
>
> 	if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_IOWQ)))
> 		return;
And include a comment as to why that is there as well.
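Something like this, perhaps (untested, comment wording just a
suggestion):

	static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
	{
		struct io_ring_ctx *ctx = req->ctx;

		/*
		 * All other completion paths defer via task_work; only
		 * io-wq reaches here, and only io-wq guarantees the extra
		 * request reference that the final req_ref_put() relies on.
		 */
		if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_IOWQ)))
			return;
		...
	}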
--
Jens Axboe