* [PATCH 1/5] io_uring: optimise iowq refcounting
2021-08-14 16:26 [PATCH for-next 0/5] 5.15 cleanups and optimisations Pavel Begunkov
@ 2021-08-14 16:26 ` Pavel Begunkov
2021-08-14 19:13 ` Jens Axboe
2021-08-14 16:26 ` [PATCH 2/5] io_uring: don't inflight-track linked timeouts Pavel Begunkov
` (3 subsequent siblings)
4 siblings, 1 reply; 13+ messages in thread
From: Pavel Begunkov @ 2021-08-14 16:26 UTC
To: Jens Axboe, io-uring
If a request is forwarded to io-wq, there is a good chance it hasn't
been refcounted yet and we can save one req_ref_get() by setting the
refcount number to the right value directly.
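In effect, the change replaces an atomic_set() followed by an atomic_inc()
(a locked read-modify-write on e.g. x86) with a single plain atomic_set().
A minimal userspace sketch of the saving, using C11 atomics and
illustrative names rather than the kernel's helpers:

#include <stdatomic.h>
#include <stdio.h>

struct req {
	atomic_int refs;
};

/* Old scheme: init to 1 (io_req_refcount), then take the io-wq ref. */
static void refcount_then_get(struct req *r)
{
	atomic_store(&r->refs, 1);
	atomic_fetch_add(&r->refs, 1);	/* the RMW this patch saves */
}

/* New scheme: initialise straight to the final value, one plain store. */
static void refcount_direct(struct req *r)
{
	atomic_store(&r->refs, 2);	/* __io_req_refcount(req, 2) */
}

int main(void)
{
	struct req a, b;

	refcount_then_get(&a);
	refcount_direct(&b);
	/* both end up at 2, but the second variant did no RMW */
	printf("%d %d\n", atomic_load(&a.refs), atomic_load(&b.refs));
	return 0;
}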
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 17 ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 51c4166f68b5..0d9a443d4987 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1115,14 +1115,19 @@ static inline void req_ref_get(struct io_kiocb *req)
atomic_inc(&req->refs);
}
-static inline void io_req_refcount(struct io_kiocb *req)
+static inline void __io_req_refcount(struct io_kiocb *req, int nr)
{
if (!(req->flags & REQ_F_REFCOUNT)) {
req->flags |= REQ_F_REFCOUNT;
- atomic_set(&req->refs, 1);
+ atomic_set(&req->refs, nr);
}
}
+static inline void io_req_refcount(struct io_kiocb *req)
+{
+ __io_req_refcount(req, 1);
+}
+
static inline void io_req_set_rsrc_node(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -6311,9 +6316,11 @@ static void io_wq_submit_work(struct io_wq_work *work)
struct io_kiocb *timeout;
int ret = 0;
- io_req_refcount(req);
- /* will be dropped by ->io_free_work() after returning to io-wq */
- req_ref_get(req);
+ /* one will be dropped by ->io_free_work() after returning to io-wq */
+ if (!(req->flags & REQ_F_REFCOUNT))
+ __io_req_refcount(req, 2);
+ else
+ req_ref_get(req);
timeout = io_prep_linked_timeout(req);
if (timeout)
--
2.32.0
* Re: [PATCH 1/5] io_uring: optimise iowq refcounting
2021-08-14 16:26 ` [PATCH 1/5] io_uring: optimise iowq refcounting Pavel Begunkov
@ 2021-08-14 19:13 ` Jens Axboe
2021-08-14 19:31 ` Pavel Begunkov
0 siblings, 1 reply; 13+ messages in thread
From: Jens Axboe @ 2021-08-14 19:13 UTC
To: Pavel Begunkov, io-uring
On 8/14/21 10:26 AM, Pavel Begunkov wrote:
> If a request is forwarded to io-wq, there is a good chance it hasn't
> been refcounted yet and we can save one req_ref_get() by setting the
> refcount number to the right value directly.
Not sure this really matters, but can't hurt either. But...
> @@ -1115,14 +1115,19 @@ static inline void req_ref_get(struct io_kiocb *req)
> atomic_inc(&req->refs);
> }
>
> -static inline void io_req_refcount(struct io_kiocb *req)
> +static inline void __io_req_refcount(struct io_kiocb *req, int nr)
> {
> if (!(req->flags & REQ_F_REFCOUNT)) {
> req->flags |= REQ_F_REFCOUNT;
> - atomic_set(&req->refs, 1);
> + atomic_set(&req->refs, nr);
> }
> }
>
> +static inline void io_req_refcount(struct io_kiocb *req)
> +{
> + __io_req_refcount(req, 1);
> +}
> +
I really think these should be io_req_set_refcount() or something like
that, making it clear that we're actively setting/manipulating the ref
count.
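Concretely, the suggested rename would look something like this (same
bodies as in the patch, only the names change; a sketch of the
suggestion, not the final code):

static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
{
	if (!(req->flags & REQ_F_REFCOUNT)) {
		req->flags |= REQ_F_REFCOUNT;
		atomic_set(&req->refs, nr);
	}
}

static inline void io_req_set_refcount(struct io_kiocb *req)
{
	__io_req_set_refcount(req, 1);
}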
--
Jens Axboe
* Re: [PATCH 1/5] io_uring: optimise iowq refcounting
2021-08-14 19:13 ` Jens Axboe
@ 2021-08-14 19:31 ` Pavel Begunkov
2021-08-14 19:36 ` Pavel Begunkov
2021-08-14 19:37 ` Jens Axboe
0 siblings, 2 replies; 13+ messages in thread
From: Pavel Begunkov @ 2021-08-14 19:31 UTC
To: Jens Axboe, io-uring
On 8/14/21 8:13 PM, Jens Axboe wrote:
> On 8/14/21 10:26 AM, Pavel Begunkov wrote:
>> If a request is forwarded to io-wq, there is a good chance it hasn't
>> been refcounted yet and we can save one req_ref_get() by setting the
>> refcount number to the right value directly.
>
> Not sure this really matters, but can't hurt either. But...
The refcount patches made this path one atomic op worse, and I just
prefer not to regress, even if slightly
>> @@ -1115,14 +1115,19 @@ static inline void req_ref_get(struct io_kiocb *req)
>> atomic_inc(&req->refs);
>> }
>>
>> -static inline void io_req_refcount(struct io_kiocb *req)
>> +static inline void __io_req_refcount(struct io_kiocb *req, int nr)
>> {
>> if (!(req->flags & REQ_F_REFCOUNT)) {
>> req->flags |= REQ_F_REFCOUNT;
>> - atomic_set(&req->refs, 1);
>> + atomic_set(&req->refs, nr);
>> }
>> }
>>
>> +static inline void io_req_refcount(struct io_kiocb *req)
>> +{
>> + __io_req_refcount(req, 1);
>> +}
>> +
>
> I really think these should be io_req_set_refcount() or something like
> that, making it clear that we're actively setting/manipulating the ref
> count.
Agree. A separate patch, maybe?
--
Pavel Begunkov
* Re: [PATCH 1/5] io_uring: optimise iowq refcounting
2021-08-14 19:31 ` Pavel Begunkov
@ 2021-08-14 19:36 ` Pavel Begunkov
2021-08-14 19:38 ` Jens Axboe
2021-08-14 19:37 ` Jens Axboe
1 sibling, 1 reply; 13+ messages in thread
From: Pavel Begunkov @ 2021-08-14 19:36 UTC
To: Jens Axboe, io-uring
On 8/14/21 8:31 PM, Pavel Begunkov wrote:
> On 8/14/21 8:13 PM, Jens Axboe wrote:
>> On 8/14/21 10:26 AM, Pavel Begunkov wrote:
>>> If a request is forwarded to io-wq, there is a good chance it hasn't
>>> been refcounted yet and we can save one req_ref_get() by setting the
>>> refcount number to the right value directly.
>>
>> Not sure this really matters, but can't hurt either. But...
>
> The refcount patches made this path one atomic op worse, and I just
> prefer not to regress, even if slightly
>
>>> @@ -1115,14 +1115,19 @@ static inline void req_ref_get(struct io_kiocb *req)
>>> atomic_inc(&req->refs);
>>> }
>>>
>>> -static inline void io_req_refcount(struct io_kiocb *req)
>>> +static inline void __io_req_refcount(struct io_kiocb *req, int nr)
>>> {
>>> if (!(req->flags & REQ_F_REFCOUNT)) {
>>> req->flags |= REQ_F_REFCOUNT;
>>> - atomic_set(&req->refs, 1);
>>> + atomic_set(&req->refs, nr);
>>> }
>>> }
>>>
>>> +static inline void io_req_refcount(struct io_kiocb *req)
>>> +{
>>> + __io_req_refcount(req, 1);
>>> +}
>>> +
>>
>> I really think these should be io_req_set_refcount() or something like
>> that, making it clear that we're actively setting/manipulating the ref
>> count.
>
> Agree. A separate patch, maybe?
I mean, it would just be a bit easier for me, instead of rebasing
this series and the not-yet-sent patches.
--
Pavel Begunkov
* Re: [PATCH 1/5] io_uring: optimise iowq refcounting
2021-08-14 19:36 ` Pavel Begunkov
@ 2021-08-14 19:38 ` Jens Axboe
2021-08-15 9:41 ` Pavel Begunkov
0 siblings, 1 reply; 13+ messages in thread
From: Jens Axboe @ 2021-08-14 19:38 UTC
To: Pavel Begunkov, io-uring
On 8/14/21 1:36 PM, Pavel Begunkov wrote:
> On 8/14/21 8:31 PM, Pavel Begunkov wrote:
>> On 8/14/21 8:13 PM, Jens Axboe wrote:
>>> On 8/14/21 10:26 AM, Pavel Begunkov wrote:
>>>> If a request is forwarded to io-wq, there is a good chance it hasn't
>>>> been refcounted yet and we can save one req_ref_get() by setting the
>>>> refcount number to the right value directly.
>>>
>>> Not sure this really matters, but can't hurt either. But...
>>
>> The refcount patches made this path one atomic op worse, and I just
>> prefer not to regress, even if slightly
>>
>>>> @@ -1115,14 +1115,19 @@ static inline void req_ref_get(struct io_kiocb *req)
>>>> atomic_inc(&req->refs);
>>>> }
>>>>
>>>> -static inline void io_req_refcount(struct io_kiocb *req)
>>>> +static inline void __io_req_refcount(struct io_kiocb *req, int nr)
>>>> {
>>>> if (!(req->flags & REQ_F_REFCOUNT)) {
>>>> req->flags |= REQ_F_REFCOUNT;
>>>> - atomic_set(&req->refs, 1);
>>>> + atomic_set(&req->refs, nr);
>>>> }
>>>> }
>>>>
>>>> +static inline void io_req_refcount(struct io_kiocb *req)
>>>> +{
>>>> + __io_req_refcount(req, 1);
>>>> +}
>>>> +
>>>
>>> I really think these should be io_req_set_refcount() or something like
>>> that, making it clear that we're actively setting/manipulating the ref
>>> count.
>>
>> Agree. A separate patch, maybe?
>
> I mean, it would just be a bit easier for me, instead of rebasing
> this series and the not-yet-sent patches.
I think it should come before this series at least, or be folded into the
first patch. So probably no way around the rebase, sorry...
--
Jens Axboe
* Re: [PATCH 1/5] io_uring: optimise iowq refcounting
2021-08-14 19:38 ` Jens Axboe
@ 2021-08-15 9:41 ` Pavel Begunkov
2021-08-15 15:01 ` Jens Axboe
0 siblings, 1 reply; 13+ messages in thread
From: Pavel Begunkov @ 2021-08-15 9:41 UTC
To: Jens Axboe, io-uring
On 8/14/21 8:38 PM, Jens Axboe wrote:
> On 8/14/21 1:36 PM, Pavel Begunkov wrote:
>> On 8/14/21 8:31 PM, Pavel Begunkov wrote:
>>> On 8/14/21 8:13 PM, Jens Axboe wrote:
>>>> On 8/14/21 10:26 AM, Pavel Begunkov wrote:
>>>>> If a request is forwarded to io-wq, there is a good chance it hasn't
>>>>> been refcounted yet and we can save one req_ref_get() by setting the
>>>>> refcount number to the right value directly.
>>>>
>>>> Not sure this really matters, but can't hurt either. But...
>>>
>>> The refcount patches made this path one atomic op worse, and I just
>>> prefer not to regress, even if slightly
>>>
>>>>> @@ -1115,14 +1115,19 @@ static inline void req_ref_get(struct io_kiocb *req)
>>>>> atomic_inc(&req->refs);
>>>>> }
>>>>>
>>>>> -static inline void io_req_refcount(struct io_kiocb *req)
>>>>> +static inline void __io_req_refcount(struct io_kiocb *req, int nr)
>>>>> {
>>>>> if (!(req->flags & REQ_F_REFCOUNT)) {
>>>>> req->flags |= REQ_F_REFCOUNT;
>>>>> - atomic_set(&req->refs, 1);
>>>>> + atomic_set(&req->refs, nr);
>>>>> }
>>>>> }
>>>>>
>>>>> +static inline void io_req_refcount(struct io_kiocb *req)
>>>>> +{
>>>>> + __io_req_refcount(req, 1);
>>>>> +}
>>>>> +
>>>>
>>>> I really think these should be io_req_set_refcount() or something like
>>>> that, making it clear that we're actively setting/manipulating the ref
>>>> count.
>>>
>>> Agree. A separate patch, maybe?
>>
>> I mean, it would just be a bit easier for me, instead of rebasing
>> this series and the not-yet-sent patches.
>
> I think it should come before this series at least, or be folded into the
> first patch. So probably no way around the rebase, sorry...
Don't see the point, but anyway, just resent it
--
Pavel Begunkov
* Re: [PATCH 1/5] io_uring: optimise iowq refcounting
2021-08-15 9:41 ` Pavel Begunkov
@ 2021-08-15 15:01 ` Jens Axboe
0 siblings, 0 replies; 13+ messages in thread
From: Jens Axboe @ 2021-08-15 15:01 UTC
To: Pavel Begunkov, io-uring
On 8/15/21 3:41 AM, Pavel Begunkov wrote:
> On 8/14/21 8:38 PM, Jens Axboe wrote:
>> On 8/14/21 1:36 PM, Pavel Begunkov wrote:
>>> On 8/14/21 8:31 PM, Pavel Begunkov wrote:
>>>> On 8/14/21 8:13 PM, Jens Axboe wrote:
>>>>> On 8/14/21 10:26 AM, Pavel Begunkov wrote:
>>>>>> If a request is forwarded to io-wq, there is a good chance it hasn't
>>>>>> been refcounted yet and we can save one req_ref_get() by setting the
>>>>>> refcount number to the right value directly.
>>>>>
>>>>> Not sure this really matters, but can't hurt either. But...
>>>>
>>>> The refcount patches made this path one atomic op worse, and I just
>>>> prefer not to regress, even if slightly
>>>>
>>>>>> @@ -1115,14 +1115,19 @@ static inline void req_ref_get(struct io_kiocb *req)
>>>>>> atomic_inc(&req->refs);
>>>>>> }
>>>>>>
>>>>>> -static inline void io_req_refcount(struct io_kiocb *req)
>>>>>> +static inline void __io_req_refcount(struct io_kiocb *req, int nr)
>>>>>> {
>>>>>> if (!(req->flags & REQ_F_REFCOUNT)) {
>>>>>> req->flags |= REQ_F_REFCOUNT;
>>>>>> - atomic_set(&req->refs, 1);
>>>>>> + atomic_set(&req->refs, nr);
>>>>>> }
>>>>>> }
>>>>>>
>>>>>> +static inline void io_req_refcount(struct io_kiocb *req)
>>>>>> +{
>>>>>> + __io_req_refcount(req, 1);
>>>>>> +}
>>>>>> +
>>>>>
>>>>> I really think these should be io_req_set_refcount() or something like
>>>>> that, making it clear that we're actively setting/manipulating the ref
>>>>> count.
>>>>
>>>> Agree. A separate patch, maybe?
>>>
>>> I mean, it would just be a bit easier for me, instead of rebasing
>>> this series and the not-yet-sent patches.
>>
>> I think it should come before this series at least, or be folded into the
>> first patch. So probably no way around the rebase, sorry...
>
> Don't see the point, but anyway, just resent it
That's the usual approach: first a prep patch to clean it up, then the
change on top. The opposite might be easier since the other patches
already exist, but it's backwards in terms of ordering, imho.
--
Jens Axboe
* Re: [PATCH 1/5] io_uring: optimise iowq refcounting
2021-08-14 19:31 ` Pavel Begunkov
2021-08-14 19:36 ` Pavel Begunkov
@ 2021-08-14 19:37 ` Jens Axboe
1 sibling, 0 replies; 13+ messages in thread
From: Jens Axboe @ 2021-08-14 19:37 UTC
To: Pavel Begunkov, io-uring
On 8/14/21 1:31 PM, Pavel Begunkov wrote:
> On 8/14/21 8:13 PM, Jens Axboe wrote:
>> On 8/14/21 10:26 AM, Pavel Begunkov wrote:
>>> If a request is forwarded to io-wq, there is a good chance it hasn't
>>> been refcounted yet and we can save one req_ref_get() by setting the
>>> refcount number to the right value directly.
>>
>> Not sure this really matters, but can't hurt either. But...
>
> The refcount patches made this path one atomic op worse, and I just
> prefer not to regress, even if slightly
Not really against it, but I doubt it's measurable if you end up hitting
the io-wq slower path anyway. But as I said, it can't really hurt, so I'm
not against it.
>>> @@ -1115,14 +1115,19 @@ static inline void req_ref_get(struct io_kiocb *req)
>>> atomic_inc(&req->refs);
>>> }
>>>
>>> -static inline void io_req_refcount(struct io_kiocb *req)
>>> +static inline void __io_req_refcount(struct io_kiocb *req, int nr)
>>> {
>>> if (!(req->flags & REQ_F_REFCOUNT)) {
>>> req->flags |= REQ_F_REFCOUNT;
>>> - atomic_set(&req->refs, 1);
>>> + atomic_set(&req->refs, nr);
>>> }
>>> }
>>>
>>> +static inline void io_req_refcount(struct io_kiocb *req)
>>> +{
>>> + __io_req_refcount(req, 1);
>>> +}
>>> +
>>
>> I really think these should be io_req_set_refcount() or something like
>> that, making it clear that we're actively setting/manipulating the ref
>> count.
>
> Agree. A separate patch, maybe?
Maybe just fold it into this one, as it's splitting out a helper anyway.
Or do it as a prep patch before this one, up to you.
--
Jens Axboe
* [PATCH 2/5] io_uring: don't inflight-track linked timeouts
2021-08-14 16:26 [PATCH for-next 0/5] 5.15 cleanups and optimisations Pavel Begunkov
2021-08-14 16:26 ` [PATCH 1/5] io_uring: optimise iowq refcounting Pavel Begunkov
@ 2021-08-14 16:26 ` Pavel Begunkov
2021-08-14 16:26 ` [PATCH 3/5] io_uring: optimise initial ltimeout refcounting Pavel Begunkov
` (2 subsequent siblings)
4 siblings, 0 replies; 13+ messages in thread
From: Pavel Begunkov @ 2021-08-14 16:26 UTC
To: Jens Axboe, io-uring
Tracking linked timeouts as inflight was needed to make sure that io-wq
is not destroyed by io_uring_cancel_generic() racing with
io_async_cancel_one() accessing it. Now, cancellations issued by linked
timeouts are done in the task context, so it's already synchronised.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 2 --
1 file changed, 2 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0d9a443d4987..d572a831cf85 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5699,8 +5699,6 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
data->mode = io_translate_timeout_mode(flags);
hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
- if (is_timeout_link)
- io_req_track_inflight(req);
return 0;
}
--
2.32.0
* [PATCH 3/5] io_uring: optimise initial ltimeout refcounting
2021-08-14 16:26 [PATCH for-next 0/5] 5.15 cleanups and optimisations Pavel Begunkov
2021-08-14 16:26 ` [PATCH 1/5] io_uring: optimise iowq refcounting Pavel Begunkov
2021-08-14 16:26 ` [PATCH 2/5] io_uring: don't inflight-track linked timeouts Pavel Begunkov
@ 2021-08-14 16:26 ` Pavel Begunkov
2021-08-14 16:26 ` [PATCH 4/5] io_uring: kill not necessary resubmit switch Pavel Begunkov
2021-08-14 16:26 ` [PATCH 5/5] io_uring: deduplicate cancellations Pavel Begunkov
4 siblings, 0 replies; 13+ messages in thread
From: Pavel Begunkov @ 2021-08-14 16:26 UTC
To: Jens Axboe, io-uring
Linked timeouts are never refcounted when it comes to the first call to
__io_prep_linked_timeout(), so save a req_ref_get() and set the desired
value directly.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index d572a831cf85..37b5516b63c8 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1310,8 +1310,7 @@ static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
/* linked timeouts should have two refs once prep'ed */
io_req_refcount(req);
- io_req_refcount(nxt);
- req_ref_get(nxt);
+ __io_req_refcount(nxt, 2);
nxt->timeout.head = req;
nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
--
2.32.0
* [PATCH 4/5] io_uring: kill not necessary resubmit switch
2021-08-14 16:26 [PATCH for-next 0/5] 5.15 cleanups and optimisations Pavel Begunkov
` (2 preceding siblings ...)
2021-08-14 16:26 ` [PATCH 3/5] io_uring: optimise initial ltimeout refcounting Pavel Begunkov
@ 2021-08-14 16:26 ` Pavel Begunkov
2021-08-14 16:26 ` [PATCH 5/5] io_uring: deduplicate cancellations Pavel Begunkov
4 siblings, 0 replies; 13+ messages in thread
From: Pavel Begunkov @ 2021-08-14 16:26 UTC
To: Jens Axboe, io-uring
773af69121ecc ("io_uring: always reissue from task_work context") makes
all resubmission to be made from task_work, so we don't need that hack
with resubmit/not-resubmit switch anymore.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 37b5516b63c8..c16d172ca37f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2291,7 +2291,7 @@ static inline bool io_run_task_work(void)
* Find and free completed poll iocbs
*/
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
- struct list_head *done, bool resubmit)
+ struct list_head *done)
{
struct req_batch rb;
struct io_kiocb *req;
@@ -2306,7 +2306,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
req = list_first_entry(done, struct io_kiocb, inflight_entry);
list_del(&req->inflight_entry);
- if (READ_ONCE(req->result) == -EAGAIN && resubmit &&
+ if (READ_ONCE(req->result) == -EAGAIN &&
!(req->flags & REQ_F_DONT_REISSUE)) {
req->iopoll_completed = 0;
io_req_task_queue_reissue(req);
@@ -2329,7 +2329,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
}
static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
- long min, bool resubmit)
+ long min)
{
struct io_kiocb *req, *tmp;
LIST_HEAD(done);
@@ -2369,7 +2369,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
}
if (!list_empty(&done))
- io_iopoll_complete(ctx, nr_events, &done, resubmit);
+ io_iopoll_complete(ctx, nr_events, &done);
return 0;
}
@@ -2387,7 +2387,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
while (!list_empty(&ctx->iopoll_list)) {
unsigned int nr_events = 0;
- io_do_iopoll(ctx, &nr_events, 0, false);
+ io_do_iopoll(ctx, &nr_events, 0);
/* let it sleep and repeat later if can't complete a request */
if (nr_events == 0)
@@ -2449,7 +2449,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
list_empty(&ctx->iopoll_list))
break;
}
- ret = io_do_iopoll(ctx, &nr_events, min, true);
+ ret = io_do_iopoll(ctx, &nr_events, min);
} while (!ret && nr_events < min && !need_resched());
out:
mutex_unlock(&ctx->uring_lock);
@@ -6855,7 +6855,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
mutex_lock(&ctx->uring_lock);
if (!list_empty(&ctx->iopoll_list))
- io_do_iopoll(ctx, &nr_events, 0, true);
+ io_do_iopoll(ctx, &nr_events, 0);
/*
* Don't submit if refs are dying, good for io_uring_register(),
--
2.32.0
* [PATCH 5/5] io_uring: deduplicate cancellations
2021-08-14 16:26 [PATCH for-next 0/5] 5.15 cleanups and optimisations Pavel Begunkov
` (3 preceding siblings ...)
2021-08-14 16:26 ` [PATCH 4/5] io_uring: kill not necessary resubmit switch Pavel Begunkov
@ 2021-08-14 16:26 ` Pavel Begunkov
4 siblings, 0 replies; 13+ messages in thread
From: Pavel Begunkov @ 2021-08-14 16:26 UTC
To: Jens Axboe, io-uring
IORING_OP_ASYNC_CANCEL and IORING_OP_LINK_TIMEOUT have enough overlap,
so extract a helper for request cancellation and use it in both. This
also removes some of the ugliness caused by success_ret.
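One detail worth noting in the diff below: the new io_try_cancel_userdata()
returns with ->completion_lock held, which the __acquires() sparse
annotation documents, and every caller is responsible for dropping the
lock. A minimal userspace sketch of that lock-handoff shape, with a
pthread mutex standing in for the spinlock and purely illustrative names
(__acquires is a no-op outside sparse):

#include <pthread.h>
#include <stdio.h>

#ifndef __acquires
#define __acquires(x)	/* sparse-only annotation, expands to nothing */
#endif

static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns with completion_lock held on every path; callers unlock. */
static int try_cancel(unsigned long long user_data)
	__acquires(&completion_lock)
{
	pthread_mutex_lock(&completion_lock);
	return user_data == 42 ? 0 : -2;	/* -2 stands in for -ENOENT */
}

int main(void)
{
	int ret = try_cancel(42);

	/* fill a completion, commit it, etc., still under the lock */
	pthread_mutex_unlock(&completion_lock);
	printf("ret=%d\n", ret);
	return 0;
}

The annotation lets sparse check that lock acquire/release balance across
the function boundary; without it, a function returning with a lock held
looks like a bug to static analysis.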
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 46 ++++++++++++++++++----------------------------
1 file changed, 18 insertions(+), 28 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c16d172ca37f..5560620968c9 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5790,32 +5790,24 @@ static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
return ret;
}
-static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
- struct io_kiocb *req, __u64 sqe_addr,
- int success_ret)
+static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
+ __acquires(&req->ctx->completion_lock)
{
+ struct io_ring_ctx *ctx = req->ctx;
int ret;
+ WARN_ON_ONCE(req->task != current);
+
ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
spin_lock(&ctx->completion_lock);
if (ret != -ENOENT)
- goto done;
+ return ret;
spin_lock_irq(&ctx->timeout_lock);
ret = io_timeout_cancel(ctx, sqe_addr);
spin_unlock_irq(&ctx->timeout_lock);
if (ret != -ENOENT)
- goto done;
- ret = io_poll_cancel(ctx, sqe_addr, false);
-done:
- if (!ret)
- ret = success_ret;
- io_cqring_fill_event(ctx, req->user_data, ret, 0);
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
-
- if (ret < 0)
- req_set_fail(req);
+ return ret;
+ return io_poll_cancel(ctx, sqe_addr, false);
}
static int io_async_cancel_prep(struct io_kiocb *req,
@@ -5839,17 +5831,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
struct io_tctx_node *node;
int ret;
- /* tasks should wait for their io-wq threads, so safe w/o sync */
- ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
- spin_lock(&ctx->completion_lock);
- if (ret != -ENOENT)
- goto done;
- spin_lock_irq(&ctx->timeout_lock);
- ret = io_timeout_cancel(ctx, sqe_addr);
- spin_unlock_irq(&ctx->timeout_lock);
- if (ret != -ENOENT)
- goto done;
- ret = io_poll_cancel(ctx, sqe_addr, false);
+ ret = io_try_cancel_userdata(req, sqe_addr);
if (ret != -ENOENT)
goto done;
spin_unlock(&ctx->completion_lock);
@@ -6416,9 +6398,17 @@ static void io_req_task_link_timeout(struct io_kiocb *req)
{
struct io_kiocb *prev = req->timeout.prev;
struct io_ring_ctx *ctx = req->ctx;
+ int ret;
if (prev) {
- io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
+ ret = io_try_cancel_userdata(req, prev->user_data);
+ if (!ret)
+ ret = -ETIME;
+ io_cqring_fill_event(ctx, req->user_data, ret, 0);
+ io_commit_cqring(ctx);
+ spin_unlock(&ctx->completion_lock);
+ io_cqring_ev_posted(ctx);
+
io_put_req(prev);
io_put_req(req);
} else {
--
2.32.0