From: Pavel Begunkov <[email protected]>
To: Hao Xu <[email protected]>, Jens Axboe <[email protected]>,
[email protected]
Subject: Re: [PATCH v2 04/24] io_uring: use slist for completion batching
Date: Tue, 28 Sep 2021 10:41:36 +0100
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>
On 9/26/21 7:57 AM, Hao Xu wrote:
> On 2021/9/25 4:59 AM, Pavel Begunkov wrote:
>> Currently we collect requests for completion batching in an array.
>> Replace it with a singly linked list. It's as fast as an array but
>> doesn't take as much space in ctx, and will be used in future patches.
>>
>> Signed-off-by: Pavel Begunkov <[email protected]>
>> ---
>> fs/io_uring.c | 52 +++++++++++++++++++++++++--------------------------
>> 1 file changed, 25 insertions(+), 27 deletions(-)
>>
>> diff --git a/fs/io_uring.c b/fs/io_uring.c
>> index 9c14e9e722ba..9a76c4f84311 100644
>> --- a/fs/io_uring.c
>> +++ b/fs/io_uring.c
>> @@ -322,8 +322,8 @@ struct io_submit_state {
>> /*
>> * Batch completion logic
>> */
>> - struct io_kiocb *compl_reqs[IO_COMPL_BATCH];
>> - unsigned int compl_nr;
>> + struct io_wq_work_list compl_reqs;
> Would it be better to rename struct io_wq_work_list to something more
> generic? io_wq_work_list is a bit confusing: we now use this type of
> linked list (and stack) for various purposes, not just to link io-wq
> works.
Was thinking about it, e.g. io_slist, but it was already too late --
lots of conflicts and a good chance of adding a couple of extra bugs
on rebase. I think we can do it afterwards (if at all, considering
that it complicates backporting).
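
For reference, the pair of types being reused here is tiny (roughly,
as they look in io-wq.h):

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

so ->comp_list costs one pointer per request, vs the fixed
IO_COMPL_BATCH-sized array in ctx.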
>> +
>> /* inline/task_work completion list, under ->uring_lock */
>> struct list_head free_list;
>> };
>> @@ -883,6 +883,8 @@ struct io_kiocb {
>> struct io_wq_work work;
>> const struct cred *creds;
>> + struct io_wq_work_node comp_list;
>> +
>> /* store used ubuf, so we can prevent reloading */
>> struct io_mapped_ubuf *imu;
>> };
>> @@ -1169,7 +1171,7 @@ static inline void req_ref_get(struct io_kiocb *req)
>> static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
>> {
>> - if (ctx->submit_state.compl_nr)
>> + if (!wq_list_empty(&ctx->submit_state.compl_reqs))
>> __io_submit_flush_completions(ctx);
>> }
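
For context, wq_list_empty() is just a NULL check on ->first, roughly:

#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)

so this test stays as cheap as the old compl_nr one.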
>> @@ -1326,6 +1328,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
>> INIT_LIST_HEAD(&ctx->submit_state.free_list);
>> INIT_LIST_HEAD(&ctx->locked_free_list);
>> INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
>> + INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
>> return ctx;
>> err:
>> kfree(ctx->dummy_ubuf);
>> @@ -1831,11 +1834,16 @@ static inline bool io_req_needs_clean(struct io_kiocb *req)
>> static void io_req_complete_state(struct io_kiocb *req, long res,
>> unsigned int cflags)
>> {
>> + struct io_submit_state *state;
>> +
>> if (io_req_needs_clean(req))
>> io_clean_op(req);
>> req->result = res;
>> req->compl.cflags = cflags;
>> req->flags |= REQ_F_COMPLETE_INLINE;
>> +
>> + state = &req->ctx->submit_state;
>> + wq_list_add_tail(&req->comp_list, &state->compl_reqs);
>> }
>> static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
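
wq_list_add_tail() is the usual first/last singly linked append, along
these lines (a sketch, modulo the exact annotations in io-wq.h):

static inline void wq_list_add_tail(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	node->next = NULL;
	if (!list->first) {
		list->last = node;
		WRITE_ONCE(list->first, node);
	} else {
		list->last->next = node;
		list->last = node;
	}
}

i.e. O(1) append with no size bookkeeping, which is what lets compl_nr
go away.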
>> @@ -2324,13 +2332,14 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
>> static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
>> __must_hold(&ctx->uring_lock)
>> {
>> + struct io_wq_work_node *node, *prev;
>> struct io_submit_state *state = &ctx->submit_state;
>> - int i, nr = state->compl_nr;
>> struct req_batch rb;
>> spin_lock(&ctx->completion_lock);
>> - for (i = 0; i < nr; i++) {
>> - struct io_kiocb *req = state->compl_reqs[i];
>> + wq_list_for_each(node, prev, &state->compl_reqs) {
>> + struct io_kiocb *req = container_of(node, struct io_kiocb,
>> + comp_list);
>> __io_cqring_fill_event(ctx, req->user_data, req->result,
>> req->compl.cflags);
>> @@ -2340,15 +2349,18 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
>> io_cqring_ev_posted(ctx);
>> io_init_req_batch(&rb);
>> - for (i = 0; i < nr; i++) {
>> - struct io_kiocb *req = state->compl_reqs[i];
>> + node = state->compl_reqs.first;
>> + do {
>> + struct io_kiocb *req = container_of(node, struct io_kiocb,
>> + comp_list);
>> + node = req->comp_list.next;
>> if (req_ref_put_and_test(req))
>> io_req_free_batch(&rb, req, &ctx->submit_state);
>> - }
>> + } while (node);
>> io_req_free_batch_finish(ctx, &rb);
>> - state->compl_nr = 0;
>> + INIT_WQ_LIST(&state->compl_reqs);
>> }
>> /*
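
Note the second loop above reads ->next before req_ref_put_and_test():
the put may free the request together with its embedded comp_list
node, so wq_list_for_each() can't be used there -- the same reason
list_for_each_entry_safe() exists for list_head. As a minimal sketch
of the pattern (put_req() standing in for the final put):

	node = list->first;
	do {
		struct io_wq_work_node *next = node->next;

		put_req(node);		/* node may be freed here */
		node = next;
	} while (node);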
>> @@ -2668,17 +2680,10 @@ static void io_req_task_complete(struct io_kiocb *req, bool *locked)
>> unsigned int cflags = io_put_rw_kbuf(req);
>> long res = req->result;
>> - if (*locked) {
>> - struct io_ring_ctx *ctx = req->ctx;
>> - struct io_submit_state *state = &ctx->submit_state;
>> -
>> + if (*locked)
>> io_req_complete_state(req, res, cflags);
>> - state->compl_reqs[state->compl_nr++] = req;
>> - if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
>> - io_submit_flush_completions(ctx);
>> - } else {
>> + else
>> io_req_complete_post(req, res, cflags);
>> - }
>> }
>> static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
>> @@ -6969,15 +6974,8 @@ static void __io_queue_sqe(struct io_kiocb *req)
>> * doesn't support non-blocking read/write attempts
>> */
>> if (likely(!ret)) {
>> - if (req->flags & REQ_F_COMPLETE_INLINE) {
>> - struct io_ring_ctx *ctx = req->ctx;
>> - struct io_submit_state *state = &ctx->submit_state;
>> -
>> - state->compl_reqs[state->compl_nr++] = req;
>> - if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
>> - io_submit_flush_completions(ctx);
>> + if (req->flags & REQ_F_COMPLETE_INLINE)
>> return;
>> - }
>> linked_timeout = io_prep_linked_timeout(req);
>> if (linked_timeout)
>>
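
And with the old flush-on-ARRAY_SIZE() trigger gone from these two
paths, requests simply accumulate on the list and get flushed in bulk
at the end of the submission loop -- roughly (io_submit_state_end()
as it looks with this series applied, modulo details):

static void io_submit_state_end(struct io_submit_state *state,
				struct io_ring_ctx *ctx)
{
	if (state->link.head)
		io_queue_sqe(state->link.head);
	/* no-op when the list is empty, see io_submit_flush_completions() */
	io_submit_flush_completions(ctx);
	if (state->plug_started)
		blk_finish_plug(&state->plug);
}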
>
--
Pavel Begunkov
Thread overview: 32+ messages
2021-09-24 20:59 [PATCH v2 00/24] rework and optimise submission+completion paths Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 01/24] io_uring: mark having different creds unlikely Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 02/24] io_uring: force_nonspin Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 03/24] io_uring: make io_do_iopoll return number of reqs Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 04/24] io_uring: use slist for completion batching Pavel Begunkov
2021-09-26 6:57 ` Hao Xu
2021-09-28 9:41 ` Pavel Begunkov [this message]
2021-09-28 15:32 ` Jens Axboe
2021-09-24 20:59 ` [PATCH v2 05/24] io_uring: remove allocation cache array Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 06/24] io-wq: add io_wq_work_node based stack Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 07/24] io_uring: replace list with stack for req caches Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 08/24] io_uring: split iopoll loop Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 09/24] io_uring: use single linked list for iopoll Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 10/24] io_uring: add a helper for batch free Pavel Begunkov
2021-09-26 3:36 ` Hao Xu
2021-09-28 9:33 ` Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 11/24] io_uring: convert iopoll_completed to store_release Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 12/24] io_uring: optimise batch completion Pavel Begunkov
[not found] ` <CAFUsyfLSXMvd_MBAp83qriW7LD=bg2=25TC4e_X4oMO1atoPYg@mail.gmail.com>
2021-09-28 9:35 ` Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 13/24] io_uring: inline completion batching helpers Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 14/24] io_uring: don't pass tail into io_free_batch_list Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 15/24] io_uring: don't pass state to io_submit_state_end Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 16/24] io_uring: deduplicate io_queue_sqe() call sites Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 17/24] io_uring: remove drain_active check from hot path Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 18/24] io_uring: split slow path from io_queue_sqe Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 19/24] io_uring: inline hot path of __io_queue_sqe() Pavel Begunkov
2021-09-24 21:00 ` [PATCH v2 20/24] io_uring: reshuffle queue_sqe completion handling Pavel Begunkov
2021-09-24 21:00 ` [PATCH v2 21/24] io_uring: restructure submit sqes to_submit checks Pavel Begunkov
2021-09-24 21:00 ` [PATCH v2 22/24] io_uring: kill off ->inflight_entry field Pavel Begunkov
2021-09-24 21:00 ` [PATCH v2 23/24] io_uring: comment why inline complete calls io_clean_op() Pavel Begunkov
2021-09-24 21:00 ` [PATCH v2 24/24] io_uring: disable draining earlier Pavel Begunkov
2021-09-30 16:04 ` [PATCH v2 00/24] rework and optimise submission+completion paths Jens Axboe