From: Pavel Begunkov <[email protected]>
To: Jens Axboe <[email protected]>, [email protected]
Subject: [PATCH v2 05/24] io_uring: remove allocation cache array
Date: Fri, 24 Sep 2021 21:59:45 +0100
Message-ID: <8547095c35f7a87bab14f6447ecd30a273ed7500.1632516769.git.asml.silence@gmail.com>
In-Reply-To: <[email protected]>

We have several layers of request allocation; remove the last one, the
submit->reqs array, and always use submit->free_list instead.

Signed-off-by: Pavel Begunkov <[email protected]>
---
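For context, the cache this patch converges on is just one intrusive
free list: freeing pushes a request onto the list head, allocating pops
it, and an empty list triggers a bulk refill from the slab. Below is a
minimal userspace sketch of that pattern (the struct and function names
and the malloc() fallback are illustrative stand-ins, not the kernel
code):

#include <stdlib.h>

struct req {
	struct req *next;		/* stand-in for inflight_entry */
};

struct state {
	struct req *free_list;		/* the only cache after this patch */
};

static struct req *alloc_req(struct state *s)
{
	struct req *req = s->free_list;

	if (req) {			/* fast path: pop a cached request */
		s->free_list = req->next;
		return req;
	}
	return malloc(sizeof(*req));	/* slow path: hit the allocator */
}

static void free_req(struct state *s, struct req *req)
{
	req->next = s->free_list;	/* push back unconditionally: there */
	s->free_list = req;		/* is no array bound left to check */
}

Unlike the fixed-size reqs[] array it replaces, the list cannot fill
up, which is why io_req_free_batch() below loses its capacity check.
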
 fs/io_uring.c | 60 +++++++++++++++------------------------------------
 1 file changed, 17 insertions(+), 43 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 9a76c4f84311..9d8d79104d75 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -310,12 +310,6 @@ struct io_submit_state {
 	struct blk_plug		plug;
 	struct io_submit_link	link;
 
-	/*
-	 * io_kiocb alloc cache
-	 */
-	void			*reqs[IO_REQ_CACHE_SIZE];
-	unsigned int		free_reqs;
-
 	bool			plug_started;
 	bool			need_plug;
 
@@ -1903,7 +1897,6 @@ static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
 static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
 {
 	struct io_submit_state *state = &ctx->submit_state;
-	int nr;
 
 	/*
 	 * If we have more than a batch's worth of requests in our IRQ side
@@ -1912,20 +1905,7 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
 	 */
 	if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
 		io_flush_cached_locked_reqs(ctx, state);
-
-	nr = state->free_reqs;
-	while (!list_empty(&state->free_list)) {
-		struct io_kiocb *req = list_first_entry(&state->free_list,
-					struct io_kiocb, inflight_entry);
-
-		list_del(&req->inflight_entry);
-		state->reqs[nr++] = req;
-		if (nr == ARRAY_SIZE(state->reqs))
-			break;
-	}
-
-	state->free_reqs = nr;
-	return nr != 0;
+	return !list_empty(&state->free_list);
 }
 
 /*
@@ -1939,33 +1919,36 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
 {
 	struct io_submit_state *state = &ctx->submit_state;
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
+	void *reqs[IO_REQ_ALLOC_BATCH];
+	struct io_kiocb *req;
 	int ret, i;
 
-	BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
-
-	if (likely(state->free_reqs || io_flush_cached_reqs(ctx)))
+	if (likely(!list_empty(&state->free_list) || io_flush_cached_reqs(ctx)))
 		goto got_req;
 
-	ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
-				    state->reqs);
+	ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
 
 	/*
 	 * Bulk alloc is all-or-nothing. If we fail to get a batch,
 	 * retry single alloc to be on the safe side.
 	 */
 	if (unlikely(ret <= 0)) {
-		state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
-		if (!state->reqs[0])
+		reqs[0] = kmem_cache_alloc(req_cachep, gfp);
+		if (!reqs[0])
 			return NULL;
 		ret = 1;
 	}
 
-	for (i = 0; i < ret; i++)
-		io_preinit_req(state->reqs[i], ctx);
-	state->free_reqs = ret;
+	for (i = 0; i < ret; i++) {
+		req = reqs[i];
+
+		io_preinit_req(req, ctx);
+		list_add(&req->inflight_entry, &state->free_list);
+	}
 got_req:
-	state->free_reqs--;
-	return state->reqs[state->free_reqs];
+	req = list_first_entry(&state->free_list, struct io_kiocb, inflight_entry);
+	list_del(&req->inflight_entry);
+	return req;
 }
 
 static inline void io_put_file(struct file *file)
@@ -2323,10 +2306,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
 	rb->task_refs++;
 	rb->ctx_refs++;
 
-	if (state->free_reqs != ARRAY_SIZE(state->reqs))
-		state->reqs[state->free_reqs++] = req;
-	else
-		list_add(&req->inflight_entry, &state->free_list);
+	list_add(&req->inflight_entry, &state->free_list);
 }
 
 static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
@@ -9235,12 +9215,6 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
 	struct io_submit_state *state = &ctx->submit_state;
 
 	mutex_lock(&ctx->uring_lock);
-
-	if (state->free_reqs) {
-		kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
-		state->free_reqs = 0;
-	}
-
 	io_flush_cached_locked_reqs(ctx, state);
 	io_req_cache_free(&state->free_list);
 	mutex_unlock(&ctx->uring_lock);
-- 
2.33.0


Thread overview: 32+ messages
2021-09-24 20:59 [PATCH v2 00/24] rework and optimise submission+completion paths Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 01/24] io_uring: mark having different creds unlikely Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 02/24] io_uring: force_nonspin Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 03/24] io_uring: make io_do_iopoll return number of reqs Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 04/24] io_uring: use slist for completion batching Pavel Begunkov
2021-09-26  6:57   ` Hao Xu
2021-09-28  9:41     ` Pavel Begunkov
2021-09-28 15:32       ` Jens Axboe
2021-09-24 20:59 ` Pavel Begunkov [this message]
2021-09-24 20:59 ` [PATCH v2 06/24] io-wq: add io_wq_work_node based stack Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 07/24] io_uring: replace list with stack for req caches Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 08/24] io_uring: split iopoll loop Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 09/24] io_uring: use single linked list for iopoll Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 10/24] io_uring: add a helper for batch free Pavel Begunkov
2021-09-26  3:36   ` Hao Xu
2021-09-28  9:33     ` Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 11/24] io_uring: convert iopoll_completed to store_release Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 12/24] io_uring: optimise batch completion Pavel Begunkov
     [not found]   ` <CAFUsyfLSXMvd_MBAp83qriW7LD=bg2=25TC4e_X4oMO1atoPYg@mail.gmail.com>
2021-09-28  9:35     ` Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 13/24] io_uring: inline completion batching helpers Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 14/24] io_uring: don't pass tail into io_free_batch_list Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 15/24] io_uring: don't pass state to io_submit_state_end Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 16/24] io_uring: deduplicate io_queue_sqe() call sites Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 17/24] io_uring: remove drain_active check from hot path Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 18/24] io_uring: split slow path from io_queue_sqe Pavel Begunkov
2021-09-24 20:59 ` [PATCH v2 19/24] io_uring: inline hot path of __io_queue_sqe() Pavel Begunkov
2021-09-24 21:00 ` [PATCH v2 20/24] io_uring: reshuffle queue_sqe completion handling Pavel Begunkov
2021-09-24 21:00 ` [PATCH v2 21/24] io_uring: restructure submit sqes to_submit checks Pavel Begunkov
2021-09-24 21:00 ` [PATCH v2 22/24] io_uring: kill off ->inflight_entry field Pavel Begunkov
2021-09-24 21:00 ` [PATCH v2 23/24] io_uring: comment why inline complete calls io_clean_op() Pavel Begunkov
2021-09-24 21:00 ` [PATCH v2 24/24] io_uring: disable draining earlier Pavel Begunkov
2021-09-30 16:04 ` [PATCH v2 00/24] rework and optimise submission+completion paths Jens Axboe
