From: Pavel Begunkov <[email protected]>
To: [email protected]
Cc: Jens Axboe <[email protected]>,
	[email protected], [email protected]
Subject: [PATCH v2 13/13] io_uring/rsrc: add custom limit for node caching
Date: Tue,  4 Apr 2023 13:39:57 +0100
Message-ID: <d0cd538b944dac0bf878e276fc0199f21e6bccea.1680576071.git.asml.silence@gmail.com>
In-Reply-To: <[email protected]>

The number of entries in the rsrc node cache is limited to 512, which
still seems unnecessarily large. Add per-cache thresholds and set the
limit to 32 for the rsrc node cache.
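
For illustration, the capped cache behaves roughly like the minimal
userspace sketch below (names are simplified and hypothetical; the real
code in the diff sits on wq_stack and adds KASAN poisoning, both
omitted here):

	#include <stdbool.h>
	#include <stddef.h>

	/* Simplified model of the bounded allocation cache. */
	struct cache_entry {
		struct cache_entry *next;
	};

	struct alloc_cache {
		struct cache_entry *head;
		unsigned int nr_cached;
		unsigned int max_cached;	/* e.g. 32 for rsrc nodes */
	};

	void cache_init(struct alloc_cache *cache, unsigned int max_nr)
	{
		cache->head = NULL;
		cache->nr_cached = 0;
		cache->max_cached = max_nr;
	}

	/* Returns true if cached; on false the caller frees the entry. */
	bool cache_put(struct alloc_cache *cache, struct cache_entry *entry)
	{
		if (cache->nr_cached >= cache->max_cached)
			return false;
		entry->next = cache->head;
		cache->head = entry;
		cache->nr_cached++;
		return true;
	}

Past the per-cache threshold, put simply declines to cache and the
entry is freed as usual, so a rarely used cache cannot pin more than
max_cached objects.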

Signed-off-by: Pavel Begunkov <[email protected]>
---
 include/linux/io_uring_types.h | 1 +
 io_uring/alloc_cache.h         | 6 ++++--
 io_uring/io_uring.c            | 9 ++++++---
 io_uring/rsrc.h                | 2 ++
 4 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 5d772e36e7fc..4a6ce03a4903 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -190,6 +190,7 @@ struct io_ev_fd {
 struct io_alloc_cache {
 	struct io_wq_work_node	list;
 	unsigned int		nr_cached;
+	unsigned int		max_cached;
 	size_t			elem_size;
 };
 
diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index 2fbecaa3a1ba..851a527afb5e 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -13,7 +13,7 @@ struct io_cache_entry {
 static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
 				      struct io_cache_entry *entry)
 {
-	if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
+	if (cache->nr_cached < cache->max_cached) {
 		cache->nr_cached++;
 		wq_stack_add_head(&entry->node, &cache->list);
 		/* KASAN poisons object */
@@ -38,10 +38,12 @@ static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *c
 	return NULL;
 }
 
-static inline void io_alloc_cache_init(struct io_alloc_cache *cache, size_t size)
+static inline void io_alloc_cache_init(struct io_alloc_cache *cache,
+				       unsigned max_nr, size_t size)
 {
 	cache->list.next = NULL;
 	cache->nr_cached = 0;
+	cache->max_cached = max_nr;
 	cache->elem_size = size;
 }
 
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index da36fa1eeac9..ae90d2753e0d 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -310,9 +310,12 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_LIST_HEAD(&ctx->sqd_list);
 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
 	INIT_LIST_HEAD(&ctx->io_buffers_cache);
-	io_alloc_cache_init(&ctx->rsrc_node_cache, sizeof(struct io_rsrc_node));
-	io_alloc_cache_init(&ctx->apoll_cache, sizeof(struct async_poll));
-	io_alloc_cache_init(&ctx->netmsg_cache, sizeof(struct io_async_msghdr));
+	io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX,
+			    sizeof(struct io_rsrc_node));
+	io_alloc_cache_init(&ctx->apoll_cache, IO_ALLOC_CACHE_MAX,
+			    sizeof(struct async_poll));
+	io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
+			    sizeof(struct io_async_msghdr));
 	init_completion(&ctx->ref_comp);
 	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
 	mutex_init(&ctx->uring_lock);
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 7ab9b2b2e757..8729f2fee256 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -6,6 +6,8 @@
 
 #include "alloc_cache.h"
 
+#define IO_NODE_ALLOC_CACHE_MAX 32
+
 #define IO_RSRC_TAG_TABLE_SHIFT	(PAGE_SHIFT - 3)
 #define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
 #define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)
-- 
2.39.1


Thread overview: 16+ messages
2023-04-04 12:39 [PATCH v2 00/13] optimise registered buffer/file updates Pavel Begunkov
2023-04-04 12:39 ` [PATCH v2 01/13] io_uring/rsrc: use non-pcpu refcounts for nodes Pavel Begunkov
2023-04-04 12:39 ` [PATCH v2 02/13] io_uring/rsrc: keep cached refs per node Pavel Begunkov
2023-04-04 12:39 ` [PATCH v2 03/13] io_uring: don't put nodes under spinlocks Pavel Begunkov
2023-04-04 12:39 ` [PATCH v2 04/13] io_uring: io_free_req() via tw Pavel Begunkov
2023-04-04 12:39 ` [PATCH v2 05/13] io_uring/rsrc: protect node refs with uring_lock Pavel Begunkov
2023-04-04 12:39 ` [PATCH v2 06/13] io_uring/rsrc: kill rsrc_ref_lock Pavel Begunkov
2023-04-04 12:39 ` [PATCH v2 07/13] io_uring/rsrc: rename rsrc_list Pavel Begunkov
2023-04-04 12:39 ` [PATCH v2 08/13] io_uring/rsrc: optimise io_rsrc_put allocation Pavel Begunkov
2023-04-04 12:39 ` [PATCH v2 09/13] io_uring/rsrc: don't offload node free Pavel Begunkov
2023-04-04 12:39 ` [PATCH v2 10/13] io_uring/rsrc: cache struct io_rsrc_node Pavel Begunkov
2023-04-04 12:39 ` [PATCH v2 11/13] io_uring/rsrc: add lockdep sanity checks Pavel Begunkov
2023-04-04 12:39 ` [PATCH v2 12/13] io_uring/rsrc: optimise io_rsrc_data refcounting Pavel Begunkov
2023-04-04 12:39 ` [PATCH v2 13/13] io_uring/rsrc: add custom limit for node caching Pavel Begunkov [this message]
2023-04-04 15:30 ` [PATCH v2 00/13] optimise registered buffer/file updates Jens Axboe
2023-04-04 15:33 ` Jens Axboe
