public inbox for [email protected]
* [PATCH 1/2] io_uring: Move from hlist to io_wq_work_node
@ 2023-02-21 13:57 Breno Leitao
  2023-02-21 13:57 ` [PATCH 2/2] io_uring: Add KASAN support for alloc_caches Breno Leitao
  2023-02-21 17:45 ` [PATCH 1/2] io_uring: Move from hlist to io_wq_work_node Pavel Begunkov
  0 siblings, 2 replies; 7+ messages in thread
From: Breno Leitao @ 2023-02-21 13:57 UTC (permalink / raw)
  To: axboe, asml.silence, io-uring; +Cc: linux-kernel, gustavold, leit

Having cache entries linked via hlist brings no benefit, and also requires
an unnecessary extra pointer (pprev) per cache entry.
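
For reference (not part of this patch), the two node types are roughly as
follows (hlist_node from include/linux/types.h, io_wq_work_node from
include/linux/io_uring_types.h), which is where the extra pointer comes from:

	struct hlist_node {
		struct hlist_node *next, **pprev;	/* two pointers per entry */
	};

	struct io_wq_work_node {
		struct io_wq_work_node *next;		/* one pointer per entry */
	};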

Use the internal io_wq_work_node singly linked list for the alloc caches
(async_msghdr and async_poll) instead.

This is a prerequisite for using KASAN on cache entries: with a singly
linked stack, pushing a new entry never touches the unused (and poisoned)
entries already in the cache, only the new node and the list head.
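
As a rough sketch (not part of this patch): hlist_add_head() has to update
the old head's pprev pointer, i.e. it writes into an entry that is already
sitting (poisoned) in the cache, while the stack push used here only writes
to the new node and to the list head:

	/* roughly what wq_stack_add_head() in io_uring's list helpers does */
	static inline void wq_stack_add_head(struct io_wq_work_node *node,
					     struct io_wq_work_node *stack)
	{
		node->next = stack->next;	/* write to the new node ... */
		stack->next = node;		/* ... and to the head, nothing else */
	}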

Suggested-by: Pavel Begunkov <[email protected]>
Signed-off-by: Breno Leitao <[email protected]>
---
 include/linux/io_uring_types.h |  2 +-
 io_uring/alloc_cache.h         | 27 +++++++++++++++------------
 2 files changed, 16 insertions(+), 13 deletions(-)

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 0efe4d784358..efa66b6c32c9 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -188,7 +188,7 @@ struct io_ev_fd {
 };
 
 struct io_alloc_cache {
-	struct hlist_head	list;
+	struct io_wq_work_node	list;
 	unsigned int		nr_cached;
 };
 
diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index 729793ae9712..0d9ff9402a37 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -7,7 +7,7 @@
 #define IO_ALLOC_CACHE_MAX	512
 
 struct io_cache_entry {
-	struct hlist_node	node;
+	struct io_wq_work_node node;
 };
 
 static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
@@ -15,7 +15,7 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
 {
 	if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
 		cache->nr_cached++;
-		hlist_add_head(&entry->node, &cache->list);
+		wq_stack_add_head(&entry->node, &cache->list);
 		return true;
 	}
 	return false;
@@ -23,11 +23,14 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
 
 static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
 {
-	if (!hlist_empty(&cache->list)) {
-		struct hlist_node *node = cache->list.first;
-
-		hlist_del(node);
-		return container_of(node, struct io_cache_entry, node);
+	struct io_wq_work_node *node;
+	struct io_cache_entry *entry;
+
+	if (cache->list.next) {
+		node = cache->list.next;
+		entry = container_of(node, struct io_cache_entry, node);
+		cache->list.next = node->next;
+		return entry;
 	}
 
 	return NULL;
@@ -35,19 +38,19 @@ static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *c
 
 static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
 {
-	INIT_HLIST_HEAD(&cache->list);
+	cache->list.next = NULL;
 	cache->nr_cached = 0;
 }
 
 static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
 					void (*free)(struct io_cache_entry *))
 {
-	while (!hlist_empty(&cache->list)) {
-		struct hlist_node *node = cache->list.first;
+	struct io_cache_entry *entry;
 
-		hlist_del(node);
-		free(container_of(node, struct io_cache_entry, node));
+	while ((entry = io_alloc_cache_get(cache))) {
+		free(entry);
 	}
+
 	cache->nr_cached = 0;
 }
 #endif
-- 
2.39.0
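
For context, a simplified sketch of how a consumer would use the helpers
above (modelled loosely on the async_poll cache user in io_uring/poll.c,
error handling omitted):

	struct io_cache_entry *entry;
	struct async_poll *apoll;

	/* allocation path: try the per-ring cache first */
	entry = io_alloc_cache_get(&ctx->apoll_cache);
	if (entry)
		apoll = container_of(entry, struct async_poll, cache);
	else
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);

	/* free path: recycle into the cache, or fall back to kfree() */
	if (!io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache))
		kfree(apoll);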



Thread overview: 7+ messages
2023-02-21 13:57 [PATCH 1/2] io_uring: Move from hlist to io_wq_work_node Breno Leitao
2023-02-21 13:57 ` [PATCH 2/2] io_uring: Add KASAN support for alloc_caches Breno Leitao
2023-02-21 16:39   ` kernel test robot
2023-02-21 17:45 ` [PATCH 1/2] io_uring: Move from hlist to io_wq_work_node Pavel Begunkov
2023-02-21 18:38   ` Breno Leitao
2023-02-21 18:43     ` Pavel Begunkov
2023-02-21 23:53     ` Jens Axboe
