public inbox for [email protected]
From: Breno Leitao <[email protected]>
To: [email protected], [email protected], [email protected]
Cc: [email protected], [email protected], [email protected]
Subject: [PATCH 2/2] io_uring: Add KASAN support for alloc_caches
Date: Tue, 21 Feb 2023 05:57:21 -0800	[thread overview]
Message-ID: <[email protected]> (raw)
In-Reply-To: <[email protected]>

Add KASAN support to the alloc caches (apoll and netmsg_cache), so
that any access to an object while it sits unused in a cache raises a
KASAN warning.

An object is poisoned when it is put into the cache and unpoisoned
when it is retrieved from the cache or when the cache is freed.
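
As a caller-side illustration only (struct io_foo and its helpers are
made up here; the real users are the net.c and poll.c hunks below),
the cache interface is used like this after the change:

	/* Sketch only: assumes io_uring/alloc_cache.h and <linux/slab.h>. */
	struct io_foo {
		struct io_cache_entry cache;	/* embedded cache hook */
		int some_state;
	};

	static struct io_foo *io_foo_get(struct io_alloc_cache *c)
	{
		struct io_cache_entry *entry;

		/*
		 * io_alloc_cache_get() now takes the object size so it can
		 * unpoison the whole object before handing it back.
		 */
		entry = io_alloc_cache_get(c, sizeof(struct io_foo));
		if (entry)
			return container_of(entry, struct io_foo, cache);

		return kmalloc(sizeof(struct io_foo), GFP_KERNEL);
	}

	static void io_foo_put(struct io_alloc_cache *c, struct io_foo *foo)
	{
		/*
		 * io_alloc_cache_put() poisons the object while it is parked
		 * in the cache, so any stray access before the next get
		 * trips KASAN.
		 */
		if (!io_alloc_cache_put(c, &foo->cache))
			kfree(foo);
	}

Nothing about the cache users changes beyond passing the object size
to io_alloc_cache_get().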

Signed-off-by: Breno Leitao <[email protected]>
---
 io_uring/alloc_cache.h | 11 ++++++++---
 io_uring/io_uring.c    | 12 ++++++++++--
 io_uring/net.c         |  2 +-
 io_uring/poll.c        |  2 +-
 4 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index 0d9ff9402a37..0d5cd2c0a0ba 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -16,12 +16,15 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
 	if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
 		cache->nr_cached++;
 		wq_stack_add_head(&entry->node, &cache->list);
+		/* KASAN poisons object */
+		kasan_slab_free_mempool(entry);
 		return true;
 	}
 	return false;
 }
 
-static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
+static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache,
+							size_t size)
 {
 	struct io_wq_work_node *node;
 	struct io_cache_entry *entry;
@@ -29,6 +32,7 @@ static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *c
 	if (cache->list.next) {
 		node = cache->list.next;
 		entry = container_of(node, struct io_cache_entry, node);
+		kasan_unpoison_range(entry, size);
 		cache->list.next = node->next;
 		return entry;
 	}
@@ -43,11 +47,12 @@ static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
 }
 
 static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
-					void (*free)(struct io_cache_entry *))
+					void (*free)(struct io_cache_entry *),
+					size_t size)
 {
 	struct io_cache_entry *entry;
 
-	while ((entry = io_alloc_cache_get(cache))) {
+	while ((entry = io_alloc_cache_get(cache, size))) {
 		free(entry);
 	}
 
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 80b6204769e8..6a98902b8f62 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2766,6 +2766,15 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
 	mutex_unlock(&ctx->uring_lock);
 }
 
+static __cold void io_uring_acache_free(struct io_ring_ctx *ctx)
+{
+
+	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free,
+			    sizeof(struct async_poll));
+	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free,
+			    sizeof(struct io_async_msghdr));
+}
+
 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 {
 	io_sq_thread_finish(ctx);
@@ -2781,8 +2790,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 		__io_sqe_files_unregister(ctx);
 	io_cqring_overflow_kill(ctx);
 	io_eventfd_unregister(ctx);
-	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
-	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
+	io_uring_acache_free(ctx);
 	mutex_unlock(&ctx->uring_lock);
 	io_destroy_buffers(ctx);
 	if (ctx->sq_creds)
diff --git a/io_uring/net.c b/io_uring/net.c
index fbc34a7c2743..8dc67b23b030 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -139,7 +139,7 @@ static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
 	struct io_async_msghdr *hdr;
 
 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
-		entry = io_alloc_cache_get(&ctx->netmsg_cache);
+		entry = io_alloc_cache_get(&ctx->netmsg_cache, sizeof(struct io_async_msghdr));
 		if (entry) {
 			hdr = container_of(entry, struct io_async_msghdr, cache);
 			hdr->free_iov = NULL;
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 8339a92b4510..295d59875f00 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -661,7 +661,7 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
 		apoll = req->apoll;
 		kfree(apoll->double_poll);
 	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
-		entry = io_alloc_cache_get(&ctx->apoll_cache);
+		entry = io_alloc_cache_get(&ctx->apoll_cache, sizeof(struct async_poll));
 		if (entry == NULL)
 			goto alloc_apoll;
 		apoll = container_of(entry, struct async_poll, cache);
-- 
2.39.0


Thread overview: 7+ messages
2023-02-21 13:57 [PATCH 1/2] io_uring: Move from hlist to io_wq_work_node Breno Leitao
2023-02-21 13:57 ` Breno Leitao [this message]
2023-02-21 16:39   ` [PATCH 2/2] io_uring: Add KASAN support for alloc_caches kernel test robot
2023-02-21 17:45 ` [PATCH 1/2] io_uring: Move from hlist to io_wq_work_node Pavel Begunkov
2023-02-21 18:38   ` Breno Leitao
2023-02-21 18:43     ` Pavel Begunkov
2023-02-21 23:53     ` Jens Axboe
