Subject: [PATCH] io_uring: introduce io_cache_free() helper
From: Caleb Sander Mateos @ 2025-03-04 19:48 UTC (permalink / raw)
  To: Jens Axboe, Pavel Begunkov; +Cc: Caleb Sander Mateos, io-uring, linux-kernel

Add a helper function io_cache_free() that returns an allocation to an
io_alloc_cache, falling back on kfree() if the io_alloc_cache is full.
This is the inverse of io_cache_alloc(), which takes an allocation from
an io_alloc_cache and falls back on kmalloc() if the cache is empty.
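
For illustration, here is the open-coded pattern and its replacement
(a minimal sketch, taken from the futex_cache caller converted below):

	/* before: each caller open-codes the cache-put-or-kfree fallback */
	if (!io_alloc_cache_put(&ctx->futex_cache, ifd))
		kfree(ifd);

	/* after: the helper centralizes the fallback */
	io_cache_free(&ctx->futex_cache, ifd);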

Convert 4 callers to use the helper.

Signed-off-by: Caleb Sander Mateos <[email protected]>
Suggested-by: Li Zetao <[email protected]>
---
 io_uring/alloc_cache.h |  6 ++++++
 io_uring/futex.c       |  4 +---
 io_uring/io_uring.c    |  3 +--
 io_uring/rsrc.c        | 15 +++++----------
 4 files changed, 13 insertions(+), 15 deletions(-)

diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index 0dd17d8ba93a..7f68eff2e7f3 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -66,6 +66,12 @@ static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp)
 	if (obj)
 		return obj;
 	return io_cache_alloc_new(cache, gfp);
 }
 
+static inline void io_cache_free(struct io_alloc_cache *cache, void *obj)
+{
+	if (!io_alloc_cache_put(cache, obj))
+		kfree(obj);
+}
+
 #endif
diff --git a/io_uring/futex.c b/io_uring/futex.c
index b7581766406c..0ea4820cd8ff 100644
--- a/io_uring/futex.c
+++ b/io_uring/futex.c
@@ -51,16 +51,14 @@ static void __io_futex_complete(struct io_kiocb *req, io_tw_token_t tw)
 	io_req_task_complete(req, tw);
 }
 
 static void io_futex_complete(struct io_kiocb *req, io_tw_token_t tw)
 {
-	struct io_futex_data *ifd = req->async_data;
 	struct io_ring_ctx *ctx = req->ctx;
 
 	io_tw_lock(ctx, tw);
-	if (!io_alloc_cache_put(&ctx->futex_cache, ifd))
-		kfree(ifd);
+	io_cache_free(&ctx->futex_cache, req->async_data);
 	__io_futex_complete(req, tw);
 }
 
 static void io_futexv_complete(struct io_kiocb *req, io_tw_token_t tw)
 {
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index ccc343f61a57..58003fa6b327 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1420,12 +1420,11 @@ static void io_free_batch_list(struct io_ring_ctx *ctx,
 			if ((req->flags & REQ_F_POLLED) && req->apoll) {
 				struct async_poll *apoll = req->apoll;
 
 				if (apoll->double_poll)
 					kfree(apoll->double_poll);
-				if (!io_alloc_cache_put(&ctx->apoll_cache, apoll))
-					kfree(apoll);
+				io_cache_free(&ctx->apoll_cache, apoll);
 				req->flags &= ~REQ_F_POLLED;
 			}
 			if (req->flags & IO_REQ_LINK_FLAGS)
 				io_queue_next(req);
 			if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 3fb1bd616eef..5dd1e0827559 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -122,12 +122,13 @@ static struct io_mapped_ubuf *io_alloc_imu(struct io_ring_ctx *ctx,
 			GFP_KERNEL);
 }
 
 static void io_free_imu(struct io_ring_ctx *ctx, struct io_mapped_ubuf *imu)
 {
-	if (imu->nr_bvecs > IO_CACHED_BVECS_SEGS ||
-	    !io_alloc_cache_put(&ctx->imu_cache, imu))
+	if (imu->nr_bvecs <= IO_CACHED_BVECS_SEGS)
+		io_cache_free(&ctx->imu_cache, imu);
+	else
 		kvfree(imu);
 }
 
 static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf *imu)
 {
@@ -485,16 +486,10 @@ int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
 		req_set_fail(req);
 	io_req_set_res(req, ret, 0);
 	return IOU_OK;
 }
 
-static void io_free_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
-{
-	if (!io_alloc_cache_put(&ctx->node_cache, node))
-		kfree(node);
-}
-
 void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
 {
 	if (node->tag)
 		io_post_aux_cqe(ctx, node->tag, 0, 0);
 
@@ -508,11 +503,11 @@ void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
 	default:
 		WARN_ON_ONCE(1);
 		break;
 	}
 
-	io_free_node(ctx, node);
+	io_cache_free(&ctx->node_cache, node);
 }
 
 int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 {
 	if (!ctx->file_table.data.nr)
@@ -833,11 +828,11 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
 	}
 done:
 	if (ret) {
 		if (imu)
 			io_free_imu(ctx, imu);
-		io_free_node(ctx, node);
+		io_cache_free(&ctx->node_cache, node);
 		node = ERR_PTR(ret);
 	}
 	kvfree(pages);
 	return node;
 }
-- 
2.45.2

