From: Joanne Koong <joannelkoong@gmail.com>
To: axboe@kernel.dk, io-uring@vger.kernel.org
Cc: csander@purestorage.com, krisman@suse.de, bernd@bsbernd.com,
hch@infradead.org, asml.silence@gmail.com,
linux-fsdevel@vger.kernel.org
Subject: [PATCH v1 03/11] io_uring/kbuf: add support for kernel-managed buffer rings
Date: Mon, 9 Feb 2026 16:28:44 -0800
Message-ID: <20260210002852.1394504-4-joannelkoong@gmail.com>
In-Reply-To: <20260210002852.1394504-1-joannelkoong@gmail.com>
Add support for kernel-managed buffer rings (kmbuf rings), which allow
the kernel to allocate and manage the backing buffers for a buffer
ring, rather than requiring the application to provide and manage them.
This introduces two new registration opcodes:
- IORING_REGISTER_KMBUF_RING: Register a kernel-managed buffer ring
- IORING_UNREGISTER_KMBUF_RING: Unregister a kernel-managed buffer ring
The existing io_uring_buf_reg structure is extended with a union to
support both application-provided buffer rings (pbuf) and kernel-managed
buffer rings (kmbuf):
- For pbuf rings: ring_addr specifies the user-provided ring address
- For kmbuf rings: buf_size specifies the size of each buffer. buf_size
must be non-zero and page-aligned (see the registration sketch below).
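For illustration, registering a kmbuf ring from userspace could look
like the following minimal sketch (this assumes uapi headers updated
with this patch; the power-of-two constraint on ring_entries is assumed
to carry over from the shared pbuf validation helper, and error
handling is omitted):

  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>
  #include <linux/io_uring.h>

  /* Register a kernel-managed ring of 8 page-sized buffers as
   * buffer group 0 on an existing io_uring fd.
   */
  static int register_kmbuf_ring(int ring_fd)
  {
          struct io_uring_buf_reg reg;

          memset(&reg, 0, sizeof(reg));
          reg.buf_size = 4096;    /* non-zero and page-aligned */
          reg.ring_entries = 8;
          reg.bgid = 0;

          return syscall(__NR_io_uring_register, ring_fd,
                         IORING_REGISTER_KMBUF_RING, &reg, 1);
  }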
The implementation follows the same pattern as pbuf ring registration,
reusing the validation and buffer list allocation helpers introduced by
the earlier refactoring patches in this series. The IOBL_KERNEL_MANAGED
flag marks a buffer list as kernel-managed so that buffer selection and
teardown in the I/O path can handle it appropriately.
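On the consumption side, a request selects from the group exactly as
with an application-provided buffer ring; only the kernel-managed
backing is new. A usage sketch in liburing conventions (assuming the
buffer-selection support added later in this series; IOSQE_BUFFER_SELECT
and sqe->buf_group are existing uapi):

  #include <liburing.h>

  /* Queue a read that lets the kernel pick one of the kernel-managed
   * buffers from group 0; when IORING_CQE_F_BUFFER is set in the
   * completion, the selected buffer id is
   * cqe->flags >> IORING_CQE_BUFFER_SHIFT.
   */
  static void queue_read_buf_select(struct io_uring *ring, int fd)
  {
          struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

          io_uring_prep_read(sqe, fd, NULL, 4096, 0);
          sqe->flags |= IOSQE_BUFFER_SELECT;
          sqe->buf_group = 0;
  }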
Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
---
include/uapi/linux/io_uring.h | 15 ++++-
io_uring/kbuf.c | 81 ++++++++++++++++++++++++-
io_uring/kbuf.h | 7 ++-
io_uring/memmap.c | 111 ++++++++++++++++++++++++++++++++++
io_uring/memmap.h | 4 ++
io_uring/register.c | 7 +++
6 files changed, 219 insertions(+), 6 deletions(-)
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index fc473af6feb4..a0889c1744bd 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -715,6 +715,10 @@ enum io_uring_register_op {
/* register bpf filtering programs */
IORING_REGISTER_BPF_FILTER = 37,
+ /* register/unregister kernel-managed buffer ring group */
+ IORING_REGISTER_KMBUF_RING = 38,
+ IORING_UNREGISTER_KMBUF_RING = 39,
+
/* this goes last */
IORING_REGISTER_LAST,
@@ -891,9 +895,16 @@ enum io_uring_register_pbuf_ring_flags {
IOU_PBUF_RING_INC = 2,
};
-/* argument for IORING_(UN)REGISTER_PBUF_RING */
+/* argument for IORING_(UN)REGISTER_PBUF_RING and
+ * IORING_(UN)REGISTER_KMBUF_RING
+ */
struct io_uring_buf_reg {
- __u64 ring_addr;
+ union {
+ /* used for pbuf rings */
+ __u64 ring_addr;
+ /* used for kmbuf rings */
+ __u32 buf_size;
+ };
__u32 ring_entries;
__u16 bgid;
__u16 flags;
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index aa9b70b72db4..9bc36451d083 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -427,10 +427,13 @@ static int io_remove_buffers_legacy(struct io_ring_ctx *ctx,
static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
- if (bl->flags & IOBL_BUF_RING)
+ if (bl->flags & IOBL_BUF_RING) {
io_free_region(ctx->user, &bl->region);
- else
+ if (bl->flags & IOBL_KERNEL_MANAGED)
+ kfree(bl->buf_ring);
+ } else {
io_remove_buffers_legacy(ctx, bl, -1U);
+ }
kfree(bl);
}
@@ -779,3 +782,77 @@ struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
return NULL;
return &bl->region;
}
+
+static int io_setup_kmbuf_ring(struct io_ring_ctx *ctx,
+ struct io_buffer_list *bl,
+ struct io_uring_buf_reg *reg)
+{
+ struct io_uring_buf_ring *ring;
+ unsigned long ring_size;
+ void *buf_region;
+ unsigned int i;
+ int ret;
+
+ /* allocate the ring structure */
+ ring_size = flex_array_size(ring, bufs, bl->nr_entries);
+ ring = kzalloc(ring_size, GFP_KERNEL_ACCOUNT);
+ if (!ring)
+ return -ENOMEM;
+
+ ret = io_create_region_multi_buf(ctx, &bl->region, bl->nr_entries,
+ reg->buf_size);
+ if (ret) {
+ kfree(ring);
+ return ret;
+ }
+
+ /* initialize ring buf entries to point to the buffers */
+ buf_region = bl->region.ptr;
+ for (i = 0; i < bl->nr_entries; i++) {
+ struct io_uring_buf *buf = &ring->bufs[i];
+
+ buf->addr = (u64)(uintptr_t)buf_region;
+ buf->len = reg->buf_size;
+ buf->bid = i;
+
+ buf_region += reg->buf_size;
+ }
+ ring->tail = bl->nr_entries;
+
+ bl->buf_ring = ring;
+ bl->flags |= IOBL_KERNEL_MANAGED;
+
+ return 0;
+}
+
+int io_register_kmbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
+{
+ struct io_uring_buf_reg reg;
+ struct io_buffer_list *bl;
+ int ret;
+
+ lockdep_assert_held(&ctx->uring_lock);
+
+ ret = io_copy_and_validate_buf_reg(arg, &reg, 0);
+ if (ret)
+ return ret;
+
+ if (!reg.buf_size || !PAGE_ALIGNED(reg.buf_size))
+ return -EINVAL;
+
+ bl = io_alloc_new_buffer_list(ctx, &reg);
+ if (IS_ERR(bl))
+ return PTR_ERR(bl);
+
+ ret = io_setup_kmbuf_ring(ctx, bl, &reg);
+ if (ret) {
+ kfree(bl);
+ return ret;
+ }
+
+ ret = io_buffer_add_list(ctx, bl, reg.bgid);
+ if (ret)
+ io_put_bl(ctx, bl);
+
+ return ret;
+}
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index 40b44f4fdb15..62c80a1ebf03 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -7,9 +7,11 @@
enum {
/* ring mapped provided buffers */
- IOBL_BUF_RING = 1,
+ IOBL_BUF_RING = 1,
/* buffers are consumed incrementally rather than always fully */
- IOBL_INC = 2,
+ IOBL_INC = 2,
+ /* buffers are kernel managed */
+ IOBL_KERNEL_MANAGED = 4,
};
struct io_buffer_list {
@@ -74,6 +76,7 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
int io_manage_buffers_legacy(struct io_kiocb *req, unsigned int issue_flags);
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
+int io_register_kmbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_buf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);
diff --git a/io_uring/memmap.c b/io_uring/memmap.c
index 89f56609e50a..8d37e93c0433 100644
--- a/io_uring/memmap.c
+++ b/io_uring/memmap.c
@@ -15,6 +15,28 @@
#include "rsrc.h"
#include "zcrx.h"
+static void release_multi_buf_pages(struct page **pages, unsigned long nr_pages)
+{
+ struct page *page;
+ unsigned int nr, i = 0;
+
+ while (nr_pages) {
+ page = pages[i];
+
+ if (!page || WARN_ON_ONCE(page != compound_head(page)))
+ return;
+
+ nr = compound_nr(page);
+ put_page(page);
+
+ if (WARN_ON_ONCE(nr > nr_pages))
+ return;
+
+ i += nr;
+ nr_pages -= nr;
+ }
+}
+
static bool io_mem_alloc_compound(struct page **pages, int nr_pages,
size_t size, gfp_t gfp)
{
@@ -86,6 +108,8 @@ enum {
IO_REGION_F_USER_PROVIDED = 2,
/* only the first page in the array is ref'ed */
IO_REGION_F_SINGLE_REF = 4,
+ /* pages in the array belong to multiple discrete allocations */
+ IO_REGION_F_MULTI_BUF = 8,
};
void io_free_region(struct user_struct *user, struct io_mapped_region *mr)
@@ -98,6 +122,8 @@ void io_free_region(struct user_struct *user, struct io_mapped_region *mr)
if (mr->flags & IO_REGION_F_USER_PROVIDED)
unpin_user_pages(mr->pages, nr_refs);
+ else if (mr->flags & IO_REGION_F_MULTI_BUF)
+ release_multi_buf_pages(mr->pages, nr_refs);
else
release_pages(mr->pages, nr_refs);
@@ -149,6 +175,54 @@ static int io_region_pin_pages(struct io_mapped_region *mr,
return 0;
}
+static int io_region_allocate_pages_multi_buf(struct io_mapped_region *mr,
+ unsigned int nr_bufs,
+ unsigned int buf_size)
+{
+ gfp_t gfp = GFP_USER | __GFP_ACCOUNT | __GFP_ZERO | __GFP_NOWARN;
+ struct page **pages, **cur_pages;
+ unsigned int nr_allocated;
+ unsigned int buf_pages;
+ unsigned int i;
+
+ if (!PAGE_ALIGNED(buf_size))
+ return -EINVAL;
+
+ buf_pages = buf_size >> PAGE_SHIFT;
+
+ pages = kvmalloc_array(mr->nr_pages, sizeof(*pages), gfp);
+ if (!pages)
+ return -ENOMEM;
+
+ cur_pages = pages;
+
+ for (i = 0; i < nr_bufs; i++) {
+ if (io_mem_alloc_compound(cur_pages, buf_pages, buf_size,
+ gfp)) {
+ cur_pages += buf_pages;
+ continue;
+ }
+
+ nr_allocated = alloc_pages_bulk_node(gfp, NUMA_NO_NODE,
+ buf_pages, cur_pages);
+ if (nr_allocated != buf_pages) {
+ unsigned int total =
+ (cur_pages - pages) + nr_allocated;
+
+ release_multi_buf_pages(pages, total);
+ kvfree(pages);
+ return -ENOMEM;
+ }
+
+ cur_pages += buf_pages;
+ }
+
+ mr->flags |= IO_REGION_F_MULTI_BUF;
+ mr->pages = pages;
+
+ return 0;
+}
+
static int io_region_allocate_pages(struct io_mapped_region *mr,
struct io_uring_region_desc *reg,
unsigned long mmap_offset)
@@ -181,6 +255,43 @@ static int io_region_allocate_pages(struct io_mapped_region *mr,
return 0;
}
+int io_create_region_multi_buf(struct io_ring_ctx *ctx,
+ struct io_mapped_region *mr,
+ unsigned int nr_bufs, unsigned int buf_size)
+{
+ unsigned int nr_pages;
+ int ret;
+
+ if (WARN_ON_ONCE(mr->pages || mr->ptr || mr->nr_pages))
+ return -EFAULT;
+
+ if (WARN_ON_ONCE(!nr_bufs || !buf_size || !PAGE_ALIGNED(buf_size)))
+ return -EINVAL;
+
+ if (check_mul_overflow(buf_size >> PAGE_SHIFT, nr_bufs, &nr_pages))
+ return -EINVAL;
+
+ if (ctx->user) {
+ ret = __io_account_mem(ctx->user, nr_pages);
+ if (ret)
+ return ret;
+ }
+ mr->nr_pages = nr_pages;
+
+ ret = io_region_allocate_pages_multi_buf(mr, nr_bufs, buf_size);
+ if (ret)
+ goto out_free;
+
+ ret = io_region_init_ptr(mr);
+ if (ret)
+ goto out_free;
+
+ return 0;
+out_free:
+ io_free_region(ctx->user, mr);
+ return ret;
+}
+
int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
struct io_uring_region_desc *reg,
unsigned long mmap_offset)
diff --git a/io_uring/memmap.h b/io_uring/memmap.h
index f4cfbb6b9a1f..3aa1167462ae 100644
--- a/io_uring/memmap.h
+++ b/io_uring/memmap.h
@@ -22,6 +22,10 @@ int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
struct io_uring_region_desc *reg,
unsigned long mmap_offset);
+int io_create_region_multi_buf(struct io_ring_ctx *ctx,
+ struct io_mapped_region *mr,
+ unsigned int nr_bufs, unsigned int buf_size);
+
static inline void *io_region_get_ptr(struct io_mapped_region *mr)
{
return mr->ptr;
diff --git a/io_uring/register.c b/io_uring/register.c
index 0882cb34f851..2db8daaf8fde 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -837,7 +837,14 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
break;
ret = io_register_pbuf_ring(ctx, arg);
break;
+ case IORING_REGISTER_KMBUF_RING:
+ ret = -EINVAL;
+ if (!arg || nr_args != 1)
+ break;
+ ret = io_register_kmbuf_ring(ctx, arg);
+ break;
case IORING_UNREGISTER_PBUF_RING:
+ case IORING_UNREGISTER_KMBUF_RING:
ret = -EINVAL;
if (!arg || nr_args != 1)
break;
--
2.47.3