[PATCH v2] io_uring/kbuf: limit legacy provided buffer lists to USHRT_MAX
From: Jens Axboe @ 2025-06-03 21:33 UTC
To: io-uring
The buffer ID for a provided buffer is an unsigned short, so at most
64k buffers can be added to any given buffer list before BIDs start to
repeat. Cap the legacy provided buffers at 64k per list.

This is mostly to prevent silly stall reports from syzbot, which likes
to dump tons of buffers into a list and then have kernels with lockdep
and KASAN churn through them, hitting long wait times for buffer
pruning at ring exit time.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
Since v1:
- Actually use the added 'ret' variable in io_add_buffers()
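
For reference, a minimal userspace sketch (not part of the patch) of how
the cap surfaces to an application, assuming liburing; the buffer group
ID, buffer size, and provide() helper below are arbitrary demo choices.
On a kernel with this change, filling one legacy buffer list with
USHRT_MAX buffers and then providing one more should complete the second
IORING_OP_PROVIDE_BUFFERS with -EOVERFLOW; on older kernels it succeeds.

#include <liburing.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_SIZE 64	/* arbitrary buffer size for this demo */
#define BGID     7	/* arbitrary buffer group ID for this demo */

/* Submit one IORING_OP_PROVIDE_BUFFERS and return the CQE result */
static int provide(struct io_uring *ring, void *base, int nr, int bid)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	io_uring_prep_provide_buffers(sqe, base, BUF_SIZE, nr, BGID, bid);
	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return ret;
}

int main(void)
{
	struct io_uring ring;
	void *mem;
	int ret;

	if (io_uring_queue_init(8, &ring, 0))
		return 1;

	mem = malloc((size_t)BUF_SIZE * (USHRT_MAX + 1));
	if (!mem)
		return 1;

	/* USHRT_MAX buffers fill the legacy list up to the new cap */
	ret = provide(&ring, mem, USHRT_MAX, 0);
	printf("first provide: %s\n", ret ? strerror(-ret) : "ok");

	/* one more buffer should now be rejected with -EOVERFLOW */
	ret = provide(&ring, (char *)mem + (size_t)BUF_SIZE * USHRT_MAX, 1, 0);
	printf("second provide: %s (expect EOVERFLOW)\n",
	       ret < 0 ? strerror(-ret) : "ok");

	free(mem);
	io_uring_queue_exit(&ring);
	return 0;
}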
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 8cce3ebd813f..2ea65f3cef72 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -108,6 +108,7 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
buf = req->kbuf;
bl = io_buffer_get_list(ctx, buf->bgid);
list_add(&buf->list, &bl->buf_list);
+ bl->nbufs++;
req->flags &= ~REQ_F_BUFFER_SELECTED;
io_ring_submit_unlock(ctx, issue_flags);
@@ -122,6 +123,7 @@ static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
list_del(&kbuf->list);
+ bl->nbufs--;
if (*len == 0 || *len > kbuf->len)
*len = kbuf->len;
if (list_empty(&bl->buf_list))
@@ -390,6 +392,7 @@ static int io_remove_buffers_legacy(struct io_ring_ctx *ctx,
for (i = 0; i < nbufs && !list_empty(&bl->buf_list); i++) {
nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
list_del(&nxt->list);
+ bl->nbufs--;
kfree(nxt);
cond_resched();
}
@@ -491,14 +494,24 @@ static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
{
struct io_buffer *buf;
u64 addr = pbuf->addr;
- int i, bid = pbuf->bid;
+ int ret = -ENOMEM, i, bid = pbuf->bid;
for (i = 0; i < pbuf->nbufs; i++) {
+ /*
+ * Nonsensical to have more than sizeof(bid) buffers in a
+ * buffer list, as the application then has no way of knowing
+ * which duplicate bid refers to what buffer.
+ */
+ if (bl->nbufs == USHRT_MAX) {
+ ret = -EOVERFLOW;
+ break;
+ }
buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
if (!buf)
break;
list_add_tail(&buf->list, &bl->buf_list);
+ bl->nbufs++;
buf->addr = addr;
buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
buf->bid = bid;
@@ -508,7 +521,7 @@ static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
cond_resched();
}
- return i ? 0 : -ENOMEM;
+ return i ? 0 : ret;
}
static int __io_manage_buffers_legacy(struct io_kiocb *req,
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index 4d2c209d1a41..5d83c7adc739 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -21,6 +21,9 @@ struct io_buffer_list {
struct list_head buf_list;
struct io_uring_buf_ring *buf_ring;
};
+ /* count of classic/legacy buffers in buffer list */
+ int nbufs;
+
__u16 bgid;
/* below is for ring provided buffers */
--
Jens Axboe