From: Ming Lei <[email protected]>
To: Keith Busch <[email protected]>
Cc: [email protected], [email protected],
[email protected], [email protected],
Keith Busch <[email protected]>
Subject: Re: [PATCH 3/6] io_uring: add support for kernel registered bvecs
Date: Mon, 10 Feb 2025 22:12:44 +0800
Message-ID: <Z6oJXIsBMMkCpW_3@fedora>
In-Reply-To: <[email protected]>
On Mon, Feb 03, 2025 at 07:45:14AM -0800, Keith Busch wrote:
> From: Keith Busch <[email protected]>
>
> Provide an interface for the kernel to leverage the existing
> pre-registered buffers that io_uring provides. User space can reference
> these later to achieve zero-copy IO.
>
> User space must register an empty fixed buffer table with io_uring in
> order for the kernel to make use of it.
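
As a reference point, the user-space side of this is just a sparse table
registration. A minimal sketch with liburing (assuming liburing >= 2.2,
which provides io_uring_register_buffers_sparse(); ring depth and slot
count are arbitrary here):

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0)
		return 1;

	/* Empty (sparse) fixed-buffer table: no pages yet, just slots
	 * the kernel can later fill with bvec-backed entries. */
	ret = io_uring_register_buffers_sparse(&ring, 8);
	if (ret < 0)
		fprintf(stderr, "sparse register failed: %d\n", ret);

	io_uring_queue_exit(&ring);
	return ret < 0;
}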
>
> Signed-off-by: Keith Busch <[email protected]>
> ---
> include/linux/io_uring.h | 1 +
> include/linux/io_uring_types.h | 3 +
> io_uring/rsrc.c | 114 +++++++++++++++++++++++++++++++--
> io_uring/rsrc.h | 1 +
> 4 files changed, 114 insertions(+), 5 deletions(-)
>
> diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
> index 85fe4e6b275c7..b5637a2aae340 100644
> --- a/include/linux/io_uring.h
> +++ b/include/linux/io_uring.h
> @@ -5,6 +5,7 @@
> #include <linux/sched.h>
> #include <linux/xarray.h>
> #include <uapi/linux/io_uring.h>
> +#include <linux/blk-mq.h>
>
> #if defined(CONFIG_IO_URING)
> void __io_uring_cancel(bool cancel_all);
> diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
> index 623d8e798a11a..7e5a5a70c35f2 100644
> --- a/include/linux/io_uring_types.h
> +++ b/include/linux/io_uring_types.h
> @@ -695,4 +695,7 @@ static inline bool io_ctx_cqe32(struct io_ring_ctx *ctx)
> return ctx->flags & IORING_SETUP_CQE32;
> }
>
> +int io_buffer_register_bvec(struct io_ring_ctx *ctx, const struct request *rq, unsigned int tag);
> +void io_buffer_unregister_bvec(struct io_ring_ctx *ctx, unsigned int tag);
> +
> #endif
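
For completeness, a rough sketch of the kernel consumer side. These
helpers are hypothetical glue (the real user is the ublk patch later in
this series), and per the lockdep assertion in io_buffer_register_bvec()
the caller must hold ctx->uring_lock, e.g. by running from uring_cmd
issue context:

#include <linux/blk-mq.h>
#include <linux/io_uring_types.h>

/* Hypothetical driver helpers, illustrative only. */
static int drv_attach_rq_buffer(struct io_ring_ctx *ctx, struct request *rq,
				unsigned int tag)
{
	int ret;

	ret = io_buffer_register_bvec(ctx, rq, tag);
	if (ret)
		return ret;

	/*
	 * User space may now issue IORING_OP_READ_FIXED /
	 * IORING_OP_WRITE_FIXED with buf_index == tag to do
	 * zero-copy IO against this request's pages.
	 */
	return 0;
}

static void drv_detach_rq_buffer(struct io_ring_ctx *ctx, unsigned int tag)
{
	/* Drops the table entry and its page references. */
	io_buffer_unregister_bvec(ctx, tag);
}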
> diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
> index 4d0e1c06c8bc6..8c4c374abcc10 100644
> --- a/io_uring/rsrc.c
> +++ b/io_uring/rsrc.c
> @@ -111,7 +111,10 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
> if (!refcount_dec_and_test(&imu->refs))
> return;
> for (i = 0; i < imu->nr_bvecs; i++)
> - unpin_user_page(imu->bvec[i].bv_page);
> + if (node->type == IORING_RSRC_KBUF)
> + put_page(imu->bvec[i].bv_page);
> + else
> + unpin_user_page(imu->bvec[i].bv_page);
> if (imu->acct_pages)
> io_unaccount_mem(ctx, imu->acct_pages);
> kvfree(imu);
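
The asymmetry above is deliberate: user-registered buffers are pinned
with pin_user_pages() at registration time, while the KBUF path added by
this patch takes plain get_page() references, so the release must match
how the pages were taken. Schematically:

/*
 * Reference pairing:
 *
 *   IORING_RSRC_BUFFER:  pin_user_pages() ... unpin_user_page()
 *   IORING_RSRC_KBUF:    get_page()       ... put_page()
 */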
> @@ -240,6 +243,13 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
> struct io_rsrc_node *node;
> u64 tag = 0;
>
> + i = array_index_nospec(up->offset + done, ctx->buf_table.nr);
> + node = io_rsrc_node_lookup(&ctx->buf_table, i);
> + if (node && node->type != IORING_RSRC_BUFFER) {
> + err = -EBUSY;
> + break;
> + }
> +
> uvec = u64_to_user_ptr(user_data);
> iov = iovec_from_user(uvec, 1, 1, &fast_iov, ctx->compat);
> if (IS_ERR(iov)) {
> @@ -258,6 +268,7 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
> err = PTR_ERR(node);
> break;
> }
> +
> if (tag) {
> if (!node) {
> err = -EINVAL;
> @@ -265,7 +276,6 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
> }
> node->tag = tag;
> }
> - i = array_index_nospec(up->offset + done, ctx->buf_table.nr);
> io_reset_rsrc_node(ctx, &ctx->buf_table, i);
> ctx->buf_table.nodes[i] = node;
> if (ctx->compat)
> @@ -453,6 +463,7 @@ void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
> fput(io_slot_file(node));
> break;
> case IORING_RSRC_BUFFER:
> + case IORING_RSRC_KBUF:
> if (node->buf)
> io_buffer_unmap(ctx, node);
> break;
> @@ -860,6 +871,92 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
> return ret;
> }
>
> +static struct io_rsrc_node *io_buffer_alloc_node(struct io_ring_ctx *ctx,
> + unsigned int nr_bvecs,
> + unsigned int len)
> +{
> + struct io_mapped_ubuf *imu;
> + struct io_rsrc_node *node;
> +
> + node = io_rsrc_node_alloc(IORING_RSRC_KBUF);
> + if (!node)
> + return NULL;
> +
> + imu = kvmalloc(struct_size(imu, bvec, nr_bvecs), GFP_KERNEL);
> + if (!imu) {
> + io_put_rsrc_node(ctx, node);
> + return NULL;
> + }
> +
> + imu->ubuf = 0;
> + imu->len = len;
> + imu->acct_pages = 0;
> + imu->nr_bvecs = nr_bvecs;
> + refcount_set(&imu->refs, 1);
> +
> + node->buf = imu;
> + return node;
> +}
> +
> +int io_buffer_register_bvec(struct io_ring_ctx *ctx, const struct request *rq,
> + unsigned int index)
> +{
> + struct io_rsrc_data *data = &ctx->buf_table;
> + u16 nr_bvecs = blk_rq_nr_phys_segments(rq);
> + struct req_iterator rq_iter;
> + struct io_rsrc_node *node;
> + struct bio_vec bv;
> + int i = 0;
> +
> + lockdep_assert_held(&ctx->uring_lock);
> +
> + if (WARN_ON_ONCE(!data->nr))
> + return -EINVAL;
> + if (WARN_ON_ONCE(index >= data->nr))
> + return -EINVAL;
> +
> + node = data->nodes[index];
> + if (WARN_ON_ONCE(node))
> + return -EBUSY;
> +
> + node = io_buffer_alloc_node(ctx, nr_bvecs, blk_rq_bytes(rq));
> + if (!node)
> + return -ENOMEM;
> +
> + rq_for_each_bvec(bv, rq, rq_iter) {
> + get_page(bv.bv_page);
> + node->buf->bvec[i].bv_page = bv.bv_page;
> + node->buf->bvec[i].bv_len = bv.bv_len;
> + node->buf->bvec[i].bv_offset = bv.bv_offset;
> + i++;

In this patchset a ublk request's buffer may span multiple uring OPs, so
it is inevitable that a buggy application can complete the IO command
(and with it the ublk request) before an io_uring read/write OP that is
still using the buffer pages has finished.

That is probably why a page reference is taken here, but the consequence
is that the bvec pages' lifetime is no longer aligned with the request's,
from the block layer's point of view.

I am not sure this approach is safe:

1) For current block storage drivers, when a request completes, ownership
of all its bvec pages is transferred back to the upper layers (FS,
application, ...); with this patchset that is no longer true for ublk
zero copy.

2) BIO_PAGE_PINNED may not be set on the bio, so the upper layers may
assume the bvec pages can be reused or reclaimed as soon as this ublk bio
completes. The sketch below illustrates the problematic ordering.
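
A hypothetical buggy sequence, with the userspace commit step collapsed
into the blk_mq_end_request() it ultimately triggers in the driver
(illustrative only, not taken from the patch):

io_buffer_register_bvec(ctx, rq, tag);	/* extra refs on rq's pages */
/* server submits IORING_OP_READ_FIXED against 'tag' ... */

blk_mq_end_request(rq, BLK_STS_OK);	/* BUG: rq completes; FS or
					 * application believes it owns
					 * the pages again and may reuse
					 * them for new data ... */

/* ... while the fixed read OP is still accessing those pages. */
io_buffer_unregister_bvec(ctx, tag);	/* refs only dropped here */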
Thanks,
Ming