* [PATCH 2/3] io_uring/rsrc: add reference count to struct io_mapped_ubuf
2024-09-11 20:03 [PATCHSET 0/3] Provide more efficient buffer registration Jens Axboe
2024-09-11 20:03 ` [PATCH 1/3] io_uring/rsrc: clear 'slot' entry upfront Jens Axboe
@ 2024-09-11 20:03 ` Jens Axboe
2024-09-11 20:03 ` [PATCH 3/3] io_uring: add IORING_REGISTER_COPY_BUFFERS method Jens Axboe
2024-09-11 20:13 ` [PATCHSET 0/3] Provide more efficient buffer registration Jens Axboe
3 siblings, 0 replies; 6+ messages in thread
From: Jens Axboe @ 2024-09-11 20:03 UTC
To: io-uring; +Cc: Jens Axboe
Currently a mapped buffer is only ever owned by a single ring, and hence
its reference count will always be 1 when it's torn down and freed.
However, in preparation for being able to link an io_mapped_ubuf to
different spots, add a reference count to manage its lifetime.
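The intended lifetime rules are simple: registration initializes the
count to 1, any additional owner takes its own reference, and only the
final put unpins and frees the pages. Roughly (a sketch; the actual call
sites are in the diff below):

	refcount_set(&imu->refs, 1);	/* buffer registered, sole owner */
	refcount_inc(&imu->refs);	/* an additional owner links it */

	if (!refcount_dec_and_test(&imu->refs))
		return;			/* other owners remain, keep pages */
	/* last reference dropped: unpin pages and free imu */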
Signed-off-by: Jens Axboe <[email protected]>
---
io_uring/rsrc.c | 3 +++
io_uring/rsrc.h | 1 +
2 files changed, 4 insertions(+)
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index d42114845fac..28f98de3c304 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -116,6 +116,8 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slo
 
 	*slot = NULL;
 	if (imu != &dummy_ubuf) {
+		if (!refcount_dec_and_test(&imu->refs))
+			return;
 		for (i = 0; i < imu->nr_bvecs; i++)
 			unpin_user_page(imu->bvec[i].bv_page);
 		if (imu->acct_pages)
@@ -990,6 +992,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 		imu->folio_shift = data.folio_shift;
 		imu->folio_mask = ~((1UL << data.folio_shift) - 1);
 	}
+	refcount_set(&imu->refs, 1);
 	off = (unsigned long) iov->iov_base & ~imu->folio_mask;
 	*pimu = imu;
 	ret = 0;
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 3d0dda3556e6..98a253172c27 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -47,6 +47,7 @@ struct io_mapped_ubuf {
 	unsigned int	folio_shift;
 	unsigned long	acct_pages;
 	unsigned long	folio_mask;
+	refcount_t	refs;
 	struct bio_vec	bvec[] __counted_by(nr_bvecs);
 };
--
2.45.2
* [PATCH 3/3] io_uring: add IORING_REGISTER_COPY_BUFFERS method
2024-09-11 20:03 [PATCHSET 0/3] Provide more efficient buffer registration Jens Axboe
2024-09-11 20:03 ` [PATCH 1/3] io_uring/rsrc: clear 'slot' entry upfront Jens Axboe
2024-09-11 20:03 ` [PATCH 2/3] io_uring/rsrc: add reference count to struct io_mapped_ubuf Jens Axboe
@ 2024-09-11 20:03 ` Jens Axboe
2024-09-11 20:13 ` [PATCHSET 0/3] Provide more efficient buffer registration Jens Axboe
3 siblings, 0 replies; 6+ messages in thread
From: Jens Axboe @ 2024-09-11 20:03 UTC
To: io-uring; +Cc: Jens Axboe
Buffers can get registered with io_uring, which allows skipping the
repeated pin_pages and unpin/unref of pages for each O_DIRECT operation.
This reduces the overhead of O_DIRECT IO.

However, registering buffers can take some time. Normally this isn't an
issue as it's done at initialization time (and hence less critical), but
for cases where rings can be created and destroyed as part of an IO
thread pool, registering the same buffers for multiple rings becomes a
more time-sensitive proposition. As an example, let's say an application
has an IO memory pool of 500G. Initial registration takes:
Got 500 huge pages (each 1024MB)
Registered 500 pages in 409 msec
or about 0.4 seconds. If we go higher to 900 1GB huge pages being
registered:
Registered 900 pages in 738 msec
which is, as expected, fully linear scaling.
Rather than have each ring pin/map/register the same buffer pool,
provide an io_uring_register(2) opcode to simply duplicate the buffers
that are registered with another ring. Adding the same 900GB of
registered buffers to the target ring can then be accomplished in:
Copied 900 pages in 17 usec
While the timings vary a bit from run to run, this is roughly a
25,000-40,000x speedup for this use case.
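For completeness, here's a minimal sketch of the intended usage from
userspace via the raw system call (the helper name is illustrative, and
building it requires the updated uapi header from this patch):

	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/io_uring.h>

	/*
	 * Duplicate the buffers registered with the ring 'src_fd' into the
	 * ring 'dst_fd'. Returns 0 on success, -1 with errno set on failure.
	 */
	static int copy_registered_buffers(int dst_fd, int src_fd)
	{
		struct io_uring_copy_buffers buf;

		memset(&buf, 0, sizeof(buf));
		buf.src_fd = src_fd;
		/* nr_args must be 1, anything else is rejected with -EINVAL */
		return syscall(__NR_io_uring_register, dst_fd,
			       IORING_REGISTER_COPY_BUFFERS, &buf, 1);
	}

The destination ring must not have any buffers registered yet, or the
call fails with -EBUSY.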
Signed-off-by: Jens Axboe <[email protected]>
---
 include/uapi/linux/io_uring.h |  8 ++++
 io_uring/register.c           |  6 +++
 io_uring/rsrc.c               | 86 ++++++++++++++++++++++++++++++++++++
 io_uring/rsrc.h               |  1 +
 4 files changed, 101 insertions(+)
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index a275f91d2ac0..7b15216a3d7f 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -609,6 +609,9 @@ enum io_uring_register_op {
 	IORING_REGISTER_CLOCK			= 29,
 
+	/* copy registered buffers from source ring to current ring */
+	IORING_REGISTER_COPY_BUFFERS		= 30,
+
 	/* this goes last */
 	IORING_REGISTER_LAST,
@@ -694,6 +697,11 @@ struct io_uring_clock_register {
 	__u32	__resv[3];
 };
 
+struct io_uring_copy_buffers {
+	__u32	src_fd;
+	__u32	pad[7];
+};
+
 struct io_uring_buf {
 	__u64	addr;
 	__u32	len;
diff --git a/io_uring/register.c b/io_uring/register.c
index 57cb85c42526..c8670de33343 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -542,6 +542,12 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 			break;
 		ret = io_register_clock(ctx, arg);
 		break;
+	case IORING_REGISTER_COPY_BUFFERS:
+		ret = -EINVAL;
+		if (!arg || nr_args != 1)
+			break;
+		ret = io_register_copy_buffers(ctx, arg);
+		break;
 	default:
 		ret = -EINVAL;
 		break;
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 28f98de3c304..457492c6a329 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -1137,3 +1137,89 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
 
 	return 0;
 }
+
+/*
+ * Return with both ctx and src_ctx locked, locking the lowest valued ctx
+ * first to prevent deadlocks for the same operation with ctxs switched.
+ */
+static int lock_src_ctx(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx)
+{
+	if (ctx == src_ctx) {
+		return -EINVAL;
+	} else if (ctx > src_ctx) {
+		mutex_unlock(&ctx->uring_lock);
+		mutex_lock(&src_ctx->uring_lock);
+		mutex_lock(&ctx->uring_lock);
+	} else {
+		mutex_lock(&src_ctx->uring_lock);
+	}
+
+	return 0;
+}
+
+static int io_copy_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx)
+{
+	struct io_rsrc_data *data;
+	int i, ret, nbufs;
+
+	nbufs = src_ctx->nr_user_bufs;
+	if (!nbufs)
+		return -ENXIO;
+	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_BUFFER, NULL, nbufs, &data);
+	if (ret)
+		return ret;
+	ret = io_buffers_map_alloc(ctx, nbufs);
+	if (ret) {
+		io_rsrc_data_free(data);
+		return ret;
+	}
+
+	for (i = 0; i < nbufs; i++) {
+		struct io_mapped_ubuf *src = src_ctx->user_bufs[i];
+
+		refcount_inc(&src->refs);
+		ctx->user_bufs[i] = src;
+	}
+	ctx->buf_data = data;
+	ctx->nr_user_bufs = nbufs;
+	return 0;
+}
+
+/*
+ * Copy the registered buffers from the source ring whose file descriptor
+ * is given in src_fd to the current ring. This is identical to registering
+ * the buffers with ctx, except faster as mappings already exist.
+ *
+ * Since the memory is already accounted once, don't account it again.
+ */
+int io_register_copy_buffers(struct io_ring_ctx *ctx, void __user *arg)
+{
+	struct io_uring_copy_buffers buf;
+	struct io_ring_ctx *src_ctx;
+	struct fd f;
+	int ret;
+
+	if (ctx->user_bufs || ctx->nr_user_bufs)
+		return -EBUSY;
+	if (copy_from_user(&buf, arg, sizeof(buf)))
+		return -EFAULT;
+	if (memchr_inv(buf.pad, 0, sizeof(buf.pad)))
+		return -EINVAL;
+
+	f = fdget(buf.src_fd);
+	if (!f.file)
+		return -EBADF;
+	if (!io_is_uring_fops(f.file)) {
+		fdput(f);
+		return -EBADF;
+	}
+
+	src_ctx = f.file->private_data;
+	ret = lock_src_ctx(ctx, src_ctx);
+	if (!ret) {
+		ret = io_copy_buffers(ctx, src_ctx);
+		mutex_unlock(&src_ctx->uring_lock);
+	}
+	fdput(f);
+	return ret;
+}
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 98a253172c27..93546ab337a6 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -68,6 +68,7 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
 			struct io_mapped_ubuf *imu,
 			u64 buf_addr, size_t len);
 
+int io_register_copy_buffers(struct io_ring_ctx *ctx, void __user *arg);
 void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
 int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
 int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
--
2.45.2