* [PATCH 1/4] io_uring: remove sq/cq_off memset
From: Jens Axboe @ 2023-04-19 22:48 UTC
To: io-uring; +Cc: Jens Axboe
We only have two reserved members that aren't otherwise being filled in,
so clear them manually rather than memset'ing the whole structs. This is
in preparation for using one of these members for a new feature.
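
For context, these sq_off/cq_off members are how userspace locates the
ring fields inside the region it maps, and the reserved members are part
of that same ABI surface, so they must keep reading as zero until they
gain a meaning. A minimal consumer-side sketch of that pattern follows;
sq_field() and the fake buffer are illustrative stand-ins, not part of
the patch:

#include <stdio.h>
#include <linux/io_uring.h>

/* Sketch: the kernel publishes byte offsets into the SQ ring mapping via
 * struct io_sqring_offsets, and the application adds them to the base
 * address it got back from mmap(2). A plain buffer stands in for the
 * real mapping here. */
static unsigned *sq_field(void *ring_ptr, unsigned off)
{
	return (unsigned *)((char *)ring_ptr + off);
}

int main(void)
{
	struct io_sqring_offsets off = { .head = 0, .tail = 4 };
	unsigned fake_ring[2] = { 7, 9 };	/* stand-in for the mapping */

	printf("head=%u tail=%u\n",
	       *sq_field(fake_ring, off.head),
	       *sq_field(fake_ring, off.tail));
	return 0;
}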
Signed-off-by: Jens Axboe <[email protected]>
---
io_uring/io_uring.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 68684aabfbb7..7b4f3eb16a73 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -3900,7 +3900,6 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
if (ret)
goto err;
- memset(&p->sq_off, 0, sizeof(p->sq_off));
p->sq_off.head = offsetof(struct io_rings, sq.head);
p->sq_off.tail = offsetof(struct io_rings, sq.tail);
p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
@@ -3908,8 +3907,9 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
p->sq_off.flags = offsetof(struct io_rings, sq_flags);
p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
+ p->sq_off.resv1 = 0;
+ p->sq_off.resv2 = 0;
- memset(&p->cq_off, 0, sizeof(p->cq_off));
p->cq_off.head = offsetof(struct io_rings, cq.head);
p->cq_off.tail = offsetof(struct io_rings, cq.tail);
p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
@@ -3917,6 +3917,8 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
p->cq_off.cqes = offsetof(struct io_rings, cqes);
p->cq_off.flags = offsetof(struct io_rings, cq_flags);
+ p->cq_off.resv1 = 0;
+ p->cq_off.resv2 = 0;
p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
--
2.39.2
* [PATCH 2/4] io_uring: return error pointer from io_mem_alloc()
From: Jens Axboe @ 2023-04-19 22:48 UTC
To: io-uring; +Cc: Jens Axboe
In preparation for having more than one type of ring allocator, make the
existing one return a valid pointer or an ERR_PTR on failure, rather than
just NULL.
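
The ERR_PTR()/IS_ERR()/PTR_ERR() helpers this relies on come from the
kernel's <linux/err.h>. For readers less familiar with the idiom, here is
a small self-contained userspace re-implementation of the calling
convention the patch moves to; mem_alloc() is an illustrative stand-in
for io_mem_alloc(), not the kernel code:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace sketch of the kernel's ERR_PTR idiom: error codes are encoded
 * in the top page worth of pointer values, which no valid allocation can
 * ever occupy. */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *mem_alloc(size_t size)
{
	void *ret = malloc(size);

	return ret ? ret : ERR_PTR(-ENOMEM);
}

int main(void)
{
	void *p = mem_alloc(64);

	if (IS_ERR(p)) {
		fprintf(stderr, "alloc failed: %ld\n", PTR_ERR(p));
		return 1;
	}
	free(p);
	return 0;
}

The point of the convention is that a single pointer-sized return value
carries either a valid allocation or a specific error code, so future
allocators can fail with something other than -ENOMEM.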
Signed-off-by: Jens Axboe <[email protected]>
---
io_uring/io_uring.c | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 7b4f3eb16a73..13faa3115eb5 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2719,8 +2719,12 @@ static void io_mem_free(void *ptr)
static void *io_mem_alloc(size_t size)
{
gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
+ void *ret;
- return (void *) __get_free_pages(gfp, get_order(size));
+ ret = (void *) __get_free_pages(gfp, get_order(size));
+ if (ret)
+ return ret;
+ return ERR_PTR(-ENOMEM);
}
static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries,
@@ -3686,6 +3690,7 @@ static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
{
struct io_rings *rings;
size_t size, sq_array_offset;
+ void *ptr;
/* make sure these are sane, as we already accounted them */
ctx->sq_entries = p->sq_entries;
@@ -3696,8 +3701,8 @@ static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
return -EOVERFLOW;
rings = io_mem_alloc(size);
- if (!rings)
- return -ENOMEM;
+ if (IS_ERR(rings))
+ return PTR_ERR(rings);
ctx->rings = rings;
ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
@@ -3716,13 +3721,14 @@ static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
return -EOVERFLOW;
}
- ctx->sq_sqes = io_mem_alloc(size);
- if (!ctx->sq_sqes) {
+ ptr = io_mem_alloc(size);
+ if (IS_ERR(ptr)) {
io_mem_free(ctx->rings);
ctx->rings = NULL;
- return -ENOMEM;
+ return PTR_ERR(ptr);
}
+ ctx->sq_sqes = ptr;
return 0;
}
--
2.39.2
* [PATCH 4/4] io_uring: support for user allocated memory for rings/sqes
From: Jens Axboe @ 2023-04-19 22:48 UTC
To: io-uring; +Cc: Jens Axboe
Currently io_uring applications must call mmap(2) twice to map the rings
themselves and the sqes array. This works fine, but it does not support
using huge pages to back the rings/sqes.

Provide a way for the application to pass in pre-allocated memory for
the rings/sqes, which it can suitably allocate from shmfs or via mmap
to get huge page support. Particularly for larger rings, this reduces
the number of TLB entries needed.

If an application wishes to take advantage of that, it must pre-allocate
the memory needed for the sq/cq ring, and the sqes. The former must be
passed in via the io_uring_params->cq_off.user_addr field, while the
latter is passed in via the io_uring_params->sq_off.user_addr field. Then
it must set IORING_SETUP_NO_MMAP in the io_uring_params->flags field, and
io_uring will then map the existing memory into the kernel for shared
use. The application must not call mmap(2) to map the rings as it
otherwise would have; that will now fail with -EINVAL if this setup flag
was used.

The pages used for the rings and sqes must be contiguous. The intent here
is clearly that huge pages should be used, otherwise the normal setup
procedure works fine as-is. The application may use one huge page for
both the rings and sqes.

Outside of those initialization changes, everything works like it did
before.
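
To make the flow concrete, here is a minimal userspace sketch of the
setup described above. It assumes uapi headers new enough to contain the
user_addr fields from this series and a system with hugetlb pages
reserved (e.g. via vm.nr_hugepages), and it picks illustrative sizes:
one 2MB huge page split halfway between rings and sqes, which is plenty
for 64 entries. A real application must size the regions from its entry
counts.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

int main(void)
{
	struct io_uring_params p;
	size_t huge = 2UL * 1024 * 1024;	/* one 2MB huge page */
	void *mem;
	int fd;

	/* back both the rings and the sqes with a single huge page */
	mem = mmap(NULL, huge, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (mem == MAP_FAILED)
		return 1;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_NO_MMAP;
	/* rings at the start of the page, sqes halfway in; both addresses
	 * satisfy the page alignment the kernel checks for */
	p.cq_off.user_addr = (uintptr_t)mem;
	p.sq_off.user_addr = (uintptr_t)mem + huge / 2;

	fd = syscall(__NR_io_uring_setup, 64, &p);
	if (fd < 0)
		return 1;

	/* mmap(2) on the ring fd would now fail with -EINVAL; the usual
	 * ring offset math is applied to mem directly instead */
	close(fd);
	return 0;
}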
Signed-off-by: Jens Axboe <[email protected]>
---
include/linux/io_uring_types.h | 10 ++++
include/uapi/linux/io_uring.h | 9 ++-
io_uring/io_uring.c | 100 ++++++++++++++++++++++++++++++----
3 files changed, 108 insertions(+), 11 deletions(-)
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index c54f3fb7ab1a..3489fa223586 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -211,6 +211,16 @@ struct io_ring_ctx {
unsigned int compat: 1;
enum task_work_notify_mode notify_method;
+
+ /*
+ * If IORING_SETUP_NO_MMAP is used, then the below holds
+ * the gup'ed pages for the two rings, and the sqes.
+ */
+ unsigned short n_ring_pages;
+ unsigned short n_sqe_pages;
+ struct page **ring_pages;
+ struct page **sqe_pages;
+
struct io_rings *rings;
struct task_struct *submitter_task;
struct percpu_ref refs;
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index ea903a677ce9..5499f9728f9d 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -179,6 +179,11 @@ enum {
*/
#define IORING_SETUP_NO_OFFLOAD (1U << 14)
+/*
+ * Application provides the memory for the rings
+ */
+#define IORING_SETUP_NO_MMAP (1U << 15)
+
enum io_uring_op {
IORING_OP_NOP,
IORING_OP_READV,
@@ -412,7 +417,7 @@ struct io_sqring_offsets {
__u32 dropped;
__u32 array;
__u32 resv1;
- __u64 resv2;
+ __u64 user_addr;
};
/*
@@ -431,7 +436,7 @@ struct io_cqring_offsets {
__u32 cqes;
__u32 flags;
__u32 resv1;
- __u64 resv2;
+ __u64 user_addr;
};
/*
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index cf570b0f82ec..d6694bd92453 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2716,12 +2716,80 @@ static void io_mem_free(void *ptr)
free_compound_page(page);
}
+static void io_pages_free(struct page ***pages, int npages)
+{
+ struct page **page_array;
+ int i;
+
+ if (!pages)
+ return;
+ page_array = *pages;
+ for (i = 0; i < npages; i++)
+ unpin_user_page(page_array[i]);
+ kvfree(page_array);
+ *pages = NULL;
+}
+
+static void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
+ unsigned long uaddr, size_t size)
+{
+ struct page **page_array;
+ unsigned int nr_pages;
+ int ret;
+
+ *npages = 0;
+
+ if (uaddr & (PAGE_SIZE - 1) || !size)
+ return ERR_PTR(-EINVAL);
+
+ nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if (nr_pages > USHRT_MAX)
+ return ERR_PTR(-EINVAL);
+ page_array = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
+ if (!page_array)
+ return ERR_PTR(-ENOMEM);
+
+ ret = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
+ page_array);
+ if (ret != nr_pages) {
+err:
+ io_pages_free(&page_array, ret > 0 ? ret : 0);
+ return ret < 0 ? ERR_PTR(ret) : ERR_PTR(-EFAULT);
+ }
+ /* pages must be contig */
+ ret--;
+ if (page_array[0] + ret != page_array[ret])
+ goto err;
+ *pages = page_array;
+ *npages = nr_pages;
+ return page_to_virt(page_array[0]);
+}
+
+static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr,
+ size_t size)
+{
+ return __io_uaddr_map(&ctx->ring_pages, &ctx->n_ring_pages, uaddr,
+ size);
+}
+
+static void *io_sqes_map(struct io_ring_ctx *ctx, unsigned long uaddr,
+ size_t size)
+{
+ return __io_uaddr_map(&ctx->sqe_pages, &ctx->n_sqe_pages, uaddr,
+ size);
+}
+
static void io_rings_free(struct io_ring_ctx *ctx)
{
- io_mem_free(ctx->rings);
- io_mem_free(ctx->sq_sqes);
- ctx->rings = NULL;
- ctx->sq_sqes = NULL;
+ if (!(ctx->flags & IORING_SETUP_NO_MMAP)) {
+ io_mem_free(ctx->rings);
+ io_mem_free(ctx->sq_sqes);
+ ctx->rings = NULL;
+ ctx->sq_sqes = NULL;
+ } else {
+ io_pages_free(&ctx->ring_pages, ctx->n_ring_pages);
+ io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages);
+ }
}
static void *io_mem_alloc(size_t size)
@@ -3366,6 +3434,10 @@ static void *io_uring_validate_mmap_request(struct file *file,
struct page *page;
void *ptr;
+ /* Don't allow mmap if the ring was set up without it */
+ if (ctx->flags & IORING_SETUP_NO_MMAP)
+ return ERR_PTR(-EINVAL);
+
switch (offset & IORING_OFF_MMAP_MASK) {
case IORING_OFF_SQ_RING:
case IORING_OFF_CQ_RING:
@@ -3707,7 +3779,11 @@ static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
if (size == SIZE_MAX)
return -EOVERFLOW;
- rings = io_mem_alloc(size);
+ if (!(ctx->flags & IORING_SETUP_NO_MMAP))
+ rings = io_mem_alloc(size);
+ else
+ rings = io_rings_map(ctx, p->cq_off.user_addr, size);
+
if (IS_ERR(rings))
return PTR_ERR(rings);
@@ -3727,13 +3803,17 @@ static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
return -EOVERFLOW;
}
- ptr = io_mem_alloc(size);
+ if (!(ctx->flags & IORING_SETUP_NO_MMAP))
+ ptr = io_mem_alloc(size);
+ else
+ ptr = io_sqes_map(ctx, p->sq_off.user_addr, size);
+
if (IS_ERR(ptr)) {
io_rings_free(ctx);
return PTR_ERR(ptr);
}
ctx->sq_sqes = ptr;
return 0;
}
@@ -3919,7 +3999,8 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
p->sq_off.resv1 = 0;
- p->sq_off.resv2 = 0;
+ if (!(ctx->flags & IORING_SETUP_NO_MMAP))
+ p->sq_off.user_addr = 0;
p->cq_off.head = offsetof(struct io_rings, cq.head);
p->cq_off.tail = offsetof(struct io_rings, cq.tail);
@@ -3929,7 +4010,8 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
p->cq_off.cqes = offsetof(struct io_rings, cqes);
p->cq_off.flags = offsetof(struct io_rings, cq_flags);
p->cq_off.resv1 = 0;
- p->cq_off.resv2 = 0;
+ if (!(ctx->flags & IORING_SETUP_NO_MMAP))
+ p->cq_off.user_addr = 0;
p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
@@ -3996,7 +4078,7 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN |
- IORING_SETUP_NO_OFFLOAD))
+ IORING_SETUP_NO_OFFLOAD | IORING_SETUP_NO_MMAP))
return -EINVAL;
return io_uring_create(entries, &p, params);
--
2.39.2