From: Pavel Begunkov <[email protected]>
To: [email protected]
Cc: [email protected]
Subject: [PATCH v2 13/18] io_uring: use region api for SQ
Date: Sun, 24 Nov 2024 21:12:30 +0000
Message-ID: <0b85236173823848fa6207110e74914e5dba47e8.1732481694.git.asml.silence@gmail.com>
In-Reply-To: <[email protected]>
Convert the internal parts of SQ management to the region API: instead
of tracking and mapping the SQE pages by hand, keep them in an
io_mapped_region and reuse the generic region helpers for allocation,
mmap and freeing.

Signed-off-by: Pavel Begunkov <[email protected]>
---
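Note (below the --- line, so not part of the commit message): condensed
from the hunks that follow, and not a literal excerpt, the SQ
setup/teardown flow after this patch reduces to the region pattern
introduced earlier in the series:

	struct io_uring_region_desc rd;
	int ret;

	memset(&rd, 0, sizeof(rd));
	rd.size = PAGE_ALIGN(size);
	if (ctx->flags & IORING_SETUP_NO_MMAP) {
		/* user-provided memory backs the region */
		rd.user_addr = p->sq_off.user_addr;
		rd.flags |= IORING_MEM_REGION_TYPE_USER;
	}
	ret = io_create_region(ctx, &ctx->sq_region, &rd, IORING_OFF_SQES);
	if (ret)
		return ret;
	/* kernel-visible pointer to the SQE array */
	ctx->sq_sqes = io_region_get_ptr(&ctx->sq_region);

	/* teardown: one helper replaces the unmap/free/vunmap mix */
	io_free_region(ctx, &ctx->sq_region);
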
include/linux/io_uring_types.h | 3 +--
io_uring/io_uring.c | 36 +++++++++++++---------------------
io_uring/memmap.c | 3 +--
io_uring/register.c | 35 +++++++++++++++------------------
4 files changed, 32 insertions(+), 45 deletions(-)
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 4cee414080fd..3f353f269c6e 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -431,10 +431,9 @@ struct io_ring_ctx {
* the gup'ed pages for the two rings, and the sqes.
*/
unsigned short n_ring_pages;
- unsigned short n_sqe_pages;
struct page **ring_pages;
- struct page **sqe_pages;
+ struct io_mapped_region sq_region;
/* used for optimised request parameter and wait argument passing */
struct io_mapped_region param_region;
};
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index fee3b7f50176..a1dca7bce54a 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2633,29 +2633,19 @@ static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr,
size);
}
-static void *io_sqes_map(struct io_ring_ctx *ctx, unsigned long uaddr,
- size_t size)
-{
- return __io_uaddr_map(&ctx->sqe_pages, &ctx->n_sqe_pages, uaddr,
- size);
-}
-
static void io_rings_free(struct io_ring_ctx *ctx)
{
if (!(ctx->flags & IORING_SETUP_NO_MMAP)) {
io_pages_unmap(ctx->rings, &ctx->ring_pages, &ctx->n_ring_pages,
true);
- io_pages_unmap(ctx->sq_sqes, &ctx->sqe_pages, &ctx->n_sqe_pages,
- true);
} else {
io_pages_free(&ctx->ring_pages, ctx->n_ring_pages);
ctx->n_ring_pages = 0;
- io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages);
- ctx->n_sqe_pages = 0;
vunmap(ctx->rings);
- vunmap(ctx->sq_sqes);
}
+ io_free_region(ctx, &ctx->sq_region);
+
ctx->rings = NULL;
ctx->sq_sqes = NULL;
}
@@ -3472,9 +3462,10 @@ bool io_is_uring_fops(struct file *file)
static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
struct io_uring_params *p)
{
+ struct io_uring_region_desc rd;
struct io_rings *rings;
size_t size, sq_array_offset;
- void *ptr;
+ int ret;
/* make sure these are sane, as we already accounted them */
ctx->sq_entries = p->sq_entries;
@@ -3510,17 +3501,18 @@ static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
return -EOVERFLOW;
}
- if (!(ctx->flags & IORING_SETUP_NO_MMAP))
- ptr = io_pages_map(&ctx->sqe_pages, &ctx->n_sqe_pages, size);
- else
- ptr = io_sqes_map(ctx, p->sq_off.user_addr, size);
-
- if (IS_ERR(ptr)) {
+ memset(&rd, 0, sizeof(rd));
+ rd.size = PAGE_ALIGN(size);
+ if (ctx->flags & IORING_SETUP_NO_MMAP) {
+ rd.user_addr = p->sq_off.user_addr;
+ rd.flags |= IORING_MEM_REGION_TYPE_USER;
+ }
+ ret = io_create_region(ctx, &ctx->sq_region, &rd, IORING_OFF_SQES);
+ if (ret) {
io_rings_free(ctx);
- return PTR_ERR(ptr);
+ return ret;
}
-
- ctx->sq_sqes = ptr;
+ ctx->sq_sqes = io_region_get_ptr(&ctx->sq_region);
return 0;
}
diff --git a/io_uring/memmap.c b/io_uring/memmap.c
index 5d971ba33d5a..0a2d03bd312b 100644
--- a/io_uring/memmap.c
+++ b/io_uring/memmap.c
@@ -464,8 +464,7 @@ __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
npages = min(ctx->n_ring_pages, (sz + PAGE_SIZE - 1) >> PAGE_SHIFT);
return io_uring_mmap_pages(ctx, vma, ctx->ring_pages, npages);
case IORING_OFF_SQES:
- return io_uring_mmap_pages(ctx, vma, ctx->sqe_pages,
- ctx->n_sqe_pages);
+ return io_region_mmap(ctx, &ctx->sq_region, vma);
case IORING_OFF_PBUF_RING:
return io_pbuf_mmap(file, vma);
case IORING_MAP_OFF_PARAM_REGION:
diff --git a/io_uring/register.c b/io_uring/register.c
index 5e07205fb071..44cd64923d31 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -368,11 +368,11 @@ static int io_register_clock(struct io_ring_ctx *ctx,
*/
struct io_ring_ctx_rings {
unsigned short n_ring_pages;
- unsigned short n_sqe_pages;
struct page **ring_pages;
- struct page **sqe_pages;
- struct io_uring_sqe *sq_sqes;
struct io_rings *rings;
+
+ struct io_uring_sqe *sq_sqes;
+ struct io_mapped_region sq_region;
};
static void io_register_free_rings(struct io_ring_ctx *ctx,
@@ -382,14 +382,11 @@ static void io_register_free_rings(struct io_ring_ctx *ctx,
if (!(p->flags & IORING_SETUP_NO_MMAP)) {
io_pages_unmap(r->rings, &r->ring_pages, &r->n_ring_pages,
true);
- io_pages_unmap(r->sq_sqes, &r->sqe_pages, &r->n_sqe_pages,
- true);
} else {
io_pages_free(&r->ring_pages, r->n_ring_pages);
- io_pages_free(&r->sqe_pages, r->n_sqe_pages);
vunmap(r->rings);
- vunmap(r->sq_sqes);
}
+ io_free_region(ctx, &r->sq_region);
}
#define swap_old(ctx, o, n, field) \
@@ -404,11 +401,11 @@ static void io_register_free_rings(struct io_ring_ctx *ctx,
static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
{
+ struct io_uring_region_desc rd;
struct io_ring_ctx_rings o = { }, n = { }, *to_free = NULL;
size_t size, sq_array_offset;
struct io_uring_params p;
unsigned i, tail;
- void *ptr;
int ret;
/* for single issuer, must be owner resizing */
@@ -466,16 +463,18 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
return -EOVERFLOW;
}
- if (!(p.flags & IORING_SETUP_NO_MMAP))
- ptr = io_pages_map(&n.sqe_pages, &n.n_sqe_pages, size);
- else
- ptr = __io_uaddr_map(&n.sqe_pages, &n.n_sqe_pages,
- p.sq_off.user_addr,
- size);
- if (IS_ERR(ptr)) {
+ memset(&rd, 0, sizeof(rd));
+ rd.size = PAGE_ALIGN(size);
+ if (p.flags & IORING_SETUP_NO_MMAP) {
+ rd.user_addr = p.sq_off.user_addr;
+ rd.flags |= IORING_MEM_REGION_TYPE_USER;
+ }
+ ret = io_create_region_mmap_safe(ctx, &n.sq_region, &rd, IORING_OFF_SQES);
+ if (ret) {
io_register_free_rings(ctx, &p, &n);
- return PTR_ERR(ptr);
+ return ret;
}
+ n.sq_sqes = io_region_get_ptr(&n.sq_region);
/*
* If using SQPOLL, park the thread
@@ -506,7 +505,6 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
* Now copy SQ and CQ entries, if any. If either of the destination
* rings can't hold what is already there, then fail the operation.
*/
- n.sq_sqes = ptr;
tail = o.rings->sq.tail;
if (tail - o.rings->sq.head > p.sq_entries)
goto overflow;
@@ -555,9 +553,8 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
ctx->rings = n.rings;
ctx->sq_sqes = n.sq_sqes;
swap_old(ctx, o, n, n_ring_pages);
- swap_old(ctx, o, n, n_sqe_pages);
swap_old(ctx, o, n, ring_pages);
- swap_old(ctx, o, n, sqe_pages);
+ swap_old(ctx, o, n, sq_region);
to_free = &o;
ret = 0;
out:
--
2.46.0
Thread overview: 19+ messages
2024-11-24 21:12 [PATCH v2 00/18] kernel allocated regions and convert memmap to regions Pavel Begunkov
2024-11-24 21:12 ` [PATCH v2 01/18] io_uring: rename ->resize_lock Pavel Begunkov
2024-11-24 21:12 ` [PATCH v2 02/18] io_uring/rsrc: export io_check_coalesce_buffer Pavel Begunkov
2024-11-24 21:12 ` [PATCH v2 03/18] io_uring/memmap: add internal region flags Pavel Begunkov
2024-11-24 21:12 ` [PATCH v2 04/18] io_uring/memmap: flag regions with user pages Pavel Begunkov
2024-11-24 21:12 ` [PATCH v2 05/18] io_uring/memmap: account memory before pinning Pavel Begunkov
2024-11-24 21:12 ` [PATCH v2 06/18] io_uring/memmap: reuse io_free_region for failure path Pavel Begunkov
2024-11-24 21:12 ` [PATCH v2 07/18] io_uring/memmap: optimise single folio regions Pavel Begunkov
2024-11-24 21:12 ` [PATCH v2 08/18] io_uring/memmap: helper for pinning region pages Pavel Begunkov
2024-11-24 21:12 ` [PATCH v2 09/18] io_uring/memmap: add IO_REGION_F_SINGLE_REF Pavel Begunkov
2024-11-24 21:12 ` [PATCH v2 10/18] io_uring/memmap: implement kernel allocated regions Pavel Begunkov
2024-11-24 21:12 ` [PATCH v2 11/18] io_uring/memmap: implement mmap for regions Pavel Begunkov
2024-11-24 21:12 ` [PATCH v2 12/18] io_uring: pass ctx to io_register_free_rings Pavel Begunkov
2024-11-24 21:12 ` Pavel Begunkov [this message]
2024-11-24 21:12 ` [PATCH v2 14/18] io_uring: use region api for CQ Pavel Begunkov
2024-11-24 21:12 ` [PATCH v2 15/18] io_uring/kbuf: use mmap_lock to sync with mmap Pavel Begunkov
2024-11-24 21:12 ` [PATCH v2 16/18] io_uring/kbuf: remove pbuf ring refcounting Pavel Begunkov
2024-11-24 21:12 ` [PATCH v2 17/18] io_uring/kbuf: use region api for pbuf rings Pavel Begunkov
2024-11-24 21:12 ` [PATCH v2 18/18] io_uring/memmap: unify io_uring mmap'ing code Pavel Begunkov