* [PATCHSET 0/2] Shrink io_mapped_ubuf size
From: Jens Axboe @ 2024-09-15 15:22 UTC
To: io-uring; +Cc: cliang01.li
Hi,
This series shrinks struct io_mapped_ubuf from its current 48 bytes down
to 32 bytes. No ill effects were observed in terms of performance.
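
For reference, a rough before/after sketch of the layout on 64-bit. The
fields and types are from io_uring/rsrc.h as touched by these patches; the
offset annotations are worked out by hand, not taken from pahole output:

/* before this series: 44 bytes of fields, padded to 48 */
struct io_mapped_ubuf {
	u64		ubuf;		/*  0.. 8 */
	u64		ubuf_end;	/*  8..16 */
	unsigned int	nr_bvecs;	/* 16..20 */
	unsigned int	folio_shift;	/* 20..24 */
	unsigned long	acct_pages;	/* 24..32 */
	unsigned long	folio_mask;	/* 32..40 */
	refcount_t	refs;		/* 40..44, plus 4 bytes tail padding */
	struct bio_vec	bvec[] __counted_by(nr_bvecs);
};

/* after both patches: exactly 32 bytes, no padding */
struct io_mapped_ubuf {
	u64		ubuf;		/*  0.. 8 */
	unsigned int	len;		/*  8..12 */
	unsigned int	nr_bvecs;	/* 12..16 */
	unsigned int	folio_shift;	/* 16..20 */
	refcount_t	refs;		/* 20..24 */
	unsigned long	acct_pages;	/* 24..32 */
	struct bio_vec	bvec[] __counted_by(nr_bvecs);
};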
--
Jens Axboe
* [PATCH 1/2] io_uring/rsrc: get rid of io_mapped_ubuf->folio_mask
From: Jens Axboe @ 2024-09-15 15:22 UTC
To: io-uring; +Cc: cliang01.li, Jens Axboe
We don't really need to cache the folio mask; reclaim 8 bytes from struct
io_mapped_ubuf and just calculate the mask from folio_shift when we need
it. The only hot path here is io_import_fixed().
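
As a quick sanity check on the arithmetic, here is a minimal standalone
sketch (plain userspace C, not io_uring code; the shift and address values
are made up for illustration):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* e.g. a coalesced 2MB huge page folio */
	unsigned int folio_shift = 21;
	/* the mask the old code cached in imu->folio_mask */
	unsigned long folio_mask = ~((1UL << folio_shift) - 1);
	unsigned long addr = 0x7f1234567abcUL;

	/* old cached-mask form and new computed-on-demand form agree */
	assert((addr & ~folio_mask) == (addr & ((1UL << folio_shift) - 1)));
	printf("offset within folio: %#lx\n",
	       addr & ((1UL << folio_shift) - 1));
	return 0;
}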
Signed-off-by: Jens Axboe <[email protected]>
---
io_uring/rsrc.c | 9 +++------
io_uring/rsrc.h | 1 -
2 files changed, 3 insertions(+), 7 deletions(-)
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 9264e555ae59..2477995e2d65 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -988,13 +988,10 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 	imu->ubuf_end = imu->ubuf + iov->iov_len;
 	imu->nr_bvecs = nr_pages;
 	imu->folio_shift = PAGE_SHIFT;
-	imu->folio_mask = PAGE_MASK;
-	if (coalesced) {
+	if (coalesced)
 		imu->folio_shift = data.folio_shift;
-		imu->folio_mask = ~((1UL << data.folio_shift) - 1);
-	}
 	refcount_set(&imu->refs, 1);
-	off = (unsigned long) iov->iov_base & ~imu->folio_mask;
+	off = (unsigned long) iov->iov_base & ((1UL << imu->folio_shift) - 1);
 
 	*pimu = imu;
 	ret = 0;
@@ -1132,7 +1129,7 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
 			iter->bvec = bvec + seg_skip;
 			iter->nr_segs -= seg_skip;
 			iter->count -= bvec->bv_len + offset;
-			iter->iov_offset = offset & ~imu->folio_mask;
+			iter->iov_offset = offset & ((1UL << imu->folio_shift) - 1);
 		}
 	}
 
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index eb4803e473b0..e290d2be3285 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -46,7 +46,6 @@ struct io_mapped_ubuf {
 	unsigned int	nr_bvecs;
 	unsigned int	folio_shift;
 	unsigned long	acct_pages;
-	unsigned long	folio_mask;
 	refcount_t	refs;
 	struct bio_vec	bvec[] __counted_by(nr_bvecs);
 };
--
2.45.2
* [PATCH 2/2] io_uring/rsrc: change ubuf->ubuf_end to length tracking
From: Jens Axboe @ 2024-09-15 15:22 UTC
To: io-uring; +Cc: cliang01.li, Jens Axboe
If we track a start address (ubuf) and a length (len) instead of start and
end addresses, we can reduce the size of struct io_mapped_ubuf by another
4 bytes; effectively 8 bytes, as moving acct_pages below refs eliminates a
hole in the struct too.
This shrinks io_mapped_ubuf to 32 bytes.
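
To show where the second 4 bytes come from, a standalone model of the two
layouts (refcount_t stood in by a hypothetical 4-byte struct, bvec[]
omitted; sizes assume a 64-bit ABI):

#include <stdint.h>
#include <stdio.h>

typedef struct { int val; } refcount_model;	/* 4-byte stand-in */

struct after_patch1 {		/* u64 end pointer, refs last */
	uint64_t ubuf;
	uint64_t ubuf_end;
	unsigned int nr_bvecs;
	unsigned int folio_shift;
	unsigned long acct_pages;
	refcount_model refs;	/* 4 bytes + 4 bytes tail padding */
};

struct after_patch2 {		/* 32-bit len, refs pulled above acct_pages */
	uint64_t ubuf;
	unsigned int len;
	unsigned int nr_bvecs;
	unsigned int folio_shift;
	refcount_model refs;	/* fills what would otherwise be a hole */
	unsigned long acct_pages;
};

int main(void)
{
	/* prints 40 and 32 on x86-64 */
	printf("%zu %zu\n", sizeof(struct after_patch1),
	       sizeof(struct after_patch2));
	return 0;
}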
Signed-off-by: Jens Axboe <[email protected]>
---
io_uring/fdinfo.c | 3 +--
io_uring/rsrc.c | 6 +++---
io_uring/rsrc.h | 4 ++--
3 files changed, 6 insertions(+), 7 deletions(-)
diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
index d43e1b5fcb36..6b1247664b35 100644
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -177,9 +177,8 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
struct io_mapped_ubuf *buf = ctx->user_bufs[i];
- unsigned int len = buf->ubuf_end - buf->ubuf;
- seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
+ seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, buf->len);
}
if (has_lock && !xa_empty(&ctx->personalities)) {
unsigned long index;
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 2477995e2d65..131bcdda577a 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -38,7 +38,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 static const struct io_mapped_ubuf dummy_ubuf = {
 	/* set invalid range, so io_import_fixed() fails meeting it */
 	.ubuf = -1UL,
-	.ubuf_end = 0,
+	.len = UINT_MAX,
 };
 
 int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
@@ -985,7 +985,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 	size = iov->iov_len;
 	/* store original address for later verification */
 	imu->ubuf = (unsigned long) iov->iov_base;
-	imu->ubuf_end = imu->ubuf + iov->iov_len;
+	imu->len = iov->iov_len;
 	imu->nr_bvecs = nr_pages;
 	imu->folio_shift = PAGE_SHIFT;
 	if (coalesced)
@@ -1086,7 +1086,7 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
 	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
 		return -EFAULT;
 	/* not inside the mapped region */
-	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
+	if (unlikely(buf_addr < imu->ubuf || buf_end > (imu->ubuf + imu->len)))
 		return -EFAULT;
 
 	/*
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index e290d2be3285..8ed588036210 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -42,11 +42,11 @@ struct io_rsrc_node {
 
 struct io_mapped_ubuf {
 	u64		ubuf;
-	u64		ubuf_end;
+	unsigned int	len;
 	unsigned int	nr_bvecs;
 	unsigned int	folio_shift;
-	unsigned long	acct_pages;
 	refcount_t	refs;
+	unsigned long	acct_pages;
 	struct bio_vec	bvec[] __counted_by(nr_bvecs);
 };
 
--
2.45.2
* Re: [PATCHSET 0/2] Shrink io_mapped_ubuf size
From: Kanchan Joshi @ 2024-09-17 5:16 UTC
To: Jens Axboe, io-uring; +Cc: cliang01.li
On 9/15/2024 8:52 PM, Jens Axboe wrote:
> Hi,
>
> This series shrinks struct io_mapped_ubuf from its current 48 bytes down
> to 32 bytes. No ill effects were observed in terms of performance.
Reviewed-by: Kanchan Joshi <[email protected]>