From: Pavel Begunkov <[email protected]>
To: [email protected]
Cc: [email protected], [email protected]
Subject: [RFC 4/7] io_uring/rsrc: introduce struct iou_buf_desc
Date: Sun, 30 Apr 2023 10:35:26 +0100	[thread overview]
Message-ID: <a2b80cf8a8fee8cfc8840f45ece4c0842ad48d74.1682701588.git.asml.silence@gmail.com> (raw)
In-Reply-To: <[email protected]>

Add struct iou_buf_desc, which will be used for the new get_buf
operations. It'll be handed over to a file via a new file operation to
be filled. Afterwards its contents should eventually end up in struct
io_mapped_ubuf, so to avoid extra copies place the descriptor directly
inside struct io_mapped_ubuf.
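
As a rough illustration only (not part of this patch), a provider
filling the descriptor could look something like the sketch below;
example_fill_desc() and its arguments are made up for demonstration:

	/*
	 * Hypothetical sketch: fill desc->bvec with up to max_bvecs
	 * pages and record how many entries were used in nr_bvecs.
	 * Assumes <linux/bvec.h>, <linux/mm.h> and the new
	 * <linux/io_uring.h> definition of struct iou_buf_desc.
	 */
	static int example_fill_desc(struct iou_buf_desc *desc,
				     struct page **pages, unsigned nr)
	{
		unsigned i;

		if (nr > desc->max_bvecs)
			return -EOVERFLOW;
		for (i = 0; i < nr; i++) {
			desc->bvec[i].bv_page = pages[i];
			desc->bvec[i].bv_len = PAGE_SIZE;
			desc->bvec[i].bv_offset = 0;
		}
		desc->nr_bvecs = nr;
		return 0;
	}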

Signed-off-by: Pavel Begunkov <[email protected]>
---
 include/linux/io_uring.h |  6 ++++++
 io_uring/rsrc.c          | 13 +++++++------
 io_uring/rsrc.h          | 11 +++++------
 3 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 35b9328ca335..fddb5d52b776 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -22,6 +22,12 @@ enum io_uring_cmd_flags {
 	IO_URING_F_IOPOLL		= (1 << 10),
 };
 
+struct iou_buf_desc {
+	unsigned		nr_bvecs;
+	unsigned		max_bvecs;
+	struct bio_vec		*bvec;
+};
+
 struct io_uring_cmd {
 	struct file	*file;
 	const void	*cmd;
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index b6305ae3538c..0edcebb6b5cb 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -84,7 +84,7 @@ static void io_put_reg_buf(struct io_ring_ctx *ctx, struct io_mapped_ubuf *imu)
 {
 	lockdep_assert_held(&ctx->uring_lock);
 
-	if ((imu->max_bvecs != IO_BUF_CACHE_MAX_BVECS) ||
+	if ((imu->desc.max_bvecs != IO_BUF_CACHE_MAX_BVECS) ||
 	    !io_alloc_cache_put(&ctx->reg_buf_cache, &imu->cache))
 		kvfree(imu);
 }
@@ -109,7 +109,8 @@ static struct io_mapped_ubuf *io_alloc_reg_buf(struct io_ring_ctx *ctx,
 			goto do_alloc;
 		imu = container_of(entry, struct io_mapped_ubuf, cache);
 	}
-	imu->max_bvecs = nr_bvecs;
+	imu->desc.bvec = imu->bvec;
+	imu->desc.max_bvecs = nr_bvecs;
 	return imu;
 }
 
@@ -168,7 +169,7 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slo
 	unsigned int i;
 
 	if (imu != ctx->dummy_ubuf) {
-		for (i = 0; i < imu->nr_bvecs; i++)
+		for (i = 0; i < imu->desc.nr_bvecs; i++)
 			unpin_user_page(imu->bvec[i].bv_page);
 		if (imu->acct_pages)
 			io_unaccount_mem(ctx, imu->acct_pages);
@@ -1020,7 +1021,7 @@ static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
 	for (i = 0; i < ctx->nr_user_bufs; i++) {
 		struct io_mapped_ubuf *imu = ctx->user_bufs[i];
 
-		for (j = 0; j < imu->nr_bvecs; j++) {
+		for (j = 0; j < imu->desc.nr_bvecs; j++) {
 			if (!PageCompound(imu->bvec[j].bv_page))
 				continue;
 			if (compound_head(imu->bvec[j].bv_page) == hpage)
@@ -1184,7 +1185,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 	/* store original address for later verification */
 	imu->ubuf = (unsigned long) iov->iov_base;
 	imu->ubuf_end = imu->ubuf + iov->iov_len;
-	imu->nr_bvecs = nr_pages;
+	imu->desc.nr_bvecs = nr_pages;
 	imu->dir_mask = (1U << ITER_SOURCE) | (1U << ITER_DEST);
 	*pimu = imu;
 	ret = 0;
@@ -1292,7 +1293,7 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
 	 * and advance us to the beginning.
 	 */
 	offset = buf_addr - imu->ubuf;
-	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);
+	iov_iter_bvec(iter, ddir, imu->bvec, imu->desc.nr_bvecs, offset + len);
 
 	if (offset) {
 		/*
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 10daa25d9194..9ac10b3d25ac 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -54,12 +54,11 @@ struct io_mapped_ubuf {
 		struct io_cache_entry		cache;
 		u64				ubuf;
 	};
-	u64		ubuf_end;
-	unsigned int	nr_bvecs;
-	unsigned int	max_bvecs;
-	unsigned int	dir_mask;
-	unsigned long	acct_pages;
-	struct bio_vec	bvec[];
+	u64			ubuf_end;
+	struct iou_buf_desc	desc;
+	unsigned int		dir_mask;
+	unsigned long		acct_pages;
+	struct bio_vec		bvec[];
 };
 
 void io_rsrc_put_tw(struct callback_head *cb);
-- 
2.40.0



Thread overview: 14+ messages
2023-04-30  9:35 [RFC 0/7] Rethinking splice Pavel Begunkov
2023-04-30  9:35 ` [RFC 1/7] io_uring: add io_mapped_ubuf caches Pavel Begunkov
2023-04-30  9:35 ` [RFC 2/7] io_uring: add reg-buffer data directions Pavel Begunkov
2023-04-30  9:35 ` [RFC 3/7] io_uring: fail loop_rw_iter with pure bvec bufs Pavel Begunkov
2023-04-30  9:35 ` Pavel Begunkov [this message]
2023-04-30  9:35 ` [RFC 5/7] io_uring/rsrc: add buffer release callbacks Pavel Begunkov
2023-04-30  9:35 ` [RFC 6/7] io_uring/rsrc: introduce helper installing one buffer Pavel Begunkov
2023-04-30  9:35 ` [RFC 7/7] io_uring,fs: introduce IORING_OP_GET_BUF Pavel Begunkov
2023-05-02 14:57   ` Ming Lei
2023-05-02 15:20     ` Ming Lei
2023-05-03 14:54     ` Pavel Begunkov
2023-05-04  2:06       ` Ming Lei
2023-05-08  2:30         ` Pavel Begunkov
2023-05-17  4:05           ` Ming Lei
