From: Dylan Yudaken <[email protected]>
To: Jens Axboe <[email protected]>,
	Pavel Begunkov <[email protected]>,
	<[email protected]>
Cc: <[email protected]>, Dylan Yudaken <[email protected]>
Subject: [PATCH for-next v2] io_uring: allow buffer recycling in READV
Date: Wed, 7 Sep 2022 09:51:52 -0700
Message-ID: <[email protected]>

In commit 934447a603b2 ("io_uring: do not recycle buffer in READV"), a
temporary fix was put in io_kbuf_recycle() to simply never recycle READV
buffers.

Instead, treat READV with REQ_F_BUFFER_SELECTED the same as a READ with
REQ_F_BUFFER_SELECTED. Since READV with buffer selection requires exactly
one iovec, the two cases are essentially the same.
To do this, add validation in io_prep_rw() to check that there really is
only one iovec, and also extract the length of the buffer at prep time.

This allows the io_iov_buffer_select codepaths to be removed, as they were
only used by the READV op.
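
For illustration only (not part of the patch): a minimal liburing sketch of
the case this change affects -- IORING_OP_READV with IOSQE_BUFFER_SELECT and
a single iovec. The buffer group id, buffer sizes and error handling below
are assumptions made up for the example, not taken from the patch.

#include <liburing.h>
#include <sys/uio.h>

#define BGID    1       /* arbitrary provided-buffer group id */
#define BUF_SZ  4096
#define NR_BUFS 8

static int readv_with_selected_buffer(struct io_uring *ring, int fd)
{
	static char bufs[NR_BUFS][BUF_SZ];
	struct iovec iov = { .iov_base = NULL, .iov_len = BUF_SZ };
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int res;

	/* Hand a group of buffers to the kernel to pick from. */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_provide_buffers(sqe, bufs, BUF_SZ, NR_BUFS, BGID, 0);
	io_uring_submit(ring);
	io_uring_wait_cqe(ring, &cqe);
	io_uring_cqe_seen(ring, cqe);

	/* READV with exactly one iovec; the kernel selects the buffer. */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;
	io_uring_submit(ring);

	if (io_uring_wait_cqe(ring, &cqe))
		return -1;
	res = cqe->res;
	/* On success, cqe->flags >> IORING_CQE_BUFFER_SHIFT is the buffer id. */
	io_uring_cqe_seen(ring, cqe);
	return res;
}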

Signed-off-by: Dylan Yudaken <[email protected]>
---

since v1:
 - no change, just rebased on latest 6.0 branch

 io_uring/kbuf.h |  12 -----
 io_uring/rw.c   | 134 +++++++++++++++++++-----------------------------
 2 files changed, 52 insertions(+), 94 deletions(-)

diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index 746fbf31a703..c23e15d7d3ca 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -86,18 +86,6 @@ static inline bool io_do_buffer_select(struct io_kiocb *req)
 
 static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 {
-	/*
-	 * READV uses fields in `struct io_rw` (len/addr) to stash the selected
-	 * buffer data. However if that buffer is recycled the original request
-	 * data stored in addr is lost. Therefore forbid recycling for now.
-	 */
-	if (req->opcode == IORING_OP_READV) {
-		if ((req->flags & REQ_F_BUFFER_RING) && req->buf_list) {
-			req->buf_list->head++;
-			req->buf_list = NULL;
-		}
-		return;
-	}
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		io_kbuf_recycle_legacy(req, issue_flags);
 	if (req->flags & REQ_F_BUFFER_RING)
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 1babd77da79c..74d467fe423d 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -33,6 +33,46 @@ static inline bool io_file_supports_nowait(struct io_kiocb *req)
 	return req->flags & REQ_F_SUPPORT_NOWAIT;
 }
 
+#ifdef CONFIG_COMPAT
+static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
+{
+	struct compat_iovec __user *uiov;
+	compat_ssize_t clen;
+
+	uiov = u64_to_user_ptr(rw->addr);
+	if (!access_ok(uiov, sizeof(*uiov)))
+		return -EFAULT;
+	if (__get_user(clen, &uiov->iov_len))
+		return -EFAULT;
+	if (clen < 0)
+		return -EINVAL;
+
+	rw->len = clen;
+	return 0;
+}
+#endif
+
+static int io_iov_buffer_select_prep(struct io_kiocb *req)
+{
+	struct iovec __user *uiov;
+	struct iovec iov;
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+
+	if (rw->len != 1)
+		return -EINVAL;
+
+#ifdef CONFIG_COMPAT
+	if (req->ctx->compat)
+		return io_iov_compat_buffer_select_prep(rw);
+#endif
+
+	uiov = u64_to_user_ptr(rw->addr);
+	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
+		return -EFAULT;
+	rw->len = iov.iov_len;
+	return 0;
+}
+
 int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
@@ -69,6 +109,16 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	rw->addr = READ_ONCE(sqe->addr);
 	rw->len = READ_ONCE(sqe->len);
 	rw->flags = READ_ONCE(sqe->rw_flags);
+
+	/* Have to do this validation here, as by the time io_read() runs,
+	 * rw->len might have changed due to buffer selection.
+	 */
+	if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
+		ret = io_iov_buffer_select_prep(req);
+		if (ret)
+			return ret;
+	}
+
 	return 0;
 }
 
@@ -273,79 +323,6 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 	return IOU_ISSUE_SKIP_COMPLETE;
 }
 
-#ifdef CONFIG_COMPAT
-static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
-				unsigned int issue_flags)
-{
-	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
-	struct compat_iovec __user *uiov;
-	compat_ssize_t clen;
-	void __user *buf;
-	size_t len;
-
-	uiov = u64_to_user_ptr(rw->addr);
-	if (!access_ok(uiov, sizeof(*uiov)))
-		return -EFAULT;
-	if (__get_user(clen, &uiov->iov_len))
-		return -EFAULT;
-	if (clen < 0)
-		return -EINVAL;
-
-	len = clen;
-	buf = io_buffer_select(req, &len, issue_flags);
-	if (!buf)
-		return -ENOBUFS;
-	rw->addr = (unsigned long) buf;
-	iov[0].iov_base = buf;
-	rw->len = iov[0].iov_len = (compat_size_t) len;
-	return 0;
-}
-#endif
-
-static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
-				      unsigned int issue_flags)
-{
-	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
-	struct iovec __user *uiov = u64_to_user_ptr(rw->addr);
-	void __user *buf;
-	ssize_t len;
-
-	if (copy_from_user(iov, uiov, sizeof(*uiov)))
-		return -EFAULT;
-
-	len = iov[0].iov_len;
-	if (len < 0)
-		return -EINVAL;
-	buf = io_buffer_select(req, &len, issue_flags);
-	if (!buf)
-		return -ENOBUFS;
-	rw->addr = (unsigned long) buf;
-	iov[0].iov_base = buf;
-	rw->len = iov[0].iov_len = len;
-	return 0;
-}
-
-static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
-				    unsigned int issue_flags)
-{
-	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
-
-	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
-		iov[0].iov_base = u64_to_user_ptr(rw->addr);
-		iov[0].iov_len = rw->len;
-		return 0;
-	}
-	if (rw->len != 1)
-		return -EINVAL;
-
-#ifdef CONFIG_COMPAT
-	if (req->ctx->compat)
-		return io_compat_import(req, iov, issue_flags);
-#endif
-
-	return __io_iov_buffer_select(req, iov, issue_flags);
-}
-
 static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
 				       struct io_rw_state *s,
 				       unsigned int issue_flags)
@@ -368,7 +345,8 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
 	buf = u64_to_user_ptr(rw->addr);
 	sqe_len = rw->len;
 
-	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
+	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE ||
+	    (req->flags & REQ_F_BUFFER_SELECT)) {
 		if (io_do_buffer_select(req)) {
 			buf = io_buffer_select(req, &sqe_len, issue_flags);
 			if (!buf)
@@ -384,14 +362,6 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
 	}
 
 	iovec = s->fast_iov;
-	if (req->flags & REQ_F_BUFFER_SELECT) {
-		ret = io_iov_buffer_select(req, iovec, issue_flags);
-		if (ret)
-			return ERR_PTR(ret);
-		iov_iter_init(iter, ddir, iovec, 1, iovec->iov_len);
-		return NULL;
-	}
-
 	ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
 			      req->ctx->compat);
 	if (unlikely(ret < 0))

base-commit: 336d28a8f38013a069f2d46e73aaa1880ef17a47
-- 
2.30.2


Thread overview: 2+ messages
2022-09-07 16:51 Dylan Yudaken [this message]
2022-09-07 16:57 ` [PATCH for-next v2] io_uring: allow buffer recycling in READV Jens Axboe
