From: Christoph Hellwig <[email protected]>
To: [email protected]
Cc: [email protected], [email protected]
Subject: [PATCH 2/6] io_uring: don't use ERR_PTR for user pointers
Date: Wed, 18 May 2022 10:40:01 +0200	[thread overview]
Message-ID: <[email protected]> (raw)
In-Reply-To: <[email protected]>

ERR_PTR abuses the high bits of a pointer to transport error information.
This is only safe for kernel pointers, not user pointers.  Fix
io_buffer_select and its helpers to return NULL on failure instead and get
rid of this abuse.
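
For context, a simplified userspace sketch of the ERR_PTR convention this
refers to, modelled on include/linux/err.h (illustrative only; the real
kernel helpers additionally carry __must_check, __force casts, sparse
annotations and unlikely() hints):

	/*
	 * Errors are encoded as pointer values in the last page of the
	 * address space.  The kernel guarantees no valid *kernel* pointer
	 * ever lands there, so IS_ERR() is unambiguous for kernel pointers.
	 * No such guarantee exists for user-supplied addresses, and pushing
	 * a void __user * through these plain void * helpers also defeats
	 * sparse's address-space checking -- hence returning NULL instead.
	 */
	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_ERRNO	4095

	static inline void *ERR_PTR(long error)
	{
		return (void *)error;
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long)ptr;
	}

	static inline bool IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	int main(void)
	{
		void *err = ERR_PTR(-ENOBUFS);

		/* Prints "IS_ERR: 1, PTR_ERR: -105" on Linux. */
		printf("IS_ERR: %d, PTR_ERR: %ld\n", IS_ERR(err), PTR_ERR(err));
		return 0;
	}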

Signed-off-by: Christoph Hellwig <[email protected]>
---
 fs/io_uring.c | 83 +++++++++++++++++++++++----------------------------
 1 file changed, 37 insertions(+), 46 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 99862cbc1041c..abb7108258f96 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3797,11 +3797,8 @@ static int io_buffer_add_list(struct io_ring_ctx *ctx,
 }
 
 static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
-					      struct io_buffer_list *bl,
-					      unsigned int issue_flags)
+					      struct io_buffer_list *bl)
 {
-	void __user *ret = ERR_PTR(-ENOBUFS);
-
 	if (!list_empty(&bl->buf_list)) {
 		struct io_buffer *kbuf;
 
@@ -3812,11 +3809,9 @@ static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
 		req->flags |= REQ_F_BUFFER_SELECTED;
 		req->kbuf = kbuf;
 		req->buf_index = kbuf->bid;
-		ret = u64_to_user_ptr(kbuf->addr);
+		return u64_to_user_ptr(kbuf->addr);
 	}
-
-	io_ring_submit_unlock(req->ctx, issue_flags);
-	return ret;
+	return NULL;
 }
 
 static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
@@ -3829,7 +3824,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 
 	if (unlikely(smp_load_acquire(&br->tail) == head)) {
 		io_ring_submit_unlock(req->ctx, issue_flags);
-		return ERR_PTR(-ENOBUFS);
+		return NULL;
 	}
 
 	head &= bl->mask;
@@ -3847,22 +3842,19 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 	req->buf_list = bl;
 	req->buf_index = buf->bid;
 
-	if (!(issue_flags & IO_URING_F_UNLOCKED))
-		return u64_to_user_ptr(buf->addr);
-
-	/*
-	 * If we came in unlocked, we have no choice but to
-	 * consume the buffer here. This does mean it'll be
-	 * pinned until the IO completes. But coming in
-	 * unlocked means we're in io-wq context, hence there
-	 * should be no further retry. For the locked case, the
-	 * caller must ensure to call the commit when the
-	 * transfer completes (or if we get -EAGAIN and must
-	 * poll or retry).
-	 */
-	req->buf_list = NULL;
-	bl->head++;
-	io_ring_submit_unlock(req->ctx, issue_flags);
+	if (issue_flags & IO_URING_F_UNLOCKED) {
+		/*
+		 * If we came in unlocked, we have no choice but to consume the
+		 * buffer here. This does mean it'll be pinned until the IO
+		 * completes. But coming in unlocked means we're in io-wq
+		 * context, hence there should be no further retry. For the
+		 * locked case, the caller must ensure to call the commit when
+		 * the transfer completes (or if we get -EAGAIN and must poll
+		 * or retry).
+		 */
+		req->buf_list = NULL;
+		bl->head++;
+	}
 	return u64_to_user_ptr(buf->addr);
 }
 
@@ -3871,20 +3863,19 @@ static void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_buffer_list *bl;
+	void __user *ret = NULL;
 
 	io_ring_submit_lock(req->ctx, issue_flags);
 
 	bl = io_buffer_get_list(ctx, req->buf_index);
-	if (unlikely(!bl)) {
-		io_ring_submit_unlock(req->ctx, issue_flags);
-		return ERR_PTR(-ENOBUFS);
+	if (likely(bl)) {
+		if (bl->buf_nr_pages)
+			ret = io_ring_buffer_select(req, len, bl, issue_flags);
+		else
+			ret = io_provided_buffer_select(req, len, bl);
 	}
-
-	/* selection helpers drop the submit lock again, if needed */
-	if (bl->buf_nr_pages)
-		return io_ring_buffer_select(req, len, bl, issue_flags);
-
-	return io_provided_buffer_select(req, len, bl, issue_flags);
+	io_ring_submit_unlock(req->ctx, issue_flags);
+	return ret;
 }
 
 #ifdef CONFIG_COMPAT
@@ -3906,8 +3897,8 @@ static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
 
 	len = clen;
 	buf = io_buffer_select(req, &len, issue_flags);
-	if (IS_ERR(buf))
-		return PTR_ERR(buf);
+	if (!buf)
+		return -ENOBUFS;
 	req->rw.addr = (unsigned long) buf;
 	iov[0].iov_base = buf;
 	req->rw.len = iov[0].iov_len = (compat_size_t) len;
@@ -3929,8 +3920,8 @@ static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
 	if (len < 0)
 		return -EINVAL;
 	buf = io_buffer_select(req, &len, issue_flags);
-	if (IS_ERR(buf))
-		return PTR_ERR(buf);
+	if (!buf)
+		return -ENOBUFS;
 	req->rw.addr = (unsigned long) buf;
 	iov[0].iov_base = buf;
 	req->rw.len = iov[0].iov_len = len;
@@ -3987,8 +3978,8 @@ static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req,
 	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
 		if (io_do_buffer_select(req)) {
 			buf = io_buffer_select(req, &sqe_len, issue_flags);
-			if (IS_ERR(buf))
-				return ERR_CAST(buf);
+			if (!buf)
+				return ERR_PTR(-ENOBUFS);
 			req->rw.addr = (unsigned long) buf;
 			req->rw.len = sqe_len;
 		}
@@ -5259,8 +5250,8 @@ static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
 		size_t len = 1;
 
 		buf = io_buffer_select(req, &len, issue_flags);
-		if (IS_ERR(buf))
-			return PTR_ERR(buf);
+		if (!buf)
+			return -ENOBUFS;
 	}
 
 	if (!(req->ctx->flags & IORING_SETUP_CQE32))
@@ -6394,8 +6385,8 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 		void __user *buf;
 
 		buf = io_buffer_select(req, &sr->len, issue_flags);
-		if (IS_ERR(buf))
-			return PTR_ERR(buf);
+		if (!buf)
+			return -ENOBUFS;
 		kmsg->fast_iov[0].iov_base = buf;
 		kmsg->fast_iov[0].iov_len = sr->len;
 		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
@@ -6464,8 +6455,8 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 		void __user *buf;
 
 		buf = io_buffer_select(req, &sr->len, issue_flags);
-		if (IS_ERR(buf))
-			return PTR_ERR(buf);
+		if (!buf)
+			return -ENOBUFS;
 		sr->buf = buf;
 	}
 
-- 
2.30.2


Thread overview: 8+ messages
2022-05-18  8:39 sparse fixes for io_uring Christoph Hellwig
2022-05-18  8:40 ` [PATCH 1/6] io_uring: use a rwf_t for io_rw.flags Christoph Hellwig
2022-05-18 12:27   ` Jens Axboe
2022-05-18  8:40 ` Christoph Hellwig [this message]
2022-05-18  8:40 ` [PATCH 3/6] io_uring: drop a spurious inline on a forward declaration Christoph Hellwig
2022-05-18  8:40 ` [PATCH 4/6] io_uring: make apoll_events a __poll_t Christoph Hellwig
2022-05-18  8:40 ` [PATCH 5/6] io_uring: consistently use the EPOLL* defines Christoph Hellwig
2022-05-18  8:40 ` [PATCH 6/6] io_uring: use rcu_dereference in io_close Christoph Hellwig
