From: Jens Axboe <[email protected]>
To: Jann Horn <[email protected]>
Cc: io-uring <[email protected]>, [email protected]
Subject: Re: [PATCH 3/3] io_uring: support buffer selection
Date: Mon, 24 Feb 2020 10:06:24 -0700
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>

On 2/24/20 8:57 AM, Jens Axboe wrote:
>> ... and call io_send_recv_buffer_select() from here without holding
>> any extra locks in this function. This function is then called from
>> io_issue_sqe() (no extra locks held there either), which is called
>> from __io_queue_sqe() (no extra locks AFAICS), which is called from
>> places like task work.
>>
>> Am I missing something?
> 
> Submission is all done under the ctx->uring_lock mutex, but the async
> workers don't hold it. So for the normal path we should be fine, as
> we'll be serialized there. We just need to ensure we do the buffer
> select when we prep the request for async execution, so the workers
> never have to do it. Either that, or ensure the workers grab the mutex
> before doing the buffer grab (less ideal).
> 
>> It might in general be helpful to have more lockdep assertions in this
>> code, both to help the reader understand the locking rules and to help
>> verify locking correctness.
> 
> Agree, that'd be a good addition.

Tried to cater to both; here's an incremental that also grabs the
uring_lock mutex for the async offload case, and adds a lockdep
annotation for this particular case.


diff --git a/fs/io_uring.c b/fs/io_uring.c
index ba8d4e2d9f99..792ef01a521c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2173,7 +2173,7 @@ static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
 }
 
 static struct io_buffer *io_buffer_select(struct io_kiocb *req, int gid,
-					  void *buf)
+					  void *buf, bool needs_lock)
 {
 	struct list_head *list;
 	struct io_buffer *kbuf;
@@ -2181,17 +2181,34 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, int gid,
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		return buf;
 
+	/*
+	 * "Normal" inline submissions always hold the uring_lock, since we
+	 * grab it from the system call. Same is true for the SQPOLL offload.
+	 * The only exception is when we've detached the request and issue it
+	 * from an async worker thread; grab the lock for that case.
+	 */
+	if (needs_lock)
+		mutex_lock(&req->ctx->uring_lock);
+
+	lockdep_assert_held(&req->ctx->uring_lock);
+
 	list = idr_find(&req->ctx->io_buffer_idr, gid);
-	if (!list || list_empty(list))
-		return ERR_PTR(-ENOBUFS);
+	if (list && !list_empty(list)) {
+		kbuf = list_first_entry(list, struct io_buffer, list);
+		list_del(&kbuf->list);
+	} else {
+		kbuf = ERR_PTR(-ENOBUFS);
+	}
+
+	if (needs_lock)
+		mutex_unlock(&req->ctx->uring_lock);
 
-	kbuf = list_first_entry(list, struct io_buffer, list);
-	list_del(&kbuf->list);
 	return kbuf;
 }
 
 static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
-			       struct iovec **iovec, struct iov_iter *iter)
+			       struct iovec **iovec, struct iov_iter *iter,
+			       bool needs_lock)
 {
 	void __user *buf = u64_to_user_ptr(req->rw.addr);
 	size_t sqe_len = req->rw.len;
@@ -2215,7 +2232,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 			int gid;
 
 			gid = (int) (unsigned long) req->rw.kiocb.private;
-			kbuf = io_buffer_select(req, gid, buf);
+			kbuf = io_buffer_select(req, gid, buf, needs_lock);
 			if (IS_ERR(kbuf)) {
 				*iovec = NULL;
 				return PTR_ERR(kbuf);
@@ -2369,7 +2386,7 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	io = req->io;
 	io->rw.iov = io->rw.fast_iov;
 	req->io = NULL;
-	ret = io_import_iovec(READ, req, &io->rw.iov, &iter);
+	ret = io_import_iovec(READ, req, &io->rw.iov, &iter, !force_nonblock);
 	req->io = io;
 	if (ret < 0)
 		return ret;
@@ -2387,7 +2404,7 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
 	size_t iov_count;
 	ssize_t io_size, ret;
 
-	ret = io_import_iovec(READ, req, &iovec, &iter);
+	ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock);
 	if (ret < 0)
 		return ret;
 
@@ -2459,7 +2476,7 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	io = req->io;
 	io->rw.iov = io->rw.fast_iov;
 	req->io = NULL;
-	ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter);
+	ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter, !force_nonblock);
 	req->io = io;
 	if (ret < 0)
 		return ret;
@@ -2477,7 +2494,7 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
 	size_t iov_count;
 	ssize_t ret, io_size;
 
-	ret = io_import_iovec(WRITE, req, &iovec, &iter);
+	ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock);
 	if (ret < 0)
 		return ret;
 
@@ -2910,7 +2927,8 @@ static int io_provide_buffer_prep(struct io_kiocb *req,
 	return 0;
 }
 
-static int io_provide_buffer(struct io_kiocb *req, struct io_kiocb **nxt)
+static int io_provide_buffer(struct io_kiocb *req, struct io_kiocb **nxt,
+			     bool force_nonblock)
 {
 	struct io_provide_buf *p = &req->pbuf;
 	struct io_ring_ctx *ctx = req->ctx;
@@ -2918,6 +2936,17 @@ static int io_provide_buffer(struct io_kiocb *req, struct io_kiocb **nxt)
 	struct io_buffer *buf;
 	int ret = 0;
 
+	/*
+	 * "Normal" inline submissions always hold the uring_lock, since we
+	 * grab it from the system call. Same is true for the SQPOLL offload.
+	 * The only exception is when we've detached the request and issue it
+	 * from an async worker thread; grab the lock for that case.
+	 */
+	if (!force_nonblock)
+		mutex_lock(&ctx->uring_lock);
+
+	lockdep_assert_held(&ctx->uring_lock);
+
 	list = idr_find(&ctx->io_buffer_idr, p->gid);
 	if (!list) {
 		list = kmalloc(sizeof(*list), GFP_KERNEL);
@@ -2950,6 +2979,8 @@ static int io_provide_buffer(struct io_kiocb *req, struct io_kiocb **nxt)
 	list_add(&buf->list, list);
 	ret = buf->bid;
 out:
+	if (!force_nonblock)
+		mutex_unlock(&ctx->uring_lock);
 	if (ret < 0)
 		req_set_fail_links(req);
 	io_cqring_add_event(req, ret);
@@ -3387,14 +3418,15 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
 
 static struct io_buffer *io_send_recv_buffer_select(struct io_kiocb *req,
 						    struct io_buffer **kbuf,
-						    int *cflags)
+						    int *cflags,
+						    bool needs_lock)
 {
 	struct io_sr_msg *sr = &req->sr_msg;
 
 	if (!(req->flags & REQ_F_BUFFER_SELECT))
 		return req->sr_msg.buf;
 
-	*kbuf = io_buffer_select(req, sr->gid, sr->kbuf);
+	*kbuf = io_buffer_select(req, sr->gid, sr->kbuf, needs_lock);
 	if (IS_ERR(*kbuf))
 		return *kbuf;
 
@@ -3427,7 +3459,8 @@ static int io_send(struct io_kiocb *req, struct io_kiocb **nxt,
 		void __user *buf;
 		unsigned flags;
 
-		buf = io_send_recv_buffer_select(req, &kbuf, &cflags);
+		buf = io_send_recv_buffer_select(req, &kbuf, &cflags,
+							!force_nonblock);
 		if (IS_ERR(buf)) {
 			ret = PTR_ERR(buf);
 			goto out;
@@ -3592,7 +3625,8 @@ static int io_recv(struct io_kiocb *req, struct io_kiocb **nxt,
 		void __user *buf;
 		unsigned flags;
 
-		buf = io_send_recv_buffer_select(req, &kbuf, &cflags);
+		buf = io_send_recv_buffer_select(req, &kbuf, &cflags,
+							!force_nonblock);
 		if (IS_ERR(buf)) {
 			ret = PTR_ERR(buf);
 			goto out;
@@ -4903,7 +4937,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			if (ret)
 				break;
 		}
-		ret = io_provide_buffer(req, nxt);
+		ret = io_provide_buffer(req, nxt, force_nonblock);
 		break;
 	default:
 		ret = -EINVAL;
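
To spell out why needs_lock is passed as !force_nonblock (illustration
only, not part of the patch): inline submission and the SQPOLL thread
issue requests with force_nonblock set and already hold ctx->uring_lock,
while the async worker retries with force_nonblock cleared and holds no
locks, so that is exactly the case where io_buffer_select() must take
the mutex itself. A hypothetical helper spelling out that mapping would
just be:

/*
 * Sketch only, not in the patch: how the lock requirement falls out of
 * force_nonblock.
 *
 *   inline submission / SQPOLL: force_nonblock == true,
 *                               uring_lock already held -> needs_lock == false
 *   async worker:               force_nonblock == false,
 *                               no locks held           -> needs_lock == true
 */
static inline bool io_rw_needs_lock(bool force_nonblock)
{
	return !force_nonblock;
}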

-- 
Jens Axboe


