From: Jens Axboe <[email protected]>
To: [email protected]
Cc: [email protected], [email protected],
[email protected], [email protected],
Jens Axboe <[email protected]>
Subject: [PATCH 12/12] io_uring: support true async buffered reads, if file provides it
Date: Tue, 26 May 2020 13:51:23 -0600
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>

If the file is flagged with FMODE_BUF_RASYNC, then we don't have to punt
the buffered read to an io-wq worker. Instead we can rely on the page
unlocking callbacks to support retry-based async IO. This is a lot more
efficient than doing async thread offload.
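
For context (this opt-in is wired up in the earlier patches of this
series for ext4, block, xfs, and btrfs, not here): a filesystem declares
support by setting FMODE_BUF_RASYNC at open time. A minimal sketch, with
myfs_file_open as a hypothetical open handler:

	static int myfs_file_open(struct inode *inode, struct file *filp)
	{
		/* declare support for retry-based async buffered reads */
		filp->f_mode |= FMODE_BUF_RASYNC;
		return generic_file_open(inode, filp);
	}
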
The retry is done similarly to how we handle poll-based retry: from the
unlock callback, we simply queue the retry to a task_work-based handler.
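
From userspace nothing changes; an ordinary buffered read just completes
without the io-wq round trip when the file supports it. As a rough
liburing sketch (not part of this patch; "testfile" and the queue depth
of 8 are arbitrary, and it assumes a liburing providing
io_uring_prep_read):

	#include <fcntl.h>
	#include <stdio.h>
	#include <liburing.h>

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		char buf[4096];
		int fd;

		if (io_uring_queue_init(8, &ring, 0) < 0)
			return 1;
		fd = open("testfile", O_RDONLY);	/* buffered: no O_DIRECT */
		if (fd < 0)
			return 1;

		/* queue one buffered read of 4k at offset 0 */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
		io_uring_submit(&ring);

		if (!io_uring_wait_cqe(&ring, &cqe)) {
			printf("read: %d\n", cqe->res);
			io_uring_cqe_seen(&ring, cqe);
		}
		io_uring_queue_exit(&ring);
		return 0;
	}
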
Signed-off-by: Jens Axboe <[email protected]>
---
 fs/io_uring.c | 130 ++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 126 insertions(+), 4 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index e6865afa8467..95df63b0b2ce 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -79,6 +79,7 @@
 #include <linux/fs_struct.h>
 #include <linux/splice.h>
 #include <linux/task_work.h>
+#include <linux/pagemap.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/io_uring.h>
@@ -498,6 +499,8 @@ struct io_async_rw {
 	struct iovec		*iov;
 	ssize_t			nr_segs;
 	ssize_t			size;
+	struct wait_page_queue	wpq;
+	struct callback_head	task_work;
 };
 
 struct io_async_ctx {
@@ -2568,6 +2571,119 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	return 0;
 }
 
+static void io_async_buf_cancel(struct callback_head *cb)
+{
+	struct io_async_rw *rw;
+	struct io_ring_ctx *ctx;
+	struct io_kiocb *req;
+
+	rw = container_of(cb, struct io_async_rw, task_work);
+	req = rw->wpq.wait.private;
+	ctx = req->ctx;
+
+	spin_lock_irq(&ctx->completion_lock);
+	io_cqring_fill_event(req, -ECANCELED);
+	io_commit_cqring(ctx);
+	spin_unlock_irq(&ctx->completion_lock);
+
+	io_cqring_ev_posted(ctx);
+	req_set_fail_links(req);
+	io_double_put_req(req);
+}
+
+static void io_async_buf_retry(struct callback_head *cb)
+{
+	struct io_async_rw *rw;
+	struct io_ring_ctx *ctx;
+	struct io_kiocb *req;
+
+	rw = container_of(cb, struct io_async_rw, task_work);
+	req = rw->wpq.wait.private;
+	ctx = req->ctx;
+
+	__set_current_state(TASK_RUNNING);
+	mutex_lock(&ctx->uring_lock);
+	__io_queue_sqe(req, NULL);
+	mutex_unlock(&ctx->uring_lock);
+}
+
+static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
+			     int sync, void *arg)
+{
+	struct wait_page_queue *wpq;
+	struct io_kiocb *req = wait->private;
+	struct io_async_rw *rw = &req->io->rw;
+	struct wait_page_key *key = arg;
+	struct task_struct *tsk;
+	int ret;
+
+	wpq = container_of(wait, struct wait_page_queue, wait);
+
+	ret = wake_page_match(wpq, key);
+	if (ret != 1)
+		return ret;
+
+	list_del_init(&wait->entry);
+
+	init_task_work(&rw->task_work, io_async_buf_retry);
+	/* submit ref gets dropped, acquire a new one */
+	refcount_inc(&req->refs);
+	tsk = req->task;
+	ret = task_work_add(tsk, &rw->task_work, true);
+	if (unlikely(ret)) {
+		/* queue just for cancelation */
+		init_task_work(&rw->task_work, io_async_buf_cancel);
+		tsk = io_wq_get_task(req->ctx->io_wq);
+		task_work_add(tsk, &rw->task_work, true);
+	}
+	wake_up_process(tsk);
+	return 1;
+}
+
+static bool io_rw_should_retry(struct io_kiocb *req)
+{
+	struct kiocb *kiocb = &req->rw.kiocb;
+	int ret;
+
+	/* never retry for NOWAIT, we just complete with -EAGAIN */
+	if (req->flags & REQ_F_NOWAIT)
+		return false;
+
+	/* already tried, or we're doing O_DIRECT */
+	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_WAITQ))
+		return false;
+	/*
+	 * just use poll if we can, and don't attempt if the fs doesn't
+	 * support callback based unlocks
+	 */
+	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
+		return false;
+
+	/*
+	 * If request type doesn't require req->io to defer in general,
+	 * we need to allocate it here
+	 */
+	if (!req->io && __io_alloc_async_ctx(req))
+		return false;
+
+	ret = kiocb_wait_page_queue_init(kiocb, &req->io->rw.wpq,
+					 io_async_buf_func, req);
+	if (!ret) {
+		get_task_struct(current);
+		req->task = current;
+		return true;
+	}
+
+	return false;
+}
+
+static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
+{
+	if (req->file->f_op->read_iter)
+		return call_read_iter(req->file, &req->rw.kiocb, iter);
+	return loop_rw_iter(READ, req->file, &req->rw.kiocb, iter);
+}
+
 static int io_read(struct io_kiocb *req, bool force_nonblock)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
@@ -2601,10 +2717,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
 	if (!ret) {
 		ssize_t ret2;
 
-		if (req->file->f_op->read_iter)
-			ret2 = call_read_iter(req->file, kiocb, &iter);
-		else
-			ret2 = loop_rw_iter(READ, req->file, kiocb, &iter);
+		ret2 = io_iter_do_read(req, &iter);
 
 		/* Catch -EAGAIN return for forced non-blocking submission */
 		if (!force_nonblock || ret2 != -EAGAIN) {
@@ -2619,6 +2732,15 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
 			if (!(req->flags & REQ_F_NOWAIT) &&
 			    !file_can_poll(req->file))
 				req->flags |= REQ_F_MUST_PUNT;
+			/* if we can retry, do so with the callbacks armed */
+			if (io_rw_should_retry(req)) {
+				ret2 = io_iter_do_read(req, &iter);
+				if (ret2 != -EAGAIN) {
+					kiocb_done(kiocb, ret2);
+					goto out_free;
+				}
+			}
+			kiocb->ki_flags &= ~IOCB_WAITQ;
 			return -EAGAIN;
 		}
 	}
--
2.26.2