From 941e155f8386b76f5c9788d472a47bf2db4c8aa3 Mon Sep 17 00:00:00 2001
From: Jens Axboe <axboe@kernel.dk>
Date: Mon, 22 Jun 2020 11:09:46 -0600
Subject: [PATCH 5/5] io_uring: enable READ/WRITE to use deferred completion
 list

A bit more surgery required here, as completions are generally done
through the kiocb->ki_complete() callback, even if they complete inline.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 fs/io_uring.c | 38 +++++++++++++++++++++++---------------
 1 file changed, 23 insertions(+), 15 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0c5787f1a376..d415a6126675 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1982,7 +1982,8 @@ static inline void req_set_fail_links(struct io_kiocb *req)
 	req->flags |= REQ_F_FAIL_LINK;
 }
 
-static void io_complete_rw_common(struct kiocb *kiocb, long res)
+static void io_complete_rw_common(struct kiocb *kiocb, long res,
+				  struct list_head *comp_list)
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 	int cflags = 0;
@@ -1994,7 +1995,7 @@ static void io_complete_rw_common(struct kiocb *kiocb, long res)
 		req_set_fail_links(req);
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		cflags = io_put_kbuf(req);
-	io_cqring_add_event(req, res, cflags);
+	__io_req_complete(req, res, cflags, comp_list);
 }
 
 static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
@@ -2097,14 +2098,18 @@ static bool io_rw_reissue(struct io_kiocb *req, long res)
 	return false;
 }
 
+static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
+			     struct list_head *comp_list)
+{
+	if (!io_rw_reissue(req, res))
+		io_complete_rw_common(&req->rw.kiocb, res, comp_list);
+}
+
 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
-	if (!io_rw_reissue(req, res)) {
-		io_complete_rw_common(kiocb, res);
-		io_put_req(req);
-	}
+	__io_complete_rw(req, res, res2, NULL);
 }
 
 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
@@ -2338,14 +2343,15 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 	}
 }
 
-static void kiocb_done(struct kiocb *kiocb, ssize_t ret)
+static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
+		       struct list_head *comp_list)
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
 	if (req->flags & REQ_F_CUR_POS)
 		req->file->f_pos = kiocb->ki_pos;
 	if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
-		io_complete_rw(kiocb, ret, 0);
+		__io_complete_rw(req, ret, 0, comp_list);
 	else
 		io_rw_done(kiocb, ret);
 }
@@ -2881,7 +2887,8 @@ static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
 	return loop_rw_iter(READ, req->file, &req->rw.kiocb, iter);
 }
 
-static int io_read(struct io_kiocb *req, bool force_nonblock)
+static int io_read(struct io_kiocb *req, bool force_nonblock,
+		   struct list_head *comp_list)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct kiocb *kiocb = &req->rw.kiocb;
@@ -2916,7 +2923,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
 
 	/* Catch -EAGAIN return for forced non-blocking submission */
 	if (!force_nonblock || (ret2 != -EAGAIN && ret2 != -EIO)) {
-		kiocb_done(kiocb, ret2);
+		kiocb_done(kiocb, ret2, comp_list);
 	} else {
 		iter.count = iov_count;
 		iter.nr_segs = nr_segs;
@@ -2931,7 +2938,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
 		if (ret2 == -EIOCBQUEUED) {
 			goto out_free;
 		} else if (ret2 != -EAGAIN) {
-			kiocb_done(kiocb, ret2);
+			kiocb_done(kiocb, ret2, comp_list);
 			goto out_free;
 		}
 	}
@@ -2977,7 +2984,8 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	return 0;
 }
 
-static int io_write(struct io_kiocb *req, bool force_nonblock)
+static int io_write(struct io_kiocb *req, bool force_nonblock,
+		    struct list_head *comp_list)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct kiocb *kiocb = &req->rw.kiocb;
@@ -3046,7 +3054,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock)
 	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
 		ret2 = -EAGAIN;
 	if (!force_nonblock || ret2 != -EAGAIN) {
-		kiocb_done(kiocb, ret2);
+		kiocb_done(kiocb, ret2, comp_list);
 	} else {
 		iter.count = iov_count;
 		iter.nr_segs = nr_segs;
@@ -5372,7 +5380,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			if (ret < 0)
 				break;
 		}
-		ret = io_read(req, force_nonblock);
+		ret = io_read(req, force_nonblock, comp_list);
 		break;
 	case IORING_OP_WRITEV:
 	case IORING_OP_WRITE_FIXED:
@@ -5382,7 +5390,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			if (ret < 0)
 				break;
 		}
-		ret = io_write(req, force_nonblock);
+		ret = io_write(req, force_nonblock, comp_list);
 		break;
 	case IORING_OP_FSYNC:
 		if (sqe) {
-- 
2.27.0
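
For readers following the series outside the kernel tree, the standalone
sketch below distills the pattern this patch wires into the READ/WRITE path:
a completion either posts its event immediately (comp_list == NULL, the
io_complete_rw() callback case) or is queued on a caller-provided list and
flushed as one batch. This is a simplified illustration, not kernel code:
the names request, complete_request(), flush_completions() and post_event()
are hypothetical stand-ins for io_kiocb, __io_req_complete() and the
CQ-ring posting the kernel does under its completion lock.

/*
 * Minimal userspace sketch of a deferred completion list. All names here
 * are illustrative; only the NULL-vs-list dispatch mirrors the patch.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

struct request {
	long res;			/* completion result */
	struct list_head list;		/* link on the deferred list */
};

static void post_event(struct request *req)
{
	printf("CQE posted: res=%ld\n", req->res);
}

/* NULL comp_list: complete immediately. Otherwise: defer to the batch. */
static void complete_request(struct request *req, long res,
			     struct list_head *comp_list)
{
	req->res = res;
	if (!comp_list)
		post_event(req);
	else
		list_add_tail(&req->list, comp_list);
}

/* One pass posts every deferred event: one flush instead of N wakeups. */
static void flush_completions(struct list_head *comp_list)
{
	struct list_head *pos;

	for (pos = comp_list->next; pos != comp_list; pos = pos->next) {
		struct request *req = (void *)((char *)pos -
					       offsetof(struct request, list));
		post_event(req);
	}
	list_init(comp_list);
}

int main(void)
{
	struct list_head comp_list;
	struct request a, b;

	list_init(&comp_list);
	complete_request(&a, 512, &comp_list);	/* deferred */
	complete_request(&b, 4096, &comp_list);	/* deferred */
	flush_completions(&comp_list);		/* batch flush */
	return 0;
}

Passing NULL, as io_complete_rw() does in the patch, keeps the old
immediate-completion behavior for callers that have no batch to flush into.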