* [PATCH REBASE v2 0/5] return nxt propagation within io-wq ctx
@ 2020-02-28 23:37 Pavel Begunkov
2020-02-28 23:37 ` [PATCH v2 1/5] io_uring: remove @nxt from the handlers Pavel Begunkov
` (5 more replies)
From: Pavel Begunkov @ 2020-02-28 23:37 UTC
To: Jens Axboe, io-uring, linux-kernel
After io_put_req_find_next() was patched, handlers no longer return the
next work, but instead enqueue it through io_queue_async_work() (mostly
via io_put_work() -> io_put_req()). This patchset fixes that.
Patches 1-2 clean up and remove all futile attempts to get nxt from
the opcode handlers. The 3rd one moves the whole propagation idea into
work->put_work(). The remaining ones are small cleanups on top.
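With the series applied, io-wq gets its next work from put_work() instead of
from work->func(). Roughly, the end state looks like this (a sketch of patch
3's result; names match the patches):

	static void io_put_work(struct io_wq_work **workptr)
	{
		struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
		struct io_kiocb *nxt = NULL;

		/* drop the ref; if a dependent req is ready, hand it
		 * back to io-wq through the same pointer */
		io_put_req_find_next(req, &nxt);
		if (nxt)
			io_wq_assign_next(workptr, nxt);
	}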
v2: rebase on top of poll changes
Pavel Begunkov (5):
io_uring: remove @nxt from the handlers
io_uring/io-wq: pass *work instead of **workptr
io_uring/io-wq: allow put_work return next work
io_uring: remove extra nxt check after punt
io_uring: remove io_prep_next_work()
fs/io-wq.c | 28 ++---
fs/io-wq.h | 4 +-
fs/io_uring.c | 320 ++++++++++++++++++++------------------------------
3 files changed, 141 insertions(+), 211 deletions(-)
--
2.24.0
* [PATCH v2 1/5] io_uring: remove @nxt from the handlers
2020-02-28 23:37 [PATCH REBASE v2 0/5] return nxt propagation within io-wq ctx Pavel Begunkov
@ 2020-02-28 23:37 ` Pavel Begunkov
2020-02-28 23:37 ` [PATCH v2 2/5] io_uring/io-wq: pass *work instead of **workptr Pavel Begunkov
` (4 subsequent siblings)
From: Pavel Begunkov @ 2020-02-28 23:37 UTC
To: Jens Axboe, io-uring, linux-kernel
After the io_put_req_find_next() fix, no opcode handler can return a
non-NULL nxt, because there is always a submission ref, which keeps them
from doing so. Remove @nxt from the handlers; it's intrusive but
straightforward.
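For the rw path, for example, it boils down to dropping the out-parameter
(a sketch of the shape of the change; the full conversion is in the diff
below):

	/* before: the handler could hand back a dependent request */
	static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
			   bool force_nonblock);

	/* after: @nxt is gone, completion paths just do io_put_req() */
	static int io_read(struct io_kiocb *req, bool force_nonblock);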
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 245 ++++++++++++++++++--------------------------------
1 file changed, 86 insertions(+), 159 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8d8d93adb9c2..ee75e503964d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1815,17 +1815,6 @@ static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
io_put_req(req);
}
-static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
-{
- struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
- struct io_kiocb *nxt = NULL;
-
- io_complete_rw_common(kiocb, res);
- io_put_req_find_next(req, &nxt);
-
- return nxt;
-}
-
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
@@ -2020,14 +2009,14 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
}
}
-static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt)
+static void kiocb_done(struct kiocb *kiocb, ssize_t ret)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
if (req->flags & REQ_F_CUR_POS)
req->file->f_pos = kiocb->ki_pos;
if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
- *nxt = __io_complete_rw(kiocb, ret);
+ io_complete_rw(kiocb, ret, 0);
else
io_rw_done(kiocb, ret);
}
@@ -2276,8 +2265,7 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
return 0;
}
-static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_read(struct io_kiocb *req, bool force_nonblock)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct kiocb *kiocb = &req->rw.kiocb;
@@ -2317,7 +2305,7 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
/* Catch -EAGAIN return for forced non-blocking submission */
if (!force_nonblock || ret2 != -EAGAIN) {
- kiocb_done(kiocb, ret2, nxt);
+ kiocb_done(kiocb, ret2);
} else {
copy_iov:
ret = io_setup_async_rw(req, io_size, iovec,
@@ -2366,8 +2354,7 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
return 0;
}
-static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_write(struct io_kiocb *req, bool force_nonblock)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct kiocb *kiocb = &req->rw.kiocb;
@@ -2431,7 +2418,7 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
ret2 = -EAGAIN;
if (!force_nonblock || ret2 != -EAGAIN) {
- kiocb_done(kiocb, ret2, nxt);
+ kiocb_done(kiocb, ret2);
} else {
copy_iov:
ret = io_setup_async_rw(req, io_size, iovec,
@@ -2488,8 +2475,7 @@ static bool io_splice_punt(struct file *file)
return !(file->f_mode & O_NONBLOCK);
}
-static int io_splice(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_splice(struct io_kiocb *req, bool force_nonblock)
{
struct io_splice *sp = &req->splice;
struct file *in = sp->file_in;
@@ -2516,7 +2502,7 @@ static int io_splice(struct io_kiocb *req, struct io_kiocb **nxt,
io_cqring_add_event(req, ret);
if (ret != sp->len)
req_set_fail_links(req);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
}
@@ -2568,28 +2554,7 @@ static bool io_req_cancelled(struct io_kiocb *req)
return false;
}
-static void io_link_work_cb(struct io_wq_work **workptr)
-{
- struct io_wq_work *work = *workptr;
- struct io_kiocb *link = work->data;
-
- io_queue_linked_timeout(link);
- io_wq_submit_work(workptr);
-}
-
-static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
-{
- struct io_kiocb *link;
-
- io_prep_next_work(nxt, &link);
- *workptr = &nxt->work;
- if (link) {
- nxt->work.func = io_link_work_cb;
- nxt->work.data = link;
- }
-}
-
-static void __io_fsync(struct io_kiocb *req, struct io_kiocb **nxt)
+static void __io_fsync(struct io_kiocb *req)
{
loff_t end = req->sync.off + req->sync.len;
int ret;
@@ -2600,23 +2565,19 @@ static void __io_fsync(struct io_kiocb *req, struct io_kiocb **nxt)
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
}
static void io_fsync_finish(struct io_wq_work **workptr)
{
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct io_kiocb *nxt = NULL;
if (io_req_cancelled(req))
return;
- __io_fsync(req, &nxt);
- if (nxt)
- io_wq_assign_next(workptr, nxt);
+ __io_fsync(req);
}
-static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_fsync(struct io_kiocb *req, bool force_nonblock)
{
/* fsync always requires a blocking context */
if (force_nonblock) {
@@ -2624,11 +2585,11 @@ static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
req->work.func = io_fsync_finish;
return -EAGAIN;
}
- __io_fsync(req, nxt);
+ __io_fsync(req);
return 0;
}
-static void __io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt)
+static void __io_fallocate(struct io_kiocb *req)
{
int ret;
@@ -2640,17 +2601,14 @@ static void __io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt)
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
}
static void io_fallocate_finish(struct io_wq_work **workptr)
{
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct io_kiocb *nxt = NULL;
- __io_fallocate(req, &nxt);
- if (nxt)
- io_wq_assign_next(workptr, nxt);
+ __io_fallocate(req);
}
static int io_fallocate_prep(struct io_kiocb *req,
@@ -2665,8 +2623,7 @@ static int io_fallocate_prep(struct io_kiocb *req,
return 0;
}
-static int io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
{
/* fallocate always requiring blocking context */
if (force_nonblock) {
@@ -2675,7 +2632,7 @@ static int io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt,
return -EAGAIN;
}
- __io_fallocate(req, nxt);
+ __io_fallocate(req);
return 0;
}
@@ -2748,8 +2705,7 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
-static int io_openat2(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_openat2(struct io_kiocb *req, bool force_nonblock)
{
struct open_flags op;
struct file *file;
@@ -2780,15 +2736,14 @@ static int io_openat2(struct io_kiocb *req, struct io_kiocb **nxt,
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
}
-static int io_openat(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_openat(struct io_kiocb *req, bool force_nonblock)
{
req->open.how = build_open_how(req->open.how.flags, req->open.how.mode);
- return io_openat2(req, nxt, force_nonblock);
+ return io_openat2(req, force_nonblock);
}
static int io_epoll_ctl_prep(struct io_kiocb *req,
@@ -2816,8 +2771,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
#endif
}
-static int io_epoll_ctl(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_EPOLL)
struct io_epoll *ie = &req->epoll;
@@ -2830,7 +2784,7 @@ static int io_epoll_ctl(struct io_kiocb *req, struct io_kiocb **nxt,
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
#else
return -EOPNOTSUPP;
@@ -2852,8 +2806,7 @@ static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
#endif
}
-static int io_madvise(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_madvise(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
struct io_madvise *ma = &req->madvise;
@@ -2866,7 +2819,7 @@ static int io_madvise(struct io_kiocb *req, struct io_kiocb **nxt,
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
#else
return -EOPNOTSUPP;
@@ -2884,8 +2837,7 @@ static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
-static int io_fadvise(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
{
struct io_fadvise *fa = &req->fadvise;
int ret;
@@ -2905,7 +2857,7 @@ static int io_fadvise(struct io_kiocb *req, struct io_kiocb **nxt,
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
}
@@ -2942,8 +2894,7 @@ static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
-static int io_statx(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_statx(struct io_kiocb *req, bool force_nonblock)
{
struct io_open *ctx = &req->open;
unsigned lookup_flags;
@@ -2980,7 +2931,7 @@ static int io_statx(struct io_kiocb *req, struct io_kiocb **nxt,
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
}
@@ -3007,7 +2958,7 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
}
/* only called when __close_fd_get_file() is done */
-static void __io_close_finish(struct io_kiocb *req, struct io_kiocb **nxt)
+static void __io_close_finish(struct io_kiocb *req)
{
int ret;
@@ -3016,22 +2967,18 @@ static void __io_close_finish(struct io_kiocb *req, struct io_kiocb **nxt)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
fput(req->close.put_file);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
}
static void io_close_finish(struct io_wq_work **workptr)
{
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct io_kiocb *nxt = NULL;
/* not cancellable, don't do io_req_cancelled() */
- __io_close_finish(req, &nxt);
- if (nxt)
- io_wq_assign_next(workptr, nxt);
+ __io_close_finish(req);
}
-static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_close(struct io_kiocb *req, bool force_nonblock)
{
int ret;
@@ -3048,7 +2995,7 @@ static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
* No ->flush(), safely close from here and just punt the
* fput() to async context.
*/
- __io_close_finish(req, nxt);
+ __io_close_finish(req);
return 0;
eagain:
req->work.func = io_close_finish;
@@ -3079,7 +3026,7 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
-static void __io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt)
+static void __io_sync_file_range(struct io_kiocb *req)
{
int ret;
@@ -3088,24 +3035,20 @@ static void __io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt)
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
}
static void io_sync_file_range_finish(struct io_wq_work **workptr)
{
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct io_kiocb *nxt = NULL;
if (io_req_cancelled(req))
return;
- __io_sync_file_range(req, &nxt);
- if (nxt)
- io_wq_assign_next(workptr, nxt);
+ __io_sync_file_range(req);
}
-static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
{
/* sync_file_range always requires a blocking context */
if (force_nonblock) {
@@ -3114,7 +3057,7 @@ static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
return -EAGAIN;
}
- __io_sync_file_range(req, nxt);
+ __io_sync_file_range(req);
return 0;
}
@@ -3166,8 +3109,7 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
#endif
}
-static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_NET)
struct io_async_msghdr *kmsg = NULL;
@@ -3221,15 +3163,14 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
io_cqring_add_event(req, ret);
if (ret < 0)
req_set_fail_links(req);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
#else
return -EOPNOTSUPP;
#endif
}
-static int io_send(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_send(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_NET)
struct socket *sock;
@@ -3272,7 +3213,7 @@ static int io_send(struct io_kiocb *req, struct io_kiocb **nxt,
io_cqring_add_event(req, ret);
if (ret < 0)
req_set_fail_links(req);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
#else
return -EOPNOTSUPP;
@@ -3313,8 +3254,7 @@ static int io_recvmsg_prep(struct io_kiocb *req,
#endif
}
-static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_NET)
struct io_async_msghdr *kmsg = NULL;
@@ -3370,15 +3310,14 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
io_cqring_add_event(req, ret);
if (ret < 0)
req_set_fail_links(req);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
#else
return -EOPNOTSUPP;
#endif
}
-static int io_recv(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_recv(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_NET)
struct socket *sock;
@@ -3422,7 +3361,7 @@ static int io_recv(struct io_kiocb *req, struct io_kiocb **nxt,
io_cqring_add_event(req, ret);
if (ret < 0)
req_set_fail_links(req);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
#else
return -EOPNOTSUPP;
@@ -3450,8 +3389,7 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
}
#if defined(CONFIG_NET)
-static int __io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int __io_accept(struct io_kiocb *req, bool force_nonblock)
{
struct io_accept *accept = &req->accept;
unsigned file_flags;
@@ -3467,32 +3405,28 @@ static int __io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
}
static void io_accept_finish(struct io_wq_work **workptr)
{
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct io_kiocb *nxt = NULL;
io_put_req(req);
if (io_req_cancelled(req))
return;
- __io_accept(req, &nxt, false);
- if (nxt)
- io_wq_assign_next(workptr, nxt);
+ __io_accept(req, false);
}
#endif
-static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_accept(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_NET)
int ret;
- ret = __io_accept(req, nxt, force_nonblock);
+ ret = __io_accept(req, force_nonblock);
if (ret == -EAGAIN && force_nonblock) {
req->work.func = io_accept_finish;
return -EAGAIN;
@@ -3527,8 +3461,7 @@ static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
#endif
}
-static int io_connect(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_connect(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_NET)
struct io_async_ctx __io, *io;
@@ -3566,7 +3499,7 @@ static int io_connect(struct io_kiocb *req, struct io_kiocb **nxt,
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
#else
return -EOPNOTSUPP;
@@ -3957,7 +3890,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
return 0;
}
-static int io_poll_add(struct io_kiocb *req, struct io_kiocb **nxt)
+static int io_poll_add(struct io_kiocb *req)
{
struct io_poll_iocb *poll = &req->poll;
struct io_ring_ctx *ctx = req->ctx;
@@ -3979,7 +3912,7 @@ static int io_poll_add(struct io_kiocb *req, struct io_kiocb **nxt)
if (mask) {
io_cqring_ev_posted(ctx);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
}
return ipt.error;
}
@@ -4228,7 +4161,7 @@ static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
struct io_kiocb *req, __u64 sqe_addr,
- struct io_kiocb **nxt, int success_ret)
+ int success_ret)
{
unsigned long flags;
int ret;
@@ -4254,7 +4187,7 @@ static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
if (ret < 0)
req_set_fail_links(req);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
}
static int io_async_cancel_prep(struct io_kiocb *req,
@@ -4270,11 +4203,11 @@ static int io_async_cancel_prep(struct io_kiocb *req,
return 0;
}
-static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt)
+static int io_async_cancel(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- io_async_find_and_cancel(ctx, req, req->cancel.addr, nxt, 0);
+ io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
return 0;
}
@@ -4481,7 +4414,7 @@ static void io_cleanup_req(struct io_kiocb *req)
}
static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- struct io_kiocb **nxt, bool force_nonblock)
+ bool force_nonblock)
{
struct io_ring_ctx *ctx = req->ctx;
int ret;
@@ -4498,7 +4431,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret < 0)
break;
}
- ret = io_read(req, nxt, force_nonblock);
+ ret = io_read(req, force_nonblock);
break;
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
@@ -4508,7 +4441,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret < 0)
break;
}
- ret = io_write(req, nxt, force_nonblock);
+ ret = io_write(req, force_nonblock);
break;
case IORING_OP_FSYNC:
if (sqe) {
@@ -4516,7 +4449,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret < 0)
break;
}
- ret = io_fsync(req, nxt, force_nonblock);
+ ret = io_fsync(req, force_nonblock);
break;
case IORING_OP_POLL_ADD:
if (sqe) {
@@ -4524,7 +4457,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_poll_add(req, nxt);
+ ret = io_poll_add(req);
break;
case IORING_OP_POLL_REMOVE:
if (sqe) {
@@ -4540,7 +4473,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret < 0)
break;
}
- ret = io_sync_file_range(req, nxt, force_nonblock);
+ ret = io_sync_file_range(req, force_nonblock);
break;
case IORING_OP_SENDMSG:
case IORING_OP_SEND:
@@ -4550,9 +4483,9 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
break;
}
if (req->opcode == IORING_OP_SENDMSG)
- ret = io_sendmsg(req, nxt, force_nonblock);
+ ret = io_sendmsg(req, force_nonblock);
else
- ret = io_send(req, nxt, force_nonblock);
+ ret = io_send(req, force_nonblock);
break;
case IORING_OP_RECVMSG:
case IORING_OP_RECV:
@@ -4562,9 +4495,9 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
break;
}
if (req->opcode == IORING_OP_RECVMSG)
- ret = io_recvmsg(req, nxt, force_nonblock);
+ ret = io_recvmsg(req, force_nonblock);
else
- ret = io_recv(req, nxt, force_nonblock);
+ ret = io_recv(req, force_nonblock);
break;
case IORING_OP_TIMEOUT:
if (sqe) {
@@ -4588,7 +4521,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_accept(req, nxt, force_nonblock);
+ ret = io_accept(req, force_nonblock);
break;
case IORING_OP_CONNECT:
if (sqe) {
@@ -4596,7 +4529,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_connect(req, nxt, force_nonblock);
+ ret = io_connect(req, force_nonblock);
break;
case IORING_OP_ASYNC_CANCEL:
if (sqe) {
@@ -4604,7 +4537,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_async_cancel(req, nxt);
+ ret = io_async_cancel(req);
break;
case IORING_OP_FALLOCATE:
if (sqe) {
@@ -4612,7 +4545,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_fallocate(req, nxt, force_nonblock);
+ ret = io_fallocate(req, force_nonblock);
break;
case IORING_OP_OPENAT:
if (sqe) {
@@ -4620,7 +4553,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_openat(req, nxt, force_nonblock);
+ ret = io_openat(req, force_nonblock);
break;
case IORING_OP_CLOSE:
if (sqe) {
@@ -4628,7 +4561,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_close(req, nxt, force_nonblock);
+ ret = io_close(req, force_nonblock);
break;
case IORING_OP_FILES_UPDATE:
if (sqe) {
@@ -4644,7 +4577,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_statx(req, nxt, force_nonblock);
+ ret = io_statx(req, force_nonblock);
break;
case IORING_OP_FADVISE:
if (sqe) {
@@ -4652,7 +4585,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_fadvise(req, nxt, force_nonblock);
+ ret = io_fadvise(req, force_nonblock);
break;
case IORING_OP_MADVISE:
if (sqe) {
@@ -4660,7 +4593,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_madvise(req, nxt, force_nonblock);
+ ret = io_madvise(req, force_nonblock);
break;
case IORING_OP_OPENAT2:
if (sqe) {
@@ -4668,7 +4601,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_openat2(req, nxt, force_nonblock);
+ ret = io_openat2(req, force_nonblock);
break;
case IORING_OP_EPOLL_CTL:
if (sqe) {
@@ -4676,7 +4609,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_epoll_ctl(req, nxt, force_nonblock);
+ ret = io_epoll_ctl(req, force_nonblock);
break;
case IORING_OP_SPLICE:
if (sqe) {
@@ -4684,7 +4617,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret < 0)
break;
}
- ret = io_splice(req, nxt, force_nonblock);
+ ret = io_splice(req, force_nonblock);
break;
default:
ret = -EINVAL;
@@ -4717,7 +4650,6 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
{
struct io_wq_work *work = *workptr;
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
- struct io_kiocb *nxt = NULL;
int ret = 0;
/* if NO_CANCEL is set, we must still run the work */
@@ -4728,7 +4660,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
if (!ret) {
do {
- ret = io_issue_sqe(req, NULL, &nxt, false);
+ ret = io_issue_sqe(req, NULL, false);
/*
* We can get EAGAIN for polled IO even though we're
* forcing a sync submission from here, since we can't
@@ -4748,10 +4680,6 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
io_cqring_add_event(req, ret);
io_put_req(req);
}
-
- /* if a dependent link is ready, pass it back */
- if (!ret && nxt)
- io_wq_assign_next(workptr, nxt);
}
static int io_req_needs_file(struct io_kiocb *req, int fd)
@@ -4877,8 +4805,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
if (prev) {
req_set_fail_links(prev);
- io_async_find_and_cancel(ctx, req, prev->user_data, NULL,
- -ETIME);
+ io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
io_put_req(prev);
} else {
io_cqring_add_event(req, -ETIME);
@@ -4947,7 +4874,7 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
old_creds = override_creds(req->work.creds);
}
- ret = io_issue_sqe(req, sqe, &nxt, true);
+ ret = io_issue_sqe(req, sqe, true);
/*
* We async punt it if the file wasn't marked NOWAIT, or if the file
--
2.24.0
* [PATCH v2 2/5] io_uring/io-wq: pass *work instead of **workptr
2020-02-28 23:37 [PATCH REBASE v2 0/5] return nxt propagation within io-wq ctx Pavel Begunkov
2020-02-28 23:37 ` [PATCH v2 1/5] io_uring: remove @nxt from the handlers Pavel Begunkov
@ 2020-02-28 23:37 ` Pavel Begunkov
2020-02-28 23:37 ` [PATCH v2 3/5] io_uring/io-wq: allow put_work return next work Pavel Begunkov
` (3 subsequent siblings)
From: Pavel Begunkov @ 2020-02-28 23:37 UTC
To: Jens Axboe, io-uring, linux-kernel
Now work->func() never modifies the passed workptr, so remove the extra
indirection by passing a struct io_wq_work * instead of a pointer to it.
This leaves the (work != old_work) dance in io_worker_handle_work() in
place, as it will be reused shortly.
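That is, the callback type loses one level of indirection (see the io-wq.h
hunk below). The double pointer was only there so func() could hand back the
next work by overwriting *workptr, and nothing does that any more:

	void (*func)(struct io_wq_work **);	/* old: could write back next work */
	void (*func)(struct io_wq_work *);	/* new: plain callback */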
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io-wq.c | 11 +++++------
fs/io-wq.h | 2 +-
fs/io_uring.c | 25 ++++++++++++-------------
3 files changed, 18 insertions(+), 20 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index a05c32df2046..a830eddaffbe 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -503,7 +503,7 @@ static void io_worker_handle_work(struct io_worker *worker)
}
old_work = work;
- work->func(&work);
+ work->func(work);
spin_lock_irq(&worker->lock);
worker->cur_work = NULL;
@@ -756,7 +756,7 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
*/
if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
work->flags |= IO_WQ_WORK_CANCEL;
- work->func(&work);
+ work->func(work);
return;
}
@@ -896,7 +896,7 @@ static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe,
if (found) {
work->flags |= IO_WQ_WORK_CANCEL;
- work->func(&work);
+ work->func(work);
return IO_WQ_CANCEL_OK;
}
@@ -972,7 +972,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
if (found) {
work->flags |= IO_WQ_WORK_CANCEL;
- work->func(&work);
+ work->func(work);
return IO_WQ_CANCEL_OK;
}
@@ -1049,9 +1049,8 @@ struct io_wq_flush_data {
struct completion done;
};
-static void io_wq_flush_func(struct io_wq_work **workptr)
+static void io_wq_flush_func(struct io_wq_work *work)
{
- struct io_wq_work *work = *workptr;
struct io_wq_flush_data *data;
data = container_of(work, struct io_wq_flush_data, work);
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 001194aef6ae..508615af4552 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -68,7 +68,7 @@ struct io_wq_work {
struct io_wq_work_node list;
void *data;
};
- void (*func)(struct io_wq_work **);
+ void (*func)(struct io_wq_work *);
struct files_struct *files;
struct mm_struct *mm;
const struct cred *creds;
diff --git a/fs/io_uring.c b/fs/io_uring.c
index ee75e503964d..54dfa9b71864 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -793,7 +793,7 @@ static const struct io_op_def io_op_defs[] = {
}
};
-static void io_wq_submit_work(struct io_wq_work **workptr);
+static void io_wq_submit_work(struct io_wq_work *work);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void __io_double_put_req(struct io_kiocb *req);
@@ -2568,9 +2568,9 @@ static void __io_fsync(struct io_kiocb *req)
io_put_req(req);
}
-static void io_fsync_finish(struct io_wq_work **workptr)
+static void io_fsync_finish(struct io_wq_work *work)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
if (io_req_cancelled(req))
return;
@@ -2604,9 +2604,9 @@ static void __io_fallocate(struct io_kiocb *req)
io_put_req(req);
}
-static void io_fallocate_finish(struct io_wq_work **workptr)
+static void io_fallocate_finish(struct io_wq_work *work)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
__io_fallocate(req);
}
@@ -2970,9 +2970,9 @@ static void __io_close_finish(struct io_kiocb *req)
io_put_req(req);
}
-static void io_close_finish(struct io_wq_work **workptr)
+static void io_close_finish(struct io_wq_work *work)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
/* not cancellable, don't do io_req_cancelled() */
__io_close_finish(req);
@@ -3039,9 +3039,9 @@ static void __io_sync_file_range(struct io_kiocb *req)
}
-static void io_sync_file_range_finish(struct io_wq_work **workptr)
+static void io_sync_file_range_finish(struct io_wq_work *work)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
if (io_req_cancelled(req))
return;
@@ -3409,9 +3409,9 @@ static int __io_accept(struct io_kiocb *req, bool force_nonblock)
return 0;
}
-static void io_accept_finish(struct io_wq_work **workptr)
+static void io_accept_finish(struct io_wq_work *work)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
io_put_req(req);
@@ -4646,9 +4646,8 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
return 0;
}
-static void io_wq_submit_work(struct io_wq_work **workptr)
+static void io_wq_submit_work(struct io_wq_work *work)
{
- struct io_wq_work *work = *workptr;
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
int ret = 0;
--
2.24.0
* [PATCH v2 3/5] io_uring/io-wq: allow put_work return next work
2020-02-28 23:37 [PATCH REBASE v2 0/5] return nxt propagation within io-wq ctx Pavel Begunkov
2020-02-28 23:37 ` [PATCH v2 1/5] io_uring: remove @nxt from the handlers Pavel Begunkov
2020-02-28 23:37 ` [PATCH v2 2/5] io_uring/io-wq: pass *work instead of **workptr Pavel Begunkov
@ 2020-02-28 23:37 ` Pavel Begunkov
2020-02-28 23:37 ` [PATCH v2 4/5] io_uring: remove extra nxt check after punt Pavel Begunkov
` (2 subsequent siblings)
From: Pavel Begunkov @ 2020-02-28 23:37 UTC
To: Jens Axboe, io-uring, linux-kernel
Formerly, work->func() returned the next work to execute. Make put_work()
do the same. As put_work() is the last thing that happens with a work item
during issuing, it has all the info needed to deduce the next job.
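The worker loop then picks any continuation up from the pointer it passed in.
Roughly, per the io_worker_handle_work() hunk below:

	old_work = work;
	work->func(work);
	...
	/* put_work() may overwrite @work with a dependent request */
	if (wq->put_work && !(work->flags & IO_WQ_WORK_INTERNAL))
		wq->put_work(&work);
	...
	if (work && work != old_work)
		goto next;	/* execute the returned work in place */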
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io-wq.c | 17 +++++------------
fs/io-wq.h | 2 +-
fs/io_uring.c | 29 ++++++++++++++++++++++++++---
3 files changed, 32 insertions(+), 16 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index a830eddaffbe..8bdda5e23dcd 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -443,7 +443,7 @@ static void io_wq_switch_creds(struct io_worker *worker,
static void io_worker_handle_work(struct io_worker *worker)
__releases(wqe->lock)
{
- struct io_wq_work *work, *old_work = NULL, *put_work = NULL;
+ struct io_wq_work *work, *old_work = NULL;
struct io_wqe *wqe = worker->wqe;
struct io_wq *wq = wqe->wq;
@@ -464,8 +464,6 @@ static void io_worker_handle_work(struct io_worker *worker)
wqe->flags |= IO_WQE_FLAG_STALLED;
spin_unlock_irq(&wqe->lock);
- if (put_work && wq->put_work)
- wq->put_work(old_work);
if (!work)
break;
next:
@@ -497,10 +495,8 @@ static void io_worker_handle_work(struct io_worker *worker)
if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
work->flags |= IO_WQ_WORK_CANCEL;
- if (wq->get_work && !(work->flags & IO_WQ_WORK_INTERNAL)) {
- put_work = work;
+ if (wq->get_work && !(work->flags & IO_WQ_WORK_INTERNAL))
wq->get_work(work);
- }
old_work = work;
work->func(work);
@@ -509,6 +505,9 @@ static void io_worker_handle_work(struct io_worker *worker)
worker->cur_work = NULL;
spin_unlock_irq(&worker->lock);
+ if (wq->put_work && !(work->flags & IO_WQ_WORK_INTERNAL))
+ wq->put_work(&work);
+
spin_lock_irq(&wqe->lock);
if (hash != -1U) {
@@ -517,12 +516,6 @@ static void io_worker_handle_work(struct io_worker *worker)
}
if (work && work != old_work) {
spin_unlock_irq(&wqe->lock);
-
- if (put_work && wq->put_work) {
- wq->put_work(put_work);
- put_work = NULL;
- }
-
/* dependent work not hashed */
hash = -1U;
goto next;
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 508615af4552..f1d717e9acc1 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -83,7 +83,7 @@ struct io_wq_work {
} while (0) \
typedef void (get_work_fn)(struct io_wq_work *);
-typedef void (put_work_fn)(struct io_wq_work *);
+typedef void (put_work_fn)(struct io_wq_work **);
struct io_wq_data {
struct user_struct *user;
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 54dfa9b71864..5a579f686f9e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6064,11 +6064,34 @@ static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
return __io_sqe_files_update(ctx, &up, nr_args);
}
-static void io_put_work(struct io_wq_work *work)
+static void io_link_work_cb(struct io_wq_work *work)
{
- struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ struct io_kiocb *link = work->data;
- io_put_req(req);
+ io_queue_linked_timeout(link);
+ io_wq_submit_work(work);
+}
+
+static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *req)
+{
+ struct io_kiocb *link;
+
+ io_prep_next_work(req, &link);
+ *workptr = &req->work;
+ if (link) {
+ req->work.func = io_link_work_cb;
+ req->work.data = link;
+ }
+}
+
+static void io_put_work(struct io_wq_work **workptr)
+{
+ struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *nxt = NULL;
+
+ io_put_req_find_next(req, &nxt);
+ if (nxt)
+ io_wq_assign_next(workptr, nxt);
}
static void io_get_work(struct io_wq_work *work)
--
2.24.0
* [PATCH v2 4/5] io_uring: remove extra nxt check after punt
2020-02-28 23:37 [PATCH REBASE v2 0/5] return nxt propagation within io-wq ctx Pavel Begunkov
` (2 preceding siblings ...)
2020-02-28 23:37 ` [PATCH v2 3/5] io_uring/io-wq: allow put_work return next work Pavel Begunkov
@ 2020-02-28 23:37 ` Pavel Begunkov
2020-02-28 23:37 ` [PATCH v2 5/5] io_uring: remove io_prep_next_work() Pavel Begunkov
2020-02-29 18:44 ` [PATCH REBASE v2 0/5] return nxt propagation within io-wq ctx Pavel Begunkov
From: Pavel Begunkov @ 2020-02-28 23:37 UTC
To: Jens Axboe, io-uring, linux-kernel
Once __io_queue_sqe() has ended up in io_queue_async_work(), it's already
known that there is no @nxt req, so skip the check and return from the
function.

Also, @nxt initialisation can now be done just before
io_put_req_find_next(), as there is no jumping until it's checked.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5a579f686f9e..cefbae582b5f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4857,7 +4857,7 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_kiocb *linked_timeout;
- struct io_kiocb *nxt = NULL;
+ struct io_kiocb *nxt;
const struct cred *old_creds = NULL;
int ret;
@@ -4884,7 +4884,7 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (io_arm_poll_handler(req)) {
if (linked_timeout)
io_queue_linked_timeout(linked_timeout);
- goto done_req;
+ goto exit;
}
punt:
if (io_op_defs[req->opcode].file_table) {
@@ -4898,10 +4898,11 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
* submit reference when the iocb is actually submitted.
*/
io_queue_async_work(req);
- goto done_req;
+ goto exit;
}
err:
+ nxt = NULL;
/* drop submission reference */
io_put_req_find_next(req, &nxt);
@@ -4918,15 +4919,14 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req_set_fail_links(req);
io_put_req(req);
}
-done_req:
if (nxt) {
req = nxt;
- nxt = NULL;
if (req->flags & REQ_F_FORCE_ASYNC)
goto punt;
goto again;
}
+exit:
if (old_creds)
revert_creds(old_creds);
}
--
2.24.0
* [PATCH v2 5/5] io_uring: remove io_prep_next_work()
2020-02-28 23:37 [PATCH REBASE v2 0/5] return nxt propagation within io-wq ctx Pavel Begunkov
` (3 preceding siblings ...)
2020-02-28 23:37 ` [PATCH v2 4/5] io_uring: remove extra nxt check after punt Pavel Begunkov
@ 2020-02-28 23:37 ` Pavel Begunkov
2020-02-29 18:44 ` [PATCH REBASE v2 0/5] return nxt propagation within io-wq ctx Pavel Begunkov
From: Pavel Begunkov @ 2020-02-28 23:37 UTC
To: Jens Axboe, io-uring, linux-kernel
io-wq cares about the IO_WQ_WORK_UNBOUND flag only while enqueueing, so
it's useless to set it for the next req of a link. The only useful thing
left there is io_prep_linked_timeout(). Inline it.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 13 +------------
1 file changed, 1 insertion(+), 12 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index cefbae582b5f..b16cad7ebe40 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -999,17 +999,6 @@ static inline void io_req_work_drop_env(struct io_kiocb *req)
}
}
-static inline void io_prep_next_work(struct io_kiocb *req,
- struct io_kiocb **link)
-{
- const struct io_op_def *def = &io_op_defs[req->opcode];
-
- if (!(req->flags & REQ_F_ISREG) && def->unbound_nonreg_file)
- req->work.flags |= IO_WQ_WORK_UNBOUND;
-
- *link = io_prep_linked_timeout(req);
-}
-
static inline bool io_prep_async_work(struct io_kiocb *req,
struct io_kiocb **link)
{
@@ -6076,8 +6065,8 @@ static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *req)
{
struct io_kiocb *link;
- io_prep_next_work(req, &link);
*workptr = &req->work;
+ link = io_prep_linked_timeout(req);
if (link) {
req->work.func = io_link_work_cb;
req->work.data = link;
--
2.24.0
* Re: [PATCH REBASE v2 0/5] return nxt propagation within io-wq ctx
2020-02-28 23:37 [PATCH REBASE v2 0/5] return nxt propagation within io-wq ctx Pavel Begunkov
` (4 preceding siblings ...)
2020-02-28 23:37 ` [PATCH v2 5/5] io_uring: remove io_prep_next_work() Pavel Begunkov
@ 2020-02-29 18:44 ` Pavel Begunkov
2020-02-29 19:00 ` Jens Axboe
From: Pavel Begunkov @ 2020-02-29 18:44 UTC
To: Jens Axboe, io-uring, linux-kernel
On 29/02/2020 02:37, Pavel Begunkov wrote:
> After io_put_req_find_next() was patched, handlers no longer return the
> next work, but instead enqueue it through io_queue_async_work() (mostly
> via io_put_work() -> io_put_req()). This patchset fixes that.
>
> Patches 1-2 clean up and remove all futile attempts to get nxt from
> the opcode handlers. The 3rd one moves the whole propagation idea into
> work->put_work(). The remaining ones are small cleanups on top.
And now I'm hesitant about the approach. It works fine, but I want to remove a
lot of excessive locking from io-wq, and it'll be in the way. Ignore this, I'll
try something else.

The question is whether there was a problem with io_req_find_next() in the
first place... It was stealing @nxt when it had already completed a request and
was synchronous to the submission ref holder, so it should have been fine.
> v2: rebase on top of poll changes
>
> Pavel Begunkov (5):
> io_uring: remove @nxt from the handlers
> io_uring/io-wq: pass *work instead of **workptr
> io_uring/io-wq: allow put_work return next work
> io_uring: remove extra nxt check after punt
> io_uring: remove io_prep_next_work()
>
> fs/io-wq.c | 28 ++---
> fs/io-wq.h | 4 +-
> fs/io_uring.c | 320 ++++++++++++++++++++------------------------------
> 3 files changed, 141 insertions(+), 211 deletions(-)
>
--
Pavel Begunkov
* Re: [PATCH REBASE v2 0/5] return nxt propagation within io-wq ctx
2020-02-29 18:44 ` [PATCH REBASE v2 0/5] return nxt propagation within io-wq ctx Pavel Begunkov
@ 2020-02-29 19:00 ` Jens Axboe
From: Jens Axboe @ 2020-02-29 19:00 UTC
To: Pavel Begunkov, io-uring, linux-kernel
On 2/29/20 11:44 AM, Pavel Begunkov wrote:
> On 29/02/2020 02:37, Pavel Begunkov wrote:
>> After io_put_req_find_next() was patched, handlers no longer return the
>> next work, but instead enqueue it through io_queue_async_work() (mostly
>> via io_put_work() -> io_put_req()). This patchset fixes that.
>>
>> Patches 1-2 clean up and remove all futile attempts to get nxt from
>> the opcode handlers. The 3rd one moves the whole propagation idea into
>> work->put_work(). The remaining ones are small cleanups on top.
>
> And now I'm hesitant about the approach. It works fine, but I want to
> remove a lot of excessive locking from io-wq, and it'll be in the way.
> Ignore this, I'll try something else.
>
> The question is whether there was a problem with io_req_find_next() in
> the first place... It was stealing @nxt when it had already completed a
> request and was synchronous to the submission ref holder, so it should
> have been fine.
There was only a problem with it if we had multiple calls to
io_put_req_find_next(), so it was a bit fragile. That was the only
issue, but that's big enough imho.
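For example (a hypothetical double-call sequence, purely illustrating the
fragility; neither call site is meant to match the current tree):

	io_put_req_find_next(req, &nxt);	/* drops a ref, steals the linked req */
	...
	/* a second call on the same req drops another ref and tries to pop
	 * a link that is already gone; req may even be freed by now */
	io_put_req_find_next(req, &nxt);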
I'll ignore this series for now; you can always rebase on top of the
other stuff if you want to.
--
Jens Axboe