* [PATCH 0/5] return nxt propagation within io-wq ctx
From: Pavel Begunkov @ 2020-02-28 22:53 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-kernel
After io_put_req_find_next() was patched, handlers no longer return the next
work; instead it gets enqueued through io_queue_async_work() (mostly via
io_put_work() -> io_put_req()). This patchset fixes that by returning nxt
propagation to the io-wq context.
Patches 1-2 clean up and remove all the now-futile attempts to get nxt from
the opcode handlers. Patch 3 moves the propagation into work->put_work().
The remaining two are small cleanups on top.
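To illustrate where the series lands, a simplified sketch (not a literal copy
of the final code; see patch 3 for the real hunks):

/* handlers lose the extra argument and simply complete the request ... */
static int io_fsync(struct io_kiocb *req, bool force_nonblock);

/* ... while io-wq's put_work() hook hands back any dependent work */
static void io_put_work(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;

	io_put_req_find_next(req, &nxt);
	if (nxt)
		io_wq_assign_next(workptr, nxt);	/* worker runs the link next */
}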
Pavel Begunkov (5):
io_uring: remove @nxt from the handlers
io_uring/io-wq: pass *work instead of **workptr
io_uring/io-wq: allow put_work return next work
io_uring: remove extra nxt check after punt
io_uring: remove io_prep_next_work()
fs/io-wq.c | 28 ++---
fs/io-wq.h | 4 +-
fs/io_uring.c | 334 ++++++++++++++++++++------------------------------
3 files changed, 146 insertions(+), 220 deletions(-)
--
2.24.0
* [PATCH 1/5] io_uring: remove @nxt from the handlers
From: Pavel Begunkov @ 2020-02-28 22:53 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-kernel
After the io_put_req_find_next() fix, no opcode handler can return a non-NULL
nxt: the caller always still holds the submission reference, so a handler's
put can never be the final one. Remove @nxt from the handlers; it's intrusive
but straightforward.
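For reference, the reasoning in miniature (a simplified rendering of
io_put_req_find_next(); details and helper names are approximate):

static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
	/*
	 * While an opcode handler runs, its caller still holds the
	 * submission reference, so this can never be the final put and
	 * *nxtptr stays NULL; the handlers' @nxt plumbing is dead code.
	 */
	if (refcount_dec_and_test(&req->refs)) {
		io_req_find_next(req, nxtptr);	/* only the last put follows the link */
		__io_free_req(req);
	}
}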
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 250 ++++++++++++++++++--------------------------------
1 file changed, 87 insertions(+), 163 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f5fbde552be7..c92bd6d8d630 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1795,17 +1795,6 @@ static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
io_put_req(req);
}
-static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
-{
- struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
- struct io_kiocb *nxt = NULL;
-
- io_complete_rw_common(kiocb, res);
- io_put_req_find_next(req, &nxt);
-
- return nxt;
-}
-
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
@@ -2000,14 +1989,14 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
}
}
-static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt)
+static void kiocb_done(struct kiocb *kiocb, ssize_t ret)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
if (req->flags & REQ_F_CUR_POS)
req->file->f_pos = kiocb->ki_pos;
if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
- *nxt = __io_complete_rw(kiocb, ret);
+ io_complete_rw(kiocb, ret, 0);
else
io_rw_done(kiocb, ret);
}
@@ -2256,8 +2245,7 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
return 0;
}
-static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_read(struct io_kiocb *req, bool force_nonblock)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct kiocb *kiocb = &req->rw.kiocb;
@@ -2297,7 +2285,7 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
/* Catch -EAGAIN return for forced non-blocking submission */
if (!force_nonblock || ret2 != -EAGAIN) {
- kiocb_done(kiocb, ret2, nxt);
+ kiocb_done(kiocb, ret2);
} else {
copy_iov:
ret = io_setup_async_rw(req, io_size, iovec,
@@ -2346,8 +2334,7 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
return 0;
}
-static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_write(struct io_kiocb *req, bool force_nonblock)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct kiocb *kiocb = &req->rw.kiocb;
@@ -2411,7 +2398,7 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
ret2 = -EAGAIN;
if (!force_nonblock || ret2 != -EAGAIN) {
- kiocb_done(kiocb, ret2, nxt);
+ kiocb_done(kiocb, ret2);
} else {
copy_iov:
ret = io_setup_async_rw(req, io_size, iovec,
@@ -2468,8 +2455,7 @@ static bool io_splice_punt(struct file *file)
return !(file->f_mode & O_NONBLOCK);
}
-static int io_splice(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_splice(struct io_kiocb *req, bool force_nonblock)
{
struct io_splice *sp = &req->splice;
struct file *in = sp->file_in;
@@ -2496,7 +2482,7 @@ static int io_splice(struct io_kiocb *req, struct io_kiocb **nxt,
io_cqring_add_event(req, ret);
if (ret != sp->len)
req_set_fail_links(req);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
}
@@ -2548,28 +2534,7 @@ static bool io_req_cancelled(struct io_kiocb *req)
return false;
}
-static void io_link_work_cb(struct io_wq_work **workptr)
-{
- struct io_wq_work *work = *workptr;
- struct io_kiocb *link = work->data;
-
- io_queue_linked_timeout(link);
- io_wq_submit_work(workptr);
-}
-
-static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
-{
- struct io_kiocb *link;
-
- io_prep_next_work(nxt, &link);
- *workptr = &nxt->work;
- if (link) {
- nxt->work.func = io_link_work_cb;
- nxt->work.data = link;
- }
-}
-
-static void __io_fsync(struct io_kiocb *req, struct io_kiocb **nxt)
+static void __io_fsync(struct io_kiocb *req)
{
loff_t end = req->sync.off + req->sync.len;
int ret;
@@ -2580,23 +2545,19 @@ static void __io_fsync(struct io_kiocb *req, struct io_kiocb **nxt)
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
}
static void io_fsync_finish(struct io_wq_work **workptr)
{
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct io_kiocb *nxt = NULL;
if (io_req_cancelled(req))
return;
- __io_fsync(req, &nxt);
- if (nxt)
- io_wq_assign_next(workptr, nxt);
+ __io_fsync(req);
}
-static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_fsync(struct io_kiocb *req, bool force_nonblock)
{
/* fsync always requires a blocking context */
if (force_nonblock) {
@@ -2604,11 +2565,11 @@ static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
req->work.func = io_fsync_finish;
return -EAGAIN;
}
- __io_fsync(req, nxt);
+ __io_fsync(req);
return 0;
}
-static void __io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt)
+static void __io_fallocate(struct io_kiocb *req)
{
int ret;
@@ -2620,17 +2581,14 @@ static void __io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt)
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
}
static void io_fallocate_finish(struct io_wq_work **workptr)
{
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct io_kiocb *nxt = NULL;
- __io_fallocate(req, &nxt);
- if (nxt)
- io_wq_assign_next(workptr, nxt);
+ __io_fallocate(req);
}
static int io_fallocate_prep(struct io_kiocb *req,
@@ -2645,8 +2603,7 @@ static int io_fallocate_prep(struct io_kiocb *req,
return 0;
}
-static int io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
{
/* fallocate always requiring blocking context */
if (force_nonblock) {
@@ -2655,7 +2612,7 @@ static int io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt,
return -EAGAIN;
}
- __io_fallocate(req, nxt);
+ __io_fallocate(req);
return 0;
}
@@ -2728,8 +2685,7 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
-static int io_openat2(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_openat2(struct io_kiocb *req, bool force_nonblock)
{
struct open_flags op;
struct file *file;
@@ -2760,15 +2716,14 @@ static int io_openat2(struct io_kiocb *req, struct io_kiocb **nxt,
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
}
-static int io_openat(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_openat(struct io_kiocb *req, bool force_nonblock)
{
req->open.how = build_open_how(req->open.how.flags, req->open.how.mode);
- return io_openat2(req, nxt, force_nonblock);
+ return io_openat2(req, force_nonblock);
}
static int io_epoll_ctl_prep(struct io_kiocb *req,
@@ -2796,8 +2751,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
#endif
}
-static int io_epoll_ctl(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_EPOLL)
struct io_epoll *ie = &req->epoll;
@@ -2810,7 +2764,7 @@ static int io_epoll_ctl(struct io_kiocb *req, struct io_kiocb **nxt,
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
#else
return -EOPNOTSUPP;
@@ -2832,8 +2786,7 @@ static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
#endif
}
-static int io_madvise(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_madvise(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
struct io_madvise *ma = &req->madvise;
@@ -2846,7 +2799,7 @@ static int io_madvise(struct io_kiocb *req, struct io_kiocb **nxt,
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
#else
return -EOPNOTSUPP;
@@ -2864,8 +2817,7 @@ static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
-static int io_fadvise(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
{
struct io_fadvise *fa = &req->fadvise;
int ret;
@@ -2885,7 +2837,7 @@ static int io_fadvise(struct io_kiocb *req, struct io_kiocb **nxt,
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
}
@@ -2922,8 +2874,7 @@ static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
-static int io_statx(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_statx(struct io_kiocb *req, bool force_nonblock)
{
struct io_open *ctx = &req->open;
unsigned lookup_flags;
@@ -2960,7 +2911,7 @@ static int io_statx(struct io_kiocb *req, struct io_kiocb **nxt,
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
}
@@ -2987,7 +2938,7 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
}
/* only called when __close_fd_get_file() is done */
-static void __io_close_finish(struct io_kiocb *req, struct io_kiocb **nxt)
+static void __io_close_finish(struct io_kiocb *req)
{
int ret;
@@ -2996,22 +2947,18 @@ static void __io_close_finish(struct io_kiocb *req, struct io_kiocb **nxt)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
fput(req->close.put_file);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
}
static void io_close_finish(struct io_wq_work **workptr)
{
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct io_kiocb *nxt = NULL;
/* not cancellable, don't do io_req_cancelled() */
- __io_close_finish(req, &nxt);
- if (nxt)
- io_wq_assign_next(workptr, nxt);
+ __io_close_finish(req);
}
-static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_close(struct io_kiocb *req, bool force_nonblock)
{
int ret;
@@ -3028,7 +2975,7 @@ static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
* No ->flush(), safely close from here and just punt the
* fput() to async context.
*/
- __io_close_finish(req, nxt);
+ __io_close_finish(req);
return 0;
eagain:
req->work.func = io_close_finish;
@@ -3059,7 +3006,7 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
-static void __io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt)
+static void __io_sync_file_range(struct io_kiocb *req)
{
int ret;
@@ -3068,24 +3015,20 @@ static void __io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt)
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
}
static void io_sync_file_range_finish(struct io_wq_work **workptr)
{
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct io_kiocb *nxt = NULL;
if (io_req_cancelled(req))
return;
- __io_sync_file_range(req, &nxt);
- if (nxt)
- io_wq_assign_next(workptr, nxt);
+ __io_sync_file_range(req);
}
-static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
{
/* sync_file_range always requires a blocking context */
if (force_nonblock) {
@@ -3094,7 +3037,7 @@ static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
return -EAGAIN;
}
- __io_sync_file_range(req, nxt);
+ __io_sync_file_range(req);
return 0;
}
@@ -3141,8 +3084,7 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
#endif
}
-static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_NET)
struct io_async_msghdr *kmsg = NULL;
@@ -3196,15 +3138,14 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
io_cqring_add_event(req, ret);
if (ret < 0)
req_set_fail_links(req);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
#else
return -EOPNOTSUPP;
#endif
}
-static int io_send(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_send(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_NET)
struct socket *sock;
@@ -3247,7 +3188,7 @@ static int io_send(struct io_kiocb *req, struct io_kiocb **nxt,
io_cqring_add_event(req, ret);
if (ret < 0)
req_set_fail_links(req);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
#else
return -EOPNOTSUPP;
@@ -3283,8 +3224,7 @@ static int io_recvmsg_prep(struct io_kiocb *req,
#endif
}
-static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_NET)
struct io_async_msghdr *kmsg = NULL;
@@ -3340,15 +3280,14 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
io_cqring_add_event(req, ret);
if (ret < 0)
req_set_fail_links(req);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
#else
return -EOPNOTSUPP;
#endif
}
-static int io_recv(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_recv(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_NET)
struct socket *sock;
@@ -3392,7 +3331,7 @@ static int io_recv(struct io_kiocb *req, struct io_kiocb **nxt,
io_cqring_add_event(req, ret);
if (ret < 0)
req_set_fail_links(req);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
#else
return -EOPNOTSUPP;
@@ -3420,8 +3359,7 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
}
#if defined(CONFIG_NET)
-static int __io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int __io_accept(struct io_kiocb *req, bool force_nonblock)
{
struct io_accept *accept = &req->accept;
unsigned file_flags;
@@ -3437,32 +3375,28 @@ static int __io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
}
static void io_accept_finish(struct io_wq_work **workptr)
{
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct io_kiocb *nxt = NULL;
io_put_req(req);
if (io_req_cancelled(req))
return;
- __io_accept(req, &nxt, false);
- if (nxt)
- io_wq_assign_next(workptr, nxt);
+ __io_accept(req, false);
}
#endif
-static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_accept(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_NET)
int ret;
- ret = __io_accept(req, nxt, force_nonblock);
+ ret = __io_accept(req, force_nonblock);
if (ret == -EAGAIN && force_nonblock) {
req->work.func = io_accept_finish;
return -EAGAIN;
@@ -3497,8 +3431,7 @@ static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
#endif
}
-static int io_connect(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_connect(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_NET)
struct io_async_ctx __io, *io;
@@ -3536,7 +3469,7 @@ static int io_connect(struct io_kiocb *req, struct io_kiocb **nxt,
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
return 0;
#else
return -EOPNOTSUPP;
@@ -3641,7 +3574,6 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
struct io_poll_iocb *poll = &req->poll;
struct poll_table_struct pt = { ._key = poll->events };
struct io_ring_ctx *ctx = req->ctx;
- struct io_kiocb *nxt = NULL;
__poll_t mask = 0;
int ret = 0;
@@ -3676,9 +3608,7 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
if (ret < 0)
req_set_fail_links(req);
- io_put_req_find_next(req, &nxt);
- if (nxt)
- io_wq_assign_next(workptr, nxt);
+ io_put_req(req);
}
static void __io_poll_flush(struct io_ring_ctx *ctx, struct llist_node *nodes)
@@ -3826,7 +3756,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
return 0;
}
-static int io_poll_add(struct io_kiocb *req, struct io_kiocb **nxt)
+static int io_poll_add(struct io_kiocb *req)
{
struct io_poll_iocb *poll = &req->poll;
struct io_ring_ctx *ctx = req->ctx;
@@ -3880,7 +3810,7 @@ static int io_poll_add(struct io_kiocb *req, struct io_kiocb **nxt)
if (mask) {
io_cqring_ev_posted(ctx);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
}
return ipt.error;
}
@@ -4129,7 +4059,7 @@ static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
struct io_kiocb *req, __u64 sqe_addr,
- struct io_kiocb **nxt, int success_ret)
+ int success_ret)
{
unsigned long flags;
int ret;
@@ -4155,7 +4085,7 @@ static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
if (ret < 0)
req_set_fail_links(req);
- io_put_req_find_next(req, nxt);
+ io_put_req(req);
}
static int io_async_cancel_prep(struct io_kiocb *req,
@@ -4171,11 +4101,11 @@ static int io_async_cancel_prep(struct io_kiocb *req,
return 0;
}
-static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt)
+static int io_async_cancel(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- io_async_find_and_cancel(ctx, req, req->cancel.addr, nxt, 0);
+ io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
return 0;
}
@@ -4382,7 +4312,7 @@ static void io_cleanup_req(struct io_kiocb *req)
}
static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- struct io_kiocb **nxt, bool force_nonblock)
+ bool force_nonblock)
{
struct io_ring_ctx *ctx = req->ctx;
int ret;
@@ -4399,7 +4329,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret < 0)
break;
}
- ret = io_read(req, nxt, force_nonblock);
+ ret = io_read(req, force_nonblock);
break;
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
@@ -4409,7 +4339,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret < 0)
break;
}
- ret = io_write(req, nxt, force_nonblock);
+ ret = io_write(req, force_nonblock);
break;
case IORING_OP_FSYNC:
if (sqe) {
@@ -4417,7 +4347,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret < 0)
break;
}
- ret = io_fsync(req, nxt, force_nonblock);
+ ret = io_fsync(req, force_nonblock);
break;
case IORING_OP_POLL_ADD:
if (sqe) {
@@ -4425,7 +4355,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_poll_add(req, nxt);
+ ret = io_poll_add(req);
break;
case IORING_OP_POLL_REMOVE:
if (sqe) {
@@ -4441,7 +4371,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret < 0)
break;
}
- ret = io_sync_file_range(req, nxt, force_nonblock);
+ ret = io_sync_file_range(req, force_nonblock);
break;
case IORING_OP_SENDMSG:
case IORING_OP_SEND:
@@ -4451,9 +4381,9 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
break;
}
if (req->opcode == IORING_OP_SENDMSG)
- ret = io_sendmsg(req, nxt, force_nonblock);
+ ret = io_sendmsg(req, force_nonblock);
else
- ret = io_send(req, nxt, force_nonblock);
+ ret = io_send(req, force_nonblock);
break;
case IORING_OP_RECVMSG:
case IORING_OP_RECV:
@@ -4463,9 +4393,9 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
break;
}
if (req->opcode == IORING_OP_RECVMSG)
- ret = io_recvmsg(req, nxt, force_nonblock);
+ ret = io_recvmsg(req, force_nonblock);
else
- ret = io_recv(req, nxt, force_nonblock);
+ ret = io_recv(req, force_nonblock);
break;
case IORING_OP_TIMEOUT:
if (sqe) {
@@ -4489,7 +4419,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_accept(req, nxt, force_nonblock);
+ ret = io_accept(req, force_nonblock);
break;
case IORING_OP_CONNECT:
if (sqe) {
@@ -4497,7 +4427,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_connect(req, nxt, force_nonblock);
+ ret = io_connect(req, force_nonblock);
break;
case IORING_OP_ASYNC_CANCEL:
if (sqe) {
@@ -4505,7 +4435,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_async_cancel(req, nxt);
+ ret = io_async_cancel(req);
break;
case IORING_OP_FALLOCATE:
if (sqe) {
@@ -4513,7 +4443,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_fallocate(req, nxt, force_nonblock);
+ ret = io_fallocate(req, force_nonblock);
break;
case IORING_OP_OPENAT:
if (sqe) {
@@ -4521,7 +4451,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_openat(req, nxt, force_nonblock);
+ ret = io_openat(req, force_nonblock);
break;
case IORING_OP_CLOSE:
if (sqe) {
@@ -4529,7 +4459,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_close(req, nxt, force_nonblock);
+ ret = io_close(req, force_nonblock);
break;
case IORING_OP_FILES_UPDATE:
if (sqe) {
@@ -4545,7 +4475,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_statx(req, nxt, force_nonblock);
+ ret = io_statx(req, force_nonblock);
break;
case IORING_OP_FADVISE:
if (sqe) {
@@ -4553,7 +4483,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_fadvise(req, nxt, force_nonblock);
+ ret = io_fadvise(req, force_nonblock);
break;
case IORING_OP_MADVISE:
if (sqe) {
@@ -4561,7 +4491,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_madvise(req, nxt, force_nonblock);
+ ret = io_madvise(req, force_nonblock);
break;
case IORING_OP_OPENAT2:
if (sqe) {
@@ -4569,7 +4499,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_openat2(req, nxt, force_nonblock);
+ ret = io_openat2(req, force_nonblock);
break;
case IORING_OP_EPOLL_CTL:
if (sqe) {
@@ -4577,7 +4507,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret)
break;
}
- ret = io_epoll_ctl(req, nxt, force_nonblock);
+ ret = io_epoll_ctl(req, force_nonblock);
break;
case IORING_OP_SPLICE:
if (sqe) {
@@ -4585,7 +4515,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret < 0)
break;
}
- ret = io_splice(req, nxt, force_nonblock);
+ ret = io_splice(req, force_nonblock);
break;
default:
ret = -EINVAL;
@@ -4618,7 +4548,6 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
{
struct io_wq_work *work = *workptr;
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
- struct io_kiocb *nxt = NULL;
int ret = 0;
/* if NO_CANCEL is set, we must still run the work */
@@ -4629,7 +4558,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
if (!ret) {
do {
- ret = io_issue_sqe(req, NULL, &nxt, false);
+ ret = io_issue_sqe(req, NULL, false);
/*
* We can get EAGAIN for polled IO even though we're
* forcing a sync submission from here, since we can't
@@ -4649,10 +4578,6 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
io_cqring_add_event(req, ret);
io_put_req(req);
}
-
- /* if a dependent link is ready, pass it back */
- if (!ret && nxt)
- io_wq_assign_next(workptr, nxt);
}
static int io_req_needs_file(struct io_kiocb *req, int fd)
@@ -4778,8 +4703,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
if (prev) {
req_set_fail_links(prev);
- io_async_find_and_cancel(ctx, req, prev->user_data, NULL,
- -ETIME);
+ io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
io_put_req(prev);
} else {
io_cqring_add_event(req, -ETIME);
@@ -4845,7 +4769,7 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
old_creds = override_creds(req->work.creds);
}
- ret = io_issue_sqe(req, sqe, &nxt, true);
+ ret = io_issue_sqe(req, sqe, true);
/*
* We async punt it if the file wasn't marked NOWAIT, or if the file
--
2.24.0
* [PATCH 2/5] io_uring/io-wq: pass *work instead of **workptr
From: Pavel Begunkov @ 2020-02-28 22:53 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-kernel
Now work->func() never modifies the passed workptr, so remove the extra
indirection by passing a struct io_wq_work * instead of a pointer to it.
This leaves the (work != old_work) dance in io_worker_handle_work() in place,
as it will be reused shortly.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io-wq.c | 11 +++++------
fs/io-wq.h | 2 +-
fs/io_uring.c | 36 +++++++++++++++++-------------------
3 files changed, 23 insertions(+), 26 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index a05c32df2046..a830eddaffbe 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -503,7 +503,7 @@ static void io_worker_handle_work(struct io_worker *worker)
}
old_work = work;
- work->func(&work);
+ work->func(work);
spin_lock_irq(&worker->lock);
worker->cur_work = NULL;
@@ -756,7 +756,7 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
*/
if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
work->flags |= IO_WQ_WORK_CANCEL;
- work->func(&work);
+ work->func(work);
return;
}
@@ -896,7 +896,7 @@ static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe,
if (found) {
work->flags |= IO_WQ_WORK_CANCEL;
- work->func(&work);
+ work->func(work);
return IO_WQ_CANCEL_OK;
}
@@ -972,7 +972,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
if (found) {
work->flags |= IO_WQ_WORK_CANCEL;
- work->func(&work);
+ work->func(work);
return IO_WQ_CANCEL_OK;
}
@@ -1049,9 +1049,8 @@ struct io_wq_flush_data {
struct completion done;
};
-static void io_wq_flush_func(struct io_wq_work **workptr)
+static void io_wq_flush_func(struct io_wq_work *work)
{
- struct io_wq_work *work = *workptr;
struct io_wq_flush_data *data;
data = container_of(work, struct io_wq_flush_data, work);
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 001194aef6ae..508615af4552 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -68,7 +68,7 @@ struct io_wq_work {
struct io_wq_work_node list;
void *data;
};
- void (*func)(struct io_wq_work **);
+ void (*func)(struct io_wq_work *);
struct files_struct *files;
struct mm_struct *mm;
const struct cred *creds;
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c92bd6d8d630..c49ed2846f85 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -767,7 +767,7 @@ static const struct io_op_def io_op_defs[] = {
}
};
-static void io_wq_submit_work(struct io_wq_work **workptr);
+static void io_wq_submit_work(struct io_wq_work *work);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void __io_double_put_req(struct io_kiocb *req);
@@ -2548,9 +2548,9 @@ static void __io_fsync(struct io_kiocb *req)
io_put_req(req);
}
-static void io_fsync_finish(struct io_wq_work **workptr)
+static void io_fsync_finish(struct io_wq_work *work)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
if (io_req_cancelled(req))
return;
@@ -2584,9 +2584,9 @@ static void __io_fallocate(struct io_kiocb *req)
io_put_req(req);
}
-static void io_fallocate_finish(struct io_wq_work **workptr)
+static void io_fallocate_finish(struct io_wq_work *work)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
__io_fallocate(req);
}
@@ -2950,9 +2950,9 @@ static void __io_close_finish(struct io_kiocb *req)
io_put_req(req);
}
-static void io_close_finish(struct io_wq_work **workptr)
+static void io_close_finish(struct io_wq_work *work)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
/* not cancellable, don't do io_req_cancelled() */
__io_close_finish(req);
@@ -3019,9 +3019,9 @@ static void __io_sync_file_range(struct io_kiocb *req)
}
-static void io_sync_file_range_finish(struct io_wq_work **workptr)
+static void io_sync_file_range_finish(struct io_wq_work *work)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
if (io_req_cancelled(req))
return;
@@ -3379,9 +3379,9 @@ static int __io_accept(struct io_kiocb *req, bool force_nonblock)
return 0;
}
-static void io_accept_finish(struct io_wq_work **workptr)
+static void io_accept_finish(struct io_wq_work *work)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
io_put_req(req);
@@ -3567,9 +3567,8 @@ static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
io_commit_cqring(ctx);
}
-static void io_poll_complete_work(struct io_wq_work **workptr)
+static void io_poll_complete_work(struct io_wq_work *work)
{
- struct io_wq_work *work = *workptr;
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
struct io_poll_iocb *poll = &req->poll;
struct poll_table_struct pt = { ._key = poll->events };
@@ -3634,9 +3633,9 @@ static void __io_poll_flush(struct io_ring_ctx *ctx, struct llist_node *nodes)
io_free_req_many(ctx, &rb);
}
-static void io_poll_flush(struct io_wq_work **workptr)
+static void io_poll_flush(struct io_wq_work *work)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
struct llist_node *nodes;
nodes = llist_del_all(&req->ctx->poll_llist);
@@ -3644,9 +3643,9 @@ static void io_poll_flush(struct io_wq_work **workptr)
__io_poll_flush(req->ctx, nodes);
}
-static void io_poll_trigger_evfd(struct io_wq_work **workptr)
+static void io_poll_trigger_evfd(struct io_wq_work *work)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
eventfd_signal(req->ctx->cq_ev_fd, 1);
io_put_req(req);
@@ -4544,9 +4543,8 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
return 0;
}
-static void io_wq_submit_work(struct io_wq_work **workptr)
+static void io_wq_submit_work(struct io_wq_work *work)
{
- struct io_wq_work *work = *workptr;
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
int ret = 0;
--
2.24.0
* [PATCH 3/5] io_uring/io-wq: allow put_work return next work
From: Pavel Begunkov @ 2020-02-28 22:53 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-kernel
Formerly, work->func() returned the next work to execute. Make put_work() do
the same. As put_work() is the last thing that happens to a work item during
issuing, it has all the information needed to deduce the next job.
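Roughly, the worker-side flow becomes the following (a condensed view of the
io_worker_handle_work() hunks below, with locking and hashing elided):

	old_work = work;
	work->func(work);

	if (wq->put_work && !(work->flags & IO_WQ_WORK_INTERNAL))
		wq->put_work(&work);	/* may replace 'work' with a dependent req */

	if (work && work != old_work)
		goto next;		/* keep executing in the same worker */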
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io-wq.c | 17 +++++------------
fs/io-wq.h | 2 +-
fs/io_uring.c | 29 ++++++++++++++++++++++++++---
3 files changed, 32 insertions(+), 16 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index a830eddaffbe..8bdda5e23dcd 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -443,7 +443,7 @@ static void io_wq_switch_creds(struct io_worker *worker,
static void io_worker_handle_work(struct io_worker *worker)
__releases(wqe->lock)
{
- struct io_wq_work *work, *old_work = NULL, *put_work = NULL;
+ struct io_wq_work *work, *old_work = NULL;
struct io_wqe *wqe = worker->wqe;
struct io_wq *wq = wqe->wq;
@@ -464,8 +464,6 @@ static void io_worker_handle_work(struct io_worker *worker)
wqe->flags |= IO_WQE_FLAG_STALLED;
spin_unlock_irq(&wqe->lock);
- if (put_work && wq->put_work)
- wq->put_work(old_work);
if (!work)
break;
next:
@@ -497,10 +495,8 @@ static void io_worker_handle_work(struct io_worker *worker)
if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
work->flags |= IO_WQ_WORK_CANCEL;
- if (wq->get_work && !(work->flags & IO_WQ_WORK_INTERNAL)) {
- put_work = work;
+ if (wq->get_work && !(work->flags & IO_WQ_WORK_INTERNAL))
wq->get_work(work);
- }
old_work = work;
work->func(work);
@@ -509,6 +505,9 @@ static void io_worker_handle_work(struct io_worker *worker)
worker->cur_work = NULL;
spin_unlock_irq(&worker->lock);
+ if (wq->put_work && !(work->flags & IO_WQ_WORK_INTERNAL))
+ wq->put_work(&work);
+
spin_lock_irq(&wqe->lock);
if (hash != -1U) {
@@ -517,12 +516,6 @@ static void io_worker_handle_work(struct io_worker *worker)
}
if (work && work != old_work) {
spin_unlock_irq(&wqe->lock);
-
- if (put_work && wq->put_work) {
- wq->put_work(put_work);
- put_work = NULL;
- }
-
/* dependent work not hashed */
hash = -1U;
goto next;
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 508615af4552..f1d717e9acc1 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -83,7 +83,7 @@ struct io_wq_work {
} while (0) \
typedef void (get_work_fn)(struct io_wq_work *);
-typedef void (put_work_fn)(struct io_wq_work *);
+typedef void (put_work_fn)(struct io_wq_work **);
struct io_wq_data {
struct user_struct *user;
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c49ed2846f85..9b220044b608 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5939,11 +5939,34 @@ static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
return __io_sqe_files_update(ctx, &up, nr_args);
}
-static void io_put_work(struct io_wq_work *work)
+static void io_link_work_cb(struct io_wq_work *work)
{
- struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ struct io_kiocb *link = work->data;
- io_put_req(req);
+ io_queue_linked_timeout(link);
+ io_wq_submit_work(work);
+}
+
+static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *req)
+{
+ struct io_kiocb *link;
+
+ io_prep_next_work(req, &link);
+ *workptr = &req->work;
+ if (link) {
+ req->work.func = io_link_work_cb;
+ req->work.data = link;
+ }
+}
+
+static void io_put_work(struct io_wq_work **workptr)
+{
+ struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *nxt = NULL;
+
+ io_put_req_find_next(req, &nxt);
+ if (nxt)
+ io_wq_assign_next(workptr, nxt);
}
static void io_get_work(struct io_wq_work *work)
--
2.24.0
* [PATCH 4/5] io_uring: remove extra nxt check after punt
From: Pavel Begunkov @ 2020-02-28 22:53 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-kernel
Once __io_queue_sqe() has ended up in io_queue_async_work(), it is already
known that there is no @nxt req, so skip the check and return from the
function.
Also, the @nxt initialisation can now be done just before
io_put_req_find_next(), as there is no jump until it is checked.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 9b220044b608..3017db9088cd 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4751,7 +4751,7 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_kiocb *linked_timeout;
- struct io_kiocb *nxt = NULL;
+ struct io_kiocb *nxt;
const struct cred *old_creds = NULL;
int ret;
@@ -4787,10 +4787,11 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
* submit reference when the iocb is actually submitted.
*/
io_queue_async_work(req);
- goto done_req;
+ goto exit;
}
err:
+ nxt = NULL;
/* drop submission reference */
io_put_req_find_next(req, &nxt);
@@ -4807,15 +4808,14 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req_set_fail_links(req);
io_put_req(req);
}
-done_req:
if (nxt) {
req = nxt;
- nxt = NULL;
if (req->flags & REQ_F_FORCE_ASYNC)
goto punt;
goto again;
}
+exit:
if (old_creds)
revert_creds(old_creds);
}
--
2.24.0
* [PATCH 5/5] io_uring: remove io_prep_next_work()
From: Pavel Begunkov @ 2020-02-28 22:53 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-kernel
io-wq cares about the IO_WQ_WORK_UNBOUND flag only while enqueueing, so it's
useless to set it for the next request of a link. The only useful thing left
there is io_prep_linked_timeout(); inline it.
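For context, io-wq consults the flag only when picking the accounting bucket
at enqueue time, along these lines (an illustrative sketch of the fs/io-wq.c
helper; the exact shape may differ):

static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	/* only read at enqueue; dependent link work never passes through here */
	if (work->flags & IO_WQ_WORK_UNBOUND)
		return &wqe->acct[IO_WQ_ACCT_UNBOUND];
	return &wqe->acct[IO_WQ_ACCT_BOUND];
}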
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 13 +------------
1 file changed, 1 insertion(+), 12 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3017db9088cd..00039545bfa3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -974,17 +974,6 @@ static inline void io_req_work_drop_env(struct io_kiocb *req)
}
}
-static inline void io_prep_next_work(struct io_kiocb *req,
- struct io_kiocb **link)
-{
- const struct io_op_def *def = &io_op_defs[req->opcode];
-
- if (!(req->flags & REQ_F_ISREG) && def->unbound_nonreg_file)
- req->work.flags |= IO_WQ_WORK_UNBOUND;
-
- *link = io_prep_linked_timeout(req);
-}
-
static inline bool io_prep_async_work(struct io_kiocb *req,
struct io_kiocb **link)
{
@@ -5951,8 +5940,8 @@ static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *req)
{
struct io_kiocb *link;
- io_prep_next_work(req, &link);
*workptr = &req->work;
+ link = io_prep_linked_timeout(req);
if (link) {
req->work.func = io_link_work_cb;
req->work.data = link;
--
2.24.0