* [PATCH v4] io_uring: keep io_put_req only responsible for release and put req
@ 2019-11-08 15:50 Jackie Liu
From: Jackie Liu @ 2019-11-08 15:50 UTC
To: axboe; +Cc: io-uring, liuyun01
We already have io_put_req_find_next() to find the next request in a
link chain; io_put_req() should not also be used for that. The two
should be functions at the same level: io_put_req() only drops the
reference and frees the request.
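For reference, the intended end state of the two helpers looks roughly
like this (condensed from the diff below; simplified, not a verbatim
copy of the kernel source):

static void io_put_req(struct io_kiocb *req)
{
	/* only drop the reference and free the request */
	if (refcount_dec_and_test(&req->refs))
		io_free_req(req, NULL);
}

static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
	struct io_kiocb *nxt = NULL;

	/* drop the reference; if it was the last one, pick up the next
	 * request in the link chain */
	if (refcount_dec_and_test(&req->refs))
		io_free_req(req, &nxt);
	if (nxt) {
		if (nxtptr)
			*nxtptr = nxt;
		else
			io_queue_async_work(nxt);
	}
}

Callers that do not care about the link chain simply call io_put_req();
callers that want to continue the chain pass a nxt pointer to
io_put_req_find_next().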
Signed-off-by: Jackie Liu <[email protected]>
---
V4:
- rebase to for-5.5/io_uring-test
fs/io_uring.c | 73 ++++++++++++++++++++++++++++++-----------------------------
1 file changed, 37 insertions(+), 36 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 94ec44c..577bc96 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -372,7 +372,7 @@ struct io_submit_state {
static void io_wq_submit_work(struct io_wq_work **workptr);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void __io_free_req(struct io_kiocb *req);
-static void io_put_req(struct io_kiocb *req, struct io_kiocb **nxtptr);
+static void io_put_req(struct io_kiocb *req);
static void io_double_put_req(struct io_kiocb *req);
static struct kmem_cache *req_cachep;
@@ -558,7 +558,7 @@ static void io_kill_timeout(struct io_kiocb *req)
atomic_inc(&req->ctx->cq_timeouts);
list_del_init(&req->list);
io_cqring_fill_event(req, 0);
- io_put_req(req, NULL);
+ io_put_req(req);
}
}
@@ -658,7 +658,7 @@ static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
while (!list_empty(&list)) {
req = list_first_entry(&list, struct io_kiocb, list);
list_del(&req->list);
- io_put_req(req, NULL);
+ io_put_req(req);
}
}
@@ -832,7 +832,7 @@ static bool io_link_cancel_timeout(struct io_kiocb *req)
io_cqring_fill_event(req, -ECANCELED);
io_commit_cqring(ctx);
req->flags &= ~REQ_F_LINK;
- io_put_req(req, NULL);
+ io_put_req(req);
return true;
}
@@ -951,21 +951,13 @@ static void io_free_req(struct io_kiocb *req, struct io_kiocb **nxt)
* Drop reference to request, return next in chain (if there is one) if this
* was the last reference to this request.
*/
-static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
+static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
struct io_kiocb *nxt = NULL;
if (refcount_dec_and_test(&req->refs))
io_free_req(req, &nxt);
- return nxt;
-}
-
-static void io_put_req(struct io_kiocb *req, struct io_kiocb **nxtptr)
-{
- struct io_kiocb *nxt;
-
- nxt = io_put_req_find_next(req);
if (nxt) {
if (nxtptr)
*nxtptr = nxt;
@@ -974,6 +966,12 @@ static void io_put_req(struct io_kiocb *req, struct io_kiocb **nxtptr)
}
}
+static void io_put_req(struct io_kiocb *req)
+{
+ if (refcount_dec_and_test(&req->refs))
+ io_free_req(req, NULL);
+}
+
static void io_double_put_req(struct io_kiocb *req)
{
/* drop both submit and complete references */
@@ -1220,15 +1218,18 @@ static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
io_complete_rw_common(kiocb, res);
- io_put_req(req, NULL);
+ io_put_req(req);
}
static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+ struct io_kiocb *nxt = NULL;
io_complete_rw_common(kiocb, res);
- return io_put_req_find_next(req);
+ io_put_req_find_next(req, &nxt);
+
+ return nxt;
}
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
@@ -1721,7 +1722,7 @@ static int io_nop(struct io_kiocb *req)
return -EINVAL;
io_cqring_add_event(req, 0);
- io_put_req(req, NULL);
+ io_put_req(req);
return 0;
}
@@ -1768,7 +1769,7 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
io_cqring_add_event(req, ret);
- io_put_req(req, nxt);
+ io_put_req_find_next(req, nxt);
return 0;
}
@@ -1815,7 +1816,7 @@ static int io_sync_file_range(struct io_kiocb *req,
if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
io_cqring_add_event(req, ret);
- io_put_req(req, nxt);
+ io_put_req_find_next(req, nxt);
return 0;
}
@@ -1853,7 +1854,7 @@ static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
io_cqring_add_event(req, ret);
if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
- io_put_req(req, nxt);
+ io_put_req_find_next(req, nxt);
return 0;
}
#endif
@@ -1907,7 +1908,7 @@ static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
io_cqring_add_event(req, ret);
- io_put_req(req, nxt);
+ io_put_req_find_next(req, nxt);
return 0;
#else
return -EOPNOTSUPP;
@@ -1970,7 +1971,7 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_cqring_add_event(req, ret);
if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
- io_put_req(req, NULL);
+ io_put_req(req);
return 0;
}
@@ -2018,7 +2019,7 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
io_cqring_ev_posted(ctx);
- io_put_req(req, &nxt);
+ io_put_req_find_next(req, &nxt);
if (nxt)
*workptr = &nxt->work;
}
@@ -2045,7 +2046,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
- io_put_req(req, NULL);
+ io_put_req(req);
} else {
io_queue_async_work(req);
}
@@ -2138,7 +2139,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (mask) {
io_cqring_ev_posted(ctx);
- io_put_req(req, nxt);
+ io_put_req_find_next(req, nxt);
}
return ipt.error;
}
@@ -2180,7 +2181,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
io_cqring_ev_posted(ctx);
if (req->flags & REQ_F_LINK)
req->flags |= REQ_F_FAIL_LINK;
- io_put_req(req, NULL);
+ io_put_req(req);
return HRTIMER_NORESTART;
}
@@ -2223,7 +2224,7 @@ static int io_timeout_remove(struct io_kiocb *req,
io_cqring_ev_posted(ctx);
if (req->flags & REQ_F_LINK)
req->flags |= REQ_F_FAIL_LINK;
- io_put_req(req, NULL);
+ io_put_req(req);
return 0;
}
@@ -2239,8 +2240,8 @@ static int io_timeout_remove(struct io_kiocb *req,
spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
- io_put_req(treq, NULL);
- io_put_req(req, NULL);
+ io_put_req(treq);
+ io_put_req(req);
return 0;
}
@@ -2375,7 +2376,7 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
io_cqring_add_event(req, ret);
- io_put_req(req, nxt);
+ io_put_req_find_next(req, nxt);
return 0;
}
@@ -2521,13 +2522,13 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
}
/* drop submission reference */
- io_put_req(req, NULL);
+ io_put_req(req);
if (ret) {
if (req->flags & REQ_F_LINK)
req->flags |= REQ_F_FAIL_LINK;
io_cqring_add_event(req, ret);
- io_put_req(req, NULL);
+ io_put_req(req);
}
/* async context always use a copy of the sqe */
@@ -2658,7 +2659,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
}
io_cqring_add_event(req, ret);
- io_put_req(req, NULL);
+ io_put_req(req);
return HRTIMER_NORESTART;
}
@@ -2690,7 +2691,7 @@ static int io_queue_linked_timeout(struct io_kiocb *req, struct io_kiocb *nxt)
ret = 0;
err:
/* drop submission reference */
- io_put_req(nxt, NULL);
+ io_put_req(nxt);
if (ret) {
struct io_ring_ctx *ctx = req->ctx;
@@ -2703,7 +2704,7 @@ static int io_queue_linked_timeout(struct io_kiocb *req, struct io_kiocb *nxt)
io_cqring_fill_event(nxt, ret);
trace_io_uring_fail_link(req, nxt);
io_commit_cqring(ctx);
- io_put_req(nxt, NULL);
+ io_put_req(nxt);
ret = -ECANCELED;
}
@@ -2769,14 +2770,14 @@ static int __io_queue_sqe(struct io_kiocb *req)
/* drop submission reference */
err:
- io_put_req(req, NULL);
+ io_put_req(req);
/* and drop final reference, if we failed */
if (ret) {
io_cqring_add_event(req, ret);
if (req->flags & REQ_F_LINK)
req->flags |= REQ_F_FAIL_LINK;
- io_put_req(req, NULL);
+ io_put_req(req);
}
return ret;
--
2.7.4
* Re: [PATCH v4] io_uring: keep io_put_req only responsible for release and put req
From: Jens Axboe @ 2019-11-08 18:19 UTC
To: Jackie Liu; +Cc: io-uring
On 11/8/19 8:50 AM, Jackie Liu wrote:
> We already have io_put_req_find_next() to find the next request in a
> link chain; io_put_req() should not also be used for that. The two
> should be functions at the same level: io_put_req() only drops the
> reference and frees the request.
Applied, thanks.
--
Jens Axboe