[PATCH] io_uring: introduce add/post event and put function

From: Bob Liu @ 2019-11-21  9:00 UTC
  To: axboe; +Cc: io-uring, Bob Liu

* Only compile-tested right now. *
There is a lot of duplicated code that adds/posts an event and then puts the req.
Move it into common helpers io_cqring_event_posted_and_put() and
io_cqring_add_event_and_put().

Signed-off-by: Bob Liu <[email protected]>
---
 fs/io_uring.c | 145 ++++++++++++++++++++++++++++++----------------------------
 1 file changed, 74 insertions(+), 71 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 299a218..816eef3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1039,6 +1039,56 @@ static void io_double_put_req(struct io_kiocb *req)
 		io_free_req(req);
 }
 
+/*
+ * Add event to io_cqring and put req.
+ */
+static void io_cqring_add_event_and_put(struct io_kiocb *req, long ret,
+		int should_fail_link, bool double_put, struct io_kiocb **nxt)
+{
+	if (should_fail_link == 1) {
+		if (ret < 0 && (req->flags & REQ_F_LINK))
+			req->flags |= REQ_F_FAIL_LINK;
+	} else if (should_fail_link == 2) {
+		/* Don't care about ret < 0 when should_fail_link == 2 */
+		if (req->flags & REQ_F_LINK)
+			req->flags |= REQ_F_FAIL_LINK;
+	}
+
+	io_cqring_add_event(req, ret);
+
+	if (double_put)
+		io_double_put_req(req);
+	else {
+		if (nxt)
+			io_put_req_find_next(req, nxt);
+		else
+			io_put_req(req);
+	}
+}
+
+/*
+ * Post event and put req.
+ */
+static void io_cqring_event_posted_and_put(struct io_kiocb *req, long ret,
+		int should_fail_link, struct io_kiocb **nxt)
+{
+	if (should_fail_link == 1) {
+		if (ret < 0 && (req->flags & REQ_F_LINK))
+			req->flags |= REQ_F_FAIL_LINK;
+	} else if (should_fail_link == 2) {
+		/* Don't care about ret < 0 when should_fail_link == 2 */
+		if (req->flags & REQ_F_LINK)
+			req->flags |= REQ_F_FAIL_LINK;
+	}
+
+	io_cqring_ev_posted(req->ctx);
+
+	if (nxt)
+		io_put_req_find_next(req, nxt);
+	else
+		io_put_req(req);
+}
+
 static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
 {
 	struct io_rings *rings = ctx->rings;
@@ -1789,8 +1839,7 @@ static int io_nop(struct io_kiocb *req)
 	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
 
-	io_cqring_add_event(req, 0);
-	io_put_req(req);
+	io_cqring_add_event_and_put(req, 0, 0, false, NULL);
 	return 0;
 }
 
@@ -1834,10 +1883,7 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 				end > 0 ? end : LLONG_MAX,
 				fsync_flags & IORING_FSYNC_DATASYNC);
 
-	if (ret < 0 && (req->flags & REQ_F_LINK))
-		req->flags |= REQ_F_FAIL_LINK;
-	io_cqring_add_event(req, ret);
-	io_put_req_find_next(req, nxt);
+	io_cqring_add_event_and_put(req, ret, 1, false, nxt);
 	return 0;
 }
 
@@ -1880,11 +1926,8 @@ static int io_sync_file_range(struct io_kiocb *req,
 	flags = READ_ONCE(sqe->sync_range_flags);
 
 	ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags);
+	io_cqring_add_event_and_put(req, ret, 1, false, nxt);
 
-	if (ret < 0 && (req->flags & REQ_F_LINK))
-		req->flags |= REQ_F_FAIL_LINK;
-	io_cqring_add_event(req, ret);
-	io_put_req_find_next(req, nxt);
 	return 0;
 }
 
@@ -1919,10 +1962,8 @@ static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			return ret;
 	}
 
-	io_cqring_add_event(req, ret);
-	if (ret < 0 && (req->flags & REQ_F_LINK))
-		req->flags |= REQ_F_FAIL_LINK;
-	io_put_req_find_next(req, nxt);
+	io_cqring_add_event_and_put(req, ret, 1, false, nxt);
+
 	return 0;
 }
 #endif
@@ -1975,10 +2016,7 @@ static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	}
 	if (ret == -ERESTARTSYS)
 		ret = -EINTR;
-	if (ret < 0 && (req->flags & REQ_F_LINK))
-		req->flags |= REQ_F_FAIL_LINK;
-	io_cqring_add_event(req, ret);
-	io_put_req_find_next(req, nxt);
+	io_cqring_add_event_and_put(req, ret, 1, false, nxt);
 	return 0;
 #else
 	return -EOPNOTSUPP;
@@ -2061,10 +2099,7 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	ret = io_poll_cancel(ctx, READ_ONCE(sqe->addr));
 	spin_unlock_irq(&ctx->completion_lock);
 
-	io_cqring_add_event(req, ret);
-	if (ret < 0 && (req->flags & REQ_F_LINK))
-		req->flags |= REQ_F_FAIL_LINK;
-	io_put_req(req);
+	io_cqring_add_event_and_put(req, ret, 1, false, NULL);
 	return 0;
 }
 
@@ -2118,11 +2153,7 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
 	io_poll_complete(req, mask, ret);
 	spin_unlock_irq(&ctx->completion_lock);
 
-	io_cqring_ev_posted(ctx);
-
-	if (ret < 0 && req->flags & REQ_F_LINK)
-		req->flags |= REQ_F_FAIL_LINK;
-	io_put_req_find_next(req, &nxt);
+	io_cqring_event_posted_and_put(req, ret, 1, &nxt);
 	if (nxt)
 		*workptr = &nxt->work;
 }
@@ -2267,10 +2298,9 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	}
 	spin_unlock_irq(&ctx->completion_lock);
 
-	if (mask) {
-		io_cqring_ev_posted(ctx);
-		io_put_req_find_next(req, nxt);
-	}
+	if (mask)
+		io_cqring_event_posted_and_put(req, 0, 0, nxt);
+
 	return ipt.error;
 }
 
@@ -2308,10 +2338,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 	io_commit_cqring(ctx);
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
-	io_cqring_ev_posted(ctx);
-	if (req->flags & REQ_F_LINK)
-		req->flags |= REQ_F_FAIL_LINK;
-	io_put_req(req);
+	io_cqring_event_posted_and_put(req, 0, 2, NULL);
 	return HRTIMER_NORESTART;
 }
 
@@ -2366,10 +2393,7 @@ static int io_timeout_remove(struct io_kiocb *req,
 	io_cqring_fill_event(req, ret);
 	io_commit_cqring(ctx);
 	spin_unlock_irq(&ctx->completion_lock);
-	io_cqring_ev_posted(ctx);
-	if (ret < 0 && req->flags & REQ_F_LINK)
-		req->flags |= REQ_F_FAIL_LINK;
-	io_put_req(req);
+	io_cqring_event_posted_and_put(req, ret, 1, NULL);
 	return 0;
 }
 
@@ -2530,11 +2554,7 @@ static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
 	io_cqring_fill_event(req, ret);
 	io_commit_cqring(ctx);
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
-	io_cqring_ev_posted(ctx);
-
-	if (ret < 0 && (req->flags & REQ_F_LINK))
-		req->flags |= REQ_F_FAIL_LINK;
-	io_put_req_find_next(req, nxt);
+	io_cqring_event_posted_and_put(req, ret, 1, nxt);
 }
 
 static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
@@ -2722,12 +2742,8 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
 	/* drop submission reference */
 	io_put_req(req);
 
-	if (ret) {
-		if (req->flags & REQ_F_LINK)
-			req->flags |= REQ_F_FAIL_LINK;
-		io_cqring_add_event(req, ret);
-		io_put_req(req);
-	}
+	if (ret)
+		io_cqring_add_event_and_put(req, ret, 2, false, NULL);
 
 	/* if a dependent link is ready, pass it back */
 	if (!ret && nxt) {
@@ -2870,8 +2886,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 						-ETIME);
 		io_put_req(prev);
 	} else {
-		io_cqring_add_event(req, -ETIME);
-		io_put_req(req);
+		io_cqring_add_event_and_put(req, -ETIME, 0, false, NULL);
 	}
 	return HRTIMER_NORESTART;
 }
@@ -2962,12 +2977,8 @@ static void __io_queue_sqe(struct io_kiocb *req)
 	}
 
 	/* and drop final reference, if we failed */
-	if (ret) {
-		io_cqring_add_event(req, ret);
-		if (req->flags & REQ_F_LINK)
-			req->flags |= REQ_F_FAIL_LINK;
-		io_put_req(req);
-	}
+	if (ret)
+		io_cqring_add_event_and_put(req, ret, 2, false, NULL);
 }
 
 static void io_queue_sqe(struct io_kiocb *req)
@@ -2975,14 +2986,10 @@ static void io_queue_sqe(struct io_kiocb *req)
 	int ret;
 
 	ret = io_req_defer(req);
-	if (!ret) {
+	if (!ret)
 		__io_queue_sqe(req);
-	} else if (ret != -EIOCBQUEUED) {
-		io_cqring_add_event(req, ret);
-		if (req->flags & REQ_F_LINK)
-			req->flags |= REQ_F_FAIL_LINK;
-		io_double_put_req(req);
-	}
+	else if (ret != -EIOCBQUEUED)
+		io_cqring_add_event_and_put(req, ret, 2, true, NULL);
 }
 
 static void io_queue_link_head(struct io_kiocb *req, struct io_kiocb *shadow)
@@ -3010,10 +3017,7 @@ static void io_queue_link_head(struct io_kiocb *req, struct io_kiocb *shadow)
 	if (ret) {
 		if (ret != -EIOCBQUEUED) {
 err:
-			io_cqring_add_event(req, ret);
-			if (req->flags & REQ_F_LINK)
-				req->flags |= REQ_F_FAIL_LINK;
-			io_double_put_req(req);
+			io_cqring_add_event_and_put(req, ret, 2, true, NULL);
 			if (shadow)
 				__io_free_req(shadow);
 			return;
@@ -3057,8 +3061,7 @@ static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
 	ret = io_req_set_file(state, req);
 	if (unlikely(ret)) {
 err_req:
-		io_cqring_add_event(req, ret);
-		io_double_put_req(req);
+		io_cqring_add_event_and_put(req, ret, 0, true, NULL);
 		return;
 	}
 
-- 
2.9.5


Re: [PATCH] io_uring: introduce add/post event and put function

From: Jens Axboe @ 2019-11-21 14:05 UTC
  To: Bob Liu; +Cc: io-uring

On 11/21/19 2:00 AM, Bob Liu wrote:
> * Only compile-tested right now. *
> There is a lot of duplicated code that adds/posts an event and then puts the req.
> Move it into common helpers io_cqring_event_posted_and_put() and
> io_cqring_add_event_and_put().
> 
> Signed-off-by: Bob Liu <[email protected]>
> ---
>   fs/io_uring.c | 145 ++++++++++++++++++++++++++++++----------------------------
>   1 file changed, 74 insertions(+), 71 deletions(-)
> 
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 299a218..816eef3 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -1039,6 +1039,56 @@ static void io_double_put_req(struct io_kiocb *req)
>   		io_free_req(req);
>   }
>   
> +/*
> + * Add event to io_cqring and put req.
> + */
> +static void io_cqring_add_event_and_put(struct io_kiocb *req, long ret,
> +		int should_fail_link, bool double_put, struct io_kiocb **nxt)
> +{
> +	if (should_fail_link == 1) {
> +		if (ret < 0 && (req->flags & REQ_F_LINK))
> +			req->flags |= REQ_F_FAIL_LINK;
> +	} else if (should_fail_link == 2) {
> +		/* Don't care about ret < 0 when should_fail_link == 2 */
> +		if (req->flags & REQ_F_LINK)
> +			req->flags |= REQ_F_FAIL_LINK;
> +	}
> +
> +	io_cqring_add_event(req, ret);
> +
> +	if (double_put)
> +		io_double_put_req(req);
> +	else {
> +		if (nxt)
> +			io_put_req_find_next(req, nxt);
> +		else
> +			io_put_req(req);
> +	}
> +}

I'd really like to clean up this part, as it's both duplicated a lot and
also fragile in terms of places forgetting to do part of the necessary
dance. However, this helper is a bit of a monster (and the other one as
well); it's hard to know what this does:

	io_cqring_add_event_and_put(req, ret, 1, false, nxt);

without looking up what '1' and 'false' might be. Having multiple int
values for should_fail_link is also a bit, well, tricky. Maybe it needs
to be two helpers?

And if it does need something like 'should_fail_link', I think that'd be
done cleaner by using some sort of mask. IO_PUT_ERROR_ON_NEGATIVE,
IO_PUT_ERROR_ALWAYS, or something like that. Then you can tell in the
caller what it's going to do, rather than having to look up what '1' or '2'
as the argument means.
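
As a rough, untested sketch of that mask-based interface (the
IO_PUT_ERROR_* names follow the suggestion above; IO_PUT_DOUBLE is a
hypothetical extra flag covering the callers that currently pass
double_put == true):

	/* sketch only, not a tested patch */
	#define IO_PUT_ERROR_ON_NEGATIVE	(1U << 0)	/* fail the link only if ret < 0 */
	#define IO_PUT_ERROR_ALWAYS		(1U << 1)	/* fail the link unconditionally */
	#define IO_PUT_DOUBLE			(1U << 2)	/* drop both references */

	static void io_cqring_add_event_and_put(struct io_kiocb *req, long ret,
						unsigned int flags,
						struct io_kiocb **nxt)
	{
		/* mark the link chain as failed according to the flags */
		if ((req->flags & REQ_F_LINK) &&
		    ((flags & IO_PUT_ERROR_ALWAYS) ||
		     ((flags & IO_PUT_ERROR_ON_NEGATIVE) && ret < 0)))
			req->flags |= REQ_F_FAIL_LINK;

		io_cqring_add_event(req, ret);

		/* drop the reference(s) the way the caller asked for */
		if (flags & IO_PUT_DOUBLE)
			io_double_put_req(req);
		else if (nxt)
			io_put_req_find_next(req, nxt);
		else
			io_put_req(req);
	}

A call site would then read e.g.

	io_cqring_add_event_and_put(req, ret, IO_PUT_ERROR_ON_NEGATIVE, nxt);

which makes it visible at the caller what the helper is going to do.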

-- 
Jens Axboe

