* [PATCHSET 0/2] Fix issue with (and clarify) linked timeouts
@ 2019-11-15  4:56 Jens Axboe
  2019-11-15  4:56 ` [PATCH 1/2] io_uring: cleanup return values from the queueing functions Jens Axboe
  2019-11-15  4:56 ` [PATCH 2/2] io_uring: fix sequencing issues with linked timeouts Jens Axboe
  0 siblings, 2 replies; 3+ messages in thread
From: Jens Axboe @ 2019-11-15  4:56 UTC (permalink / raw)
  To: io-uring; +Cc: asml.silence

The first patch is just a prep patch that cleans up the return values from
the various queueing functions, and the second one fixes sequencing
issues with linked timeouts, as well as making it so that we only support
one linked timeout in a chain (and only as the 2nd request). We can relax
this restriction later if it makes sense; for now I think we should just
play it safe. This doesn't mean that you can only submit a single request
and timeout per submission, just that requests that are linked with
IOSQE_LINK must have a parent request and then a timeout tied to that.
That's the intended use case anyway: a timeout is paired with one other
parent request.
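
To illustrate the intended use case, here's a minimal userspace sketch.
It's hedged: it assumes the liburing helpers io_uring_prep_readv() and
io_uring_prep_link_timeout() are available, and it skips all error
handling. The parent request is marked with IOSQE_IO_LINK and the linked
timeout is queued as the very next SQE:

#include <liburing.h>
#include <sys/uio.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	/* /dev/zero is just a stand-in target for the example */
	int fd = open("/dev/zero", O_RDONLY), i;

	io_uring_queue_init(8, &ring, 0);

	/* parent request: head of the link chain */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
	sqe->flags |= IOSQE_IO_LINK;

	/* linked timeout: queued directly after the request it covers */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);

	io_uring_submit(&ring);

	/* reap both completions: the parent request and the timeout */
	for (i = 0; i < 2; i++) {
		io_uring_wait_cqe(&ring, &cqe);
		printf("res=%d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}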

 fs/io_uring.c | 79 +++++++++++++++++++++++++++++----------------------
 1 file changed, 45 insertions(+), 34 deletions(-)

-- 
Jens Axboe




* [PATCH 1/2] io_uring: cleanup return values from the queueing functions
  2019-11-15  4:56 [PATCHSET 0/2] Fix issue with (and clarify) linked timeouts Jens Axboe
@ 2019-11-15  4:56 ` Jens Axboe
  2019-11-15  4:56 ` [PATCH 2/2] io_uring: fix sequencing issues with linked timeouts Jens Axboe
  1 sibling, 0 replies; 3+ messages in thread
From: Jens Axboe @ 2019-11-15  4:56 UTC (permalink / raw)
  To: io-uring; +Cc: asml.silence, Jens Axboe

__io_queue_sqe(), io_queue_sqe(), and io_queue_link_head() all return
0 or an error, but the callers don't care since the errors are handled
inline. Clean these up and just make them void.

Signed-off-by: Jens Axboe <[email protected]>
---
 fs/io_uring.c | 28 ++++++++++++----------------
 1 file changed, 12 insertions(+), 16 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5ad652fa24b8..b9965f2f69ca 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2825,7 +2825,7 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req,
 	return nxt;
 }
 
-static int __io_queue_sqe(struct io_kiocb *req)
+static void __io_queue_sqe(struct io_kiocb *req)
 {
 	enum hrtimer_mode mode;
 	struct io_kiocb *nxt;
@@ -2870,7 +2870,7 @@ static int __io_queue_sqe(struct io_kiocb *req)
 			if (nxt)
 				io_queue_linked_timeout(nxt, &ts, &mode);
 
-			return 0;
+			return;
 		}
 	}
 
@@ -2892,11 +2892,9 @@ static int __io_queue_sqe(struct io_kiocb *req)
 			req->flags |= REQ_F_FAIL_LINK;
 		io_put_req(req);
 	}
-
-	return ret;
 }
 
-static int io_queue_sqe(struct io_kiocb *req)
+static void io_queue_sqe(struct io_kiocb *req)
 {
 	int ret;
 
@@ -2906,20 +2904,20 @@ static int io_queue_sqe(struct io_kiocb *req)
 			io_cqring_add_event(req, ret);
 			io_double_put_req(req);
 		}
-		return 0;
-	}
-
-	return __io_queue_sqe(req);
+	} else
+		__io_queue_sqe(req);
 }
 
-static int io_queue_link_head(struct io_kiocb *req, struct io_kiocb *shadow)
+static void io_queue_link_head(struct io_kiocb *req, struct io_kiocb *shadow)
 {
 	int ret;
 	int need_submit = false;
 	struct io_ring_ctx *ctx = req->ctx;
 
-	if (!shadow)
-		return io_queue_sqe(req);
+	if (!shadow) {
+		io_queue_sqe(req);
+		return;
+	}
 
 	/*
 	 * Mark the first IO in link list as DRAIN, let all the following
@@ -2933,7 +2931,7 @@ static int io_queue_link_head(struct io_kiocb *req, struct io_kiocb *shadow)
 			io_cqring_add_event(req, ret);
 			io_double_put_req(req);
 			__io_free_req(shadow);
-			return 0;
+			return;
 		}
 	} else {
 		/*
@@ -2950,9 +2948,7 @@ static int io_queue_link_head(struct io_kiocb *req, struct io_kiocb *shadow)
 	spin_unlock_irq(&ctx->completion_lock);
 
 	if (need_submit)
-		return __io_queue_sqe(req);
-
-	return 0;
+		__io_queue_sqe(req);
 }
 
 #define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
-- 
2.24.0



* [PATCH 2/2] io_uring: fix sequencing issues with linked timeouts
  2019-11-15  4:56 [PATCHSET 0/2] Fix issue with (and clarify) linked timeouts Jens Axboe
  2019-11-15  4:56 ` [PATCH 1/2] io_uring: cleanup return values from the queueing functions Jens Axboe
@ 2019-11-15  4:56 ` Jens Axboe
  1 sibling, 0 replies; 3+ messages in thread
From: Jens Axboe @ 2019-11-15  4:56 UTC (permalink / raw)
  To: io-uring; +Cc: asml.silence, Jens Axboe

We can't easily support multiple linked timeouts in a chain, and it's
not a given that it even makes sense to do so. For now, make it
explicitly illegal: we'll error the head request with -ENOLINK, and the
rest of the chain will get cancelled as it does with other errors.
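
To make the restriction concrete, here's a hedged sketch of a chain that
now gets failed (liburing helpers assumed; ring, buffer, and error
handling setup omitted). The linked timeout is the third request in the
chain rather than the one directly after the head, so the head is
completed with -ENOLINK and the rest of the chain is cancelled:

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_readv(sqe, fd, &iov, 1, 0);	/* head of the chain */
	sqe->flags |= IOSQE_IO_LINK;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_writev(sqe, fd, &iov, 1, 0);	/* dependent request */
	sqe->flags |= IOSQE_IO_LINK;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);	/* too late in the chain */

	io_uring_submit(&ring);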

Ensure that we handle the sequencing of linked timeouts correctly as
well. The loop in io_req_link_next() isn't necessary, and it will never
do anything, because we can't have both REQ_F_LINK_TIMEOUT set and
dependent links at the same time.

Reported-by: Pavel Begunkov <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
---
 fs/io_uring.c | 51 +++++++++++++++++++++++++++++++++------------------
 1 file changed, 33 insertions(+), 18 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index b9965f2f69ca..c148a56b5894 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -345,6 +345,7 @@ struct io_kiocb {
 #define REQ_F_MUST_PUNT		4096	/* must be punted even for NONBLOCK */
 #define REQ_F_INFLIGHT		8192	/* on inflight list */
 #define REQ_F_COMP_LOCKED	16384	/* completion under lock */
+#define REQ_F_FREE_SQE		32768
 	u64			user_data;
 	u32			result;
 	u32			sequence;
@@ -858,30 +859,26 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
 	 * safe side.
 	 */
 	nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
-	while (nxt) {
+	if (nxt) {
 		list_del_init(&nxt->list);
 		if (!list_empty(&req->link_list)) {
 			INIT_LIST_HEAD(&nxt->link_list);
 			list_splice(&req->link_list, &nxt->link_list);
 			nxt->flags |= REQ_F_LINK;
+		} else if (req->flags & REQ_F_LINK_TIMEOUT) {
+			wake_ev = io_link_cancel_timeout(nxt);
+			nxt = NULL;
 		}
 
 		/*
 		 * If we're in async work, we can continue processing the chain
 		 * in this context instead of having to queue up new async work.
 		 */
-		if (req->flags & REQ_F_LINK_TIMEOUT) {
-			wake_ev = io_link_cancel_timeout(nxt);
-
-			/* we dropped this link, get next */
-			nxt = list_first_entry_or_null(&req->link_list,
-							struct io_kiocb, list);
-		} else if (nxtptr && io_wq_current_is_worker()) {
-			*nxtptr = nxt;
-			break;
-		} else {
-			io_queue_async_work(nxt);
-			break;
+		if (nxt) {
+			if (nxtptr && io_wq_current_is_worker())
+				*nxtptr = nxt;
+			else
+				io_queue_async_work(nxt);
 		}
 	}
 
@@ -906,6 +903,9 @@ static void io_fail_links(struct io_kiocb *req)
 
 		trace_io_uring_fail_link(req, link);
 
+		if (req->flags & REQ_F_FREE_SQE)
+			kfree(link->submit.sqe);
+
 		if ((req->flags & REQ_F_LINK_TIMEOUT) &&
 		    link->submit.sqe->opcode == IORING_OP_LINK_TIMEOUT) {
 			io_link_cancel_timeout(link);
@@ -2465,7 +2465,7 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	    sqe->cancel_flags)
 		return -EINVAL;
 
-	io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), NULL);
+	io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), nxt);
 	return 0;
 }
 
@@ -2741,10 +2741,12 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	 */
 	if (!list_empty(&req->list)) {
 		prev = list_entry(req->list.prev, struct io_kiocb, link_list);
-		if (refcount_inc_not_zero(&prev->refs))
+		if (refcount_inc_not_zero(&prev->refs)) {
+			prev->flags &= ~REQ_F_LINK_TIMEOUT;
 			list_del_init(&req->list);
-		else
+		} else {
 			prev = NULL;
+		}
 	}
 
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
@@ -2914,6 +2916,10 @@ static void io_queue_link_head(struct io_kiocb *req, struct io_kiocb *shadow)
 	int need_submit = false;
 	struct io_ring_ctx *ctx = req->ctx;
 
+	if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
+		ret = -ENOLINK;
+		goto err;
+	}
 	if (!shadow) {
 		io_queue_sqe(req);
 		return;
@@ -2928,9 +2934,13 @@ static void io_queue_link_head(struct io_kiocb *req, struct io_kiocb *shadow)
 	ret = io_req_defer(req);
 	if (ret) {
 		if (ret != -EIOCBQUEUED) {
+err:
 			io_cqring_add_event(req, ret);
-			io_double_put_req(req);
-			__io_free_req(shadow);
+			/* need to ensure we fail links */
+			io_put_req(req);
+			io_put_req(req);
+			if (shadow)
+				__io_free_req(shadow);
 			return;
 		}
 	} else {
@@ -2987,6 +2997,11 @@ static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
 	if (*link) {
 		struct io_kiocb *prev = *link;
 
+		/* For now, linked timeouts must be first linked request */
+		if (READ_ONCE(s->sqe->opcode) == IORING_OP_LINK_TIMEOUT &&
+		    !list_empty(&prev->link_list))
+			prev->flags |= REQ_F_FAIL_LINK | REQ_F_FREE_SQE;
+
 		sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
 		if (!sqe_copy) {
 			ret = -EAGAIN;
-- 
2.24.0


