public inbox for [email protected]
* [PATCH for-next 0/6] cleanup task/files cancel
@ 2020-11-06 13:00 Pavel Begunkov
  2020-11-06 13:00 ` [PATCH 1/6] io_uring: simplify io_task_match() Pavel Begunkov
                   ` (6 more replies)
  0 siblings, 7 replies; 8+ messages in thread
From: Pavel Begunkov @ 2020-11-06 13:00 UTC (permalink / raw)
  To: Jens Axboe, io-uring

This unifies cancellation/matching/etc., so we can kill off the zoo of
functions that has been getting out of hand.

Jens, [3/6] changes the behaviour, but as last time it's less
restrictive and doesn't kill what we don't need to kill. Still, I'd
prefer you to check that assumption in light of the cancel changes
you've done.

Based on for-5.11/io_uring + "io_uring: fix link lookup racing with link
timeout", should apply ok after you merge everything.

Pavel Begunkov (6):
  io_uring: simplify io_task_match()
  io_uring: add a {task,files} pair matching helper
  io_uring: cancel only requests of current task
  io_uring: don't iterate io_uring_cancel_files()
  io_uring: pass files into kill timeouts/poll
  io_uring: always batch cancel in *cancel_files()

 fs/io-wq.c    |  10 --
 fs/io-wq.h    |   1 -
 fs/io_uring.c | 260 ++++++++++++++------------------------------------
 3 files changed, 69 insertions(+), 202 deletions(-)

-- 
2.24.0



* [PATCH 1/6] io_uring: simplify io_task_match()
  2020-11-06 13:00 [PATCH for-next 0/6] cleanup task/files cancel Pavel Begunkov
@ 2020-11-06 13:00 ` Pavel Begunkov
  2020-11-06 13:00 ` [PATCH 2/6] io_uring: add a {task,files} pair matching helper Pavel Begunkov
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 8+ messages in thread
From: Pavel Begunkov @ 2020-11-06 13:00 UTC (permalink / raw)
  To: Jens Axboe, io-uring

If IORING_SETUP_SQPOLL is set, all requests belong to the corresponding
SQPOLL task, so skip the task check in that case and always match.

Signed-off-by: Pavel Begunkov <[email protected]>
---
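For readability, the resulting helper, pieced together from the hunks in
this series:

  static bool io_task_match(struct io_kiocb *req, struct task_struct *tsk)
  {
          struct io_ring_ctx *ctx = req->ctx;

          /* a NULL task matches any request */
          if (!tsk || req->task == tsk)
                  return true;
          /* with SQPOLL all requests belong to the SQPOLL task, so match */
          return (ctx->flags & IORING_SETUP_SQPOLL);
  }
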
 fs/io_uring.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 568ece369a1a..74bb5cb9697c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1562,11 +1562,7 @@ static bool io_task_match(struct io_kiocb *req, struct task_struct *tsk)
 
 	if (!tsk || req->task == tsk)
 		return true;
-	if (ctx->flags & IORING_SETUP_SQPOLL) {
-		if (ctx->sq_data && req->task == ctx->sq_data->thread)
-			return true;
-	}
-	return false;
+	return (ctx->flags & IORING_SETUP_SQPOLL);
 }
 
 /*
-- 
2.24.0



* [PATCH 2/6] io_uring: add a {task,files} pair matching helper
  2020-11-06 13:00 [PATCH for-next 0/6] cleanup task/files cancel Pavel Begunkov
  2020-11-06 13:00 ` [PATCH 1/6] io_uring: simplify io_task_match() Pavel Begunkov
@ 2020-11-06 13:00 ` Pavel Begunkov
  2020-11-06 13:00 ` [PATCH 3/6] io_uring: cancel only requests of current task Pavel Begunkov
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 8+ messages in thread
From: Pavel Begunkov @ 2020-11-06 13:00 UTC (permalink / raw)
  To: Jens Axboe, io-uring

Add io_match_task() that matches both task and files.

Signed-off-by: Pavel Begunkov <[email protected]>
---
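A note on semantics, derived from the helper added below -- NULL
arguments act as wildcards:

  io_match_task(req, NULL, NULL);  /* true for any request */
  io_match_task(req, tsk, NULL);   /* true iff req belongs to tsk */
  io_match_task(req, tsk, files);  /* true iff req belongs to tsk and a
                                      request in its link chain pins files */
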
 fs/io_uring.c | 48 ++++++++++++++++++++++--------------------------
 1 file changed, 22 insertions(+), 26 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 74bb5cb9697c..14e671c909ed 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1037,6 +1037,26 @@ static inline void io_clean_op(struct io_kiocb *req)
 		__io_clean_op(req);
 }
 
+static bool io_match_task(struct io_kiocb *head,
+			  struct task_struct *task,
+			  struct files_struct *files)
+{
+	struct io_kiocb *req;
+
+	if (task && head->task != task)
+		return false;
+	if (!files)
+		return true;
+
+	io_for_each_link(req, head) {
+		if ((req->flags & REQ_F_WORK_INITIALIZED) &&
+		    (req->work.flags & IO_WQ_WORK_FILES) &&
+		    req->work.identity->files == files)
+			return true;
+	}
+	return false;
+}
+
 static void io_sq_thread_drop_mm_files(void)
 {
 	struct files_struct *files = current->files;
@@ -1686,27 +1706,6 @@ static void io_cqring_mark_overflow(struct io_ring_ctx *ctx)
 	}
 }
 
-static inline bool __io_match_files(struct io_kiocb *req,
-				    struct files_struct *files)
-{
-	return ((req->flags & REQ_F_WORK_INITIALIZED) &&
-	        (req->work.flags & IO_WQ_WORK_FILES)) &&
-		req->work.identity->files == files;
-}
-
-static bool io_match_files(struct io_kiocb *head, struct files_struct *files)
-{
-	struct io_kiocb *req;
-
-	if (!files)
-		return true;
-	io_for_each_link(req, head) {
-		if (__io_match_files(req, files))
-			return true;
-	}
-	return false;
-}
-
 /* Returns true if there are no backlogged entries after the flush */
 static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 				     struct task_struct *tsk,
@@ -1734,9 +1733,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 
 	cqe = NULL;
 	list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
-		if (tsk && req->task != tsk)
-			continue;
-		if (!io_match_files(req, files))
+		if (!io_match_task(req, tsk, files))
 			continue;
 
 		cqe = io_get_cqring(ctx);
@@ -8776,8 +8773,7 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
 
 	spin_lock_irq(&ctx->completion_lock);
 	list_for_each_entry_reverse(de, &ctx->defer_list, list) {
-		if (io_task_match(de->req, task) &&
-		    io_match_files(de->req, files)) {
+		if (io_match_task(de->req, task, files)) {
 			list_cut_position(&list, &ctx->defer_list, &de->list);
 			break;
 		}
-- 
2.24.0



* [PATCH 3/6] io_uring: cancel only requests of current task
  2020-11-06 13:00 [PATCH for-next 0/6] cleanup task/files cancel Pavel Begunkov
  2020-11-06 13:00 ` [PATCH 1/6] io_uring: simplify io_task_match() Pavel Begunkov
  2020-11-06 13:00 ` [PATCH 2/6] io_uring: add a {task,files} pair matching helper Pavel Begunkov
@ 2020-11-06 13:00 ` Pavel Begunkov
  2020-11-06 13:00 ` [PATCH 4/6] io_uring: don't iterate io_uring_cancel_files() Pavel Begunkov
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 8+ messages in thread
From: Pavel Begunkov @ 2020-11-06 13:00 UTC (permalink / raw)
  To: Jens Axboe, io-uring

io_uring_cancel_files() cancels all requests that match files regardless
of task. There is no real need for that, so cancel only requests of the
specified task. That also covers the SQPOLL case, since the caller
already switches the task to the SQPOLL task.

Signed-off-by: Pavel Begunkov <[email protected]>
---
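For clarity, the caller-side change in io_uring_cancel_task_requests()
this allows (taken from the hunk below):

  /* before */
  if (files)
          io_cancel_defer_files(ctx, NULL, files);
  else
          io_cancel_defer_files(ctx, task, NULL);

  /* after: filter on both at once */
  io_cancel_defer_files(ctx, task, files);
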
 fs/io_uring.c | 23 +++++------------------
 1 file changed, 5 insertions(+), 18 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 14e671c909ed..8716a864b8b5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8654,14 +8654,6 @@ static int io_uring_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static bool io_wq_files_match(struct io_wq_work *work, void *data)
-{
-	struct files_struct *files = data;
-
-	return !files || ((work->flags & IO_WQ_WORK_FILES) &&
-				work->identity->files == files);
-}
-
 /*
  * Returns true if 'preq' is the link parent of 'req'
  */
@@ -8794,21 +8786,20 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
  * Returns true if we found and killed one or more files pinning requests
  */
 static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
+				  struct task_struct *task,
 				  struct files_struct *files)
 {
 	if (list_empty_careful(&ctx->inflight_list))
 		return false;
 
-	/* cancel all at once, should be faster than doing it one by one*/
-	io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
-
 	while (!list_empty_careful(&ctx->inflight_list)) {
 		struct io_kiocb *cancel_req = NULL, *req;
 		DEFINE_WAIT(wait);
 
 		spin_lock_irq(&ctx->inflight_lock);
 		list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
-			if (files && (req->work.flags & IO_WQ_WORK_FILES) &&
+			if (req->task == task &&
+			    (req->work.flags & IO_WQ_WORK_FILES) &&
 			    req->work.identity->files != files)
 				continue;
 			/* req is being completed, ignore */
@@ -8851,7 +8842,7 @@ static bool __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 {
 	bool ret;
 
-	ret = io_uring_cancel_files(ctx, files);
+	ret = io_uring_cancel_files(ctx, task, files);
 	if (!files) {
 		enum io_wq_cancel cret;
 
@@ -8890,11 +8881,7 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 		io_sq_thread_park(ctx->sq_data);
 	}
 
-	if (files)
-		io_cancel_defer_files(ctx, NULL, files);
-	else
-		io_cancel_defer_files(ctx, task, NULL);
-
+	io_cancel_defer_files(ctx, task, files);
 	io_cqring_overflow_flush(ctx, true, task, files);
 
 	while (__io_uring_cancel_task_requests(ctx, task, files)) {
-- 
2.24.0



* [PATCH 4/6] io_uring: don't iterate io_uring_cancel_files()
  2020-11-06 13:00 [PATCH for-next 0/6] cleanup task/files cancel Pavel Begunkov
                   ` (2 preceding siblings ...)
  2020-11-06 13:00 ` [PATCH 3/6] io_uring: cancel only requests of current task Pavel Begunkov
@ 2020-11-06 13:00 ` Pavel Begunkov
  2020-11-06 13:00 ` [PATCH 5/6] io_uring: pass files into kill timeouts/poll Pavel Begunkov
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 8+ messages in thread
From: Pavel Begunkov @ 2020-11-06 13:00 UTC (permalink / raw)
  To: Jens Axboe, io-uring

io_uring_cancel_files() guarantees to cancel all matching requests, so
there is no need to call it in a loop. Move it up the call chain into
io_uring_cancel_task_requests().

Signed-off-by: Pavel Begunkov <[email protected]>
---
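The resulting top-level flow in io_uring_cancel_task_requests(), pieced
together from the hunks below:

  io_cancel_defer_files(ctx, task, files);
  io_cqring_overflow_flush(ctx, true, task, files);
  io_uring_cancel_files(ctx, task, files);  /* no longer called in a loop */

  if (!files)
          __io_uring_cancel_task_requests(ctx, task);  /* loops internally */
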
 fs/io_uring.c | 34 ++++++++++++----------------------
 1 file changed, 12 insertions(+), 22 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8716a864b8b5..22ac3ce57819 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8782,16 +8782,10 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
 	}
 }
 
-/*
- * Returns true if we found and killed one or more files pinning requests
- */
-static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
+static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 				  struct task_struct *task,
 				  struct files_struct *files)
 {
-	if (list_empty_careful(&ctx->inflight_list))
-		return false;
-
 	while (!list_empty_careful(&ctx->inflight_list)) {
 		struct io_kiocb *cancel_req = NULL, *req;
 		DEFINE_WAIT(wait);
@@ -8824,8 +8818,6 @@ static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
 		schedule();
 		finish_wait(&ctx->inflight_wait, &wait);
 	}
-
-	return true;
 }
 
 static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
@@ -8836,15 +8828,12 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
 	return io_task_match(req, task);
 }
 
-static bool __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
-					    struct task_struct *task,
-					    struct files_struct *files)
+static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
+					    struct task_struct *task)
 {
-	bool ret;
-
-	ret = io_uring_cancel_files(ctx, task, files);
-	if (!files) {
+	while (1) {
 		enum io_wq_cancel cret;
+		bool ret = false;
 
 		cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, task, true);
 		if (cret != IO_WQ_CANCEL_NOTFOUND)
@@ -8860,9 +8849,11 @@ static bool __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 
 		ret |= io_poll_remove_all(ctx, task);
 		ret |= io_kill_timeouts(ctx, task);
+		if (!ret)
+			break;
+		io_run_task_work();
+		cond_resched();
 	}
-
-	return ret;
 }
 
 /*
@@ -8883,11 +8874,10 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 
 	io_cancel_defer_files(ctx, task, files);
 	io_cqring_overflow_flush(ctx, true, task, files);
+	io_uring_cancel_files(ctx, task, files);
 
-	while (__io_uring_cancel_task_requests(ctx, task, files)) {
-		io_run_task_work();
-		cond_resched();
-	}
+	if (!files)
+		__io_uring_cancel_task_requests(ctx, task);
 
 	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
 		atomic_dec(&task->io_uring->in_idle);
-- 
2.24.0



* [PATCH 5/6] io_uring: pass files into kill timeouts/poll
  2020-11-06 13:00 [PATCH for-next 0/6] cleanup task/files cancel Pavel Begunkov
                   ` (3 preceding siblings ...)
  2020-11-06 13:00 ` [PATCH 4/6] io_uring: don't iterate io_uring_cancel_files() Pavel Begunkov
@ 2020-11-06 13:00 ` Pavel Begunkov
  2020-11-06 13:00 ` [PATCH 6/6] io_uring: always batch cancel in *cancel_files() Pavel Begunkov
  2020-11-09 14:43 ` [PATCH for-next 0/6] cleanup task/files cancel Jens Axboe
  6 siblings, 0 replies; 8+ messages in thread
From: Pavel Begunkov @ 2020-11-06 13:00 UTC (permalink / raw)
  To: Jens Axboe, io-uring

Make io_poll_remove_all() and io_kill_timeouts() match against files as
well. This is a preparation patch; the new argument is not actually used
yet.

Signed-off-by: Pavel Begunkov <[email protected]>
---
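All callers updated here still pass NULL for files, so there is no
functional change yet; a rough sketch of how the argument is meant to be
used (the last form only appears in 6/6):

  io_kill_timeouts(ctx, NULL, NULL);    /* kill all timeouts */
  io_poll_remove_all(ctx, task, NULL);  /* all poll requests of task */
  io_kill_timeouts(ctx, task, files);   /* task's requests pinning files */
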
 fs/io_uring.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 22ac3ce57819..c93060149087 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1588,14 +1588,15 @@ static bool io_task_match(struct io_kiocb *req, struct task_struct *tsk)
 /*
  * Returns true if we found and killed one or more timeouts
  */
-static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk)
+static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
+			     struct files_struct *files)
 {
 	struct io_kiocb *req, *tmp;
 	int canceled = 0;
 
 	spin_lock_irq(&ctx->completion_lock);
 	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
-		if (io_task_match(req, tsk)) {
+		if (io_match_task(req, tsk, files)) {
 			io_kill_timeout(req);
 			canceled++;
 		}
@@ -5469,7 +5470,8 @@ static bool io_poll_remove_one(struct io_kiocb *req)
 /*
  * Returns true if we found and killed one or more poll requests
  */
-static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk)
+static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
+			       struct files_struct *files)
 {
 	struct hlist_node *tmp;
 	struct io_kiocb *req;
@@ -5481,7 +5483,7 @@ static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk)
 
 		list = &ctx->cancel_hash[i];
 		hlist_for_each_entry_safe(req, tmp, list, hash_node) {
-			if (io_task_match(req, tsk))
+			if (io_match_task(req, tsk, files))
 				posted += io_poll_remove_one(req);
 		}
 	}
@@ -8615,8 +8617,8 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 	percpu_ref_kill(&ctx->refs);
 	mutex_unlock(&ctx->uring_lock);
 
-	io_kill_timeouts(ctx, NULL);
-	io_poll_remove_all(ctx, NULL);
+	io_kill_timeouts(ctx, NULL, NULL);
+	io_poll_remove_all(ctx, NULL, NULL);
 
 	if (ctx->io_wq)
 		io_wq_cancel_all(ctx->io_wq);
@@ -8847,8 +8849,8 @@ static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 			}
 		}
 
-		ret |= io_poll_remove_all(ctx, task);
-		ret |= io_kill_timeouts(ctx, task);
+		ret |= io_poll_remove_all(ctx, task, NULL);
+		ret |= io_kill_timeouts(ctx, task, NULL);
 		if (!ret)
 			break;
 		io_run_task_work();
-- 
2.24.0



* [PATCH 6/6] io_uring: always batch cancel in *cancel_files()
  2020-11-06 13:00 [PATCH for-next 0/6] cleanup task/files cancel Pavel Begunkov
                   ` (4 preceding siblings ...)
  2020-11-06 13:00 ` [PATCH 5/6] io_uring: pass files into kill timeouts/poll Pavel Begunkov
@ 2020-11-06 13:00 ` Pavel Begunkov
  2020-11-09 14:43 ` [PATCH for-next 0/6] cleanup task/files cancel Jens Axboe
  6 siblings, 0 replies; 8+ messages in thread
From: Pavel Begunkov @ 2020-11-06 13:00 UTC (permalink / raw)
  To: Jens Axboe, io-uring

Instead of iterating over each request and cancelling it individually in
io_uring_cancel_files(), try to cancel all matching requests at once and
use ->inflight_list only to check whether there is anything left.

In many cases this should be faster, and it lets us reuse a lot of code
from the task cancellation path.

Signed-off-by: Pavel Begunkov <[email protected]>
---
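Condensed, each iteration of the reworked io_uring_cancel_files() loop
now batch-cancels everything that matches and only uses ->inflight_list
to decide when to stop:

  while (!list_empty_careful(&ctx->inflight_list)) {
          /* batch cancellation, reusing the task-cancel helpers */
          io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
          io_poll_remove_all(ctx, task, files);
          io_kill_timeouts(ctx, task, files);
          /* cancellations _may_ trigger task work */
          io_run_task_work();
          schedule();
  }
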
 fs/io-wq.c    |  10 ----
 fs/io-wq.h    |   1 -
 fs/io_uring.c | 135 ++++++++------------------------------------------
 3 files changed, 21 insertions(+), 125 deletions(-)

diff --git a/fs/io-wq.c b/fs/io-wq.c
index b53c055bea6a..f72d53848dcb 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -1078,16 +1078,6 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
 	return IO_WQ_CANCEL_NOTFOUND;
 }
 
-static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)
-{
-	return work == data;
-}
-
-enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
-{
-	return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork, false);
-}
-
 struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 {
 	int ret = -ENOMEM, node;
diff --git a/fs/io-wq.h b/fs/io-wq.h
index cba36f03c355..069496c6d4f9 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -129,7 +129,6 @@ static inline bool io_wq_is_hashed(struct io_wq_work *work)
 }
 
 void io_wq_cancel_all(struct io_wq *wq);
-enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
 
 typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
 
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c93060149087..a4146b1f50ef 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1576,15 +1576,6 @@ static void io_kill_timeout(struct io_kiocb *req)
 	}
 }
 
-static bool io_task_match(struct io_kiocb *req, struct task_struct *tsk)
-{
-	struct io_ring_ctx *ctx = req->ctx;
-
-	if (!tsk || req->task == tsk)
-		return true;
-	return (ctx->flags & IORING_SETUP_SQPOLL);
-}
-
 /*
  * Returns true if we found and killed one or more timeouts
  */
@@ -8656,108 +8647,31 @@ static int io_uring_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-/*
- * Returns true if 'preq' is the link parent of 'req'
- */
-static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req)
-{
-	struct io_kiocb *link;
-
-	io_for_each_link(link, preq->link) {
-		if (link == req)
-			return true;
-	}
-	return false;
-}
-
-/*
- * We're looking to cancel 'req' because it's holding on to our files, but
- * 'req' could be a link to another request. See if it is, and cancel that
- * parent request if so.
- */
-static bool io_poll_remove_link(struct io_ring_ctx *ctx, struct io_kiocb *req)
-{
-	struct hlist_node *tmp;
-	struct io_kiocb *preq;
-	bool found = false;
-	int i;
-
-	spin_lock_irq(&ctx->completion_lock);
-	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
-		struct hlist_head *list;
-
-		list = &ctx->cancel_hash[i];
-		hlist_for_each_entry_safe(preq, tmp, list, hash_node) {
-			found = io_match_link(preq, req);
-			if (found) {
-				io_poll_remove_one(preq);
-				break;
-			}
-		}
-	}
-	spin_unlock_irq(&ctx->completion_lock);
-	return found;
-}
-
-static bool io_timeout_remove_link(struct io_ring_ctx *ctx,
-				   struct io_kiocb *req)
-{
-	struct io_kiocb *preq;
-	bool found = false;
-
-	spin_lock_irq(&ctx->completion_lock);
-	list_for_each_entry(preq, &ctx->timeout_list, timeout.list) {
-		found = io_match_link(preq, req);
-		if (found) {
-			__io_timeout_cancel(preq);
-			break;
-		}
-	}
-	spin_unlock_irq(&ctx->completion_lock);
-	return found;
-}
+struct io_task_cancel {
+	struct task_struct *task;
+	struct files_struct *files;
+};
 
-static bool io_cancel_link_cb(struct io_wq_work *work, void *data)
+static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
 {
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+	struct io_task_cancel *cancel = data;
 	bool ret;
 
-	if (req->flags & REQ_F_LINK_TIMEOUT) {
+	if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) {
 		unsigned long flags;
 		struct io_ring_ctx *ctx = req->ctx;
 
 		/* protect against races with linked timeouts */
 		spin_lock_irqsave(&ctx->completion_lock, flags);
-		ret = io_match_link(req, data);
+		ret = io_match_task(req, cancel->task, cancel->files);
 		spin_unlock_irqrestore(&ctx->completion_lock, flags);
 	} else {
-		ret = io_match_link(req, data);
+		ret = io_match_task(req, cancel->task, cancel->files);
 	}
 	return ret;
 }
 
-static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
-{
-	enum io_wq_cancel cret;
-
-	/* cancel this particular work, if it's running */
-	cret = io_wq_cancel_work(ctx->io_wq, &req->work);
-	if (cret != IO_WQ_CANCEL_NOTFOUND)
-		return;
-
-	/* find links that hold this pending, cancel those */
-	cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_link_cb, req, true);
-	if (cret != IO_WQ_CANCEL_NOTFOUND)
-		return;
-
-	/* if we have a poll link holding this pending, cancel that */
-	if (io_poll_remove_link(ctx, req))
-		return;
-
-	/* final option, timeout link is holding this req pending */
-	io_timeout_remove_link(ctx, req);
-}
-
 static void io_cancel_defer_files(struct io_ring_ctx *ctx,
 				  struct task_struct *task,
 				  struct files_struct *files)
@@ -8789,8 +8703,10 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 				  struct files_struct *files)
 {
 	while (!list_empty_careful(&ctx->inflight_list)) {
-		struct io_kiocb *cancel_req = NULL, *req;
+		struct io_task_cancel cancel = { .task = task, .files = NULL, };
+		struct io_kiocb *req;
 		DEFINE_WAIT(wait);
+		bool found = false;
 
 		spin_lock_irq(&ctx->inflight_lock);
 		list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
@@ -8798,23 +8714,21 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 			    (req->work.flags & IO_WQ_WORK_FILES) &&
 			    req->work.identity->files != files)
 				continue;
-			/* req is being completed, ignore */
-			if (!refcount_inc_not_zero(&req->refs))
-				continue;
-			cancel_req = req;
+			found = true;
 			break;
 		}
-		if (cancel_req)
+		if (found)
 			prepare_to_wait(&ctx->inflight_wait, &wait,
 						TASK_UNINTERRUPTIBLE);
 		spin_unlock_irq(&ctx->inflight_lock);
 
 		/* We need to keep going until we don't find a matching req */
-		if (!cancel_req)
+		if (!found)
 			break;
-		/* cancel this request, or head link requests */
-		io_attempt_cancel(ctx, cancel_req);
-		io_put_req(cancel_req);
+
+		io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
+		io_poll_remove_all(ctx, task, files);
+		io_kill_timeouts(ctx, task, files);
 		/* cancellations _may_ trigger task work */
 		io_run_task_work();
 		schedule();
@@ -8822,22 +8736,15 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 	}
 }
 
-static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
-{
-	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
-	struct task_struct *task = data;
-
-	return io_task_match(req, task);
-}
-
 static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 					    struct task_struct *task)
 {
 	while (1) {
+		struct io_task_cancel cancel = { .task = task, .files = NULL, };
 		enum io_wq_cancel cret;
 		bool ret = false;
 
-		cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, task, true);
+		cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
 		if (cret != IO_WQ_CANCEL_NOTFOUND)
 			ret = true;
 
-- 
2.24.0



* Re: [PATCH for-next 0/6] cleanup task/files cancel
  2020-11-06 13:00 [PATCH for-next 0/6] cleanup task/files cancel Pavel Begunkov
                   ` (5 preceding siblings ...)
  2020-11-06 13:00 ` [PATCH 6/6] io_uring: always batch cancel in *cancel_files() Pavel Begunkov
@ 2020-11-09 14:43 ` Jens Axboe
  6 siblings, 0 replies; 8+ messages in thread
From: Jens Axboe @ 2020-11-09 14:43 UTC (permalink / raw)
  To: Pavel Begunkov, io-uring

On 11/6/20 6:00 AM, Pavel Begunkov wrote:
> This unifies cancellation/matching/etc., so we can kill off the zoo of
> functions that has been getting out of hand.
> 
> Jens, [3/6] changes the behaviour, but as last time it's less
> restrictive and doesn't kill what we don't need to kill. Still, I'd
> prefer you to check that assumption in light of the cancel changes
> you've done.
> 
> Based on for-5.11/io_uring + "io_uring: fix link lookup racing with link
> timeout", should apply ok after you merge everything.
> 
> Pavel Begunkov (6):
>   io_uring: simplify io_task_match()
>   io_uring: add a {task,files} pair matching helper
>   io_uring: cancel only requests of current task
>   io_uring: don't iterate io_uring_cancel_files()
>   io_uring: pass files into kill timeouts/poll
>   io_uring: always batch cancel in *cancel_files()
> 
>  fs/io-wq.c    |  10 --
>  fs/io-wq.h    |   1 -
>  fs/io_uring.c | 260 ++++++++++++++------------------------------------
>  3 files changed, 69 insertions(+), 202 deletions(-)

Applied, thanks.

-- 
Jens Axboe


