* [PATCHSET 0/3] io_uring: cancel pending work if task exits
From: Jens Axboe @ 2020-02-09 17:12 UTC
To: io-uring
We've tried something like this before, but it covered too much and
would not work for a ring shared across processes. This series simply
keys the cancellation off the pid stored in the work item, so we only
cancel work for the task that is exiting.
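In outline, the series records which task queued each work item and
then cancels only that task's work when it exits; condensed from the
patches below, using the same names:

	/* at prep time (patch 3): remember which task queued this work */
	req->work.task_pid = task_pid_vnr(current);

	/* at ->flush() time (patch 3): if the task is exiting, cancel its work */
	if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
		io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current));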
--
Jens Axboe
* [PATCH 1/3] io-wq: make io_wqe_cancel_work() take a match handler
From: Jens Axboe @ 2020-02-09 17:12 UTC
To: io-uring; +Cc: Jens Axboe
We want to use the cancel functionality for more than canceling a
specific work item. Instead of matching on the work address manually,
let a match handler tell us whether we have found the right work item.
No functional changes in this patch.
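As a condensed view of the pattern (names match the diff below, nothing
new beyond it), a cancel request now bundles a predicate with its
context, and the old exact-work cancel becomes just one such match;
patch 2 plugs a pid-based match into the same hook:

	struct work_match {
		bool (*fn)(struct io_wq_work *, void *data);	/* "is this the work we want?" */
		void *data;					/* context handed to the callback */
	};

	/* preserves the existing io_wq_cancel_work() behaviour */
	static bool io_wq_work_match(struct io_wq_work *work, void *data)
	{
		return work == data;
	}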
Signed-off-by: Jens Axboe <[email protected]>
---
fs/io-wq.c | 33 ++++++++++++++++++++++-----------
1 file changed, 22 insertions(+), 11 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 58b1891bcfe5..4889b42308ac 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -940,17 +940,19 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
return ret;
}
+struct work_match {
+ bool (*fn)(struct io_wq_work *, void *data);
+ void *data;
+};
+
static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
- struct io_wq_work *work = data;
+ struct work_match *match = data;
unsigned long flags;
bool ret = false;
- if (worker->cur_work != work)
- return false;
-
spin_lock_irqsave(&worker->lock, flags);
- if (worker->cur_work == work &&
+ if (match->fn(worker->cur_work, match->data) &&
!(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL)) {
send_sig(SIGINT, worker->task, 1);
ret = true;
@@ -961,15 +963,13 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
}
static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
- struct io_wq_work *cwork)
+ struct work_match *match)
{
struct io_wq_work_node *node, *prev;
struct io_wq_work *work;
unsigned long flags;
bool found = false;
- cwork->flags |= IO_WQ_WORK_CANCEL;
-
/*
* First check pending list, if we're lucky we can just remove it
* from there. CANCEL_OK means that the work is returned as-new,
@@ -979,7 +979,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
wq_list_for_each(node, prev, &wqe->work_list) {
work = container_of(node, struct io_wq_work, list);
- if (work == cwork) {
+ if (match->fn(work, match->data)) {
wq_node_del(&wqe->work_list, node, prev);
found = true;
break;
@@ -1000,20 +1000,31 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
* completion will run normally in this case.
*/
rcu_read_lock();
- found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, cwork);
+ found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
rcu_read_unlock();
return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
}
+static bool io_wq_work_match(struct io_wq_work *work, void *data)
+{
+ return work == data;
+}
+
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
{
+ struct work_match match = {
+ .fn = io_wq_work_match,
+ .data = cwork
+ };
enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
int node;
+ cwork->flags |= IO_WQ_WORK_CANCEL;
+
for_each_node(node) {
struct io_wqe *wqe = wq->wqes[node];
- ret = io_wqe_cancel_work(wqe, cwork);
+ ret = io_wqe_cancel_work(wqe, &match);
if (ret != IO_WQ_CANCEL_NOTFOUND)
break;
}
--
2.25.0
* [PATCH 2/3] io-wq: add io_wq_cancel_pid() to cancel based on a specific pid
From: Jens Axboe @ 2020-02-09 17:12 UTC
To: io-uring; +Cc: Jens Axboe
Add a helper that allows the caller to cancel work based on the pid of
the task it belongs to. This allows io_uring to cancel work from a
given task or thread when it exits.
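For illustration, a caller-side sketch of the new helper (the real
io_uring call site is added in patch 3, which is where the arguments
here come from); the return codes are the existing io_wq_cancel values:

	enum io_wq_cancel ret;

	ret = io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current));
	/*
	 * IO_WQ_CANCEL_OK:       work was found on the pending list and
	 *                        removed before it ran
	 * IO_WQ_CANCEL_RUNNING:  a worker was signalled, completion runs
	 *                        normally
	 * IO_WQ_CANCEL_NOTFOUND: no pending or running work matched this pid
	 */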
Signed-off-by: Jens Axboe <[email protected]>
---
fs/io-wq.c | 29 +++++++++++++++++++++++++++++
fs/io-wq.h | 2 ++
2 files changed, 31 insertions(+)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 4889b42308ac..9317c1a075eb 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -1032,6 +1032,35 @@ enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
return ret;
}
+static bool io_wq_pid_match(struct io_wq_work *work, void *data)
+{
+ pid_t pid = (pid_t) (unsigned long) data;
+
+ if (work)
+ return work->task_pid == pid;
+ return false;
+}
+
+enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid)
+{
+ struct work_match match = {
+ .fn = io_wq_pid_match,
+ .data = (void *) (unsigned long) pid
+ };
+ enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
+ int node;
+
+ for_each_node(node) {
+ struct io_wqe *wqe = wq->wqes[node];
+
+ ret = io_wqe_cancel_work(wqe, &match);
+ if (ret != IO_WQ_CANCEL_NOTFOUND)
+ break;
+ }
+
+ return ret;
+}
+
struct io_wq_flush_data {
struct io_wq_work work;
struct completion done;
diff --git a/fs/io-wq.h b/fs/io-wq.h
index f152ba677d8f..ccc7d84af57d 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -76,6 +76,7 @@ struct io_wq_work {
const struct cred *creds;
struct fs_struct *fs;
unsigned flags;
+ pid_t task_pid;
};
#define INIT_IO_WORK(work, _func) \
@@ -109,6 +110,7 @@ void io_wq_flush(struct io_wq *wq);
void io_wq_cancel_all(struct io_wq *wq);
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
+enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid);
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
--
2.25.0
* [PATCH 3/3] io_uring: cancel pending async work if task exits
From: Jens Axboe @ 2020-02-09 17:12 UTC
To: io-uring; +Cc: Jens Axboe
Normally we cancel all work we track, but untracked work could leave
the async worker behind until that work completes. That is functionally
fine, but it leaves resources pending after the task has gone.

Cancel work that this task queued up when it goes away.
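For context, the flush hook after this patch looks roughly as follows
(reconstructed from the diff below). Note that ->flush() also runs for
a plain close() of the ring fd, hence the explicit exit check:

	static int io_uring_flush(struct file *file, void *data)
	{
		struct io_ring_ctx *ctx = file->private_data;

		io_uring_cancel_files(ctx, data);

		/* only cancel this task's pending async work if the task
		 * itself is going away, not on a regular close() */
		if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
			io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current));

		return 0;
	}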
Signed-off-by: Jens Axboe <[email protected]>
---
fs/io_uring.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 24ebd5714bf9..bd5ac9a6677f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -925,6 +925,7 @@ static inline void io_req_work_grab_env(struct io_kiocb *req,
}
spin_unlock(&current->fs->lock);
}
+ req->work.task_pid = task_pid_vnr(current);
}
static inline void io_req_work_drop_env(struct io_kiocb *req)
@@ -6474,6 +6475,13 @@ static int io_uring_flush(struct file *file, void *data)
struct io_ring_ctx *ctx = file->private_data;
io_uring_cancel_files(ctx, data);
+
+ /*
+ * If the task is going away, cancel work it may have pending
+ */
+ if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
+ io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current));
+
return 0;
}
--
2.25.0