* [PATCH 1/2] io_uring: lazy get task
2020-06-15 7:33 [PATCH 0/2] don't use pid for request cancellation Pavel Begunkov
@ 2020-06-15 7:33 ` Pavel Begunkov
2020-06-15 7:33 ` [PATCH 2/2] io_uring: cancel by ->task not pid Pavel Begunkov
2020-06-15 15:04 ` [PATCH 0/2] don't use pid for request cancellation Jens Axboe
2 siblings, 0 replies; 5+ messages in thread
From: Pavel Begunkov @ 2020-06-15 7:33 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-kernel; +Cc: ebiederm
req->task will be used in multiple places, so refcount-pin it lazily with
the newly introduced io_{get,put}_req_task() helpers. We always need a
valid ->task for cancellation reasons, but don't care about pinning it in
some cases. That's why io_init_req() sets req->task unconditionally, and
the get/put laziness is tracked with a flag.
This also removes the use of @current from io_arm_poll_handler(),
io_poll_add_prep(), etc., but doesn't change observable behaviour.
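As a rough illustration of the pin-once idiom, here is a minimal userspace
sketch (the struct and helper names below are invented for the example and
are not the io_uring code; the real helpers operate on io_kiocb and
task_struct):

/*
 * Userspace sketch of the lazy "pin once" idiom: the first pin takes a
 * reference and sets a flag, later pins are no-ops, and the unpin only
 * drops the reference if the flag was set.
 */
#include <stdatomic.h>
#include <stdio.h>

struct task {
	atomic_int refs;		/* stands in for the task_struct usage count */
};

struct request {
	struct task *task;		/* set at init time, like req->task = current */
	unsigned int flags;
};

#define REQ_TASK_PINNED	0x1u

/* Idempotent: only the first call takes a reference. */
static void req_pin_task(struct request *req)
{
	if (req->flags & REQ_TASK_PINNED)
		return;
	atomic_fetch_add(&req->task->refs, 1);	/* get_task_struct() stand-in */
	req->flags |= REQ_TASK_PINNED;
}

/* Not idempotent: it drops the reference but leaves the flag set, so it
 * must only be called once, on request teardown. */
static void req_unpin_task(struct request *req)
{
	if (req->flags & REQ_TASK_PINNED)
		atomic_fetch_sub(&req->task->refs, 1);	/* put_task_struct() stand-in */
}

int main(void)
{
	struct task t = { .refs = 1 };
	struct request req = { .task = &t, .flags = 0 };

	req_pin_task(&req);		/* takes one reference */
	req_pin_task(&req);		/* no-op, already pinned */
	req_unpin_task(&req);		/* drops the single reference */
	printf("refs = %d\n", atomic_load(&t.refs));	/* prints 1 */
	return 0;
}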
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 30 ++++++++++++++++++++++--------
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5b5cab6691d2..f05d2e45965e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -543,6 +543,7 @@ enum {
REQ_F_NO_FILE_TABLE_BIT,
REQ_F_QUEUE_TIMEOUT_BIT,
REQ_F_WORK_INITIALIZED_BIT,
+ REQ_F_TASK_PINNED_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
@@ -602,6 +603,8 @@ enum {
REQ_F_QUEUE_TIMEOUT = BIT(REQ_F_QUEUE_TIMEOUT_BIT),
/* io_wq_work is initialized */
REQ_F_WORK_INITIALIZED = BIT(REQ_F_WORK_INITIALIZED_BIT),
+ /* req->task is refcounted */
+ REQ_F_TASK_PINNED = BIT(REQ_F_TASK_PINNED_BIT),
};
struct async_poll {
@@ -912,6 +915,21 @@ struct sock *io_uring_get_socket(struct file *file)
}
EXPORT_SYMBOL(io_uring_get_socket);
+static void io_get_req_task(struct io_kiocb *req)
+{
+ if (req->flags & REQ_F_TASK_PINNED)
+ return;
+ get_task_struct(req->task);
+ req->flags |= REQ_F_TASK_PINNED;
+}
+
+/* not idempotent -- it doesn't clear REQ_F_TASK_PINNED */
+static void __io_put_req_task(struct io_kiocb *req)
+{
+ if (req->flags & REQ_F_TASK_PINNED)
+ put_task_struct(req->task);
+}
+
static void io_file_put_work(struct work_struct *work);
/*
@@ -1400,9 +1418,7 @@ static void __io_req_aux_free(struct io_kiocb *req)
kfree(req->io);
if (req->file)
io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
- if (req->task)
- put_task_struct(req->task);
-
+ __io_put_req_task(req);
io_req_work_drop_env(req);
}
@@ -4367,8 +4383,7 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
memcpy(&apoll->work, &req->work, sizeof(req->work));
had_io = req->io != NULL;
- get_task_struct(current);
- req->task = current;
+ io_get_req_task(req);
req->apoll = apoll;
INIT_HLIST_NODE(&req->hash_node);
@@ -4556,8 +4571,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
events = READ_ONCE(sqe->poll_events);
poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
- get_task_struct(current);
- req->task = current;
+ io_get_req_task(req);
return 0;
}
@@ -5818,7 +5832,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
req->flags = 0;
/* one is dropped after submission, the other at completion */
refcount_set(&req->refs, 2);
- req->task = NULL;
+ req->task = current;
req->result = 0;
if (unlikely(req->opcode >= IORING_OP_LAST))
--
2.24.0
* [PATCH 2/2] io_uring: cancel by ->task not pid
2020-06-15 7:33 [PATCH 0/2] don't use pid for request cancellation Pavel Begunkov
2020-06-15 7:33 ` [PATCH 1/2] io_uring: lazy get task Pavel Begunkov
@ 2020-06-15 7:33 ` Pavel Begunkov
2020-06-15 15:04 ` [PATCH 0/2] don't use pid for request cancellation Jens Axboe
2 siblings, 0 replies; 5+ messages in thread
From: Pavel Begunkov @ 2020-06-15 7:33 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-kernel; +Cc: ebiederm
When a process exits, io_uring tries to cancel all of its inflight
requests. Match them by req->task instead of work.pid. req->task is always
set now, and it is guaranteed to be valid because we only ever match
against the currently exiting task.
Also remove work.pid and everything related to it, as it is no longer used.
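For illustration only, a userspace sketch of why matching by owner pointer
is more robust than matching by pid (the types and function names below are
made up for the example and are not the io-wq API):

/*
 * Sketch: cancel queued work by comparing the owner pointer itself, so a
 * reused pid or a different pid namespace can never cause a false match.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct owner { int unused; };		/* stands in for task_struct */

struct work_item {
	struct owner *owner;		/* like req->task */
	const char *name;
};

/* Match callback: true only for items owned by the given task pointer. */
static bool match_by_owner(const struct work_item *w, void *data)
{
	return w->owner == (struct owner *)data;
}

/* Walk the queue and cancel everything the callback matches, loosely in
 * the spirit of io_wq_cancel_cb(). */
static int cancel_matching(struct work_item *items, size_t n,
			   bool (*match)(const struct work_item *, void *),
			   void *data)
{
	int cancelled = 0;

	for (size_t i = 0; i < n; i++) {
		if (match(&items[i], data)) {
			printf("cancelling %s\n", items[i].name);
			cancelled++;
		}
	}
	return cancelled;
}

int main(void)
{
	struct owner exiting, other;
	struct work_item queue[] = {
		{ &exiting, "read" }, { &other, "poll" }, { &exiting, "write" },
	};

	/* Cancels only the two items owned by the exiting task. */
	cancel_matching(queue, 3, match_by_owner, &exiting);
	return 0;
}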
Reported-by: Eric W. Biederman <[email protected]>
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io-wq.h | 1 -
fs/io_uring.c | 16 ++++++----------
2 files changed, 6 insertions(+), 11 deletions(-)
diff --git a/fs/io-wq.h b/fs/io-wq.h
index b72538fe5afd..071f1a997800 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -90,7 +90,6 @@ struct io_wq_work {
const struct cred *creds;
struct fs_struct *fs;
unsigned flags;
- pid_t task_pid;
};
static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f05d2e45965e..54addaba742d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1065,8 +1065,6 @@ static inline void io_req_work_grab_env(struct io_kiocb *req,
}
spin_unlock(&current->fs->lock);
}
- if (!req->work.task_pid)
- req->work.task_pid = task_pid_vnr(current);
}
static inline void io_req_work_drop_env(struct io_kiocb *req)
@@ -7455,11 +7453,12 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
}
}
-static bool io_cancel_pid_cb(struct io_wq_work *work, void *data)
+static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
{
- pid_t pid = (pid_t) (unsigned long) data;
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ struct task_struct *task = data;
- return work->task_pid == pid;
+ return req->task == task;
}
static int io_uring_flush(struct file *file, void *data)
@@ -7471,11 +7470,8 @@ static int io_uring_flush(struct file *file, void *data)
/*
* If the task is going away, cancel work it may have pending
*/
- if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
- void *data = (void *) (unsigned long)task_pid_vnr(current);
-
- io_wq_cancel_cb(ctx->io_wq, io_cancel_pid_cb, data, true);
- }
+ if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
+ io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, current, true);
return 0;
}
--
2.24.0