* [PATCH 5.12 0/4] random 5.12 bits
From: Pavel Begunkov @ 2021-03-01 18:20 UTC
To: Jens Axboe, io-uring
1/4 is a fix.
The others may make looking for ->io_uring usages slightly easier.
Pavel Begunkov (4):
io_uring: choose right tctx->io_wq for try cancel
io_uring: inline io_req_clean_work()
io_uring: inline __io_queue_async_work()
io_uring: remove extra in_idle wake up
fs/io_uring.c | 44 +++++++++++++++-----------------------------
1 file changed, 15 insertions(+), 29 deletions(-)
--
2.24.0
* [PATCH 1/4] io_uring: choose right tctx->io_wq for try cancel
From: Pavel Begunkov @ 2021-03-01 18:20 UTC
To: Jens Axboe, io-uring
When we cancel SQPOLL, @task in io_uring_try_cancel_requests() will
differ from current. Use the right tctx from the passed-in @task, and don't
forget that it can be NULL when the io_uring ctx exits.
Signed-off-by: Pavel Begunkov <[email protected]>
---
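The selection logic reduces to the sketch below. The helper name
pick_cancel_tctx() is invented for illustration; only the task ?: current
expression (GCC's conditional-with-omitted-middle-operand extension, used
throughout the kernel) and the NULL-on-ctx-exit convention come from the
patch itself.

static struct io_uring_task *pick_cancel_tctx(struct task_struct *task)
{
	/*
	 * When cancelling SQPOLL work, @task differs from current, and on
	 * io_uring ctx exit @task is NULL.  Fall back to current only in
	 * the NULL case, then use that task's per-task io_uring context.
	 */
	struct task_struct *tctx_task = task ?: current;

	return tctx_task->io_uring;
}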
fs/io_uring.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0c363529a836..34d0fd4a933b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8615,7 +8615,8 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
struct files_struct *files)
{
struct io_task_cancel cancel = { .task = task, .files = files, };
- struct io_uring_task *tctx = current->io_uring;
+ struct task_struct *tctx_task = task ?: current;
+ struct io_uring_task *tctx = tctx_task->io_uring;
while (1) {
enum io_wq_cancel cret;
--
2.24.0
* [PATCH 2/4] io_uring: inline io_req_clean_work()
From: Pavel Begunkov @ 2021-03-01 18:20 UTC
To: Jens Axboe, io-uring
Inline io_req_clean_work() into io_dismantle_req(): less code, and it's
easier to analyse tctx dependencies and ref usage.
Signed-off-by: Pavel Begunkov <[email protected]>
---
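For context, the moved block is the teardown half of the request's inflight
tracking. A rough sketch of the pairing: the submission side is paraphrased
from io_req_track_inflight() (its body is not shown in the hunks here, and
the ctx->inflight_list name is an assumption), while the completion side
matches what now sits inline in io_dismantle_req().

/* submission side, io_req_track_inflight() (paraphrased, approximate) */
req->flags |= REQ_F_INFLIGHT;
spin_lock_irq(&ctx->inflight_lock);
list_add(&req->inflight_entry, &ctx->inflight_list);
spin_unlock_irq(&ctx->inflight_lock);

/* completion side, now open-coded in io_dismantle_req(): unlink the request
 * and, if the owning task is waiting in in_idle, wake it up
 */
spin_lock_irqsave(&ctx->inflight_lock, flags);
list_del(&req->inflight_entry);
spin_unlock_irqrestore(&ctx->inflight_lock, flags);
req->flags &= ~REQ_F_INFLIGHT;
if (atomic_read(&tctx->in_idle))
	wake_up(&tctx->wait);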
fs/io_uring.c | 30 +++++++++++++-----------------
1 file changed, 13 insertions(+), 17 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 34d0fd4a933b..3d8c99c46127 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1167,22 +1167,6 @@ static bool req_need_defer(struct io_kiocb *req, u32 seq)
return false;
}
-static void io_req_clean_work(struct io_kiocb *req)
-{
- if (req->flags & REQ_F_INFLIGHT) {
- struct io_ring_ctx *ctx = req->ctx;
- struct io_uring_task *tctx = req->task->io_uring;
- unsigned long flags;
-
- spin_lock_irqsave(&ctx->inflight_lock, flags);
- list_del(&req->inflight_entry);
- spin_unlock_irqrestore(&ctx->inflight_lock, flags);
- req->flags &= ~REQ_F_INFLIGHT;
- if (atomic_read(&tctx->in_idle))
- wake_up(&tctx->wait);
- }
-}
-
static void io_req_track_inflight(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -1671,7 +1655,19 @@ static void io_dismantle_req(struct io_kiocb *req)
io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
if (req->fixed_rsrc_refs)
percpu_ref_put(req->fixed_rsrc_refs);
- io_req_clean_work(req);
+
+ if (req->flags & REQ_F_INFLIGHT) {
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_uring_task *tctx = req->task->io_uring;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->inflight_lock, flags);
+ list_del(&req->inflight_entry);
+ spin_unlock_irqrestore(&ctx->inflight_lock, flags);
+ req->flags &= ~REQ_F_INFLIGHT;
+ if (atomic_read(&tctx->in_idle))
+ wake_up(&tctx->wait);
+ }
}
static inline void io_put_task(struct task_struct *task, int nr)
--
2.24.0
* [PATCH 3/4] io_uring: inline __io_queue_async_work()
From: Pavel Begunkov @ 2021-03-01 18:20 UTC
To: Jens Axboe, io-uring
__io_queue_async_work() is only called from io_queue_async_work(), so
inline it.
Signed-off-by: Pavel Begunkov <[email protected]>
---
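For readability, here is the resulting io_queue_async_work() stitched
together from the two hunks below; context lines the hunks do not show
(including where tctx, the request owner's io_uring_task, is set up) are
kept elided as a comment rather than guessed at.

static void io_queue_async_work(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *link = io_prep_linked_timeout(req);
	/* ... unchanged lines elided by the hunks, where tctx is set up ... */

	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
					&req->work, req->flags);
	/* init ->work of the whole link before punting */
	io_prep_async_link(req);
	io_wq_enqueue(tctx->io_wq, &req->work);
	if (link)
		io_queue_linked_timeout(link);
}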
fs/io_uring.c | 13 ++-----------
1 file changed, 2 insertions(+), 11 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3d8c99c46127..411323dc43bb 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1205,7 +1205,7 @@ static void io_prep_async_link(struct io_kiocb *req)
io_prep_async_work(cur);
}
-static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
+static void io_queue_async_work(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *link = io_prep_linked_timeout(req);
@@ -1216,18 +1216,9 @@ static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
&req->work, req->flags);
- io_wq_enqueue(tctx->io_wq, &req->work);
- return link;
-}
-
-static void io_queue_async_work(struct io_kiocb *req)
-{
- struct io_kiocb *link;
-
/* init ->work of the whole link before punting */
io_prep_async_link(req);
- link = __io_queue_async_work(req);
-
+ io_wq_enqueue(tctx->io_wq, &req->work);
if (link)
io_queue_linked_timeout(link);
}
--
2.24.0
* [PATCH 4/4] io_uring: remove extra in_idle wake up
From: Pavel Begunkov @ 2021-03-01 18:20 UTC
To: Jens Axboe, io-uring
io_dismantle_req() is always followed by io_put_task(), which already does
proper in_idle wake-ups, so we can skip waking the owner task in
io_dismantle_req(). The rule is simpler now: call io_put_task() shortly
after ending a request, and it will be fine.
Signed-off-by: Pavel Begunkov <[email protected]>
---
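To make the reasoning concrete, a hypothetical sketch of the in_idle
handshake follows. It is not the literal kernel code: the waiting side is
heavily simplified, and the io_put_task() fragment is written from the
description above, assuming tctx->inflight is a per-task counter (treated
here as a percpu_counter) that only io_put_task() decrements.

/* cancelling task: announce idleness, then wait for its requests to drain */
atomic_inc(&tctx->in_idle);
wait_event(tctx->wait, percpu_counter_sum(&tctx->inflight) == 0); /* simplified */
atomic_dec(&tctx->in_idle);

/* completion path, io_put_task(): drop the inflight count and do the wake-up.
 * The wake_up() removed from io_dismantle_req() fired without changing this
 * count; the one here, right after the decrement, is the one that matters.
 */
percpu_counter_sub(&tctx->inflight, nr);
if (atomic_read(&tctx->in_idle))
	wake_up(&tctx->wait);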
fs/io_uring.c | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 411323dc43bb..e9215477426d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1649,18 +1649,16 @@ static void io_dismantle_req(struct io_kiocb *req)
if (req->flags & REQ_F_INFLIGHT) {
struct io_ring_ctx *ctx = req->ctx;
- struct io_uring_task *tctx = req->task->io_uring;
unsigned long flags;
spin_lock_irqsave(&ctx->inflight_lock, flags);
list_del(&req->inflight_entry);
spin_unlock_irqrestore(&ctx->inflight_lock, flags);
req->flags &= ~REQ_F_INFLIGHT;
- if (atomic_read(&tctx->in_idle))
- wake_up(&tctx->wait);
}
}
+/* must be called shortly after putting a request */
static inline void io_put_task(struct task_struct *task, int nr)
{
struct io_uring_task *tctx = task->io_uring;
--
2.24.0
* Re: [PATCH 5.12 0/4] random 5.12 bits
From: Jens Axboe @ 2021-03-01 23:41 UTC
To: Pavel Begunkov, io-uring
On 3/1/21 11:20 AM, Pavel Begunkov wrote:
> 1/4 is a fix.
> The others may make looking for ->io_uring usages slightly easier.
>
> Pavel Begunkov (4):
> io_uring: choose right tctx->io_wq for try cancel
> io_uring: inline io_req_clean_work()
> io_uring: inline __io_queue_async_work()
> io_uring: remove extra in_idle wake up
>
> fs/io_uring.c | 44 +++++++++++++++-----------------------------
> 1 file changed, 15 insertions(+), 29 deletions(-)
Looks good, applied.
--
Jens Axboe