* [PATCH 1/3] io_uring: allow task match to be passed to io_req_cache_free()
From: Jens Axboe @ 2021-02-13 16:14 UTC
To: io-uring; +Cc: Jens Axboe
No functional changes in this patch, it just allows a caller to pass in a
target task that we must match when freeing requests from the cache.
Signed-off-by: Jens Axboe <[email protected]>
---
fs/io_uring.c | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 2e8cb739c835..9cd7b03a6f34 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8681,12 +8681,13 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
idr_destroy(&ctx->io_buffer_idr);
}
-static void io_req_cache_free(struct list_head *list)
+static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
{
- while (!list_empty(list)) {
- struct io_kiocb *req;
+ struct io_kiocb *req, *nxt;
- req = list_first_entry(list, struct io_kiocb, compl.list);
+ list_for_each_entry_safe(req, nxt, list, compl.list) {
+ if (tsk && req->task != tsk)
+ continue;
list_del(&req->compl.list);
kmem_cache_free(req_cachep, req);
}
@@ -8742,8 +8743,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
free_uid(ctx->user);
put_cred(ctx->creds);
kfree(ctx->cancel_hash);
- io_req_cache_free(&ctx->submit_state.comp.free_list);
- io_req_cache_free(&ctx->submit_state.comp.locked_free_list);
+ io_req_cache_free(&ctx->submit_state.comp.free_list, NULL);
+ io_req_cache_free(&ctx->submit_state.comp.locked_free_list, NULL);
kfree(ctx);
}
--
2.30.0
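To make the new filter semantics concrete: with a NULL task the cache is
drained entirely, as before, while a non-NULL task only frees requests owned
by that task. Below is a minimal userspace sketch of that behavior on a plain
singly linked list; struct task, struct req and req_cache_free() are
simplified stand-ins for the kernel's task_struct, io_kiocb and
io_req_cache_free(), not the real types.

/* Simplified userspace model of the new filter in io_req_cache_free():
 * a NULL task frees everything, a non-NULL task frees only matching
 * entries. All types here are illustrative stand-ins, not kernel types. */
#include <stdio.h>
#include <stdlib.h>

struct task { int pid; };

struct req {
	struct task *task;
	struct req *next;
};

static void req_cache_free(struct req **list, struct task *tsk)
{
	struct req **pp = list;

	while (*pp) {
		struct req *req = *pp;

		if (tsk && req->task != tsk) {
			/* not owned by tsk, skip (mirrors the 'continue' above) */
			pp = &req->next;
			continue;
		}
		*pp = req->next;	/* unlink, like list_del() */
		free(req);		/* like kmem_cache_free() */
	}
}

int main(void)
{
	struct task a = { .pid = 1 }, b = { .pid = 2 };
	struct task *owners[] = { &a, &b, &a };
	struct req *list = NULL;

	for (int i = 0; i < 3; i++) {
		struct req *r = malloc(sizeof(*r));
		r->task = owners[i];
		r->next = list;
		list = r;
	}

	req_cache_free(&list, &a);	/* prune only task a's requests */
	for (struct req *r = list; r; r = r->next)
		printf("left: pid %d\n", r->task->pid);	/* prints pid 2 */

	req_cache_free(&list, NULL);	/* NULL drains whatever remains */
	return 0;
}

After the filtered pass only the request owned by the other task is left; the
final NULL pass then frees the rest, matching the pre-patch behavior.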
* [PATCH 2/3] io_uring: add helper to free all request caches
From: Jens Axboe @ 2021-02-13 16:14 UTC
To: io-uring; +Cc: Jens Axboe
We have three different request caches; put the freeing of them in a helper
for easy calling. This is in preparation for doing it outside of ring freeing
as well. With that in mind, also ensure that we do the proper locking so it
is safe to call from a context where the ring is still live.
Signed-off-by: Jens Axboe <[email protected]>
---
fs/io_uring.c | 26 +++++++++++++++++++-------
1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 9cd7b03a6f34..1895fc132252 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8693,10 +8693,27 @@ static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
}
}
-static void io_ring_ctx_free(struct io_ring_ctx *ctx)
+static void io_req_caches_free(struct io_ring_ctx *ctx, struct task_struct *tsk)
{
struct io_submit_state *submit_state = &ctx->submit_state;
+ mutex_lock(&ctx->uring_lock);
+
+ if (submit_state->free_reqs)
+ kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
+ submit_state->reqs);
+
+ io_req_cache_free(&submit_state->comp.free_list, NULL);
+
+ spin_lock_irq(&ctx->completion_lock);
+ io_req_cache_free(&submit_state->comp.locked_free_list, NULL);
+ spin_unlock_irq(&ctx->completion_lock);
+
+ mutex_unlock(&ctx->uring_lock);
+}
+
+static void io_ring_ctx_free(struct io_ring_ctx *ctx)
+{
/*
* Some may use context even when all refs and requests have been put,
* and they are free to do so while still holding uring_lock, see
@@ -8715,10 +8732,6 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
ctx->mm_account = NULL;
}
- if (submit_state->free_reqs)
- kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
- submit_state->reqs);
-
#ifdef CONFIG_BLK_CGROUP
if (ctx->sqo_blkcg_css)
css_put(ctx->sqo_blkcg_css);
@@ -8742,9 +8755,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
percpu_ref_exit(&ctx->refs);
free_uid(ctx->user);
put_cred(ctx->creds);
+ io_req_caches_free(ctx, NULL);
kfree(ctx->cancel_hash);
- io_req_cache_free(&ctx->submit_state.comp.free_list, NULL);
- io_req_cache_free(&ctx->submit_state.comp.locked_free_list, NULL);
kfree(ctx);
}
--
2.30.0
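A note on the locking order in the new helper: the bulk free_reqs array and
comp.free_list are only manipulated with ctx->uring_lock held, while
comp.locked_free_list is also appended to from completion context, which is
why it is emptied under completion_lock with IRQs disabled. A rough userspace
model of that two-lock split is below, with pthread mutexes standing in for
the kernel mutex and spinlock; all names and types are illustrative only, not
the kernel's.

/* Rough model of why io_req_caches_free() needs two locks: the submit-side
 * free_list is only touched under uring_lock, while locked_free_list is also
 * appended to from completion context, so it is drained under a separate
 * completion_lock. Illustrative only. Build with: cc -pthread model.c */
#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t uring_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *free_list;		/* comp.free_list analogue */
static struct node *locked_free_list;	/* comp.locked_free_list analogue */

static void *completion_side(void *arg)
{
	/* completion path: recycle requests onto locked_free_list */
	for (int i = 0; i < 1000; i++) {
		struct node *n = malloc(sizeof(*n));

		pthread_mutex_lock(&completion_lock);
		n->next = locked_free_list;
		locked_free_list = n;
		pthread_mutex_unlock(&completion_lock);
	}
	return NULL;
}

static void free_all(struct node **list)
{
	while (*list) {
		struct node *n = *list;

		*list = n->next;
		free(n);
	}
}

static void req_caches_free(void)
{
	pthread_mutex_lock(&uring_lock);
	free_all(&free_list);			/* only uring_lock needed here */

	pthread_mutex_lock(&completion_lock);	/* completion side may race */
	free_all(&locked_free_list);
	pthread_mutex_unlock(&completion_lock);

	pthread_mutex_unlock(&uring_lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, completion_side, NULL);
	req_caches_free();		/* safe while the "ring" is still live */
	pthread_join(&t, NULL);
	req_caches_free();		/* drain anything that raced in */
	return 0;
}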
* [PATCH 3/3] io_uring: kill cached requests from exiting task closing the ring
From: Jens Axboe @ 2021-02-13 16:14 UTC
To: io-uring; +Cc: Jens Axboe
Be nice and prune these upfront, in case the ring is being shared and
one of the tasks is going away. This is a bit more important now that
we account the allocations.
Signed-off-by: Jens Axboe <[email protected]>
---
fs/io_uring.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 1895fc132252..a9d094f7060f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -9232,8 +9232,10 @@ static int io_uring_flush(struct file *file, void *data)
struct io_uring_task *tctx = current->io_uring;
struct io_ring_ctx *ctx = file->private_data;
- if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
+ if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
io_uring_cancel_task_requests(ctx, NULL);
+ io_req_caches_free(ctx, current);
+ }
if (!tctx)
return 0;
--
2.30.0
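For reference, the kind of sharing the commit message has in mind looks
roughly like the liburing program below: the ring is set up before fork(),
the child submits and exits, and the child's final close of the ring fd goes
through ->flush() while the parent keeps using the ring. This is an
illustrative sketch only, assuming liburing is installed and with error
handling trimmed; it reproduces the shared-ring, exiting-task situation
rather than demonstrating the cache pruning itself.

/* Shared-ring scenario: ring created before fork(), child submits and
 * exits, parent keeps the ring. The exiting child's close of the ring fd
 * goes through ->flush(). Illustrative only; needs liburing (-luring). */
#include <liburing.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;

	if (io_uring_queue_init(8, &ring, 0))
		return 1;

	if (fork() == 0) {
		/* child: submit a no-op, reap it, then exit */
		struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
		struct io_uring_cqe *cqe;

		io_uring_prep_nop(sqe);
		io_uring_submit(&ring);
		io_uring_wait_cqe(&ring, &cqe);
		io_uring_cqe_seen(&ring, cqe);
		_exit(0);
	}

	wait(NULL);
	/* parent continues to own and eventually tears down the ring */
	io_uring_queue_exit(&ring);
	return 0;
}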