* [PATCH 1/2] io_uring/tctx: have io_uring_alloc_task_context() return tctx
From: Jens Axboe @ 2026-04-08 19:24 UTC
To: io-uring; +Cc: Jens Axboe
Instead of having io_uring_alloc_task_context() return an int and
assign tsk->io_uring, have it return the task context directly. This
enables cleaner error handling in callers, which may have failure
points after the call to io_uring_alloc_task_context().
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
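As a side note, here is a minimal userspace sketch of the pattern the
callers switch to: err_ptr()/is_err()/ptr_err() below are stand-ins for
the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() macros, and
alloc_task_context() is a toy allocator, not the real
io_uring_alloc_task_context().

/*
 * Minimal userspace sketch of "return the object or an encoded error".
 * The helpers mimic ERR_PTR()/IS_ERR()/PTR_ERR(); the allocator either
 * returns a fully initialized object or an error encoded in the pointer,
 * so the caller decides when (and whether) to publish it.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct task_context { int dummy; };

static inline void *err_ptr(long err) { return (void *)err; }
static inline int is_err(const void *p) { return (uintptr_t)p >= (uintptr_t)-4095; }
static inline long ptr_err(const void *p) { return (long)(intptr_t)p; }

static struct task_context *alloc_task_context(int force_fail)
{
        struct task_context *tctx;

        if (force_fail)
                return err_ptr(-ENOMEM);
        tctx = calloc(1, sizeof(*tctx));
        if (!tctx)
                return err_ptr(-ENOMEM);
        return tctx;
}

int main(void)
{
        struct task_context *tctx = alloc_task_context(0);

        if (is_err(tctx)) {
                fprintf(stderr, "alloc failed: %ld\n", ptr_err(tctx));
                return 1;
        }
        printf("context allocated, caller publishes it only now\n");
        free(tctx);
        return 0;
}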
io_uring/sqpoll.c | 8 +++++++-
io_uring/tctx.c | 21 ++++++++++-----------
io_uring/tctx.h | 4 ++--
3 files changed, 19 insertions(+), 14 deletions(-)
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index c6bb938ec5ea..46c12afec73e 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -458,6 +458,7 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
return -EINVAL;
}
if (ctx->flags & IORING_SETUP_SQPOLL) {
+ struct io_uring_task *tctx;
struct task_struct *tsk;
struct io_sq_data *sqd;
bool attached;
@@ -524,8 +525,13 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
rcu_assign_pointer(sqd->thread, tsk);
mutex_unlock(&sqd->lock);
+ ret = 0;
get_task_struct(tsk);
- ret = io_uring_alloc_task_context(tsk, ctx);
+ tctx = io_uring_alloc_task_context(tsk, ctx);
+ if (!IS_ERR(tctx))
+ tsk->io_uring = tctx;
+ else
+ ret = PTR_ERR(tctx);
wake_up_new_task(tsk);
if (ret)
goto err;
diff --git a/io_uring/tctx.c b/io_uring/tctx.c
index 143de8e990eb..e5cef6a8dde0 100644
--- a/io_uring/tctx.c
+++ b/io_uring/tctx.c
@@ -74,20 +74,20 @@ void __io_uring_free(struct task_struct *tsk)
}
}
-__cold int io_uring_alloc_task_context(struct task_struct *task,
- struct io_ring_ctx *ctx)
+__cold struct io_uring_task *io_uring_alloc_task_context(struct task_struct *task,
+ struct io_ring_ctx *ctx)
{
struct io_uring_task *tctx;
int ret;
tctx = kzalloc_obj(*tctx);
if (unlikely(!tctx))
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
if (unlikely(ret)) {
kfree(tctx);
- return ret;
+ return ERR_PTR(ret);
}
tctx->io_wq = io_init_wq_offload(ctx, task);
@@ -95,7 +95,7 @@ __cold int io_uring_alloc_task_context(struct task_struct *task,
ret = PTR_ERR(tctx->io_wq);
percpu_counter_destroy(&tctx->inflight);
kfree(tctx);
- return ret;
+ return ERR_PTR(ret);
}
tctx->task = task;
@@ -103,10 +103,9 @@ __cold int io_uring_alloc_task_context(struct task_struct *task,
init_waitqueue_head(&tctx->wait);
atomic_set(&tctx->in_cancel, 0);
atomic_set(&tctx->inflight_tracked, 0);
- task->io_uring = tctx;
init_llist_head(&tctx->task_list);
init_task_work(&tctx->task_work, tctx_task_work);
- return 0;
+ return tctx;
}
int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
@@ -116,11 +115,11 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
int ret;
if (unlikely(!tctx)) {
- ret = io_uring_alloc_task_context(current, ctx);
- if (unlikely(ret))
- return ret;
+ tctx = io_uring_alloc_task_context(current, ctx);
+ if (IS_ERR(tctx))
+ return PTR_ERR(tctx);
- tctx = current->io_uring;
+ current->io_uring = tctx;
if (ctx->int_flags & IO_RING_F_IOWQ_LIMITS_SET) {
unsigned int limits[2] = { ctx->iowq_limits[0],
ctx->iowq_limits[1], };
diff --git a/io_uring/tctx.h b/io_uring/tctx.h
index 608e96de70a2..2310d2a0c46d 100644
--- a/io_uring/tctx.h
+++ b/io_uring/tctx.h
@@ -6,8 +6,8 @@ struct io_tctx_node {
struct io_ring_ctx *ctx;
};
-int io_uring_alloc_task_context(struct task_struct *task,
- struct io_ring_ctx *ctx);
+struct io_uring_task *io_uring_alloc_task_context(struct task_struct *task,
+ struct io_ring_ctx *ctx);
void io_uring_del_tctx_node(unsigned long index);
int __io_uring_add_tctx_node(struct io_ring_ctx *ctx);
int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx);
--
2.53.0
* [PATCH 2/2] io_uring/tctx: clean up __io_uring_add_tctx_node() error handling
From: Jens Axboe @ 2026-04-08 19:24 UTC
To: io-uring; +Cc: Jens Axboe
Refactor __io_uring_add_tctx_node() so that on error it never leaves
current->io_uring pointing at a partially set up tctx. Move the
assignment of current->io_uring to the end of the function, past any
failure points.
Separate out the node installation into io_tctx_install_node() to
further clean this up.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
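As a side note, a minimal userspace sketch of the ordering this
enforces: all fallible steps operate on a local pointer, and the shared
pointer (standing in for current->io_uring) is only assigned once
nothing can fail anymore. install_node(), add_tctx_node() and
current_tctx below are illustrative names, not the kernel API.

/*
 * Userspace sketch of "construct fully, publish last": on failure the
 * shared pointer is never left pointing at a half-initialized object,
 * and a newly allocated object is torn down before returning the error.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct tctx { int installed; };

static struct tctx *current_tctx;       /* stands in for current->io_uring */

static int install_node(struct tctx *t, int force_fail)
{
        if (force_fail)
                return -ENOMEM;
        t->installed = 1;
        return 0;
}

static int add_tctx_node(int fail_install)
{
        struct tctx *tctx = current_tctx;
        int newly_allocated = 0;
        int ret;

        if (!tctx) {
                tctx = calloc(1, sizeof(*tctx));
                if (!tctx)
                        return -ENOMEM;
                newly_allocated = 1;
        }

        ret = install_node(tctx, fail_install);
        if (!ret) {
                current_tctx = tctx;    /* publish only after full setup */
                return 0;
        }
        if (newly_allocated)
                free(tctx);             /* error path frees, never publishes */
        return ret;
}

int main(void)
{
        int ret;

        ret = add_tctx_node(1);
        printf("failed install: %d, published=%d\n", ret, current_tctx != NULL);
        ret = add_tctx_node(0);
        printf("good install:   %d, published=%d\n", ret, current_tctx != NULL);
        return 0;
}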
io_uring/tctx.c | 60 ++++++++++++++++++++++++++++++++-----------------
1 file changed, 40 insertions(+), 20 deletions(-)
diff --git a/io_uring/tctx.c b/io_uring/tctx.c
index e5cef6a8dde0..61533f30494f 100644
--- a/io_uring/tctx.c
+++ b/io_uring/tctx.c
@@ -108,10 +108,37 @@ __cold struct io_uring_task *io_uring_alloc_task_context(struct task_struct *tas
return tctx;
}
+static int io_tctx_install_node(struct io_ring_ctx *ctx,
+ struct io_uring_task *tctx)
+{
+ struct io_tctx_node *node;
+ int ret;
+
+ if (xa_load(&tctx->xa, (unsigned long)ctx))
+ return 0;
+
+ node = kmalloc_obj(*node);
+ if (!node)
+ return -ENOMEM;
+ node->ctx = ctx;
+ node->task = current;
+
+ ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
+ node, GFP_KERNEL));
+ if (ret) {
+ kfree(node);
+ return ret;
+ }
+
+ mutex_lock(&ctx->tctx_lock);
+ list_add(&node->ctx_node, &ctx->tctx_list);
+ mutex_unlock(&ctx->tctx_lock);
+ return 0;
+}
+
int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
{
struct io_uring_task *tctx = current->io_uring;
- struct io_tctx_node *node;
int ret;
if (unlikely(!tctx)) {
@@ -119,14 +146,13 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
if (IS_ERR(tctx))
return PTR_ERR(tctx);
- current->io_uring = tctx;
if (ctx->int_flags & IO_RING_F_IOWQ_LIMITS_SET) {
unsigned int limits[2] = { ctx->iowq_limits[0],
ctx->iowq_limits[1], };
ret = io_wq_max_workers(tctx->io_wq, limits);
if (ret)
- return ret;
+ goto err_free;
}
}
@@ -137,25 +163,19 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
*/
if (tctx->io_wq)
io_wq_set_exit_on_idle(tctx->io_wq, false);
- if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
- node = kmalloc_obj(*node);
- if (!node)
- return -ENOMEM;
- node->ctx = ctx;
- node->task = current;
-
- ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
- node, GFP_KERNEL));
- if (ret) {
- kfree(node);
- return ret;
- }
- mutex_lock(&ctx->tctx_lock);
- list_add(&node->ctx_node, &ctx->tctx_list);
- mutex_unlock(&ctx->tctx_lock);
+ ret = io_tctx_install_node(ctx, tctx);
+ if (!ret) {
+ current->io_uring = tctx;
+ return 0;
}
- return 0;
+ if (!current->io_uring) {
+err_free:
+ io_wq_put_and_exit(tctx->io_wq);
+ percpu_counter_destroy(&tctx->inflight);
+ kfree(tctx);
+ }
+ return ret;
}
int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx)
--
2.53.0