* [PATCH 1/3] io_uring: rename trace_io_uring_submit_sqe() tracepoint
From: Jens Axboe @ 2023-03-30 16:33 UTC
To: io-uring; +Cc: Jens Axboe
It has nothing to do with the SQE at this point; it's a request
submission. While in there, also get rid of the 'force_nonblock'
argument, which is dead as we only ever pass in true.
Signed-off-by: Jens Axboe <[email protected]>
---
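A quick way to sanity-check the rename from userspace, as a hedged
sketch (assuming tracefs is mounted at /sys/kernel/tracing; path and
availability depend on the kernel config): dump the event format, which
after this patch is registered under the new name and no longer carries
a force_nonblock field.

    #include <stdio.h>

    int main(void)
    {
        /* tracefs mount point is an assumption; older setups may use
         * /sys/kernel/debug/tracing instead */
        const char *path =
            "/sys/kernel/tracing/events/io_uring/io_uring_submit_req/format";
        FILE *f = fopen(path, "r");
        char line[256];

        if (!f) {
            perror("io_uring_submit_req format");
            return 1;
        }
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
        return 0;
    }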
include/trace/events/io_uring.h | 15 ++++++---------
io_uring/io_uring.c | 3 +--
2 files changed, 7 insertions(+), 11 deletions(-)
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index 936fd41bf147..69454f1f98b0 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -360,19 +360,18 @@ TRACE_EVENT(io_uring_complete,
);
/**
- * io_uring_submit_sqe - called before submitting one SQE
+ * io_uring_submit_req - called before submitting a request
*
* @req: pointer to a submitted request
- * @force_nonblock: whether a context blocking or not
*
* Allows to track SQE submitting, to understand what was the source of it, SQ
* thread or io_uring_enter call.
*/
-TRACE_EVENT(io_uring_submit_sqe,
+TRACE_EVENT(io_uring_submit_req,
- TP_PROTO(struct io_kiocb *req, bool force_nonblock),
+ TP_PROTO(struct io_kiocb *req),
- TP_ARGS(req, force_nonblock),
+ TP_ARGS(req),
TP_STRUCT__entry (
__field( void *, ctx )
@@ -380,7 +379,6 @@ TRACE_EVENT(io_uring_submit_sqe,
__field( unsigned long long, user_data )
__field( u8, opcode )
__field( u32, flags )
- __field( bool, force_nonblock )
__field( bool, sq_thread )
__string( op_str, io_uring_get_opcode(req->opcode) )
@@ -392,16 +390,15 @@ TRACE_EVENT(io_uring_submit_sqe,
__entry->user_data = req->cqe.user_data;
__entry->opcode = req->opcode;
__entry->flags = req->flags;
- __entry->force_nonblock = force_nonblock;
__entry->sq_thread = req->ctx->flags & IORING_SETUP_SQPOLL;
__assign_str(op_str, io_uring_get_opcode(req->opcode));
),
TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%x, "
- "non block %d, sq_thread %d", __entry->ctx, __entry->req,
+ "sq_thread %d", __entry->ctx, __entry->req,
__entry->user_data, __get_str(op_str),
- __entry->flags, __entry->force_nonblock, __entry->sq_thread)
+ __entry->flags, __entry->sq_thread)
);
/*
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 536940675c67..775b53730c2f 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2305,8 +2305,7 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
if (unlikely(ret))
return io_submit_fail_init(sqe, req, ret);
- /* don't need @sqe from now on */
- trace_io_uring_submit_sqe(req, true);
+ trace_io_uring_submit_req(req);
/*
* If we already have a head request, queue this one for async
--
2.39.2
* [PATCH 2/3] io_uring: cap io_sqring_entries() at SQ ring size
From: Jens Axboe @ 2023-03-30 16:33 UTC
To: io-uring; +Cc: Jens Axboe
We already do this capping manually for the !SQPOLL case. Do it in
general in io_sqring_entries() instead, and we can also dump the ugly
min3() in io_submit_sqes().
Signed-off-by: Jens Axboe <[email protected]>
---
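For context on why the clamp is wanted at all: the entry count is an
unsigned difference between the shared SQ tail, which userspace writes,
and the kernel's cached head, so a bogus tail can make the raw count far
exceed the actual ring size. A minimal standalone sketch of the
arithmetic, with hypothetical values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int sq_entries = 128;     /* actual SQ ring size */
        unsigned int cached_sq_head = 100; /* kernel's consumed index */
        unsigned int tail = 4096;          /* bogus tail from userspace */

        /* unsigned subtraction handles index wraparound naturally */
        unsigned int entries = tail - cached_sq_head;  /* 3996 */
        /* the new clamp in io_sqring_entries() caps it at ring size */
        unsigned int capped = entries < sq_entries ? entries : sq_entries;

        printf("raw %u, capped %u\n", entries, capped);
        return 0;
    }

With the helper capped, callers like io_submit_sqes() can rely on the
returned count being sane, which is what lets the min3() go away.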
io_uring/io_uring.c | 2 +-
io_uring/io_uring.h | 4 +++-
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 775b53730c2f..a0b64831c455 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2434,7 +2434,7 @@ int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
if (unlikely(!entries))
return 0;
/* make sure SQ entry isn't read before tail */
- ret = left = min3(nr, ctx->sq_entries, entries);
+ ret = left = min(nr, entries);
io_get_task_refs(left);
io_submit_state_start(&ctx->submit_state, left);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index c33f719731ac..193b2db39fe8 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -262,9 +262,11 @@ static inline bool io_sqring_full(struct io_ring_ctx *ctx)
static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
struct io_rings *rings = ctx->rings;
+ unsigned int entries;
/* make sure SQ entry isn't read before tail */
- return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
+ entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
+ return min(entries, ctx->sq_entries);
}
static inline int io_run_task_work(void)
--
2.39.2
* [PATCH 3/3] io_uring: get rid of io_preinit_req()
From: Jens Axboe @ 2023-03-30 16:33 UTC
To: io-uring; +Cc: Jens Axboe
Just do these assignments where we set up the request anyway; splitting
the init into two doesn't really buy us anything, and it's a bit more
fragile. With this, io_init_req() handles the whole thing, while the
cacheline is pulled in anyway.
Signed-off-by: Jens Axboe <[email protected]>
---
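To illustrate the fragility point with a standalone sketch (hypothetical
names, not the actual io_uring types): when init is split between
allocation time and per-use time, a field that is only set at allocation
silently keeps stale state across reuse as soon as some code path writes
it while the object is live.

    #include <stdio.h>
    #include <stddef.h>

    struct obj {
        void *link; /* preinit'd once, assumed NULL on every reuse */
        int res;
    };

    static void obj_preinit(struct obj *o) { o->link = NULL; } /* at alloc */
    static void obj_init(struct obj *o) { o->res = 0; }        /* per use */

    int main(void)
    {
        struct obj o;

        obj_preinit(&o);
        obj_init(&o);
        o.link = &o;    /* some path sets link while in use */

        /* recycled: only the per-use init runs again */
        obj_init(&o);
        printf("link after reuse: %p (stale, expected NULL)\n", o.link);
        return 0;
    }

Doing all the assignments in one spot, while the cacheline is being
written anyway, removes that class of bug at no measurable cost.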
io_uring/io_uring.c | 19 ++++---------------
1 file changed, 4 insertions(+), 15 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index a0b64831c455..7f5d0b833955 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1034,19 +1034,6 @@ void io_req_defer_failed(struct io_kiocb *req, s32 res)
io_req_complete_defer(req);
}
-/*
- * Don't initialise the fields below on every allocation, but do that in
- * advance and keep them valid across allocations.
- */
-static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
-{
- req->ctx = ctx;
- req->link = NULL;
- req->async_data = NULL;
- /* not necessary, but safer to zero */
- req->cqe.res = 0;
-}
-
static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
struct io_submit_state *state)
{
@@ -1097,7 +1084,6 @@ __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
for (i = 0; i < ret; i++) {
struct io_kiocb *req = reqs[i];
- io_preinit_req(req, ctx);
io_req_add_to_cache(req, ctx);
}
return true;
@@ -2172,14 +2158,17 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
int personality;
u8 opcode;
- /* req is partially pre-initialised, see io_preinit_req() */
req->opcode = opcode = READ_ONCE(sqe->opcode);
/* same numerical values with corresponding REQ_F_*, safe to copy */
req->flags = sqe_flags = READ_ONCE(sqe->flags);
req->cqe.user_data = READ_ONCE(sqe->user_data);
+ req->cqe.res = 0;
+ req->ctx = ctx;
req->file = NULL;
req->rsrc_node = NULL;
req->task = current;
+ req->async_data = NULL;
+ req->link = NULL;
if (unlikely(opcode >= IORING_OP_LAST)) {
req->opcode = 0;
--
2.39.2