From: Pavel Begunkov <asml.silence@gmail.com>
To: io-uring@vger.kernel.org
Cc: asml.silence@gmail.com, Martin KaFai Lau <martin.lau@linux.dev>,
	bpf@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [RFC v2 1/5] io_uring: add struct for state controlling cqwait
Date: Fri,  6 Jun 2025 14:57:58 +0100
Message-ID: <933217fc63d9f7753e0e3e8dc239ba1a3f15add4.1749214572.git.asml.silence@gmail.com>
In-Reply-To: <cover.1749214572.git.asml.silence@gmail.com>

Add struct iou_loop_state and move into it the parameters controlling the
flow of normal CQ waiting. It will be exposed to BPF as part of the helper
API; struct io_wait_queue could have been reused, but its name is not
ideal, and keeping only the necessary bits makes further development a bit
cleaner.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
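Note for reviewers (not part of the commit message): a condensed view of the
new state container and of how io_cqring_wait() seeds it, assembled from the
diff below. Field names match the patch; later patches in the series may grow
the struct.

	/* per-wait loop state, split out of struct io_wait_queue */
	struct iou_loop_state {
		__u32	target_cq_tail;	/* CQ tail to reach before waking (was iowq->cq_tail) */
		ktime_t	timeout;	/* absolute wait deadline (was iowq->timeout) */
	};

	/* io_cqring_wait() now fills the embedded state before sleeping */
	iowq.state.target_cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
	iowq.state.timeout = KTIME_MAX;
	if (ext_arg->ts_set) {
		iowq.state.timeout = timespec64_to_ktime(ext_arg->ts);
		if (!(flags & IORING_ENTER_ABS_TIMER))
			iowq.state.timeout = ktime_add(iowq.state.timeout, start_time);
	}
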
 io_uring/io_uring.c | 20 ++++++++++----------
 io_uring/io_uring.h | 11 ++++++++---
 io_uring/napi.c     |  4 ++--
 3 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 5cdccf65c652..9cc4d8f335a1 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2404,8 +2404,8 @@ static enum hrtimer_restart io_cqring_min_timer_wakeup(struct hrtimer *timer)
 	struct io_ring_ctx *ctx = iowq->ctx;
 
 	/* no general timeout, or shorter (or equal), we are done */
-	if (iowq->timeout == KTIME_MAX ||
-	    ktime_compare(iowq->min_timeout, iowq->timeout) >= 0)
+	if (iowq->state.timeout == KTIME_MAX ||
+	    ktime_compare(iowq->min_timeout, iowq->state.timeout) >= 0)
 		goto out_wake;
 	/* work we may need to run, wake function will see if we need to wake */
 	if (io_has_work(ctx))
@@ -2431,7 +2431,7 @@ static enum hrtimer_restart io_cqring_min_timer_wakeup(struct hrtimer *timer)
 	}
 
 	hrtimer_update_function(&iowq->t, io_cqring_timer_wakeup);
-	hrtimer_set_expires(timer, iowq->timeout);
+	hrtimer_set_expires(timer, iowq->state.timeout);
 	return HRTIMER_RESTART;
 out_wake:
 	return io_cqring_timer_wakeup(timer);
@@ -2447,7 +2447,7 @@ static int io_cqring_schedule_timeout(struct io_wait_queue *iowq,
 		hrtimer_setup_on_stack(&iowq->t, io_cqring_min_timer_wakeup, clock_id,
 				       HRTIMER_MODE_ABS);
 	} else {
-		timeout = iowq->timeout;
+		timeout = iowq->state.timeout;
 		hrtimer_setup_on_stack(&iowq->t, io_cqring_timer_wakeup, clock_id,
 				       HRTIMER_MODE_ABS);
 	}
@@ -2488,7 +2488,7 @@ static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 	 */
 	if (ext_arg->iowait && current_pending_io())
 		current->in_iowait = 1;
-	if (iowq->timeout != KTIME_MAX || iowq->min_timeout)
+	if (iowq->state.timeout != KTIME_MAX || iowq->min_timeout)
 		ret = io_cqring_schedule_timeout(iowq, ctx->clockid, start_time);
 	else
 		schedule();
@@ -2546,18 +2546,18 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
 	iowq.wq.private = current;
 	INIT_LIST_HEAD(&iowq.wq.entry);
 	iowq.ctx = ctx;
-	iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
+	iowq.state.target_cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
 	iowq.cq_min_tail = READ_ONCE(ctx->rings->cq.tail);
 	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
 	iowq.hit_timeout = 0;
 	iowq.min_timeout = ext_arg->min_time;
-	iowq.timeout = KTIME_MAX;
+	iowq.state.timeout = KTIME_MAX;
 	start_time = io_get_time(ctx);
 
 	if (ext_arg->ts_set) {
-		iowq.timeout = timespec64_to_ktime(ext_arg->ts);
+		iowq.state.timeout = timespec64_to_ktime(ext_arg->ts);
 		if (!(flags & IORING_ENTER_ABS_TIMER))
-			iowq.timeout = ktime_add(iowq.timeout, start_time);
+			iowq.state.timeout = ktime_add(iowq.state.timeout, start_time);
 	}
 
 	if (ext_arg->sig) {
@@ -2582,7 +2582,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
 
 		/* if min timeout has been hit, don't reset wait count */
 		if (!iowq.hit_timeout)
-			nr_wait = (int) iowq.cq_tail -
+			nr_wait = (int) iowq.state.target_cq_tail -
 					READ_ONCE(ctx->rings->cq.tail);
 		else
 			nr_wait = 1;
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 0ea7a435d1de..edf698b81a95 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -39,15 +39,19 @@ enum {
 	IOU_REQUEUE		= -3072,
 };
 
+struct iou_loop_state {
+	__u32			target_cq_tail;
+	ktime_t			timeout;
+};
+
 struct io_wait_queue {
+	struct iou_loop_state state;
 	struct wait_queue_entry wq;
 	struct io_ring_ctx *ctx;
-	unsigned cq_tail;
 	unsigned cq_min_tail;
 	unsigned nr_timeouts;
 	int hit_timeout;
 	ktime_t min_timeout;
-	ktime_t timeout;
 	struct hrtimer t;
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
@@ -59,7 +63,8 @@ struct io_wait_queue {
 static inline bool io_should_wake(struct io_wait_queue *iowq)
 {
 	struct io_ring_ctx *ctx = iowq->ctx;
-	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
+	u32 target = iowq->state.target_cq_tail;
+	int dist = READ_ONCE(ctx->rings->cq.tail) - target;
 
 	/*
 	 * Wake up if we have enough events, or if a timeout occurred since we
diff --git a/io_uring/napi.c b/io_uring/napi.c
index 4a10de03e426..e08bddc1dbd2 100644
--- a/io_uring/napi.c
+++ b/io_uring/napi.c
@@ -360,8 +360,8 @@ void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
 		return;
 
 	iowq->napi_busy_poll_dt = READ_ONCE(ctx->napi_busy_poll_dt);
-	if (iowq->timeout != KTIME_MAX) {
-		ktime_t dt = ktime_sub(iowq->timeout, io_get_time(ctx));
+	if (iowq->state.timeout != KTIME_MAX) {
+		ktime_t dt = ktime_sub(iowq->state.timeout, io_get_time(ctx));
 
 		iowq->napi_busy_poll_dt = min_t(u64, iowq->napi_busy_poll_dt, dt);
 	}
-- 
2.49.0



Thread overview: 22+ messages
2025-06-06 13:57 [RFC v2 0/5] BPF controlled io_uring Pavel Begunkov
2025-06-06 13:57 ` Pavel Begunkov [this message]
2025-06-06 13:57 ` [RFC v2 2/5] io_uring/bpf: add stubs for bpf struct_ops Pavel Begunkov
2025-06-06 14:25   ` Jens Axboe
2025-06-06 14:28     ` Jens Axboe
2025-06-06 13:58 ` [RFC v2 3/5] io_uring/bpf: implement struct_ops registration Pavel Begunkov
2025-06-06 14:57   ` Jens Axboe
2025-06-06 20:00     ` Pavel Begunkov
2025-06-06 21:07       ` Jens Axboe
2025-06-06 13:58 ` [RFC v2 4/5] io_uring/bpf: add handle events callback Pavel Begunkov
2025-06-12  2:28   ` Alexei Starovoitov
2025-06-12  9:33     ` Pavel Begunkov
2025-06-12 14:07     ` Jens Axboe
2025-06-06 13:58 ` [RFC v2 5/5] io_uring/bpf: add basic kfunc helpers Pavel Begunkov
2025-06-12  2:47   ` Alexei Starovoitov
2025-06-12 13:26     ` Pavel Begunkov
2025-06-12 14:06       ` Jens Axboe
2025-06-13  0:25       ` Alexei Starovoitov
2025-06-13 16:12         ` Pavel Begunkov
2025-06-13 19:51           ` Alexei Starovoitov
2025-06-16 20:34             ` Pavel Begunkov
2025-06-06 14:38 ` [RFC v2 0/5] BPF controlled io_uring Jens Axboe
