public inbox for [email protected]
* [PATCH v2 0/4] small 5.17 updates
@ 2021-12-05 14:37 Pavel Begunkov
  2021-12-05 14:37 ` [PATCH v2 1/4] io_uring: move up io_put_kbuf() and io_put_rw_kbuf() Pavel Begunkov
                   ` (4 more replies)
  0 siblings, 5 replies; 6+ messages in thread
From: Pavel Begunkov @ 2021-12-05 14:37 UTC (permalink / raw)
  To: io-uring; +Cc: asml.silence

3/4 changes the IOPOLL event counting for CQE_SKIP while we still can;
the others are just small cleanups.

v2: adjusted 3/4 commit message

Hao Xu (1):
  io_uring: move up io_put_kbuf() and io_put_rw_kbuf()

Pavel Begunkov (3):
  io_uring: simplify selected buf handling
  io_uring: tweak iopoll CQE_SKIP event counting
  io_uring: reuse io_req_task_complete for timeouts

 fs/io_uring.c | 91 +++++++++++++++++++++------------------------------
 1 file changed, 38 insertions(+), 53 deletions(-)

-- 
2.34.0



* [PATCH v2 1/4] io_uring: move up io_put_kbuf() and io_put_rw_kbuf()
  2021-12-05 14:37 [PATCH v2 0/4] small 5.17 updates Pavel Begunkov
@ 2021-12-05 14:37 ` Pavel Begunkov
  2021-12-05 14:37 ` [PATCH v2 2/4] io_uring: simplify selected buf handling Pavel Begunkov
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Pavel Begunkov @ 2021-12-05 14:37 UTC (permalink / raw)
  To: io-uring; +Cc: asml.silence, Hao Xu

From: Hao Xu <[email protected]>

Move them up to avoid explicit forward declarations; we will use them in
later patches.
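
(A hypothetical illustration, not part of the patch: without the move,
the top of fs/io_uring.c would need a forward declaration like

	static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf);

before the first user; moving the definitions up avoids that.)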

Reviewed-by: Pavel Begunkov <[email protected]>
Signed-off-by: Hao Xu <[email protected]>
Signed-off-by: Pavel Begunkov <[email protected]>
---
 fs/io_uring.c | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8b6bfed16f65..ffbe1b76f3a0 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1273,6 +1273,24 @@ static inline void io_req_set_rsrc_node(struct io_kiocb *req,
 	}
 }
 
+static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
+{
+	unsigned int cflags;
+
+	cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
+	cflags |= IORING_CQE_F_BUFFER;
+	req->flags &= ~REQ_F_BUFFER_SELECTED;
+	kfree(kbuf);
+	return cflags;
+}
+
+static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
+{
+	if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
+		return 0;
+	return io_put_kbuf(req, req->kbuf);
+}
+
 static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
 {
 	bool got = percpu_ref_tryget(ref);
@@ -2456,24 +2474,6 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
 	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
 }
 
-static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
-{
-	unsigned int cflags;
-
-	cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
-	cflags |= IORING_CQE_F_BUFFER;
-	req->flags &= ~REQ_F_BUFFER_SELECTED;
-	kfree(kbuf);
-	return cflags;
-}
-
-static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
-{
-	if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
-		return 0;
-	return io_put_kbuf(req, req->kbuf);
-}
-
 static inline bool io_run_task_work(void)
 {
 	if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
-- 
2.34.0



* [PATCH v2 2/4] io_uring: simplify selected buf handling
  2021-12-05 14:37 [PATCH v2 0/4] small 5.17 updates Pavel Begunkov
  2021-12-05 14:37 ` [PATCH v2 1/4] io_uring: move up io_put_kbuf() and io_put_rw_kbuf() Pavel Begunkov
@ 2021-12-05 14:37 ` Pavel Begunkov
  2021-12-05 14:37 ` [PATCH v2 3/4] io_uring: tweak iopoll CQE_SKIP event counting Pavel Begunkov
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Pavel Begunkov @ 2021-12-05 14:37 UTC (permalink / raw)
  To: io-uring; +Cc: asml.silence

As selected buffers are now stored in a separate field of the request,
get rid of the rw/recv-specific helpers and simplify the code.

Signed-off-by: Pavel Begunkov <[email protected]>
---
 fs/io_uring.c | 44 +++++++++++++++++---------------------------
 1 file changed, 17 insertions(+), 27 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index ffbe1b76f3a0..64add8260abb 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1273,22 +1273,24 @@ static inline void io_req_set_rsrc_node(struct io_kiocb *req,
 	}
 }
 
-static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
+static unsigned int __io_put_kbuf(struct io_kiocb *req)
 {
+	struct io_buffer *kbuf = req->kbuf;
 	unsigned int cflags;
 
 	cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
 	cflags |= IORING_CQE_F_BUFFER;
 	req->flags &= ~REQ_F_BUFFER_SELECTED;
 	kfree(kbuf);
+	req->kbuf = NULL;
 	return cflags;
 }
 
-static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
+static inline unsigned int io_put_kbuf(struct io_kiocb *req)
 {
 	if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
 		return 0;
-	return io_put_kbuf(req, req->kbuf);
+	return __io_put_kbuf(req);
 }
 
 static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
@@ -2532,14 +2534,14 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 	prev = start;
 	wq_list_for_each_resume(pos, prev) {
 		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
-		u32 cflags;
 
 		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
 		if (!smp_load_acquire(&req->iopoll_completed))
 			break;
-		cflags = io_put_rw_kbuf(req);
+
 		if (!(req->flags & REQ_F_CQE_SKIP))
-			__io_fill_cqe(ctx, req->user_data, req->result, cflags);
+			__io_fill_cqe(ctx, req->user_data, req->result,
+				      io_put_kbuf(req));
 		nr_events++;
 	}
 
@@ -2715,7 +2717,7 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 
 static void io_req_task_complete(struct io_kiocb *req, bool *locked)
 {
-	unsigned int cflags = io_put_rw_kbuf(req);
+	unsigned int cflags = io_put_kbuf(req);
 	int res = req->result;
 
 	if (*locked) {
@@ -2731,7 +2733,7 @@ static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
 {
 	if (__io_complete_rw_common(req, res))
 		return;
-	__io_req_complete(req, issue_flags, req->result, io_put_rw_kbuf(req));
+	__io_req_complete(req, issue_flags, req->result, io_put_kbuf(req));
 }
 
 static void io_complete_rw(struct kiocb *kiocb, long res)
@@ -4979,11 +4981,6 @@ static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
 	return io_buffer_select(req, &sr->len, sr->bgid, issue_flags);
 }
 
-static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
-{
-	return io_put_kbuf(req, req->kbuf);
-}
-
 static int io_recvmsg_prep_async(struct io_kiocb *req)
 {
 	int ret;
@@ -5021,8 +5018,7 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 	struct socket *sock;
 	struct io_buffer *kbuf;
 	unsigned flags;
-	int min_ret = 0;
-	int ret, cflags = 0;
+	int ret, min_ret = 0;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
 	sock = sock_from_file(req->file);
@@ -5066,13 +5062,11 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 		req_set_fail(req);
 	}
 
-	if (req->flags & REQ_F_BUFFER_SELECTED)
-		cflags = io_put_recv_kbuf(req);
 	/* fast path, check for non-NULL to avoid function call */
 	if (kmsg->free_iov)
 		kfree(kmsg->free_iov);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
-	__io_req_complete(req, issue_flags, ret, cflags);
+	__io_req_complete(req, issue_flags, ret, io_put_kbuf(req));
 	return 0;
 }
 
@@ -5085,8 +5079,7 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 	struct socket *sock;
 	struct iovec iov;
 	unsigned flags;
-	int min_ret = 0;
-	int ret, cflags = 0;
+	int ret, min_ret = 0;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
 	sock = sock_from_file(req->file);
@@ -5128,9 +5121,8 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
 		req_set_fail(req);
 	}
-	if (req->flags & REQ_F_BUFFER_SELECTED)
-		cflags = io_put_recv_kbuf(req);
-	__io_req_complete(req, issue_flags, ret, cflags);
+
+	__io_req_complete(req, issue_flags, ret, io_put_kbuf(req));
 	return 0;
 }
 
@@ -6578,10 +6570,8 @@ static __cold void io_drain_req(struct io_kiocb *req)
 
 static void io_clean_op(struct io_kiocb *req)
 {
-	if (req->flags & REQ_F_BUFFER_SELECTED) {
-		kfree(req->kbuf);
-		req->kbuf = NULL;
-	}
+	if (req->flags & REQ_F_BUFFER_SELECTED)
+		io_put_kbuf(req);
 
 	if (req->flags & REQ_F_NEED_CLEANUP) {
 		switch (req->opcode) {
-- 
2.34.0



* [PATCH v2 3/4] io_uring: tweak iopoll CQE_SKIP event counting
  2021-12-05 14:37 [PATCH v2 0/4] small 5.17 updates Pavel Begunkov
  2021-12-05 14:37 ` [PATCH v2 1/4] io_uring: move up io_put_kbuf() and io_put_rw_kbuf() Pavel Begunkov
  2021-12-05 14:37 ` [PATCH v2 2/4] io_uring: simplify selected buf handling Pavel Begunkov
@ 2021-12-05 14:37 ` Pavel Begunkov
  2021-12-05 14:38 ` [PATCH v2 4/4] io_uring: reuse io_req_task_complete for timeouts Pavel Begunkov
  2021-12-05 15:56 ` [PATCH v2 0/4] small 5.17 updates Jens Axboe
  4 siblings, 0 replies; 6+ messages in thread
From: Pavel Begunkov @ 2021-12-05 14:37 UTC (permalink / raw)
  To: io-uring; +Cc: asml.silence

When iopolling, userspace specifies the minimum number of "events" it
expects. Previously we had one CQE per request, so the definition of an
"event" was unequivocal, but that is no longer the case with
REQ_F_CQE_SKIP.

Currently it counts the number of completed requests; replace that with
the number of posted CQEs. This allows users of the "one CQE per link"
scheme to wait for all N links in a single syscall, which was not
possible before and required extra context switches.
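
For illustration, a minimal userspace sketch of the "one CQE per link"
scheme this enables (hypothetical code, not part of the patch; assumes
liburing >= 2.1 for IOSQE_CQE_SKIP_SUCCESS, a ring created with
IORING_SETUP_IOPOLL with queue depth >= 2 * NR_LINKS, an fd opened with
O_DIRECT, and suitably aligned buffers):

	#include <liburing.h>

	#define NR_LINKS 8

	static int wait_all_links(struct io_uring *ring, int fd, void *bufs[])
	{
		int i;

		for (i = 0; i < NR_LINKS; i++) {
			struct io_uring_sqe *sqe;

			/* first step of a link: suppress its CQE on success */
			sqe = io_uring_get_sqe(ring);
			io_uring_prep_read(sqe, fd, bufs[2 * i], 4096, 0);
			sqe->flags |= IOSQE_IO_LINK | IOSQE_CQE_SKIP_SUCCESS;

			/* last step: posts the single CQE for the whole link */
			sqe = io_uring_get_sqe(ring);
			io_uring_prep_read(sqe, fd, bufs[2 * i + 1], 4096, 4096);
		}
		/*
		 * min_complete now counts posted CQEs, so all NR_LINKS
		 * links can be awaited with a single syscall.
		 */
		return io_uring_submit_and_wait(ring, NR_LINKS);
	}

With the old counting, the wait could be satisfied once NR_LINKS
requests had completed while only about half as many CQEs had been
posted, forcing another io_uring_enter() round trip.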

Signed-off-by: Pavel Begunkov <[email protected]>
---
 fs/io_uring.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 64add8260abb..ea7a0daa0b3b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2538,10 +2538,10 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
 		if (!smp_load_acquire(&req->iopoll_completed))
 			break;
+		if (unlikely(req->flags & REQ_F_CQE_SKIP))
+			continue;
 
-		if (!(req->flags & REQ_F_CQE_SKIP))
-			__io_fill_cqe(ctx, req->user_data, req->result,
-				      io_put_kbuf(req));
+		__io_fill_cqe(ctx, req->user_data, req->result, io_put_kbuf(req));
 		nr_events++;
 	}
 
-- 
2.34.0



* [PATCH v2 4/4] io_uring: reuse io_req_task_complete for timeouts
  2021-12-05 14:37 [PATCH v2 0/4] small 5.17 updates Pavel Begunkov
                   ` (2 preceding siblings ...)
  2021-12-05 14:37 ` [PATCH v2 3/4] io_uring: tweak iopoll CQE_SKIP event counting Pavel Begunkov
@ 2021-12-05 14:38 ` Pavel Begunkov
  2021-12-05 15:56 ` [PATCH v2 0/4] small 5.17 updates Jens Axboe
  4 siblings, 0 replies; 6+ messages in thread
From: Pavel Begunkov @ 2021-12-05 14:38 UTC (permalink / raw)
  To: io-uring; +Cc: asml.silence

With the kbuf handling unified, io_req_task_complete() is now a generic
function; use it for timeouts' task-work (tw) completions.

Signed-off-by: Pavel Begunkov <[email protected]>
---
 fs/io_uring.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index ea7a0daa0b3b..1265dc1942eb 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5953,15 +5953,6 @@ static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
 	return 0;
 }
 
-static void io_req_task_timeout(struct io_kiocb *req, bool *locked)
-{
-	struct io_timeout_data *data = req->async_data;
-
-	if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
-		req_set_fail(req);
-	io_req_complete_post(req, -ETIME, 0);
-}
-
 static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 {
 	struct io_timeout_data *data = container_of(timer,
@@ -5976,7 +5967,11 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 		atomic_read(&req->ctx->cq_timeouts) + 1);
 	spin_unlock_irqrestore(&ctx->timeout_lock, flags);
 
-	req->io_task_work.func = io_req_task_timeout;
+	if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
+		req_set_fail(req);
+
+	req->result = -ETIME;
+	req->io_task_work.func = io_req_task_complete;
 	io_req_task_work_add(req);
 	return HRTIMER_NORESTART;
 }
-- 
2.34.0



* Re: [PATCH v2 0/4] small 5.17 updates
  2021-12-05 14:37 [PATCH v2 0/4] small 5.17 updates Pavel Begunkov
                   ` (3 preceding siblings ...)
  2021-12-05 14:38 ` [PATCH v2 4/4] io_uring: reuse io_req_task_complete for timeouts Pavel Begunkov
@ 2021-12-05 15:56 ` Jens Axboe
  4 siblings, 0 replies; 6+ messages in thread
From: Jens Axboe @ 2021-12-05 15:56 UTC (permalink / raw)
  To: io-uring, Pavel Begunkov

On Sun, 5 Dec 2021 14:37:56 +0000, Pavel Begunkov wrote:
> 3/4 changes the IOPOLL event counting for CQE_SKIP while we still can;
> the others are just small cleanups.
> 
> v2: adjusted 3/4 commit message
> 
> Hao Xu (1):
>   io_uring: move up io_put_kbuf() and io_put_rw_kbuf()
> 
> [...]

Applied, thanks!

[1/4] io_uring: move up io_put_kbuf() and io_put_rw_kbuf()
      commit: 3648e5265cfa51492a65ee5a01f151807ec46dee
[2/4] io_uring: simplify selected buf handling
      commit: d1fd1c201d750711e17377acb4914d3ea29a608c
[3/4] io_uring: tweak iopoll CQE_SKIP event counting
      commit: 83a13a4181b0e874d1f196e11b953c3c9f009f68
[4/4] io_uring: reuse io_req_task_complete for timeouts
      commit: a90c8bf6590676035336ae98cc51bce1aeb96c33

Best regards,
-- 
Jens Axboe


