* [PATCH v2 2/4] io_uring: simplify selected buf handling
2021-12-05 14:37 [PATCH v2 0/4] small 5.17 updates Pavel Begunkov
2021-12-05 14:37 ` [PATCH v2 1/4] io_uring: move up io_put_kbuf() and io_put_rw_kbuf() Pavel Begunkov
@ 2021-12-05 14:37 ` Pavel Begunkov
2021-12-05 14:37 ` [PATCH v2 3/4] io_uring: tweak iopoll CQE_SKIP event counting Pavel Begunkov
` (2 subsequent siblings)
4 siblings, 0 replies; 6+ messages in thread
From: Pavel Begunkov @ 2021-12-05 14:37 UTC
To: io-uring; +Cc: asml.silence
As selected buffers are now stored in a separate field of the request,
get rid of the rw/recv-specific helpers and simplify the code.
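For context, here is a minimal userspace sketch (not part of the patch) of
where the cflags built by io_put_kbuf() end up: the selected buffer id is
handed back to userspace in cqe->flags, tagged with IORING_CQE_F_BUFFER.
It assumes liburing, a hypothetical buffer group id BGID and fd, and omits
error handling.

	/* Hypothetical sketch: provide buffers, then let the kernel pick one.
	 * Assumes liburing; fd and BGID are illustrative only. */
	#include <liburing.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define BGID	1	/* hypothetical buffer group id */
	#define NBUFS	8
	#define BUF_LEN	4096

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		char *bufs = malloc(NBUFS * BUF_LEN);
		int fd = 0;	/* illustrative: read from stdin */

		io_uring_queue_init(8, &ring, 0);

		/* hand NBUFS buffers of BUF_LEN bytes to the kernel under BGID */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_provide_buffers(sqe, bufs, BUF_LEN, NBUFS, BGID, 0);
		io_uring_submit(&ring);
		io_uring_wait_cqe(&ring, &cqe);
		io_uring_cqe_seen(&ring, cqe);

		/* let the kernel select a buffer for the read */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_read(sqe, fd, NULL, BUF_LEN, 0);
		sqe->flags |= IOSQE_BUFFER_SELECT;
		sqe->buf_group = BGID;
		io_uring_submit(&ring);

		io_uring_wait_cqe(&ring, &cqe);
		if (cqe->flags & IORING_CQE_F_BUFFER) {
			/* the bid that io_put_kbuf() encoded into cflags */
			unsigned bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
			printf("read %d bytes into buffer %u\n", cqe->res, bid);
		}
		io_uring_cqe_seen(&ring, cqe);
		io_uring_queue_exit(&ring);
		return 0;
	}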
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 44 +++++++++++++++++---------------------------
1 file changed, 17 insertions(+), 27 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index ffbe1b76f3a0..64add8260abb 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1273,22 +1273,24 @@ static inline void io_req_set_rsrc_node(struct io_kiocb *req,
}
}
-static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
+static unsigned int __io_put_kbuf(struct io_kiocb *req)
{
+ struct io_buffer *kbuf = req->kbuf;
unsigned int cflags;
cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
cflags |= IORING_CQE_F_BUFFER;
req->flags &= ~REQ_F_BUFFER_SELECTED;
kfree(kbuf);
+ req->kbuf = NULL;
return cflags;
}
-static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
+static inline unsigned int io_put_kbuf(struct io_kiocb *req)
{
if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
return 0;
- return io_put_kbuf(req, req->kbuf);
+ return __io_put_kbuf(req);
}
static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
@@ -2532,14 +2534,14 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
prev = start;
wq_list_for_each_resume(pos, prev) {
struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
- u32 cflags;
/* order with io_complete_rw_iopoll(), e.g. ->result updates */
if (!smp_load_acquire(&req->iopoll_completed))
break;
- cflags = io_put_rw_kbuf(req);
+
if (!(req->flags & REQ_F_CQE_SKIP))
- __io_fill_cqe(ctx, req->user_data, req->result, cflags);
+ __io_fill_cqe(ctx, req->user_data, req->result,
+ io_put_kbuf(req));
nr_events++;
}
@@ -2715,7 +2717,7 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
static void io_req_task_complete(struct io_kiocb *req, bool *locked)
{
- unsigned int cflags = io_put_rw_kbuf(req);
+ unsigned int cflags = io_put_kbuf(req);
int res = req->result;
if (*locked) {
@@ -2731,7 +2733,7 @@ static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
{
if (__io_complete_rw_common(req, res))
return;
- __io_req_complete(req, issue_flags, req->result, io_put_rw_kbuf(req));
+ __io_req_complete(req, issue_flags, req->result, io_put_kbuf(req));
}
static void io_complete_rw(struct kiocb *kiocb, long res)
@@ -4979,11 +4981,6 @@ static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
return io_buffer_select(req, &sr->len, sr->bgid, issue_flags);
}
-static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
-{
- return io_put_kbuf(req, req->kbuf);
-}
-
static int io_recvmsg_prep_async(struct io_kiocb *req)
{
int ret;
@@ -5021,8 +5018,7 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
struct socket *sock;
struct io_buffer *kbuf;
unsigned flags;
- int min_ret = 0;
- int ret, cflags = 0;
+ int ret, min_ret = 0;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
sock = sock_from_file(req->file);
@@ -5066,13 +5062,11 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
req_set_fail(req);
}
- if (req->flags & REQ_F_BUFFER_SELECTED)
- cflags = io_put_recv_kbuf(req);
/* fast path, check for non-NULL to avoid function call */
if (kmsg->free_iov)
kfree(kmsg->free_iov);
req->flags &= ~REQ_F_NEED_CLEANUP;
- __io_req_complete(req, issue_flags, ret, cflags);
+ __io_req_complete(req, issue_flags, ret, io_put_kbuf(req));
return 0;
}
@@ -5085,8 +5079,7 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
struct socket *sock;
struct iovec iov;
unsigned flags;
- int min_ret = 0;
- int ret, cflags = 0;
+ int ret, min_ret = 0;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
sock = sock_from_file(req->file);
@@ -5128,9 +5121,8 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
req_set_fail(req);
}
- if (req->flags & REQ_F_BUFFER_SELECTED)
- cflags = io_put_recv_kbuf(req);
- __io_req_complete(req, issue_flags, ret, cflags);
+
+ __io_req_complete(req, issue_flags, ret, io_put_kbuf(req));
return 0;
}
@@ -6578,10 +6570,8 @@ static __cold void io_drain_req(struct io_kiocb *req)
static void io_clean_op(struct io_kiocb *req)
{
- if (req->flags & REQ_F_BUFFER_SELECTED) {
- kfree(req->kbuf);
- req->kbuf = NULL;
- }
+ if (req->flags & REQ_F_BUFFER_SELECTED)
+ io_put_kbuf(req);
if (req->flags & REQ_F_NEED_CLEANUP) {
switch (req->opcode) {
--
2.34.0
* [PATCH v2 3/4] io_uring: tweak iopoll CQE_SKIP event counting
2021-12-05 14:37 [PATCH v2 0/4] small 5.17 updates Pavel Begunkov
2021-12-05 14:37 ` [PATCH v2 1/4] io_uring: move up io_put_kbuf() and io_put_rw_kbuf() Pavel Begunkov
2021-12-05 14:37 ` [PATCH v2 2/4] io_uring: simplify selected buf handling Pavel Begunkov
@ 2021-12-05 14:37 ` Pavel Begunkov
2021-12-05 14:38 ` [PATCH v2 4/4] io_uring: reuse io_req_task_complete for timeouts Pavel Begunkov
2021-12-05 15:56 ` [PATCH v2 0/4] small 5.17 updates Jens Axboe
4 siblings, 0 replies; 6+ messages in thread
From: Pavel Begunkov @ 2021-12-05 14:37 UTC
To: io-uring; +Cc: asml.silence
When iopolling, userspace specifies the minimum number of "events" it
expects. Previously we had one CQE per request, so the definition of an
"event" was unambiguous, but that is no longer the case with
REQ_F_CQE_SKIP.
Currently it counts the number of completed requests; replace that with
the number of posted CQEs. This allows users of the "one CQE per link"
scheme to wait for all N links in a single syscall, which is not
possible without this patch and otherwise requires extra context
switches.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 64add8260abb..ea7a0daa0b3b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2538,10 +2538,10 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
/* order with io_complete_rw_iopoll(), e.g. ->result updates */
if (!smp_load_acquire(&req->iopoll_completed))
break;
+ if (unlikely(req->flags & REQ_F_CQE_SKIP))
+ continue;
- if (!(req->flags & REQ_F_CQE_SKIP))
- __io_fill_cqe(ctx, req->user_data, req->result,
- io_put_kbuf(req));
+ __io_fill_cqe(ctx, req->user_data, req->result, io_put_kbuf(req));
nr_events++;
}
--
2.34.0