* [PATCH] io_uring: kbuf: inline io_kbuf_recycle_ring()
From: Hao Xu @ 2022-06-23 7:17 UTC
To: io-uring; +Cc: Jens Axboe, Pavel Begunkov
From: Hao Xu <[email protected]>
Make io_kbuf_recycle_ring() inline since it is the fast path for
ring-mapped provided buffers.
Signed-off-by: Hao Xu <[email protected]>
---
io_uring/kbuf.c | 66 ------------------------------------------------
io_uring/kbuf.h | 67 ++++++++++++++++++++++++++++++++++++++++++++++---
2 files changed, 64 insertions(+), 69 deletions(-)
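
For reference, the calling pattern this targets looks roughly like the
sketch below. This is only an illustration (the wrapper name is made up
here, and the real caller in kbuf.h is not reproduced in full), but the
flag names match the diff:

	static inline void kbuf_recycle_sketch(struct io_kiocb *req,
					       unsigned issue_flags)
	{
		/* legacy provided buffers: takes the ring lock and does
		 * a list_add(), comparatively heavyweight
		 */
		if (req->flags & REQ_F_BUFFER_SELECTED)
			io_kbuf_recycle_legacy(req, issue_flags);
		/* ring-mapped provided buffers: just flag/head
		 * bookkeeping, which is why io_kbuf_recycle_ring() is
		 * worth inlining
		 */
		else if (req->flags & REQ_F_BUFFER_RING)
			io_kbuf_recycle_ring(req);
	}
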
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 4b7f2aa99e38..306db7929a50 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -17,8 +17,6 @@
#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))
-#define BGID_ARRAY 64
-
struct io_provide_buf {
	struct file *file;
	__u64 addr;
@@ -28,15 +26,6 @@ struct io_provide_buf {
	__u16 bid;
};
-static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
-							 unsigned int bgid)
-{
-	if (ctx->io_bl && bgid < BGID_ARRAY)
-		return &ctx->io_bl[bgid];
-
-	return xa_load(&ctx->io_bl_xa, bgid);
-}
-
static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
@@ -47,61 +36,6 @@ static int io_buffer_add_list(struct io_ring_ctx *ctx,
	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}
-void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
-{
-	struct io_ring_ctx *ctx = req->ctx;
-	struct io_buffer_list *bl;
-	struct io_buffer *buf;
-
-	/*
-	 * For legacy provided buffer mode, don't recycle if we already did
-	 * IO to this buffer. For ring-mapped provided buffer mode, we should
-	 * increment ring->head to explicitly monopolize the buffer to avoid
-	 * multiple use.
-	 */
-	if (req->flags & REQ_F_PARTIAL_IO)
-		return;
-
-	io_ring_submit_lock(ctx, issue_flags);
-
-	buf = req->kbuf;
-	bl = io_buffer_get_list(ctx, buf->bgid);
-	list_add(&buf->list, &bl->buf_list);
-	req->flags &= ~REQ_F_BUFFER_SELECTED;
-	req->buf_index = buf->bgid;
-
-	io_ring_submit_unlock(ctx, issue_flags);
-	return;
-}
-
-void io_kbuf_recycle_ring(struct io_kiocb *req)
-{
-	/*
-	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
-	 * the flag and hence ensure that bl->head doesn't get incremented.
-	 * If the tail has already been incremented, hang on to it.
-	 * The exception is partial io, that case we should increment bl->head
-	 * to monopolize the buffer.
-	 */
-	if (req->buf_list) {
-		if (req->flags & REQ_F_PARTIAL_IO) {
-			/*
-			 * If we end up here, then the io_uring_lock has
-			 * been kept held since we retrieved the buffer.
-			 * For the io-wq case, we already cleared
-			 * req->buf_list when the buffer was retrieved,
-			 * hence it cannot be set here for that case.
-			 */
-			req->buf_list->head++;
-			req->buf_list = NULL;
-		} else {
-			req->buf_index = req->buf_list->bgid;
-			req->flags &= ~REQ_F_BUFFER_RING;
-		}
-	}
-	return;
-}
-
unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
{
	unsigned int cflags;
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index b5a89ffadf31..5b0b129e3e9c 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -4,6 +4,8 @@
#include <uapi/linux/io_uring.h>
+#define BGID_ARRAY 64
+
struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
@@ -48,9 +50,6 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
-void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
-void io_kbuf_recycle_ring(struct io_kiocb *req);
-
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
@@ -58,6 +57,68 @@ static inline bool io_do_buffer_select(struct io_kiocb *req)
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}
+static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
+							 unsigned int bgid)
+{
+	if (ctx->io_bl && bgid < BGID_ARRAY)
+		return &ctx->io_bl[bgid];
+
+	return xa_load(&ctx->io_bl_xa, bgid);
+}
+
+static void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_buffer_list *bl;
+	struct io_buffer *buf;
+
+	/*
+	 * For legacy provided buffer mode, don't recycle if we already did
+	 * IO to this buffer. For ring-mapped provided buffer mode, we should
+	 * increment ring->head to explicitly monopolize the buffer to avoid
+	 * multiple use.
+	 */
+	if (req->flags & REQ_F_PARTIAL_IO)
+		return;
+
+	io_ring_submit_lock(ctx, issue_flags);
+
+	buf = req->kbuf;
+	bl = io_buffer_get_list(ctx, buf->bgid);
+	list_add(&buf->list, &bl->buf_list);
+	req->flags &= ~REQ_F_BUFFER_SELECTED;
+	req->buf_index = buf->bgid;
+
+	io_ring_submit_unlock(ctx, issue_flags);
+}
+
+static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
+{
+	/*
+	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
+	 * the flag and hence ensure that bl->head doesn't get incremented.
+	 * If the tail has already been incremented, hang on to it.
+	 * The exception is partial io, that case we should increment bl->head
+	 * to monopolize the buffer.
+	 */
+	if (req->buf_list) {
+		if (req->flags & REQ_F_PARTIAL_IO) {
+			/*
+			 * If we end up here, then the io_uring_lock has
+			 * been kept held since we retrieved the buffer.
+			 * For the io-wq case, we already cleared
+			 * req->buf_list when the buffer was retrieved,
+			 * hence it cannot be set here for that case.
+			 */
+			req->buf_list->head++;
+			req->buf_list = NULL;
+		} else {
+			req->buf_index = req->buf_list->bgid;
+			req->flags &= ~REQ_F_BUFFER_RING;
+		}
+	}
+}
+
static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BUFFER_SELECTED)
base-commit: 5ec69c3a15ae6e904d76545d9a9c686eb758def0
--
2.25.1
* Re: [PATCH] io_uring: kbuf: inline io_kbuf_recycle_ring()
From: Jens Axboe @ 2022-06-23 12:22 UTC
To: Hao Xu, io-uring; +Cc: Pavel Begunkov
On 6/23/22 1:17 AM, Hao Xu wrote:
> From: Hao Xu <[email protected]>
>
> Make io_kbuf_recycle_ring() inline since it is the fast path for
> ring-mapped provided buffers.
The legacy recycling path doesn't need to get inlined; it's too fat
for that. Let's move just io_kbuf_recycle_ring() into the header and
leave io_kbuf_recycle_legacy() out-of-line in kbuf.c as a plain
function call.
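
Something like this, roughly (just a sketch of the split I mean, with
the comments trimmed, not a literal diff):

	/* kbuf.h */
	void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

	static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
	{
		if (req->buf_list) {
			if (req->flags & REQ_F_PARTIAL_IO) {
				/* partial IO: bump head to keep the buffer */
				req->buf_list->head++;
				req->buf_list = NULL;
			} else {
				req->buf_index = req->buf_list->bgid;
				req->flags &= ~REQ_F_BUFFER_RING;
			}
		}
	}

	/* kbuf.c keeps the full io_kbuf_recycle_legacy() definition;
	 * it takes the ring lock and touches the buffer list anyway,
	 * so the call overhead is noise there.
	 */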
--
Jens Axboe