From: Pavel Begunkov <[email protected]>
To: [email protected]
Cc: Jens Axboe <[email protected]>, [email protected]
Subject: [PATCH for-next v3 04/16] io_uring: don't inline io_put_kbuf
Date: Thu, 16 Jun 2022 10:22:00 +0100
Message-ID: <2e21ccf0be471ffa654032914b9430813cae53f8.1655371007.git.asml.silence@gmail.com>
In-Reply-To: <[email protected]>
io_put_kbuf() is huge; inlining it at every call site bloats the kernel. Move the slow path out of line into io_uring/kbuf.c and keep only the cheap flag check inline.
Signed-off-by: Pavel Begunkov <[email protected]>
---
io_uring/kbuf.c | 33 +++++++++++++++++++++++++++++++++
io_uring/kbuf.h | 38 ++++++--------------------------------
2 files changed, 39 insertions(+), 32 deletions(-)
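Side note for review, not part of the change itself: the point of the split is that the header keeps only the cheap flag test inline, so the common "no buffer selected" case costs a test and a return at the call site, while the lock-taking slow path exists exactly once in kbuf.c. A minimal userspace analogue of the pattern (illustrative sketch only; req, put_buf, put_buf_slow, F_HAS_BUF and comp_lock are invented names, not kernel API):

	#include <pthread.h>
	#include <stdio.h>

	#define F_HAS_BUF 0x1

	struct req { unsigned flags; };

	static pthread_mutex_t comp_lock = PTHREAD_MUTEX_INITIALIZER;

	/* slow path: compiled once, shared by every caller */
	unsigned put_buf_slow(struct req *r)
	{
		pthread_mutex_lock(&comp_lock);
		/* ... return r's buffer to a shared completion list ... */
		pthread_mutex_unlock(&comp_lock);
		return 1; /* "a buffer was consumed" flag for the caller */
	}

	/* fast path: small enough that inlining it is pure win */
	static inline unsigned put_buf(struct req *r)
	{
		if (!(r->flags & F_HAS_BUF))
			return 0; /* common case: nothing to put back */
		return put_buf_slow(r);
	}

	int main(void)
	{
		struct req r = { .flags = 0 };
		printf("%u\n", put_buf(&r)); /* prints 0; no lock taken */
		return 0;
	}

The win is that the text of the lock/unlock sequence is emitted once instead of at every call site, while the hot no-buffer path stays a branch and a return in the caller.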
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 5885343705bd..223d9db2ba94 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -82,6 +82,39 @@ static int io_buffer_add_list(struct io_ring_ctx *ctx,
return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}
+unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
+{
+ unsigned int cflags;
+
+ /*
+ * We can add this buffer back to two lists:
+ *
+ * 1) The io_buffers_cache list. This one is protected by the
+ * ctx->uring_lock. If we already hold this lock, add back to this
+ * list as we can grab it from issue as well.
+ * 2) The io_buffers_comp list. This one is protected by the
+ * ctx->completion_lock.
+ *
+ * We migrate buffers from the comp_list to the issue cache list
+ * when we need one.
+ */
+ if (req->flags & REQ_F_BUFFER_RING) {
+ /* no buffers to recycle for this case */
+ cflags = __io_put_kbuf_list(req, NULL);
+ } else if (issue_flags & IO_URING_F_UNLOCKED) {
+ struct io_ring_ctx *ctx = req->ctx;
+
+ spin_lock(&ctx->completion_lock);
+ cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
+ spin_unlock(&ctx->completion_lock);
+ } else {
+ lockdep_assert_held(&req->ctx->uring_lock);
+
+ cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
+ }
+ return cflags;
+}
+
static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
struct io_buffer_list *bl)
{
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index 80b6df2c7535..5da3d4039aed 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -47,6 +47,8 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
+unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
+
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
if (!(req->flags & REQ_F_BUFFER_SELECT))
@@ -70,7 +72,8 @@ static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
__io_kbuf_recycle(req, issue_flags);
}
-static unsigned int __io_put_kbuf(struct io_kiocb *req, struct list_head *list)
+static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
+ struct list_head *list)
{
if (req->flags & REQ_F_BUFFER_RING) {
if (req->buf_list)
@@ -90,44 +93,15 @@ static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
return 0;
- return __io_put_kbuf(req, &req->ctx->io_buffers_comp);
+ return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
}
static inline unsigned int io_put_kbuf(struct io_kiocb *req,
unsigned issue_flags)
{
- unsigned int cflags;
if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
return 0;
-
- /*
- * We can add this buffer back to two lists:
- *
- * 1) The io_buffers_cache list. This one is protected by the
- * ctx->uring_lock. If we already hold this lock, add back to this
- * list as we can grab it from issue as well.
- * 2) The io_buffers_comp list. This one is protected by the
- * ctx->completion_lock.
- *
- * We migrate buffers from the comp_list to the issue cache list
- * when we need one.
- */
- if (req->flags & REQ_F_BUFFER_RING) {
- /* no buffers to recycle for this case */
- cflags = __io_put_kbuf(req, NULL);
- } else if (issue_flags & IO_URING_F_UNLOCKED) {
- struct io_ring_ctx *ctx = req->ctx;
-
- spin_lock(&ctx->completion_lock);
- cflags = __io_put_kbuf(req, &ctx->io_buffers_comp);
- spin_unlock(&ctx->completion_lock);
- } else {
- lockdep_assert_held(&req->ctx->uring_lock);
-
- cflags = __io_put_kbuf(req, &req->ctx->io_buffers_cache);
- }
-
- return cflags;
+ return __io_put_kbuf(req, issue_flags);
}
#endif
--
2.36.1