public inbox for [email protected]
* [PATCH] io_uring: kbuf: kill __io_kbuf_recycle()
@ 2022-06-22  5:55 Hao Xu
  2022-06-22 17:48 ` Jens Axboe
  2022-06-22 22:27 ` Jens Axboe
  0 siblings, 2 replies; 4+ messages in thread
From: Hao Xu @ 2022-06-22  5:55 UTC (permalink / raw)
  To: io-uring; +Cc: Jens Axboe, Pavel Begunkov

From: Hao Xu <[email protected]>

__io_kbuf_recycle() is only called from io_kbuf_recycle(). Kill it and
tweak the code so that the legacy pbuf and ring pbuf paths become
clearer.

Signed-off-by: Hao Xu <[email protected]>
---
 io_uring/kbuf.c | 71 +++++++++++++++++++++++++++++--------------------
 io_uring/kbuf.h | 21 ++++++---------
 2 files changed, 50 insertions(+), 42 deletions(-)

diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index e4ee11cd337c..4b7f2aa99e38 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -37,36 +37,30 @@ static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
 	return xa_load(&ctx->io_bl_xa, bgid);
 }
 
-void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
+static int io_buffer_add_list(struct io_ring_ctx *ctx,
+			      struct io_buffer_list *bl, unsigned int bgid)
+{
+	bl->bgid = bgid;
+	if (bgid < BGID_ARRAY)
+		return 0;
+
+	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
+}
+
+void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_buffer_list *bl;
 	struct io_buffer *buf;
 
 	/*
-	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
-	 * the flag and hence ensure that bl->head doesn't get incremented.
-	 * If the tail has already been incremented, hang on to it.
+	 * For legacy provided buffer mode, don't recycle if we already did
+	 * IO to this buffer. For ring-mapped provided buffer mode, we should
+	 * increment ring->head to explicitly monopolize the buffer to avoid
+	 * multiple use.
 	 */
-	if (req->flags & REQ_F_BUFFER_RING) {
-		if (req->buf_list) {
-			if (req->flags & REQ_F_PARTIAL_IO) {
-				/*
-				 * If we end up here, then the io_uring_lock has
-				 * been kept held since we retrieved the buffer.
-				 * For the io-wq case, we already cleared
-				 * req->buf_list when the buffer was retrieved,
-				 * hence it cannot be set here for that case.
-				 */
-				req->buf_list->head++;
-				req->buf_list = NULL;
-			} else {
-				req->buf_index = req->buf_list->bgid;
-				req->flags &= ~REQ_F_BUFFER_RING;
-			}
-		}
+	if (req->flags & REQ_F_PARTIAL_IO)
 		return;
-	}
 
 	io_ring_submit_lock(ctx, issue_flags);
 
@@ -77,16 +71,35 @@ void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 	req->buf_index = buf->bgid;
 
 	io_ring_submit_unlock(ctx, issue_flags);
+	return;
 }
 
-static int io_buffer_add_list(struct io_ring_ctx *ctx,
-			      struct io_buffer_list *bl, unsigned int bgid)
+void io_kbuf_recycle_ring(struct io_kiocb *req)
 {
-	bl->bgid = bgid;
-	if (bgid < BGID_ARRAY)
-		return 0;
-
-	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
+	/*
+	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
+	 * the flag and hence ensure that bl->head doesn't get incremented.
+	 * If the tail has already been incremented, hang on to it.
+	 * The exception is partial io, in which case we should increment
+	 * bl->head to monopolize the buffer.
+	 */
+	if (req->buf_list) {
+		if (req->flags & REQ_F_PARTIAL_IO) {
+			/*
+			 * If we end up here, then the io_uring_lock has
+			 * been kept held since we retrieved the buffer.
+			 * For the io-wq case, we already cleared
+			 * req->buf_list when the buffer was retrieved,
+			 * hence it cannot be set here for that case.
+			 */
+			req->buf_list->head++;
+			req->buf_list = NULL;
+		} else {
+			req->buf_index = req->buf_list->bgid;
+			req->flags &= ~REQ_F_BUFFER_RING;
+		}
+	}
+	return;
 }
 
 unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index 5da3d4039aed..b5a89ffadf31 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -35,7 +35,6 @@ struct io_buffer {
 
 void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
 			      unsigned int issue_flags);
-void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags);
 void io_destroy_buffers(struct io_ring_ctx *ctx);
 
 int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
@@ -49,6 +48,9 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
 
 unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
 
+void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
+void io_kbuf_recycle_ring(struct io_kiocb *req);
+
 static inline bool io_do_buffer_select(struct io_kiocb *req)
 {
 	if (!(req->flags & REQ_F_BUFFER_SELECT))
@@ -58,18 +60,11 @@ static inline bool io_do_buffer_select(struct io_kiocb *req)
 
 static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 {
-	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
-		return;
-	/*
-	 * For legacy provided buffer mode, don't recycle if we already did
-	 * IO to this buffer. For ring-mapped provided buffer mode, we should
-	 * increment ring->head to explicitly monopolize the buffer to avoid
-	 * multiple use.
-	 */
-	if ((req->flags & REQ_F_BUFFER_SELECTED) &&
-	    (req->flags & REQ_F_PARTIAL_IO))
-		return;
-	__io_kbuf_recycle(req, issue_flags);
+	if (req->flags & REQ_F_BUFFER_SELECTED)
+		io_kbuf_recycle_legacy(req, issue_flags);
+
+	if (req->flags & REQ_F_BUFFER_RING)
+		io_kbuf_recycle_ring(req);
 }
 
 static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
-- 
2.25.1
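
For context, io_kbuf_recycle() is the hook that hands a selected provided
buffer back when a request cannot complete inline and gets deferred, for
example when it is parked for poll or punted to io-wq. A rough,
hypothetical caller sketch (simplified, not part of this patch; the real
call sites live in io_uring's submission path):

static void io_queue_async_sketch(struct io_kiocb *req,
				  unsigned int issue_flags)
{
	/*
	 * Hypothetical sketch: the request is about to be parked for poll
	 * or punted to io-wq, so recycle any selected provided buffer
	 * before the request goes dormant. io_kbuf_recycle() dispatches
	 * to the legacy or ring variant based on req->flags.
	 */
	io_kbuf_recycle(req, issue_flags);

	/* ... arm poll or queue io-wq work here ... */
}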



* Re: [PATCH] io_uring: kbuf: kill __io_kbuf_recycle()
  2022-06-22  5:55 [PATCH] io_uring: kbuf: kill __io_kbuf_recycle() Hao Xu
@ 2022-06-22 17:48 ` Jens Axboe
  2022-06-23  7:18   ` Hao Xu
  2022-06-22 22:27 ` Jens Axboe
  1 sibling, 1 reply; 4+ messages in thread
From: Jens Axboe @ 2022-06-22 17:48 UTC (permalink / raw)
  To: Hao Xu, io-uring; +Cc: Pavel Begunkov

On 6/21/22 11:55 PM, Hao Xu wrote:
> From: Hao Xu <[email protected]>
> 
> __io_kbuf_recycle() is only called from io_kbuf_recycle(). Kill it and
> tweak the code so that the legacy pbuf and ring pbuf paths become
> clearer.

I have applied this one as I think it makes sense separately, but I'd
really like to see the ring provided buffer recycling done inline, as
that is the fast path for provided buffers (and it's very few
instructions). Care to do a patch on top for that?
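
For illustration, a rough sketch of what that inline fast path could
look like, simply lifting the new io_kbuf_recycle_ring() body into
kbuf.h as a static inline (hypothetical, untested; the actual follow-up
patch may differ):

static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * For ring-mapped buffers, recycling normally just means
	 * clearing REQ_F_BUFFER_RING so that bl->head never gets
	 * incremented. The exception is partial IO, in which case
	 * bl->head is bumped to keep the buffer monopolized.
	 */
	if (req->buf_list) {
		if (req->flags & REQ_F_PARTIAL_IO) {
			/* Lock held since the buffer was selected. */
			req->buf_list->head++;
			req->buf_list = NULL;
		} else {
			req->buf_index = req->buf_list->bgid;
			req->flags &= ~REQ_F_BUFFER_RING;
		}
	}
}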

-- 
Jens Axboe



* Re: [PATCH] io_uring: kbuf: kill __io_kbuf_recycle()
  2022-06-22  5:55 [PATCH] io_uring: kbuf: kill __io_kbuf_recycle() Hao Xu
  2022-06-22 17:48 ` Jens Axboe
@ 2022-06-22 22:27 ` Jens Axboe
  1 sibling, 0 replies; 4+ messages in thread
From: Jens Axboe @ 2022-06-22 22:27 UTC (permalink / raw)
  To: hao.xu, io-uring; +Cc: asml.silence

On Wed, 22 Jun 2022 13:55:51 +0800, Hao Xu wrote:
> From: Hao Xu <[email protected]>
> 
> __io_kbuf_recycle() is only called from io_kbuf_recycle(). Kill it and
> tweak the code so that the legacy pbuf and ring pbuf paths become
> clearer.
> 
> 

Applied, thanks!

[1/1] io_uring: kbuf: kill __io_kbuf_recycle()
      commit: b4ef7c36b5ca6a0b96c8b493c495b17a0884fd11

Best regards,
-- 
Jens Axboe




* Re: [PATCH] io_uring: kbuf: kill __io_kbuf_recycle()
  2022-06-22 17:48 ` Jens Axboe
@ 2022-06-23  7:18   ` Hao Xu
  0 siblings, 0 replies; 4+ messages in thread
From: Hao Xu @ 2022-06-23  7:18 UTC (permalink / raw)
  To: Jens Axboe, io-uring; +Cc: Pavel Begunkov

On 6/23/22 01:48, Jens Axboe wrote:
> On 6/21/22 11:55 PM, Hao Xu wrote:
>> From: Hao Xu <[email protected]>
>>
>> __io_kbuf_recycle() is only called from io_kbuf_recycle(). Kill it and
>> tweak the code so that the legacy pbuf and ring pbuf paths become
>> clearer.
> 
> I have applied this one as I think it makes sense separately, but I'd
> really like to see the ring provided buffer recycling done inline, as
> that is the fast path for provided buffers (and it's very few
> instructions). Care to do a patch on top for that?
> 

No problem.

