public inbox for [email protected]
* [PATCH 0/2] random for-next cleanups
@ 2023-08-09 12:25 Pavel Begunkov
  2023-08-09 12:25 ` [PATCH 1/2] io_uring/rsrc: keep one global dummy_ubuf Pavel Begunkov
  2023-08-09 12:25 ` [PATCH 2/2] io_uring: simplify io_run_task_work_sig return Pavel Begunkov
  0 siblings, 2 replies; 4+ messages in thread
From: Pavel Begunkov @ 2023-08-09 12:25 UTC (permalink / raw)
  To: io-uring; +Cc: Jens Axboe, asml.silence

Patch 1 removes the needless per-ring dummy_ubuf allocations.
Patch 2 cleans up io_run_task_work_sig()'s return codes a bit.

Pavel Begunkov (2):
  io_uring/rsrc: keep one global dummy_ubuf
  io_uring: simplify io_run_task_work_sig return

 io_uring/io_uring.c | 13 ++-----------
 io_uring/rsrc.c     | 14 ++++++++++----
 2 files changed, 12 insertions(+), 15 deletions(-)

-- 
2.41.0



* [PATCH 1/2] io_uring/rsrc: keep one global dummy_ubuf
  2023-08-09 12:25 [PATCH 0/2] random for-next cleanups Pavel Begunkov
@ 2023-08-09 12:25 ` Pavel Begunkov
  2023-08-09 15:05   ` Pavel Begunkov
  2023-08-09 12:25 ` [PATCH 2/2] io_uring: simplify io_run_task_work_sig return Pavel Begunkov
  1 sibling, 1 reply; 4+ messages in thread
From: Pavel Begunkov @ 2023-08-09 12:25 UTC (permalink / raw)
  To: io-uring; +Cc: Jens Axboe, asml.silence

We set empty registered buffers to dummy_ubuf as an optimisation.
Currently, we allocate the dummy entry for each ring, whereas we can
simply have one global instance.
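
The invalid range is what makes a shared dummy safe: io_import_fixed()
rejects any request whose buffer falls outside the registered range, and
no address can satisfy that for ubuf == -1UL. Roughly (a paraphrase of
the check, so the exact form is an assumption):

	/* io_import_fixed(), sketch of the containment check */
	u64 buf_end;

	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* dummy_ubuf has ubuf == -1UL and ubuf_end == 0: nothing passes */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;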

Signed-off-by: Pavel Begunkov <[email protected]>
---
 io_uring/io_uring.c |  9 ---------
 io_uring/rsrc.c     | 14 ++++++++++----
 2 files changed, 10 insertions(+), 13 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index fb70ae436db6..3c97401240c2 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -307,13 +307,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 		goto err;
 	if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
 		goto err;
-
-	ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
-	if (!ctx->dummy_ubuf)
-		goto err;
-	/* set invalid range, so io_import_fixed() fails meeting it */
-	ctx->dummy_ubuf->ubuf = -1UL;
-
 	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
 			    0, GFP_KERNEL))
 		goto err;
@@ -352,7 +345,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
 	return ctx;
 err:
-	kfree(ctx->dummy_ubuf);
 	kfree(ctx->cancel_table.hbs);
 	kfree(ctx->cancel_table_locked.hbs);
 	kfree(ctx->io_bl);
@@ -2905,7 +2897,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 		io_wq_put_hash(ctx->hash_map);
 	kfree(ctx->cancel_table.hbs);
 	kfree(ctx->cancel_table_locked.hbs);
-	kfree(ctx->dummy_ubuf);
 	kfree(ctx->io_bl);
 	xa_destroy(&ctx->io_bl_xa);
 	kfree(ctx);
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 5e8fdd9b8ca6..92e2471283ba 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -33,6 +33,12 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 #define IORING_MAX_FIXED_FILES	(1U << 20)
 #define IORING_MAX_REG_BUFFERS	(1U << 14)
 
+static const struct io_mapped_ubuf dummy_ubuf = {
+	/* set invalid range, so io_import_fixed() fails meeting it */
+	.ubuf = -1UL,
+	.ubuf_end = 0,
+};
+
 int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
 {
 	unsigned long page_limit, cur_pages, new_pages;
@@ -132,7 +138,7 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
 	struct io_mapped_ubuf *imu = *slot;
 	unsigned int i;
 
-	if (imu != ctx->dummy_ubuf) {
+	if (imu != &dummy_ubuf) {
 		for (i = 0; i < imu->nr_bvecs; i++)
 			unpin_user_page(imu->bvec[i].bv_page);
 		if (imu->acct_pages)
@@ -459,14 +465,14 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
 			break;
 
 		i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
-		if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
+		if (ctx->user_bufs[i] != &dummy_ubuf) {
 			err = io_queue_rsrc_removal(ctx->buf_data, i,
 						    ctx->user_bufs[i]);
 			if (unlikely(err)) {
 				io_buffer_unmap(ctx, &imu);
 				break;
 			}
-			ctx->user_bufs[i] = ctx->dummy_ubuf;
+			ctx->user_bufs[i] = &dummy_ubuf;
 		}
 
 		ctx->user_bufs[i] = imu;
@@ -1077,7 +1083,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 	int ret, nr_pages, i;
 	struct folio *folio = NULL;
 
-	*pimu = ctx->dummy_ubuf;
+	*pimu = &dummy_ubuf;
 	if (!iov->iov_base)
 		return 0;
 
-- 
2.41.0



* [PATCH 2/2] io_uring: simplify io_run_task_work_sig return
  2023-08-09 12:25 [PATCH 0/2] random for-next cleanups Pavel Begunkov
  2023-08-09 12:25 ` [PATCH 1/2] io_uring/rsrc: keep one global dummy_ubuf Pavel Begunkov
@ 2023-08-09 12:25 ` Pavel Begunkov
  1 sibling, 0 replies; 4+ messages in thread
From: Pavel Begunkov @ 2023-08-09 12:25 UTC (permalink / raw)
  To: io-uring; +Cc: Jens Axboe, asml.silence

Nobody cares about io_run_task_work_sig() returning 1; we only check
for negative errors. Simplify it by keeping to 0/-error returns.
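
Every caller follows the same shape and treats any non-negative return
as "carry on"; sketched below (the call-site form is an assumption, not
a quote):

	ret = io_run_task_work_sig(ctx);
	if (ret < 0)
		return ret;
	/* the old "ran some work" return of 1 was never acted upon */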

Signed-off-by: Pavel Begunkov <[email protected]>
---
 io_uring/io_uring.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 3c97401240c2..aa531debeb81 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2485,10 +2485,10 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx)
 	if (!llist_empty(&ctx->work_llist)) {
 		__set_current_state(TASK_RUNNING);
 		if (io_run_local_work(ctx) > 0)
-			return 1;
+			return 0;
 	}
 	if (io_run_task_work() > 0)
-		return 1;
+		return 0;
 	if (task_sigpending(current))
 		return -EINTR;
 	return 0;
-- 
2.41.0



* Re: [PATCH 1/2] io_uring/rsrc: keep one global dummy_ubuf
  2023-08-09 12:25 ` [PATCH 1/2] io_uring/rsrc: keep one global dummy_ubuf Pavel Begunkov
@ 2023-08-09 15:05   ` Pavel Begunkov
  0 siblings, 0 replies; 4+ messages in thread
From: Pavel Begunkov @ 2023-08-09 15:05 UTC (permalink / raw)
  To: io-uring; +Cc: Jens Axboe

On 8/9/23 13:25, Pavel Begunkov wrote:
> We set empty registered buffers to dummy_ubuf as an optimisation.
> Currently, we allocate the dummy entry for each ring, whereas we can
> simply have one global instance.

And only now did the compiler start complaining about const-ness;
I'll resend it.
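
For reference, the complaint is presumably about assigning a
pointer-to-const to the plain 'struct io_mapped_ubuf *' slots; a minimal
sketch of a fix, casting the const away at the assignment sites (an
assumption about what the resend will look like, not the actual v2):

	/* the dummy is never written through these pointers, so
	 * dropping const at the assignment sites is safe
	 */
	*pimu = (struct io_mapped_ubuf *)&dummy_ubuf;

	ctx->user_bufs[i] = (struct io_mapped_ubuf *)&dummy_ubuf;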


> [...]

-- 
Pavel Begunkov

