From: Pavel Begunkov <[email protected]>
To: [email protected]
Cc: Jens Axboe <[email protected]>, [email protected]
Subject: [PATCH for-next v2 21/25] io_uring: add IORING_SETUP_SINGLE_ISSUER
Date: Tue, 14 Jun 2022 15:37:11 +0100
Message-ID: <d6235e12830a225584b90cf3b29f09a0681acc95.1655213915.git.asml.silence@gmail.com>
In-Reply-To: <[email protected]>

Add a new IORING_SETUP_SINGLE_ISSUER flag and the userspace-visible part
of it, i.e. the limitation that only a single task is allowed to submit
requests. Also, don't allow it together with IOPOLL, as we're not going
to put it to good use.
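
As an illustration (not part of the patch itself), a minimal userspace
sketch of how the flag is meant to be used, assuming a uapi header with
this change applied and with error handling trimmed:

  /*
   * Illustrative only: create a ring that a single task may submit to.
   * Requires <linux/io_uring.h> from a kernel carrying this patch.
   */
  #include <linux/io_uring.h>
  #include <sys/syscall.h>
  #include <string.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
  	struct io_uring_params p;
  	int fd;

  	memset(&p, 0, sizeof(p));
  	p.flags = IORING_SETUP_SINGLE_ISSUER;

  	fd = syscall(__NR_io_uring_setup, 8, &p);
  	if (fd < 0) {
  		perror("io_uring_setup");
  		return 1;
  	}

  	/*
  	 * The first task to submit gets registered as the issuer;
  	 * io_uring_enter() from any other task then fails with -EEXIST.
  	 */
  	close(fd);
  	return 0;
  }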

Signed-off-by: Pavel Begunkov <[email protected]>
---
 include/uapi/linux/io_uring.h |  5 ++++-
 io_uring/io_uring.c           |  7 +++++--
 io_uring/io_uring_types.h     |  1 +
 io_uring/tctx.c               | 27 ++++++++++++++++++++++++---
 io_uring/tctx.h               |  4 ++--
 5 files changed, 36 insertions(+), 8 deletions(-)

diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index a41ddb8c5e1f..a3a691340d3e 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -138,9 +138,12 @@ enum {
  * IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN.
  */
 #define IORING_SETUP_TASKRUN_FLAG	(1U << 9)
-
 #define IORING_SETUP_SQE128		(1U << 10) /* SQEs are 128 byte */
 #define IORING_SETUP_CQE32		(1U << 11) /* CQEs are 32 byte */
+/*
+ * Only one task is allowed to submit requests
+ */
+#define IORING_SETUP_SINGLE_ISSUER	(1U << 12)
 
 enum io_uring_op {
 	IORING_OP_NOP,
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 15d209f334eb..4b90439808e3 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -3020,6 +3020,8 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	io_destroy_buffers(ctx);
 	if (ctx->sq_creds)
 		put_cred(ctx->sq_creds);
+	if (ctx->submitter_task)
+		put_task_struct(ctx->submitter_task);
 
 	/* there are no registered resources left, nobody uses it */
 	if (ctx->rsrc_node)
@@ -3752,7 +3754,7 @@ static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
 	if (fd < 0)
 		return fd;
 
-	ret = io_uring_add_tctx_node(ctx);
+	ret = __io_uring_add_tctx_node(ctx, false);
 	if (ret) {
 		put_unused_fd(fd);
 		return ret;
@@ -3972,7 +3974,8 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
 			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
 			IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
 			IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
-			IORING_SETUP_SQE128 | IORING_SETUP_CQE32))
+			IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
+			IORING_SETUP_SINGLE_ISSUER))
 		return -EINVAL;
 
 	return io_uring_create(entries, &p, params);
diff --git a/io_uring/io_uring_types.h b/io_uring/io_uring_types.h
index aba0f8cd6f49..f6d0ad25f377 100644
--- a/io_uring/io_uring_types.h
+++ b/io_uring/io_uring_types.h
@@ -241,6 +241,7 @@ struct io_ring_ctx {
 	/* Keep this last, we don't need it for the fast path */
 
 	struct io_restriction		restrictions;
+	struct task_struct		*submitter_task;
 
 	/* slow path rsrc auxilary data, used by update/register */
 	struct io_rsrc_node		*rsrc_backup_node;
diff --git a/io_uring/tctx.c b/io_uring/tctx.c
index 6adf659687f8..012be261dc50 100644
--- a/io_uring/tctx.c
+++ b/io_uring/tctx.c
@@ -81,12 +81,32 @@ __cold int io_uring_alloc_task_context(struct task_struct *task,
 	return 0;
 }
 
-int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
+static int io_register_submitter(struct io_ring_ctx *ctx)
+{
+	int ret = 0;
+
+	mutex_lock(&ctx->uring_lock);
+	if (!ctx->submitter_task)
+		ctx->submitter_task = get_task_struct(current);
+	else if (ctx->submitter_task != current)
+		ret = -EEXIST;
+	mutex_unlock(&ctx->uring_lock);
+
+	return ret;
+}
+
+int __io_uring_add_tctx_node(struct io_ring_ctx *ctx, bool submitter)
 {
 	struct io_uring_task *tctx = current->io_uring;
 	struct io_tctx_node *node;
 	int ret;
 
+	if ((ctx->flags & IORING_SETUP_SINGLE_ISSUER) && submitter) {
+		ret = io_register_submitter(ctx);
+		if (ret)
+			return ret;
+	}
+
 	if (unlikely(!tctx)) {
 		ret = io_uring_alloc_task_context(current, ctx);
 		if (unlikely(ret))
@@ -120,7 +140,8 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
 		list_add(&node->ctx_node, &ctx->tctx_list);
 		mutex_unlock(&ctx->uring_lock);
 	}
-	tctx->last = ctx;
+	if (submitter)
+		tctx->last = ctx;
 	return 0;
 }
 
@@ -228,7 +249,7 @@ int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
 		return -EINVAL;
 
 	mutex_unlock(&ctx->uring_lock);
-	ret = io_uring_add_tctx_node(ctx);
+	ret = __io_uring_add_tctx_node(ctx, false);
 	mutex_lock(&ctx->uring_lock);
 	if (ret)
 		return ret;
diff --git a/io_uring/tctx.h b/io_uring/tctx.h
index 7684713e950f..dde82ce4d8e2 100644
--- a/io_uring/tctx.h
+++ b/io_uring/tctx.h
@@ -34,7 +34,7 @@ struct io_tctx_node {
 int io_uring_alloc_task_context(struct task_struct *task,
 				struct io_ring_ctx *ctx);
 void io_uring_del_tctx_node(unsigned long index);
-int __io_uring_add_tctx_node(struct io_ring_ctx *ctx);
+int __io_uring_add_tctx_node(struct io_ring_ctx *ctx, bool submitter);
 void io_uring_clean_tctx(struct io_uring_task *tctx);
 
 void io_uring_unreg_ringfd(void);
@@ -52,5 +52,5 @@ static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
 
 	if (likely(tctx && tctx->last == ctx))
 		return 0;
-	return __io_uring_add_tctx_node(ctx);
+	return __io_uring_add_tctx_node(ctx, true);
 }
-- 
2.36.1




Thread overview: 29+ messages
2022-06-14 14:36 [PATCH for-next v2 00/25] 5.20 cleanups and poll optimisations Pavel Begunkov
2022-06-14 14:36 ` [PATCH for-next v2 01/25] io_uring: make reg buf init consistent Pavel Begunkov
2022-06-14 14:36 ` [PATCH for-next v2 02/25] io_uring: move defer_list to slow data Pavel Begunkov
2022-06-14 14:36 ` [PATCH for-next v2 03/25] io_uring: better caching for ctx timeout fields Pavel Begunkov
2022-06-14 14:36 ` [PATCH for-next v2 04/25] io_uring: refactor ctx slow data placement Pavel Begunkov
2022-06-14 14:36 ` [PATCH for-next v2 05/25] io_uring: move small helpers to headers Pavel Begunkov
2022-06-14 14:36 ` [PATCH for-next v2 06/25] io_uring: explain io_wq_work::cancel_seq placement Pavel Begunkov
2022-06-14 14:36 ` [PATCH for-next v2 07/25] io_uring: inline ->registered_rings Pavel Begunkov
2022-06-14 14:36 ` [PATCH for-next v2 08/25] io_uring: don't set REQ_F_COMPLETE_INLINE in tw Pavel Begunkov
2022-06-14 14:36 ` [PATCH for-next v2 09/25] io_uring: never defer-complete multi-apoll Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 10/25] io_uring: kill REQ_F_COMPLETE_INLINE Pavel Begunkov
2022-06-15  8:20   ` Hao Xu
2022-06-14 14:37 ` [PATCH for-next v2 11/25] io_uring: refactor io_req_task_complete() Pavel Begunkov
2022-06-14 17:45   ` Hao Xu
2022-06-14 17:52     ` Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 12/25] io_uring: don't inline io_put_kbuf Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 13/25] io_uring: remove check_cq checking from hot paths Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 14/25] io_uring: poll: remove unnecessary req->ref set Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 15/25] io_uring: switch cancel_hash to use per entry spinlock Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 16/25] io_uring: pass poll_find lock back Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 17/25] io_uring: clean up io_try_cancel Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 18/25] io_uring: limit number hash buckets Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 19/25] io_uring: clean up io_ring_ctx_alloc Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 20/25] io_uring: use state completion infra for poll reqs Pavel Begunkov
2022-06-14 14:37 ` Pavel Begunkov [this message]
2022-06-14 14:37 ` [PATCH for-next v2 22/25] io_uring: pass hash table into poll_find Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 23/25] io_uring: introduce a struct for hash table Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 24/25] io_uring: propagate locking state to poll cancel Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 25/25] io_uring: mutex locked poll hashing Pavel Begunkov
