From: Pavel Begunkov <[email protected]>
To: Jens Axboe <[email protected]>,
	[email protected], [email protected]
Subject: [PATCH 5/8] io_uring: move cur_mm into io_submit_state
Date: Sat, 25 Jan 2020 00:40:28 +0300
Message-ID: <be4ad3420fbda926d2e969738c0ae6940a47d8ec.1579901866.git.asml.silence@gmail.com>
In-Reply-To: <[email protected]>

cur_mm is only used during submission, so it can be placed into
io_submit_state. The reasoning behind this:
- it's more convenient: there is no need to pass it down the call stack
- it's passed as a pointer anyway, so a memory read/write is needed in
  either case
- it now lives on the heap (ctx->submit_state) instead of the stack
- it is set only once in the non-IORING_SETUP_SQPOLL case
- the generated code is very similar, as @ctx is hot and is always kept
  in a register
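
Below is a rough, hypothetical sketch of the before/after calling
convention (simplified userspace stand-ins, not the actual kernel code;
the struct and function names here are made up for illustration):

  #include <stddef.h>

  struct mm;                              /* stand-in for struct mm_struct */

  struct submit_state {
          struct mm *mm;                  /* mm acquired for this submission */
  };

  struct ring_ctx {
          struct mm *sqo_mm;              /* mm of the task that set up the ring */
          struct submit_state submit_state;
  };

  /* before: an extra **mm argument is threaded through the call stack */
  static int submit_old(struct ring_ctx *ctx, struct mm **mm)
  {
          if (!*mm)
                  *mm = ctx->sqo_mm;      /* lazily pick up the mm when first needed */
          return *mm != NULL;
  }

  /* after: the slot is reached through ctx, which is already at hand */
  static int submit_new(struct ring_ctx *ctx)
  {
          struct submit_state *state = &ctx->submit_state;

          if (!state->mm)
                  state->mm = ctx->sqo_mm;
          return state->mm != NULL;
  }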

Signed-off-by: Pavel Begunkov <[email protected]>
---
 fs/io_uring.c | 46 ++++++++++++++++++++++++++--------------------
 1 file changed, 26 insertions(+), 20 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 951c2fc7b5b7..c0e72390d272 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -219,6 +219,8 @@ struct io_submit_state {
 
 	struct file		*ring_file;
 	int			ring_fd;
+
+	struct mm_struct	*mm;
 };
 
 struct io_ring_ctx {
@@ -4834,8 +4836,7 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req,
 }
 
 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
-			  struct file *ring_file, int ring_fd,
-			  struct mm_struct **mm, bool async)
+			  struct file *ring_file, int ring_fd, bool async)
 {
 	struct blk_plug plug;
 	struct io_kiocb *link = NULL;
@@ -4883,15 +4884,15 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			break;
 		}
 
-		if (io_op_defs[req->opcode].needs_mm && !*mm) {
+		if (io_op_defs[req->opcode].needs_mm && !ctx->submit_state.mm) {
 			mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
 			if (!mm_fault) {
 				use_mm(ctx->sqo_mm);
-				*mm = ctx->sqo_mm;
+				ctx->submit_state.mm = ctx->sqo_mm;
 			}
 		}
 
-		req->has_user = *mm != NULL;
+		req->has_user = (ctx->submit_state.mm != NULL);
 		req->in_async = async;
 		req->needs_fixed_file = async;
 		trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
@@ -4918,7 +4919,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 static int io_sq_thread(void *data)
 {
 	struct io_ring_ctx *ctx = data;
-	struct mm_struct *cur_mm = NULL;
+	struct io_submit_state *submit = &ctx->submit_state;
 	const struct cred *old_cred;
 	mm_segment_t old_fs;
 	DEFINE_WAIT(wait);
@@ -4993,10 +4994,15 @@ static int io_sq_thread(void *data)
 			 * adding ourselves to the waitqueue, as the unuse/drop
 			 * may sleep.
 			 */
-			if (cur_mm) {
-				unuse_mm(cur_mm);
-				mmput(cur_mm);
-				cur_mm = NULL;
+			if (submit->mm) {
+				/*
+				 * this thread is the only submitter, thus
+				 * it's safe to change submit->mm without
+				 * taking ctx->uring_lock
+				 */
+				unuse_mm(submit->mm);
+				mmput(submit->mm);
+				submit->mm = NULL;
 			}
 
 			prepare_to_wait(&ctx->sqo_wait, &wait,
@@ -5027,16 +5033,17 @@ static int io_sq_thread(void *data)
 		}
 
 		mutex_lock(&ctx->uring_lock);
-		ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
+		ret = io_submit_sqes(ctx, to_submit, NULL, -1, true);
 		mutex_unlock(&ctx->uring_lock);
 		if (ret > 0)
 			inflight += ret;
 	}
 
 	set_fs(old_fs);
-	if (cur_mm) {
-		unuse_mm(cur_mm);
-		mmput(cur_mm);
+	if (submit->mm) {
+		unuse_mm(submit->mm);
+		mmput(submit->mm);
+		submit->mm = NULL;
 	}
 	revert_creds(old_cred);
 
@@ -5757,6 +5764,10 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
 	mmgrab(current->mm);
 	ctx->sqo_mm = current->mm;
 
+	ctx->submit_state.mm = NULL;
+	if (!(ctx->flags & IORING_SETUP_SQPOLL))
+		ctx->submit_state.mm = ctx->sqo_mm;
+
 	if (ctx->flags & IORING_SETUP_SQPOLL) {
 		ret = -EPERM;
 		if (!capable(CAP_SYS_ADMIN))
@@ -6369,8 +6380,6 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 			wake_up(&ctx->sqo_wait);
 		submitted = to_submit;
 	} else if (to_submit) {
-		struct mm_struct *cur_mm;
-
 		if (current->mm != ctx->sqo_mm ||
 		    current_cred() != ctx->creds) {
 			ret = -EPERM;
@@ -6378,10 +6387,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 		}
 
 		mutex_lock(&ctx->uring_lock);
-		/* already have mm, so io_submit_sqes() won't try to grab it */
-		cur_mm = ctx->sqo_mm;
-		submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
-					   &cur_mm, false);
+		submitted = io_submit_sqes(ctx, to_submit, f.file, fd, false);
 		mutex_unlock(&ctx->uring_lock);
 
 		if (submitted != to_submit)
-- 
2.24.0



Thread overview: 18+ messages
2020-01-24 21:40 [PATCH 0/8] add persistent submission state Pavel Begunkov
2020-01-24 21:40 ` [PATCH 1/8] io_uring: add comment for drain_next Pavel Begunkov
2020-01-24 21:40 ` [PATCH 2/8] io_uring: always pass non-null io_submit_state Pavel Begunkov
2020-01-24 21:40 ` [PATCH 3/8] io_uring: place io_submit_state into ctx Pavel Begunkov
2020-01-24 21:40 ` [PATCH 4/8] io_uring: move ring_fd into io_submit_state Pavel Begunkov
2020-01-24 21:40 ` Pavel Begunkov [this message]
2020-01-24 21:40 ` [PATCH 6/8] io_uring: move *link " Pavel Begunkov
2020-01-24 21:40 ` [PATCH 7/8] io_uring: persistent req bulk allocation cache Pavel Begunkov
2020-01-24 21:40 ` [PATCH 8/8] io_uring: optimise " Pavel Begunkov
2020-01-25 19:53 ` [PATCH v2 0/8] add persistent submission state Pavel Begunkov
2020-01-25 19:53   ` [PATCH v2 1/8] io_uring: leave a comment for drain_next Pavel Begunkov
2020-01-25 19:53   ` [PATCH v2 2/8] io_uring: always pass non-null io_submit_state Pavel Begunkov
2020-01-25 19:53   ` [PATCH v2 3/8] io_uring: place io_submit_state into ctx Pavel Begunkov
2020-01-25 19:53   ` [PATCH v2 4/8] io_uring: move ring_fd into io_submit_state Pavel Begunkov
2020-01-25 19:53   ` [PATCH v2 5/8] io_uring: move cur_mm " Pavel Begunkov
2020-01-25 19:53   ` [PATCH v2 6/8] io_uring: move *link " Pavel Begunkov
2020-01-25 19:53   ` [PATCH v2 7/8] io_uring: persistent req bulk allocation cache Pavel Begunkov
2020-01-25 19:53   ` [PATCH v2 8/8] io_uring: optimise " Pavel Begunkov
