* [PATCH 0/5] submission path refactoring pt.2
From: Pavel Begunkov @ 2020-04-11 23:05 UTC
To: Jens Axboe, io-uring, linux-kernel
The second and last part of the submission path refactoring. This moves
request initialisation bits into io_init_req(), doing a better job of
logically segmenting the code. A negative diffstat comes as a bonus.
Pavel Begunkov (5):
io_uring: remove obsolete @mm_fault
io_uring: track mm through current->mm
io_uring: DRY early submission req fail code
io_uring: keep all sqe->flags in req->flags
io_uring: move all request init code in one place
fs/io_uring.c | 196 +++++++++++++++++++++++---------------------------
1 file changed, 90 insertions(+), 106 deletions(-)
--
2.24.0
* [PATCH 1/5] io_uring: remove obsolete @mm_fault
From: Pavel Begunkov @ 2020-04-11 23:05 UTC
To: Jens Axboe, io-uring, linux-kernel
If io_submit_sqes() can't grab an mm, it fails and exits right away.
There is no need to track whether the failure happened. Remove @mm_fault.
Signed-off-by: Pavel Begunkov <[email protected]>
---
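A minimal before/after sketch, condensed from the diff below: since the
first failed grab jumps to fail_req and breaks out of the submission
loop, the accumulated @mm_fault flag can never be observed again, so the
grab can be tested directly.

	/* before: carries state across iterations that is never reused */
	mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
	if (unlikely(mm_fault)) {
		err = -EFAULT;
		goto fail_req;
	}

	/* after: no state, test the grab directly */
	if (unlikely(!mmget_not_zero(ctx->sqo_mm))) {
		err = -EFAULT;
		goto fail_req;
	}
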
fs/io_uring.c | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index be65eda059ac..343899915465 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5816,7 +5816,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
struct io_submit_state state, *statep = NULL;
struct io_kiocb *link = NULL;
int i, submitted = 0;
- bool mm_fault = false;
/* if we have a backlog and couldn't flush it all, return BUSY */
if (test_bit(0, &ctx->sq_check_overflow)) {
@@ -5870,8 +5869,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
}
if (io_op_defs[req->opcode].needs_mm && !*mm) {
- mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
- if (unlikely(mm_fault)) {
+ if (unlikely(!mmget_not_zero(ctx->sqo_mm))) {
err = -EFAULT;
goto fail_req;
}
--
2.24.0
* [PATCH 2/5] io_uring: track mm through current->mm
From: Pavel Begunkov @ 2020-04-11 23:05 UTC
To: Jens Axboe, io-uring, linux-kernel
In preparation for extracting the request init bits, remove the
hand-rolled mm tracking from io_submit_sqes() and rely on current->mm
instead. It's more convenient than passing this piece of state around
to other functions.
Signed-off-by: Pavel Begunkov <[email protected]>
---
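For illustration, a minimal sketch of the grab side that the new
io_sq_thread_drop_mm() helper pairs with. io_grab_mm() is a hypothetical
name used here only to show the pairing; the patch keeps this logic
inline in io_submit_sqes():

	/* hypothetical helper, for illustration only */
	static int io_grab_mm(struct io_ring_ctx *ctx)
	{
		if (current->mm)	/* already set, e.g. by io_uring_enter() */
			return 0;
		if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
			return -EFAULT;	/* mm is being torn down */
		use_mm(ctx->sqo_mm);	/* now current->mm == ctx->sqo_mm */
		return 0;
	}
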
fs/io_uring.c | 37 ++++++++++++++++---------------------
1 file changed, 16 insertions(+), 21 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 343899915465..27868ec328b8 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5810,8 +5810,7 @@ static void io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
}
static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
- struct file *ring_file, int ring_fd,
- struct mm_struct **mm, bool async)
+ struct file *ring_file, int ring_fd, bool async)
{
struct io_submit_state state, *statep = NULL;
struct io_kiocb *link = NULL;
@@ -5868,13 +5867,12 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
break;
}
- if (io_op_defs[req->opcode].needs_mm && !*mm) {
+ if (io_op_defs[req->opcode].needs_mm && !current->mm) {
if (unlikely(!mmget_not_zero(ctx->sqo_mm))) {
err = -EFAULT;
goto fail_req;
}
use_mm(ctx->sqo_mm);
- *mm = ctx->sqo_mm;
}
req->needs_fixed_file = async;
@@ -5900,10 +5898,19 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
return submitted;
}
+static inline void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
+{
+ struct mm_struct *mm = current->mm;
+
+ if (mm) {
+ unuse_mm(mm);
+ mmput(mm);
+ }
+}
+
static int io_sq_thread(void *data)
{
struct io_ring_ctx *ctx = data;
- struct mm_struct *cur_mm = NULL;
const struct cred *old_cred;
mm_segment_t old_fs;
DEFINE_WAIT(wait);
@@ -5944,11 +5951,7 @@ static int io_sq_thread(void *data)
* adding ourselves to the waitqueue, as the unuse/drop
* may sleep.
*/
- if (cur_mm) {
- unuse_mm(cur_mm);
- mmput(cur_mm);
- cur_mm = NULL;
- }
+ io_sq_thread_drop_mm(ctx);
/*
* We're polling. If we're within the defined idle
@@ -6012,7 +6015,7 @@ static int io_sq_thread(void *data)
}
mutex_lock(&ctx->uring_lock);
- ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
+ ret = io_submit_sqes(ctx, to_submit, NULL, -1, true);
mutex_unlock(&ctx->uring_lock);
timeout = jiffies + ctx->sq_thread_idle;
}
@@ -6021,10 +6024,7 @@ static int io_sq_thread(void *data)
task_work_run();
set_fs(old_fs);
- if (cur_mm) {
- unuse_mm(cur_mm);
- mmput(cur_mm);
- }
+ io_sq_thread_drop_mm(ctx);
revert_creds(old_cred);
kthread_parkme();
@@ -7493,13 +7493,8 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
wake_up(&ctx->sqo_wait);
submitted = to_submit;
} else if (to_submit) {
- struct mm_struct *cur_mm;
-
mutex_lock(&ctx->uring_lock);
- /* already have mm, so io_submit_sqes() won't try to grab it */
- cur_mm = ctx->sqo_mm;
- submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
- &cur_mm, false);
+ submitted = io_submit_sqes(ctx, to_submit, f.file, fd, false);
mutex_unlock(&ctx->uring_lock);
if (submitted != to_submit)
--
2.24.0
* [PATCH 3/5] io_uring: DRY early submission req fail code
From: Pavel Begunkov @ 2020-04-11 23:05 UTC
To: Jens Axboe, io-uring, linux-kernel
Having only one place to clean up a request after a link assembly/
submission failure will come in handy in the future. At the very least,
it allows removing the duplicated cleanup sequence.
Signed-off-by: Pavel Begunkov <[email protected]>
---
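With io_submit_sqe() now returning an error code instead of a bool, all
failures funnel into the one cleanup sequence already present in
io_submit_sqes(); a sketch condensed from the diff below:

	err = io_submit_sqe(req, sqe, statep, &link);
	if (err) {
fail_req:
		io_cqring_add_event(req, err);	/* post the error as a CQE */
		io_double_put_req(req);		/* drop both request refs */
		break;
	}
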
fs/io_uring.c | 50 +++++++++++++++++++-------------------------------
1 file changed, 19 insertions(+), 31 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 27868ec328b8..90b806e4022a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5606,7 +5606,7 @@ static inline void io_queue_link_head(struct io_kiocb *req)
IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
IOSQE_BUFFER_SELECT)
-static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
struct io_submit_state *state, struct io_kiocb **link)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -5616,24 +5616,18 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
sqe_flags = READ_ONCE(sqe->flags);
/* enforce forwards compatibility on users */
- if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
- ret = -EINVAL;
- goto err_req;
- }
+ if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
+ return -EINVAL;
if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
- !io_op_defs[req->opcode].buffer_select) {
- ret = -EOPNOTSUPP;
- goto err_req;
- }
+ !io_op_defs[req->opcode].buffer_select)
+ return -EOPNOTSUPP;
id = READ_ONCE(sqe->personality);
if (id) {
req->work.creds = idr_find(&ctx->personality_idr, id);
- if (unlikely(!req->work.creds)) {
- ret = -EINVAL;
- goto err_req;
- }
+ if (unlikely(!req->work.creds))
+ return -EINVAL;
get_cred(req->work.creds);
}
@@ -5644,12 +5638,8 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
fd = READ_ONCE(sqe->fd);
ret = io_req_set_file(state, req, fd, sqe_flags);
- if (unlikely(ret)) {
-err_req:
- io_cqring_add_event(req, ret);
- io_double_put_req(req);
- return false;
- }
+ if (unlikely(ret))
+ return ret;
/*
* If we already have a head request, queue this one for async
@@ -5672,16 +5662,14 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
head->flags |= REQ_F_IO_DRAIN;
ctx->drain_next = 1;
}
- if (io_alloc_async_ctx(req)) {
- ret = -EAGAIN;
- goto err_req;
- }
+ if (io_alloc_async_ctx(req))
+ return -EAGAIN;
ret = io_req_defer_prep(req, sqe);
if (ret) {
/* fail even hard links since we don't submit */
head->flags |= REQ_F_FAIL_LINK;
- goto err_req;
+ return ret;
}
trace_io_uring_link(ctx, req, head);
list_add_tail(&req->link_list, &head->link_list);
@@ -5700,10 +5688,9 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
req->flags |= REQ_F_LINK;
INIT_LIST_HEAD(&req->link_list);
- if (io_alloc_async_ctx(req)) {
- ret = -EAGAIN;
- goto err_req;
- }
+ if (io_alloc_async_ctx(req))
+ return -EAGAIN;
+
ret = io_req_defer_prep(req, sqe);
if (ret)
req->flags |= REQ_F_FAIL_LINK;
@@ -5713,7 +5700,7 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
}
}
- return true;
+ return 0;
}
/*
@@ -5878,8 +5865,9 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
req->needs_fixed_file = async;
trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
true, async);
- if (!io_submit_sqe(req, sqe, statep, &link))
- break;
+ err = io_submit_sqe(req, sqe, statep, &link);
+ if (err)
+ goto fail_req;
}
if (unlikely(submitted != nr)) {
--
2.24.0
* [PATCH 4/5] io_uring: keep all sqe->flags in req->flags
From: Pavel Begunkov @ 2020-04-11 23:05 UTC
To: Jens Axboe, io-uring, linux-kernel
It's a good idea not to read sqe->flags twice, as that is prone to
security bugs. Instead of passing the flags around, embed them in
req->flags. This is already the case for every flag except IOSQE_IO_LINK:
1. rename the former REQ_F_LINK -> REQ_F_LINK_HEAD
2. introduce and copy REQ_F_LINK, which mimics IOSQE_IO_LINK
And leave req_set_fail_links() using the new REQ_F_LINK, because it's
more sensible.
Signed-off-by: Pavel Begunkov <[email protected]>
---
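The masked copy relies on the REQ_F_* bits mirroring the IOSQE_* bit
positions, as noted in the "same numerical values" comment. A sketch of
the invariant; the BUILD_BUG_ON is a hypothetical compile-time check for
illustration, not part of the patch:

	/* REQ_F_LINK_BIT aliases IOSQE_IO_LINK_BIT, so the values match */
	BUILD_BUG_ON((u32)REQ_F_LINK != (u32)IOSQE_IO_LINK);

	/* hence one masked OR moves the user's sqe flags into req->flags */
	req->flags |= sqe_flags & (IOSQE_IO_DRAIN | IOSQE_IO_HARDLINK |
				   IOSQE_ASYNC | IOSQE_FIXED_FILE |
				   IOSQE_BUFFER_SELECT | IOSQE_IO_LINK);
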
fs/io_uring.c | 23 +++++++++++++----------
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 90b806e4022a..9118a0210e0a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -506,6 +506,7 @@ enum {
REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
+ REQ_F_LINK_HEAD_BIT,
REQ_F_LINK_NEXT_BIT,
REQ_F_FAIL_LINK_BIT,
REQ_F_INFLIGHT_BIT,
@@ -541,6 +542,8 @@ enum {
/* IOSQE_BUFFER_SELECT */
REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
+ /* head of a link */
+ REQ_F_LINK_HEAD = BIT(REQ_F_LINK_HEAD_BIT),
/* already grabbed next link */
REQ_F_LINK_NEXT = BIT(REQ_F_LINK_NEXT_BIT),
/* fail rest of links */
@@ -1435,7 +1438,7 @@ static bool io_link_cancel_timeout(struct io_kiocb *req)
if (ret != -1) {
io_cqring_fill_event(req, -ECANCELED);
io_commit_cqring(ctx);
- req->flags &= ~REQ_F_LINK;
+ req->flags &= ~REQ_F_LINK_HEAD;
io_put_req(req);
return true;
}
@@ -1471,7 +1474,7 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
list_del_init(&req->link_list);
if (!list_empty(&nxt->link_list))
- nxt->flags |= REQ_F_LINK;
+ nxt->flags |= REQ_F_LINK_HEAD;
*nxtptr = nxt;
break;
}
@@ -1482,7 +1485,7 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
}
/*
- * Called if REQ_F_LINK is set, and we fail the head request
+ * Called if REQ_F_LINK_HEAD is set, and we fail the head request
*/
static void io_fail_links(struct io_kiocb *req)
{
@@ -1515,7 +1518,7 @@ static void io_fail_links(struct io_kiocb *req)
static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
{
- if (likely(!(req->flags & REQ_F_LINK)))
+ if (likely(!(req->flags & REQ_F_LINK_HEAD)))
return;
/*
@@ -1667,7 +1670,7 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
{
- if ((req->flags & REQ_F_LINK) || io_is_fallback_req(req))
+ if ((req->flags & REQ_F_LINK_HEAD) || io_is_fallback_req(req))
return false;
if (!(req->flags & REQ_F_FIXED_FILE) || req->io)
@@ -2560,7 +2563,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
req->result = 0;
io_size = ret;
- if (req->flags & REQ_F_LINK)
+ if (req->flags & REQ_F_LINK_HEAD)
req->result = io_size;
/*
@@ -2651,7 +2654,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock)
req->result = 0;
io_size = ret;
- if (req->flags & REQ_F_LINK)
+ if (req->flags & REQ_F_LINK_HEAD)
req->result = io_size;
/*
@@ -5474,7 +5477,7 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
{
struct io_kiocb *nxt;
- if (!(req->flags & REQ_F_LINK))
+ if (!(req->flags & REQ_F_LINK_HEAD))
return NULL;
/* for polled retry, if flag is set, we already went through here */
if (req->flags & REQ_F_POLLED)
@@ -5634,7 +5637,7 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
/* same numerical values with corresponding REQ_F_*, safe to copy */
req->flags |= sqe_flags & (IOSQE_IO_DRAIN | IOSQE_IO_HARDLINK |
IOSQE_ASYNC | IOSQE_FIXED_FILE |
- IOSQE_BUFFER_SELECT);
+ IOSQE_BUFFER_SELECT | IOSQE_IO_LINK);
fd = READ_ONCE(sqe->fd);
ret = io_req_set_file(state, req, fd, sqe_flags);
@@ -5685,7 +5688,7 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
req->ctx->drain_next = 0;
}
if (sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
- req->flags |= REQ_F_LINK;
+ req->flags |= REQ_F_LINK_HEAD;
INIT_LIST_HEAD(&req->link_list);
if (io_alloc_async_ctx(req))
--
2.24.0
* [PATCH 5/5] io_uring: move all request init code in one place
From: Pavel Begunkov @ 2020-04-11 23:05 UTC
To: Jens Axboe, io-uring, linux-kernel
Request initialisation is scattered across several functions, namely
io_init_req(), io_submit_sqes() and io_submit_sqe(). Put it all in
io_init_req() for better data locality and code clarity.
Signed-off-by: Pavel Begunkov <[email protected]>
---
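The resulting submission loop, condensed from the diff below:
io_init_req() now performs all per-request setup and validation, any
error funnels into the single failure path, and io_submit_sqe() is left
with only the link assembly and queueing work:

	err = io_init_req(ctx, req, sqe, statep, async); /* all init + checks */
	io_consume_sqe(ctx);
	/* will complete beyond this point, count as submitted */
	submitted++;
	if (unlikely(err)) {
fail_req:
		io_cqring_add_event(req, err);
		io_double_put_req(req);
		break;
	}
	err = io_submit_sqe(req, sqe, statep, &link);	/* links + queueing */
	if (err)
		goto fail_req;
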
fs/io_uring.c | 104 +++++++++++++++++++++++++-------------------------
1 file changed, 52 insertions(+), 52 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 9118a0210e0a..f593a37665f3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5605,44 +5605,11 @@ static inline void io_queue_link_head(struct io_kiocb *req)
io_queue_sqe(req, NULL);
}
-#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
- IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
- IOSQE_BUFFER_SELECT)
-
static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
struct io_submit_state *state, struct io_kiocb **link)
{
struct io_ring_ctx *ctx = req->ctx;
- unsigned int sqe_flags;
- int ret, id, fd;
-
- sqe_flags = READ_ONCE(sqe->flags);
-
- /* enforce forwards compatibility on users */
- if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
- return -EINVAL;
-
- if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
- !io_op_defs[req->opcode].buffer_select)
- return -EOPNOTSUPP;
-
- id = READ_ONCE(sqe->personality);
- if (id) {
- req->work.creds = idr_find(&ctx->personality_idr, id);
- if (unlikely(!req->work.creds))
- return -EINVAL;
- get_cred(req->work.creds);
- }
-
- /* same numerical values with corresponding REQ_F_*, safe to copy */
- req->flags |= sqe_flags & (IOSQE_IO_DRAIN | IOSQE_IO_HARDLINK |
- IOSQE_ASYNC | IOSQE_FIXED_FILE |
- IOSQE_BUFFER_SELECT | IOSQE_IO_LINK);
-
- fd = READ_ONCE(sqe->fd);
- ret = io_req_set_file(state, req, fd, sqe_flags);
- if (unlikely(ret))
- return ret;
+ int ret;
/*
* If we already have a head request, queue this one for async
@@ -5661,7 +5628,7 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
* next after the link request. The last one is done via
* drain_next flag to persist the effect across calls.
*/
- if (sqe_flags & IOSQE_IO_DRAIN) {
+ if (req->flags & REQ_F_IO_DRAIN) {
head->flags |= REQ_F_IO_DRAIN;
ctx->drain_next = 1;
}
@@ -5678,16 +5645,16 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
list_add_tail(&req->link_list, &head->link_list);
/* last request of a link, enqueue the link */
- if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK))) {
+ if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
io_queue_link_head(head);
*link = NULL;
}
} else {
if (unlikely(ctx->drain_next)) {
req->flags |= REQ_F_IO_DRAIN;
- req->ctx->drain_next = 0;
+ ctx->drain_next = 0;
}
- if (sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
+ if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
req->flags |= REQ_F_LINK_HEAD;
INIT_LIST_HEAD(&req->link_list);
@@ -5777,9 +5744,17 @@ static inline void io_consume_sqe(struct io_ring_ctx *ctx)
ctx->cached_sq_head++;
}
-static void io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
+#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
+ IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
+ IOSQE_BUFFER_SELECT)
+
+static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ const struct io_uring_sqe *sqe,
+ struct io_submit_state *state, bool async)
{
+ unsigned int sqe_flags;
+ int id, fd;
+
/*
* All io need record the previous position, if LINK vs DARIN,
* it can be used to mark the position of the first IO in the
@@ -5796,7 +5771,42 @@ static void io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
refcount_set(&req->refs, 2);
req->task = NULL;
req->result = 0;
+ req->needs_fixed_file = async;
INIT_IO_WORK(&req->work, io_wq_submit_work);
+
+ if (unlikely(req->opcode >= IORING_OP_LAST))
+ return -EINVAL;
+
+ if (io_op_defs[req->opcode].needs_mm && !current->mm) {
+ if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
+ return -EFAULT;
+ use_mm(ctx->sqo_mm);
+ }
+
+ sqe_flags = READ_ONCE(sqe->flags);
+ /* enforce forwards compatibility on users */
+ if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
+ return -EINVAL;
+
+ if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
+ !io_op_defs[req->opcode].buffer_select)
+ return -EOPNOTSUPP;
+
+ id = READ_ONCE(sqe->personality);
+ if (id) {
+ req->work.creds = idr_find(&ctx->personality_idr, id);
+ if (unlikely(!req->work.creds))
+ return -EINVAL;
+ get_cred(req->work.creds);
+ }
+
+ /* same numerical values with corresponding REQ_F_*, safe to copy */
+ req->flags |= sqe_flags & (IOSQE_IO_DRAIN | IOSQE_IO_HARDLINK |
+ IOSQE_ASYNC | IOSQE_FIXED_FILE |
+ IOSQE_BUFFER_SELECT | IOSQE_IO_LINK);
+
+ fd = READ_ONCE(sqe->fd);
+ return io_req_set_file(state, req, fd, sqe_flags);
}
static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
@@ -5844,28 +5854,18 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
break;
}
- io_init_req(ctx, req, sqe);
+ err = io_init_req(ctx, req, sqe, statep, async);
io_consume_sqe(ctx);
/* will complete beyond this point, count as submitted */
submitted++;
- if (unlikely(req->opcode >= IORING_OP_LAST)) {
- err = -EINVAL;
+ if (unlikely(err)) {
fail_req:
io_cqring_add_event(req, err);
io_double_put_req(req);
break;
}
- if (io_op_defs[req->opcode].needs_mm && !current->mm) {
- if (unlikely(!mmget_not_zero(ctx->sqo_mm))) {
- err = -EFAULT;
- goto fail_req;
- }
- use_mm(ctx->sqo_mm);
- }
-
- req->needs_fixed_file = async;
trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
true, async);
err = io_submit_sqe(req, sqe, statep, &link);
--
2.24.0
* Re: [PATCH 0/5] submission path refactoring pt.2
From: Jens Axboe @ 2020-04-12 15:23 UTC
To: Pavel Begunkov, io-uring, linux-kernel
On 4/11/20 5:05 PM, Pavel Begunkov wrote:
> The second and last part of the submission path refactoring. This moves
> request initialisation bits into io_init_req(), doing a better job of
> logically segmenting the code. A negative diffstat comes as a bonus.
>
> Pavel Begunkov (5):
> io_uring: remove obsolete @mm_fault
> io_uring: track mm through current->mm
> io_uring: DRY early submission req fail code
> io_uring: keep all sqe->flags in req->flags
> io_uring: move all request init code in one place
>
> fs/io_uring.c | 196 +++++++++++++++++++++++---------------------------
> 1 file changed, 90 insertions(+), 106 deletions(-)
Looks good, applied. Thanks!
--
Jens Axboe