* [PATCH 1/4] io_uring: inline io_req_work_grab_env()
From: Pavel Begunkov @ 2020-07-15 9:46 UTC
To: Jens Axboe, io-uring
The only caller of io_req_work_grab_env() is io_prep_async_work(), and
both of them initialise req->work. Inline grab_env(); it's easier to
keep the initialisation in one place, especially as there have already
been bugs caused by misplacing io_req_init_async().
Signed-off-by: Pavel Begunkov <[email protected]>
---
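[Note: for reference, this is roughly the helper whose misplacement the
commit message refers to, reproduced from memory of the current tree;
a sketch for reviewers, not part of this patch:]

	/*
	 * ->work must be zeroed exactly once before any io_wq_work field
	 * is touched; the flag makes re-running this a no-op, but calling
	 * grab_env() before it ever ran left ->work uninitialised.
	 */
	static inline void io_req_init_async(struct io_kiocb *req)
	{
		if (req->flags & REQ_F_WORK_INITIALIZED)
			return;

		memset(&req->work, 0, sizeof(req->work));
		req->flags |= REQ_F_WORK_INITIALIZED;
	}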
fs/io_uring.c | 48 +++++++++++++++++++-----------------------------
1 file changed, 19 insertions(+), 29 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index b854dbf530bb..149a1c37665e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1114,31 +1114,7 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
}
}
-static void io_req_work_grab_env(struct io_kiocb *req)
-{
- const struct io_op_def *def = &io_op_defs[req->opcode];
-
- io_req_init_async(req);
-
- if (!req->work.mm && def->needs_mm) {
- mmgrab(current->mm);
- req->work.mm = current->mm;
- }
- if (!req->work.creds)
- req->work.creds = get_current_cred();
- if (!req->work.fs && def->needs_fs) {
- spin_lock(&current->fs->lock);
- if (!current->fs->in_exec) {
- req->work.fs = current->fs;
- req->work.fs->users++;
- } else {
- req->work.flags |= IO_WQ_WORK_CANCEL;
- }
- spin_unlock(&current->fs->lock);
- }
-}
-
-static inline void io_req_work_drop_env(struct io_kiocb *req)
+static void io_req_clean_work(struct io_kiocb *req)
{
if (!(req->flags & REQ_F_WORK_INITIALIZED))
return;
@@ -1176,8 +1152,22 @@ static void io_prep_async_work(struct io_kiocb *req)
if (def->unbound_nonreg_file)
req->work.flags |= IO_WQ_WORK_UNBOUND;
}
-
- io_req_work_grab_env(req);
+ if (!req->work.mm && def->needs_mm) {
+ mmgrab(current->mm);
+ req->work.mm = current->mm;
+ }
+ if (!req->work.creds)
+ req->work.creds = get_current_cred();
+ if (!req->work.fs && def->needs_fs) {
+ spin_lock(&current->fs->lock);
+ if (!current->fs->in_exec) {
+ req->work.fs = current->fs;
+ req->work.fs->users++;
+ } else {
+ req->work.flags |= IO_WQ_WORK_CANCEL;
+ }
+ spin_unlock(&current->fs->lock);
+ }
}
static void io_prep_async_link(struct io_kiocb *req)
@@ -1546,7 +1536,7 @@ static void io_dismantle_req(struct io_kiocb *req)
if (req->file)
io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
__io_put_req_task(req);
- io_req_work_drop_env(req);
+ io_req_clean_work(req);
if (req->flags & REQ_F_INFLIGHT) {
struct io_ring_ctx *ctx = req->ctx;
@@ -4815,7 +4805,7 @@ static bool io_poll_remove_one(struct io_kiocb *req)
io_put_req(req);
/*
* restore ->work because we will call
- * io_req_work_drop_env below when dropping the
+ * io_req_clean_work below when dropping the
* final reference.
*/
if (req->flags & REQ_F_WORK_INITIALIZED)
--
2.24.0
* [PATCH 3/4] io_uring: alloc ->io in io_req_defer_prep()
From: Pavel Begunkov @ 2020-07-15 9:46 UTC
To: Jens Axboe, io-uring
Every call to io_req_defer_prep() is preceded by allocating ->io, so
just do that inside the function. While we're at it, mark the error
paths unlikely() and replace "if (ret < 0)" with "if (ret)".

There is only one change in observable behaviour: instead of killing
the head request right away on error, it is now postponed until the
link is assembled, which looks preferable.
Signed-off-by: Pavel Begunkov <[email protected]>
---
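[Note: unlikely() is only a branch-prediction hint, so apart from the
s/ret < 0/ret/ conversion nothing here changes semantics; from memory,
include/linux/compiler.h defines it roughly as:]

	/* hint to the compiler that x is almost always false */
	# define unlikely(x)	__builtin_expect(!!(x), 0)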
fs/io_uring.c | 22 +++++++---------------
1 file changed, 7 insertions(+), 15 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index a3157028c591..0e6bbf3367b9 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5259,6 +5259,9 @@ static int io_req_defer_prep(struct io_kiocb *req,
if (!sqe)
return 0;
+ if (io_alloc_async_ctx(req))
+ return -EAGAIN;
+
if (io_op_defs[req->opcode].file_table) {
io_req_init_async(req);
ret = io_grab_files(req);
@@ -5398,10 +5401,8 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
if (!req->io) {
- if (io_alloc_async_ctx(req))
- return -EAGAIN;
ret = io_req_defer_prep(req, sqe);
- if (ret < 0)
+ if (ret)
return ret;
}
io_prep_async_link(req);
@@ -6004,11 +6005,8 @@ static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
}
} else if (req->flags & REQ_F_FORCE_ASYNC) {
if (!req->io) {
- ret = -EAGAIN;
- if (io_alloc_async_ctx(req))
- goto fail_req;
ret = io_req_defer_prep(req, sqe);
- if (unlikely(ret < 0))
+ if (unlikely(ret))
goto fail_req;
}
@@ -6060,11 +6058,8 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
head->flags |= REQ_F_IO_DRAIN;
ctx->drain_next = 1;
}
- if (io_alloc_async_ctx(req))
- return -EAGAIN;
-
ret = io_req_defer_prep(req, sqe);
- if (ret) {
+ if (unlikely(ret)) {
/* fail even hard links since we don't submit */
head->flags |= REQ_F_FAIL_LINK;
return ret;
@@ -6087,11 +6082,8 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
req->flags |= REQ_F_LINK_HEAD;
INIT_LIST_HEAD(&req->link_list);
- if (io_alloc_async_ctx(req))
- return -EAGAIN;
-
ret = io_req_defer_prep(req, sqe);
- if (ret)
+ if (unlikely(ret))
req->flags |= REQ_F_FAIL_LINK;
*link = req;
} else {
--
2.24.0
* [PATCH 4/4] io_uring/io-wq: move RLIMIT_FSIZE to io-wq
From: Pavel Begunkov @ 2020-07-15 9:46 UTC
To: Jens Axboe, io-uring
RLIMIT_FSIZE is needed only for execution from an io-wq context, hence
move all preparations from the hot path to io-wq work setup.
Signed-off-by: Pavel Begunkov <[email protected]>
---
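[Note: rlimit() just snapshots the caller's current soft limit, so the
value captured in io_prep_async_work() is exactly what the worker later
installs in io_impersonate_work(); from memory, the helpers in
include/linux/sched/signal.h look roughly like:]

	static inline unsigned long task_rlimit(const struct task_struct *task,
						unsigned int limit)
	{
		return READ_ONCE(task->signal->rlim[limit].rlim_cur);
	}

	/* shorthand for the current task's soft limit */
	static inline unsigned long rlimit(unsigned int limit)
	{
		return task_rlimit(current, limit);
	}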
fs/io-wq.c | 1 +
fs/io-wq.h | 1 +
fs/io_uring.c | 22 +++++++++-------------
3 files changed, 11 insertions(+), 13 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 72f759e1d6eb..8702d3c3b291 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -462,6 +462,7 @@ static void io_impersonate_work(struct io_worker *worker,
io_wq_switch_mm(worker, work);
if (worker->cur_creds != work->creds)
io_wq_switch_creds(worker, work);
+ current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->fsize;
}
static void io_assign_current_work(struct io_worker *worker,
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 114f12ec2d65..ddaf9614cf9b 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -89,6 +89,7 @@ struct io_wq_work {
struct mm_struct *mm;
const struct cred *creds;
struct fs_struct *fs;
+ unsigned long fsize;
unsigned flags;
};
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0e6bbf3367b9..ce63e1389568 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -644,7 +644,6 @@ struct io_kiocb {
unsigned int flags;
refcount_t refs;
struct task_struct *task;
- unsigned long fsize;
u64 user_data;
struct list_head link_list;
@@ -735,6 +734,7 @@ struct io_op_def {
unsigned pollout : 1;
/* op supports buffer selection */
unsigned buffer_select : 1;
+ unsigned needs_fsize : 1;
};
static const struct io_op_def io_op_defs[] = {
@@ -754,6 +754,7 @@ static const struct io_op_def io_op_defs[] = {
.hash_reg_file = 1,
.unbound_nonreg_file = 1,
.pollout = 1,
+ .needs_fsize = 1,
},
[IORING_OP_FSYNC] = {
.needs_file = 1,
@@ -768,6 +769,7 @@ static const struct io_op_def io_op_defs[] = {
.hash_reg_file = 1,
.unbound_nonreg_file = 1,
.pollout = 1,
+ .needs_fsize = 1,
},
[IORING_OP_POLL_ADD] = {
.needs_file = 1,
@@ -820,6 +822,7 @@ static const struct io_op_def io_op_defs[] = {
},
[IORING_OP_FALLOCATE] = {
.needs_file = 1,
+ .needs_fsize = 1,
},
[IORING_OP_OPENAT] = {
.file_table = 1,
@@ -851,6 +854,7 @@ static const struct io_op_def io_op_defs[] = {
.needs_file = 1,
.unbound_nonreg_file = 1,
.pollout = 1,
+ .needs_fsize = 1,
},
[IORING_OP_FADVISE] = {
.needs_file = 1,
@@ -1168,6 +1172,10 @@ static void io_prep_async_work(struct io_kiocb *req)
}
spin_unlock(&current->fs->lock);
}
+ if (def->needs_fsize)
+ req->work.fsize = rlimit(RLIMIT_FSIZE);
+ else
+ req->work.fsize = RLIM_INFINITY;
}
static void io_prep_async_link(struct io_kiocb *req)
@@ -3071,8 +3079,6 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
return -EBADF;
- req->fsize = rlimit(RLIMIT_FSIZE);
-
/* either don't need iovec imported or already have it */
if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
return 0;
@@ -3129,17 +3135,11 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
}
kiocb->ki_flags |= IOCB_WRITE;
- if (!force_nonblock)
- current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
-
if (req->file->f_op->write_iter)
ret2 = call_write_iter(req->file, kiocb, &iter);
else
ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
- if (!force_nonblock)
- current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
-
/*
* Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
* retry them without IOCB_NOWAIT.
@@ -3334,7 +3334,6 @@ static int io_fallocate_prep(struct io_kiocb *req,
req->sync.off = READ_ONCE(sqe->off);
req->sync.len = READ_ONCE(sqe->addr);
req->sync.mode = READ_ONCE(sqe->len);
- req->fsize = rlimit(RLIMIT_FSIZE);
return 0;
}
@@ -3345,11 +3344,8 @@ static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
/* fallocate always requiring blocking context */
if (force_nonblock)
return -EAGAIN;
-
- current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
req->sync.len);
- current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
if (ret < 0)
req_set_fail_links(req);
io_req_complete(req, ret);
--
2.24.0