* [PATCH 0/4] small cleanups for-next
From: Pavel Begunkov @ 2021-10-01 17:06 UTC
To: Jens Axboe, io-uring; +Cc: asml.silence
Patches 1-2 clean up after the drain patches.
Patch 4/4 restructures how selected buffers are stored, in particular
getting rid of aliasing them with rw.addr.
Pavel Begunkov (4):
io_uring: extract a helper for drain init
io_uring: don't return from io_drain_req()
io_uring: init opcode in io_init_req()
io_uring: clean up buffer select
fs/io_uring.c | 142 ++++++++++++++++++--------------------------------
1 file changed, 52 insertions(+), 90 deletions(-)
--
2.33.0
* [PATCH 1/4] io_uring: extract a helper for drain init
From: Pavel Begunkov @ 2021-10-01 17:07 UTC
To: Jens Axboe, io-uring; +Cc: asml.silence
Add a helper, io_init_req_drain(), for initialising requests with
IOSQE_IO_DRAIN set. Also move the flag-setting bits from the preamble
of io_drain_req() in there, because the helper already modifies all
the bits needed.
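For context, here is roughly what these semantics look like from
userspace, sketched with liburing (illustrative only, not part of this
patch; error handling is minimal):

    #include <liburing.h>

    /* Submit a no-op carrying IOSQE_IO_DRAIN: it won't start until all
     * previously submitted requests have completed, and requests
     * submitted after it wait for it in turn. */
    static int submit_drained_nop(struct io_uring *ring)
    {
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

            if (!sqe)
                    return -EBUSY;
            io_uring_prep_nop(sqe);
            io_uring_sqe_set_flags(sqe, IOSQE_IO_DRAIN);
            return io_uring_submit(ring);
    }

When such a request sits in the middle of a link, the drain has to be
propagated to the link head, which is what the helper below takes care
of.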
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 48 ++++++++++++++++++++++--------------------------
1 file changed, 22 insertions(+), 26 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index ed5bff887294..e75d9cac7d45 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6432,28 +6432,11 @@ static u32 io_get_sequence(struct io_kiocb *req)
static bool io_drain_req(struct io_kiocb *req)
{
- struct io_kiocb *pos;
struct io_ring_ctx *ctx = req->ctx;
struct io_defer_entry *de;
int ret;
u32 seq;
- /* not interested in head, start from the first linked */
- io_for_each_link(pos, req->link) {
- /*
- * If we need to drain a request in the middle of a link, drain
- * the head request and the next request/link after the current
- * link. Considering sequential execution of links,
- * IOSQE_IO_DRAIN will be maintained for every request of our
- * link.
- */
- if (pos->flags & REQ_F_IO_DRAIN) {
- ctx->drain_next = true;
- req->flags |= REQ_F_IO_DRAIN;
- break;
- }
- }
-
/* Still need defer if there is pending req in defer list. */
if (likely(list_empty_careful(&ctx->defer_list) &&
!(req->flags & REQ_F_IO_DRAIN))) {
@@ -6994,6 +6977,25 @@ static inline bool io_check_restriction(struct io_ring_ctx *ctx,
return true;
}
+static void io_init_req_drain(struct io_kiocb *req)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_kiocb *head = ctx->submit_state.link.head;
+
+ ctx->drain_active = true;
+ if (head) {
+ /*
+ * If we need to drain a request in the middle of a link, drain
+ * the head request and the next request/link after the current
+ * link. Considering sequential execution of links,
+ * IOSQE_IO_DRAIN will be maintained for every request of our
+ * link.
+ */
+ head->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC;
+ ctx->drain_next = true;
+ }
+}
+
static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
const struct io_uring_sqe *sqe)
__must_hold(&ctx->uring_lock)
@@ -7020,14 +7022,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
!io_op_defs[req->opcode].buffer_select)
return -EOPNOTSUPP;
- if (sqe_flags & IOSQE_IO_DRAIN) {
- struct io_submit_link *link = &ctx->submit_state.link;
-
- ctx->drain_active = true;
- req->flags |= REQ_F_FORCE_ASYNC;
- if (link->head)
- link->head->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC;
- }
+ if (sqe_flags & IOSQE_IO_DRAIN)
+ io_init_req_drain(req);
}
if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
@@ -7039,7 +7035,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) {
ctx->drain_next = false;
ctx->drain_active = true;
- req->flags |= REQ_F_FORCE_ASYNC | IOSQE_IO_DRAIN;
+ req->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC;
}
}
--
2.33.0
* [PATCH 2/4] io_uring: don't return from io_drain_req()
From: Pavel Begunkov @ 2021-10-01 17:07 UTC
To: Jens Axboe, io-uring; +Cc: asml.silence
Don't return a value from io_drain_req(); if we get there but it's a
false positive and we shouldn't actually drain, punt the request to
task_work instead of handing it back to the caller.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 36 ++++++++++++++----------------------
1 file changed, 14 insertions(+), 22 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e75d9cac7d45..8d6d8415e89d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6430,46 +6430,39 @@ static u32 io_get_sequence(struct io_kiocb *req)
return seq;
}
-static bool io_drain_req(struct io_kiocb *req)
+static void io_drain_req(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_defer_entry *de;
int ret;
- u32 seq;
+ u32 seq = io_get_sequence(req);
/* Still need defer if there is pending req in defer list. */
- if (likely(list_empty_careful(&ctx->defer_list) &&
- !(req->flags & REQ_F_IO_DRAIN))) {
- ctx->drain_active = false;
- return false;
- }
-
- seq = io_get_sequence(req);
- /* Still a chance to pass the sequence check */
if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
+queue:
ctx->drain_active = false;
- return false;
+ io_req_task_queue(req);
+ return;
}
ret = io_req_prep_async(req);
- if (ret)
- goto fail;
+ if (ret) {
+fail:
+ io_req_complete_failed(req, ret);
+ return;
+ }
io_prep_async_link(req);
de = kmalloc(sizeof(*de), GFP_KERNEL);
if (!de) {
ret = -ENOMEM;
-fail:
- io_req_complete_failed(req, ret);
- return true;
+ goto fail;
}
spin_lock(&ctx->completion_lock);
if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
spin_unlock(&ctx->completion_lock);
kfree(de);
- io_queue_async_work(req, NULL);
- ctx->drain_active = false;
- return true;
+ goto queue;
}
trace_io_uring_defer(ctx, req, req->user_data);
@@ -6477,7 +6470,6 @@ static bool io_drain_req(struct io_kiocb *req)
de->seq = seq;
list_add_tail(&de->list, &ctx->defer_list);
spin_unlock(&ctx->completion_lock);
- return true;
}
static void io_clean_op(struct io_kiocb *req)
@@ -6933,8 +6925,8 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
{
if (req->flags & REQ_F_FAIL) {
io_req_complete_fail_submit(req);
- } else if (unlikely(req->ctx->drain_active) && io_drain_req(req)) {
- return;
+ } else if (unlikely(req->ctx->drain_active)) {
+ io_drain_req(req);
} else {
int ret = io_req_prep_async(req);
--
2.33.0
* [PATCH 3/4] io_uring: init opcode in io_init_req()
From: Pavel Begunkov @ 2021-10-01 17:07 UTC
To: Jens Axboe, io-uring; +Cc: asml.silence
Move the io_req_prep() call inside io_init_req(); it simplifies error
handling for the callers a bit.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8d6d8415e89d..ddb23bb2e4b8 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6994,7 +6994,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
{
struct io_submit_state *state;
unsigned int sqe_flags;
- int personality, ret = 0;
+ int personality;
/* req is partially pre-initialised, see io_preinit_req() */
req->opcode = READ_ONCE(sqe->opcode);
@@ -7055,9 +7055,10 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd),
(sqe_flags & IOSQE_FIXED_FILE));
if (unlikely(!req->file))
- ret = -EBADF;
+ return -EBADF;
}
- return ret;
+
+ return io_req_prep(req, sqe);
}
static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
@@ -7069,7 +7070,6 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
ret = io_init_req(ctx, req, sqe);
if (unlikely(ret)) {
-fail_req:
trace_io_uring_req_failed(sqe, ret);
/* fail even hard links since we don't submit */
@@ -7094,10 +7094,6 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
return ret;
}
req_fail_link_node(req, ret);
- } else {
- ret = io_req_prep(req, sqe);
- if (unlikely(ret))
- goto fail_req;
}
/* don't need @sqe from now on */
--
2.33.0
* [PATCH 4/4] io_uring: clean up buffer select
From: Pavel Begunkov @ 2021-10-01 17:07 UTC
To: Jens Axboe, io-uring; +Cc: asml.silence
Hiding a pointer to a struct io_buffer in rw.addr is error-prone. We
have some spare space in io_kiocb, so keep kbufs in a separate field,
without aliasing and the risk of it being misused.
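For reference, this is the userspace-visible feature whose bookkeeping
is being moved: a hedged liburing sketch of buffer selection (names and
error handling are illustrative, not part of the patch):

    #include <stdio.h>
    #include <liburing.h>

    /* Provide a group of buffers, then let the kernel pick one of them
     * for a recv; the chosen buffer's id is reported in cqe->flags.
     * bufs: nbufs contiguous buffers of buf_len bytes each. */
    static void recv_buffer_select(struct io_uring *ring, int fd,
                                   void *bufs, int buf_len, int nbufs,
                                   int bgid)
    {
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;

            sqe = io_uring_get_sqe(ring);
            io_uring_prep_provide_buffers(sqe, bufs, buf_len, nbufs,
                                          bgid, 0);
            io_uring_submit(ring);

            sqe = io_uring_get_sqe(ring);
            io_uring_prep_recv(sqe, fd, NULL, buf_len, 0);
            sqe->buf_group = bgid;
            io_uring_sqe_set_flags(sqe, IOSQE_BUFFER_SELECT);
            io_uring_submit(ring);

            /* first CQE is for the provide, second for the recv */
            for (int i = 0; i < 2; i++) {
                    if (io_uring_wait_cqe(ring, &cqe))
                            break;
                    if (i == 1 && cqe->res >= 0)
                            printf("read %d bytes into buffer %u\n",
                                   cqe->res,
                                   cqe->flags >> IORING_CQE_BUFFER_SHIFT);
                    io_uring_cqe_seen(ring, cqe);
            }
    }

It's that kernel-side struct io_buffer pointer, previously stashed in
rw.addr (or sr_msg.kbuf), that now lives in the dedicated kbuf field.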
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 46 ++++++++++++----------------------------------
1 file changed, 12 insertions(+), 34 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index ddb23bb2e4b8..cf392b1228d0 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -573,7 +573,6 @@ struct io_sr_msg {
int msg_flags;
int bgid;
size_t len;
- struct io_buffer *kbuf;
};
struct io_open {
@@ -877,6 +876,7 @@ struct io_kiocb {
struct io_mapped_ubuf *imu;
struct io_wq_work work;
const struct cred *creds;
+ struct io_buffer *kbuf;
};
struct io_tctx_node {
@@ -2376,12 +2376,9 @@ static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
{
- struct io_buffer *kbuf;
-
if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
return 0;
- kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
- return io_put_kbuf(req, kbuf);
+ return io_put_kbuf(req, req->kbuf);
}
static inline bool io_run_task_work(void)
@@ -3003,9 +3000,9 @@ static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
}
static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
- int bgid, struct io_buffer *kbuf,
- bool needs_lock)
+ int bgid, bool needs_lock)
{
+ struct io_buffer *kbuf = req->kbuf;
struct io_buffer *head;
if (req->flags & REQ_F_BUFFER_SELECTED)
@@ -3027,12 +3024,13 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
}
if (*len > kbuf->len)
*len = kbuf->len;
+ req->flags |= REQ_F_BUFFER_SELECTED;
+ req->kbuf = kbuf;
} else {
kbuf = ERR_PTR(-ENOBUFS);
}
io_ring_submit_unlock(req->ctx, needs_lock);
-
return kbuf;
}
@@ -3042,13 +3040,10 @@ static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
struct io_buffer *kbuf;
u16 bgid;
- kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
bgid = req->buf_index;
- kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
+ kbuf = io_buffer_select(req, len, bgid, needs_lock);
if (IS_ERR(kbuf))
return kbuf;
- req->rw.addr = (u64) (unsigned long) kbuf;
- req->flags |= REQ_F_BUFFER_SELECTED;
return u64_to_user_ptr(kbuf->addr);
}
@@ -3104,9 +3099,8 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
bool needs_lock)
{
if (req->flags & REQ_F_BUFFER_SELECTED) {
- struct io_buffer *kbuf;
+ struct io_buffer *kbuf = req->kbuf;
- kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
iov[0].iov_len = kbuf->len;
return 0;
@@ -4872,20 +4866,13 @@ static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
bool needs_lock)
{
struct io_sr_msg *sr = &req->sr_msg;
- struct io_buffer *kbuf;
-
- kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
- if (IS_ERR(kbuf))
- return kbuf;
- sr->kbuf = kbuf;
- req->flags |= REQ_F_BUFFER_SELECTED;
- return kbuf;
+ return io_buffer_select(req, &sr->len, sr->bgid, needs_lock);
}
static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
{
- return io_put_kbuf(req, req->sr_msg.kbuf);
+ return io_put_kbuf(req, req->kbuf);
}
static int io_recvmsg_prep_async(struct io_kiocb *req)
@@ -6475,17 +6462,8 @@ static void io_drain_req(struct io_kiocb *req)
static void io_clean_op(struct io_kiocb *req)
{
if (req->flags & REQ_F_BUFFER_SELECTED) {
- switch (req->opcode) {
- case IORING_OP_READV:
- case IORING_OP_READ_FIXED:
- case IORING_OP_READ:
- kfree((void *)(unsigned long)req->rw.addr);
- break;
- case IORING_OP_RECVMSG:
- case IORING_OP_RECV:
- kfree(req->sr_msg.kbuf);
- break;
- }
+ kfree(req->kbuf);
+ req->kbuf = NULL;
}
if (req->flags & REQ_F_NEED_CLEANUP) {
--
2.33.0
* Re: [PATCH 0/4] small cleanups for-next
From: Jens Axboe @ 2021-10-01 17:34 UTC
To: Pavel Begunkov, io-uring
On 10/1/21 11:06 AM, Pavel Begunkov wrote:
> Patches 1-2 clean up after the drain patches.
>
> Patch 4/4 restructures how selected buffers are stored, in particular
> getting rid of aliasing them with rw.addr.
>
> Pavel Begunkov (4):
> io_uring: extract a helper for drain init
> io_uring: don't return from io_drain_req()
> io_uring: init opcode in io_init_req()
> io_uring: clean up buffer select
>
> fs/io_uring.c | 142 ++++++++++++++++++--------------------------------
> 1 file changed, 52 insertions(+), 90 deletions(-)
Looks good, and I agree on removing the kbuf aliasing. Applied.
--
Jens Axboe