* [PATCH v3] io_uring: reduce latency by reissuing the operation
From: Olivier Langlois @ 2021-06-21 19:22 UTC
To: Jens Axboe, Pavel Begunkov, io-uring, linux-kernel; +Cc: Olivier Langlois
It is quite frequent that when an operation fails with EAGAIN, the
data becomes available between that failure and the vfs_poll() call
made by io_arm_poll_handler().

Detecting this situation and reissuing the operation is much faster
than going ahead and pushing the operation to the io-wq.
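
For reference, a condensed sketch of the retry flow this patch
introduces in __io_queue_sqe() (a simplification of the diff below,
not the exact kernel code):

issue_sqe:
	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK | IO_URING_F_COMPLETE_DEFER);
	if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
		switch (io_arm_poll_handler(req)) {
		case IO_APOLL_READY:
			/* vfs_poll() saw the data arrive in the window:
			 * retry the operation inline */
			goto issue_sqe;
		case IO_APOLL_ABORTED:
			/* poll handler could not be armed: fall back
			 * to the io-wq as before */
			io_queue_async_work(req);
			break;
		/* IO_APOLL_OK: poll armed, the request is retried
		 * once the file becomes ready */
		}
	}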
Signed-off-by: Olivier Langlois <[email protected]>
---
fs/io_uring.c | 31 ++++++++++++++++++++++---------
1 file changed, 22 insertions(+), 9 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index fc8637f591a6..5efa67c2f974 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5152,7 +5152,13 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
return mask;
}
-static bool io_arm_poll_handler(struct io_kiocb *req)
+enum {
+ IO_APOLL_OK,
+ IO_APOLL_ABORTED,
+ IO_APOLL_READY
+};
+
+static int io_arm_poll_handler(struct io_kiocb *req)
{
const struct io_op_def *def = &io_op_defs[req->opcode];
struct io_ring_ctx *ctx = req->ctx;
@@ -5162,22 +5168,22 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
int rw;
if (!req->file || !file_can_poll(req->file))
- return false;
+ return IO_APOLL_ABORTED;
if (req->flags & REQ_F_POLLED)
- return false;
+ return IO_APOLL_ABORTED;
if (def->pollin)
rw = READ;
else if (def->pollout)
rw = WRITE;
else
- return false;
+ return IO_APOLL_ABORTED;
/* if we can't nonblock try, then no point in arming a poll handler */
if (!io_file_supports_async(req, rw))
- return false;
+ return IO_APOLL_ABORTED;
apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
if (unlikely(!apoll))
- return false;
+ return IO_APOLL_ABORTED;
apoll->double_poll = NULL;
req->flags |= REQ_F_POLLED;
@@ -5203,12 +5209,14 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
if (ret || ipt.error) {
io_poll_remove_double(req);
spin_unlock_irq(&ctx->completion_lock);
- return false;
+ if (ret)
+ return IO_APOLL_READY;
+ return IO_APOLL_ABORTED;
}
spin_unlock_irq(&ctx->completion_lock);
trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
mask, apoll->poll.events);
- return true;
+ return IO_APOLL_OK;
}
static bool __io_poll_remove_one(struct io_kiocb *req,
@@ -6437,6 +6445,7 @@ static void __io_queue_sqe(struct io_kiocb *req)
struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
int ret;
+issue_sqe:
ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
/*
@@ -6456,12 +6465,16 @@ static void __io_queue_sqe(struct io_kiocb *req)
io_put_req(req);
}
} else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
- if (!io_arm_poll_handler(req)) {
+ switch (io_arm_poll_handler(req)) {
+ case IO_APOLL_READY:
+ goto issue_sqe;
+ case IO_APOLL_ABORTED:
/*
* Queued up for async execution, worker will release
* submit reference when the iocb is actually submitted.
*/
io_queue_async_work(req);
+ break;
}
} else {
io_req_complete_failed(req, ret);
--
2.32.0
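
For context, a minimal user-space sketch (using liburing) of the kind
of request that hits this path: a recv on a pollable socket where data
may arrive just after the nonblocking attempt fails. The helper name
recv_once and the descriptor sock_fd are illustrative, not part of the
patch; the ring is assumed to have been set up with
io_uring_queue_init(), and error handling is trimmed for brevity.

#include <liburing.h>
#include <stddef.h>

/* Hypothetical helper: submit one recv and wait for its completion.
 * sock_fd is assumed to be a connected, pollable socket. */
static int recv_once(struct io_uring *ring, int sock_fd,
		     char *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int res;

	if (!sqe)
		return -1;
	io_uring_prep_recv(sqe, sock_fd, buf, len, 0);
	io_uring_submit(ring);

	/* If no data was ready when the kernel first issued the recv
	 * nonblockingly, it previously punted the request to io-wq;
	 * with this patch it re-checks readiness via vfs_poll() and,
	 * if data raced in, reissues and completes inline. */
	io_uring_wait_cqe(ring, &cqe);
	res = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return res;
}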
* Re: [PATCH v3] io_uring: reduce latency by reissuing the operation
From: Jens Axboe @ 2021-06-21 20:05 UTC
To: Olivier Langlois, Pavel Begunkov, io-uring, linux-kernel
On 6/21/21 1:22 PM, Olivier Langlois wrote:
> It is quite frequent that when an operation fails with EAGAIN, the
> data becomes available between that failure and the vfs_poll() call
> made by io_arm_poll_handler().
>
> Detecting this situation and reissuing the operation is much faster
> than going ahead and pushing the operation to the io-wq.
This now looks pretty good to me. I know you had some data associated
with this, would be great to include it in the commit message.
--
Jens Axboe
* Re: [PATCH v3] io_uring: reduce latency by reissuing the operation
From: Pavel Begunkov @ 2021-06-22 11:00 UTC
To: Olivier Langlois, Jens Axboe, io-uring, linux-kernel
On 6/21/21 8:22 PM, Olivier Langlois wrote:
> It is quite frequent that when an operation fails with EAGAIN, the
> data becomes available between that failure and the vfs_poll() call
> made by io_arm_poll_handler().
>
> Detecting this situation and reissuing the operation is much faster
> than going ahead and pushing the operation to the io-wq.
Looks good to me, will stamp later after testing it out.
[...]
--
Pavel Begunkov