* [PATCH v2 1/2] io_uring: reduce frequent add_wait_queue() overhead for multi-shot poll request
2021-09-24 4:22 [PATCH v2 0/2] improvements for poll requests Xiaoguang Wang
@ 2021-09-24 4:22 ` Xiaoguang Wang
2021-09-24 4:22 ` [PATCH v2 2/2] io_uring: don't get completion_lock in io_poll_rewait() Xiaoguang Wang
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: Xiaoguang Wang @ 2021-09-24 4:22 UTC (permalink / raw)
To: io-uring; +Cc: axboe, asml.silence
Run echo_server to evaluate io_uring's multi-shot poll performance, perf
shows that add_wait_queue() has obvious overhead. Introduce a new state
'active' in io_poll_iocb to indicate whether io_poll_wake() should queue
a task_work. This new state is initially set to true, set to false when
starting to queue a task_work, and set to true again once a poll cqe has
been committed. One concern is that this method may lose wakeup events,
but it seems to be ok.
io_poll_wake io_poll_task_func
t1 |
t2 | WRITE_ONCE(req->poll.active, true);
t3 |
t4 | io_commit_cqring(ctx);
t5 |
t6 |
If a wakeup event happens before or at t4, it's ok: the user app will always
see a cqe. If a wakeup event happens after t4 then, IIUC, io_poll_wake()
will see the new req->poll.active value by using READ_ONCE().
Echo_server code can be cloned from:
https://codeup.openanolis.cn/codeup/storage/io_uring-echo-server.git,
branch is xiaoguangwang/io_uring_multishot.
Without this patch, the tps in our test environment is 284116, with
this patch, the tps is 287832, about 1.3% reqs improvement, which
is indeed in accord with the saved add_wait_queue() cost.
Signed-off-by: Xiaoguang Wang <[email protected]>
---
fs/io_uring.c | 20 ++++++++++++++++----
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 7bfd2d00d4fc..7fc52a7f6f05 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -488,6 +488,7 @@ struct io_poll_iocb {
__poll_t events;
bool done;
bool canceled;
+ bool active;
struct wait_queue_entry wait;
};
@@ -5248,8 +5249,6 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
- list_del_init(&poll->wait.entry);
-
req->result = mask;
req->io_task_work.func = func;
@@ -5280,7 +5279,10 @@ static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
spin_lock(&ctx->completion_lock);
if (!req->result && !READ_ONCE(poll->canceled)) {
- add_wait_queue(poll->head, &poll->wait);
+ if (req->opcode == IORING_OP_POLL_ADD)
+ WRITE_ONCE(req->poll.active, true);
+ else
+ add_wait_queue(poll->head, &poll->wait);
return true;
}
@@ -5356,6 +5358,9 @@ static inline bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
return done;
}
+static bool __io_poll_remove_one(struct io_kiocb *req,
+ struct io_poll_iocb *poll, bool do_cancel);
+
static void io_poll_task_func(struct io_kiocb *req, bool *locked)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -5369,10 +5374,11 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
done = __io_poll_complete(req, req->result);
if (done) {
io_poll_remove_double(req);
+ __io_poll_remove_one(req, io_poll_get_single(req), true);
hash_del(&req->hash_node);
} else {
req->result = 0;
- add_wait_queue(req->poll.head, &req->poll.wait);
+ WRITE_ONCE(req->poll.active, true);
}
io_commit_cqring(ctx);
spin_unlock(&ctx->completion_lock);
@@ -5427,6 +5433,7 @@ static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
poll->head = NULL;
poll->done = false;
poll->canceled = false;
+ poll->active = true;
#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
/* mask in events that we always want/need */
poll->events = events | IO_POLL_UNMASK;
@@ -5524,6 +5531,7 @@ static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
key_to_poll(key));
+ list_del_init(&poll->wait.entry);
return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
}
@@ -5792,6 +5800,10 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
struct io_kiocb *req = wait->private;
struct io_poll_iocb *poll = &req->poll;
+ if (!READ_ONCE(poll->active))
+ return 0;
+
+ WRITE_ONCE(poll->active, false);
return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
}
--
2.14.4.44.g2045bb6
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH v2 2/2] io_uring: don't get completion_lock in io_poll_rewait()
2021-09-24 4:22 [PATCH v2 0/2] improvements for poll requests Xiaoguang Wang
2021-09-24 4:22 ` [PATCH v2 1/2] io_uring: reduce frequent add_wait_queue() overhead for multi-shot poll request Xiaoguang Wang
@ 2021-09-24 4:22 ` Xiaoguang Wang
2021-10-21 7:00 ` [PATCH v2 0/2] improvements for poll requests Xiaoguang Wang
2021-10-21 14:47 ` Jens Axboe
3 siblings, 0 replies; 5+ messages in thread
From: Xiaoguang Wang @ 2021-09-24 4:22 UTC (permalink / raw)
To: io-uring; +Cc: axboe, asml.silence
In current implementation, if there are not available events,
io_poll_rewait() just gets completion_lock, and unlocks it in
io_poll_task_func() or io_async_task_func(), which isn't necessary.
Change this logic to let io_poll_task_func() or io_async_task_func()
get the completion_lock lock, this is also a preparation for
later patch, which will batch poll request completion.
Signed-off-by: Xiaoguang Wang <[email protected]>
---
fs/io_uring.c | 50 ++++++++++++++++++++++----------------------------
1 file changed, 22 insertions(+), 28 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 7fc52a7f6f05..1cf0aaaa086e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5263,10 +5263,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
}
static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
- __acquires(&req->ctx->completion_lock)
{
- struct io_ring_ctx *ctx = req->ctx;
-
/* req->task == current here, checking PF_EXITING is safe */
if (unlikely(req->task->flags & PF_EXITING))
WRITE_ONCE(poll->canceled, true);
@@ -5277,7 +5274,6 @@ static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
req->result = vfs_poll(req->file, &pt) & poll->events;
}
- spin_lock(&ctx->completion_lock);
if (!req->result && !READ_ONCE(poll->canceled)) {
if (req->opcode == IORING_OP_POLL_ADD)
WRITE_ONCE(req->poll.active, true);
@@ -5365,30 +5361,29 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *nxt;
+ bool done;
- if (io_poll_rewait(req, &req->poll)) {
- spin_unlock(&ctx->completion_lock);
- } else {
- bool done;
+ if (io_poll_rewait(req, &req->poll))
+ return;
- done = __io_poll_complete(req, req->result);
- if (done) {
- io_poll_remove_double(req);
- __io_poll_remove_one(req, io_poll_get_single(req), true);
- hash_del(&req->hash_node);
- } else {
- req->result = 0;
- WRITE_ONCE(req->poll.active, true);
- }
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
+ spin_lock(&ctx->completion_lock);
+ done = __io_poll_complete(req, req->result);
+ if (done) {
+ io_poll_remove_double(req);
+ __io_poll_remove_one(req, io_poll_get_single(req), true);
+ hash_del(&req->hash_node);
+ } else {
+ req->result = 0;
+ WRITE_ONCE(req->poll.active, true);
+ }
+ io_commit_cqring(ctx);
+ spin_unlock(&ctx->completion_lock);
+ io_cqring_ev_posted(ctx);
- if (done) {
- nxt = io_put_req_find_next(req);
- if (nxt)
- io_req_task_submit(nxt, locked);
- }
+ if (done) {
+ nxt = io_put_req_find_next(req);
+ if (nxt)
+ io_req_task_submit(nxt, locked);
}
}
@@ -5507,11 +5502,10 @@ static void io_async_task_func(struct io_kiocb *req, bool *locked)
trace_io_uring_task_run(req->ctx, req, req->opcode, req->user_data);
- if (io_poll_rewait(req, &apoll->poll)) {
- spin_unlock(&ctx->completion_lock);
+ if (io_poll_rewait(req, &apoll->poll))
return;
- }
+ spin_lock(&ctx->completion_lock);
hash_del(&req->hash_node);
io_poll_remove_double(req);
spin_unlock(&ctx->completion_lock);
--
2.14.4.44.g2045bb6
^ permalink raw reply related [flat|nested] 5+ messages in thread
* Re: [PATCH v2 0/2] improvements for poll requests
2021-09-24 4:22 [PATCH v2 0/2] improvements for poll requests Xiaoguang Wang
2021-09-24 4:22 ` [PATCH v2 1/2] io_uring: reduce frequent add_wait_queue() overhead for multi-shot poll request Xiaoguang Wang
2021-09-24 4:22 ` [PATCH v2 2/2] io_uring: don't get completion_lock in io_poll_rewait() Xiaoguang Wang
@ 2021-10-21 7:00 ` Xiaoguang Wang
2021-10-21 14:47 ` Jens Axboe
3 siblings, 0 replies; 5+ messages in thread
From: Xiaoguang Wang @ 2021-10-21 7:00 UTC (permalink / raw)
To: io-uring; +Cc: axboe, asml.silence
hello,
Friendly ping.
The method in this patch set can improve multi-shot poll performance a bit.
If you have some free time, please have a look, thanks.
Regards,
Xiaoguang Wang
> Echo_server codes can be clone from:
> https://codeup.openanolis.cn/codeup/storage/io_uring-echo-server.git
> branch is xiaoguangwang/io_uring_multishot. There is a simple HOWTO
> in this repository.
>
> Usage:
> In server: port 10016, 1000 connections, packet size 16 bytes, and
> enable fixed files.
> taskset -c 10 io_uring_echo_server_multi_shot -f -p 10016 -n 1000 -l 16
>
> In client:
> taskset -c 13,14,15,16 ./echo -addr 11.238.147.21:10016 -n 1000 -size 16
>
> Before this patchset, the tps is like below:
> 1:15:53 req: 1430425, req/s: 286084.693
> 11:15:58 req: 1426021, req/s: 285204.079
> 11:16:03 req: 1416761, req/s: 283352.146
> 11:16:08 req: 1417969, req/s: 283165.637
> 11:16:13 req: 1424591, req/s: 285349.915
> 11:16:18 req: 1418706, req/s: 283738.725
> 11:16:23 req: 1411988, req/s: 282399.052
> 11:16:28 req: 1419097, req/s: 283820.477
> 11:16:33 req: 1417816, req/s: 283563.262
> 11:16:38 req: 1422461, req/s: 284491.702
> 11:16:43 req: 1418176, req/s: 283635.327
> 11:16:48 req: 1414525, req/s: 282905.276
> 11:16:53 req: 1415624, req/s: 283124.140
> 11:16:58 req: 1426435, req/s: 284970.486
>
> with this patchset:
> 2021/09/24 11:10:01 start to do client
> 11:10:06 req: 1444979, req/s: 288995.300
> 11:10:11 req: 1442559, req/s: 288511.689
> 11:10:16 req: 1427253, req/s: 285450.390
> 11:10:21 req: 1445236, req/s: 288349.853
> 11:10:26 req: 1423949, req/s: 285480.941
> 11:10:31 req: 1445304, req/s: 289060.815
> 11:10:36 req: 1441036, req/s: 288207.119
> 11:10:41 req: 1441117, req/s: 288220.695
> 11:10:46 req: 1441451, req/s: 288292.731
> 11:10:51 req: 1438801, req/s: 287759.157
> 11:10:56 req: 1433227, req/s: 286646.338
> 11:11:01 req: 1438307, req/s: 287661.577
>
> about 1.3% tps improvements.
>
> Changes in v2:
> I dropped the poll request completion batching patch in V1, since
> it shows performance fluctuations, hard to say whether it's useful.
>
> Xiaoguang Wang (2):
> io_uring: reduce frequent add_wait_queue() overhead for multi-shot
> poll request
> io_uring: don't get completion_lock in io_poll_rewait()
>
> fs/io_uring.c | 66 ++++++++++++++++++++++++++++++++---------------------------
> 1 file changed, 36 insertions(+), 30 deletions(-)
>
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH v2 0/2] improvements for poll requests
2021-09-24 4:22 [PATCH v2 0/2] improvements for poll requests Xiaoguang Wang
` (2 preceding siblings ...)
2021-10-21 7:00 ` [PATCH v2 0/2] improvements for poll requests Xiaoguang Wang
@ 2021-10-21 14:47 ` Jens Axboe
3 siblings, 0 replies; 5+ messages in thread
From: Jens Axboe @ 2021-10-21 14:47 UTC (permalink / raw)
To: Xiaoguang Wang, io-uring; +Cc: asml.silence
On 9/23/21 10:22 PM, Xiaoguang Wang wrote:
> Echo_server codes can be clone from:
> https://codeup.openanolis.cn/codeup/storage/io_uring-echo-server.git
> branch is xiaoguangwang/io_uring_multishot. There is a simple HOWTO
> in this repository.
>
> Usage:
> In server: port 10016, 1000 connections, packet size 16 bytes, and
> enable fixed files.
> taskset -c 10 io_uring_echo_server_multi_shot -f -p 10016 -n 1000 -l 16
>
> In client:
> taskset -c 13,14,15,16 ./echo -addr 11.238.147.21:10016 -n 1000 -size 16
>
> Before this patchset, the tps is like below:
> 1:15:53 req: 1430425, req/s: 286084.693
> 11:15:58 req: 1426021, req/s: 285204.079
> 11:16:03 req: 1416761, req/s: 283352.146
> 11:16:08 req: 1417969, req/s: 283165.637
> 11:16:13 req: 1424591, req/s: 285349.915
> 11:16:18 req: 1418706, req/s: 283738.725
> 11:16:23 req: 1411988, req/s: 282399.052
> 11:16:28 req: 1419097, req/s: 283820.477
> 11:16:33 req: 1417816, req/s: 283563.262
> 11:16:38 req: 1422461, req/s: 284491.702
> 11:16:43 req: 1418176, req/s: 283635.327
> 11:16:48 req: 1414525, req/s: 282905.276
> 11:16:53 req: 1415624, req/s: 283124.140
> 11:16:58 req: 1426435, req/s: 284970.486
>
> with this patchset:
> 2021/09/24 11:10:01 start to do client
> 11:10:06 req: 1444979, req/s: 288995.300
> 11:10:11 req: 1442559, req/s: 288511.689
> 11:10:16 req: 1427253, req/s: 285450.390
> 11:10:21 req: 1445236, req/s: 288349.853
> 11:10:26 req: 1423949, req/s: 285480.941
> 11:10:31 req: 1445304, req/s: 289060.815
> 11:10:36 req: 1441036, req/s: 288207.119
> 11:10:41 req: 1441117, req/s: 288220.695
> 11:10:46 req: 1441451, req/s: 288292.731
> 11:10:51 req: 1438801, req/s: 287759.157
> 11:10:56 req: 1433227, req/s: 286646.338
> 11:11:01 req: 1438307, req/s: 287661.577
>
> about 1.3% tps improvements.
>
> Changes in v2:
> I dropped the poll request completion batching patch in V1, since
> it shows performance fluctuations, hard to say whether it's useful.
Sorry for being slow on this one. Can you resend it against
for-5.16/io_uring? It no longer applies. Thanks!
--
Jens Axboe
^ permalink raw reply [flat|nested] 5+ messages in thread