* [PATCH 0/4] io-wq/io_uring locking optimisation
@ 2020-03-04 13:14 Pavel Begunkov
From: Pavel Begunkov @ 2020-03-04 13:14 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-kernel
[1-3] shed excessive locking of the @wqe and @worker spinlocks in
io_worker_handle_work().
[4] removes an extra refcount get/put pair by making the former
io_put_work() own the submission reference. It also changes the io-wq
get/put API and renames io_put_work() to io_free_work() to reflect that.
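A rough sketch of what the worker loop looks like after the series
(simplified; hashing, cancellation and work impersonation are omitted,
and io_get_next_work() is shown without its hash argument):

	static void io_worker_handle_work(struct io_worker *worker)
		__releases(wqe->lock)		/* entered with wqe->lock held */
	{
		struct io_wqe *wqe = worker->wqe;
		struct io_wq *wq = wqe->wq;

		do {
			struct io_wq_work *work = io_get_next_work(wqe);

			spin_unlock_irq(&wqe->lock);	/* one unlock per queue fetch */
			if (!work)
				break;
			io_assign_current_work(worker, work);

			/* run the whole dependent link without touching wqe->lock */
			do {
				struct io_wq_work *old_work = work;

				work->func(&work);
				work = (old_work == work) ? NULL : work;
				io_assign_current_work(worker, work);
				wq->free_work(old_work);	/* called exactly once */
			} while (work);

			spin_lock_irq(&wqe->lock);
		} while (1);
	}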
Pavel Begunkov (4):
io-wq: shuffle io_worker_handle_work() code
io-wq: optimise locking in io_worker_handle_work()
io-wq: optimise out *next_work() double lock
io_uring/io-wq: forward submission ref to async
fs/io-wq.c | 148 ++++++++++++++++++++++++++------------------------
fs/io-wq.h | 6 +-
fs/io_uring.c | 31 ++++-------
3 files changed, 90 insertions(+), 95 deletions(-)
--
2.24.0
* [PATCH 1/4] io-wq: shuffle io_worker_handle_work() code
@ 2020-03-04 13:14 ` Pavel Begunkov
From: Pavel Begunkov @ 2020-03-04 13:14 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-kernel
This is a preparation patch: it adds some helpers and makes
the next patches cleaner.
- extract io_impersonate_work() and io_assign_current_work()
- replace the @next label with a nested do-while
- move put_work() to right after NULL'ing cur_work.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io-wq.c | 123 ++++++++++++++++++++++++++++-------------------------
1 file changed, 64 insertions(+), 59 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 042c7e2057ef..e438dc4d7cb3 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -440,14 +440,43 @@ static void io_wq_switch_creds(struct io_worker *worker,
worker->saved_creds = old_creds;
}
+static void io_impersonate_work(struct io_worker *worker,
+ struct io_wq_work *work)
+{
+ if (work->files && current->files != work->files) {
+ task_lock(current);
+ current->files = work->files;
+ task_unlock(current);
+ }
+ if (work->fs && current->fs != work->fs)
+ current->fs = work->fs;
+ if (work->mm != worker->mm)
+ io_wq_switch_mm(worker, work);
+ if (worker->cur_creds != work->creds)
+ io_wq_switch_creds(worker, work);
+}
+
+static void io_assign_current_work(struct io_worker *worker,
+ struct io_wq_work *work)
+{
+ /* flush pending signals before assigning new work */
+ if (signal_pending(current))
+ flush_signals(current);
+ cond_resched();
+
+ spin_lock_irq(&worker->lock);
+ worker->cur_work = work;
+ spin_unlock_irq(&worker->lock);
+}
+
static void io_worker_handle_work(struct io_worker *worker)
__releases(wqe->lock)
{
- struct io_wq_work *work, *old_work = NULL, *put_work = NULL;
struct io_wqe *wqe = worker->wqe;
struct io_wq *wq = wqe->wq;
do {
+ struct io_wq_work *work, *old_work;
unsigned hash = -1U;
/*
@@ -464,69 +493,45 @@ static void io_worker_handle_work(struct io_worker *worker)
wqe->flags |= IO_WQE_FLAG_STALLED;
spin_unlock_irq(&wqe->lock);
- if (put_work && wq->put_work)
- wq->put_work(old_work);
if (!work)
break;
-next:
- /* flush any pending signals before assigning new work */
- if (signal_pending(current))
- flush_signals(current);
-
- cond_resched();
- spin_lock_irq(&worker->lock);
- worker->cur_work = work;
- spin_unlock_irq(&worker->lock);
-
- if (work->files && current->files != work->files) {
- task_lock(current);
- current->files = work->files;
- task_unlock(current);
- }
- if (work->fs && current->fs != work->fs)
- current->fs = work->fs;
- if (work->mm != worker->mm)
- io_wq_switch_mm(worker, work);
- if (worker->cur_creds != work->creds)
- io_wq_switch_creds(worker, work);
- /*
- * OK to set IO_WQ_WORK_CANCEL even for uncancellable work,
- * the worker function will do the right thing.
- */
- if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
- work->flags |= IO_WQ_WORK_CANCEL;
-
- if (wq->get_work) {
- put_work = work;
- wq->get_work(work);
- }
-
- old_work = work;
- work->func(&work);
-
- spin_lock_irq(&worker->lock);
- worker->cur_work = NULL;
- spin_unlock_irq(&worker->lock);
-
- spin_lock_irq(&wqe->lock);
-
- if (hash != -1U) {
- wqe->hash_map &= ~BIT(hash);
- wqe->flags &= ~IO_WQE_FLAG_STALLED;
- }
- if (work && work != old_work) {
- spin_unlock_irq(&wqe->lock);
-
- if (put_work && wq->put_work) {
- wq->put_work(put_work);
- put_work = NULL;
+ /* handle a whole dependent link */
+ do {
+ io_assign_current_work(worker, work);
+ io_impersonate_work(worker, work);
+
+ /*
+ * OK to set IO_WQ_WORK_CANCEL even for uncancellable
+ * work, the worker function will do the right thing.
+ */
+ if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
+ work->flags |= IO_WQ_WORK_CANCEL;
+
+ if (wq->get_work)
+ wq->get_work(work);
+
+ old_work = work;
+ work->func(&work);
+
+ spin_lock_irq(&worker->lock);
+ worker->cur_work = NULL;
+ spin_unlock_irq(&worker->lock);
+
+ if (wq->put_work)
+ wq->put_work(old_work);
+
+ if (hash != -1U) {
+ spin_lock_irq(&wqe->lock);
+ wqe->hash_map &= ~BIT_ULL(hash);
+ wqe->flags &= ~IO_WQE_FLAG_STALLED;
+ spin_unlock_irq(&wqe->lock);
+ /* dependent work is not hashed */
+ hash = -1U;
}
+ } while (work && work != old_work);
- /* dependent work not hashed */
- hash = -1U;
- goto next;
- }
+ spin_lock_irq(&wqe->lock);
} while (1);
}
--
2.24.0
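The structural effect of this patch is easiest to see in isolation: the
@next label, which re-ran the handler body for each item of a dependent
link, becomes an explicit inner loop. A generic sketch of the
transformation (handle() and next_in_link() are illustrative names, not
kernel functions):

	/* before: restart the body via a label */
	next:
		handle(work);
		work = next_in_link(work);
		if (work)
			goto next;

	/* after: the dependent link gets a loop of its own */
	do {
		handle(work);
		work = next_in_link(work);
	} while (work);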
* [PATCH 2/4] io-wq: optimise locking in io_worker_handle_work()
@ 2020-03-04 13:14 ` Pavel Begunkov
From: Pavel Begunkov @ 2020-03-04 13:14 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-kernel
There are two optimisations:
- Currently, io_worker_handle_work() does io_assign_current_work() twice
per request, and each call adds a lock/unlock pair on worker->lock. The
first resets worker->cur_work to NULL, and the second sets the real work
shortly after. If there is dependent work, assign it immediately instead,
which effectively removes the extra NULL'ing.
- There is no point in taking wqe->lock for linked works, as they are
not hashed. Optimise it out.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io-wq.c | 15 +++++++--------
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index e438dc4d7cb3..473af080470a 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -476,7 +476,7 @@ static void io_worker_handle_work(struct io_worker *worker)
struct io_wq *wq = wqe->wq;
do {
- struct io_wq_work *work, *old_work;
+ struct io_wq_work *work;
unsigned hash = -1U;
/*
@@ -495,12 +495,13 @@ static void io_worker_handle_work(struct io_worker *worker)
spin_unlock_irq(&wqe->lock);
if (!work)
break;
+ io_assign_current_work(worker, work);
/* handle a whole dependent link */
do {
- io_assign_current_work(worker, work);
- io_impersonate_work(worker, work);
+ struct io_wq_work *old_work;
+ io_impersonate_work(worker, work);
/*
* OK to set IO_WQ_WORK_CANCEL even for uncancellable
* work, the worker function will do the right thing.
@@ -513,10 +514,8 @@ static void io_worker_handle_work(struct io_worker *worker)
old_work = work;
work->func(&work);
-
- spin_lock_irq(&worker->lock);
- worker->cur_work = NULL;
- spin_unlock_irq(&worker->lock);
+ work = (old_work == work) ? NULL : work;
+ io_assign_current_work(worker, work);
if (wq->put_work)
wq->put_work(old_work);
@@ -529,7 +528,7 @@ static void io_worker_handle_work(struct io_worker *worker)
/* dependent work is not hashed */
hash = -1U;
}
- } while (work && work != old_work);
+ } while (work);
spin_lock_irq(&wqe->lock);
} while (1);
--
2.24.0
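The first optimisation is visible in the worker->lock traffic around each
request. A before/after sketch (simplified to the cur_work handling only):

	/* before: two lock/unlock pairs per request */
	spin_lock_irq(&worker->lock);
	worker->cur_work = work;		/* assign */
	spin_unlock_irq(&worker->lock);
	work->func(&work);
	spin_lock_irq(&worker->lock);
	worker->cur_work = NULL;		/* clear */
	spin_unlock_irq(&worker->lock);

	/* after: one pair, assigning the next (possibly NULL) work directly */
	work->func(&work);
	work = (old_work == work) ? NULL : work;
	io_assign_current_work(worker, work);	/* single lock/unlock inside */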
* [PATCH 3/4] io-wq: optimise out *next_work() double lock
@ 2020-03-04 13:14 ` Pavel Begunkov
From: Pavel Begunkov @ 2020-03-04 13:14 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-kernel
When executing non-linked hashed work, io_worker_handle_work()
locks and unlocks wqe->lock to update the hash, and then immediately
locks and unlocks it again to get the next work item. Optimise this
case to take the lock/unlock pair only once.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io-wq.c | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 473af080470a..82e76011d409 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -474,11 +474,11 @@ static void io_worker_handle_work(struct io_worker *worker)
{
struct io_wqe *wqe = worker->wqe;
struct io_wq *wq = wqe->wq;
+ unsigned hash = -1U;
do {
struct io_wq_work *work;
- unsigned hash = -1U;
-
+get_next:
/*
* If we got some work, mark us as busy. If we didn't, but
* the list isn't empty, it means we stalled on hashed work.
@@ -524,9 +524,12 @@ static void io_worker_handle_work(struct io_worker *worker)
spin_lock_irq(&wqe->lock);
wqe->hash_map &= ~BIT_ULL(hash);
wqe->flags &= ~IO_WQE_FLAG_STALLED;
- spin_unlock_irq(&wqe->lock);
/* dependent work is not hashed */
hash = -1U;
+ /* skip unnecessary unlock-lock wqe->lock */
+ if (!work)
+ goto get_next;
+ spin_unlock_irq(&wqe->lock);
}
} while (work);
--
2.24.0
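Generalised, the change keeps wqe->lock held across the hash_map update
when the link has ended, instead of dropping the lock only to re-take it
at the top of the loop. A sketch of the shape (clear_hash_bits() is an
illustrative stand-in for the hash_map/flags update):

	/* before: unlock, loop back, and immediately relock */
	spin_lock_irq(&wqe->lock);
	clear_hash_bits(wqe);
	spin_unlock_irq(&wqe->lock);
	/* ... back to the loop top ... */
	spin_lock_irq(&wqe->lock);
	work = io_get_next_work(wqe);

	/* after: keep the lock and branch straight to the fetch */
	spin_lock_irq(&wqe->lock);
	clear_hash_bits(wqe);
	if (!work)
		goto get_next;		/* wqe->lock still held */
	spin_unlock_irq(&wqe->lock);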
* [PATCH 4/4] io_uring/io-wq: forward submission ref to async
@ 2020-03-04 13:14 ` Pavel Begunkov
From: Pavel Begunkov @ 2020-03-04 13:14 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-kernel
First, it changes the io-wq interface: {get,put}_work() are replaced with
free_work(), which is guaranteed to be called exactly once. It also
enforces that the free_work() callback is non-NULL.
io_uring follows the change: instead of putting the submission reference
in io_put_req_async_completion(), it is now put in io_free_work(). Since
this also removes io_get_work() with its corresponding refcount_inc(),
the refcount balance is maintained.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io-wq.c | 29 ++++++++++++++---------------
fs/io-wq.h | 6 ++----
fs/io_uring.c | 31 +++++++++++--------------------
3 files changed, 27 insertions(+), 39 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 82e76011d409..eda36f997dea 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -107,8 +107,7 @@ struct io_wq {
struct io_wqe **wqes;
unsigned long state;
- get_work_fn *get_work;
- put_work_fn *put_work;
+ free_work_fn *free_work;
struct task_struct *manager;
struct user_struct *user;
@@ -509,16 +508,11 @@ static void io_worker_handle_work(struct io_worker *worker)
if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
work->flags |= IO_WQ_WORK_CANCEL;
- if (wq->get_work)
- wq->get_work(work);
-
old_work = work;
work->func(&work);
work = (old_work == work) ? NULL : work;
io_assign_current_work(worker, work);
-
- if (wq->put_work)
- wq->put_work(old_work);
+ wq->free_work(old_work);
if (hash != -1U) {
spin_lock_irq(&wqe->lock);
@@ -749,14 +743,17 @@ static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
return true;
}
-static void io_run_cancel(struct io_wq_work *work)
+static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
+ struct io_wq *wq = wqe->wq;
+
do {
struct io_wq_work *old_work = work;
work->flags |= IO_WQ_WORK_CANCEL;
work->func(&work);
work = (work == old_work) ? NULL : work;
+ wq->free_work(old_work);
} while (work);
}
@@ -773,7 +770,7 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
* It's close enough to not be an issue, fork() has the same delay.
*/
if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
- io_run_cancel(work);
+ io_run_cancel(work, wqe);
return;
}
@@ -912,7 +909,7 @@ static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe,
spin_unlock_irqrestore(&wqe->lock, flags);
if (found) {
- io_run_cancel(work);
+ io_run_cancel(work, wqe);
return IO_WQ_CANCEL_OK;
}
@@ -987,7 +984,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
spin_unlock_irqrestore(&wqe->lock, flags);
if (found) {
- io_run_cancel(work);
+ io_run_cancel(work, wqe);
return IO_WQ_CANCEL_OK;
}
@@ -1064,6 +1061,9 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
int ret = -ENOMEM, node;
struct io_wq *wq;
+ if (WARN_ON_ONCE(!data->free_work))
+ return ERR_PTR(-EINVAL);
+
wq = kzalloc(sizeof(*wq), GFP_KERNEL);
if (!wq)
return ERR_PTR(-ENOMEM);
@@ -1074,8 +1074,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
return ERR_PTR(-ENOMEM);
}
- wq->get_work = data->get_work;
- wq->put_work = data->put_work;
+ wq->free_work = data->free_work;
/* caller must already hold a reference to this */
wq->user = data->user;
@@ -1132,7 +1131,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
{
- if (data->get_work != wq->get_work || data->put_work != wq->put_work)
+ if (data->free_work != wq->free_work)
return false;
return refcount_inc_not_zero(&wq->use_refs);
diff --git a/fs/io-wq.h b/fs/io-wq.h
index a0978d6958f0..2117b9a4f161 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -81,14 +81,12 @@ struct io_wq_work {
*(work) = (struct io_wq_work){ .func = _func }; \
} while (0) \
-typedef void (get_work_fn)(struct io_wq_work *);
-typedef void (put_work_fn)(struct io_wq_work *);
+typedef void (free_work_fn)(struct io_wq_work *);
struct io_wq_data {
struct user_struct *user;
- get_work_fn *get_work;
- put_work_fn *put_work;
+ free_work_fn *free_work;
};
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 40ca9e6a5ace..0d6f4b3b8f13 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1558,8 +1558,8 @@ static void io_put_req(struct io_kiocb *req)
io_free_req(req);
}
-static void io_put_req_async_completion(struct io_kiocb *req,
- struct io_wq_work **workptr)
+static void io_steal_work(struct io_kiocb *req,
+ struct io_wq_work **workptr)
{
/*
* It's in an io-wq worker, so there always should be at least
@@ -1569,7 +1569,6 @@ static void io_put_req_async_completion(struct io_kiocb *req,
* It also means, that if the counter dropped to 1, then there is
* no asynchronous users left, so it's safe to steal the next work.
*/
- refcount_dec(&req->refs);
if (refcount_read(&req->refs) == 1) {
struct io_kiocb *nxt = NULL;
@@ -2578,7 +2577,7 @@ static bool io_req_cancelled(struct io_kiocb *req)
if (req->work.flags & IO_WQ_WORK_CANCEL) {
req_set_fail_links(req);
io_cqring_add_event(req, -ECANCELED);
- io_double_put_req(req);
+ io_put_req(req);
return true;
}
@@ -2606,7 +2605,7 @@ static void io_fsync_finish(struct io_wq_work **workptr)
if (io_req_cancelled(req))
return;
__io_fsync(req);
- io_put_req_async_completion(req, workptr);
+ io_steal_work(req, workptr);
}
static int io_fsync(struct io_kiocb *req, bool force_nonblock)
@@ -2639,7 +2638,7 @@ static void io_fallocate_finish(struct io_wq_work **workptr)
if (io_req_cancelled(req))
return;
__io_fallocate(req);
- io_put_req_async_completion(req, workptr);
+ io_steal_work(req, workptr);
}
static int io_fallocate_prep(struct io_kiocb *req,
@@ -3006,7 +3005,7 @@ static void io_close_finish(struct io_wq_work **workptr)
/* not cancellable, don't do io_req_cancelled() */
__io_close_finish(req);
- io_put_req_async_completion(req, workptr);
+ io_steal_work(req, workptr);
}
static int io_close(struct io_kiocb *req, bool force_nonblock)
@@ -3452,7 +3451,7 @@ static void io_accept_finish(struct io_wq_work **workptr)
if (io_req_cancelled(req))
return;
__io_accept(req, false);
- io_put_req_async_completion(req, workptr);
+ io_steal_work(req, workptr);
}
#endif
@@ -4719,7 +4718,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
io_put_req(req);
}
- io_put_req_async_completion(req, workptr);
+ io_steal_work(req, workptr);
}
static int io_req_needs_file(struct io_kiocb *req, int fd)
@@ -6105,21 +6104,14 @@ static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
return __io_sqe_files_update(ctx, &up, nr_args);
}
-static void io_put_work(struct io_wq_work *work)
+static void io_free_work(struct io_wq_work *work)
{
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
- /* Consider that io_put_req_async_completion() relies on this ref */
+ /* Consider that io_steal_work() relies on this ref */
io_put_req(req);
}
-static void io_get_work(struct io_wq_work *work)
-{
- struct io_kiocb *req = container_of(work, struct io_kiocb, work);
-
- refcount_inc(&req->refs);
-}
-
static int io_init_wq_offload(struct io_ring_ctx *ctx,
struct io_uring_params *p)
{
@@ -6130,8 +6122,7 @@ static int io_init_wq_offload(struct io_ring_ctx *ctx,
int ret = 0;
data.user = ctx->user;
- data.get_work = io_get_work;
- data.put_work = io_put_work;
+ data.free_work = io_free_work;
if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
/* Do QD, or 4 * CPUS, whatever is smallest */
--
2.24.0
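Under the new contract, an io-wq user supplies a single mandatory
free_work() callback that is invoked exactly once per work item, whether
the work ran normally or was cancelled. A minimal sketch of a hypothetical
user (my_req, my_req_put() and the queue depth are illustrative, not real
io_uring code):

	static void my_free_work(struct io_wq_work *work)
	{
		struct my_req *req = container_of(work, struct my_req, work);

		my_req_put(req);	/* drop the submission reference */
	}

	static struct io_wq *my_create_wq(struct user_struct *user)
	{
		struct io_wq_data data = {
			.user		= user,
			.free_work	= my_free_work,	/* NULL would fail with -EINVAL */
		};

		return io_wq_create(128, &data);	/* up to 128 bounded workers */
	}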
* Re: [PATCH 0/4] io-wq/io_uring locking optimisation
@ 2020-03-04 18:48 ` Jens Axboe
From: Jens Axboe @ 2020-03-04 18:48 UTC (permalink / raw)
To: Pavel Begunkov, io-uring, linux-kernel
On 3/4/20 6:14 AM, Pavel Begunkov wrote:
> [1-3] shed excessive locking of the @wqe and @worker spinlocks in
> io_worker_handle_work().
>
> [4] removes an extra refcount get/put pair by making the former
> io_put_work() own the submission reference. It also changes the io-wq
> get/put API and renames io_put_work() to io_free_work() to reflect that.
LGTM, and tests out fine so far - applied, thanks.
--
Jens Axboe