From: Hao Xu <[email protected]>
To: Jens Axboe <[email protected]>
Cc: [email protected], Pavel Begunkov <[email protected]>,
Joseph Qi <[email protected]>
Subject: [PATCH 8/9] io-wq: batch the handling of fixed worker private works
Date: Wed, 24 Nov 2021 12:46:47 +0800 [thread overview]
Message-ID: <[email protected]> (raw)
In-Reply-To: <[email protected]>
Let's reduce acct->lock contention by batching the handling of the private
work list for fixed_workers.
Signed-off-by: Hao Xu <[email protected]>
---
fs/io-wq.c | 42 ++++++++++++++++++++++++++++++++----------
fs/io-wq.h | 5 +++++
2 files changed, 37 insertions(+), 10 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index b53019d4691d..097ea598bfe5 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -479,7 +479,7 @@ static void io_wait_on_hash(struct io_wqe_acct *acct, unsigned int hash)
}
static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
- struct io_worker *worker)
+ struct io_worker *worker, bool needs_lock)
__must_hold(acct->lock)
{
struct io_wq_work_node *node, *prev;
@@ -487,14 +487,23 @@ static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
unsigned int stall_hash = -1U;
struct io_wqe *wqe = worker->wqe;
+ if (needs_lock)
+ raw_spin_lock(&acct->lock);
wq_list_for_each(node, prev, &acct->work_list) {
unsigned int hash;
work = container_of(node, struct io_wq_work, list);
+ /* hash optimization doesn't work for fixed_workers for now */
+ if (!needs_lock) {
+ wq_list_del(&acct->work_list, node, prev);
+ return work;
+ }
+
/* not hashed, can run anytime */
if (!io_wq_is_hashed(work)) {
wq_list_del(&acct->work_list, node, prev);
+ raw_spin_unlock(&acct->lock);
return work;
}
@@ -506,6 +515,7 @@ static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
wqe->hash_tail[hash] = NULL;
wq_list_cut(&acct->work_list, &tail->list, prev);
+ raw_spin_unlock(&acct->lock);
return work;
}
if (stall_hash == -1U)
@@ -515,15 +525,21 @@ static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
}
if (stall_hash != -1U) {
+ if (!needs_lock)
+ acct = &worker->acct;
/*
* Set this before dropping the lock to avoid racing with new
* work being added and clearing the stalled bit.
*/
set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
- raw_spin_unlock(&acct->lock);
+ if (needs_lock)
+ raw_spin_unlock(&acct->lock);
io_wait_on_hash(acct, stall_hash);
- raw_spin_lock(&acct->lock);
+ if (needs_lock)
+ raw_spin_lock(&acct->lock);
}
+ if (needs_lock)
+ raw_spin_unlock(&acct->lock);
return NULL;
}
@@ -553,7 +569,8 @@ static void io_assign_current_work(struct io_worker *worker,
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
-static void io_worker_handle_work(struct io_worker *worker, struct io_wqe_acct *acct)
+static void io_worker_handle_work(struct io_worker *worker, struct io_wqe_acct *acct,
+ bool needs_lock)
{
struct io_wqe *wqe = worker->wqe;
struct io_wq *wq = wqe->wq;
@@ -569,9 +586,7 @@ static void io_worker_handle_work(struct io_worker *worker, struct io_wqe_acct *
* can't make progress, any work completion or insertion will
* clear the stalled flag.
*/
- raw_spin_lock(&acct->lock);
- work = io_get_next_work(acct, worker);
- raw_spin_unlock(&acct->lock);
+ work = io_get_next_work(acct, worker, needs_lock);
if (work) {
raw_spin_lock(&wqe->lock);
__io_worker_busy(wqe, worker, work);
@@ -604,7 +619,7 @@ static void io_worker_handle_work(struct io_worker *worker, struct io_wqe_acct *
if (linked)
io_wqe_enqueue(wqe, linked);
- if (hash != -1U && !next_hashed) {
+ if (needs_lock && hash != -1U && !next_hashed) {
clear_bit(hash, &wq->hash->map);
clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
if (wq_has_sleeper(&wq->hash->wait))
@@ -618,12 +633,19 @@ static void io_worker_handle_work(struct io_worker *worker, struct io_wqe_acct *
static inline void io_worker_handle_private_work(struct io_worker *worker)
{
- io_worker_handle_work(worker, &worker->acct);
+ struct io_wqe_acct acct;
+
+ raw_spin_lock(&worker->acct.lock);
+ acct = worker->acct;
+ wq_list_clean(&worker->acct.work_list);
+ worker->acct.nr_works = 0;
+ raw_spin_unlock(&worker->acct.lock);
+ io_worker_handle_work(worker, &acct, false);
}
static inline void io_worker_handle_public_work(struct io_worker *worker)
{
- io_worker_handle_work(worker, io_wqe_get_acct(worker));
+ io_worker_handle_work(worker, io_wqe_get_acct(worker), true);
}
static int io_wqe_worker(void *data)
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 41bf37674a49..7c330264172b 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -40,6 +40,11 @@ struct io_wq_work_list {
(list)->first = NULL; \
} while (0)
+static inline void wq_list_clean(struct io_wq_work_list *list)
+{
+ list->first = list->last = NULL;
+}
+
static inline void wq_list_add_after(struct io_wq_work_node *node,
struct io_wq_work_node *pos,
struct io_wq_work_list *list)
--
2.24.4
next prev parent reply other threads:[~2021-11-24 4:47 UTC|newest]
Thread overview: 16+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-11-24 4:46 [RFC 0/9] fixed worker: a new way to handle io works Hao Xu
2021-11-24 4:46 ` [PATCH 1/9] io-wq: decouple work_list protection from the big wqe->lock Hao Xu
2021-11-24 4:46 ` [PATCH 2/9] io-wq: reduce acct->lock crossing functions lock/unlock Hao Xu
2021-11-24 4:46 ` [PATCH 3/9] io-wq: update check condition for lock Hao Xu
2021-11-25 14:47 ` Pavel Begunkov
2021-11-30 3:32 ` Hao Xu
2021-11-24 4:46 ` [PATCH 4/9] io-wq: use IO_WQ_ACCT_NR rather than hardcoded number Hao Xu
2021-11-24 4:46 ` [PATCH 5/9] io-wq: move hash wait entry to io_wqe_acct Hao Xu
2021-11-24 4:46 ` [PATCH 6/9] io-wq: add infra data structure for fixed workers Hao Xu
2021-11-24 4:46 ` [PATCH 7/9] io-wq: implement fixed worker logic Hao Xu
2021-11-24 4:46 ` Hao Xu [this message]
2021-11-24 4:46 ` [PATCH 9/9] io-wq: small optimization for __io_worker_busy() Hao Xu
2021-11-25 15:09 ` [RFC 0/9] fixed worker: a new way to handle io works Pavel Begunkov
2021-11-30 3:48 ` Hao Xu
-- strict thread matches above, loose matches on Subject: below --
2022-04-20 10:39 [RFC v2 0/9] fixed worker Hao Xu
2022-04-20 10:39 ` [PATCH 8/9] io-wq: batch the handling of fixed worker private works Hao Xu
2022-04-29 10:18 [RFC v3 0/9] fixed worker Hao Xu
2022-04-29 10:18 ` [PATCH 8/9] io-wq: batch the handling of fixed worker private works Hao Xu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
[email protected] \
[email protected] \
[email protected] \
[email protected] \
[email protected] \
[email protected] \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox