From: Hao Xu <[email protected]>
To: [email protected]
Cc: Jens Axboe <[email protected]>, Pavel Begunkov <[email protected]>
Subject: [PATCH 8/9] io-wq: batch the handling of fixed worker private works
Date: Wed, 20 Apr 2022 18:39:59 +0800 [thread overview]
Message-ID: <[email protected]> (raw)
In-Reply-To: <[email protected]>
From: Hao Xu <[email protected]>
Reduce acct->lock contention by batching the handling of the private
work list for fixed workers.
Signed-off-by: Hao Xu <[email protected]>
---
fs/io-wq.c | 42 +++++++++++++++++++++++++++++++++---------
fs/io-wq.h | 5 +++++
2 files changed, 38 insertions(+), 9 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 8fa5bfb298dc..807985249f62 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -539,8 +539,23 @@ static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
return ret;
}
+static inline void conditional_acct_lock(struct io_wqe_acct *acct,
+ bool needs_lock)
+{
+ if (needs_lock)
+ raw_spin_lock(&acct->lock);
+}
+
+static inline void conditional_acct_unlock(struct io_wqe_acct *acct,
+ bool needs_lock)
+{
+ if (needs_lock)
+ raw_spin_unlock(&acct->lock);
+}
+
static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
- struct io_worker *worker)
+ struct io_worker *worker,
+ bool needs_lock)
__must_hold(acct->lock)
{
struct io_wq_work_node *node, *prev;
@@ -548,6 +563,7 @@ static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
unsigned int stall_hash = -1U;
struct io_wqe *wqe = worker->wqe;
+ conditional_acct_lock(acct, needs_lock);
wq_list_for_each(node, prev, &acct->work_list) {
unsigned int hash;
@@ -556,6 +572,7 @@ static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
/* not hashed, can run anytime */
if (!io_wq_is_hashed(work)) {
wq_list_del(&acct->work_list, node, prev);
+ conditional_acct_unlock(acct, needs_lock);
return work;
}
@@ -567,6 +584,7 @@ static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
wqe->hash_tail[hash] = NULL;
wq_list_cut(&acct->work_list, &tail->list, prev);
+ conditional_acct_unlock(acct, needs_lock);
return work;
}
if (stall_hash == -1U)
@@ -583,15 +601,16 @@ static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
* work being added and clearing the stalled bit.
*/
set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
- raw_spin_unlock(&acct->lock);
+ conditional_acct_unlock(acct, needs_lock);
unstalled = io_wait_on_hash(wqe, stall_hash);
- raw_spin_lock(&acct->lock);
+ conditional_acct_lock(acct, needs_lock);
if (unstalled) {
clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
if (wq_has_sleeper(&wqe->wq->hash->wait))
wake_up(&wqe->wq->hash->wait);
}
}
+ conditional_acct_unlock(acct, needs_lock);
return NULL;
}
@@ -625,7 +644,7 @@ static void io_assign_current_work(struct io_worker *worker,
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
static void io_worker_handle_work(struct io_worker *worker,
- struct io_wqe_acct *acct)
+ struct io_wqe_acct *acct, bool needs_lock)
{
struct io_wqe *wqe = worker->wqe;
struct io_wq *wq = wqe->wq;
@@ -641,9 +660,7 @@ static void io_worker_handle_work(struct io_worker *worker,
* can't make progress, any work completion or insertion will
* clear the stalled flag.
*/
- raw_spin_lock(&acct->lock);
- work = io_get_next_work(acct, worker);
- raw_spin_unlock(&acct->lock);
+ work = io_get_next_work(acct, worker, needs_lock);
if (work) {
__io_worker_busy(wqe, worker);
@@ -700,12 +717,19 @@ static void io_worker_handle_work(struct io_worker *worker,
static inline void io_worker_handle_private_work(struct io_worker *worker)
{
- io_worker_handle_work(worker, &worker->acct);
+ struct io_wqe_acct acct;
+
+ raw_spin_lock(&worker->acct.lock);
+ acct = worker->acct;
+ wq_list_clean(&worker->acct.work_list);
+ worker->acct.nr_works = 0;
+ raw_spin_unlock(&worker->acct.lock);
+ io_worker_handle_work(worker, &acct, false);
}
static inline void io_worker_handle_public_work(struct io_worker *worker)
{
- io_worker_handle_work(worker, io_wqe_get_acct(worker));
+ io_worker_handle_work(worker, io_wqe_get_acct(worker), true);
}
static int io_wqe_worker(void *data)
diff --git a/fs/io-wq.h b/fs/io-wq.h
index dbecd27656c7..98befe7b0081 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -40,6 +40,11 @@ struct io_wq_work_list {
(list)->first = NULL; \
} while (0)
+static inline void wq_list_clean(struct io_wq_work_list *list)
+{
+ list->first = list->last = NULL;
+}
+
static inline void wq_list_add_after(struct io_wq_work_node *node,
struct io_wq_work_node *pos,
struct io_wq_work_list *list)
--
2.36.0
next prev parent reply other threads:[~2022-04-20 10:40 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-04-20 10:39 [RFC v2 0/9] fixed worker Hao Xu
2022-04-20 10:39 ` [PATCH 1/9] io-wq: add a worker flag for individual exit Hao Xu
2022-04-20 10:39 ` [PATCH 2/9] io-wq: change argument of create_io_worker() for convienence Hao Xu
2022-04-20 10:39 ` [PATCH 3/9] io-wq: add infra data structure for fixed workers Hao Xu
2022-04-20 10:39 ` [PATCH 4/9] io-wq: tweak io_get_acct() Hao Xu
2022-04-20 10:39 ` [PATCH 5/9] io-wq: fixed worker initialization Hao Xu
2022-04-20 10:39 ` [PATCH 6/9] io-wq: fixed worker exit Hao Xu
2022-04-20 10:39 ` [PATCH 7/9] io-wq: implement fixed worker logic Hao Xu
2022-04-20 10:39 ` Hao Xu [this message]
2022-04-20 10:40 ` [PATCH 9/9] io_uring: add register fixed worker interface Hao Xu
-- strict thread matches above, loose matches on Subject: below --
2022-04-29 10:18 [RFC v3 0/9] fixed worker Hao Xu
2022-04-29 10:18 ` [PATCH 8/9] io-wq: batch the handling of fixed worker private works Hao Xu
2021-11-24 4:46 [RFC 0/9] fixed worker: a new way to handle io works Hao Xu
2021-11-24 4:46 ` [PATCH 8/9] io-wq: batch the handling of fixed worker private works Hao Xu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
[email protected] \
[email protected] \
[email protected] \
[email protected] \
[email protected] \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox