From: Hao Xu <[email protected]>
To: Jens Axboe <[email protected]>
Cc: [email protected], Pavel Begunkov <[email protected]>,
Joseph Qi <[email protected]>
Subject: [PATCH 5/9] io-wq: move hash wait entry to io_wqe_acct
Date: Wed, 24 Nov 2021 12:46:44 +0800
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>
Move the hash wait entry from struct io_wqe to struct io_wqe_acct, since
we are going to add a private work list for io_worker in the next patch.
With one wait entry per acct, the hash wake callback can also clear the
stalled state of just the affected acct instead of iterating over all
accts of the wqe. This is preparation for the fixed io-worker feature.
Signed-off-by: Hao Xu <[email protected]>
---
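A note on the pattern this enables: with the wait entry embedded in
io_wqe_acct, the hash wake callback recovers the stalled acct directly
via container_of(), instead of looping over every acct of the wqe as
io_wqe_hash_wake() did before. Below is a minimal userspace sketch of
that pattern, with the structs trimmed to the relevant members; it is
illustrative only, not the kernel code.

/* Compile with any C compiler: cc -o sketch sketch.c */
#include <stddef.h>
#include <stdio.h>

/* Same definition the kernel uses, minus type checking. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct wait_queue_entry {
	int dummy;			/* stand-in for the real entry */
};

struct io_wqe_acct {
	unsigned long flags;
	struct wait_queue_entry wait;	/* embedded, as after this patch */
};

/* One wait entry per acct: the callback knows exactly which acct
 * stalled, so no loop over IO_WQ_ACCT_NR is needed. */
static int hash_wake(struct wait_queue_entry *wait)
{
	struct io_wqe_acct *acct =
		container_of(wait, struct io_wqe_acct, wait);

	printf("woke acct, flags=%lu\n", acct->flags);
	return 1;
}

int main(void)
{
	struct io_wqe_acct acct = { .flags = 1UL };

	hash_wake(&acct.wait);
	return 0;
}

A side effect of this layout is that a wakeup only touches the acct
that actually stalled, rather than rechecking both the bounded and
unbounded accts as the old per-wqe callback did.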
fs/io-wq.c | 45 ++++++++++++++++++++++++---------------------
1 file changed, 24 insertions(+), 21 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index dce365013bd5..44c3e344c5d6 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -77,6 +77,8 @@ struct io_wqe_acct {
raw_spinlock_t lock;
struct io_wq_work_list work_list;
unsigned long flags;
+ struct wait_queue_entry wait;
+ struct io_wqe *wqe;
};
enum {
@@ -97,8 +99,6 @@ struct io_wqe {
struct hlist_nulls_head free_list;
struct list_head all_list;
- struct wait_queue_entry wait;
-
struct io_wq *wq;
struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
@@ -431,16 +431,16 @@ static inline unsigned int io_get_work_hash(struct io_wq_work *work)
return work->flags >> IO_WQ_HASH_SHIFT;
}
-static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
+static void io_wait_on_hash(struct io_wqe_acct *acct, unsigned int hash)
{
- struct io_wq *wq = wqe->wq;
+ struct io_wq *wq = acct->wqe->wq;
spin_lock_irq(&wq->hash->wait.lock);
- if (list_empty(&wqe->wait.entry)) {
- __add_wait_queue(&wq->hash->wait, &wqe->wait);
+ if (list_empty(&acct->wait.entry)) {
+ __add_wait_queue(&wq->hash->wait, &acct->wait);
if (!test_bit(hash, &wq->hash->map)) {
__set_current_state(TASK_RUNNING);
- list_del_init(&wqe->wait.entry);
+ list_del_init(&acct->wait.entry);
}
}
spin_unlock_irq(&wq->hash->wait.lock);
@@ -489,7 +489,7 @@ static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
*/
set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
raw_spin_unlock(&acct->lock);
- io_wait_on_hash(wqe, stall_hash);
+ io_wait_on_hash(acct, stall_hash);
raw_spin_lock(&acct->lock);
}
@@ -1076,19 +1076,17 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
int sync, void *key)
{
- struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
- int i;
+ struct io_wqe_acct *acct = container_of(wait, struct io_wqe_acct, wait);
+ bool ret;
list_del_init(&wait->entry);
-
- rcu_read_lock();
- for (i = 0; i < IO_WQ_ACCT_NR; i++) {
- struct io_wqe_acct *acct = &wqe->acct[i];
-
- if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
- io_wqe_activate_free_worker(wqe, acct);
+ ret = test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
+ if (ret) {
+ rcu_read_lock();
+ io_wqe_activate_free_worker(acct->wqe, acct);
+ rcu_read_unlock();
}
- rcu_read_unlock();
+
return 1;
}
@@ -1132,8 +1130,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
task_rlimit(current, RLIMIT_NPROC);
- INIT_LIST_HEAD(&wqe->wait.entry);
- wqe->wait.func = io_wqe_hash_wake;
+
for (i = 0; i < IO_WQ_ACCT_NR; i++) {
struct io_wqe_acct *acct = &wqe->acct[i];
@@ -1141,6 +1138,9 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
atomic_set(&acct->nr_running, 0);
INIT_WQ_LIST(&acct->work_list);
raw_spin_lock_init(&acct->lock);
+ INIT_LIST_HEAD(&acct->wait.entry);
+ acct->wait.func = io_wqe_hash_wake;
+ acct->wqe = wqe;
}
wqe->wq = wq;
raw_spin_lock_init(&wqe->lock);
@@ -1207,8 +1207,11 @@ static void io_wq_exit_workers(struct io_wq *wq)
wait_for_completion(&wq->worker_done);
for_each_node(node) {
+ int i;
+
spin_lock_irq(&wq->hash->wait.lock);
- list_del_init(&wq->wqes[node]->wait.entry);
+ for (i = 0; i < IO_WQ_ACCT_NR; i++)
+ list_del_init(&wq->wqes[node]->acct[i].wait.entry);
spin_unlock_irq(&wq->hash->wait.lock);
}
put_task_struct(wq->task);
--
2.24.4
Thread overview: 14+ messages
2021-11-24 4:46 [RFC 0/9] fixed worker: a new way to handle io works Hao Xu
2021-11-24 4:46 ` [PATCH 1/9] io-wq: decouple work_list protection from the big wqe->lock Hao Xu
2021-11-24 4:46 ` [PATCH 2/9] io-wq: reduce acct->lock crossing functions lock/unlock Hao Xu
2021-11-24 4:46 ` [PATCH 3/9] io-wq: update check condition for lock Hao Xu
2021-11-25 14:47 ` Pavel Begunkov
2021-11-30 3:32 ` Hao Xu
2021-11-24 4:46 ` [PATCH 4/9] io-wq: use IO_WQ_ACCT_NR rather than hardcoded number Hao Xu
2021-11-24 4:46 ` Hao Xu [this message]
2021-11-24 4:46 ` [PATCH 6/9] io-wq: add infra data structure for fixed workers Hao Xu
2021-11-24 4:46 ` [PATCH 7/9] io-wq: implement fixed worker logic Hao Xu
2021-11-24 4:46 ` [PATCH 8/9] io-wq: batch the handling of fixed worker private works Hao Xu
2021-11-24 4:46 ` [PATCH 9/9] io-wq: small optimization for __io_worker_busy() Hao Xu
2021-11-25 15:09 ` [RFC 0/9] fixed worker: a new way to handle io works Pavel Begunkov
2021-11-30 3:48 ` Hao Xu