From: Jens Axboe <[email protected]>
To: [email protected]
Cc: [email protected], Jens Axboe <[email protected]>
Subject: [PATCH 1/8] io-wq: remove now redundant struct io_wq_nulls_list
Date: Fri, 15 Nov 2019 18:53:07 -0700
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>

Since we don't iterate these lists anymore after commit:

e61df66c69b1 ("io-wq: ensure free/busy list browsing see all items")

we don't need to retain the nulls value we use for them. That means it's
pretty pointless to wrap the hlist_nulls_head in a structure, so get rid
of it.

Signed-off-by: Jens Axboe <[email protected]>
---
 fs/io-wq.c | 29 ++++++++++-------------------
 1 file changed, 10 insertions(+), 19 deletions(-)
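
For reference, a minimal sketch of the lookup/restart pattern described
in the kernel's rculist_nulls documentation, which is what a stored
per-list nulls value exists to support. This is not taken from io-wq;
the obj/obj_table names and the two-slot table are hypothetical:

  #include <linux/list_nulls.h>
  #include <linux/rculist_nulls.h>
  #include <linux/rcupdate.h>

  struct obj {
  	int key;
  	struct hlist_nulls_node node;
  };

  #define NR_SLOTS 2
  static struct hlist_nulls_head obj_table[NR_SLOTS];

  static void obj_table_init(void)
  {
  	int i;

  	/* each chain gets its slot index as its nulls value */
  	for (i = 0; i < NR_SLOTS; i++)
  		INIT_HLIST_NULLS_HEAD(&obj_table[i], i);
  }

  static struct obj *obj_lookup(unsigned long slot, int key)
  {
  	struct hlist_nulls_node *n;
  	struct obj *o;

  begin:
  	rcu_read_lock();
  	hlist_nulls_for_each_entry_rcu(o, n, &obj_table[slot], node) {
  		if (o->key == key) {
  			/* real code would grab a reference here */
  			rcu_read_unlock();
  			return o;
  		}
  	}
  	/*
  	 * The walk ended on a nulls marker. If its value isn't the
  	 * slot we started in, an object was moved to another chain
  	 * mid-walk and entries may have been skipped: restart.
  	 */
  	if (get_nulls_value(n) != slot) {
  		rcu_read_unlock();
  		goto begin;
  	}
  	rcu_read_unlock();
  	return NULL;
  }

With nothing browsing the io-wq free/busy lists anymore, there is no
reader left to compare get_nulls_value() against, so the stored value
(and the wrapper struct holding it) can go; the distinct 0/1 values
passed to INIT_HLIST_NULLS_HEAD() below are all that remain.
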

diff --git a/fs/io-wq.c b/fs/io-wq.c
index fcb6c74209da..9174007ce107 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -59,11 +59,6 @@ struct io_worker {
 	struct files_struct *restore_files;
 };
 
-struct io_wq_nulls_list {
-	struct hlist_nulls_head head;
-	unsigned long nulls;
-};
-
 #if BITS_PER_LONG == 64
 #define IO_WQ_HASH_ORDER	6
 #else
@@ -95,8 +90,8 @@ struct io_wqe {
 	int node;
 	struct io_wqe_acct acct[2];
 
-	struct io_wq_nulls_list free_list;
-	struct io_wq_nulls_list busy_list;
+	struct hlist_nulls_head free_list;
+	struct hlist_nulls_head busy_list;
 	struct list_head all_list;
 
 	struct io_wq *wq;
@@ -249,7 +244,7 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
 	struct hlist_nulls_node *n;
 	struct io_worker *worker;
 
-	n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list.head));
+	n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
 	if (is_a_nulls(n))
 		return false;
 
@@ -325,8 +320,7 @@ static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
 	if (worker->flags & IO_WORKER_F_FREE) {
 		worker->flags &= ~IO_WORKER_F_FREE;
 		hlist_nulls_del_init_rcu(&worker->nulls_node);
-		hlist_nulls_add_head_rcu(&worker->nulls_node,
-						&wqe->busy_list.head);
+		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->busy_list);
 	}
 
 	/*
@@ -365,8 +359,7 @@ static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
 	if (!(worker->flags & IO_WORKER_F_FREE)) {
 		worker->flags |= IO_WORKER_F_FREE;
 		hlist_nulls_del_init_rcu(&worker->nulls_node);
-		hlist_nulls_add_head_rcu(&worker->nulls_node,
-						&wqe->free_list.head);
+		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
 	}
 
 	return __io_worker_unuse(wqe, worker);
@@ -592,7 +585,7 @@ static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
 	}
 
 	spin_lock_irq(&wqe->lock);
-	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list.head);
+	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
 	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
 	worker->flags |= IO_WORKER_F_FREE;
 	if (index == IO_WQ_ACCT_BOUND)
@@ -617,7 +610,7 @@ static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
 	if (index == IO_WQ_ACCT_BOUND && !acct->nr_workers)
 		return true;
 	/* if we have available workers or no work, no need */
-	if (!hlist_nulls_empty(&wqe->free_list.head) || !io_wqe_run_queue(wqe))
+	if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
 		return false;
 	return acct->nr_workers < acct->max_workers;
 }
@@ -665,7 +658,7 @@ static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
 		return true;
 
 	rcu_read_lock();
-	free_worker = !hlist_nulls_empty(&wqe->free_list.head);
+	free_worker = !hlist_nulls_empty(&wqe->free_list);
 	rcu_read_unlock();
 	if (free_worker)
 		return true;
@@ -1009,10 +1002,8 @@ struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
 		wqe->wq = wq;
 		spin_lock_init(&wqe->lock);
 		INIT_LIST_HEAD(&wqe->work_list);
-		INIT_HLIST_NULLS_HEAD(&wqe->free_list.head, 0);
-		wqe->free_list.nulls = 0;
-		INIT_HLIST_NULLS_HEAD(&wqe->busy_list.head, 1);
-		wqe->busy_list.nulls = 1;
+		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
+		INIT_HLIST_NULLS_HEAD(&wqe->busy_list, 1);
 		INIT_LIST_HEAD(&wqe->all_list);
 
 		i++;
-- 
2.24.0

