From: "Matthew Wilcox (Oracle)" <[email protected]>
To: Andrew Morton <[email protected]>
Cc: "Matthew Wilcox (Oracle)" <[email protected]>,
Jens Axboe <[email protected]>,
[email protected], [email protected]
Subject: [PATCH 2/9] mm: Call the hugetlb destructor directly
Date: Tue, 15 Aug 2023 04:26:38 +0100 [thread overview]
Message-ID: <[email protected]> (raw)
In-Reply-To: <[email protected]>
Indirect calls are expensive, thanks to Spectre. Convert this one to
a direct call, and pass a folio instead of the head page to save a few
more instructions.
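The cost being traded here is the Spectre v2 mitigation penalty (e.g. retpolines)
on indirect branches. A minimal userspace sketch of the two dispatch styles, with
purely illustrative names (not kernel code):

#include <stdio.h>

struct obj { int is_special; };

static void generic_dtor(struct obj *o) { printf("generic %p\n", (void *)o); }
static void special_dtor(struct obj *o) { printf("special %p\n", (void *)o); }

/* Before: indirect call through a function-pointer table. */
typedef void dtor_fn(struct obj *);
static dtor_fn * const dtors[] = { generic_dtor, special_dtor };

static void destroy_indirect(struct obj *o)
{
	dtors[o->is_special](o);	/* retpoline-penalised indirect call */
}

/* After: test a flag and make a direct, statically-predictable call. */
static void destroy_direct(struct obj *o)
{
	if (o->is_special) {
		special_dtor(o);	/* direct call, cheap to predict */
		return;
	}
	generic_dtor(o);
}

int main(void)
{
	struct obj o = { .is_special = 1 };

	destroy_indirect(&o);
	destroy_direct(&o);
	return 0;
}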
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
---
include/linux/hugetlb.h | 3 ++-
include/linux/mm.h | 6 +-----
mm/hugetlb.c | 26 ++++++++++++--------------
mm/page_alloc.c | 8 +++++---
4 files changed, 20 insertions(+), 23 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 0a393bc02f25..9555859537a3 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -26,6 +26,8 @@ typedef struct { unsigned long pd; } hugepd_t;
#define __hugepd(x) ((hugepd_t) { (x) })
#endif
+void free_huge_page(struct folio *folio);
+
#ifdef CONFIG_HUGETLB_PAGE
#include <linux/mempolicy.h>
@@ -165,7 +167,6 @@ int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared);
void folio_putback_active_hugetlb(struct folio *folio);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
-void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 19493d6a2bb8..7fb529dbff31 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1278,13 +1278,9 @@ typedef void compound_page_dtor(struct page *);
enum compound_dtor_id {
NULL_COMPOUND_DTOR,
COMPOUND_PAGE_DTOR,
-#ifdef CONFIG_HUGETLB_PAGE
HUGETLB_PAGE_DTOR,
-#endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
TRANSHUGE_PAGE_DTOR,
-#endif
- NR_COMPOUND_DTORS,
+ NR_COMPOUND_DTORS
};
static inline void folio_set_compound_dtor(struct folio *folio,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e327a5a7602c..bc340f5dbbd4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1875,13 +1875,12 @@ struct hstate *size_to_hstate(unsigned long size)
return NULL;
}
-void free_huge_page(struct page *page)
+void free_huge_page(struct folio *folio)
{
/*
* Can't pass hstate in here because it is called from the
* compound page destructor.
*/
- struct folio *folio = page_folio(page);
struct hstate *h = folio_hstate(folio);
int nid = folio_nid(folio);
struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
@@ -1936,7 +1935,7 @@ void free_huge_page(struct page *page)
spin_unlock_irqrestore(&hugetlb_lock, flags);
update_and_free_hugetlb_folio(h, folio, true);
} else {
- arch_clear_hugepage_flags(page);
+ arch_clear_hugepage_flags(&folio->page);
enqueue_hugetlb_folio(h, folio);
spin_unlock_irqrestore(&hugetlb_lock, flags);
}
@@ -2246,7 +2245,7 @@ static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
folio = alloc_fresh_hugetlb_folio(h, gfp_mask, node,
nodes_allowed, node_alloc_noretry);
if (folio) {
- free_huge_page(&folio->page); /* free it into the hugepage allocator */
+ free_huge_page(folio); /* free it into the hugepage allocator */
return 1;
}
}
@@ -2435,7 +2434,7 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
folio_set_hugetlb_temporary(folio);
spin_unlock_irq(&hugetlb_lock);
- free_huge_page(&folio->page);
+ free_huge_page(folio);
return NULL;
}
@@ -2547,8 +2546,7 @@ static int gather_surplus_pages(struct hstate *h, long delta)
__must_hold(&hugetlb_lock)
{
LIST_HEAD(surplus_list);
- struct folio *folio;
- struct page *page, *tmp;
+ struct folio *folio, *tmp;
int ret;
long i;
long needed, allocated;
@@ -2608,11 +2606,11 @@ static int gather_surplus_pages(struct hstate *h, long delta)
ret = 0;
/* Free the needed pages to the hugetlb pool */
- list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+ list_for_each_entry_safe(folio, tmp, &surplus_list, lru) {
if ((--needed) < 0)
break;
/* Add the page to the hugetlb allocator */
- enqueue_hugetlb_folio(h, page_folio(page));
+ enqueue_hugetlb_folio(h, folio);
}
free:
spin_unlock_irq(&hugetlb_lock);
@@ -2621,8 +2619,8 @@ static int gather_surplus_pages(struct hstate *h, long delta)
* Free unnecessary surplus pages to the buddy allocator.
* Pages have no ref count, call free_huge_page directly.
*/
- list_for_each_entry_safe(page, tmp, &surplus_list, lru)
- free_huge_page(page);
+ list_for_each_entry_safe(folio, tmp, &surplus_list, lru)
+ free_huge_page(folio);
spin_lock_irq(&hugetlb_lock);
return ret;
@@ -3232,7 +3230,7 @@ static void __init gather_bootmem_prealloc(void)
if (prep_compound_gigantic_folio(folio, huge_page_order(h))) {
WARN_ON(folio_test_reserved(folio));
prep_new_hugetlb_folio(h, folio, folio_nid(folio));
- free_huge_page(page); /* add to the hugepage allocator */
+ free_huge_page(folio); /* add to the hugepage allocator */
} else {
/* VERY unlikely inflated ref count on a tail page */
free_gigantic_folio(folio, huge_page_order(h));
@@ -3264,7 +3262,7 @@ static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
&node_states[N_MEMORY], NULL);
if (!folio)
break;
- free_huge_page(&folio->page); /* free it into the hugepage allocator */
+ free_huge_page(folio); /* free it into the hugepage allocator */
}
cond_resched();
}
@@ -3658,7 +3656,7 @@ static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
prep_compound_page(subpage, target_hstate->order);
folio_change_private(inner_folio, NULL);
prep_new_hugetlb_folio(target_hstate, inner_folio, nid);
- free_huge_page(subpage);
+ free_huge_page(inner_folio);
}
mutex_unlock(&target_hstate->resize_lock);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8fe9ff917850..1f67d4968590 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -287,9 +287,6 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
static compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
[NULL_COMPOUND_DTOR] = NULL,
[COMPOUND_PAGE_DTOR] = free_compound_page,
-#ifdef CONFIG_HUGETLB_PAGE
- [HUGETLB_PAGE_DTOR] = free_huge_page,
-#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
#endif
@@ -622,6 +619,11 @@ void destroy_large_folio(struct folio *folio)
{
enum compound_dtor_id dtor = folio->_folio_dtor;
+ if (folio_test_hugetlb(folio)) {
+ free_huge_page(folio);
+ return;
+ }
+
VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
compound_page_dtors[dtor](&folio->page);
}
--
2.40.1