mm: turn folio_test_hugetlb into a PageType
The current folio_test_hugetlb() can be fooled by a concurrent folio split
into returning true for a folio which has never belonged to hugetlbfs. 
This can't happen if the caller holds a refcount on it, but we have a few
places (memory-failure, compaction, procfs) which do not and should not
take a speculative reference.
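
For context, the check being removed (see the page-flags.h hunk below) tested a page flag on the folio's second page, where PG_hugetlb was only an alias for PG_active.  The annotated copy below is an illustration of the race window, not a trace of the reported failure:

static inline bool folio_test_hugetlb(const struct folio *folio)
{
	return folio_test_large(folio) &&
		/*
		 * A concurrent split can complete here: page 1 of the old
		 * folio becomes an ordinary page again, and a PG_active bit
		 * legitimately set on it reads back below as PG_hugetlb, so a
		 * folio that never belonged to hugetlbfs tests true.  With
		 * CONFIG_DEBUG_VM, the VM_BUG_ON() checks inside
		 * const_folio_flags() can also fire once the folio is no
		 * longer a head page, which matches the oops described below.
		 */
		test_bit(PG_hugetlb, const_folio_flags(folio, 1));
}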

Since hugetlb pages do not use individual page mapcounts (they are always
fully mapped and use the entire_mapcount field to record the number of
mappings), the PageType field is available now that page_mapcount()
ignores the value in this field.
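
As a worked example of that encoding (assuming PAGE_TYPE_BASE is 0xf0000000, as in kernels of this vintage, and using the PG_hugetlb value added below):

/*
 * page_type starts out as -1 (0xffffffff).  "Setting" a type clears its
 * bit, so after __folio_set_hugetlb():
 *
 *	page_type = 0xffffffff & ~0x00000800 = 0xfffff7ff
 *
 * and folio_test_hugetlb() checks
 *
 *	(0xfffff7ff & (0xf0000000 | 0x00000800)) == 0xf0000000	-> true
 *
 * A field holding a real mapcount (say 5) or the untyped -1 fails the same
 * test, so a racing reader sees "not hugetlb" instead of a value fabricated
 * from another page's flag bits.
 */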

In compaction and with CONFIG_DEBUG_VM enabled, the current implementation
can result in an oops, as reported by Luis. This happens since 9c5ccf2
("mm: remove HUGETLB_PAGE_DTOR") effectively added some VM_BUG_ON() checks
in the PageHuge() testing path.

[[email protected]: update vmcoreinfo]
  Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Fixes: 9c5ccf2 ("mm: remove HUGETLB_PAGE_DTOR")
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Reviewed-by: David Hildenbrand <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Reported-by: Luis Chamberlain <[email protected]>
Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218227
Cc: Miaohe Lin <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Matthew Wilcox (Oracle) authored and akpm00 committed Apr 25, 2024
1 parent fd1a745 commit d99e314
Showing 4 changed files with 39 additions and 59 deletions.
70 changes: 33 additions & 37 deletions include/linux/page-flags.h
@@ -190,7 +190,6 @@ enum pageflags {

/* At least one page in this folio has the hwpoison flag set */
PG_has_hwpoisoned = PG_error,
PG_hugetlb = PG_active,
PG_large_rmappable = PG_workingset, /* anon or file-backed */
};

@@ -876,29 +875,6 @@ TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable)

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(const struct page *page);
SETPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
CLEARPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)

/**
* folio_test_hugetlb - Determine if the folio belongs to hugetlbfs
* @folio: The folio to test.
*
* Context: Any context. Caller should have a reference on the folio to
* prevent it from being turned into a tail page.
* Return: True for hugetlbfs folios, false for anon folios or folios
* belonging to other filesystems.
*/
static inline bool folio_test_hugetlb(const struct folio *folio)
{
return folio_test_large(folio) &&
test_bit(PG_hugetlb, const_folio_flags(folio, 1));
}
#else
TESTPAGEFLAG_FALSE(Huge, hugetlb)
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* PageHuge() only returns true for hugetlbfs pages, but not for
@@ -954,18 +930,6 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
* Check if a page is currently marked HWPoisoned. Note that this check is
* best effort only and inherently racy: there is no way to synchronize with
* failing hardware.
*/
static inline bool is_page_hwpoison(struct page *page)
{
if (PageHWPoison(page))
return true;
return PageHuge(page) && PageHWPoison(compound_head(page));
}

/*
* For pages that are never mapped to userspace (and aren't PageSlab),
* page_type may be used. Because it is initialised to -1, we invert the
@@ -982,6 +946,7 @@ static inline bool is_page_hwpoison(struct page *page)
#define PG_offline 0x00000100
#define PG_table 0x00000200
#define PG_guard 0x00000400
#define PG_hugetlb 0x00000800

#define PageType(page, flag) \
((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
@@ -1076,6 +1041,37 @@ PAGE_TYPE_OPS(Table, table, pgtable)
*/
PAGE_TYPE_OPS(Guard, guard, guard)

#ifdef CONFIG_HUGETLB_PAGE
FOLIO_TYPE_OPS(hugetlb, hugetlb)
#else
FOLIO_TEST_FLAG_FALSE(hugetlb)
#endif

/**
* PageHuge - Determine if the page belongs to hugetlbfs
* @page: The page to test.
*
* Context: Any context.
* Return: True for hugetlbfs pages, false for anon pages or pages
* belonging to other filesystems.
*/
static inline bool PageHuge(const struct page *page)
{
return folio_test_hugetlb(page_folio(page));
}

/*
* Check if a page is currently marked HWPoisoned. Note that this check is
* best effort only and inherently racy: there is no way to synchronize with
* failing hardware.
*/
static inline bool is_page_hwpoison(struct page *page)
{
if (PageHWPoison(page))
return true;
return PageHuge(page) && PageHWPoison(compound_head(page));
}

extern bool is_free_buddy_page(struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);
@@ -1142,7 +1138,7 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
*/
#define PAGE_FLAGS_SECOND \
(0xffUL /* order */ | 1UL << PG_has_hwpoisoned | \
1UL << PG_hugetlb | 1UL << PG_large_rmappable)
1UL << PG_large_rmappable)

#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
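
FOLIO_TYPE_OPS(hugetlb, hugetlb) above comes from macro machinery introduced earlier in this series; under its naming scheme it should expand to roughly the helpers sketched here (the real expansion also carries VM_BUG_ON_FOLIO() sanity checks):

/* Sketch only -- approximate expansion of FOLIO_TYPE_OPS(hugetlb, hugetlb). */
static __always_inline bool folio_test_hugetlb(const struct folio *folio)
{
	return folio_test_type(folio, PG_hugetlb);
}

static __always_inline void __folio_set_hugetlb(struct folio *folio)
{
	folio->page.page_type &= ~PG_hugetlb;
}

static __always_inline void __folio_clear_hugetlb(struct folio *folio)
{
	folio->page.page_type |= PG_hugetlb;
}

Page types only come with non-atomic setters, which is why the mm/hugetlb.c hunks below move from folio_set_hugetlb()/folio_clear_hugetlb() to the __-prefixed variants.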
1 change: 1 addition & 0 deletions include/trace/events/mmflags.h
@@ -135,6 +135,7 @@ IF_HAVE_PG_ARCH_X(arch_3)
#define DEF_PAGETYPE_NAME(_name) { PG_##_name, __stringify(_name) }

#define __def_pagetype_names \
DEF_PAGETYPE_NAME(hugetlb), \
DEF_PAGETYPE_NAME(offline), \
DEF_PAGETYPE_NAME(guard), \
DEF_PAGETYPE_NAME(table), \
5 changes: 2 additions & 3 deletions kernel/vmcore_info.c
@@ -205,11 +205,10 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_NUMBER(PG_head_mask);
#define PAGE_BUDDY_MAPCOUNT_VALUE (~PG_buddy)
VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
#ifdef CONFIG_HUGETLB_PAGE
VMCOREINFO_NUMBER(PG_hugetlb);
#define PAGE_HUGETLB_MAPCOUNT_VALUE (~PG_hugetlb)
VMCOREINFO_NUMBER(PAGE_HUGETLB_MAPCOUNT_VALUE);
#define PAGE_OFFLINE_MAPCOUNT_VALUE (~PG_offline)
VMCOREINFO_NUMBER(PAGE_OFFLINE_MAPCOUNT_VALUE);
#endif

#ifdef CONFIG_KALLSYMS
VMCOREINFO_SYMBOL(kallsyms_names);
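
For dump tools, the exported number works out as follows; reading the *_MAPCOUNT_VALUE naming as a nod to page_type sharing storage with the old _mapcount field is my inference, not something the patch states:

#define PG_hugetlb			0x00000800
#define PAGE_HUGETLB_MAPCOUNT_VALUE	(~PG_hugetlb)	/* 0xfffff7ff, i.e. -2049 */

Dropping the #ifdef also makes the PAGE_OFFLINE_MAPCOUNT_VALUE export unconditional rather than dependent on CONFIG_HUGETLB_PAGE.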
22 changes: 3 additions & 19 deletions mm/hugetlb.c
@@ -1624,7 +1624,7 @@ static inline void __clear_hugetlb_destructor(struct hstate *h,
{
lockdep_assert_held(&hugetlb_lock);

folio_clear_hugetlb(folio);
__folio_clear_hugetlb(folio);
}

/*
@@ -1711,7 +1711,7 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
h->surplus_huge_pages_node[nid]++;
}

folio_set_hugetlb(folio);
__folio_set_hugetlb(folio);
folio_change_private(folio, NULL);
/*
* We have to set hugetlb_vmemmap_optimized again as above
@@ -2049,7 +2049,7 @@ static void __prep_account_new_huge_page(struct hstate *h, int nid)

static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
{
folio_set_hugetlb(folio);
__folio_set_hugetlb(folio);
INIT_LIST_HEAD(&folio->lru);
hugetlb_set_folio_subpool(folio, NULL);
set_hugetlb_cgroup(folio, NULL);
@@ -2159,22 +2159,6 @@ static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
return __prep_compound_gigantic_folio(folio, order, true);
}

/*
* PageHuge() only returns true for hugetlbfs pages, but not for normal or
* transparent huge pages. See the PageTransHuge() documentation for more
* details.
*/
int PageHuge(const struct page *page)
{
const struct folio *folio;

if (!PageCompound(page))
return 0;
folio = page_folio(page);
return folio_test_hugetlb(folio);
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
* Find and lock address space (mapping) in write mode.
*
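
With the out-of-line PageHuge() gone, the test is a header-only read of the head page's page_type.  A hypothetical caller of the unreferenced kind the commit message mentions (memory-failure, compaction, procfs) would look like this; the helper name is illustrative, not from the patch:

/* Hypothetical: classify a (valid) pfn without taking a reference. */
static bool pfn_is_hugetlb(unsigned long pfn)
{
	return PageHuge(pfn_to_page(pfn));
}

The answer can still go stale under the caller, but it can no longer be fabricated from a stray PG_active bit on what used to be a tail page, and the VM_BUG_ON() path that oopsed under CONFIG_DEBUG_VM is gone.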
