mm/hugetlb: convert dissolve_free_huge_pages() to folios
Allows us to rename dissolve_free_huge_page() to
dissolve_free_hugetlb_folio(). Convert one caller to pass in a folio
directly and use page_folio() to convert the caller in mm/memory-failure.c.

[[email protected]: remove unneeded `extern']
  Link: https://lkml.kernel.org/r/[email protected]
[[email protected]: v2]
  Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Sidhartha Kumar <[email protected]>
Reviewed-by: Oscar Salvador <[email protected]>
Reviewed-by: Vishal Moola (Oracle) <[email protected]>
Reviewed-by: Miaohe Lin <[email protected]>
Cc: Jane Chu <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Muchun Song <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
sidkumar99 authored and akpm00 committed May 6, 2024
1 parent 452e862 commit 54fa49b
Showing 3 changed files with 14 additions and 15 deletions.
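
In short, the conversion boils down to the following two caller changes (a simplified sketch pulled from the diff below, not the complete functions):

	/* mm/hugetlb.c: dissolve_free_huge_pages() now hands a folio to the callee */
	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
		folio = pfn_folio(pfn);
		rc = dissolve_free_hugetlb_folio(folio);
		if (rc)
			break;
	}

	/* mm/memory-failure.c: __page_handle_poison() wraps its page with page_folio() */
	ret = dissolve_free_hugetlb_folio(page_folio(page));
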
include/linux/hugetlb.h (4 changes: 2 additions & 2 deletions)
@@ -861,7 +861,7 @@ static inline int hstate_index(struct hstate *h)
return h - hstates;
}

-extern int dissolve_free_huge_page(struct page *page);
+int dissolve_free_hugetlb_folio(struct folio *folio);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
unsigned long end_pfn);

@@ -1148,7 +1148,7 @@ static inline int hstate_index(struct hstate *h)
return 0;
}

-static inline int dissolve_free_huge_page(struct page *page)
+static inline int dissolve_free_hugetlb_folio(struct folio *folio)
{
return 0;
}
mm/hugetlb.c (17 changes: 8 additions & 9 deletions)
@@ -2377,8 +2377,8 @@ static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
}

/*
- * Dissolve a given free hugepage into free buddy pages. This function does
- * nothing for in-use hugepages and non-hugepages.
+ * Dissolve a given free hugetlb folio into free buddy pages. This function
+ * does nothing for in-use hugetlb folios and non-hugetlb folios.
* This function returns values like below:
*
* -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
@@ -2390,10 +2390,9 @@ static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
* 0: successfully dissolved free hugepages or the page is not a
* hugepage (considered as already dissolved)
*/
-int dissolve_free_huge_page(struct page *page)
+int dissolve_free_hugetlb_folio(struct folio *folio)
{
int rc = -EBUSY;
-	struct folio *folio = page_folio(page);

retry:
/* Not to disrupt normal path by vainly holding hugetlb_lock */
@@ -2470,13 +2469,13 @@ int dissolve_free_huge_page(struct page *page)
* make specified memory blocks removable from the system.
* Note that this will dissolve a free gigantic hugepage completely, if any
* part of it lies within the given range.
- * Also note that if dissolve_free_huge_page() returns with an error, all
- * free hugepages that were dissolved before that error are lost.
+ * Also note that if dissolve_free_hugetlb_folio() returns with an error, all
+ * free hugetlb folios that were dissolved before that error are lost.
*/
int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long pfn;
-	struct page *page;
+	struct folio *folio;
int rc = 0;
unsigned int order;
struct hstate *h;
@@ -2489,8 +2488,8 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
order = min(order, huge_page_order(h));

for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
-		page = pfn_to_page(pfn);
-		rc = dissolve_free_huge_page(page);
+		folio = pfn_folio(pfn);
+		rc = dissolve_free_hugetlb_folio(folio);
if (rc)
break;
}
mm/memory-failure.c (8 changes: 4 additions & 4 deletions)
@@ -155,7 +155,7 @@ static int __page_handle_poison(struct page *page)

/*
* zone_pcp_disable() can't be used here. It will
- * hold pcp_batch_high_lock and dissolve_free_huge_page() might hold
+ * hold pcp_batch_high_lock and dissolve_free_hugetlb_folio() might hold
* cpu_hotplug_lock via static_key_slow_dec() when hugetlb vmemmap
* optimization is enabled. This will break current lock dependency
* chain and leads to deadlock.
@@ -165,7 +165,7 @@ static int __page_handle_poison(struct page *page)
* but nothing guarantees that those pages do not get back to a PCP
* queue if we need to refill those.
*/
-	ret = dissolve_free_huge_page(page);
+	ret = dissolve_free_hugetlb_folio(page_folio(page));
if (!ret) {
drain_all_pages(page_zone(page));
ret = take_page_off_buddy(page);
@@ -178,8 +178,8 @@ static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, boo
{
if (hugepage_or_freepage) {
/*
- * Doing this check for free pages is also fine since dissolve_free_huge_page
- * returns 0 for non-hugetlb pages as well.
+ * Doing this check for free pages is also fine since
+ * dissolve_free_hugetlb_folio() returns 0 for non-hugetlb folios as well.
*/
if (__page_handle_poison(page) <= 0)
/*
