Skip to content

Commit

Permalink
hugetlb: restrict hugepage_migration_support() to x86_64
Browse files Browse the repository at this point in the history
Currently hugepage migration is available for all archs which support
pmd-level hugepage, but testing is done only for x86_64 and there are
bugs for other archs.  So to avoid breaking such archs, this patch
limits the availability strictly to x86_64 until developers of other
archs get interested in enabling this feature.

Simply disabling hugepage migration on non-x86_64 archs is not enough to
fix the reported problem where sys_move_pages() hits the BUG_ON() in
follow_page(FOLL_GET), so let's fix this by checking if hugepage
migration is supported in vma_migratable().

Signed-off-by: Naoya Horiguchi <[email protected]>
Reported-by: Michael Ellerman <[email protected]>
Tested-by: Michael Ellerman <[email protected]>
Acked-by: Hugh Dickins <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: Russell King <[email protected]>
Cc: Martin Schwidefsky <[email protected]>
Cc: James Hogan <[email protected]>
Cc: Ralf Baechle <[email protected]>
Cc: David Miller <[email protected]>
Cc: <[email protected]>	[3.12+]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
Naoya Horiguchi authored and torvalds committed Jun 4, 2014
1 parent 7f39dda commit c177c81
Show file tree
Hide file tree
Showing 15 changed files with 18 additions and 73 deletions.
5 changes: 0 additions & 5 deletions arch/arm/mm/hugetlbpage.c
Original file line number Diff line number Diff line change
Expand Up @@ -56,8 +56,3 @@ int pmd_huge(pmd_t pmd)
{
return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}

/* Report that this architecture implements pmd-level huge pages. */
int pmd_huge_support(void)
{
	static const int has_pmd_hugepages = 1;

	return has_pmd_hugepages;
}
5 changes: 0 additions & 5 deletions arch/arm64/mm/hugetlbpage.c
Original file line number Diff line number Diff line change
Expand Up @@ -58,11 +58,6 @@ int pud_huge(pud_t pud)
#endif
}

/* Non-zero: pmd-level huge pages are implemented on this architecture. */
int pmd_huge_support(void)
{
	static const int pmd_hugepages_available = 1;

	return pmd_hugepages_available;
}

static __init int setup_hugepagesz(char *opt)
{
unsigned long ps = memparse(opt, &opt);
Expand Down
5 changes: 0 additions & 5 deletions arch/ia64/mm/hugetlbpage.c
Original file line number Diff line number Diff line change
Expand Up @@ -114,11 +114,6 @@ int pud_huge(pud_t pud)
return 0;
}

/* Zero: pmd-level huge pages are not provided on this architecture. */
int pmd_huge_support(void)
{
	static const int pmd_hugepages_available = 0;

	return pmd_hugepages_available;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
Expand Down
5 changes: 0 additions & 5 deletions arch/metag/mm/hugetlbpage.c
Original file line number Diff line number Diff line change
Expand Up @@ -110,11 +110,6 @@ int pud_huge(pud_t pud)
return 0;
}

/* Report that pmd-level huge pages are available here. */
int pmd_huge_support(void)
{
	static const int has_pmd_hugepages = 1;

	return has_pmd_hugepages;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
{
Expand Down
5 changes: 0 additions & 5 deletions arch/mips/mm/hugetlbpage.c
Original file line number Diff line number Diff line change
Expand Up @@ -84,11 +84,6 @@ int pud_huge(pud_t pud)
return (pud_val(pud) & _PAGE_HUGE) != 0;
}

/* Non-zero: this architecture supports pmd-level huge pages. */
int pmd_huge_support(void)
{
	static const int pmd_hugepages_available = 1;

	return pmd_hugepages_available;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
Expand Down
10 changes: 0 additions & 10 deletions arch/powerpc/mm/hugetlbpage.c
Original file line number Diff line number Diff line change
Expand Up @@ -86,11 +86,6 @@ int pgd_huge(pgd_t pgd)
*/
return ((pgd_val(pgd) & 0x3) != 0x0);
}

/* This configuration provides pmd-level huge pages. */
int pmd_huge_support(void)
{
	static const int has_pmd_hugepages = 1;

	return has_pmd_hugepages;
}
#else
int pmd_huge(pmd_t pmd)
{
Expand All @@ -106,11 +101,6 @@ int pgd_huge(pgd_t pgd)
{
return 0;
}

/* Fallback configuration: no pmd-level huge page support. */
int pmd_huge_support(void)
{
	static const int has_pmd_hugepages = 0;

	return has_pmd_hugepages;
}
#endif

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
Expand Down
5 changes: 0 additions & 5 deletions arch/s390/mm/hugetlbpage.c
Original file line number Diff line number Diff line change
Expand Up @@ -220,11 +220,6 @@ int pud_huge(pud_t pud)
return 0;
}

/* Non-zero: pmd-level huge pages are implemented here. */
int pmd_huge_support(void)
{
	static const int pmd_hugepages_available = 1;

	return pmd_hugepages_available;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmdp, int write)
{
Expand Down
5 changes: 0 additions & 5 deletions arch/sh/mm/hugetlbpage.c
Original file line number Diff line number Diff line change
Expand Up @@ -83,11 +83,6 @@ int pud_huge(pud_t pud)
return 0;
}

/* Zero: this architecture does not use pmd-level huge pages. */
int pmd_huge_support(void)
{
	static const int pmd_hugepages_available = 0;

	return pmd_hugepages_available;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
{
Expand Down
5 changes: 0 additions & 5 deletions arch/sparc/mm/hugetlbpage.c
Original file line number Diff line number Diff line change
Expand Up @@ -231,11 +231,6 @@ int pud_huge(pud_t pud)
return 0;
}

/* Zero: pmd-level huge pages are not available on this architecture. */
int pmd_huge_support(void)
{
	static const int has_pmd_hugepages = 0;

	return has_pmd_hugepages;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
{
Expand Down
5 changes: 0 additions & 5 deletions arch/tile/mm/hugetlbpage.c
Original file line number Diff line number Diff line change
Expand Up @@ -166,11 +166,6 @@ int pud_huge(pud_t pud)
return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}

/* Non-zero: pmd-level huge pages are supported here. */
int pmd_huge_support(void)
{
	static const int has_pmd_hugepages = 1;

	return has_pmd_hugepages;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
{
Expand Down
4 changes: 4 additions & 0 deletions arch/x86/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -1873,6 +1873,10 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
def_bool y
depends on X86_64 || X86_PAE

config ARCH_ENABLE_HUGEPAGE_MIGRATION
def_bool y
depends on X86_64 && HUGETLB_PAGE && MIGRATION

menu "Power management and ACPI options"

config ARCH_HIBERNATION_HEADER
Expand Down
10 changes: 0 additions & 10 deletions arch/x86/mm/hugetlbpage.c
Original file line number Diff line number Diff line change
Expand Up @@ -58,11 +58,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
{
return NULL;
}

/* This build configuration has no pmd-level huge page support. */
int pmd_huge_support(void)
{
	static const int pmd_hugepages_available = 0;

	return pmd_hugepages_available;
}
#else

struct page *
Expand All @@ -80,11 +75,6 @@ int pud_huge(pud_t pud)
{
return !!(pud_val(pud) & _PAGE_PSE);
}

/* This build configuration provides pmd-level huge pages. */
int pmd_huge_support(void)
{
	static const int pmd_hugepages_available = 1;

	return pmd_hugepages_available;
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
Expand Down
13 changes: 5 additions & 8 deletions include/linux/hugetlb.h
Original file line number Diff line number Diff line change
Expand Up @@ -392,15 +392,13 @@ static inline pgoff_t basepage_index(struct page *page)

extern void dissolve_free_huge_pages(unsigned long start_pfn,
unsigned long end_pfn);
int pmd_huge_support(void);
/*
 * Hugepage migration currently works only for pmd-sized hugepages, and
 * only on architectures that select ARCH_ENABLE_HUGEPAGE_MIGRATION.
 * This check will grow as migration support is extended.
 */
static inline int hugepage_migration_support(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	int migratable = (huge_page_shift(h) == PMD_SHIFT);
#else
	int migratable = 0;
#endif
	return migratable;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
Expand Down Expand Up @@ -450,7 +448,6 @@ static inline pgoff_t basepage_index(struct page *page)
return page->index;
}
#define dissolve_free_huge_pages(s, e) do {} while (0)
#define pmd_huge_support() 0
#define hugepage_migration_support(h) 0

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
Expand Down
6 changes: 6 additions & 0 deletions include/linux/mempolicy.h
Original file line number Diff line number Diff line change
Expand Up @@ -175,6 +175,12 @@ static inline int vma_migratable(struct vm_area_struct *vma)
{
if (vma->vm_flags & (VM_IO | VM_PFNMAP))
return 0;

#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
if (vma->vm_flags & VM_HUGETLB)
return 0;
#endif

/*
* Migration allocates pages in the highest zone. If we cannot
* do so then migration (at least from node to node) is not
Expand Down
3 changes: 3 additions & 0 deletions mm/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -267,6 +267,9 @@ config MIGRATION
pages as migration can relocate pages to satisfy a huge page
allocation instead of reclaiming.

config ARCH_ENABLE_HUGEPAGE_MIGRATION
boolean

config PHYS_ADDR_T_64BIT
def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT

Expand Down

0 comments on commit c177c81

Please sign in to comment.