Revert "x86_64: Quicklist support for x86_64"
This reverts commit 34feb2c.

Suresh Siddha points out that this one breaks the fundamental
requirement that you cannot free page table pages before the TLB caches
are flushed.  The quicklists do not give the same kinds of guarantees
that the mmu_gather structure does, at least not in NUMA configurations.

Requested-by: Suresh Siddha <[email protected]>
Acked-by: Andi Kleen <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Asit Mallick <[email protected]>
Cc: Tony Luck <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Linus Torvalds committed Sep 21, 2007
1 parent 077a6c2 commit da8f153
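
The ordering issue Suresh describes can be sketched in a few lines of userspace C. This is a model only, not kernel code: gather_sim, quicklist_free_sim and flush_tlb_sim are hypothetical stand-ins for mmu_gather, the quicklists and the TLB flush. The point is that the mmu_gather pattern batches freed page-table pages and releases them only after the flush, while a quicklist-style freelist can hand a page out for reuse while another CPU may still hold a stale translation to it.

/*
 * Userspace sketch only -- not kernel code.  Models why page-table pages
 * must not be freed (and reused) before the TLB flush: the mmu_gather-style
 * path batches pages and releases them only after the simulated flush,
 * while the quicklist-style path puts the page straight back on a freelist
 * where it could be handed out again while stale TLB entries still exist.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_BATCH 8

struct gather_sim {			/* hypothetical stand-in for struct mmu_gather */
	void *pages[MAX_BATCH];
	int nr;
};

static void flush_tlb_sim(void)
{
	puts("flush: stale translations gone, batched pages now safe to free");
}

/* mmu_gather ordering: remember the page, free it only after the flush. */
static void gather_remove_page(struct gather_sim *gs, void *page)
{
	if (gs->nr < MAX_BATCH)
		gs->pages[gs->nr++] = page;
}

static void gather_finish(struct gather_sim *gs)
{
	flush_tlb_sim();			/* flush first ...         */
	while (gs->nr > 0)
		free(gs->pages[--gs->nr]);	/* ... then free the pages */
}

/* quicklist ordering: the page is reusable immediately, before any flush. */
static void *quick_freelist;

static void quicklist_free_sim(void *page)
{
	quick_freelist = page;		/* too early: no flush has happened yet */
}

int main(void)
{
	struct gather_sim gs = { .nr = 0 };

	void *pte_page = malloc(4096);
	gather_remove_page(&gs, pte_page);
	gather_finish(&gs);		/* safe: flush, then free */

	void *other_pte = malloc(4096);
	quicklist_free_sim(other_pte);	/* unsafe ordering this commit reverts */
	free(quick_freelist);
	return 0;
}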
Showing 5 changed files with 26 additions and 59 deletions.
8 changes: 0 additions & 8 deletions arch/x86_64/Kconfig
@@ -60,14 +60,6 @@ config ZONE_DMA
 	bool
 	default y
 
-config QUICKLIST
-	bool
-	default y
-
-config NR_QUICK
-	int
-	default 2
-
 config ISA
 	bool
 
1 change: 0 additions & 1 deletion arch/x86_64/kernel/process.c
@@ -208,7 +208,6 @@ void cpu_idle (void)
 			if (__get_cpu_var(cpu_idle_state))
 				__get_cpu_var(cpu_idle_state) = 0;
 
-			check_pgt_cache();
 			rmb();
 			idle = pm_idle;
 			if (!idle)
2 changes: 1 addition & 1 deletion arch/x86_64/kernel/smp.c
@@ -241,7 +241,7 @@ void flush_tlb_mm (struct mm_struct * mm)
 	}
 	if (!cpus_empty(cpu_mask))
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-	check_pgt_cache();
+
 	preempt_enable();
 }
 EXPORT_SYMBOL(flush_tlb_mm);
73 changes: 24 additions & 49 deletions include/asm-x86_64/pgalloc.h
@@ -4,10 +4,6 @@
 #include <asm/pda.h>
 #include <linux/threads.h>
 #include <linux/mm.h>
-#include <linux/quicklist.h>
-
-#define QUICK_PGD 0	/* We preserve special mappings over free */
-#define QUICK_PT 1	/* Other page table pages that are zero on free */
 
 #define pmd_populate_kernel(mm, pmd, pte) \
 		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
@@ -24,23 +20,23 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
 static inline void pmd_free(pmd_t *pmd)
 {
 	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
-	quicklist_free(QUICK_PT, NULL, pmd);
+	free_page((unsigned long)pmd);
 }
 
 static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
 {
-	return (pmd_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
+	return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return (pud_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
+	return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline void pud_free (pud_t *pud)
 {
 	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
-	quicklist_free(QUICK_PT, NULL, pud);
+	free_page((unsigned long)pud);
 }
 
 static inline void pgd_list_add(pgd_t *pgd)
@@ -61,57 +57,41 @@ static inline void pgd_list_del(pgd_t *pgd)
 	spin_unlock(&pgd_lock);
 }
 
-static inline void pgd_ctor(void *x)
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	unsigned boundary;
-	pgd_t *pgd = x;
-	struct page *page = virt_to_page(pgd);
-
+	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+	if (!pgd)
+		return NULL;
+	pgd_list_add(pgd);
 	/*
 	 * Copy kernel pointers in from init.
+	 * Could keep a freelist or slab cache of those because the kernel
+	 * part never changes.
 	 */
 	boundary = pgd_index(__PAGE_OFFSET);
 	memset(pgd, 0, boundary * sizeof(pgd_t));
 	memcpy(pgd + boundary,
-		init_level4_pgt + boundary,
-		(PTRS_PER_PGD - boundary) * sizeof(pgd_t));
-
-	spin_lock(&pgd_lock);
-	list_add(&page->lru, &pgd_list);
-	spin_unlock(&pgd_lock);
-}
-
-static inline void pgd_dtor(void *x)
-{
-	pgd_t *pgd = x;
-	struct page *page = virt_to_page(pgd);
-
-	spin_lock(&pgd_lock);
-	list_del(&page->lru);
-	spin_unlock(&pgd_lock);
-}
-
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-	pgd_t *pgd = (pgd_t *)quicklist_alloc(QUICK_PGD,
-		GFP_KERNEL|__GFP_REPEAT, pgd_ctor);
+	       init_level4_pgt + boundary,
+	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
 	return pgd;
 }
 
 static inline void pgd_free(pgd_t *pgd)
 {
 	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
-	quicklist_free(QUICK_PGD, pgd_dtor, pgd);
+	pgd_list_del(pgd);
+	free_page((unsigned long)pgd);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	return (pte_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
+	return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	void *p = (void *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
-
+	void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
 	if (!p)
 		return NULL;
 	return virt_to_page(p);
@@ -123,22 +103,17 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
 static inline void pte_free_kernel(pte_t *pte)
 {
 	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
-	quicklist_free(QUICK_PT, NULL, pte);
+	free_page((unsigned long)pte);
 }
 
 static inline void pte_free(struct page *pte)
 {
-	quicklist_free_page(QUICK_PT, NULL, pte);
-}
+	__free_page(pte);
+}
 
-#define __pte_free_tlb(tlb,pte) quicklist_free_page(QUICK_PT, NULL,(pte))
+#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
 
-#define __pmd_free_tlb(tlb,x)   quicklist_free(QUICK_PT, NULL, (x))
-#define __pud_free_tlb(tlb,x)   quicklist_free(QUICK_PT, NULL, (x))
+#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
+#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
 
-static inline void check_pgt_cache(void)
-{
-	quicklist_trim(QUICK_PGD, pgd_dtor, 25, 16);
-	quicklist_trim(QUICK_PT, NULL, 25, 16);
-}
 #endif /* _X86_64_PGALLOC_H */
1 change: 1 addition & 0 deletions include/asm-x86_64/pgtable.h
@@ -411,6 +411,7 @@ pte_t *lookup_address(unsigned long addr);
 #define HAVE_ARCH_UNMAPPED_AREA
 
 #define pgtable_cache_init()   do { } while (0)
+#define check_pgt_cache()      do { } while (0)
 
 #define PAGE_AGP PAGE_KERNEL_NOCACHE
 #define HAVE_PAGE_AGP 1
