mm/vmalloc: remove map_kernel_range
Patch series "mm/vmalloc: cleanup after hugepage series", v2.

Christoph pointed out some overdue cleanups required after the huge
vmalloc series, and I had an additional improvement to the vmalloc failure
error messages as well.

This patch (of 5):

map_kernel_range() is just a shim around vmap_pages_range(); get rid of it.

Move the main API comment from the _noflush variant to the normal variant,
and make _noflush internal to mm/.
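
For illustration, the conversion at the affected call sites follows the
pattern below (a minimal sketch mirroring the vm_map_ram()/vmap() hunks in
the diff; addr, nr_pages, pages and err are placeholder variables, not names
taken from the patch):

	/* Before: size-based shim around vmap_pages_range() */
	err = map_kernel_range(addr, nr_pages << PAGE_SHIFT, PAGE_KERNEL, pages);

	/* After: open-coded range call; PAGE_SHIFT gives ordinary small-page
	 * mappings, larger shifts allow huge mappings where the pages permit. */
	err = vmap_pages_range(addr, addr + (nr_pages << PAGE_SHIFT), PAGE_KERNEL,
			       pages, PAGE_SHIFT);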

Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Nicholas Piggin <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: Uladzislau Rezki <[email protected]>
Cc: Cédric Le Goater <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
npiggin authored and torvalds committed Apr 30, 2021
Parent: 121e6f3 · Commit: b67177e
Showing 5 changed files with 38 additions and 51 deletions.
2 changes: 1 addition & 1 deletion Documentation/core-api/cachetlb.rst
@@ -213,7 +213,7 @@ Here are the routines, one by one:
 	there will be no entries in the cache for the kernel address
 	space for virtual addresses in the range 'start' to 'end-1'.
 
-	The first of these two routines is invoked after map_kernel_range()
+	The first of these two routines is invoked after vmap_range()
 	has installed the page table entries.  The second is invoked
 	before unmap_kernel_range() deletes the page table entries.

11 changes: 0 additions & 11 deletions include/linux/vmalloc.h
@@ -212,10 +212,6 @@ static inline bool is_vm_area_hugepages(const void *addr)
 int vmap_range(unsigned long addr, unsigned long end,
 			phys_addr_t phys_addr, pgprot_t prot,
 			unsigned int max_page_shift);
-extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
-				    pgprot_t prot, struct page **pages);
-int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
-		struct page **pages);
 extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
 extern void unmap_kernel_range(unsigned long addr, unsigned long size);
 static inline void set_vm_flush_reset_perms(void *addr)
@@ -227,13 +223,6 @@ static inline void set_vm_flush_reset_perms(void *addr)
 }
 
 #else
-static inline int
-map_kernel_range_noflush(unsigned long start, unsigned long size,
-			pgprot_t prot, struct page **pages)
-{
-	return size >> PAGE_SHIFT;
-}
-#define map_kernel_range map_kernel_range_noflush
 static inline void
 unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
 {
6 changes: 6 additions & 0 deletions mm/internal.h
@@ -637,4 +637,10 @@ struct migration_target_control {
 	gfp_t gfp_mask;
 };
 
+/*
+ * mm/vmalloc.c
+ */
+int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+		pgprot_t prot, struct page **pages, unsigned int page_shift);
+
 #endif /* __MM_INTERNAL_H */
5 changes: 3 additions & 2 deletions mm/percpu-vm.c
@@ -8,6 +8,7 @@
  * Chunks are mapped into vmalloc areas and populated page by page.
  * This is the default chunk allocator.
  */
+#include "internal.h"
 
 static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
 				    unsigned int cpu, int page_idx)
@@ -192,8 +193,8 @@ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
 static int __pcpu_map_pages(unsigned long addr, struct page **pages,
 			    int nr_pages)
 {
-	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
-					PAGE_KERNEL, pages);
+	return vmap_pages_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT),
+					PAGE_KERNEL, pages, PAGE_SHIFT);
 }
 
 /**
65 changes: 28 additions & 37 deletions mm/vmalloc.c
@@ -523,7 +523,16 @@ static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
 	return 0;
 }
 
-static int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+/*
+ * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
+ * flush caches.
+ *
+ * The caller is responsible for calling flush_cache_vmap() after this
+ * function returns successfully and before the addresses are accessed.
+ *
+ * This is an internal function only. Do not use outside mm/.
+ */
+int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 		pgprot_t prot, struct page **pages, unsigned int page_shift)
 {
 	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
@@ -549,48 +558,26 @@ static int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 	return 0;
 }
 
-static int vmap_pages_range(unsigned long addr, unsigned long end,
-		pgprot_t prot, struct page **pages, unsigned int page_shift)
-{
-	int err;
-
-	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
-	flush_cache_vmap(addr, end);
-	return err;
-}
-
 /**
- * map_kernel_range_noflush - map kernel VM area with the specified pages
+ * vmap_pages_range - map pages to a kernel virtual address
  * @addr: start of the VM area to map
- * @size: size of the VM area to map
+ * @end: end of the VM area to map (non-inclusive)
  * @prot: page protection flags to use
- * @pages: pages to map
- *
- * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size specify should
- * have been allocated using get_vm_area() and its friends.
- *
- * NOTE:
- * This function does NOT do any cache flushing.  The caller is responsible for
- * calling flush_cache_vmap() on to-be-mapped areas before calling this
- * function.
+ * @pages: pages to map (always PAGE_SIZE pages)
+ * @page_shift: maximum shift that the pages may be mapped with, @pages must
+ * be aligned and contiguous up to at least this shift.
  *
  * RETURNS:
  * 0 on success, -errno on failure.
  */
-int map_kernel_range_noflush(unsigned long addr, unsigned long size,
-			     pgprot_t prot, struct page **pages)
-{
-	return vmap_pages_range_noflush(addr, addr + size, prot, pages, PAGE_SHIFT);
-}
-
-int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
-		struct page **pages)
+static int vmap_pages_range(unsigned long addr, unsigned long end,
+		pgprot_t prot, struct page **pages, unsigned int page_shift)
 {
-	int ret;
+	int err;
 
-	ret = map_kernel_range_noflush(start, size, prot, pages);
-	flush_cache_vmap(start, start + size);
-	return ret;
+	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
+	flush_cache_vmap(addr, end);
+	return err;
 }
 
 int is_vmalloc_or_module_addr(const void *x)
@@ -2156,10 +2143,12 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node)
 
 	kasan_unpoison_vmalloc(mem, size);
 
-	if (map_kernel_range(addr, size, PAGE_KERNEL, pages) < 0) {
+	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
+				pages, PAGE_SHIFT) < 0) {
 		vm_unmap_ram(mem, count);
 		return NULL;
 	}
+
 	return mem;
 }
 EXPORT_SYMBOL(vm_map_ram);
@@ -2703,6 +2692,7 @@ void *vmap(struct page **pages, unsigned int count,
 		   unsigned long flags, pgprot_t prot)
 {
 	struct vm_struct *area;
+	unsigned long addr;
 	unsigned long size;		/* In bytes */
 
 	might_sleep();
@@ -2715,8 +2705,9 @@ void *vmap(struct page **pages, unsigned int count,
 	if (!area)
 		return NULL;
 
-	if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot),
-			pages) < 0) {
+	addr = (unsigned long)area->addr;
+	if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
+				pages, PAGE_SHIFT) < 0) {
 		vunmap(area->addr);
 		return NULL;
 	}
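
The new comment on vmap_pages_range_noflush() above documents that _noflush
callers must flush caches themselves before the mapping is used. A minimal
sketch of that contract (a hypothetical mm/-internal helper, not part of this
patch; it assumes the "internal.h" declaration added above and that the code
lives under mm/):

	#include "internal.h"

	/* Map nr_pages small pages at addr, then flush as the comment requires. */
	static int example_map_and_flush(unsigned long addr, struct page **pages,
					 unsigned long nr_pages)
	{
		unsigned long end = addr + (nr_pages << PAGE_SHIFT);
		int err;

		/* Installs page table entries only; no cache flushing here. */
		err = vmap_pages_range_noflush(addr, end, PAGE_KERNEL, pages,
					       PAGE_SHIFT);
		if (err)
			return err;

		/* Caller-side flush before the new mapping is first accessed. */
		flush_cache_vmap(addr, end);
		return 0;
	}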
