mm: directly use __mlock_vma_pages_range() in find_extend_vma()
In find_extend_vma(), we don't need mlock_vma_pages_range() to verify
the vma type - we know we're working with a stack.  So, we can call
directly into __mlock_vma_pages_range(), and remove the last
make_pages_present() call site.
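
For orientation, the CONFIG_STACK_GROWSUP variant of find_extend_vma() after this change looks roughly as below. The hunks further down show only the changed lines; the surrounding lines here are reconstructed from the era's mm/mmap.c and may differ in minor detail.

struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma, *prev;

        addr &= PAGE_MASK;
        vma = find_vma_prev(mm, addr, &prev);
        if (vma && (vma->vm_start <= addr))
                return vma;
        if (!prev || expand_stack(prev, addr))
                return NULL;
        if (prev->vm_flags & VM_LOCKED)
                /* known to be a stack vma: no type filtering needed */
                __mlock_vma_pages_range(prev, addr, prev->vm_end, NULL);
        return prev;
}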

Note that we don't use mm_populate() here, so we can't release the
mmap_sem while allocating new stack pages.  This is deemed acceptable,
because the stack vmas grow by a bounded number of pages at a time, and
these are anon pages so we don't have to read from disk to populate
them.
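
For contrast, mm_populate()-style callers pass a non-NULL nonblocking/locked pointer, which lets get_user_pages() drop mmap_sem while it blocks and report that back. The sketch below illustrates the assumed calling convention; it is a simplification, not the literal __mm_populate() code.

        int locked = 1;

        down_read(&mm->mmap_sem);
        ret = __mlock_vma_pages_range(vma, start, end, &locked);
        if (locked)
                up_read(&mm->mmap_sem);   /* still held; drop it ourselves */
        /* else: mmap_sem was released while faulting pages in */

        /*
         * find_extend_vma() is called with mmap_sem already held and must
         * return with it still held, so it passes NULL and the semaphore is
         * never released while the new stack pages are allocated.
         */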

Signed-off-by: Michel Lespinasse <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Tested-by: Andy Lutomirski <[email protected]>
Cc: Greg Ungerer <[email protected]>
Cc: David Howells <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
walken-google authored and torvalds committed Feb 24, 2013
1 parent c22c0d6 commit cea10a1
Showing 5 changed files with 9 additions and 87 deletions.
1 change: 0 additions & 1 deletion include/linux/mm.h
@@ -1035,7 +1035,6 @@ static inline int fixup_user_fault(struct task_struct *tsk,
 }
 #endif
 
-extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
                void *buf, int len, int write);
4 changes: 2 additions & 2 deletions mm/internal.h
@@ -162,8 +162,8 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent);
 
 #ifdef CONFIG_MMU
-extern long mlock_vma_pages_range(struct vm_area_struct *vma,
-                       unsigned long start, unsigned long end);
+extern long __mlock_vma_pages_range(struct vm_area_struct *vma,
+                       unsigned long start, unsigned long end, int *nonblocking);
 extern void munlock_vma_pages_range(struct vm_area_struct *vma,
                        unsigned long start, unsigned long end);
 static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
24 changes: 0 additions & 24 deletions mm/memory.c
@@ -3824,30 +3824,6 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
 
-int make_pages_present(unsigned long addr, unsigned long end)
-{
-        int ret, len, write;
-        struct vm_area_struct * vma;
-
-        vma = find_vma(current->mm, addr);
-        if (!vma)
-                return -ENOMEM;
-        /*
-         * We want to touch writable mappings with a write fault in order
-         * to break COW, except for shared mappings because these don't COW
-         * and we would not want to dirty them for nothing.
-         */
-        write = (vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;
-        BUG_ON(addr >= end);
-        BUG_ON(end > vma->vm_end);
-        len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
-        ret = get_user_pages(current, current->mm, addr,
-                        len, write, 0, NULL, NULL);
-        if (ret < 0)
-                return ret;
-        return ret == len ? 0 : -EFAULT;
-}
-
 #if !defined(__HAVE_ARCH_GATE_AREA)
 
 #if defined(AT_SYSINFO_EHDR)
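
Two details of the deleted helper are worth spelling out: the write-fault predicate (break COW only for private writable mappings) and the page-count arithmetic. The stand-alone snippet below reproduces both; the flag values and example addresses are chosen here purely for illustration.

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define VM_WRITE        0x2UL
#define VM_SHARED       0x8UL

int main(void)
{
        /* Write-fault only private writable mappings, so shared mappings
         * are not dirtied just to make them present. */
        unsigned long flags = VM_WRITE;         /* private + writable */
        int write = (flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;

        /* Pages covered by [addr, end): 0x1800..0x3001 touches pages
         * 1, 2 and 3, so len == 3. */
        unsigned long addr = 0x1800, end = 0x3001;
        unsigned long len = DIV_ROUND_UP(end, PAGE_SIZE) - addr / PAGE_SIZE;

        printf("write=%d len=%lu\n", write, len);       /* prints: write=1 len=3 */
        return 0;
}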
57 changes: 3 additions & 54 deletions mm/mlock.c
@@ -155,9 +155,8 @@ void munlock_vma_page(struct page *page)
  *
  * vma->vm_mm->mmap_sem must be held for at least read.
  */
-static long __mlock_vma_pages_range(struct vm_area_struct *vma,
-               unsigned long start, unsigned long end,
-               int *nonblocking)
+long __mlock_vma_pages_range(struct vm_area_struct *vma,
+               unsigned long start, unsigned long end, int *nonblocking)
 {
        struct mm_struct *mm = vma->vm_mm;
        unsigned long addr = start;
@@ -202,56 +201,6 @@ static int __mlock_posix_error_return(long retval)
        return retval;
 }
 
-/**
- * mlock_vma_pages_range() - mlock pages in specified vma range.
- * @vma - the vma containing the specfied address range
- * @start - starting address in @vma to mlock
- * @end - end address [+1] in @vma to mlock
- *
- * For mmap()/mremap()/expansion of mlocked vma.
- *
- * return 0 on success for "normal" vmas.
- *
- * return number of pages [> 0] to be removed from locked_vm on success
- * of "special" vmas.
- */
-long mlock_vma_pages_range(struct vm_area_struct *vma,
-               unsigned long start, unsigned long end)
-{
-        int nr_pages = (end - start) / PAGE_SIZE;
-        BUG_ON(!(vma->vm_flags & VM_LOCKED));
-
-        /*
-         * filter unlockable vmas
-         */
-        if (vma->vm_flags & (VM_IO | VM_PFNMAP))
-                goto no_mlock;
-
-        if (!((vma->vm_flags & VM_DONTEXPAND) ||
-                        is_vm_hugetlb_page(vma) ||
-                        vma == get_gate_vma(current->mm))) {
-
-                __mlock_vma_pages_range(vma, start, end, NULL);
-
-                /* Hide errors from mmap() and other callers */
-                return 0;
-        }
-
-        /*
-         * User mapped kernel pages or huge pages:
-         * make these pages present to populate the ptes, but
-         * fall thru' to reset VM_LOCKED--no need to unlock, and
-         * return nr_pages so these don't get counted against task's
-         * locked limit. huge pages are already counted against
-         * locked vm limit.
-         */
-        make_pages_present(start, end);
-
-no_mlock:
-        vma->vm_flags &= ~VM_LOCKED;    /* and don't come back! */
-        return nr_pages;                /* error or pages NOT mlocked */
-}
-
 /*
  * munlock_vma_pages_range() - munlock all pages in the vma range.'
  * @vma - vma containing range to be munlock()ed.
@@ -303,7 +252,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
  *
  * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
  * munlock is a no-op.  However, for some special vmas, we go ahead and
- * populate the ptes via make_pages_present().
+ * populate the ptes.
  *
  * For vmas that pass the filters, merge/split as appropriate.
  */
10 changes: 4 additions & 6 deletions mm/mmap.c
@@ -2204,9 +2204,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
                return vma;
        if (!prev || expand_stack(prev, addr))
                return NULL;
-       if (prev->vm_flags & VM_LOCKED) {
-               mlock_vma_pages_range(prev, addr, prev->vm_end);
-       }
+       if (prev->vm_flags & VM_LOCKED)
+               __mlock_vma_pages_range(prev, addr, prev->vm_end, NULL);
        return prev;
 }
 #else
@@ -2232,9 +2231,8 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
        start = vma->vm_start;
        if (expand_stack(vma, addr))
                return NULL;
-       if (vma->vm_flags & VM_LOCKED) {
-               mlock_vma_pages_range(vma, addr, start);
-       }
+       if (vma->vm_flags & VM_LOCKED)
+               __mlock_vma_pages_range(vma, addr, start, NULL);
        return vma;
 }
 #endif
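
For reference, find_extend_vma() is used by fault-resolution paths such as __get_user_pages() to look up an address that may sit just beyond the current stack vma. A hypothetical caller (do_something_with() is a placeholder, not a kernel function) holds mmap_sem for read across the call, which is exactly why the population above must not drop it:

        down_read(&mm->mmap_sem);
        vma = find_extend_vma(mm, addr);        /* may grow and populate the stack */
        if (vma)
                do_something_with(vma);         /* e.g. walk the page tables for addr */
        up_read(&mm->mmap_sem);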
