mm: add new mmget() helper
Apart from adding the helper function itself, the rest of the kernel is
converted mechanically using:

  git grep -l 'atomic_inc.*mm_users' | xargs sed -i 's/atomic_inc(&\(.*\)->mm_users);/mmget\(\1\);/'
  git grep -l 'atomic_inc.*mm_users' | xargs sed -i 's/atomic_inc(&\(.*\)\.mm_users);/mmget\(\&\1\);/'
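For example, the first expression rewrites increments taken through a pointer and the second rewrites increments on an mm_struct accessed directly; both patterns appear in the diff below:

  	atomic_inc(&mm->mm_users);      /* becomes */  mmget(mm);
  	atomic_inc(&init_mm.mm_users);  /* becomes */  mmget(&init_mm);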

This is needed for a later patch that hooks into the helper, but might
be a worthwhile cleanup on its own.

(Michal Hocko provided most of the kerneldoc comment.)

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Vegard Nossum <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: David Rientjes <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
vegard authored and torvalds committed Feb 28, 2017
1 parent f1f1007 commit 3fce371
Showing 10 changed files with 35 additions and 14 deletions.
2 changes: 1 addition & 1 deletion arch/arc/kernel/smp.c
@@ -139,7 +139,7 @@ void start_kernel_secondary(void)
 	/* MMU, Caches, Vector Table, Interrupts etc */
 	setup_processor();
 
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
2 changes: 1 addition & 1 deletion arch/blackfin/mach-common/smp.c
@@ -307,7 +307,7 @@ void secondary_start_kernel(void)
 	local_irq_disable();
 
 	/* Attach the new idle task to the global mm. */
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	mmgrab(mm);
 	current->active_mm = mm;
 
2 changes: 1 addition & 1 deletion arch/frv/mm/mmu-context.c
@@ -188,7 +188,7 @@ int cxn_pin_by_pid(pid_t pid)
 	task_lock(tsk);
 	if (tsk->mm) {
 		mm = tsk->mm;
-		atomic_inc(&mm->mm_users);
+		mmget(mm);
 		ret = 0;
 	}
 	task_unlock(tsk);
2 changes: 1 addition & 1 deletion arch/metag/kernel/smp.c
@@ -344,7 +344,7 @@ asmlinkage void secondary_start_kernel(void)
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
 	 */
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
2 changes: 1 addition & 1 deletion arch/sh/kernel/smp.c
@@ -179,7 +179,7 @@ asmlinkage void start_secondary(void)
 
 	enable_mmu();
 	mmgrab(mm);
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	current->active_mm = mm;
 #ifdef CONFIG_MMU
 	enter_lazy_tlb(mm, current);
2 changes: 1 addition & 1 deletion arch/xtensa/kernel/smp.c
@@ -135,7 +135,7 @@ void secondary_start_kernel(void)
 
 	/* All kernel threads share the same mm context. */
 
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
21 changes: 21 additions & 0 deletions include/linux/sched.h
@@ -2948,6 +2948,27 @@ static inline void mmdrop_async(struct mm_struct *mm)
 	}
 }
 
+/**
+ * mmget() - Pin the address space associated with a &struct mm_struct.
+ * @mm: The address space to pin.
+ *
+ * Make sure that the address space of the given &struct mm_struct doesn't
+ * go away. This does not protect against parts of the address space being
+ * modified or freed, however.
+ *
+ * Never use this function to pin this address space for an
+ * unbounded/indefinite amount of time.
+ *
+ * Use mmput() to release the reference acquired by mmget().
+ *
+ * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
+ * of &mm_struct.mm_count vs &mm_struct.mm_users.
+ */
+static inline void mmget(struct mm_struct *mm)
+{
+	atomic_inc(&mm->mm_users);
+}
+
 static inline bool mmget_not_zero(struct mm_struct *mm)
 {
 	return atomic_inc_not_zero(&mm->mm_users);
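In use, mmget() pairs with mmput(), just as mmgrab() pairs with mmdrop() for the mm_count reference. A minimal sketch of the intended pattern, modelled on the get_task_mm() conversion below (the middle comment is a placeholder, not code from this commit):

  struct mm_struct *mm = get_task_mm(task);	/* takes an mm_users reference */
  if (mm) {
  	/* ... use the address space for a bounded time only ... */
  	mmput(mm);	/* drop the reference acquired via mmget() */
  }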
4 changes: 2 additions & 2 deletions kernel/fork.c
@@ -1000,7 +1000,7 @@ struct mm_struct *get_task_mm(struct task_struct *task)
 		if (task->flags & PF_KTHREAD)
 			mm = NULL;
 		else
-			atomic_inc(&mm->mm_users);
+			mmget(mm);
 	}
 	task_unlock(task);
 	return mm;
@@ -1188,7 +1188,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
 	vmacache_flush(tsk);
 
 	if (clone_flags & CLONE_VM) {
-		atomic_inc(&oldmm->mm_users);
+		mmget(oldmm);
 		mm = oldmm;
 		goto good_mm;
 	}
10 changes: 5 additions & 5 deletions mm/swapfile.c
@@ -1671,7 +1671,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
 	 * that.
 	 */
 	start_mm = &init_mm;
-	atomic_inc(&init_mm.mm_users);
+	mmget(&init_mm);
 
 	/*
 	 * Keep on scanning until all entries have gone. Usually,
@@ -1720,7 +1720,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
 		if (atomic_read(&start_mm->mm_users) == 1) {
 			mmput(start_mm);
 			start_mm = &init_mm;
-			atomic_inc(&init_mm.mm_users);
+			mmget(&init_mm);
 		}
 
 		/*
@@ -1757,8 +1757,8 @@ int try_to_unuse(unsigned int type, bool frontswap,
 			struct mm_struct *prev_mm = start_mm;
 			struct mm_struct *mm;
 
-			atomic_inc(&new_start_mm->mm_users);
-			atomic_inc(&prev_mm->mm_users);
+			mmget(new_start_mm);
+			mmget(prev_mm);
 			spin_lock(&mmlist_lock);
 			while (swap_count(*swap_map) && !retval &&
 					(p = p->next) != &start_mm->mmlist) {
@@ -1781,7 +1781,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
 
 				if (set_start_mm && *swap_map < swcount) {
 					mmput(new_start_mm);
-					atomic_inc(&mm->mm_users);
+					mmget(mm);
 					new_start_mm = mm;
 					set_start_mm = 0;
 				}
2 changes: 1 addition & 1 deletion virt/kvm/async_pf.c
@@ -204,7 +204,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
 	work->addr = hva;
 	work->arch = *arch;
 	work->mm = current->mm;
-	atomic_inc(&work->mm->mm_users);
+	mmget(work->mm);
 	kvm_get_kvm(work->vcpu->kvm);
 
 	/* this can't really happen otherwise gfn_to_pfn_async
