mm: add new mmget() helper
Apart from adding the helper function itself, the rest of the kernel is
converted mechanically using:

  git grep -l 'atomic_inc.*mm_users' | xargs sed -i 's/atomic_inc(&\(.*\)->mm_users);/mmget\(\1\);/'
  git grep -l 'atomic_inc.*mm_users' | xargs sed -i 's/atomic_inc(&\(.*\)\.mm_users);/mmget\(\&\1\);/'

This is needed for a later patch that hooks into the helper, but might be
a worthwhile cleanup on its own.

(Michal Hocko provided most of the kerneldoc comment.)

Link: http://lkml.kernel.org/r/20161218123229.22952-2-vegard.nossum@oracle.com
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f1f1007644
commit 3fce371bfa
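Note (illustration only, not part of the patch): the sed conversion above amounts to the following change at a typical call site. The function pin_and_use_mm() is hypothetical; mmget() and mmput() are the real kernel helpers involved.

#include <linux/sched.h>        /* struct mm_struct, mmget(), mmput() */

/* Hypothetical call site, shown only to illustrate the mechanical conversion. */
static void pin_and_use_mm(struct mm_struct *mm)
{
        /*
         * Before this patch the reference was open-coded as
         *         atomic_inc(&mm->mm_users);
         * after it, the same operation is spelled:
         */
        mmget(mm);

        /* ... work on the pinned address space ... */

        mmput(mm);      /* drop the reference taken by mmget() */
}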
@@ -139,7 +139,7 @@ void start_kernel_secondary(void)
 	/* MMU, Caches, Vector Table, Interrupts etc */
 	setup_processor();
 
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));

@@ -307,7 +307,7 @@ void secondary_start_kernel(void)
 	local_irq_disable();
 
 	/* Attach the new idle task to the global mm. */
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	mmgrab(mm);
 	current->active_mm = mm;
 

@@ -188,7 +188,7 @@ int cxn_pin_by_pid(pid_t pid)
 	task_lock(tsk);
 	if (tsk->mm) {
 		mm = tsk->mm;
-		atomic_inc(&mm->mm_users);
+		mmget(mm);
 		ret = 0;
 	}
 	task_unlock(tsk);

@@ -344,7 +344,7 @@ asmlinkage void secondary_start_kernel(void)
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
 	 */
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));

@@ -179,7 +179,7 @@ asmlinkage void start_secondary(void)
 
 	enable_mmu();
 	mmgrab(mm);
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	current->active_mm = mm;
 #ifdef CONFIG_MMU
 	enter_lazy_tlb(mm, current);

@@ -135,7 +135,7 @@ void secondary_start_kernel(void)
 
 	/* All kernel threads share the same mm context. */
 
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));

@@ -2948,6 +2948,27 @@ static inline void mmdrop_async(struct mm_struct *mm)
 	}
 }
 
+/**
+ * mmget() - Pin the address space associated with a &struct mm_struct.
+ * @mm: The address space to pin.
+ *
+ * Make sure that the address space of the given &struct mm_struct doesn't
+ * go away. This does not protect against parts of the address space being
+ * modified or freed, however.
+ *
+ * Never use this function to pin this address space for an
+ * unbounded/indefinite amount of time.
+ *
+ * Use mmput() to release the reference acquired by mmget().
+ *
+ * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
+ * of &mm_struct.mm_count vs &mm_struct.mm_users.
+ */
+static inline void mmget(struct mm_struct *mm)
+{
+	atomic_inc(&mm->mm_users);
+}
+
 static inline bool mmget_not_zero(struct mm_struct *mm)
 {
 	return atomic_inc_not_zero(&mm->mm_users);

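Note (illustration only, not part of the patch): the kerneldoc above distinguishes mm_users from mm_count. A minimal sketch of the two pinning patterns, assuming the helpers from this patch and the preceding mmgrab() patch; both example function names are invented.

#include <linux/sched.h>        /* mmget(), mmput(), mmgrab(), mmdrop() */

/* Invented example: the caller needs the address space (page tables, VMAs)
 * to stay alive, so it pins mm_users. */
static void pin_address_space_example(struct mm_struct *mm)
{
        mmget(mm);      /* the address space cannot be torn down underneath us */
        /* ... access user mappings ... */
        mmput(mm);      /* may tear down the address space if we were the last user */
}

/* Invented example: the caller only needs the struct mm_struct itself,
 * so pinning mm_count is enough. */
static void pin_mm_struct_example(struct mm_struct *mm)
{
        mmgrab(mm);     /* the struct mm_struct cannot be freed */
        /* ... the address space itself may already have been torn down ... */
        mmdrop(mm);
}

mmget_not_zero(), visible in the surrounding context, is the checked variant for callers that cannot otherwise guarantee mm_users is still non-zero.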
@@ -1000,7 +1000,7 @@ struct mm_struct *get_task_mm(struct task_struct *task)
 		if (task->flags & PF_KTHREAD)
 			mm = NULL;
 		else
-			atomic_inc(&mm->mm_users);
+			mmget(mm);
 	}
 	task_unlock(task);
 	return mm;

@@ -1188,7 +1188,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
 	vmacache_flush(tsk);
 
 	if (clone_flags & CLONE_VM) {
-		atomic_inc(&oldmm->mm_users);
+		mmget(oldmm);
 		mm = oldmm;
 		goto good_mm;
 	}

@@ -1671,7 +1671,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
 	 * that.
 	 */
 	start_mm = &init_mm;
-	atomic_inc(&init_mm.mm_users);
+	mmget(&init_mm);
 
 	/*
 	 * Keep on scanning until all entries have gone. Usually,

@@ -1720,7 +1720,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
 		if (atomic_read(&start_mm->mm_users) == 1) {
 			mmput(start_mm);
 			start_mm = &init_mm;
-			atomic_inc(&init_mm.mm_users);
+			mmget(&init_mm);
 		}
 
 		/*

@@ -1757,8 +1757,8 @@ int try_to_unuse(unsigned int type, bool frontswap,
 			struct mm_struct *prev_mm = start_mm;
 			struct mm_struct *mm;
 
-			atomic_inc(&new_start_mm->mm_users);
-			atomic_inc(&prev_mm->mm_users);
+			mmget(new_start_mm);
+			mmget(prev_mm);
 			spin_lock(&mmlist_lock);
 			while (swap_count(*swap_map) && !retval &&
 					(p = p->next) != &start_mm->mmlist) {

@@ -1781,7 +1781,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
 
 			if (set_start_mm && *swap_map < swcount) {
 				mmput(new_start_mm);
-				atomic_inc(&mm->mm_users);
+				mmget(mm);
 				new_start_mm = mm;
 				set_start_mm = 0;
 			}

@@ -204,7 +204,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
 	work->addr = hva;
 	work->arch = *arch;
 	work->mm = current->mm;
-	atomic_inc(&work->mm->mm_users);
+	mmget(work->mm);
 	kvm_get_kvm(work->vcpu->kvm);
 
 	/* this can't really happen otherwise gfn_to_pfn_async

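Note (illustration only, not part of the patch): the kvm_setup_async_pf() hunk above shows the typical reason for mmget(): the mm is stashed for work that completes later, in another context, and is released there with mmput(). A hypothetical sketch of that pattern using a workqueue; struct deferred_mm_work and both functions are invented, only mmget()/mmput() and the workqueue API are real.

#include <linux/errno.h>
#include <linux/sched.h>        /* struct mm_struct, mmget(), mmput(), current */
#include <linux/slab.h>         /* kmalloc(), kfree() */
#include <linux/workqueue.h>    /* struct work_struct, INIT_WORK(), schedule_work() */

struct deferred_mm_work {
        struct work_struct work;
        struct mm_struct *mm;   /* pinned from submit until the worker runs */
};

static void deferred_mm_fn(struct work_struct *work)
{
        struct deferred_mm_work *w = container_of(work, struct deferred_mm_work, work);

        /* ... operate on w->mm: the address space is still alive here ... */

        mmput(w->mm);           /* balance the mmget() taken at submit time */
        kfree(w);
}

static int submit_deferred_mm_work(void)
{
        struct deferred_mm_work *w = kmalloc(sizeof(*w), GFP_KERNEL);

        if (!w)
                return -ENOMEM;

        w->mm = current->mm;    /* assumes a userspace caller, so current->mm != NULL */
        mmget(w->mm);           /* keep the address space alive until the worker runs */
        INIT_WORK(&w->work, deferred_mm_fn);
        schedule_work(&w->work);
        return 0;
}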