huge pagecache: extend mremap pmd rmap lockout to files
Whatever huge pagecache implementation we go with, file rmap locking
must be added to anon rmap locking, when mremap's move_page_tables()
finds a pmd_trans_huge pmd entry: a simple change, let's do it now.

Factor out take_rmap_locks() and drop_rmap_locks() to handle the
locking for move_ptes() and move_page_tables(), and delete the
VM_BUG_ON_VMA which rejected vm_file and required anon_vma.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andres Lagar-Cavilla <andreslc@google.com>
Cc: Yang Shi <yang.shi@linaro.org>
Cc: Ning Qu <quning@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
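For context on the nesting inside the new helpers: take_rmap_locks()
acquires the file rmap lock before the anon rmap lock, and
drop_rmap_locks() releases them in reverse, which matches the mm-wide
lock ordering documented at the top of mm/rmap.c. A condensed sketch of
that ordering (paraphrased from the rmap.c comment of this era, so
treat the exact list as approximate):

/*
 * Lock ordering in mm (condensed; see mm/rmap.c for the full list):
 *
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_rwsem
 *         anon_vma->rwsem
 *           mm->page_table_lock or pte_lock
 */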
parent bf8616d5fa
commit 1d069b7dd5

mm/mremap.c | 42 ++++++++++++++++++++++--------------------
1 file changed, 22 insertions(+), 20 deletions(-)
@@ -70,6 +70,22 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 	return pmd;
 }
 
+static void take_rmap_locks(struct vm_area_struct *vma)
+{
+	if (vma->vm_file)
+		i_mmap_lock_write(vma->vm_file->f_mapping);
+	if (vma->anon_vma)
+		anon_vma_lock_write(vma->anon_vma);
+}
+
+static void drop_rmap_locks(struct vm_area_struct *vma)
+{
+	if (vma->anon_vma)
+		anon_vma_unlock_write(vma->anon_vma);
+	if (vma->vm_file)
+		i_mmap_unlock_write(vma->vm_file->f_mapping);
+}
+
 static pte_t move_soft_dirty_pte(pte_t pte)
 {
 	/*
@@ -90,8 +106,6 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		struct vm_area_struct *new_vma, pmd_t *new_pmd,
 		unsigned long new_addr, bool need_rmap_locks)
 {
-	struct address_space *mapping = NULL;
-	struct anon_vma *anon_vma = NULL;
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *old_pte, *new_pte, pte;
 	spinlock_t *old_ptl, *new_ptl;
@@ -114,16 +128,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	 * serialize access to individual ptes, but only rmap traversal
 	 * order guarantees that we won't miss both the old and new ptes).
 	 */
-	if (need_rmap_locks) {
-		if (vma->vm_file) {
-			mapping = vma->vm_file->f_mapping;
-			i_mmap_lock_write(mapping);
-		}
-		if (vma->anon_vma) {
-			anon_vma = vma->anon_vma;
-			anon_vma_lock_write(anon_vma);
-		}
-	}
+	if (need_rmap_locks)
+		take_rmap_locks(vma);
 
 	/*
 	 * We don't have to worry about the ordering of src and dst
@@ -151,10 +157,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	spin_unlock(new_ptl);
 	pte_unmap(new_pte - 1);
 	pte_unmap_unlock(old_pte - 1, old_ptl);
-	if (anon_vma)
-		anon_vma_unlock_write(anon_vma);
-	if (mapping)
-		i_mmap_unlock_write(mapping);
+	if (need_rmap_locks)
+		drop_rmap_locks(vma);
 }
 
 #define LATENCY_LIMIT	(64 * PAGE_SIZE)
@@ -193,15 +197,13 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		if (pmd_trans_huge(*old_pmd)) {
 			if (extent == HPAGE_PMD_SIZE) {
 				bool moved;
-				VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma,
-					      vma);
 				/* See comment in move_ptes() */
 				if (need_rmap_locks)
-					anon_vma_lock_write(vma->anon_vma);
+					take_rmap_locks(vma);
 				moved = move_huge_pmd(vma, old_addr, new_addr,
 						      old_end, old_pmd, new_pmd);
 				if (need_rmap_locks)
-					anon_vma_unlock_write(vma->anon_vma);
+					drop_rmap_locks(vma);
 				if (moved) {
 					need_flush = true;
 					continue;
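As a rough illustration of the path this patch covers: once a huge
pagecache implementation lands, an mremap() of a file-backed mapping
can present move_page_tables() with a pmd_trans_huge entry on a vma
that has vm_file set, which the deleted VM_BUG_ON_VMA would have
rejected. The userspace sketch below forces such a move; the tmpfs
path, the sizes, and whether huge pages actually back the mapping are
all assumptions that depend on filesystem and kernel configuration:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#define SZ (4UL << 20)	/* two pmd-sized extents on x86_64 */

int main(void)
{
	char path[] = "/dev/shm/hugecache-XXXXXX";	/* assumed tmpfs */
	int fd = mkstemp(path);
	if (fd < 0 || ftruncate(fd, SZ) < 0) {
		perror("setup");
		return 1;
	}
	unlink(path);

	void *old = mmap(NULL, SZ, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (old == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(old, 0xaa, SZ);	/* populate the pagecache */

	/* Reserve a destination, then force a move: MREMAP_FIXED makes
	 * move_vma()/move_page_tables() relocate the page tables, taking
	 * the rmap locks when need_rmap_locks is set. */
	void *dst = mmap(NULL, SZ, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (dst == MAP_FAILED) {
		perror("mmap dst");
		return 1;
	}
	void *new = mremap(old, SZ, SZ, MREMAP_MAYMOVE | MREMAP_FIXED, dst);
	if (new == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	printf("moved %p -> %p\n", old, new);
	return 0;
}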