mm: numa: defer TLB flush for THP migration as long as possible
THP migration can fail for a variety of reasons.  Avoid flushing the TLB
to deal with THP migration races until the copy is ready to start.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Alex Thorlton <athorlton@sgi.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit b0943d61b8
parent af2c1401e6
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1376,13 +1376,6 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto clear_pmdnuma;
 	}
 
-	/*
-	 * The page_table_lock above provides a memory barrier
-	 * with change_protection_range.
-	 */
-	if (mm_tlb_flush_pending(mm))
-		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
-
 	/*
 	 * Migrate the THP to the requested node, returns with page unlocked
 	 * and pmd_numa cleared.
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1759,6 +1759,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 		goto out_fail;
 	}
 
+	if (mm_tlb_flush_pending(mm))
+		flush_tlb_range(vma, mmun_start, mmun_end);
+
 	/* Prepare a page as a migration target */
 	__set_page_locked(new_page);
 	SetPageSwapBacked(new_page);
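
For orientation, below is a minimal, userspace-compilable sketch of where the pending-flush check sits after this change. Every name in it (struct mm_sketch, tlb_flush_pending(), flush_range(), migrate_thp_sketch(), numa_fault_sketch()) is a stand-in invented for illustration, not the kernel API; only the placement of the flush mirrors the hunks above.

/*
 * Sketch only: stand-in types and helpers, not the kernel's
 * mm_tlb_flush_pending() or flush_tlb_range().
 */
#include <stdbool.h>
#include <stdio.h>

struct mm_sketch { bool tlb_flush_pending; };

static bool tlb_flush_pending(struct mm_sketch *mm)
{
	return mm->tlb_flush_pending;
}

static void flush_range(const char *when)
{
	printf("TLB flush: %s\n", when);
}

/*
 * Migration side (migrate_misplaced_transhuge_page() in the patch):
 * the flush now happens here, only once allocation and isolation
 * have succeeded and the copy is about to start.
 */
static bool migrate_thp_sketch(struct mm_sketch *mm)
{
	bool isolated = true;	/* pretend allocation and isolation succeeded */

	if (!isolated)
		return false;	/* failure paths no longer pay for a flush */

	if (tlb_flush_pending(mm))
		flush_range("just before preparing the migration target");

	/* ...lock the new page, copy the THP, retire the old mapping... */
	return true;
}

/*
 * Fault side (do_huge_pmd_numa_page() in the patch): the early flush
 * that used to live here is gone; it simply hands off to migration.
 */
static void numa_fault_sketch(struct mm_sketch *mm)
{
	migrate_thp_sketch(mm);
}

int main(void)
{
	struct mm_sketch mm = { .tlb_flush_pending = true };

	numa_fault_sketch(&mm);
	return 0;
}

The design point, per the commit message and the hunks, is that a THP migration which fails early (allocation or isolation) no longer triggers a flush at all, while a migration that proceeds still flushes any pending range before the copy starts, so the copy does not operate against stale TLB entries.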