mm/mmu_notifier: use correct mmu_notifier events for each invalidation
This updates each existing invalidation to use the correct mmu notifier
event that represents what is happening to the CPU page table. See the
patch which introduced the events for the rationale behind this.

Link: http://lkml.kernel.org/r/20190326164747.24405-7-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Ross Zwisler <zwisler@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 7269f99993
parent 6f4f13e8d9
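For context, the calling convention is the one introduced by the parent patch; only the event argument changes at each call site. Below is a minimal sketch of the pattern these conversions follow. The helper function is hypothetical, written purely for illustration; the initializer signature, the event names, and the start/end pairing are taken from the hunks in this patch.

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/*
 * Illustration only: a hypothetical helper showing the
 * mmu_notifier_range_init() pattern this patch standardizes.
 */
static void example_clear_single_page(struct vm_area_struct *vma,
				      struct mm_struct *mm,
				      unsigned long addr)
{
	struct mmu_notifier_range range;

	/*
	 * The event names what happens to the CPU page table:
	 * MMU_NOTIFY_CLEAR for zapping/CoW/migration,
	 * MMU_NOTIFY_PROTECTION_VMA or _PAGE for permission changes,
	 * MMU_NOTIFY_SOFT_DIRTY for soft-dirty clearing; after this
	 * patch MMU_NOTIFY_UNMAP is left for actual unmapping.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				addr, addr + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	/* ... modify the page table and flush TLBs here ... */

	mmu_notifier_invalidate_range_end(&range);
}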
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1169,8 +1169,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 				break;
 			}
 
-			mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
-						NULL, mm, 0, -1UL);
+			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
+						0, NULL, mm, 0, -1UL);
 			mmu_notifier_invalidate_range_start(&range);
 		}
 		walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -161,7 +161,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 	struct mmu_notifier_range range;
 	struct mem_cgroup *memcg;
 
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, addr,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
 				addr + PAGE_SIZE);
 
 	VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1224,9 +1224,8 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
 		cond_resched();
 	}
 
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
-				haddr,
-				haddr + HPAGE_PMD_SIZE);
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
+				haddr, haddr + HPAGE_PMD_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
 
 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
@@ -1389,9 +1388,8 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
 					vma, HPAGE_PMD_NR);
 	__SetPageUptodate(new_page);
 
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
-				haddr,
-				haddr + HPAGE_PMD_SIZE);
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
+				haddr, haddr + HPAGE_PMD_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
 
 	spin_lock(vmf->ptl);
@@ -2066,7 +2064,7 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
 	spinlock_t *ptl;
 	struct mmu_notifier_range range;
 
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address & HPAGE_PUD_MASK,
 				(address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
@@ -2285,7 +2283,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	spinlock_t *ptl;
 	struct mmu_notifier_range range;
 
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address & HPAGE_PMD_MASK,
 				(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3294,7 +3294,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 
 	if (cow) {
-		mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, src,
+		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
 					vma->vm_start,
 					vma->vm_end);
 		mmu_notifier_invalidate_range_start(&range);
@@ -3675,7 +3675,7 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 			    pages_per_huge_page(h));
 	__SetPageUptodate(new_page);
 
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, haddr,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
 				haddr + huge_page_size(h));
 	mmu_notifier_invalidate_range_start(&range);
 
@@ -4411,8 +4411,8 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	 * start/end. Set range.start/range.end to cover the maximum possible
 	 * range if PMD sharing is possible.
 	 */
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
-				end);
+	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
+				0, vma, mm, start, end);
 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
 
 	BUG_ON(address >= end);
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1016,7 +1016,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	pte = pte_offset_map(pmd, address);
 	pte_ptl = pte_lockptr(mm, pmd);
 
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, NULL, mm,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
 				address, address + HPAGE_PMD_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
 	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1066,7 +1066,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 
 	BUG_ON(PageTransCompound(page));
 
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
 				pvmw.address,
 				pvmw.address + PAGE_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
@@ -1155,7 +1155,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	if (!pmd)
 		goto out;
 
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, addr,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
 				addr + PAGE_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -472,7 +472,7 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
 	range.end = min(vma->vm_end, end_addr);
 	if (range.end <= vma->vm_start)
 		return -EINVAL;
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
 				range.start, range.end);
 
 	lru_add_drain();
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1010,8 +1010,8 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	is_cow = is_cow_mapping(vma->vm_flags);
 
 	if (is_cow) {
-		mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma,
-					src_mm, addr, end);
+		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
+					0, vma, src_mm, addr, end);
 		mmu_notifier_invalidate_range_start(&range);
 	}
 
@@ -1358,7 +1358,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	struct mmu_gather tlb;
 
 	lru_add_drain();
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				start, start + size);
 	tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
 	update_hiwater_rss(vma->vm_mm);
@@ -1385,7 +1385,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
 	struct mmu_gather tlb;
 
 	lru_add_drain();
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address, address + size);
 	tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
 	update_hiwater_rss(vma->vm_mm);
@@ -2283,7 +2283,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 
 	__SetPageUptodate(new_page);
 
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
 				vmf->address & PAGE_MASK,
 				(vmf->address & PAGE_MASK) + PAGE_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
@@ -4109,7 +4109,7 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
 		goto out;
 
 	if (range) {
-		mmu_notifier_range_init(range, MMU_NOTIFY_UNMAP, 0,
+		mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0,
 					NULL, mm, address & PMD_MASK,
 					(address & PMD_MASK) + PMD_SIZE);
 		mmu_notifier_invalidate_range_start(range);
@@ -4128,7 +4128,7 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
 		goto out;
 
 	if (range) {
-		mmu_notifier_range_init(range, MMU_NOTIFY_UNMAP, 0, NULL, mm,
+		mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
 					address & PAGE_MASK,
 					(address & PAGE_MASK) + PAGE_SIZE);
 		mmu_notifier_invalidate_range_start(range);
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2356,7 +2356,7 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
 	mm_walk.mm = migrate->vma->vm_mm;
 	mm_walk.private = migrate;
 
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, NULL, mm_walk.mm,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm_walk.mm,
 				migrate->start,
 				migrate->end);
 	mmu_notifier_invalidate_range_start(&range);
@@ -2765,7 +2765,7 @@ static void migrate_vma_pages(struct migrate_vma *migrate)
 			notified = true;
 
 			mmu_notifier_range_init(&range,
-						MMU_NOTIFY_UNMAP, 0,
+						MMU_NOTIFY_CLEAR, 0,
 						NULL,
 						migrate->vma->vm_mm,
 						addr, migrate->end);
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -185,8 +185,9 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 
 		/* invoke the mmu notifier if the pmd is populated */
 		if (!range.start) {
-			mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
-						vma, vma->vm_mm, addr, end);
+			mmu_notifier_range_init(&range,
+						MMU_NOTIFY_PROTECTION_VMA, 0,
+						vma, vma->vm_mm, addr, end);
 			mmu_notifier_invalidate_range_start(&range);
 		}
 
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -896,8 +896,8 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 	 * We have to assume the worse case ie pmd for invalidation. Note that
 	 * the page can not be free from this function.
 	 */
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
-				address,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
+				0, vma, vma->vm_mm, address,
 				min(vma->vm_end, address +
 				    (PAGE_SIZE << compound_order(page))));
 	mmu_notifier_invalidate_range_start(&range);
@@ -1372,7 +1372,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * Note that the page can not be free in this function as call of
 	 * try_to_unmap() must hold a reference on the page.
 	 */
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address,
 				min(vma->vm_end, address +
 				    (PAGE_SIZE << compound_order(page))));