mm: Count the number of pages affected in change_protection()
This will be used for three kinds of purposes:

 - to optimize mprotect()
 - to speed up working set scanning for working set areas that have not been touched
 - to more accurately scan per real working set

No change in functionality from this patch.

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent 4fd017708c
commit 7da4d641c5
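After this change, change_protection() reports how many base (PAGE_SIZE) pages it actually modified, with huge mappings converted to base-page units: hugetlb returns pages << h->order and a transparent huge PMD counts as HPAGE_PMD_NR. The following is a minimal, hypothetical caller sketch, not part of this patch (scan_vma_span() and its policy are illustrative assumptions), showing how later working-set-scanning code could consume the return value:

/*
 * Hypothetical example (not in this patch): batch-change protections on a
 * VMA range and use the returned base-page count to throttle further work.
 */
static unsigned long scan_vma_span(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end,
                                   pgprot_t prot, int dirty_accountable)
{
        unsigned long changed;

        /* Number of base pages whose protection was actually updated. */
        changed = change_protection(vma, start, end, prot, dirty_accountable);

        /*
         * A 2 MiB hugetlb page (order 9 with 4 KiB base pages) adds 512 here,
         * and a THP adds HPAGE_PMD_NR, so the units are uniform.
         */
        return changed;
}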
include/linux/hugetlb.h
@@ -87,7 +87,7 @@ struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                                pud_t *pud, int write);
 int pmd_huge(pmd_t pmd);
 int pud_huge(pud_t pmd);
-void hugetlb_change_protection(struct vm_area_struct *vma,
+unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);
 
 #else /* !CONFIG_HUGETLB_PAGE */
@@ -132,7 +132,11 @@ static inline void copy_huge_page(struct page *dst, struct page *src)
 {
 }
 
-#define hugetlb_change_protection(vma, address, end, newprot)
+static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+               unsigned long address, unsigned long end, pgprot_t newprot)
+{
+       return 0;
+}
 
 static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
include/linux/mm.h
@@ -1078,6 +1078,9 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
 extern unsigned long do_mremap(unsigned long addr,
                               unsigned long old_len, unsigned long new_len,
                               unsigned long flags, unsigned long new_addr);
+extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
+                             unsigned long end, pgprot_t newprot,
+                             int dirty_accountable);
 extern int mprotect_fixup(struct vm_area_struct *vma,
                          struct vm_area_struct **pprev, unsigned long start,
                          unsigned long end, unsigned long newflags);
mm/hugetlb.c
@@ -3014,7 +3014,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
        return i ? i : -EFAULT;
 }
 
-void hugetlb_change_protection(struct vm_area_struct *vma,
+unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
 {
        struct mm_struct *mm = vma->vm_mm;
@@ -3022,6 +3022,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
        pte_t *ptep;
        pte_t pte;
        struct hstate *h = hstate_vma(vma);
+       unsigned long pages = 0;
 
        BUG_ON(address >= end);
        flush_cache_range(vma, address, end);
@@ -3032,12 +3033,15 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;
-               if (huge_pmd_unshare(mm, &address, ptep))
+               if (huge_pmd_unshare(mm, &address, ptep)) {
+                       pages++;
                        continue;
+               }
                if (!huge_pte_none(huge_ptep_get(ptep))) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(pte_modify(pte, newprot));
                        set_huge_pte_at(mm, address, ptep, pte);
+                       pages++;
                }
        }
        spin_unlock(&mm->page_table_lock);
@@ -3049,6 +3053,8 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
         */
        flush_tlb_range(vma, start, end);
        mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+
+       return pages << h->order;
 }
 
 int hugetlb_reserve_pages(struct inode *inode,
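A note on the units used just above: hugetlb_change_protection() counts modified huge PTEs and shifts by the hstate order before returning, so its result is expressed in base pages, the same unit the per-PTE counters in mm/mprotect.c use. Illustrative arithmetic only, assuming 4 KiB base pages and 2 MiB huge pages (h->order == 9):

        /* Illustrative only: 3 modified huge PTEs are reported as */
        /* 3 << 9 == 1536 base pages, matching change_pte_range()'s units. */
        unsigned long base_pages = pages << h->order;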
mm/mprotect.c
@@ -35,12 +35,13 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 }
 #endif
 
-static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
+static unsigned long change_pte_range(struct mm_struct *mm, pmd_t *pmd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
 {
        pte_t *pte, oldpte;
        spinlock_t *ptl;
+       unsigned long pages = 0;
 
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        arch_enter_lazy_mmu_mode();
@@ -60,6 +61,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
                                ptent = pte_mkwrite(ptent);
 
                        ptep_modify_prot_commit(mm, addr, pte, ptent);
+                       pages++;
                } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
                        swp_entry_t entry = pte_to_swp_entry(oldpte);
 
@@ -72,18 +74,22 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
                                set_pte_at(mm, addr, pte,
                                        swp_entry_to_pte(entry));
                        }
+                       pages++;
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
+
+       return pages;
 }
 
-static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
 {
        pmd_t *pmd;
        unsigned long next;
+       unsigned long pages = 0;
 
        pmd = pmd_offset(pud, addr);
        do {
@@ -91,35 +97,42 @@ static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
                if (pmd_trans_huge(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE)
                                split_huge_page_pmd(vma->vm_mm, pmd);
-                       else if (change_huge_pmd(vma, pmd, addr, newprot))
+                       else if (change_huge_pmd(vma, pmd, addr, newprot)) {
+                               pages += HPAGE_PMD_NR;
                                continue;
+                       }
                        /* fall through */
                }
                if (pmd_none_or_clear_bad(pmd))
                        continue;
-               change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
+               pages += change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
                                 dirty_accountable);
        } while (pmd++, addr = next, addr != end);
+
+       return pages;
 }
 
-static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+static inline unsigned long change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
 {
        pud_t *pud;
        unsigned long next;
+       unsigned long pages = 0;
 
        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
-               change_pmd_range(vma, pud, addr, next, newprot,
+               pages += change_pmd_range(vma, pud, addr, next, newprot,
                                 dirty_accountable);
        } while (pud++, addr = next, addr != end);
+
+       return pages;
 }
 
-static void change_protection(struct vm_area_struct *vma,
+static unsigned long change_protection_range(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
 {
@@ -127,6 +140,7 @@ static void change_protection(struct vm_area_struct *vma,
        pgd_t *pgd;
        unsigned long next;
        unsigned long start = addr;
+       unsigned long pages = 0;
 
        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
@@ -135,10 +149,30 @@ static void change_protection(struct vm_area_struct *vma,
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               change_pud_range(vma, pgd, addr, next, newprot,
+               pages += change_pud_range(vma, pgd, addr, next, newprot,
                                 dirty_accountable);
        } while (pgd++, addr = next, addr != end);
 
        flush_tlb_range(vma, start, end);
+
+       return pages;
 }
+
+unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
+                      unsigned long end, pgprot_t newprot,
+                      int dirty_accountable)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long pages;
+
+       mmu_notifier_invalidate_range_start(mm, start, end);
+       if (is_vm_hugetlb_page(vma))
+               pages = hugetlb_change_protection(vma, start, end, newprot);
+       else
+               pages = change_protection_range(vma, start, end, newprot, dirty_accountable);
+       mmu_notifier_invalidate_range_end(mm, start, end);
+
+       return pages;
+}
 
 int
@@ -213,12 +247,8 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
                dirty_accountable = 1;
        }
 
-       mmu_notifier_invalidate_range_start(mm, start, end);
-       if (is_vm_hugetlb_page(vma))
-               hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
-       else
-               change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
-       mmu_notifier_invalidate_range_end(mm, start, end);
+       change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
+
        vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
        vm_stat_account(mm, newflags, vma->vm_file, nrpages);
        perf_event_mmap(vma);