mm/arm64: use general page fault accounting
Use the general page fault accounting by passing regs into handle_mm_fault(). It naturally solves the issue of multiple page fault accounting when a page fault is retried. To do this, we pass the pt_regs pointer into __do_page_fault().

Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Will Deacon <will@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Link: http://lkml.kernel.org/r/20200707225021.200906-6-peterx@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent 79fea6c654
commit 6a1bb025d2
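For context: the "general page fault accounting" this patch switches to is the helper added to mm/memory.c earlier in the same series ("mm: do page fault accounting in handle_mm_fault"), which handle_mm_fault() calls once per completed fault. The sketch below shows the shape of that logic as I read the series; treat the exact names and checks as illustrative of that earlier patch, not as part of this diff.

	static inline void mm_account_fault(struct pt_regs *regs,
					    unsigned long address, unsigned int flags,
					    vm_fault_t ret)
	{
		bool major;

		/*
		 * Only completed faults are accounted. A VM_FAULT_RETRY fault
		 * comes back and is counted on its final attempt, which is what
		 * fixes the double counting the commit message refers to.
		 */
		if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY))
			return;

		/* A fault that ever needed a retry is treated as major. */
		major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);

		if (major)
			current->maj_flt++;
		else
			current->min_flt++;

		/*
		 * Callers with no user context (e.g. gup) pass regs == NULL:
		 * update the task counters but skip the perf software events.
		 */
		if (!regs)
			return;

		if (major)
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
		else
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
	}

Because retried faults are skipped until they complete, the per-architecture major/minor bookkeeping removed below becomes redundant; passing regs instead of NULL into handle_mm_fault() is all the arch fault path has to do.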
arch/arm64/mm/fault.c

@@ -404,7 +404,8 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
 #define VM_FAULT_BADACCESS	0x020000
 
 static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
-			   unsigned int mm_flags, unsigned long vm_flags)
+			   unsigned int mm_flags, unsigned long vm_flags,
+			   struct pt_regs *regs)
 {
 	struct vm_area_struct *vma = find_vma(mm, addr);
 
@@ -428,7 +429,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
 	 */
 	if (!(vma->vm_flags & vm_flags))
 		return VM_FAULT_BADACCESS;
-	return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, NULL);
+	return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, regs);
 }
 
 static bool is_el0_instruction_abort(unsigned int esr)
@@ -450,7 +451,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 {
 	const struct fault_info *inf;
 	struct mm_struct *mm = current->mm;
-	vm_fault_t fault, major = 0;
+	vm_fault_t fault;
 	unsigned long vm_flags = VM_ACCESS_FLAGS;
 	unsigned int mm_flags = FAULT_FLAG_DEFAULT;
 
@@ -516,8 +517,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, mm_flags, vm_flags);
-	major |= fault & VM_FAULT_MAJOR;
+	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, regs);
 
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {
@@ -538,25 +538,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	 * Handle the "normal" (no error) case first.
 	 */
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
-			      VM_FAULT_BADACCESS)))) {
-		/*
-		 * Major/minor page fault accounting is only done
-		 * once. If we go through a retry, it is extremely
-		 * likely that the page will be found in page cache at
-		 * that point.
-		 */
-		if (major) {
-			current->maj_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
-				      addr);
-		} else {
-			current->min_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
-				      addr);
-		}
-
+			      VM_FAULT_BADACCESS))))
 		return 0;
-	}
 
 	/*
 	 * If we are in kernel mode at this point, we have no context to