perf/x86: Further optimize copy_from_user_nmi()
Now that we can deal with nested NMI due to IRET re-enabling NMIs, and can
deal with faults from NMI by making sure we preserve CR2 over NMIs, we can
in fact simply access user-space memory from NMI context.

So rewrite copy_from_user_nmi() to use __copy_from_user_inatomic() and
rework the fault path to do the minimal required work before taking the
in_atomic() fault handler.

In particular avoid perf_sw_event(), which would make perf recurse on
itself (it should be harmless, as our recursion protections should be
able to deal with this -- but why tempt fate).

Also rename notify_page_fault() to kprobes_fault(), as that is a much
better name; there is no notifier in it and it is specific to kprobes.

Don measured that his worst case NMI path shrunk from ~300K cycles to
~150K cycles.

Cc: Stephane Eranian <eranian@google.com>
Cc: jmario@redhat.com
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: dave.hansen@linux.intel.com
Tested-by: Don Zickus <dzickus@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20131024105206.GM2490@laptop.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 2c42cfbfe1
commit e00b12e64b
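For context before the diff, a hedged sketch (not part of this commit) of the kind of NMI-context caller this helper serves: a PMI handler sampling a few words of the interrupted task's user stack. The names sample_user_stack() and record_sample() are hypothetical.

/*
 * Hypothetical caller, for illustration only. With this patch the copy
 * is a single __copy_from_user_inatomic() under pagefault_disable()
 * instead of a page-at-a-time __get_user_pages_fast() + kmap_atomic()
 * loop.
 */
static void sample_user_stack(struct pt_regs *regs)	/* hypothetical */
{
	unsigned long buf[16];
	unsigned long copied;

	/* returns the number of bytes actually copied; may be short */
	copied = copy_from_user_nmi(buf, (void __user *)regs->sp,
				    sizeof(buf));
	if (copied != sizeof(buf))
		return;			/* fault or bad range: drop the sample */

	record_sample(buf, copied);	/* hypothetical consumer */
}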
arch/x86/lib/usercopy.c
@@ -11,39 +11,26 @@
 #include <linux/sched.h>
 
 /*
- * best effort, GUP based copy_from_user() that is NMI-safe
+ * We rely on the nested NMI work to allow atomic faults from the NMI path; the
+ * nested NMI paths are careful to preserve CR2.
  */
 unsigned long
 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 {
-	unsigned long offset, addr = (unsigned long)from;
-	unsigned long size, len = 0;
-	struct page *page;
-	void *map;
-	int ret;
+	unsigned long ret;
 
 	if (__range_not_ok(from, n, TASK_SIZE))
-		return len;
-
-	do {
-		ret = __get_user_pages_fast(addr, 1, 0, &page);
-		if (!ret)
-			break;
-
-		offset = addr & (PAGE_SIZE - 1);
-		size = min(PAGE_SIZE - offset, n - len);
-
-		map = kmap_atomic(page);
-		memcpy(to, map+offset, size);
-		kunmap_atomic(map);
-		put_page(page);
-
-		len  += size;
-		to   += size;
-		addr += size;
-
-	} while (len < n);
-
-	return len;
+		return 0;
+
+	/*
+	 * Even though this function is typically called from NMI/IRQ context
+	 * disable pagefaults so that its behaviour is consistent even when
+	 * called form other contexts.
+	 */
+	pagefault_disable();
+	ret = __copy_from_user_inatomic(to, from, n);
+	pagefault_enable();
+
+	return n - ret;
 }
 EXPORT_SYMBOL_GPL(copy_from_user_nmi);
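One detail worth calling out in the hunk above: __copy_from_user_inatomic() returns the number of bytes left uncopied, while copy_from_user_nmi() keeps its old contract of returning the number of bytes copied, hence the new "return n - ret;". The fault flow that makes this safe, as I read the patch (illustrative comment, not code the patch adds):

/*
 * Illustrative control flow under pagefault_disable():
 *
 *   copy_from_user_nmi()
 *     pagefault_disable()            <- bumps the preempt count
 *     __copy_from_user_inatomic()    <- #PF on a non-present user page
 *       __do_page_fault()
 *         in_atomic() is true        <- so no mmap_sem, no sleeping
 *         bad_area_nosemaphore()     <- exception-table fixup path
 *     copy returns bytes NOT copied  <- hence "return n - ret;"
 *     pagefault_enable()
 */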
arch/x86/mm/fault.c
@@ -51,7 +51,7 @@ kmmio_fault(struct pt_regs *regs, unsigned long addr)
 	return 0;
 }
 
-static inline int __kprobes notify_page_fault(struct pt_regs *regs)
+static inline int __kprobes kprobes_fault(struct pt_regs *regs)
 {
 	int ret = 0;
 
@@ -1048,7 +1048,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
 			return;
 
 		/* kprobes don't want to hook the spurious faults: */
-		if (notify_page_fault(regs))
+		if (kprobes_fault(regs))
 			return;
 		/*
 		 * Don't take the mm semaphore here. If we fixup a prefetch
@@ -1060,8 +1060,28 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	}
 
 	/* kprobes don't want to hook the spurious faults: */
-	if (unlikely(notify_page_fault(regs)))
+	if (unlikely(kprobes_fault(regs)))
 		return;
 
+	if (unlikely(error_code & PF_RSVD))
+		pgtable_bad(regs, error_code, address);
+
+	if (static_cpu_has(X86_FEATURE_SMAP)) {
+		if (unlikely(smap_violation(error_code, regs))) {
+			bad_area_nosemaphore(regs, error_code, address);
+			return;
+		}
+	}
+
+	/*
+	 * If we're in an interrupt, have no user context or are running
+	 * in an atomic region then we must not take the fault:
+	 */
+	if (unlikely(in_atomic() || !mm)) {
+		bad_area_nosemaphore(regs, error_code, address);
+		return;
+	}
+
 	/*
 	 * It's safe to allow irq's after cr2 has been saved and the
 	 * vmalloc fault has been handled.
@@ -1078,27 +1098,8 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
 		local_irq_enable();
 	}
 
-	if (unlikely(error_code & PF_RSVD))
-		pgtable_bad(regs, error_code, address);
-
-	if (static_cpu_has(X86_FEATURE_SMAP)) {
-		if (unlikely(smap_violation(error_code, regs))) {
-			bad_area_nosemaphore(regs, error_code, address);
-			return;
-		}
-	}
-
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-	/*
-	 * If we're in an interrupt, have no user context or are running
-	 * in an atomic region then we must not take the fault:
-	 */
-	if (unlikely(in_atomic() || !mm)) {
-		bad_area_nosemaphore(regs, error_code, address);
-		return;
-	}
-
 	if (error_code & PF_WRITE)
 		flags |= FAULT_FLAG_WRITE;
 
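Putting the two fault.c hunks together, the order of checks in __do_page_fault() after this patch becomes (abbreviated sketch assembled from the hunks above):

	if (unlikely(kprobes_fault(regs)))
		return;

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(regs, error_code, address);

	if (static_cpu_has(X86_FEATURE_SMAP)) { /* ... */ }

	if (unlikely(in_atomic() || !mm)) {
		/* NMI/atomic faults bail out here, before any tracing */
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/* ... enable irqs where safe ... */

	/* only reached from sleepable context: perf cannot recurse on itself */
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);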