Merge tag 'x86_entry_for_5.8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 entry fixes from Borislav Petkov:
 "This is the x86/entry urgent pile which has accumulated since the
  merge window. It is not the smallest, but considering the almost
  complete rewrite of the entry core, the amount of follow-up fixes is
  somewhat higher than usual, which is to be expected.

  Peter Zijlstra says:

   'These patches address a number of instrumentation issues that were
    found after the x86/entry overhaul. When combined with rcu/urgent
    and objtool/urgent, these patches make UBSAN/KASAN/KCSAN happy
    again.

    Part of making this all work is bumping the minimum GCC version for
    KASAN builds to gcc-8.3: the __no_sanitize_address function
    attribute is broken in GCC releases before that.

    No known GCC version has a working __no_sanitize_undefined.
    However, because the only noinstr violation that results from this
    happens when undefined behaviour is found, we treat it like WARN:
    we allow it to violate the noinstr rules in order to get the
    warning out.'"

* tag 'x86_entry_for_5.8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/entry: Fix #UD vs WARN more
  x86/entry: Increase entry_stack size to a full page
  x86/entry: Fixup bad_iret vs noinstr
  objtool: Don't consider vmlinux a C-file
  kasan: Fix required compiler version
  compiler_attributes.h: Support no_sanitize_undefined check with GCC 4
  x86/entry, bug: Comment the instrumentation_begin() usage for WARN()
  x86/entry, ubsan, objtool: Whitelist __ubsan_handle_*()
  x86/entry, cpumask: Provide non-instrumented variant of cpu_is_offline()
  compiler_types.h: Add __no_sanitize_{address,undefined} to noinstr
  kasan: Bump required compiler version
  x86, kcsan: Add __no_kcsan to noinstr
  kcsan: Remove __no_kcsan_or_inline
  x86, kcsan: Remove __no_kcsan_or_inline usage
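The noinstr-versus-WARN rule described above amounts to a simple coding pattern: a noinstr function may open an instrumentation_begin()/instrumentation_end() window purely to get a diagnostic out. The sketch below is illustrative only and is not code from these patches; example_noinstr_path() and its argument are made up.

    #include <linux/types.h>
    #include <linux/compiler.h>     /* noinstr, instrumentation_begin/end (as of v5.8) */
    #include <linux/bug.h>          /* WARN_ON_ONCE() */

    /* Hypothetical noinstr path that tolerates instrumentation only to warn. */
    static noinstr void example_noinstr_path(bool corrupted)
    {
            if (likely(!corrupted))
                    return;

            /*
             * Strictly a noinstr violation, but if we are about to warn
             * anyway, getting the message out matters more than staying
             * uninstrumented.
             */
            instrumentation_begin();
            WARN_ON_ONCE(corrupted);
            instrumentation_end();
    }

This is the same trade-off objtool codifies below by whitelisting the __ubsan_handle_*() calls: once something is already known to be broken, reporting it wins over keeping the region instrumentation-free.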
commit a358505d8a
@@ -114,12 +114,6 @@ the below options are available:
   To dynamically limit for which functions to generate reports, see the
   `DebugFS interface`_ blacklist/whitelist feature.
 
-  For ``__always_inline`` functions, replace ``__always_inline`` with
-  ``__no_kcsan_or_inline`` (which implies ``__always_inline``)::
-
-    static __no_kcsan_or_inline void foo(void) {
-        ...
-
 * To disable data race detection for a particular compilation unit, add to the
   ``Makefile``::
 
@@ -201,12 +201,8 @@ arch_test_and_change_bit(long nr, volatile unsigned long *addr)
         return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
 }
 
-static __no_kcsan_or_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
+static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
 {
-        /*
-         * Because this is a plain access, we need to disable KCSAN here to
-         * avoid double instrumentation via instrumented bitops.
-         */
         return ((1UL << (nr & (BITS_PER_LONG-1))) &
                 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
 }
@@ -75,6 +75,12 @@ do {                                                            \
         unreachable();                                          \
 } while (0)
 
+/*
+ * This instrumentation_begin() is strictly speaking incorrect; but it
+ * suppresses the complaints from WARN()s in noinstr code. If such a WARN()
+ * were to trigger, we'd rather wreck the machine in an attempt to get the
+ * message out than not know about it.
+ */
 #define __WARN_FLAGS(flags)                                     \
 do {                                                            \
         instrumentation_begin();                                \
@@ -11,5 +11,23 @@ extern cpumask_var_t cpu_sibling_setup_mask;
 
 extern void setup_cpu_local_masks(void);
 
+/*
+ * NMI and MCE exceptions need cpu_is_offline() _really_ early,
+ * provide an arch_ special for them to avoid instrumentation.
+ */
+#if NR_CPUS > 1
+static __always_inline bool arch_cpu_online(int cpu)
+{
+        return arch_test_bit(cpu, cpumask_bits(cpu_online_mask));
+}
+#else
+static __always_inline bool arch_cpu_online(int cpu)
+{
+        return cpu == 0;
+}
+#endif
+
+#define arch_cpu_is_offline(cpu)        unlikely(!arch_cpu_online(cpu))
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_CPUMASK_H */
@@ -370,7 +370,7 @@ struct x86_hw_tss {
 #define IO_BITMAP_OFFSET_INVALID        (__KERNEL_TSS_LIMIT + 1)
 
 struct entry_stack {
-        unsigned long           words[64];
+        char    stack[PAGE_SIZE];
 };
 
 struct entry_stack_page {
@@ -1083,7 +1083,7 @@ static noinstr bool mce_check_crashing_cpu(void)
 {
         unsigned int cpu = smp_processor_id();
 
-        if (cpu_is_offline(cpu) ||
+        if (arch_cpu_is_offline(cpu) ||
             (crashing_cpu != -1 && crashing_cpu != cpu)) {
                 u64 mcgstatus;
 
@@ -478,7 +478,7 @@ static DEFINE_PER_CPU(unsigned long, nmi_dr7);
 
 DEFINE_IDTENTRY_RAW(exc_nmi)
 {
-        if (IS_ENABLED(CONFIG_SMP) && cpu_is_offline(smp_processor_id()))
+        if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id()))
                 return;
 
         if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
@@ -84,17 +84,16 @@ static inline void cond_local_irq_disable(struct pt_regs *regs)
                 local_irq_disable();
 }
 
-int is_valid_bugaddr(unsigned long addr)
+__always_inline int is_valid_bugaddr(unsigned long addr)
 {
-        unsigned short ud;
-
         if (addr < TASK_SIZE_MAX)
                 return 0;
 
-        if (get_kernel_nofault(ud, (unsigned short *)addr))
-                return 0;
-
-        return ud == INSN_UD0 || ud == INSN_UD2;
+        /*
+         * We got #UD, if the text isn't readable we'd have gotten
+         * a different exception.
+         */
+        return *(unsigned short *)addr == INSN_UD2;
 }
 
 static nokprobe_inline int
@@ -216,40 +215,45 @@ static inline void handle_invalid_op(struct pt_regs *regs)
                       ILL_ILLOPN, error_get_trap_addr(regs));
 }
 
+static noinstr bool handle_bug(struct pt_regs *regs)
+{
+        bool handled = false;
+
+        if (!is_valid_bugaddr(regs->ip))
+                return handled;
+
+        /*
+         * All lies, just get the WARN/BUG out.
+         */
+        instrumentation_begin();
+        /*
+         * Since we're emulating a CALL with exceptions, restore the interrupt
+         * state to what it was at the exception site.
+         */
+        if (regs->flags & X86_EFLAGS_IF)
+                raw_local_irq_enable();
+        if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
+                regs->ip += LEN_UD2;
+                handled = true;
+        }
+        if (regs->flags & X86_EFLAGS_IF)
+                raw_local_irq_disable();
+        instrumentation_end();
+
+        return handled;
+}
+
 DEFINE_IDTENTRY_RAW(exc_invalid_op)
 {
         bool rcu_exit;
 
         /*
-         * Handle BUG/WARN like NMIs instead of like normal idtentries:
-         * if we bugged/warned in a bad RCU context, for example, the last
-         * thing we want is to BUG/WARN again in the idtentry code, ad
-         * infinitum.
+         * We use UD2 as a short encoding for 'CALL __WARN', as such
+         * handle it before exception entry to avoid recursive WARN
+         * in case exception entry is the one triggering WARNs.
          */
-        if (!user_mode(regs) && is_valid_bugaddr(regs->ip)) {
-                enum bug_trap_type type;
-
-                nmi_enter();
-                instrumentation_begin();
-                trace_hardirqs_off_finish();
-                type = report_bug(regs->ip, regs);
-                if (regs->flags & X86_EFLAGS_IF)
-                        trace_hardirqs_on_prepare();
-                instrumentation_end();
-                nmi_exit();
-
-                if (type == BUG_TRAP_TYPE_WARN) {
-                        /* Skip the ud2. */
-                        regs->ip += LEN_UD2;
-                        return;
-                }
-
-                /*
-                 * Else, if this was a BUG and report_bug returns or if this
-                 * was just a normal #UD, we want to continue onward and
-                 * crash.
-                 */
-        }
+        if (!user_mode(regs) && handle_bug(regs))
+                return;
 
         rcu_exit = idtentry_enter_cond_rcu(regs);
         instrumentation_begin();
@@ -691,13 +695,13 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
                 (struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
         /* Copy the IRET target to the temporary storage. */
-        memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
+        __memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
 
         /* Copy the remainder of the stack from the current stack. */
-        memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));
+        __memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));
 
         /* Update the entry stack */
-        memcpy(new_stack, &tmp, sizeof(tmp));
+        __memcpy(new_stack, &tmp, sizeof(tmp));
 
         BUG_ON(!user_mode(&new_stack->regs));
         return new_stack;
@@ -8,6 +8,8 @@
 #include <asm/alternative-asm.h>
 #include <asm/export.h>
 
+.pushsection .noinstr.text, "ax"
+
 /*
  * We build a jump to memcpy_orig by default which gets NOPped out on
  * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
@@ -184,6 +186,8 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
         retq
 SYM_FUNC_END(memcpy_orig)
 
+.popsection
+
 #ifndef CONFIG_UML
 
 MCSAFE_TEST_CTL
@@ -33,6 +33,14 @@
 #define __no_sanitize_thread
 #endif
 
+#if __has_feature(undefined_behavior_sanitizer)
+/* GCC does not have __SANITIZE_UNDEFINED__ */
+#define __no_sanitize_undefined \
+                __attribute__((no_sanitize("undefined")))
+#else
+#define __no_sanitize_undefined
+#endif
+
 /*
  * Not all versions of clang implement the the type-generic versions
  * of the builtin overflow checkers. Fortunately, clang implements
@@ -150,6 +150,12 @@
 #define __no_sanitize_thread
 #endif
 
+#if __has_attribute(__no_sanitize_undefined__)
+#define __no_sanitize_undefined __attribute__((no_sanitize_undefined))
+#else
+#define __no_sanitize_undefined
+#endif
+
 #if GCC_VERSION >= 50100
 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
 #endif
@@ -40,6 +40,7 @@
 # define __GCC4_has_attribute___noclone__ 1
 # define __GCC4_has_attribute___nonstring__ 0
 # define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
+# define __GCC4_has_attribute___no_sanitize_undefined__ (__GNUC_MINOR__ >= 9)
 # define __GCC4_has_attribute___fallthrough__ 0
 #endif
 
@@ -118,10 +118,6 @@ struct ftrace_likely_data {
 #define notrace                 __attribute__((__no_instrument_function__))
 #endif
 
-/* Section for code which can't be instrumented at all */
-#define noinstr                                                         \
-        noinline notrace __attribute((__section__(".noinstr.text")))
-
 /*
  * it doesn't make sense on ARM (currently the only user of __naked)
  * to trace naked functions because then mcount is called without
@@ -193,16 +189,18 @@ struct ftrace_likely_data {
 
 #define __no_kcsan __no_sanitize_thread
 #ifdef __SANITIZE_THREAD__
-# define __no_kcsan_or_inline __no_kcsan notrace __maybe_unused
-# define __no_sanitize_or_inline __no_kcsan_or_inline
-#else
-# define __no_kcsan_or_inline __always_inline
+# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused
 #endif
 
 #ifndef __no_sanitize_or_inline
 #define __no_sanitize_or_inline __always_inline
 #endif
 
+/* Section for code which can't be instrumented at all */
+#define noinstr                                                         \
+        noinline notrace __attribute((__section__(".noinstr.text")))   \
+        __no_kcsan __no_sanitize_address
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
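For context, __no_sanitize_or_inline (kept above, with __always_inline as the fallback) is intended for tiny helpers that must stay uninstrumented when a sanitizer is active and should simply inline otherwise. A hypothetical example, not taken from these patches; raw_read_example() is made up:

    #include <linux/compiler.h>

    /* Illustrative only: a plain, deliberately unchecked read helper. */
    static __no_sanitize_or_inline unsigned long raw_read_example(const unsigned long *p)
    {
            return *p;
    }

With the sanitizer enabled, the helper is kept out-of-line and unannotated for instrumentation; without it, the attribute degrades to __always_inline and costs nothing.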
@@ -15,11 +15,15 @@ config CC_HAS_KASAN_GENERIC
 config CC_HAS_KASAN_SW_TAGS
         def_bool $(cc-option, -fsanitize=kernel-hwaddress)
 
+config CC_HAS_WORKING_NOSANITIZE_ADDRESS
+        def_bool !CC_IS_GCC || GCC_VERSION >= 80300
+
 config KASAN
         bool "KASAN: runtime memory debugger"
         depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
                    (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)
         depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
+        depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
         help
           Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
           designed to find out-of-bounds accesses and use-after-free bugs.
@@ -2190,10 +2190,36 @@ static inline const char *call_dest_name(struct instruction *insn)
         return "{dynamic}";
 }
 
+static inline bool noinstr_call_dest(struct symbol *func)
+{
+        /*
+         * We can't deal with indirect function calls at present;
+         * assume they're instrumented.
+         */
+        if (!func)
+                return false;
+
+        /*
+         * If the symbol is from a noinstr section; we good.
+         */
+        if (func->sec->noinstr)
+                return true;
+
+        /*
+         * The __ubsan_handle_*() calls are like WARN(), they only happen when
+         * something 'BAD' happened. At the risk of taking the machine down,
+         * let them proceed to get the message out.
+         */
+        if (!strncmp(func->name, "__ubsan_handle_", 15))
+                return true;
+
+        return false;
+}
+
 static int validate_call(struct instruction *insn, struct insn_state *state)
 {
         if (state->noinstr && state->instr <= 0 &&
-            (!insn->call_dest || !insn->call_dest->sec->noinstr)) {
+            !noinstr_call_dest(insn->call_dest)) {
                 WARN_FUNC("call to %s() leaves .noinstr.text section",
                           insn->sec, insn->offset, call_dest_name(insn));
                 return 1;
@@ -2746,7 +2772,7 @@ int check(const char *_objname, bool orc)
 
         INIT_LIST_HEAD(&file.insn_list);
         hash_init(file.insn_hash);
-        file.c_file = find_section_by_name(file.elf, ".comment");
+        file.c_file = !vmlinux && find_section_by_name(file.elf, ".comment");
         file.ignore_unreachables = no_unreachable;
         file.hints = false;
 