kcsan: Add kcsan_set_access_mask() support
When setting up an access mask with kcsan_set_access_mask(), KCSAN will
only report races if concurrent changes to bits set in access_mask are
observed. Conveying access_mask via a separate call avoids introducing
overhead in the common-case fast-path.

Acked-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 81af89e158
parent b738f6169f
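To make the intended usage concrete, here is a minimal, hypothetical caller
sketch (not part of this commit; __kcsan_check_access() and
KCSAN_ACCESS_ASSERT come from the earlier KCSAN assert-style checking
patches, and 'flags' and 'MASK' are placeholder names):

        /* Assert that no concurrent writer changes the MASK bits of flags. */
        kcsan_set_access_mask(MASK);    /* only value changes to MASK bits are reported */
        __kcsan_check_access(&flags, sizeof(flags), KCSAN_ACCESS_ASSERT);
        kcsan_set_access_mask(0);       /* restore default checking for this context */

Because the mask lives in the per-context kcsan_ctx (see the diff below), the
common fast-path pays no extra cost when no mask is set.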
diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
@@ -68,6 +68,16 @@ void kcsan_flat_atomic_end(void);
  */
 void kcsan_atomic_next(int n);
 
+/**
+ * kcsan_set_access_mask - set access mask
+ *
+ * Set the access mask for all accesses for the current context if non-zero.
+ * Only value changes to bits set in the mask will be reported.
+ *
+ * @mask bitmask
+ */
+void kcsan_set_access_mask(unsigned long mask);
+
 #else /* CONFIG_KCSAN */
 
 static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
@@ -78,6 +88,7 @@ static inline void kcsan_nestable_atomic_end(void) { }
 static inline void kcsan_flat_atomic_begin(void) { }
 static inline void kcsan_flat_atomic_end(void) { }
 static inline void kcsan_atomic_next(int n) { }
+static inline void kcsan_set_access_mask(unsigned long mask) { }
 
 #endif /* CONFIG_KCSAN */
 
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h
@@ -35,6 +35,11 @@ struct kcsan_ctx {
         */
        int atomic_nest_count;
        bool in_flat_atomic;
+
+       /*
+        * Access mask for all accesses if non-zero.
+        */
+       unsigned long access_mask;
 };
 
 /**
diff --git a/init/init_task.c b/init/init_task.c
@@ -167,6 +167,7 @@ struct task_struct init_task
                .atomic_next            = 0,
                .atomic_nest_count      = 0,
                .in_flat_atomic         = false,
+               .access_mask            = 0,
        },
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
@@ -39,6 +39,7 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
        .atomic_next            = 0,
        .atomic_nest_count      = 0,
        .in_flat_atomic         = false,
+       .access_mask            = 0,
 };
 
 /*
@@ -298,6 +299,15 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 
        if (!kcsan_is_enabled())
                return;
+
+       /*
+        * The access_mask check relies on value-change comparison. To avoid
+        * reporting a race where e.g. the writer set up the watchpoint, but the
+        * reader has access_mask!=0, we have to ignore the found watchpoint.
+        */
+       if (get_ctx()->access_mask != 0)
+               return;
+
        /*
         * Consume the watchpoint as soon as possible, to minimize the chances
         * of !consumed. Consuming the watchpoint must always be guarded by
@@ -341,6 +351,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
                u32 _4;
                u64 _8;
        } expect_value;
+       unsigned long access_mask;
        enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
        unsigned long ua_flags = user_access_save();
        unsigned long irq_flags;
@@ -435,18 +446,27 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
         * Re-read value, and check if it is as expected; if not, we infer a
         * racy access.
         */
+       access_mask = get_ctx()->access_mask;
        switch (size) {
        case 1:
                expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
+               if (access_mask)
+                       expect_value._1 &= (u8)access_mask;
                break;
        case 2:
                expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
+               if (access_mask)
+                       expect_value._2 &= (u16)access_mask;
                break;
        case 4:
                expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
+               if (access_mask)
+                       expect_value._4 &= (u32)access_mask;
                break;
        case 8:
                expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
+               if (access_mask)
+                       expect_value._8 &= (u64)access_mask;
                break;
        default:
                break; /* ignore; we do not diff the values */
@@ -460,11 +480,20 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
        if (!remove_watchpoint(watchpoint)) {
                /*
                 * Depending on the access type, map a value_change of MAYBE to
-                * TRUE (require reporting).
+                * TRUE (always report) or FALSE (never report).
                 */
-               if (value_change == KCSAN_VALUE_CHANGE_MAYBE && (size > 8 || is_assert)) {
-                       /* Always assume a value-change. */
-                       value_change = KCSAN_VALUE_CHANGE_TRUE;
+               if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
+                       if (access_mask != 0) {
+                               /*
+                                * For access with access_mask, we require a
+                                * value-change, as it is likely that races on
+                                * ~access_mask bits are expected.
+                                */
+                               value_change = KCSAN_VALUE_CHANGE_FALSE;
+                       } else if (size > 8 || is_assert) {
+                               /* Always assume a value-change. */
+                               value_change = KCSAN_VALUE_CHANGE_TRUE;
+                       }
                }
 
                /*
@@ -622,6 +651,12 @@ void kcsan_atomic_next(int n)
 }
 EXPORT_SYMBOL(kcsan_atomic_next);
 
+void kcsan_set_access_mask(unsigned long mask)
+{
+       get_ctx()->access_mask = mask;
+}
+EXPORT_SYMBOL(kcsan_set_access_mask);
+
 void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
 {
        check_access(ptr, size, type);
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
@@ -98,6 +98,11 @@ enum kcsan_value_change {
         */
        KCSAN_VALUE_CHANGE_MAYBE,
 
+       /*
+        * Did not observe a value-change, and it is invalid to report the race.
+        */
+       KCSAN_VALUE_CHANGE_FALSE,
+
        /*
         * The value was observed to change, and the race should be reported.
         */
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
@@ -132,6 +132,9 @@ static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
 static bool
 skip_report(enum kcsan_value_change value_change, unsigned long top_frame)
 {
+       /* Should never get here if value_change==FALSE. */
+       WARN_ON_ONCE(value_change == KCSAN_VALUE_CHANGE_FALSE);
+
        /*
         * The first call to skip_report always has value_change==TRUE, since we
         * cannot know the value written of an instrumented access. For the 2nd
@@ -493,7 +496,15 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type,
 
        kcsan_disable_current();
        if (prepare_report(&flags, ptr, size, access_type, cpu_id, type)) {
-               if (print_report(ptr, size, access_type, value_change, cpu_id, type) && panic_on_warn)
+               /*
+                * Never report if value_change is FALSE, only if it is
+                * either TRUE or MAYBE. In case of MAYBE, further filtering may
+                * be done once we know the full stack trace in print_report().
+                */
+               bool reported = value_change != KCSAN_VALUE_CHANGE_FALSE &&
+                               print_report(ptr, size, access_type, value_change, cpu_id, type);
+
+               if (reported && panic_on_warn)
                        panic("panic_on_warn set ...\n");
 
                release_report(&flags, type);
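For readers skimming the diff, the core of the masked value-change comparison
in kcsan_setup_watchpoint() can be illustrated as a standalone C sketch
(illustrative only; the function and variable names below are not from the
kernel):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the masked comparison: XOR the value observed when the
 * watchpoint was set up with the value re-read afterwards; if an access
 * mask is set, keep only the masked bits. A non-zero result means the
 * bits of interest changed, i.e. a reportable race.
 */
static bool masked_value_changed(uint64_t before, uint64_t after,
                                 uint64_t access_mask)
{
        uint64_t diff = before ^ after;

        if (access_mask)
                diff &= access_mask;
        return diff != 0;
}

int main(void)
{
        /* Change to bit 0 only while the mask watches bit 1: ignored. */
        printf("%d\n", masked_value_changed(0x2, 0x3, 0x2)); /* prints 0 */
        /* Change to bit 1 while the mask watches bit 1: reported. */
        printf("%d\n", masked_value_changed(0x0, 0x2, 0x2)); /* prints 1 */
        return 0;
}

Note that, as in the patch, an access_mask of zero means "no mask set", so
every observed value change remains reportable.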