forked from luck/tmp_suning_uos_patched
kcsan: Add option for verbose reporting
Adds CONFIG_KCSAN_VERBOSE to optionally enable more verbose reports. Currently, information about the reporting task's held locks and IRQ trace events is shown, if they are enabled.

Signed-off-by: Marco Elver <elver@google.com>
Suggested-by: Qian Cai <cai@lca.pw>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
This commit is contained in:
parent
48b1fc190a
commit
2402d0eae5
|
@ -18,8 +18,8 @@
|
|||
#include "kcsan.h"
|
||||
|
||||
static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
|
||||
static unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
|
||||
static unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
|
||||
unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
|
||||
unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
|
||||
static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
|
||||
static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);
|
||||
|
||||
|
|
|
@ -13,6 +13,9 @@
|
|||
/* The number of adjacent watchpoints to check. */
|
||||
#define KCSAN_CHECK_ADJACENT 1
|
||||
|
||||
extern unsigned int kcsan_udelay_task;
|
||||
extern unsigned int kcsan_udelay_interrupt;
|
||||
|
||||
/*
|
||||
* Globally enable and disable KCSAN.
|
||||
*/
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
#include <linux/debug_locks.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/lockdep.h>
|
||||
|
@ -31,7 +33,26 @@ static struct {
|
|||
int cpu_id;
|
||||
unsigned long stack_entries[NUM_STACK_ENTRIES];
|
||||
int num_stack_entries;
|
||||
} other_info = { .ptr = NULL };
|
||||
|
||||
/*
|
||||
* Optionally pass @current. Typically we do not need to pass @current
|
||||
* via @other_info since just @task_pid is sufficient. Passing @current
|
||||
* has additional overhead.
|
||||
*
|
||||
* To safely pass @current, we must either use get_task_struct/
|
||||
* put_task_struct, or stall the thread that populated @other_info.
|
||||
*
|
||||
* We cannot rely on get_task_struct/put_task_struct in case
|
||||
* release_report() races with a task being released, and would have to
|
||||
* free it in release_report(). This may result in deadlock if we want
|
||||
* to use KCSAN on the allocators.
|
||||
*
|
||||
* Since we also want to reliably print held locks for
|
||||
* CONFIG_KCSAN_VERBOSE, the current implementation stalls the thread
|
||||
* that populated @other_info until it has been consumed.
|
||||
*/
|
||||
struct task_struct *task;
|
||||
} other_info;
|
||||
|
||||
/*
|
||||
* Information about reported races; used to rate limit reporting.
|
||||
|
@ -245,6 +266,16 @@ static int sym_strcmp(void *addr1, void *addr2)
|
|||
return strncmp(buf1, buf2, sizeof(buf1));
|
||||
}
|
||||
|
||||
/*
 * Print extra diagnostics about @task for a verbose KCSAN report: the
 * locks it currently holds (via lockdep) and its IRQ trace events.
 * Per the commit description, this output is only produced when the
 * corresponding debugging facilities are enabled.
 *
 * @task may be NULL (e.g. no task information was recorded for the
 * other thread); in that case nothing is printed.
 */
static void print_verbose_info(struct task_struct *task)
{
	if (!task)
		return;

	/* Blank line to visually separate the verbose section of the report. */
	pr_err("\n");
	debug_show_held_locks(task);
	print_irqtrace_events(task);
}
|
||||
|
||||
/*
|
||||
* Returns true if a report was generated, false otherwise.
|
||||
*/
|
||||
|
@ -319,6 +350,9 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type,
|
|||
other_info.num_stack_entries - other_skipnr,
|
||||
0);
|
||||
|
||||
if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
|
||||
print_verbose_info(other_info.task);
|
||||
|
||||
pr_err("\n");
|
||||
pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
|
||||
get_access_type(access_type), ptr, size,
|
||||
|
@ -340,6 +374,9 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type,
|
|||
stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr,
|
||||
0);
|
||||
|
||||
if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
|
||||
print_verbose_info(current);
|
||||
|
||||
/* Print report footer. */
|
||||
pr_err("\n");
|
||||
pr_err("Reported by Kernel Concurrency Sanitizer on:\n");
|
||||
|
@ -357,6 +394,67 @@ static void release_report(unsigned long *flags, enum kcsan_report_type type)
|
|||
spin_unlock_irqrestore(&report_lock, *flags);
|
||||
}
|
||||
|
||||
/*
 * Sets @other_info.task and awaits consumption of @other_info.
 *
 * Stalls the current thread (the one that populated @other_info) until the
 * reporting thread has consumed it, so that verbose reports can reliably
 * print this task's held locks (see CONFIG_KCSAN_VERBOSE).
 *
 * Precondition: report_lock is held.
 * Postcondition: report_lock is held.
 */
static void
set_other_info_task_blocking(unsigned long *flags, const volatile void *ptr)
{
	/*
	 * We may be instrumenting a code-path where current->state is already
	 * something other than TASK_RUNNING.
	 */
	const bool is_running = current->state == TASK_RUNNING;
	/*
	 * To avoid deadlock in case we are in an interrupt here and this is a
	 * race with a task on the same CPU (KCSAN_INTERRUPT_WATCHER), provide a
	 * timeout to ensure this works in all contexts.
	 *
	 * Await approximately the worst case delay of the reporting thread (if
	 * we are not interrupted).
	 */
	int timeout = max(kcsan_udelay_task, kcsan_udelay_interrupt);

	other_info.task = current;
	do {
		if (is_running) {
			/*
			 * Let lockdep know the real task is sleeping, to print
			 * the held locks (recall we turned lockdep off, so
			 * locking/unlocking @report_lock won't be recorded).
			 */
			set_current_state(TASK_UNINTERRUPTIBLE);
		}
		/* Drop the lock so the reporting thread can take it. */
		spin_unlock_irqrestore(&report_lock, *flags);
		/*
		 * We cannot call schedule() since we also cannot reliably
		 * determine if sleeping here is permitted -- see in_atomic().
		 */

		udelay(1);
		spin_lock_irqsave(&report_lock, *flags);
		if (timeout-- < 0) {
			/*
			 * Abort. Reset other_info.task to NULL, since it
			 * appears the other thread is still going to consume
			 * it. It will result in no verbose info printed for
			 * this task.
			 */
			other_info.task = NULL;
			break;
		}
		/*
		 * If neither @ptr nor @current matches, then our information
		 * has been consumed and we may continue. If not, retry.
		 */
	} while (other_info.ptr == ptr && other_info.task == current);
	if (is_running)
		set_current_state(TASK_RUNNING);
}
|
||||
|
||||
/*
|
||||
* Depending on the report type either sets other_info and returns false, or
|
||||
* acquires the matching other_info and returns true. If other_info is not
|
||||
|
@ -388,6 +486,9 @@ static bool prepare_report(unsigned long *flags, const volatile void *ptr,
|
|||
other_info.cpu_id = cpu_id;
|
||||
other_info.num_stack_entries = stack_trace_save(other_info.stack_entries, NUM_STACK_ENTRIES, 1);
|
||||
|
||||
if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
|
||||
set_other_info_task_blocking(flags, ptr);
|
||||
|
||||
spin_unlock_irqrestore(&report_lock, *flags);
|
||||
|
||||
/*
|
||||
|
|
|
@ -20,6 +20,19 @@ menuconfig KCSAN
|
|||
|
||||
if KCSAN
|
||||
|
||||
# Opt-in extra detail (held locks, IRQ trace events) in KCSAN race reports.
# Depends on PROVE_LOCKING — presumably so lockdep actually tracks the held
# locks that the verbose report prints; confirm against kernel/kcsan/report.c.
config KCSAN_VERBOSE
	bool "Show verbose reports with more information about system state"
	depends on PROVE_LOCKING
	help
	  If enabled, reports show more information about the system state that
	  may help better analyze and debug races. This includes held locks and
	  IRQ trace events.

	  While this option should generally be benign, we call into more
	  external functions on report generation; if a race report is
	  generated from any one of them, system stability may suffer due to
	  deadlocks or recursion. If in doubt, say N.
|
||||
|
||||
config KCSAN_DEBUG
|
||||
bool "Debugging of KCSAN internals"
|
||||
|
||||
|
|
Loading…
Reference in New Issue
Block a user