de8f5e4f2d
Extend lockdep to validate lock wait-type context.

The current wait-types are:

	LD_WAIT_FREE,		/* wait free, rcu etc.. */
	LD_WAIT_SPIN,		/* spin loops, raw_spinlock_t etc.. */
	LD_WAIT_CONFIG,		/* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */
	LD_WAIT_SLEEP,		/* sleeping locks, mutex_t etc.. */

where lockdep validates that the current lock (the one being acquired)
fits in the current wait-context (as generated by the held stack).

This ensures that there is no attempt to acquire mutexes while holding
spinlocks, to acquire spinlocks while holding raw_spinlocks, and so on.
In other words, it's a fancier might_sleep().

Obviously RCU made the entire ordeal more complex than a simple single
value test, because RCU can be acquired in (pretty much) any context, and
while it presents a context to nested locks, it is not the same as the
one in which it was acquired.

Therefore it is necessary to split the wait_type into two values, one
representing the acquire (outer) and one representing the nested context
(inner). For most 'normal' locks these two are the same.

[ To make static initialization easier we have the rule that:
  .outer == INV means .outer == .inner; because INV == 0. ]

It further means that it is required to find the minimal .inner of the
held stack to compare against the .outer of the new lock; because while
'normal' RCU presents a CONFIG type to nested locks, if it is taken while
already holding a SPIN type it obviously doesn't relax the rules.

Below is an example output generated by the trivial test code:

	raw_spin_lock(&foo);
	spin_lock(&bar);
	spin_unlock(&bar);
	raw_spin_unlock(&foo);

	[ BUG: Invalid wait context ]
	-----------------------------
	swapper/0/1 is trying to lock:
	ffffc90000013f20 (&bar){....}-{3:3}, at: kernel_init+0xdb/0x187
	other info that might help us debug this:
	1 lock held by swapper/0/1:
	 #0: ffffc90000013ee0 (&foo){+.+.}-{2:2}, at: kernel_init+0xd1/0x187

The way to read it is to look at the new -{n:m} part in the lock
description; -{3:3} for the attempted lock, and try to match that up to
the held locks, which in this case is the one: -{2:2}. This tells us that
the acquiring lock requires a more relaxed environment than presented by
the lock stack.

Currently only the normal locks and RCU are converted; the rest of the
lockdep users default to .inner = INV, which is ignored. More conversions
can be done when desired.

The check for spinlock_t nesting is not enabled by default. It is a
separate config option for now, as there are known problems which are
currently being addressed. The config option makes it possible to identify
these problems and to verify that the solutions found do indeed solve them.

The config switch will be removed and the checks permanently enabled once
the vast majority of issues have been addressed.

[ bigeasy: Move LD_WAIT_FREE,… out of CONFIG_LOCKDEP to avoid compile
  failure with CONFIG_DEBUG_SPINLOCK + !CONFIG_LOCKDEP ]
[ tglx: Add the config option ]

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200321113242.427089655@linutronix.de
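
To make the comparison concrete, here is a minimal userspace sketch of the
check described above, not the kernel implementation: the names
wait_context_ok and held_lock, and the exact enum encoding, are illustrative
assumptions. It shows the rule that the acquiring lock's .outer must not
exceed the minimal .inner on the held stack:

#include <stdio.h>

/* Wait types as listed above; INV == 0 so an unset entry can be ignored. */
enum ld_wait {
	LD_WAIT_INV = 0,
	LD_WAIT_FREE,
	LD_WAIT_SPIN,
	LD_WAIT_CONFIG,
	LD_WAIT_SLEEP,
};

/* One held lock; only the .inner it presents to nested locks matters. */
struct held_lock {
	enum ld_wait inner;
};

/*
 * Returns 1 if acquiring a lock whose wait type is 'outer' is valid in
 * the context presented by the held stack, 0 on an invalid wait context.
 */
static int wait_context_ok(const struct held_lock *stack, int depth,
			   enum ld_wait outer)
{
	enum ld_wait curr = LD_WAIT_SLEEP;	/* empty stack: anything goes */
	int i;

	for (i = 0; i < depth; i++) {
		/* .inner == INV means the entry is ignored */
		if (stack[i].inner != LD_WAIT_INV && stack[i].inner < curr)
			curr = stack[i].inner;	/* minimal .inner wins */
	}

	return outer <= curr;
}

int main(void)
{
	/* raw_spin_lock(&foo) held: presents a SPIN context */
	struct held_lock stack[] = { { .inner = LD_WAIT_SPIN } };

	/* spin_lock(&bar): .outer == LD_WAIT_CONFIG, stricter than allowed */
	printf("%s\n", wait_context_ok(stack, 1, LD_WAIT_CONFIG) ?
	       "ok" : "BUG: Invalid wait context");
	return 0;
}

Run as written, this prints "BUG: Invalid wait context", matching the
raw_spin_lock/spin_lock example in the message above.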
108 lines · 2.8 KiB · C
/*
 * kernel/mutex-debug.c
 *
 * Debugging code for mutexes
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * lock debugging, locking tree, deadlock detection started by:
 *
 *  Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
 *  Released under the General Public License (GPL).
 */
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

#include "mutex-debug.h"

/*
 * Must be called with lock->wait_lock held.
 */
void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
{
	memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
	waiter->magic = waiter;
	INIT_LIST_HEAD(&waiter->list);
}

void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	lockdep_assert_held(&lock->wait_lock);
	DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));
	DEBUG_LOCKS_WARN_ON(waiter->magic != waiter);
	DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
}

void debug_mutex_free_waiter(struct mutex_waiter *waiter)
{
	DEBUG_LOCKS_WARN_ON(!list_empty(&waiter->list));
	memset(waiter, MUTEX_DEBUG_FREE, sizeof(*waiter));
}

void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
			    struct task_struct *task)
{
	lockdep_assert_held(&lock->wait_lock);

	/* Mark the current thread as blocked on the lock: */
	task->blocked_on = waiter;
}

void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
			 struct task_struct *task)
{
	DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
	DEBUG_LOCKS_WARN_ON(waiter->task != task);
	DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
	task->blocked_on = NULL;

	list_del_init(&waiter->list);
	waiter->task = NULL;
}

void debug_mutex_unlock(struct mutex *lock)
{
	if (likely(debug_locks)) {
		DEBUG_LOCKS_WARN_ON(lock->magic != lock);
		DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
	}
}

void debug_mutex_init(struct mutex *lock, const char *name,
		      struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
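	/*
	 * Mutexes are sleeping locks, hence LD_WAIT_SLEEP in the
	 * wait-type scheme described in the commit message above.
	 */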
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
	lock->magic = lock;
}

/***
 * mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void mutex_destroy(struct mutex *lock)
{
	DEBUG_LOCKS_WARN_ON(mutex_is_locked(lock));
	lock->magic = NULL;
}

EXPORT_SYMBOL_GPL(mutex_destroy);
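
As a usage note, the DEBUG_LOCKS_WARN_ON() in mutex_destroy() catches
tearing down a mutex that is still held. A hypothetical sketch (the lock
name demo_lock and the function broken_teardown are illustrative, not from
this file):

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);		/* hypothetical mutex */

static void broken_teardown(void)
{
	mutex_lock(&demo_lock);
	/* BUG: with CONFIG_DEBUG_MUTEXES this warns, the mutex is held */
	mutex_destroy(&demo_lock);
	mutex_unlock(&demo_lock);
}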