lockdep: Comment all warnings
Andrew requested I comment all the lockdep WARN()s to help other people
figure out wth is wrong..

Requested-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1315301493.3191.9.camel@twins
Signed-off-by: Ingo Molnar <mingo@elte.hu>
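Context for the diff below: almost every warning in kernel/lockdep.c goes through DEBUG_LOCKS_WARN_ON(), which, when its condition trips, turns lock debugging off, fires a warning, and hands the condition value back so the caller can bail out. The following is only a simplified, illustrative paraphrase of that behaviour, not the exact macro from include/linux/debug_locks.h (the real one also honours oops_in_progress and debug_locks_silent); the SKETCH_ name is made up for illustration.

/*
 * Simplified paraphrase of DEBUG_LOCKS_WARN_ON(); illustrative only.
 * The real macro lives in include/linux/debug_locks.h and additionally
 * checks oops_in_progress and debug_locks_silent.
 */
#define SKETCH_DEBUG_LOCKS_WARN_ON(c)					\
({									\
	int __ret = !!(c);						\
									\
	/* condition tripped: disable lockdep, then complain */	\
	if (unlikely(__ret) && debug_locks_off())			\
		WARN_ON(1);						\
									\
	__ret;	/* callers use this to bail out of the function */	\
})

Because the macro returns the tripped condition, the recurring pattern in the file is "if (DEBUG_LOCKS_WARN_ON(bad_state)) return;", and the comments added by this patch sit directly above those checks to say what the bad state actually is.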
parent cb475de3d1
commit 0119fee449

kernel/lockdep.c | 156
@@ -96,8 +96,13 @@ static int graph_lock(void)
 
 static inline int graph_unlock(void)
 {
-	if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
+	if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
+		/*
+		 * The lockdep graph lock isn't locked while we expect it to
+		 * be, we're confused now, bye!
+		 */
 		return DEBUG_LOCKS_WARN_ON(1);
+	}
 
 	current->lockdep_recursion--;
 	arch_spin_unlock(&lockdep_lock);
@@ -134,6 +139,9 @@ static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 static inline struct lock_class *hlock_class(struct held_lock *hlock)
 {
 	if (!hlock->class_idx) {
+		/*
+		 * Someone passed in garbage, we give up.
+		 */
 		DEBUG_LOCKS_WARN_ON(1);
 		return NULL;
 	}
@@ -687,6 +695,10 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	 */
 	list_for_each_entry(class, hash_head, hash_entry) {
 		if (class->key == key) {
+			/*
+			 * Huh! same key, different name? Did someone trample
+			 * on some memory? We're most confused.
+			 */
 			WARN_ON_ONCE(class->name != lock->name);
 			return class;
 		}
@@ -800,6 +812,10 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
 		lock->class_cache[subclass] = class;
 
+	/*
+	 * Hash collision, did we smoke some? We found a class with a matching
+	 * hash but the subclass -- which is hashed in -- didn't match.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
 		return NULL;
 
@@ -926,7 +942,7 @@ static inline void mark_lock_accessed(struct lock_list *lock,
 	unsigned long nr;
 
 	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries);
+	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
 	lock->parent = parent;
 	lock->class->dep_gen_id = lockdep_dependency_gen_id;
 }
@@ -936,7 +952,7 @@ static inline unsigned long lock_accessed(struct lock_list *lock)
 	unsigned long nr;
 
 	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries);
+	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
 	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
 }
 
@@ -1196,6 +1212,9 @@ static noinline int print_bfs_bug(int ret)
 	if (!debug_locks_off_graph_unlock())
 		return 0;
 
+	/*
+	 * Breadth-first-search failed, graph got corrupted?
+	 */
 	WARN(1, "lockdep bfs error:%d\n", ret);
 
 	return 0;
@@ -1944,6 +1963,11 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 	if (!debug_locks_off_graph_unlock())
 		return 0;
 
+	/*
+	 * Clearly we all shouldn't be here, but since we made it we
+	 * can reliable say we messed up our state. See the above two
+	 * gotos for reasons why we could possibly end up here.
+	 */
 	WARN_ON(1);
 
 	return 0;
@@ -1975,6 +1999,11 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 	struct held_lock *hlock_curr, *hlock_next;
 	int i, j;
 
+	/*
+	 * We might need to take the graph lock, ensure we've got IRQs
+	 * disabled to make this an IRQ-safe lock.. for recursion reasons
+	 * lockdep won't complain about its own locking errors.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return 0;
 	/*
@@ -2126,6 +2155,10 @@ static void check_chain_key(struct task_struct *curr)
 		hlock = curr->held_locks + i;
 		if (chain_key != hlock->prev_chain_key) {
 			debug_locks_off();
+			/*
+			 * We got mighty confused, our chain keys don't match
+			 * with what we expect, someone trample on our task state?
+			 */
 			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
 				curr->lockdep_depth, i,
 				(unsigned long long)chain_key,
@@ -2133,6 +2166,9 @@ static void check_chain_key(struct task_struct *curr)
 			return;
 		}
 		id = hlock->class_idx - 1;
+		/*
+		 * Whoops ran out of static storage again?
+		 */
 		if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
 			return;
 
@@ -2144,6 +2180,10 @@ static void check_chain_key(struct task_struct *curr)
 	}
 	if (chain_key != curr->curr_chain_key) {
 		debug_locks_off();
+		/*
+		 * More smoking hash instead of calculating it, damn see these
+		 * numbers float.. I bet that a pink elephant stepped on my memory.
+		 */
 		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
 			curr->lockdep_depth, i,
 			(unsigned long long)chain_key,
@@ -2525,12 +2565,24 @@ void trace_hardirqs_on_caller(unsigned long ip)
 		return;
 	}
 
+	/*
+	 * We're enabling irqs and according to our state above irqs weren't
+	 * already enabled, yet we find the hardware thinks they are in fact
+	 * enabled.. someone messed up their IRQ state tracing.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
+	/*
+	 * See the fine text that goes along with this variable definition.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
 		return;
 
+	/*
+	 * Can't allow enabling interrupts while in an interrupt handler,
+	 * that's general bad form and such. Recursion, limited stack etc..
+	 */
 	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
 		return;
 
@@ -2558,6 +2610,10 @@ void trace_hardirqs_off_caller(unsigned long ip)
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
+	/*
+	 * So we're supposed to get called after you mask local IRQs, but for
+	 * some reason the hardware doesn't quite think you did a proper job.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
@@ -2590,6 +2646,10 @@ void trace_softirqs_on(unsigned long ip)
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
+	/*
+	 * We fancy IRQs being disabled here, see softirq.c, avoids
+	 * funny state and nesting things.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
@@ -2626,6 +2686,9 @@ void trace_softirqs_off(unsigned long ip)
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
+	/*
+	 * We fancy IRQs being disabled here, see softirq.c
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
@@ -2637,6 +2700,9 @@ void trace_softirqs_off(unsigned long ip)
 		curr->softirq_disable_ip = ip;
 		curr->softirq_disable_event = ++curr->irq_events;
 		debug_atomic_inc(softirqs_off_events);
+		/*
+		 * Whoops, we wanted softirqs off, so why aren't they?
+		 */
 		DEBUG_LOCKS_WARN_ON(!softirq_count());
 	} else
 		debug_atomic_inc(redundant_softirqs_off);
@@ -2661,6 +2727,9 @@ static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
 	if (!(gfp_mask & __GFP_FS))
 		return;
 
+	/*
+	 * Oi! Can't be having __GFP_FS allocations with IRQs disabled.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
 		return;
 
@@ -2773,13 +2842,13 @@ static int separate_irq_context(struct task_struct *curr,
 	return 0;
 }
 
-#else
+#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
 
 static inline
 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		enum lock_usage_bit new_bit)
 {
-	WARN_ON(1);
+	WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
 	return 1;
 }
 
@@ -2799,7 +2868,7 @@ void lockdep_trace_alloc(gfp_t gfp_mask)
 {
 }
 
-#endif
+#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
 
 /*
  * Mark a lock with a usage bit, and validate the state transition:
@@ -2880,6 +2949,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	lock->cpu = raw_smp_processor_id();
 #endif
 
+	/*
+	 * Can't be having no nameless bastards around this place!
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!name)) {
 		lock->name = "NULL";
 		return;
@@ -2887,6 +2959,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 
 	lock->name = name;
 
+	/*
+	 * No key, no joy, we need to hash something.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!key))
 		return;
 	/*
@@ -2894,6 +2969,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	 */
 	if (!static_obj(key)) {
 		printk("BUG: key %p not in .data!\n", key);
+		/*
+		 * What it says above ^^^^^, I suggest you read it.
+		 */
 		DEBUG_LOCKS_WARN_ON(1);
 		return;
 	}
@@ -2932,6 +3010,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (unlikely(!debug_locks))
 		return 0;
 
+	/*
+	 * Lockdep should run with IRQs disabled, otherwise we could
+	 * get an interrupt which would want to take locks, which would
+	 * end up in lockdep and have you got a head-ache already?
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return 0;
 
@@ -2963,6 +3046,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	 * dependency checks are done)
 	 */
 	depth = curr->lockdep_depth;
+	/*
+	 * Ran out of static storage for our per-task lock stack again have we?
+	 */
 	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
 		return 0;
 
@@ -2981,6 +3067,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	}
 
 	hlock = curr->held_locks + depth;
+	/*
+	 * Plain impossible, we just registered it and checked it weren't no
+	 * NULL like.. I bet this mushroom I ate was good!
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!class))
 		return 0;
 	hlock->class_idx = class_idx;
@@ -3015,11 +3105,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	 * the hash, not class->key.
 	 */
 	id = class - lock_classes;
+	/*
+	 * Whoops, we did it again.. ran straight out of our static allocation.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
 		return 0;
 
 	chain_key = curr->curr_chain_key;
 	if (!depth) {
+		/*
+		 * How can we have a chain hash when we ain't got no keys?!
+		 */
 		if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
 			return 0;
 		chain_head = 1;
@@ -3091,6 +3187,9 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
 {
 	if (unlikely(!debug_locks))
 		return 0;
+	/*
+	 * Lockdep should run with IRQs disabled, recursion, head-ache, etc..
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return 0;
 
@@ -3120,6 +3219,11 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 		if (!class)
 			return 0;
 
+		/*
+		 * References, but not a lock we're actually ref-counting?
+		 * State got messed up, follow the sites that change ->references
+		 * and try to make sense of it.
+		 */
 		if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
 			return 0;
 
@@ -3142,6 +3246,10 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
 	int i;
 
 	depth = curr->lockdep_depth;
+	/*
+	 * This function is about (re)setting the class of a held lock,
+	 * yet we're not actually holding any locks. Naughty user!
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!depth))
 		return 0;
 
@@ -3177,6 +3285,10 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
 			return 0;
 	}
 
+	/*
+	 * I took it apart and put it back together again, except now I have
+	 * these 'spare' parts.. where shall I put them.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
 		return 0;
 	return 1;
@@ -3201,6 +3313,10 @@ lock_release_non_nested(struct task_struct *curr,
 	 * of held locks:
 	 */
 	depth = curr->lockdep_depth;
+	/*
+	 * So we're all set to release this lock.. wait what lock? We don't
+	 * own any locks, you've been drinking again?
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!depth))
 		return 0;
 
@@ -3253,6 +3369,10 @@ lock_release_non_nested(struct task_struct *curr,
 			return 0;
 	}
 
+	/*
+	 * We had N bottles of beer on the wall, we drank one, but now
+	 * there's not N-1 bottles of beer left on the wall...
+	 */
 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
 		return 0;
 	return 1;
@@ -3283,6 +3403,9 @@ static int lock_release_nested(struct task_struct *curr,
 		return lock_release_non_nested(curr, lock, ip);
 	curr->lockdep_depth--;
 
+	/*
+	 * No more locks, but somehow we've got hash left over, who left it?
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
 		return 0;
 
@@ -3365,10 +3488,13 @@ static void check_flags(unsigned long flags)
 	 * check if not in hardirq contexts:
 	 */
 	if (!hardirq_count()) {
-		if (softirq_count())
+		if (softirq_count()) {
+			/* like the above, but with softirqs */
 			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
-		else
+		} else {
+			/* lick the above, does it taste good? */
 			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
+		}
 	}
 
 	if (!debug_locks)
@@ -3506,6 +3632,10 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 	int i, contention_point, contending_point;
 
 	depth = curr->lockdep_depth;
+	/*
+	 * Whee, we contended on this lock, except it seems we're not
+	 * actually trying to acquire anything much at all..
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!depth))
 		return;
 
@@ -3555,6 +3685,10 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 	int i, cpu;
 
 	depth = curr->lockdep_depth;
+	/*
+	 * Yay, we acquired ownership of this lock we didn't try to
+	 * acquire, how the heck did that happen?
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!depth))
 		return;
 
@@ -3759,8 +3893,12 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 			match |= class == lock->class_cache[j];
 
 		if (unlikely(match)) {
-			if (debug_locks_off_graph_unlock())
+			if (debug_locks_off_graph_unlock()) {
+				/*
+				 * We all just reset everything, how did it match?
+				 */
 				WARN_ON(1);
+			}
 			goto out_restore;
 		}
 	}
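Every hunk above follows the same shape: the existing sanity check keeps its behaviour, and the new comment directly above it states which invariant just failed and what most likely broke it. As a recap, a hypothetical before/after in that style (bad_invariant is a made-up name, not taken from lockdep.c):

/* before: the check fires, but the WARN says nothing about why */
if (DEBUG_LOCKS_WARN_ON(bad_invariant))
	return 0;

/* after: a comment right above the check explains the failure mode */
/*
 * Explain what being here means: which invariant failed and whether
 * the likely cause is memory corruption, API misuse or running out
 * of static storage.
 */
if (DEBUG_LOCKS_WARN_ON(bad_invariant))
	return 0;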