GNU Linux-libre 4.14.266-gnu1: kernel/locking/lockdep.c
1 /*
2  * kernel/lockdep.c
3  *
4  * Runtime locking correctness validator
5  *
6  * Started by Ingo Molnar:
7  *
8  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
10  *
11  * this code maps all the lock dependencies as they occur in a live kernel
12  * and will warn about the following classes of locking bugs:
13  *
14  * - lock inversion scenarios
15  * - circular lock dependencies
16  * - hardirq/softirq safe/unsafe locking bugs
17  *
18  * Bugs are reported even if the current locking scenario does not cause
19  * any deadlock at this point.
20  *
21  * I.e. if at any time in the past two locks were taken in a different order,
22  * even if it happened for another task, even if those were different
23  * locks (but of the same class as this lock), this code will detect it.
24  *
25  * Thanks to Arjan van de Ven for coming up with the initial idea of
26  * mapping lock dependencies at runtime.
27  */
28 #define DISABLE_BRANCH_PROFILING
29 #include <linux/mutex.h>
30 #include <linux/sched.h>
31 #include <linux/sched/clock.h>
32 #include <linux/sched/task.h>
33 #include <linux/sched/mm.h>
34 #include <linux/delay.h>
35 #include <linux/module.h>
36 #include <linux/proc_fs.h>
37 #include <linux/seq_file.h>
38 #include <linux/spinlock.h>
39 #include <linux/kallsyms.h>
40 #include <linux/interrupt.h>
41 #include <linux/stacktrace.h>
42 #include <linux/debug_locks.h>
43 #include <linux/irqflags.h>
44 #include <linux/utsname.h>
45 #include <linux/hash.h>
46 #include <linux/ftrace.h>
47 #include <linux/stringify.h>
48 #include <linux/bitops.h>
49 #include <linux/gfp.h>
50 #include <linux/random.h>
51 #include <linux/jhash.h>
52
53 #include <asm/sections.h>
54
55 #include "lockdep_internals.h"
56
57 #define CREATE_TRACE_POINTS
58 #include <trace/events/lock.h>
59
60 #ifdef CONFIG_LOCKDEP_CROSSRELEASE
61 #include <linux/slab.h>
62 #endif
63
64 #ifdef CONFIG_PROVE_LOCKING
65 int prove_locking = 1;
66 module_param(prove_locking, int, 0644);
67 #else
68 #define prove_locking 0
69 #endif
70
71 #ifdef CONFIG_LOCK_STAT
72 int lock_stat = 1;
73 module_param(lock_stat, int, 0644);
74 #else
75 #define lock_stat 0
76 #endif
77
78 /*
79  * lockdep_lock: protects the lockdep graph, the hashes and the
80  *               class/list/hash allocators.
81  *
82  * This is one of the rare exceptions where it's justified
83  * to use a raw spinlock - we really don't want the spinlock
84  * code to recurse back into the lockdep code...
85  */
86 static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
87
88 static int graph_lock(void)
89 {
90         arch_spin_lock(&lockdep_lock);
91         /*
92          * Make sure that if another CPU detected a bug while
93          * walking the graph we don't change it (while the other
94          * CPU is busy printing out stuff with the graph lock
95          * dropped already)
96          */
97         if (!debug_locks) {
98                 arch_spin_unlock(&lockdep_lock);
99                 return 0;
100         }
101         /* prevent any recursions within lockdep from causing deadlocks */
102         current->lockdep_recursion++;
103         return 1;
104 }
105
106 static inline int graph_unlock(void)
107 {
108         if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
109                 /*
110                  * The lockdep graph lock isn't held even though we expect it to
111                  * be; we're confused now, bye!
112                  */
113                 return DEBUG_LOCKS_WARN_ON(1);
114         }
115
116         current->lockdep_recursion--;
117         arch_spin_unlock(&lockdep_lock);
118         return 0;
119 }
120
121 /*
122  * Turn lock debugging off and return with 0 if it was off already,
123  * and also release the graph lock:
124  */
125 static inline int debug_locks_off_graph_unlock(void)
126 {
127         int ret = debug_locks_off();
128
129         arch_spin_unlock(&lockdep_lock);
130
131         return ret;
132 }
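
/*
 * A typical graph-modifying caller in this file brackets its work roughly
 * like this (an illustrative sketch only; real callers differ in their
 * error handling and return values):
 *
 *      if (!graph_lock())
 *              return 0;
 *      ... inspect or modify the dependency graph ...
 *      graph_unlock();
 */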
133
134 unsigned long nr_list_entries;
135 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
136
137 /*
138  * All data structures here are protected by the global debug_lock.
139  *
140  * Mutex key structs only get allocated once, during bootup, and never
141  * get freed - this significantly simplifies the debugging code.
142  */
143 unsigned long nr_lock_classes;
144 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
145
146 static inline struct lock_class *hlock_class(struct held_lock *hlock)
147 {
148         if (!hlock->class_idx) {
149                 /*
150                  * Someone passed in garbage, we give up.
151                  */
152                 DEBUG_LOCKS_WARN_ON(1);
153                 return NULL;
154         }
155         return lock_classes + hlock->class_idx - 1;
156 }
157
158 #ifdef CONFIG_LOCK_STAT
159 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
160
161 static inline u64 lockstat_clock(void)
162 {
163         return local_clock();
164 }
165
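/*
 * Record @ip in the first free slot of points[], or find the slot that
 * already holds it. Returns the slot index, or LOCKSTAT_POINTS when the
 * table is full and @ip is not in it.
 */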
166 static int lock_point(unsigned long points[], unsigned long ip)
167 {
168         int i;
169
170         for (i = 0; i < LOCKSTAT_POINTS; i++) {
171                 if (points[i] == 0) {
172                         points[i] = ip;
173                         break;
174                 }
175                 if (points[i] == ip)
176                         break;
177         }
178
179         return i;
180 }
181
182 static void lock_time_inc(struct lock_time *lt, u64 time)
183 {
184         if (time > lt->max)
185                 lt->max = time;
186
187         if (time < lt->min || !lt->nr)
188                 lt->min = time;
189
190         lt->total += time;
191         lt->nr++;
192 }
193
194 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
195 {
196         if (!src->nr)
197                 return;
198
199         if (src->max > dst->max)
200                 dst->max = src->max;
201
202         if (src->min < dst->min || !dst->nr)
203                 dst->min = src->min;
204
205         dst->total += src->total;
206         dst->nr += src->nr;
207 }
208
209 struct lock_class_stats lock_stats(struct lock_class *class)
210 {
211         struct lock_class_stats stats;
212         int cpu, i;
213
214         memset(&stats, 0, sizeof(struct lock_class_stats));
215         for_each_possible_cpu(cpu) {
216                 struct lock_class_stats *pcs =
217                         &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
218
219                 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
220                         stats.contention_point[i] += pcs->contention_point[i];
221
222                 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
223                         stats.contending_point[i] += pcs->contending_point[i];
224
225                 lock_time_add(&pcs->read_waittime, &stats.read_waittime);
226                 lock_time_add(&pcs->write_waittime, &stats.write_waittime);
227
228                 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
229                 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
230
231                 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
232                         stats.bounces[i] += pcs->bounces[i];
233         }
234
235         return stats;
236 }
237
238 void clear_lock_stats(struct lock_class *class)
239 {
240         int cpu;
241
242         for_each_possible_cpu(cpu) {
243                 struct lock_class_stats *cpu_stats =
244                         &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
245
246                 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
247         }
248         memset(class->contention_point, 0, sizeof(class->contention_point));
249         memset(class->contending_point, 0, sizeof(class->contending_point));
250 }
251
252 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
253 {
254         return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
255 }
256
257 static void put_lock_stats(struct lock_class_stats *stats)
258 {
259         put_cpu_var(cpu_lock_stats);
260 }
261
262 static void lock_release_holdtime(struct held_lock *hlock)
263 {
264         struct lock_class_stats *stats;
265         u64 holdtime;
266
267         if (!lock_stat)
268                 return;
269
270         holdtime = lockstat_clock() - hlock->holdtime_stamp;
271
272         stats = get_lock_stats(hlock_class(hlock));
273         if (hlock->read)
274                 lock_time_inc(&stats->read_holdtime, holdtime);
275         else
276                 lock_time_inc(&stats->write_holdtime, holdtime);
277         put_lock_stats(stats);
278 }
279 #else
280 static inline void lock_release_holdtime(struct held_lock *hlock)
281 {
282 }
283 #endif
284
285 /*
286  * We keep a global list of all lock classes. The list only grows,
287  * never shrinks. The list is only accessed with the lockdep
288  * spinlock held.
289  */
290 LIST_HEAD(all_lock_classes);
291
292 /*
293  * The lockdep classes are in a hash-table as well, for fast lookup:
294  */
295 #define CLASSHASH_BITS          (MAX_LOCKDEP_KEYS_BITS - 1)
296 #define CLASSHASH_SIZE          (1UL << CLASSHASH_BITS)
297 #define __classhashfn(key)      hash_long((unsigned long)key, CLASSHASH_BITS)
298 #define classhashentry(key)     (classhash_table + __classhashfn((key)))
299
300 static struct hlist_head classhash_table[CLASSHASH_SIZE];
301
302 /*
303  * We put the lock dependency chains into a hash-table as well, to cache
304  * their existence:
305  */
306 #define CHAINHASH_BITS          (MAX_LOCKDEP_CHAINS_BITS-1)
307 #define CHAINHASH_SIZE          (1UL << CHAINHASH_BITS)
308 #define __chainhashfn(chain)    hash_long(chain, CHAINHASH_BITS)
309 #define chainhashentry(chain)   (chainhash_table + __chainhashfn((chain)))
310
311 static struct hlist_head chainhash_table[CHAINHASH_SIZE];
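
/*
 * For illustration, a class lookup hashes the key into a bucket and then
 * walks that bucket under RCU, roughly as look_up_lock_class() does
 * further down:
 *
 *      struct hlist_head *head = classhashentry(key);
 *      struct lock_class *class;
 *
 *      hlist_for_each_entry_rcu(class, head, hash_entry)
 *              if (class->key == key)
 *                      return class;
 */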
312
313 /*
314  * The hash key of the lock dependency chains is a hash itself too:
315  * it's a hash of all locks taken up to that lock, including that lock.
316  * It's a 64-bit hash, because it's important for the keys to be
317  * unique.
318  */
319 static inline u64 iterate_chain_key(u64 key, u32 idx)
320 {
321         u32 k0 = key, k1 = key >> 32;
322
323         __jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
324
325         return k0 | (u64)k1 << 32;
326 }
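
/*
 * For illustration, the 64-bit chain key of the current lock stack is built
 * by folding each held lock's class index into a running key that starts at
 * zero; this is roughly what the chain-key validation code later in this
 * file does:
 *
 *      u64 chain_key = 0;
 *      int i;
 *
 *      for (i = 0; i < curr->lockdep_depth; i++)
 *              chain_key = iterate_chain_key(chain_key,
 *                                            curr->held_locks[i].class_idx);
 */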
327
328 void lockdep_off(void)
329 {
330         current->lockdep_recursion++;
331 }
332 EXPORT_SYMBOL(lockdep_off);
333
334 void lockdep_on(void)
335 {
336         current->lockdep_recursion--;
337 }
338 EXPORT_SYMBOL(lockdep_on);
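
/*
 * Typical (illustrative) use of the two helpers above: bracket a region
 * whose locking should be invisible to lockdep:
 *
 *      lockdep_off();
 *      ... locking in here is not tracked ...
 *      lockdep_on();
 *
 * The calls nest via current->lockdep_recursion, so pairs may be stacked.
 */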
339
340 /*
341  * Debugging switches:
342  */
343
344 #define VERBOSE                 0
345 #define VERY_VERBOSE            0
346
347 #if VERBOSE
348 # define HARDIRQ_VERBOSE        1
349 # define SOFTIRQ_VERBOSE        1
350 #else
351 # define HARDIRQ_VERBOSE        0
352 # define SOFTIRQ_VERBOSE        0
353 #endif
354
355 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
356 /*
357  * Quick filtering for interesting events:
358  */
359 static int class_filter(struct lock_class *class)
360 {
361 #if 0
362         /* Example */
363         if (class->name_version == 1 &&
364                         !strcmp(class->name, "lockname"))
365                 return 1;
366         if (class->name_version == 1 &&
367                         !strcmp(class->name, "&struct->lockfield"))
368                 return 1;
369 #endif
370         /* Filter everything else. Returning 1 here would allow everything else */
371         return 0;
372 }
373 #endif
374
375 static int verbose(struct lock_class *class)
376 {
377 #if VERBOSE
378         return class_filter(class);
379 #endif
380         return 0;
381 }
382
383 /*
384  * Stack-trace: tightly packed array of stack backtrace
385  * addresses. Protected by the graph_lock.
386  */
387 unsigned long nr_stack_trace_entries;
388 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
389
390 static void print_lockdep_off(const char *bug_msg)
391 {
392         printk(KERN_DEBUG "%s\n", bug_msg);
393         printk(KERN_DEBUG "turning off the locking correctness validator.\n");
394 #ifdef CONFIG_LOCK_STAT
395         printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
396 #endif
397 }
398
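/*
 * Save the current stack trace into the global stack_trace[] array.
 * Returns 1 on success. Once MAX_STACK_TRACE_ENTRIES is exhausted it
 * turns lockdep off, drops the graph lock and returns 0.
 */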
399 static int save_trace(struct stack_trace *trace)
400 {
401         trace->nr_entries = 0;
402         trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
403         trace->entries = stack_trace + nr_stack_trace_entries;
404
405         trace->skip = 3;
406
407         save_stack_trace(trace);
408
409         /*
410          * Some daft arches put -1 at the end to indicate it's a full trace.
411          *
412          * <rant> this is buggy anyway, since it takes a whole extra entry so a
413          * complete trace that maxes out the entries provided will be reported
414          * as incomplete, friggin useless </rant>
415          */
416         if (trace->nr_entries != 0 &&
417             trace->entries[trace->nr_entries-1] == ULONG_MAX)
418                 trace->nr_entries--;
419
420         trace->max_entries = trace->nr_entries;
421
422         nr_stack_trace_entries += trace->nr_entries;
423
424         if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
425                 if (!debug_locks_off_graph_unlock())
426                         return 0;
427
428                 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
429                 dump_stack();
430
431                 return 0;
432         }
433
434         return 1;
435 }
436
437 unsigned int nr_hardirq_chains;
438 unsigned int nr_softirq_chains;
439 unsigned int nr_process_chains;
440 unsigned int max_lockdep_depth;
441
442 #ifdef CONFIG_DEBUG_LOCKDEP
443 /*
444  * Various lockdep statistics:
445  */
446 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
447 #endif
448
449 /*
450  * Locking printouts:
451  */
452
453 #define __USAGE(__STATE)                                                \
454         [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",       \
455         [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",         \
456         [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
457         [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
458
459 static const char *usage_str[] =
460 {
461 #define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
462 #include "lockdep_states.h"
463 #undef LOCKDEP_STATE
464         [LOCK_USED] = "INITIAL USE",
465 };
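
/*
 * For example, for the HARDIRQ state the __USAGE() entries above expand to
 * the strings "IN-HARDIRQ-W", "HARDIRQ-ON-W", "IN-HARDIRQ-R" and
 * "HARDIRQ-ON-R".
 */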
466
467 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
468 {
469         return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
470 }
471
472 static inline unsigned long lock_flag(enum lock_usage_bit bit)
473 {
474         return 1UL << bit;
475 }
476
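/*
 * The character returned below summarizes a class's usage for one irq
 * state (cf. Documentation/locking/lockdep-design.txt):
 *
 *      '.'  neither of the cases below
 *      '-'  the class was ever used in that irq context (irq-safe)
 *      '+'  the class was ever held with that irq type enabled (irq-unsafe)
 *      '?'  both of the above
 */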
477 static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
478 {
479         char c = '.';
480
481         if (class->usage_mask & lock_flag(bit + 2))
482                 c = '+';
483         if (class->usage_mask & lock_flag(bit)) {
484                 c = '-';
485                 if (class->usage_mask & lock_flag(bit + 2))
486                         c = '?';
487         }
488
489         return c;
490 }
491
492 void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
493 {
494         int i = 0;
495
496 #define LOCKDEP_STATE(__STATE)                                          \
497         usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);     \
498         usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
499 #include "lockdep_states.h"
500 #undef LOCKDEP_STATE
501
502         usage[i] = '\0';
503 }
504
505 static void __print_lock_name(struct lock_class *class)
506 {
507         char str[KSYM_NAME_LEN];
508         const char *name;
509
510         name = class->name;
511         if (!name) {
512                 name = __get_key_name(class->key, str);
513                 printk(KERN_CONT "%s", name);
514         } else {
515                 printk(KERN_CONT "%s", name);
516                 if (class->name_version > 1)
517                         printk(KERN_CONT "#%d", class->name_version);
518                 if (class->subclass)
519                         printk(KERN_CONT "/%d", class->subclass);
520         }
521 }
522
523 static void print_lock_name(struct lock_class *class)
524 {
525         char usage[LOCK_USAGE_CHARS];
526
527         get_usage_chars(class, usage);
528
529         printk(KERN_CONT " (");
530         __print_lock_name(class);
531         printk(KERN_CONT "){%s}", usage);
532 }
533
534 static void print_lockdep_cache(struct lockdep_map *lock)
535 {
536         const char *name;
537         char str[KSYM_NAME_LEN];
538
539         name = lock->name;
540         if (!name)
541                 name = __get_key_name(lock->key->subkeys, str);
542
543         printk(KERN_CONT "%s", name);
544 }
545
546 static void print_lock(struct held_lock *hlock)
547 {
548         /*
549          * We can be called locklessly through debug_show_all_locks() so be
550          * extra careful, the hlock might have been released and cleared.
551          */
552         unsigned int class_idx = hlock->class_idx;
553
554         /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
555         barrier();
556
557         if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
558                 printk(KERN_CONT "<RELEASED>\n");
559                 return;
560         }
561
562         print_lock_name(lock_classes + class_idx - 1);
563         printk(KERN_CONT ", at: [<%p>] %pS\n",
564                 (void *)hlock->acquire_ip, (void *)hlock->acquire_ip);
565 }
566
567 static void lockdep_print_held_locks(struct task_struct *curr)
568 {
569         int i, depth = curr->lockdep_depth;
570
571         if (!depth) {
572                 printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
573                 return;
574         }
575         printk("%d lock%s held by %s/%d:\n",
576                 depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
577
578         for (i = 0; i < depth; i++) {
579                 printk(" #%d: ", i);
580                 print_lock(curr->held_locks + i);
581         }
582 }
583
584 static void print_kernel_ident(void)
585 {
586         printk("%s %.*s %s\n", init_utsname()->release,
587                 (int)strcspn(init_utsname()->version, " "),
588                 init_utsname()->version,
589                 print_tainted());
590 }
591
592 static int very_verbose(struct lock_class *class)
593 {
594 #if VERY_VERBOSE
595         return class_filter(class);
596 #endif
597         return 0;
598 }
599
600 /*
601  * Is this the address of a static object:
602  */
603 #ifdef __KERNEL__
604 static int static_obj(void *obj)
605 {
606         unsigned long start = (unsigned long) &_stext,
607                       end   = (unsigned long) &_end,
608                       addr  = (unsigned long) obj;
609
610         /*
611          * static variable?
612          */
613         if ((addr >= start) && (addr < end))
614                 return 1;
615
616         if (arch_is_kernel_data(addr))
617                 return 1;
618
619         /*
620          * in-kernel percpu var?
621          */
622         if (is_kernel_percpu_address(addr))
623                 return 1;
624
625         /*
626          * module static or percpu var?
627          */
628         return is_module_address(addr) || is_module_percpu_address(addr);
629 }
630 #endif
631
632 /*
633  * To make lock name printouts unique, we calculate a unique
634  * class->name_version generation counter:
635  */
636 static int count_matching_names(struct lock_class *new_class)
637 {
638         struct lock_class *class;
639         int count = 0;
640
641         if (!new_class->name)
642                 return 0;
643
644         list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
645                 if (new_class->key - new_class->subclass == class->key)
646                         return class->name_version;
647                 if (class->name && !strcmp(class->name, new_class->name))
648                         count = max(count, class->name_version);
649         }
650
651         return count + 1;
652 }
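
/*
 * Example (with a made-up lock name): if two distinct classes are both
 * named "&dev->lock", the first registered gets name_version 1 and the
 * second gets name_version 2, which __print_lock_name() prints as
 * "&dev->lock#2".
 */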
653
654 /*
655  * Look up a lock's class in the hash-table. The result is cached in the
656  * lock object itself by register_lock_class(), so the actual hash lookup
657  * should normally happen only once per lock object.
658  */
659 static inline struct lock_class *
660 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
661 {
662         struct lockdep_subclass_key *key;
663         struct hlist_head *hash_head;
664         struct lock_class *class;
665         bool is_static = false;
666
667         if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
668                 debug_locks_off();
669                 printk(KERN_ERR
670                         "BUG: looking up invalid subclass: %u\n", subclass);
671                 printk(KERN_ERR
672                         "turning off the locking correctness validator.\n");
673                 dump_stack();
674                 return NULL;
675         }
676
677         /*
678          * Static locks do not have their class-keys yet - for them the key
679          * is the lock object itself. If the lock is in the per cpu area,
680          * the canonical address of the lock (per cpu offset removed) is
681          * used.
682          */
683         if (unlikely(!lock->key)) {
684                 unsigned long can_addr, addr = (unsigned long)lock;
685
686                 if (__is_kernel_percpu_address(addr, &can_addr))
687                         lock->key = (void *)can_addr;
688                 else if (__is_module_percpu_address(addr, &can_addr))
689                         lock->key = (void *)can_addr;
690                 else if (static_obj(lock))
691                         lock->key = (void *)lock;
692                 else
693                         return ERR_PTR(-EINVAL);
694                 is_static = true;
695         }
696
697         /*
698          * NOTE: the class-key must be unique. For dynamic locks, a static
699          * lock_class_key variable is passed in through the mutex_init()
700          * (or spin_lock_init()) call - which acts as the key. For static
701          * locks we use the lock object itself as the key.
702          */
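        /*
         * For illustration, an initializer such as spin_lock_init() expands
         * to roughly the following (a sketch, not the exact kernel macro):
         *
         *      static struct lock_class_key __key;
         *
         *      __raw_spin_lock_init(lock, #lock, &__key);
         *
         * so every init site contributes one static key, i.e. one class.
         */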
703         BUILD_BUG_ON(sizeof(struct lock_class_key) >
704                         sizeof(struct lockdep_map));
705
706         key = lock->key->subkeys + subclass;
707
708         hash_head = classhashentry(key);
709
710         /*
711          * We do an RCU walk of the hash, see lockdep_free_key_range().
712          */
713         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
714                 return NULL;
715
716         hlist_for_each_entry_rcu_notrace(class, hash_head, hash_entry) {
717                 if (class->key == key) {
718                         /*
719                          * Huh! same key, different name? Did someone trample
720                          * on some memory? We're most confused.
721                          */
722                         WARN_ON_ONCE(class->name != lock->name);
723                         return class;
724                 }
725         }
726
727         return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
728 }
729
730 #ifdef CONFIG_LOCKDEP_CROSSRELEASE
731 static void cross_init(struct lockdep_map *lock, int cross);
732 static int cross_lock(struct lockdep_map *lock);
733 static int lock_acquire_crosslock(struct held_lock *hlock);
734 static int lock_release_crosslock(struct lockdep_map *lock);
735 #else
736 static inline void cross_init(struct lockdep_map *lock, int cross) {}
737 static inline int cross_lock(struct lockdep_map *lock) { return 0; }
738 static inline int lock_acquire_crosslock(struct held_lock *hlock) { return 2; }
739 static inline int lock_release_crosslock(struct lockdep_map *lock) { return 2; }
740 #endif
741
742 /*
743  * Register a lock's class in the hash-table, if the class is not present
744  * yet. Otherwise we look it up. We cache the result in the lock object
745  * itself, so actual lookup of the hash should be once per lock object.
746  */
747 static struct lock_class *
748 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
749 {
750         struct lockdep_subclass_key *key;
751         struct hlist_head *hash_head;
752         struct lock_class *class;
753
754         DEBUG_LOCKS_WARN_ON(!irqs_disabled());
755
756         class = look_up_lock_class(lock, subclass);
757         if (likely(!IS_ERR_OR_NULL(class)))
758                 goto out_set_class_cache;
759
760         /*
761          * Debug-check: all keys must be persistent!
762          */
763         if (IS_ERR(class)) {
764                 debug_locks_off();
765                 printk("INFO: trying to register non-static key.\n");
766                 printk("the code is fine but needs lockdep annotation.\n");
767                 printk("turning off the locking correctness validator.\n");
768                 dump_stack();
769                 return NULL;
770         }
771
772         key = lock->key->subkeys + subclass;
773         hash_head = classhashentry(key);
774
775         if (!graph_lock()) {
776                 return NULL;
777         }
778         /*
779          * We have to do the hash-walk again, to avoid races
780          * with another CPU:
781          */
782         hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
783                 if (class->key == key)
784                         goto out_unlock_set;
785         }
786
787         /*
788          * Allocate a new key from the static array, and add it to
789          * the hash:
790          */
791         if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
792                 if (!debug_locks_off_graph_unlock()) {
793                         return NULL;
794                 }
795
796                 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
797                 dump_stack();
798                 return NULL;
799         }
800         class = lock_classes + nr_lock_classes++;
801         debug_atomic_inc(nr_unused_locks);
802         class->key = key;
803         class->name = lock->name;
804         class->subclass = subclass;
805         INIT_LIST_HEAD(&class->lock_entry);
806         INIT_LIST_HEAD(&class->locks_before);
807         INIT_LIST_HEAD(&class->locks_after);
808         class->name_version = count_matching_names(class);
809         /*
810          * We use RCU's safe list-add method to make
811          * parallel walking of the hash-list safe:
812          */
813         hlist_add_head_rcu(&class->hash_entry, hash_head);
814         /*
815          * Add it to the global list of classes:
816          */
817         list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
818
819         if (verbose(class)) {
820                 graph_unlock();
821
822                 printk("\nnew class %p: %s", class->key, class->name);
823                 if (class->name_version > 1)
824                         printk(KERN_CONT "#%d", class->name_version);
825                 printk(KERN_CONT "\n");
826                 dump_stack();
827
828                 if (!graph_lock()) {
829                         return NULL;
830                 }
831         }
832 out_unlock_set:
833         graph_unlock();
834
835 out_set_class_cache:
836         if (!subclass || force)
837                 lock->class_cache[0] = class;
838         else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
839                 lock->class_cache[subclass] = class;
840
841         /*
842          * Hash collision, did we smoke some? We found a class with a matching
843          * hash but the subclass -- which is hashed in -- didn't match.
844          */
845         if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
846                 return NULL;
847
848         return class;
849 }
850
851 #ifdef CONFIG_PROVE_LOCKING
852 /*
853  * Allocate a lockdep entry. (Assumes the graph_lock is held; returns
854  * NULL on failure.)
855  */
856 static struct lock_list *alloc_list_entry(void)
857 {
858         if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
859                 if (!debug_locks_off_graph_unlock())
860                         return NULL;
861
862                 print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
863                 dump_stack();
864                 return NULL;
865         }
866         return list_entries + nr_list_entries++;
867 }
868
869 /*
870  * Add a new dependency to the head of the list:
871  */
872 static int add_lock_to_list(struct lock_class *this, struct list_head *head,
873                             unsigned long ip, int distance,
874                             struct stack_trace *trace)
875 {
876         struct lock_list *entry;
877         /*
878          * Lock not present yet - get a new dependency struct and
879          * add it to the list:
880          */
881         entry = alloc_list_entry();
882         if (!entry)
883                 return 0;
884
885         entry->class = this;
886         entry->distance = distance;
887         entry->trace = *trace;
888         /*
889          * Both allocation and removal are done under the graph lock; but
890          * iteration is under RCU-sched; see look_up_lock_class() and
891          * lockdep_free_key_range().
892          */
893         list_add_tail_rcu(&entry->entry, head);
894
895         return 1;
896 }
897
898 /*
899  * For good efficiency of modular, we use power of 2
900  */
901 #define MAX_CIRCULAR_QUEUE_SIZE         4096UL
902 #define CQ_MASK                         (MAX_CIRCULAR_QUEUE_SIZE-1)
903
904 /*
905  * The circular_queue and its helpers are used to implement the
906  * breadth-first search (BFS) algorithm, by which we can build
907  * the shortest path from the next lock to be acquired to a
908  * previously held lock, if there is a circular dependency between them.
909  */
910 struct circular_queue {
911         unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
912         unsigned int  front, rear;
913 };
914
915 static struct circular_queue lock_cq;
916
917 unsigned int max_bfs_queue_depth;
918
919 static unsigned int lockdep_dependency_gen_id;
920
921 static inline void __cq_init(struct circular_queue *cq)
922 {
923         cq->front = cq->rear = 0;
924         lockdep_dependency_gen_id++;
925 }
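
/*
 * Note that bumping lockdep_dependency_gen_id in __cq_init() implicitly
 * invalidates the mark_lock_accessed() marks left behind by the previous
 * search, so the "visited" state never needs to be cleared explicitly.
 */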
926
927 static inline int __cq_empty(struct circular_queue *cq)
928 {
929         return (cq->front == cq->rear);
930 }
931
932 static inline int __cq_full(struct circular_queue *cq)
933 {
934         return ((cq->rear + 1) & CQ_MASK) == cq->front;
935 }
936
937 static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
938 {
939         if (__cq_full(cq))
940                 return -1;
941
942         cq->element[cq->rear] = elem;
943         cq->rear = (cq->rear + 1) & CQ_MASK;
944         return 0;
945 }
946
947 static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
948 {
949         if (__cq_empty(cq))
950                 return -1;
951
952         *elem = cq->element[cq->front];
953         cq->front = (cq->front + 1) & CQ_MASK;
954         return 0;
955 }
956
957 static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
958 {
959         return (cq->rear - cq->front) & CQ_MASK;
960 }
961
962 static inline void mark_lock_accessed(struct lock_list *lock,
963                                         struct lock_list *parent)
964 {
965         unsigned long nr;
966
967         nr = lock - list_entries;
968         WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
969         lock->parent = parent;
970         lock->class->dep_gen_id = lockdep_dependency_gen_id;
971 }
972
973 static inline unsigned long lock_accessed(struct lock_list *lock)
974 {
975         unsigned long nr;
976
977         nr = lock - list_entries;
978         WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
979         return lock->class->dep_gen_id == lockdep_dependency_gen_id;
980 }
981
982 static inline struct lock_list *get_lock_parent(struct lock_list *child)
983 {
984         return child->parent;
985 }
986
987 static inline int get_lock_depth(struct lock_list *child)
988 {
989         int depth = 0;
990         struct lock_list *parent;
991
992         while ((parent = get_lock_parent(child))) {
993                 child = parent;
994                 depth++;
995         }
996         return depth;
997 }
998
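/*
 * Return values of __bfs() below (summarizing the code):
 *
 *       1  no entry matching @match was found
 *       0  a match was found and stored in *@target_entry
 *      -1  the circular queue overflowed (MAX_CIRCULAR_QUEUE_SIZE too small)
 *      -2  a graph entry without a class was encountered (corruption)
 */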
999 static int __bfs(struct lock_list *source_entry,
1000                  void *data,
1001                  int (*match)(struct lock_list *entry, void *data),
1002                  struct lock_list **target_entry,
1003                  int forward)
1004 {
1005         struct lock_list *entry;
1006         struct list_head *head;
1007         struct circular_queue *cq = &lock_cq;
1008         int ret = 1;
1009
1010         if (match(source_entry, data)) {
1011                 *target_entry = source_entry;
1012                 ret = 0;
1013                 goto exit;
1014         }
1015
1016         if (forward)
1017                 head = &source_entry->class->locks_after;
1018         else
1019                 head = &source_entry->class->locks_before;
1020
1021         if (list_empty(head))
1022                 goto exit;
1023
1024         __cq_init(cq);
1025         __cq_enqueue(cq, (unsigned long)source_entry);
1026
1027         while (!__cq_empty(cq)) {
1028                 struct lock_list *lock;
1029
1030                 __cq_dequeue(cq, (unsigned long *)&lock);
1031
1032                 if (!lock->class) {
1033                         ret = -2;
1034                         goto exit;
1035                 }
1036
1037                 if (forward)
1038                         head = &lock->class->locks_after;
1039                 else
1040                         head = &lock->class->locks_before;
1041
1042                 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1043
1044                 list_for_each_entry_rcu(entry, head, entry) {
1045                         if (!lock_accessed(entry)) {
1046                                 unsigned int cq_depth;
1047                                 mark_lock_accessed(entry, lock);
1048                                 if (match(entry, data)) {
1049                                         *target_entry = entry;
1050                                         ret = 0;
1051                                         goto exit;
1052                                 }
1053
1054                                 if (__cq_enqueue(cq, (unsigned long)entry)) {
1055                                         ret = -1;
1056                                         goto exit;
1057                                 }
1058                                 cq_depth = __cq_get_elem_count(cq);
1059                                 if (max_bfs_queue_depth < cq_depth)
1060                                         max_bfs_queue_depth = cq_depth;
1061                         }
1062                 }
1063         }
1064 exit:
1065         return ret;
1066 }
1067
1068 static inline int __bfs_forwards(struct lock_list *src_entry,
1069                         void *data,
1070                         int (*match)(struct lock_list *entry, void *data),
1071                         struct lock_list **target_entry)
1072 {
1073         return __bfs(src_entry, data, match, target_entry, 1);
1074
1075 }
1076
1077 static inline int __bfs_backwards(struct lock_list *src_entry,
1078                         void *data,
1079                         int (*match)(struct lock_list *entry, void *data),
1080                         struct lock_list **target_entry)
1081 {
1082         return __bfs(src_entry, data, match, target_entry, 0);
1083
1084 }
1085
1086 /*
1087  * Forwards-direction lock-dependency checking (via the BFS above), used for
1088  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
1089  * checking.
1090  */
1091
1092 /*
1093  * Print a dependency chain entry (this is only done when a deadlock
1094  * has been detected):
1095  */
1096 static noinline int
1097 print_circular_bug_entry(struct lock_list *target, int depth)
1098 {
1099         if (debug_locks_silent)
1100                 return 0;
1101         printk("\n-> #%u", depth);
1102         print_lock_name(target->class);
1103         printk(KERN_CONT ":\n");
1104         print_stack_trace(&target->trace, 6);
1105
1106         return 0;
1107 }
1108
1109 static void
1110 print_circular_lock_scenario(struct held_lock *src,
1111                              struct held_lock *tgt,
1112                              struct lock_list *prt)
1113 {
1114         struct lock_class *source = hlock_class(src);
1115         struct lock_class *target = hlock_class(tgt);
1116         struct lock_class *parent = prt->class;
1117
1118         /*
1119          * For a direct locking problem, where the unsafe_class lock is
1120          * taken directly under the safe_class lock, all we need to show
1121          * is the deadlock scenario, as it is obvious that the
1122          * unsafe lock is taken under the safe lock.
1123          *
1124          * But if there is a chain instead, where the safe lock takes
1125          * an intermediate lock (middle_class) that is not the same as
1126          * the safe lock, then the lock chain is
1127          * used to describe the problem. Otherwise we would need
1128          * to show a different CPU case for each link in the chain
1129          * from the safe_class lock to the unsafe_class lock.
1130          */
1131         if (parent != source) {
1132                 printk("Chain exists of:\n  ");
1133                 __print_lock_name(source);
1134                 printk(KERN_CONT " --> ");
1135                 __print_lock_name(parent);
1136                 printk(KERN_CONT " --> ");
1137                 __print_lock_name(target);
1138                 printk(KERN_CONT "\n\n");
1139         }
1140
1141         if (cross_lock(tgt->instance)) {
1142                 printk(" Possible unsafe locking scenario by crosslock:\n\n");
1143                 printk("       CPU0                    CPU1\n");
1144                 printk("       ----                    ----\n");
1145                 printk("  lock(");
1146                 __print_lock_name(parent);
1147                 printk(KERN_CONT ");\n");
1148                 printk("  lock(");
1149                 __print_lock_name(target);
1150                 printk(KERN_CONT ");\n");
1151                 printk("                               lock(");
1152                 __print_lock_name(source);
1153                 printk(KERN_CONT ");\n");
1154                 printk("                               unlock(");
1155                 __print_lock_name(target);
1156                 printk(KERN_CONT ");\n");
1157                 printk("\n *** DEADLOCK ***\n\n");
1158         } else {
1159                 printk(" Possible unsafe locking scenario:\n\n");
1160                 printk("       CPU0                    CPU1\n");
1161                 printk("       ----                    ----\n");
1162                 printk("  lock(");
1163                 __print_lock_name(target);
1164                 printk(KERN_CONT ");\n");
1165                 printk("                               lock(");
1166                 __print_lock_name(parent);
1167                 printk(KERN_CONT ");\n");
1168                 printk("                               lock(");
1169                 __print_lock_name(target);
1170                 printk(KERN_CONT ");\n");
1171                 printk("  lock(");
1172                 __print_lock_name(source);
1173                 printk(KERN_CONT ");\n");
1174                 printk("\n *** DEADLOCK ***\n\n");
1175         }
1176 }
1177
1178 /*
1179  * When a circular dependency is detected, print the
1180  * header first:
1181  */
1182 static noinline int
1183 print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1184                         struct held_lock *check_src,
1185                         struct held_lock *check_tgt)
1186 {
1187         struct task_struct *curr = current;
1188
1189         if (debug_locks_silent)
1190                 return 0;
1191
1192         pr_warn("\n");
1193         pr_warn("======================================================\n");
1194         pr_warn("WARNING: possible circular locking dependency detected\n");
1195         print_kernel_ident();
1196         pr_warn("------------------------------------------------------\n");
1197         pr_warn("%s/%d is trying to acquire lock:\n",
1198                 curr->comm, task_pid_nr(curr));
1199         print_lock(check_src);
1200
1201         if (cross_lock(check_tgt->instance))
1202                 pr_warn("\nbut now in release context of a crosslock acquired at the following:\n");
1203         else
1204                 pr_warn("\nbut task is already holding lock:\n");
1205
1206         print_lock(check_tgt);
1207         pr_warn("\nwhich lock already depends on the new lock.\n\n");
1208         pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
1209
1210         print_circular_bug_entry(entry, depth);
1211
1212         return 0;
1213 }
1214
1215 static inline int class_equal(struct lock_list *entry, void *data)
1216 {
1217         return entry->class == data;
1218 }
1219
1220 static noinline int print_circular_bug(struct lock_list *this,
1221                                 struct lock_list *target,
1222                                 struct held_lock *check_src,
1223                                 struct held_lock *check_tgt,
1224                                 struct stack_trace *trace)
1225 {
1226         struct task_struct *curr = current;
1227         struct lock_list *parent;
1228         struct lock_list *first_parent;
1229         int depth;
1230
1231         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1232                 return 0;
1233
1234         if (cross_lock(check_tgt->instance))
1235                 this->trace = *trace;
1236         else if (!save_trace(&this->trace))
1237                 return 0;
1238
1239         depth = get_lock_depth(target);
1240
1241         print_circular_bug_header(target, depth, check_src, check_tgt);
1242
1243         parent = get_lock_parent(target);
1244         first_parent = parent;
1245
1246         while (parent) {
1247                 print_circular_bug_entry(parent, --depth);
1248                 parent = get_lock_parent(parent);
1249         }
1250
1251         printk("\nother info that might help us debug this:\n\n");
1252         print_circular_lock_scenario(check_src, check_tgt,
1253                                      first_parent);
1254
1255         lockdep_print_held_locks(curr);
1256
1257         printk("\nstack backtrace:\n");
1258         dump_stack();
1259
1260         return 0;
1261 }
1262
1263 static noinline int print_bfs_bug(int ret)
1264 {
1265         if (!debug_locks_off_graph_unlock())
1266                 return 0;
1267
1268         /*
1269          * Breadth-first-search failed, graph got corrupted?
1270          */
1271         WARN(1, "lockdep bfs error:%d\n", ret);
1272
1273         return 0;
1274 }
1275
1276 static int noop_count(struct lock_list *entry, void *data)
1277 {
1278         (*(unsigned long *)data)++;
1279         return 0;
1280 }
1281
1282 static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
1283 {
1284         unsigned long  count = 0;
1285         struct lock_list *uninitialized_var(target_entry);
1286
1287         __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
1288
1289         return count;
1290 }
1291 unsigned long lockdep_count_forward_deps(struct lock_class *class)
1292 {
1293         unsigned long ret, flags;
1294         struct lock_list this;
1295
1296         this.parent = NULL;
1297         this.class = class;
1298
1299         raw_local_irq_save(flags);
1300         current->lockdep_recursion = 1;
1301         arch_spin_lock(&lockdep_lock);
1302         ret = __lockdep_count_forward_deps(&this);
1303         arch_spin_unlock(&lockdep_lock);
1304         current->lockdep_recursion = 0;
1305         raw_local_irq_restore(flags);
1306
1307         return ret;
1308 }
1309
1310 static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
1311 {
1312         unsigned long  count = 0;
1313         struct lock_list *uninitialized_var(target_entry);
1314
1315         __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
1316
1317         return count;
1318 }
1319
1320 unsigned long lockdep_count_backward_deps(struct lock_class *class)
1321 {
1322         unsigned long ret, flags;
1323         struct lock_list this;
1324
1325         this.parent = NULL;
1326         this.class = class;
1327
1328         raw_local_irq_save(flags);
1329         current->lockdep_recursion = 1;
1330         arch_spin_lock(&lockdep_lock);
1331         ret = __lockdep_count_backward_deps(&this);
1332         arch_spin_unlock(&lockdep_lock);
1333         current->lockdep_recursion = 0;
1334         raw_local_irq_restore(flags);
1335
1336         return ret;
1337 }
1338
1339 /*
1340  * Prove that the dependency graph starting at <root> cannot
1341  * lead to <target>. Print an error and return 0 if it does.
1342  */
1343 static noinline int
1344 check_noncircular(struct lock_list *root, struct lock_class *target,
1345                 struct lock_list **target_entry)
1346 {
1347         int result;
1348
1349         debug_atomic_inc(nr_cyclic_checks);
1350
1351         result = __bfs_forwards(root, target, class_equal, target_entry);
1352
1353         return result;
1354 }
1355
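/*
 * Check whether the forward dependency graph starting at @root already
 * reaches @target; if so, a new direct @root -> @target dependency would
 * be redundant and need not be recorded.
 */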
1356 static noinline int
1357 check_redundant(struct lock_list *root, struct lock_class *target,
1358                 struct lock_list **target_entry)
1359 {
1360         int result;
1361
1362         debug_atomic_inc(nr_redundant_checks);
1363
1364         result = __bfs_forwards(root, target, class_equal, target_entry);
1365
1366         return result;
1367 }
1368
1369 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1370 /*
1371  * Forwards and backwards subgraph searching, for the purposes of
1372  * proving that two subgraphs can be connected by a new dependency
1373  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1374  */
1375
1376 static inline int usage_match(struct lock_list *entry, void *bit)
1377 {
1378         return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
1379 }
1380
1381
1382
1383 /*
1384  * Find a node in the forwards-direction dependency sub-graph starting
1385  * at @root->class that matches @bit.
1386  *
1387  * Return 0 if such a node exists in the subgraph, and put that node
1388  * into *@target_entry.
1389  *
1390  * Return 1 otherwise and keep *@target_entry unchanged.
1391  * Return <0 on error.
1392  */
1393 static int
1394 find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
1395                         struct lock_list **target_entry)
1396 {
1397         int result;
1398
1399         debug_atomic_inc(nr_find_usage_forwards_checks);
1400
1401         result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
1402
1403         return result;
1404 }
1405
1406 /*
1407  * Find a node in the backwards-direction dependency sub-graph starting
1408  * at @root->class that matches @bit.
1409  *
1410  * Return 0 if such a node exists in the subgraph, and put that node
1411  * into *@target_entry.
1412  *
1413  * Return 1 otherwise and keep *@target_entry unchanged.
1414  * Return <0 on error.
1415  */
1416 static int
1417 find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
1418                         struct lock_list **target_entry)
1419 {
1420         int result;
1421
1422         debug_atomic_inc(nr_find_usage_backwards_checks);
1423
1424         result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
1425
1426         return result;
1427 }
1428
1429 static void print_lock_class_header(struct lock_class *class, int depth)
1430 {
1431         int bit;
1432
1433         printk("%*s->", depth, "");
1434         print_lock_name(class);
1435         printk(KERN_CONT " ops: %lu", class->ops);
1436         printk(KERN_CONT " {\n");
1437
1438         for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1439                 if (class->usage_mask & (1 << bit)) {
1440                         int len = depth;
1441
1442                         len += printk("%*s   %s", depth, "", usage_str[bit]);
1443                         len += printk(KERN_CONT " at:\n");
1444                         print_stack_trace(class->usage_traces + bit, len);
1445                 }
1446         }
1447         printk("%*s }\n", depth, "");
1448
1449         printk("%*s ... key      at: [<%p>] %pS\n",
1450                 depth, "", class->key, class->key);
1451 }
1452
1453 /*
1454  * printk the shortest lock dependencies from @leaf to @root in reverse order:
1455  */
1456 static void __used
1457 print_shortest_lock_dependencies(struct lock_list *leaf,
1458                                 struct lock_list *root)
1459 {
1460         struct lock_list *entry = leaf;
1461         int depth;
1462
1463         /* compute the depth from the tree generated by the BFS */
1464         depth = get_lock_depth(leaf);
1465
1466         do {
1467                 print_lock_class_header(entry->class, depth);
1468                 printk("%*s ... acquired at:\n", depth, "");
1469                 print_stack_trace(&entry->trace, 2);
1470                 printk("\n");
1471
1472                 if (depth == 0 && (entry != root)) {
1473                         printk("lockdep:%s bad path found in chain graph\n", __func__);
1474                         break;
1475                 }
1476
1477                 entry = get_lock_parent(entry);
1478                 depth--;
1479         } while (entry && (depth >= 0));
1480
1481         return;
1482 }
1483
1484 static void
1485 print_irq_lock_scenario(struct lock_list *safe_entry,
1486                         struct lock_list *unsafe_entry,
1487                         struct lock_class *prev_class,
1488                         struct lock_class *next_class)
1489 {
1490         struct lock_class *safe_class = safe_entry->class;
1491         struct lock_class *unsafe_class = unsafe_entry->class;
1492         struct lock_class *middle_class = prev_class;
1493
1494         if (middle_class == safe_class)
1495                 middle_class = next_class;
1496
1497         /*
1498          * For a direct locking problem, where the unsafe_class lock is
1499          * taken directly under the safe_class lock, all we need to show
1500          * is the deadlock scenario, as it is obvious that the
1501          * unsafe lock is taken under the safe lock.
1502          *
1503          * But if there is a chain instead, where the safe lock takes
1504          * an intermediate lock (middle_class) that is not the same as
1505          * the safe lock, then the lock chain is
1506          * used to describe the problem. Otherwise we would need
1507          * to show a different CPU case for each link in the chain
1508          * from the safe_class lock to the unsafe_class lock.
1509          */
1510         if (middle_class != unsafe_class) {
1511                 printk("Chain exists of:\n  ");
1512                 __print_lock_name(safe_class);
1513                 printk(KERN_CONT " --> ");
1514                 __print_lock_name(middle_class);
1515                 printk(KERN_CONT " --> ");
1516                 __print_lock_name(unsafe_class);
1517                 printk(KERN_CONT "\n\n");
1518         }
1519
1520         printk(" Possible interrupt unsafe locking scenario:\n\n");
1521         printk("       CPU0                    CPU1\n");
1522         printk("       ----                    ----\n");
1523         printk("  lock(");
1524         __print_lock_name(unsafe_class);
1525         printk(KERN_CONT ");\n");
1526         printk("                               local_irq_disable();\n");
1527         printk("                               lock(");
1528         __print_lock_name(safe_class);
1529         printk(KERN_CONT ");\n");
1530         printk("                               lock(");
1531         __print_lock_name(middle_class);
1532         printk(KERN_CONT ");\n");
1533         printk("  <Interrupt>\n");
1534         printk("    lock(");
1535         __print_lock_name(safe_class);
1536         printk(KERN_CONT ");\n");
1537         printk("\n *** DEADLOCK ***\n\n");
1538 }
1539
1540 static int
1541 print_bad_irq_dependency(struct task_struct *curr,
1542                          struct lock_list *prev_root,
1543                          struct lock_list *next_root,
1544                          struct lock_list *backwards_entry,
1545                          struct lock_list *forwards_entry,
1546                          struct held_lock *prev,
1547                          struct held_lock *next,
1548                          enum lock_usage_bit bit1,
1549                          enum lock_usage_bit bit2,
1550                          const char *irqclass)
1551 {
1552         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1553                 return 0;
1554
1555         pr_warn("\n");
1556         pr_warn("=====================================================\n");
1557         pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
1558                 irqclass, irqclass);
1559         print_kernel_ident();
1560         pr_warn("-----------------------------------------------------\n");
1561         pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1562                 curr->comm, task_pid_nr(curr),
1563                 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1564                 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1565                 curr->hardirqs_enabled,
1566                 curr->softirqs_enabled);
1567         print_lock(next);
1568
1569         pr_warn("\nand this task is already holding:\n");
1570         print_lock(prev);
1571         pr_warn("which would create a new lock dependency:\n");
1572         print_lock_name(hlock_class(prev));
1573         pr_cont(" ->");
1574         print_lock_name(hlock_class(next));
1575         pr_cont("\n");
1576
1577         pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
1578                 irqclass);
1579         print_lock_name(backwards_entry->class);
1580         pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
1581
1582         print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
1583
1584         pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
1585         print_lock_name(forwards_entry->class);
1586         pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
1587         pr_warn("...");
1588
1589         print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
1590
1591         pr_warn("\nother info that might help us debug this:\n\n");
1592         print_irq_lock_scenario(backwards_entry, forwards_entry,
1593                                 hlock_class(prev), hlock_class(next));
1594
1595         lockdep_print_held_locks(curr);
1596
1597         pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
1598         if (!save_trace(&prev_root->trace))
1599                 return 0;
1600         print_shortest_lock_dependencies(backwards_entry, prev_root);
1601
1602         pr_warn("\nthe dependencies between the lock to be acquired");
1603         pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
1604         if (!save_trace(&next_root->trace))
1605                 return 0;
1606         print_shortest_lock_dependencies(forwards_entry, next_root);
1607
1608         pr_warn("\nstack backtrace:\n");
1609         dump_stack();
1610
1611         return 0;
1612 }
1613
1614 static int
1615 check_usage(struct task_struct *curr, struct held_lock *prev,
1616             struct held_lock *next, enum lock_usage_bit bit_backwards,
1617             enum lock_usage_bit bit_forwards, const char *irqclass)
1618 {
1619         int ret;
1620         struct lock_list this, that;
1621         struct lock_list *uninitialized_var(target_entry);
1622         struct lock_list *uninitialized_var(target_entry1);
1623
1624         this.parent = NULL;
1625
1626         this.class = hlock_class(prev);
1627         ret = find_usage_backwards(&this, bit_backwards, &target_entry);
1628         if (ret < 0)
1629                 return print_bfs_bug(ret);
1630         if (ret == 1)
1631                 return ret;
1632
1633         that.parent = NULL;
1634         that.class = hlock_class(next);
1635         ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
1636         if (ret < 0)
1637                 return print_bfs_bug(ret);
1638         if (ret == 1)
1639                 return ret;
1640
1641         return print_bad_irq_dependency(curr, &this, &that,
1642                         target_entry, target_entry1,
1643                         prev, next,
1644                         bit_backwards, bit_forwards, irqclass);
1645 }
1646
1647 static const char *state_names[] = {
1648 #define LOCKDEP_STATE(__STATE) \
1649         __stringify(__STATE),
1650 #include "lockdep_states.h"
1651 #undef LOCKDEP_STATE
1652 };
1653
1654 static const char *state_rnames[] = {
1655 #define LOCKDEP_STATE(__STATE) \
1656         __stringify(__STATE)"-READ",
1657 #include "lockdep_states.h"
1658 #undef LOCKDEP_STATE
1659 };
1660
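/*
 * With the two IRQ states currently defined in lockdep_states.h (HARDIRQ
 * and SOFTIRQ), the arrays above expand to { "HARDIRQ", "SOFTIRQ" } and
 * { "HARDIRQ-READ", "SOFTIRQ-READ" }. A usage bit keeps read vs. write in
 * bit 0 and the state in bits 2+, hence the masking below (see also
 * exclusive_bit()).
 */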
1661 static inline const char *state_name(enum lock_usage_bit bit)
1662 {
1663         return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1664 }
1665
1666 static int exclusive_bit(int new_bit)
1667 {
1668         /*
1669          * USED_IN
1670          * USED_IN_READ
1671          * ENABLED
1672          * ENABLED_READ
1673          *
1674          * bit 0 - write/read
1675          * bit 1 - used_in/enabled
1676          * bit 2+  state
1677          */
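        /*
         * Example: exclusive_bit(LOCK_USED_IN_HARDIRQ) == LOCK_ENABLED_HARDIRQ,
         * and exclusive_bit(LOCK_ENABLED_SOFTIRQ_READ) == LOCK_USED_IN_SOFTIRQ.
         */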
1678
1679         int state = new_bit & ~3;
1680         int dir = new_bit & 2;
1681
1682         /*
1683          * keep state, bit flip the direction and strip read.
1684          */
1685         return state | (dir ^ 2);
1686 }
1687
1688 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1689                            struct held_lock *next, enum lock_usage_bit bit)
1690 {
1691         /*
1692          * Prove that the new dependency does not connect a hardirq-safe
1693          * lock with a hardirq-unsafe lock - to achieve this we search
1694          * the backwards-subgraph starting at <prev>, and the
1695          * forwards-subgraph starting at <next>:
1696          */
1697         if (!check_usage(curr, prev, next, bit,
1698                            exclusive_bit(bit), state_name(bit)))
1699                 return 0;
1700
1701         bit++; /* _READ */
1702
1703         /*
1704          * Prove that the new dependency does not connect a hardirq-safe-read
1705          * lock with a hardirq-unsafe lock - to achieve this we search
1706          * the backwards-subgraph starting at <prev>, and the
1707          * forwards-subgraph starting at <next>:
1708          */
1709         if (!check_usage(curr, prev, next, bit,
1710                            exclusive_bit(bit), state_name(bit)))
1711                 return 0;
1712
1713         return 1;
1714 }
1715
1716 static int
1717 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1718                 struct held_lock *next)
1719 {
1720 #define LOCKDEP_STATE(__STATE)                                          \
1721         if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
1722                 return 0;
1723 #include "lockdep_states.h"
1724 #undef LOCKDEP_STATE
1725
1726         return 1;
1727 }
1728
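/*
 * Account the new dependency chain to the context it was created in:
 * hardirq, softirq or plain process context.
 */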
1729 static void inc_chains(void)
1730 {
1731         if (current->hardirq_context)
1732                 nr_hardirq_chains++;
1733         else {
1734                 if (current->softirq_context)
1735                         nr_softirq_chains++;
1736                 else
1737                         nr_process_chains++;
1738         }
1739 }
1740
1741 #else
1742
1743 static inline int
1744 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1745                 struct held_lock *next)
1746 {
1747         return 1;
1748 }
1749
1750 static inline void inc_chains(void)
1751 {
1752         nr_process_chains++;
1753 }
1754
1755 #endif
1756
1757 static void
1758 print_deadlock_scenario(struct held_lock *nxt,
1759                              struct held_lock *prv)
1760 {
1761         struct lock_class *next = hlock_class(nxt);
1762         struct lock_class *prev = hlock_class(prv);
1763
1764         printk(" Possible unsafe locking scenario:\n\n");
1765         printk("       CPU0\n");
1766         printk("       ----\n");
1767         printk("  lock(");
1768         __print_lock_name(prev);
1769         printk(KERN_CONT ");\n");
1770         printk("  lock(");
1771         __print_lock_name(next);
1772         printk(KERN_CONT ");\n");
1773         printk("\n *** DEADLOCK ***\n\n");
1774         printk(" May be due to missing lock nesting notation\n\n");
1775 }
1776
1777 static int
1778 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1779                    struct held_lock *next)
1780 {
1781         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1782                 return 0;
1783
1784         pr_warn("\n");
1785         pr_warn("============================================\n");
1786         pr_warn("WARNING: possible recursive locking detected\n");
1787         print_kernel_ident();
1788         pr_warn("--------------------------------------------\n");
1789         pr_warn("%s/%d is trying to acquire lock:\n",
1790                 curr->comm, task_pid_nr(curr));
1791         print_lock(next);
1792         pr_warn("\nbut task is already holding lock:\n");
1793         print_lock(prev);
1794
1795         pr_warn("\nother info that might help us debug this:\n");
1796         print_deadlock_scenario(next, prev);
1797         lockdep_print_held_locks(curr);
1798
1799         pr_warn("\nstack backtrace:\n");
1800         dump_stack();
1801
1802         return 0;
1803 }
1804
1805 /*
1806  * Check whether we are holding such a class already.
1807  *
1808  * (Note that this has to be done separately, because the graph cannot
1809  * detect such classes of deadlocks.)
1810  *
1811  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1812  */
1813 static int
1814 check_deadlock(struct task_struct *curr, struct held_lock *next,
1815                struct lockdep_map *next_instance, int read)
1816 {
1817         struct held_lock *prev;
1818         struct held_lock *nest = NULL;
1819         int i;
1820
1821         for (i = 0; i < curr->lockdep_depth; i++) {
1822                 prev = curr->held_locks + i;
1823
1824                 if (prev->instance == next->nest_lock)
1825                         nest = prev;
1826
1827                 if (hlock_class(prev) != hlock_class(next))
1828                         continue;
1829
1830                 /*
1831                  * Allow read-after-read recursion of the same
1832                  * lock class (i.e. read_lock(lock)+read_lock(lock)):
1833                  */
1834                 if ((read == 2) && prev->read)
1835                         return 2;
1836
1837                 /*
1838                  * We're holding the nest_lock, which serializes this lock's
1839                  * nesting behaviour.
1840                  */
1841                 if (nest)
1842                         return 2;
1843
1844                 if (cross_lock(prev->instance))
1845                         continue;
1846
1847                 return print_deadlock_bug(curr, prev, next);
1848         }
1849         return 1;
1850 }
1851
1852 /*
1853  * There was a chain-cache miss, and we are about to add a new dependency
1854  * to a previous lock. We recursively validate the following rules:
1855  *
1856  *  - would the adding of the <prev> -> <next> dependency create a
1857  *    circular dependency in the graph? [== circular deadlock]
1858  *
1859  *  - does the new prev->next dependency connect any hardirq-safe lock
1860  *    (in the full backwards-subgraph starting at <prev>) with any
1861  *    hardirq-unsafe lock (in the full forwards-subgraph starting at
1862  *    <next>)? [== illegal lock inversion with hardirq contexts]
1863  *
1864  *  - does the new prev->next dependency connect any softirq-safe lock
1865  *    (in the full backwards-subgraph starting at <prev>) with any
1866  *    softirq-unsafe lock (in the full forwards-subgraph starting at
1867  *    <next>)? [== illegal lock inversion with softirq contexts]
1868  *
1869  * any of these scenarios could lead to a deadlock.
1870  *
1871  * Then if all the validations pass, we add the forwards and backwards
1872  * dependency.
1873  */
1874 static int
1875 check_prev_add(struct task_struct *curr, struct held_lock *prev,
1876                struct held_lock *next, int distance, struct stack_trace *trace,
1877                int (*save)(struct stack_trace *trace))
1878 {
1879         struct lock_list *uninitialized_var(target_entry);
1880         struct lock_list *entry;
1881         struct lock_list this;
1882         int ret;
1883
1884         /*
1885          * Prove that the new <prev> -> <next> dependency would not
1886          * create a circular dependency in the graph. (We do this by
1887          * searching the graph forwards, breadth-first, starting at <next>,
1888          * and checking whether we can reach <prev>.)
1889          *
1890          * The search uses a global queue instead of recursion, to keep
1891          * the stack footprint of the check low:
1892          */
1893         this.class = hlock_class(next);
1894         this.parent = NULL;
1895         ret = check_noncircular(&this, hlock_class(prev), &target_entry);
1896         if (unlikely(!ret)) {
1897                 if (!trace->entries) {
1898                         /*
1899                          * If @save fails here, the printing might trigger
1900                          * a WARN but because of the !nr_entries it should
1901                          * not do bad things.
1902                          */
1903                         save(trace);
1904                 }
1905                 return print_circular_bug(&this, target_entry, next, prev, trace);
1906         }
1907         else if (unlikely(ret < 0))
1908                 return print_bfs_bug(ret);
1909
1910         if (!check_prev_add_irq(curr, prev, next))
1911                 return 0;
1912
1913         /*
1914          * For recursive read-locks we do all the dependency checks,
1915          * but we don't store read-triggered dependencies (only
1916          * write-triggered dependencies). This ensures that only the
1917          * write-side dependencies matter, and that if for example a
1918          * write-lock never takes any other locks, then the reads are
1919          * equivalent to a NOP.
1920          */
1921         if (next->read == 2 || prev->read == 2)
1922                 return 1;
1923         /*
1924          * Is the <prev> -> <next> dependency already present?
1925          *
1926          * (this may occur even though this is a new chain: consider
1927          *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1928          *  chains - the second one will be new, but L1 already has
1929          *  L2 added to its dependency list, due to the first chain.)
1930          */
1931         list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1932                 if (entry->class == hlock_class(next)) {
1933                         if (distance == 1)
1934                                 entry->distance = 1;
1935                         return 1;
1936                 }
1937         }
1938
1939         /*
1940          * Is the <prev> -> <next> link redundant?
1941          */
1942         this.class = hlock_class(prev);
1943         this.parent = NULL;
1944         ret = check_redundant(&this, hlock_class(next), &target_entry);
1945         if (!ret) {
1946                 debug_atomic_inc(nr_redundant);
1947                 return 2;
1948         }
1949         if (ret < 0)
1950                 return print_bfs_bug(ret);
1951
1952
1953         if (!trace->entries && !save(trace))
1954                 return 0;
1955
1956         /*
1957          * Ok, all validations passed, add the new lock
1958          * to the previous lock's dependency list:
1959          */
1960         ret = add_lock_to_list(hlock_class(next),
1961                                &hlock_class(prev)->locks_after,
1962                                next->acquire_ip, distance, trace);
1963
1964         if (!ret)
1965                 return 0;
1966
1967         ret = add_lock_to_list(hlock_class(prev),
1968                                &hlock_class(next)->locks_before,
1969                                next->acquire_ip, distance, trace);
1970         if (!ret)
1971                 return 0;
1972
1973         return 2;
1974 }
1975
1976 /*
1977  * Add the dependency to all directly-previous locks that are 'relevant'.
1978  * The ones that are relevant are (in increasing distance from curr):
1979  * all consecutive trylock entries and the final non-trylock entry - or
1980  * the end of this context's lock-chain - whichever comes first.
1981  */
1982 static int
1983 check_prevs_add(struct task_struct *curr, struct held_lock *next)
1984 {
1985         int depth = curr->lockdep_depth;
1986         struct held_lock *hlock;
1987         struct stack_trace trace = {
1988                 .nr_entries = 0,
1989                 .max_entries = 0,
1990                 .entries = NULL,
1991                 .skip = 0,
1992         };
1993
1994         /*
1995          * Debugging checks.
1996          *
1997          * Depth must not be zero for a non-head lock:
1998          */
1999         if (!depth)
2000                 goto out_bug;
2001         /*
2002          * At least two relevant locks must exist for this
2003          * to be a head:
2004          */
2005         if (curr->held_locks[depth].irq_context !=
2006                         curr->held_locks[depth-1].irq_context)
2007                 goto out_bug;
2008
2009         for (;;) {
2010                 int distance = curr->lockdep_depth - depth + 1;
2011                 hlock = curr->held_locks + depth - 1;
2012                 /*
2013                  * Only non-crosslock entries get new dependencies added.
2014                  * Crosslock entries will be added by commit later:
2015                  */
2016                 if (!cross_lock(hlock->instance)) {
2017                         /*
2018                          * Only non-recursive-read entries get new dependencies
2019                          * added:
2020                          */
2021                         if (hlock->read != 2 && hlock->check) {
2022                                 int ret = check_prev_add(curr, hlock, next,
2023                                                          distance, &trace, save_trace);
2024                                 if (!ret)
2025                                         return 0;
2026
2027                                 /*
2028                                  * Stop after the first non-trylock entry,
2029                                  * as non-trylock entries have added their
2030                                  * own direct dependencies already, so this
2031                                  * lock is connected to them indirectly:
2032                                  */
2033                                 if (!hlock->trylock)
2034                                         break;
2035                         }
2036                 }
2037                 depth--;
2038                 /*
2039                  * End of lock-stack?
2040                  */
2041                 if (!depth)
2042                         break;
2043                 /*
2044                  * Stop the search if we cross into another context:
2045                  */
2046                 if (curr->held_locks[depth].irq_context !=
2047                                 curr->held_locks[depth-1].irq_context)
2048                         break;
2049         }
2050         return 1;
2051 out_bug:
2052         if (!debug_locks_off_graph_unlock())
2053                 return 0;
2054
2055         /*
2056          * Clearly we all shouldn't be here, but since we made it we
2057          * can reliably say we messed up our state. See the above two
2058          * gotos for reasons why we could possibly end up here.
2059          */
2060         WARN_ON(1);
2061
2062         return 0;
2063 }
2064
2065 unsigned long nr_lock_chains;
2066 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
2067 int nr_chain_hlocks;
2068 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
2069
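/*
 * Map the i-th entry of a cached lock chain back to its lock_class:
 * chain_hlocks[] stores indices into the lock_classes[] array.
 */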
2070 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
2071 {
2072         return lock_classes + chain_hlocks[chain->base + i];
2073 }
2074
2075 /*
2076  * Returns the index of the first held_lock of the current chain
2077  */
2078 static inline int get_first_held_lock(struct task_struct *curr,
2079                                         struct held_lock *hlock)
2080 {
2081         int i;
2082         struct held_lock *hlock_curr;
2083
2084         for (i = curr->lockdep_depth - 1; i >= 0; i--) {
2085                 hlock_curr = curr->held_locks + i;
2086                 if (hlock_curr->irq_context != hlock->irq_context)
2087                         break;
2088
2089         }
2090
2091         return ++i;
2092 }
2093
2094 #ifdef CONFIG_DEBUG_LOCKDEP
2095 /*
2096  * Returns the next chain_key iteration
2097  */
2098 static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
2099 {
2100         u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
2101
2102         printk(" class_idx:%d -> chain_key:%016Lx",
2103                 class_idx,
2104                 (unsigned long long)new_chain_key);
2105         return new_chain_key;
2106 }
2107
2108 static void
2109 print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
2110 {
2111         struct held_lock *hlock;
2112         u64 chain_key = 0;
2113         int depth = curr->lockdep_depth;
2114         int i;
2115
2116         printk("depth: %u\n", depth + 1);
2117         for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
2118                 hlock = curr->held_locks + i;
2119                 chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
2120
2121                 print_lock(hlock);
2122         }
2123
2124         print_chain_key_iteration(hlock_next->class_idx, chain_key);
2125         print_lock(hlock_next);
2126 }
2127
2128 static void print_chain_keys_chain(struct lock_chain *chain)
2129 {
2130         int i;
2131         u64 chain_key = 0;
2132         int class_id;
2133
2134         printk("depth: %u\n", chain->depth);
2135         for (i = 0; i < chain->depth; i++) {
2136                 class_id = chain_hlocks[chain->base + i];
2137                 chain_key = print_chain_key_iteration(class_id + 1, chain_key);
2138
2139                 print_lock_name(lock_classes + class_id);
2140                 printk("\n");
2141         }
2142 }
2143
2144 static void print_collision(struct task_struct *curr,
2145                         struct held_lock *hlock_next,
2146                         struct lock_chain *chain)
2147 {
2148         pr_warn("\n");
2149         pr_warn("============================\n");
2150         pr_warn("WARNING: chain_key collision\n");
2151         print_kernel_ident();
2152         pr_warn("----------------------------\n");
2153         pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
2154         pr_warn("Hash chain already cached but the contents don't match!\n");
2155
2156         pr_warn("Held locks:");
2157         print_chain_keys_held_locks(curr, hlock_next);
2158
2159         pr_warn("Locks in cached chain:");
2160         print_chain_keys_chain(chain);
2161
2162         pr_warn("\nstack backtrace:\n");
2163         dump_stack();
2164 }
2165 #endif
2166
2167 /*
2168  * Checks whether the chain and the current held locks are consistent
2169  * in depth and also in content. If they are not it most likely means
2170  * that there was a collision during the calculation of the chain_key.
2171  * Returns: 0 not passed, 1 passed
2172  */
2173 static int check_no_collision(struct task_struct *curr,
2174                         struct held_lock *hlock,
2175                         struct lock_chain *chain)
2176 {
2177 #ifdef CONFIG_DEBUG_LOCKDEP
2178         int i, j, id;
2179
2180         i = get_first_held_lock(curr, hlock);
2181
2182         if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
2183                 print_collision(curr, hlock, chain);
2184                 return 0;
2185         }
2186
2187         for (j = 0; j < chain->depth - 1; j++, i++) {
2188                 id = curr->held_locks[i].class_idx - 1;
2189
2190                 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
2191                         print_collision(curr, hlock, chain);
2192                         return 0;
2193                 }
2194         }
2195 #endif
2196         return 1;
2197 }
2198
2199 /*
2200  * This is for building a chain between just two different classes,
2201  * instead of adding a new hlock upon current, which is done by
2202  * add_chain_cache().
2203  *
2204  * This can be called in any context with two classes, while
2205  * add_chain_cache() must be done within the lock owner's context
2206  * since it uses hlock which might be racy in another context.
2207  */
2208 static inline int add_chain_cache_classes(unsigned int prev,
2209                                           unsigned int next,
2210                                           unsigned int irq_context,
2211                                           u64 chain_key)
2212 {
2213         struct hlist_head *hash_head = chainhashentry(chain_key);
2214         struct lock_chain *chain;
2215
2216         /*
2217          * Allocate a new chain entry from the static array, and add
2218          * it to the hash:
2219          */
2220
2221         /*
2222          * We might need to take the graph lock, ensure we've got IRQs
2223          * disabled to make this an IRQ-safe lock.. for recursion reasons
2224          * lockdep won't complain about its own locking errors.
2225          */
2226         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2227                 return 0;
2228
2229         if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
2230                 if (!debug_locks_off_graph_unlock())
2231                         return 0;
2232
2233                 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
2234                 dump_stack();
2235                 return 0;
2236         }
2237
2238         chain = lock_chains + nr_lock_chains++;
2239         chain->chain_key = chain_key;
2240         chain->irq_context = irq_context;
2241         chain->depth = 2;
2242         if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2243                 chain->base = nr_chain_hlocks;
2244                 nr_chain_hlocks += chain->depth;
2245                 chain_hlocks[chain->base] = prev - 1;
2246                 chain_hlocks[chain->base + 1] = next - 1;
2247         }
2248 #ifdef CONFIG_DEBUG_LOCKDEP
2249         /*
2250          * Important for check_no_collision().
2251          */
2252         else {
2253                 if (!debug_locks_off_graph_unlock())
2254                         return 0;
2255
2256                 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2257                 dump_stack();
2258                 return 0;
2259         }
2260 #endif
2261
2262         hlist_add_head_rcu(&chain->entry, hash_head);
2263         debug_atomic_inc(chain_lookup_misses);
2264         inc_chains();
2265
2266         return 1;
2267 }
2268
2269 /*
2270  * Adds a dependency chain into the chain hashtable. Must be called with
2271  * graph_lock held.
2272  *
2273  * Return 0 on failure, in which case graph_lock is released.
2274  * Return 1 on success, with graph_lock still held.
2275  */
2276 static inline int add_chain_cache(struct task_struct *curr,
2277                                   struct held_lock *hlock,
2278                                   u64 chain_key)
2279 {
2280         struct lock_class *class = hlock_class(hlock);
2281         struct hlist_head *hash_head = chainhashentry(chain_key);
2282         struct lock_chain *chain;
2283         int i, j;
2284
2285         /*
2286          * Allocate a new chain entry from the static array, and add
2287          * it to the hash:
2288          */
2289
2290         /*
2291          * We might need to take the graph lock, ensure we've got IRQs
2292          * disabled to make this an IRQ-safe lock.. for recursion reasons
2293          * lockdep won't complain about its own locking errors.
2294          */
2295         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2296                 return 0;
2297
2298         if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
2299                 if (!debug_locks_off_graph_unlock())
2300                         return 0;
2301
2302                 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
2303                 dump_stack();
2304                 return 0;
2305         }
2306         chain = lock_chains + nr_lock_chains++;
2307         chain->chain_key = chain_key;
2308         chain->irq_context = hlock->irq_context;
2309         i = get_first_held_lock(curr, hlock);
2310         chain->depth = curr->lockdep_depth + 1 - i;
2311
2312         BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
2313         BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
2314         BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
2315
2316         if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2317                 chain->base = nr_chain_hlocks;
2318                 for (j = 0; j < chain->depth - 1; j++, i++) {
2319                         int lock_id = curr->held_locks[i].class_idx - 1;
2320                         chain_hlocks[chain->base + j] = lock_id;
2321                 }
2322                 chain_hlocks[chain->base + j] = class - lock_classes;
2323         }
2324
2325         if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
2326                 nr_chain_hlocks += chain->depth;
2327
2328 #ifdef CONFIG_DEBUG_LOCKDEP
2329         /*
2330          * Important for check_no_collision().
2331          */
2332         if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
2333                 if (!debug_locks_off_graph_unlock())
2334                         return 0;
2335
2336                 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2337                 dump_stack();
2338                 return 0;
2339         }
2340 #endif
2341
2342         hlist_add_head_rcu(&chain->entry, hash_head);
2343         debug_atomic_inc(chain_lookup_misses);
2344         inc_chains();
2345
2346         return 1;
2347 }
2348
2349 /*
2350  * Look up a dependency chain.
2351  */
2352 static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
2353 {
2354         struct hlist_head *hash_head = chainhashentry(chain_key);
2355         struct lock_chain *chain;
2356
2357         /*
2358          * We can walk it lock-free, because entries only get added
2359          * to the hash:
2360          */
2361         hlist_for_each_entry_rcu(chain, hash_head, entry) {
2362                 if (chain->chain_key == chain_key) {
2363                         debug_atomic_inc(chain_lookup_hits);
2364                         return chain;
2365                 }
2366         }
2367         return NULL;
2368 }
2369
2370 /*
2371  * If the key is not yet present in the dependency chain cache then
2372  * add it and return 1 - in this case the new dependency chain is
2373  * validated. If the key is already hashed, return 0.
2374  * (On a return value of 1, graph_lock is held.)
2375  */
2376 static inline int lookup_chain_cache_add(struct task_struct *curr,
2377                                          struct held_lock *hlock,
2378                                          u64 chain_key)
2379 {
2380         struct lock_class *class = hlock_class(hlock);
2381         struct lock_chain *chain = lookup_chain_cache(chain_key);
2382
2383         if (chain) {
2384 cache_hit:
2385                 if (!check_no_collision(curr, hlock, chain))
2386                         return 0;
2387
2388                 if (very_verbose(class)) {
2389                         printk("\nhash chain already cached, key: "
2390                                         "%016Lx tail class: [%p] %s\n",
2391                                         (unsigned long long)chain_key,
2392                                         class->key, class->name);
2393                 }
2394
2395                 return 0;
2396         }
2397
2398         if (very_verbose(class)) {
2399                 printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
2400                         (unsigned long long)chain_key, class->key, class->name);
2401         }
2402
2403         if (!graph_lock())
2404                 return 0;
2405
2406         /*
2407          * We have to walk the chain again locked - to avoid duplicates:
2408          */
2409         chain = lookup_chain_cache(chain_key);
2410         if (chain) {
2411                 graph_unlock();
2412                 goto cache_hit;
2413         }
2414
2415         if (!add_chain_cache(curr, hlock, chain_key))
2416                 return 0;
2417
2418         return 1;
2419 }
2420
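/*
 * Validate the dependency chain ending in @hlock: on a chain-cache miss
 * the full deadlock and IRQ-inversion checks run and the new dependencies
 * are added; a cache hit means an identical chain was validated before,
 * so the checks can be skipped.
 */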
2421 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
2422                 struct held_lock *hlock, int chain_head, u64 chain_key)
2423 {
2424         /*
2425          * Trylock needs to maintain the stack of held locks, but it
2426          * does not add new dependencies, because trylock can be done
2427          * in any order.
2428          *
2429          * We look up the chain_key and do the O(N^2) check and update of
2430          * the dependencies only if this is a new dependency chain.
2431          * (If lookup_chain_cache_add() returns 1 it acquires
2432          * graph_lock for us)
2433          */
2434         if (!hlock->trylock && hlock->check &&
2435             lookup_chain_cache_add(curr, hlock, chain_key)) {
2436                 /*
2437                  * Check whether last held lock:
2438                  *
2439                  * - is irq-safe, if this lock is irq-unsafe
2440                  * - is softirq-safe, if this lock is hardirq-unsafe
2441                  *
2442                  * And check whether the new lock's dependency graph
2443                  * could lead back to the previous lock.
2444                  *
2445                  * any of these scenarios could lead to a deadlock. If all
2446                  * the validations pass, we add the new dependency.
2447                  */
2448                 int ret = check_deadlock(curr, hlock, lock, hlock->read);
2449
2450                 if (!ret)
2451                         return 0;
2452                 /*
2453                  * Mark recursive read, as we jump over it when
2454                  * building dependencies (just like we jump over
2455                  * trylock entries):
2456                  */
2457                 if (ret == 2)
2458                         hlock->read = 2;
2459                 /*
2460                  * Add dependency only if this lock is not the head
2461                  * of the chain, and if it's not a secondary read-lock:
2462                  */
2463                 if (!chain_head && ret != 2) {
2464                         if (!check_prevs_add(curr, hlock))
2465                                 return 0;
2466                 }
2467
2468                 graph_unlock();
2469         } else {
2470                 /* after lookup_chain_cache_add(): */
2471                 if (unlikely(!debug_locks))
2472                         return 0;
2473         }
2474
2475         return 1;
2476 }
2477 #else
2478 static inline int validate_chain(struct task_struct *curr,
2479                 struct lockdep_map *lock, struct held_lock *hlock,
2480                 int chain_head, u64 chain_key)
2481 {
2482         return 1;
2483 }
2484 #endif
2485
2486 /*
2487  * We are building curr_chain_key incrementally, so double-check
2488  * it from scratch, to make sure that it's done correctly:
2489  */
2490 static void check_chain_key(struct task_struct *curr)
2491 {
2492 #ifdef CONFIG_DEBUG_LOCKDEP
2493         struct held_lock *hlock, *prev_hlock = NULL;
2494         unsigned int i;
2495         u64 chain_key = 0;
2496
2497         for (i = 0; i < curr->lockdep_depth; i++) {
2498                 hlock = curr->held_locks + i;
2499                 if (chain_key != hlock->prev_chain_key) {
2500                         debug_locks_off();
2501                         /*
2502                          * We got mighty confused, our chain keys don't match
2503                          * with what we expect, did someone trample on our task state?
2504                          */
2505                         WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
2506                                 curr->lockdep_depth, i,
2507                                 (unsigned long long)chain_key,
2508                                 (unsigned long long)hlock->prev_chain_key);
2509                         return;
2510                 }
2511                 /*
2512                  * Whoops, ran out of static storage again?
2513                  */
2514                 if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
2515                         return;
2516
2517                 if (prev_hlock && (prev_hlock->irq_context !=
2518                                                         hlock->irq_context))
2519                         chain_key = 0;
2520                 chain_key = iterate_chain_key(chain_key, hlock->class_idx);
2521                 prev_hlock = hlock;
2522         }
2523         if (chain_key != curr->curr_chain_key) {
2524                 debug_locks_off();
2525                 /*
2526                  * More smoking hash instead of calculating it, damn see these
2527                  * numbers float.. I bet that a pink elephant stepped on my memory.
2528                  */
2529                 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
2530                         curr->lockdep_depth, i,
2531                         (unsigned long long)chain_key,
2532                         (unsigned long long)curr->curr_chain_key);
2533         }
2534 #endif
2535 }
2536
2537 static void
2538 print_usage_bug_scenario(struct held_lock *lock)
2539 {
2540         struct lock_class *class = hlock_class(lock);
2541
2542         printk(" Possible unsafe locking scenario:\n\n");
2543         printk("       CPU0\n");
2544         printk("       ----\n");
2545         printk("  lock(");
2546         __print_lock_name(class);
2547         printk(KERN_CONT ");\n");
2548         printk("  <Interrupt>\n");
2549         printk("    lock(");
2550         __print_lock_name(class);
2551         printk(KERN_CONT ");\n");
2552         printk("\n *** DEADLOCK ***\n\n");
2553 }
2554
2555 static int
2556 print_usage_bug(struct task_struct *curr, struct held_lock *this,
2557                 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
2558 {
2559         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2560                 return 0;
2561
2562         pr_warn("\n");
2563         pr_warn("================================\n");
2564         pr_warn("WARNING: inconsistent lock state\n");
2565         print_kernel_ident();
2566         pr_warn("--------------------------------\n");
2567
2568         pr_warn("inconsistent {%s} -> {%s} usage.\n",
2569                 usage_str[prev_bit], usage_str[new_bit]);
2570
2571         pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
2572                 curr->comm, task_pid_nr(curr),
2573                 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2574                 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2575                 trace_hardirqs_enabled(curr),
2576                 trace_softirqs_enabled(curr));
2577         print_lock(this);
2578
2579         pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
2580         print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
2581
2582         print_irqtrace_events(curr);
2583         pr_warn("\nother info that might help us debug this:\n");
2584         print_usage_bug_scenario(this);
2585
2586         lockdep_print_held_locks(curr);
2587
2588         pr_warn("\nstack backtrace:\n");
2589         dump_stack();
2590
2591         return 0;
2592 }
2593
2594 /*
2595  * Print out an error if an invalid bit is set:
2596  */
2597 static inline int
2598 valid_state(struct task_struct *curr, struct held_lock *this,
2599             enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
2600 {
2601         if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
2602                 return print_usage_bug(curr, this, bad_bit, new_bit);
2603         return 1;
2604 }
2605
2606 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2607                      enum lock_usage_bit new_bit);
2608
2609 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2610
2611 /*
2612  * print irq inversion bug:
2613  */
2614 static int
2615 print_irq_inversion_bug(struct task_struct *curr,
2616                         struct lock_list *root, struct lock_list *other,
2617                         struct held_lock *this, int forwards,
2618                         const char *irqclass)
2619 {
2620         struct lock_list *entry = other;
2621         struct lock_list *middle = NULL;
2622         int depth;
2623
2624         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2625                 return 0;
2626
2627         pr_warn("\n");
2628         pr_warn("========================================================\n");
2629         pr_warn("WARNING: possible irq lock inversion dependency detected\n");
2630         print_kernel_ident();
2631         pr_warn("--------------------------------------------------------\n");
2632         pr_warn("%s/%d just changed the state of lock:\n",
2633                 curr->comm, task_pid_nr(curr));
2634         print_lock(this);
2635         if (forwards)
2636                 pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
2637         else
2638                 pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
2639         print_lock_name(other->class);
2640         pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
2641
2642         pr_warn("\nother info that might help us debug this:\n");
2643
2644         /* Find a middle lock (if one exists) */
2645         depth = get_lock_depth(other);
2646         do {
2647                 if (depth == 0 && (entry != root)) {
2648                         pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
2649                         break;
2650                 }
2651                 middle = entry;
2652                 entry = get_lock_parent(entry);
2653                 depth--;
2654         } while (entry && entry != root && (depth >= 0));
2655         if (forwards)
2656                 print_irq_lock_scenario(root, other,
2657                         middle ? middle->class : root->class, other->class);
2658         else
2659                 print_irq_lock_scenario(other, root,
2660                         middle ? middle->class : other->class, root->class);
2661
2662         lockdep_print_held_locks(curr);
2663
2664         pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
2665         if (!save_trace(&root->trace))
2666                 return 0;
2667         print_shortest_lock_dependencies(other, root);
2668
2669         pr_warn("\nstack backtrace:\n");
2670         dump_stack();
2671
2672         return 0;
2673 }
2674
2675 /*
2676  * Prove that in the forwards-direction subgraph starting at <this>
2677  * there is no lock matching <mask>:
2678  */
2679 static int
2680 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2681                      enum lock_usage_bit bit, const char *irqclass)
2682 {
2683         int ret;
2684         struct lock_list root;
2685         struct lock_list *uninitialized_var(target_entry);
2686
2687         root.parent = NULL;
2688         root.class = hlock_class(this);
2689         ret = find_usage_forwards(&root, bit, &target_entry);
2690         if (ret < 0)
2691                 return print_bfs_bug(ret);
2692         if (ret == 1)
2693                 return ret;
2694
2695         return print_irq_inversion_bug(curr, &root, target_entry,
2696                                         this, 1, irqclass);
2697 }
2698
2699 /*
2700  * Prove that in the backwards-direction subgraph starting at <this>
2701  * there is no lock matching <mask>:
2702  */
2703 static int
2704 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2705                       enum lock_usage_bit bit, const char *irqclass)
2706 {
2707         int ret;
2708         struct lock_list root;
2709         struct lock_list *uninitialized_var(target_entry);
2710
2711         root.parent = NULL;
2712         root.class = hlock_class(this);
2713         ret = find_usage_backwards(&root, bit, &target_entry);
2714         if (ret < 0)
2715                 return print_bfs_bug(ret);
2716         if (ret == 1)
2717                 return ret;
2718
2719         return print_irq_inversion_bug(curr, &root, target_entry,
2720                                         this, 0, irqclass);
2721 }
2722
2723 void print_irqtrace_events(struct task_struct *curr)
2724 {
2725         printk("irq event stamp: %u\n", curr->irq_events);
2726         printk("hardirqs last  enabled at (%u): [<%p>] %pS\n",
2727                 curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
2728                 (void *)curr->hardirq_enable_ip);
2729         printk("hardirqs last disabled at (%u): [<%p>] %pS\n",
2730                 curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
2731                 (void *)curr->hardirq_disable_ip);
2732         printk("softirqs last  enabled at (%u): [<%p>] %pS\n",
2733                 curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
2734                 (void *)curr->softirq_enable_ip);
2735         printk("softirqs last disabled at (%u): [<%p>] %pS\n",
2736                 curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
2737                 (void *)curr->softirq_disable_ip);
2738 }
2739
2740 static int HARDIRQ_verbose(struct lock_class *class)
2741 {
2742 #if HARDIRQ_VERBOSE
2743         return class_filter(class);
2744 #endif
2745         return 0;
2746 }
2747
2748 static int SOFTIRQ_verbose(struct lock_class *class)
2749 {
2750 #if SOFTIRQ_VERBOSE
2751         return class_filter(class);
2752 #endif
2753         return 0;
2754 }
2755
2756 #define STRICT_READ_CHECKS      1
2757
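/*
 * One *_verbose() predicate per state in lockdep_states.h, indexed by
 * the state part (bit >> 2) of a usage bit:
 */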
2758 static int (*state_verbose_f[])(struct lock_class *class) = {
2759 #define LOCKDEP_STATE(__STATE) \
2760         __STATE##_verbose,
2761 #include "lockdep_states.h"
2762 #undef LOCKDEP_STATE
2763 };
2764
2765 static inline int state_verbose(enum lock_usage_bit bit,
2766                                 struct lock_class *class)
2767 {
2768         return state_verbose_f[bit >> 2](class);
2769 }
2770
2771 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2772                              enum lock_usage_bit bit, const char *name);
2773
2774 static int
2775 mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2776                 enum lock_usage_bit new_bit)
2777 {
2778         int excl_bit = exclusive_bit(new_bit);
2779         int read = new_bit & 1;
2780         int dir = new_bit & 2;
2781
2782         /*
2783          * mark USED_IN has to look forwards -- to ensure no dependency
2784          * has ENABLED state, which would allow recursion deadlocks.
2785          *
2786          * mark ENABLED has to look backwards -- to ensure no dependee
2787          * has USED_IN state, which, again, would allow recursion deadlocks.
2788          */
2789         check_usage_f usage = dir ?
2790                 check_usage_backwards : check_usage_forwards;
2791
2792         /*
2793          * Validate that this particular lock does not have conflicting
2794          * usage states.
2795          */
2796         if (!valid_state(curr, this, new_bit, excl_bit))
2797                 return 0;
2798
2799         /*
2800          * Validate that the lock dependencies don't have conflicting usage
2801          * states.
2802          */
2803         if ((!read || !dir || STRICT_READ_CHECKS) &&
2804                         !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
2805                 return 0;
2806
2807         /*
2808          * Check for read in write conflicts
2809          */
2810         if (!read) {
2811                 if (!valid_state(curr, this, new_bit, excl_bit + 1))
2812                         return 0;
2813
2814                 if (STRICT_READ_CHECKS &&
2815                         !usage(curr, this, excl_bit + 1,
2816                                 state_name(new_bit + 1)))
2817                         return 0;
2818         }
2819
2820         if (state_verbose(new_bit, hlock_class(this)))
2821                 return 2;
2822
2823         return 1;
2824 }
2825
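/*
 * One mark type per state in lockdep_states.h; mark_held_locks() turns a
 * mark into the corresponding LOCK_ENABLED_* usage bit via 2 + (mark << 2):
 */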
2826 enum mark_type {
2827 #define LOCKDEP_STATE(__STATE)  __STATE,
2828 #include "lockdep_states.h"
2829 #undef LOCKDEP_STATE
2830 };
2831
2832 /*
2833  * Mark all held locks with a usage bit:
2834  */
2835 static int
2836 mark_held_locks(struct task_struct *curr, enum mark_type mark)
2837 {
2838         enum lock_usage_bit usage_bit;
2839         struct held_lock *hlock;
2840         int i;
2841
2842         for (i = 0; i < curr->lockdep_depth; i++) {
2843                 hlock = curr->held_locks + i;
2844
2845                 usage_bit = 2 + (mark << 2); /* ENABLED */
2846                 if (hlock->read)
2847                         usage_bit += 1; /* READ */
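                /*
                 * E.g. mark == SOFTIRQ for a read-held lock gives
                 * 2 + (1 << 2) + 1 == LOCK_ENABLED_SOFTIRQ_READ.
                 */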
2848
2849                 BUG_ON(usage_bit >= LOCK_USAGE_STATES);
2850
2851                 if (!hlock->check)
2852                         continue;
2853
2854                 if (!mark_lock(curr, hlock, usage_bit))
2855                         return 0;
2856         }
2857
2858         return 1;
2859 }
2860
2861 /*
2862  * Hardirqs will be enabled:
2863  */
2864 static void __trace_hardirqs_on_caller(unsigned long ip)
2865 {
2866         struct task_struct *curr = current;
2867
2868         /* we'll do an OFF -> ON transition: */
2869         curr->hardirqs_enabled = 1;
2870
2871         /*
2872          * We are going to turn hardirqs on, so set the
2873          * usage bit for all held locks:
2874          */
2875         if (!mark_held_locks(curr, HARDIRQ))
2876                 return;
2877         /*
2878          * If we have softirqs enabled, then set the usage
2879          * bit for all held locks. (disabled hardirqs prevented
2880          * this bit from being set before)
2881          */
2882         if (curr->softirqs_enabled)
2883                 if (!mark_held_locks(curr, SOFTIRQ))
2884                         return;
2885
2886         curr->hardirq_enable_ip = ip;
2887         curr->hardirq_enable_event = ++curr->irq_events;
2888         debug_atomic_inc(hardirqs_on_events);
2889 }
2890
2891 __visible void trace_hardirqs_on_caller(unsigned long ip)
2892 {
2893         time_hardirqs_on(CALLER_ADDR0, ip);
2894
2895         if (unlikely(!debug_locks || current->lockdep_recursion))
2896                 return;
2897
2898         if (unlikely(current->hardirqs_enabled)) {
2899                 /*
2900                  * Neither irqs nor preemption are disabled here
2901                  * so this is racy by nature but losing one hit
2902                  * in a stat is not a big deal.
2903                  */
2904                 __debug_atomic_inc(redundant_hardirqs_on);
2905                 return;
2906         }
2907
2908         /*
2909          * We're enabling irqs and according to our state above irqs weren't
2910          * already enabled, yet we find the hardware thinks they are in fact
2911          * enabled.. someone messed up their IRQ state tracing.
2912          */
2913         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2914                 return;
2915
2916         /*
2917          * See the fine text that goes along with this variable definition.
2918          */
2919         if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
2920                 return;
2921
2922         /*
2923          * Can't allow enabling interrupts while in an interrupt handler,
2924          * that's general bad form and such. Recursion, limited stack etc..
2925          */
2926         if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2927                 return;
2928
2929         current->lockdep_recursion = 1;
2930         __trace_hardirqs_on_caller(ip);
2931         current->lockdep_recursion = 0;
2932 }
2933 EXPORT_SYMBOL(trace_hardirqs_on_caller);
2934
2935 void trace_hardirqs_on(void)
2936 {
2937         trace_hardirqs_on_caller(CALLER_ADDR0);
2938 }
2939 EXPORT_SYMBOL(trace_hardirqs_on);
2940
2941 /*
2942  * Hardirqs were disabled:
2943  */
2944 __visible void trace_hardirqs_off_caller(unsigned long ip)
2945 {
2946         struct task_struct *curr = current;
2947
2948         time_hardirqs_off(CALLER_ADDR0, ip);
2949
2950         if (unlikely(!debug_locks || current->lockdep_recursion))
2951                 return;
2952
2953         /*
2954          * So we're supposed to get called after you mask local IRQs, but for
2955          * some reason the hardware doesn't quite think you did a proper job.
2956          */
2957         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2958                 return;
2959
2960         if (curr->hardirqs_enabled) {
2961                 /*
2962                  * We have done an ON -> OFF transition:
2963                  */
2964                 curr->hardirqs_enabled = 0;
2965                 curr->hardirq_disable_ip = ip;
2966                 curr->hardirq_disable_event = ++curr->irq_events;
2967                 debug_atomic_inc(hardirqs_off_events);
2968         } else
2969                 debug_atomic_inc(redundant_hardirqs_off);
2970 }
2971 EXPORT_SYMBOL(trace_hardirqs_off_caller);
2972
2973 void trace_hardirqs_off(void)
2974 {
2975         trace_hardirqs_off_caller(CALLER_ADDR0);
2976 }
2977 EXPORT_SYMBOL(trace_hardirqs_off);
2978
2979 /*
2980  * Softirqs will be enabled:
2981  */
2982 void trace_softirqs_on(unsigned long ip)
2983 {
2984         struct task_struct *curr = current;
2985
2986         if (unlikely(!debug_locks || current->lockdep_recursion))
2987                 return;
2988
2989         /*
2990          * We fancy IRQs being disabled here, see softirq.c, avoids
2991          * funny state and nesting things.
2992          */
2993         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2994                 return;
2995
2996         if (curr->softirqs_enabled) {
2997                 debug_atomic_inc(redundant_softirqs_on);
2998                 return;
2999         }
3000
3001         current->lockdep_recursion = 1;
3002         /*
3003          * We'll do an OFF -> ON transition:
3004          */
3005         curr->softirqs_enabled = 1;
3006         curr->softirq_enable_ip = ip;
3007         curr->softirq_enable_event = ++curr->irq_events;
3008         debug_atomic_inc(softirqs_on_events);
3009         /*
3010          * We are going to turn softirqs on, so set the
3011          * usage bit for all held locks, if hardirqs are
3012          * enabled too:
3013          */
3014         if (curr->hardirqs_enabled)
3015                 mark_held_locks(curr, SOFTIRQ);
3016         current->lockdep_recursion = 0;
3017 }
3018
3019 /*
3020  * Softirqs were disabled:
3021  */
3022 void trace_softirqs_off(unsigned long ip)
3023 {
3024         struct task_struct *curr = current;
3025
3026         if (unlikely(!debug_locks || current->lockdep_recursion))
3027                 return;
3028
3029         /*
3030          * We fancy IRQs being disabled here, see softirq.c
3031          */
3032         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3033                 return;
3034
3035         if (curr->softirqs_enabled) {
3036                 /*
3037                  * We have done an ON -> OFF transition:
3038                  */
3039                 curr->softirqs_enabled = 0;
3040                 curr->softirq_disable_ip = ip;
3041                 curr->softirq_disable_event = ++curr->irq_events;
3042                 debug_atomic_inc(softirqs_off_events);
3043                 /*
3044                  * Whoops, we wanted softirqs off, so why aren't they?
3045                  */
3046                 DEBUG_LOCKS_WARN_ON(!softirq_count());
3047         } else
3048                 debug_atomic_inc(redundant_softirqs_off);
3049 }
3050
3051 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
3052 {
3053         /*
3054          * If non-trylock use in a hardirq or softirq context, then
3055          * mark the lock as used in these contexts:
3056          */
3057         if (!hlock->trylock) {
3058                 if (hlock->read) {
3059                         if (curr->hardirq_context)
3060                                 if (!mark_lock(curr, hlock,
3061                                                 LOCK_USED_IN_HARDIRQ_READ))
3062                                         return 0;
3063                         if (curr->softirq_context)
3064                                 if (!mark_lock(curr, hlock,
3065                                                 LOCK_USED_IN_SOFTIRQ_READ))
3066                                         return 0;
3067                 } else {
3068                         if (curr->hardirq_context)
3069                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
3070                                         return 0;
3071                         if (curr->softirq_context)
3072                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
3073                                         return 0;
3074                 }
3075         }
3076         if (!hlock->hardirqs_off) {
3077                 if (hlock->read) {
3078                         if (!mark_lock(curr, hlock,
3079                                         LOCK_ENABLED_HARDIRQ_READ))
3080                                 return 0;
3081                         if (curr->softirqs_enabled)
3082                                 if (!mark_lock(curr, hlock,
3083                                                 LOCK_ENABLED_SOFTIRQ_READ))
3084                                         return 0;
3085                 } else {
3086                         if (!mark_lock(curr, hlock,
3087                                         LOCK_ENABLED_HARDIRQ))
3088                                 return 0;
3089                         if (curr->softirqs_enabled)
3090                                 if (!mark_lock(curr, hlock,
3091                                                 LOCK_ENABLED_SOFTIRQ))
3092                                         return 0;
3093                 }
3094         }
3095
3096         return 1;
3097 }
3098
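/*
 * Encode the IRQ context of @task in two bits: bit 1 set when in hardirq
 * context, bit 0 set when in softirq context. Held locks record this so
 * that dependency chains from different contexts stay separate.
 */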
3099 static inline unsigned int task_irq_context(struct task_struct *task)
3100 {
3101         return 2 * !!task->hardirq_context + !!task->softirq_context;
3102 }
3103
3104 static int separate_irq_context(struct task_struct *curr,
3105                 struct held_lock *hlock)
3106 {
3107         unsigned int depth = curr->lockdep_depth;
3108
3109         /*
3110          * Keep track of points where we cross into an interrupt context:
3111          */
3112         if (depth) {
3113                 struct held_lock *prev_hlock;
3114
3115                 prev_hlock = curr->held_locks + depth-1;
3116                 /*
3117                  * If we cross into another context, reset the
3118                  * hash key (this also prevents the checking and the
3119                  * adding of the dependency to 'prev'):
3120                  */
3121                 if (prev_hlock->irq_context != hlock->irq_context)
3122                         return 1;
3123         }
3124         return 0;
3125 }
3126
3127 #else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3128
3129 static inline
3130 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
3131                 enum lock_usage_bit new_bit)
3132 {
3133         WARN_ON(1); /* Impossible, innit? We don't have TRACE_IRQFLAGS here. */
3134         return 1;
3135 }
3136
3137 static inline int mark_irqflags(struct task_struct *curr,
3138                 struct held_lock *hlock)
3139 {
3140         return 1;
3141 }
3142
3143 static inline unsigned int task_irq_context(struct task_struct *task)
3144 {
3145         return 0;
3146 }
3147
3148 static inline int separate_irq_context(struct task_struct *curr,
3149                 struct held_lock *hlock)
3150 {
3151         return 0;
3152 }
3153
3154 #endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3155
3156 /*
3157  * Mark a lock with a usage bit, and validate the state transition:
3158  */
3159 static int mark_lock(struct task_struct *curr, struct held_lock *this,
3160                              enum lock_usage_bit new_bit)
3161 {
3162         unsigned int new_mask = 1 << new_bit, ret = 1;
3163
3164         /*
3165          * If already set then do not dirty the cacheline,
3166          * nor do any checks:
3167          */
3168         if (likely(hlock_class(this)->usage_mask & new_mask))
3169                 return 1;
3170
3171         if (!graph_lock())
3172                 return 0;
3173         /*
3174          * Make sure we didn't race:
3175          */
3176         if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
3177                 graph_unlock();
3178                 return 1;
3179         }
3180
3181         hlock_class(this)->usage_mask |= new_mask;
3182
3183         if (!save_trace(hlock_class(this)->usage_traces + new_bit))
3184                 return 0;
3185
3186         switch (new_bit) {
3187 #define LOCKDEP_STATE(__STATE)                  \
3188         case LOCK_USED_IN_##__STATE:            \
3189         case LOCK_USED_IN_##__STATE##_READ:     \
3190         case LOCK_ENABLED_##__STATE:            \
3191         case LOCK_ENABLED_##__STATE##_READ:
3192 #include "lockdep_states.h"
3193 #undef LOCKDEP_STATE
3194                 ret = mark_lock_irq(curr, this, new_bit);
3195                 if (!ret)
3196                         return 0;
3197                 break;
3198         case LOCK_USED:
3199                 debug_atomic_dec(nr_unused_locks);
3200                 break;
3201         default:
3202                 if (!debug_locks_off_graph_unlock())
3203                         return 0;
3204                 WARN_ON(1);
3205                 return 0;
3206         }
3207
3208         graph_unlock();
3209
3210         /*
3211          * We must printk outside of the graph_lock:
3212          */
3213         if (ret == 2) {
3214                 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
3215                 print_lock(this);
3216                 print_irqtrace_events(curr);
3217                 dump_stack();
3218         }
3219
3220         return ret;
3221 }
3222
3223 /*
3224  * Initialize a lock instance's lock-class mapping info:
3225  */
3226 static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
3227                       struct lock_class_key *key, int subclass)
3228 {
3229         int i;
3230
3231         for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
3232                 lock->class_cache[i] = NULL;
3233
3234 #ifdef CONFIG_LOCK_STAT
3235         lock->cpu = raw_smp_processor_id();
3236 #endif
3237
3238         /*
3239          * Can't be having no nameless bastards around this place!
3240          */
3241         if (DEBUG_LOCKS_WARN_ON(!name)) {
3242                 lock->name = "NULL";
3243                 return;
3244         }
3245
3246         lock->name = name;
3247
3248         /*
3249          * No key, no joy, we need to hash something.
3250          */
3251         if (DEBUG_LOCKS_WARN_ON(!key))
3252                 return;
3253         /*
3254          * Sanity check, the lock-class key must be persistent:
3255          */
3256         if (!static_obj(key)) {
3257                 printk("BUG: key %p not in .data!\n", key);
3258                 /*
3259                  * What it says above ^^^^^, I suggest you read it.
3260                  */
3261                 DEBUG_LOCKS_WARN_ON(1);
3262                 return;
3263         }
3264         lock->key = key;
3265
3266         if (unlikely(!debug_locks))
3267                 return;
3268
3269         if (subclass) {
3270                 unsigned long flags;
3271
3272                 if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
3273                         return;
3274
3275                 raw_local_irq_save(flags);
3276                 current->lockdep_recursion = 1;
3277                 register_lock_class(lock, subclass, 1);
3278                 current->lockdep_recursion = 0;
3279                 raw_local_irq_restore(flags);
3280         }
3281 }
3282
3283 void lockdep_init_map(struct lockdep_map *lock, const char *name,
3284                       struct lock_class_key *key, int subclass)
3285 {
3286         cross_init(lock, 0);
3287         __lockdep_init_map(lock, name, key, subclass);
3288 }
3289 EXPORT_SYMBOL_GPL(lockdep_init_map);
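/*
 * Illustrative sketch (editor's addition, not part of the validator): code
 * that wants its lock in a class of its own typically goes through the
 * lockdep_set_class() wrapper, which re-runs lockdep_init_map() with a
 * static key. The names below (foo_key, foo_lock) are made up:
 *
 *	static struct lock_class_key foo_key;
 *	static spinlock_t foo_lock;
 *
 *	spin_lock_init(&foo_lock);
 *	lockdep_set_class(&foo_lock, &foo_key);
 *
 * The key must be a static object, which is exactly what the static_obj()
 * check above enforces.
 */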
3290
3291 #ifdef CONFIG_LOCKDEP_CROSSRELEASE
3292 void lockdep_init_map_crosslock(struct lockdep_map *lock, const char *name,
3293                       struct lock_class_key *key, int subclass)
3294 {
3295         cross_init(lock, 1);
3296         __lockdep_init_map(lock, name, key, subclass);
3297 }
3298 EXPORT_SYMBOL_GPL(lockdep_init_map_crosslock);
3299 #endif
3300
3301 struct lock_class_key __lockdep_no_validate__;
3302 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
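/*
 * Editor's note (sketch, not authoritative): __lockdep_no_validate__ is the
 * key behind the lockdep_set_novalidate_class() helper. A subsystem whose
 * locking is too dynamic for class-based checking (the canonical example is
 * device->mutex) can opt out of full validation while still being tracked:
 *
 *	lockdep_set_novalidate_class(&dev->mutex);
 *
 * __lock_acquire() below clears 'check' for such locks, so only basic
 * bookkeeping is done for them.
 */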
3303
3304 static int
3305 print_lock_nested_lock_not_held(struct task_struct *curr,
3306                                 struct held_lock *hlock,
3307                                 unsigned long ip)
3308 {
3309         if (!debug_locks_off())
3310                 return 0;
3311         if (debug_locks_silent)
3312                 return 0;
3313
3314         pr_warn("\n");
3315         pr_warn("==================================\n");
3316         pr_warn("WARNING: Nested lock was not taken\n");
3317         print_kernel_ident();
3318         pr_warn("----------------------------------\n");
3319
3320         pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
3321         print_lock(hlock);
3322
3323         pr_warn("\nbut this task is not holding:\n");
3324         pr_warn("%s\n", hlock->nest_lock->name);
3325
3329         pr_warn("\nother info that might help us debug this:\n");
3330         lockdep_print_held_locks(curr);
3331
3332         pr_warn("\nstack backtrace:\n");
3333         dump_stack();
3334
3335         return 0;
3336 }
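/*
 * Editor's sketch of when the warning above fires: the nest_lock annotation
 * promises that a given "parent" lock is held while taking the "child",
 * e.g. (names are made up):
 *
 *	mutex_lock(&parent->mtx);
 *	mutex_lock_nest_lock(&child->mtx, &parent->mtx);
 *
 * If the child is acquired with that annotation while the parent is not in
 * fact held, __lock_acquire() ends up here.
 */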
3337
3338 static int __lock_is_held(struct lockdep_map *lock, int read);
3339
3340 /*
3341  * This gets called for every mutex_lock*()/spin_lock*() operation.
3342  * We maintain the dependency maps and validate the locking attempt:
3343  */
3344 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3345                           int trylock, int read, int check, int hardirqs_off,
3346                           struct lockdep_map *nest_lock, unsigned long ip,
3347                           int references, int pin_count)
3348 {
3349         struct task_struct *curr = current;
3350         struct lock_class *class = NULL;
3351         struct held_lock *hlock;
3352         unsigned int depth;
3353         int chain_head = 0;
3354         int class_idx;
3355         u64 chain_key;
3356         int ret;
3357
3358         if (unlikely(!debug_locks))
3359                 return 0;
3360
3361         /*
3362          * Lockdep should run with IRQs disabled, otherwise we could
3363          * get an interrupt which would want to take locks, which would
3364          * end up in lockdep and have you got a head-ache already?
3365          */
3366         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3367                 return 0;
3368
3369         if (!prove_locking || lock->key == &__lockdep_no_validate__)
3370                 check = 0;
3371
3372         if (subclass < NR_LOCKDEP_CACHING_CLASSES)
3373                 class = lock->class_cache[subclass];
3374         /*
3375          * Not cached?
3376          */
3377         if (unlikely(!class)) {
3378                 class = register_lock_class(lock, subclass, 0);
3379                 if (!class)
3380                         return 0;
3381         }
3382         atomic_inc((atomic_t *)&class->ops);
3383         if (very_verbose(class)) {
3384                 printk("\nacquire class [%p] %s", class->key, class->name);
3385                 if (class->name_version > 1)
3386                         printk(KERN_CONT "#%d", class->name_version);
3387                 printk(KERN_CONT "\n");
3388                 dump_stack();
3389         }
3390
3391         /*
3392          * Add the lock to the list of currently held locks.
3393          * (we dont increase the depth just yet, up until the
3394          * dependency checks are done)
3395          */
3396         depth = curr->lockdep_depth;
3397         /*
3398          * Ran out of static storage for our per-task lock stack again have we?
3399          */
3400         if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
3401                 return 0;
3402
3403         class_idx = class - lock_classes + 1;
3404
3405         /* TODO: nest_lock is not implemented for crosslock yet. */
3406         if (depth && !cross_lock(lock)) {
3407                 hlock = curr->held_locks + depth - 1;
3408                 if (hlock->class_idx == class_idx && nest_lock) {
3409                         if (!references)
3410                                 references++;
3411
3412                         if (!hlock->references)
3413                                 hlock->references++;
3414
3415                         hlock->references += references;
3416
3417                         /* Overflow */
3418                         if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
3419                                 return 0;
3420
3421                         return 1;
3422                 }
3423         }
3424
3425         hlock = curr->held_locks + depth;
3426         /*
3427          * Plain impossible, we just registered it and checked it weren't no
3428          * NULL like.. I bet this mushroom I ate was good!
3429          */
3430         if (DEBUG_LOCKS_WARN_ON(!class))
3431                 return 0;
3432         hlock->class_idx = class_idx;
3433         hlock->acquire_ip = ip;
3434         hlock->instance = lock;
3435         hlock->nest_lock = nest_lock;
3436         hlock->irq_context = task_irq_context(curr);
3437         hlock->trylock = trylock;
3438         hlock->read = read;
3439         hlock->check = check;
3440         hlock->hardirqs_off = !!hardirqs_off;
3441         hlock->references = references;
3442 #ifdef CONFIG_LOCK_STAT
3443         hlock->waittime_stamp = 0;
3444         hlock->holdtime_stamp = lockstat_clock();
3445 #endif
3446         hlock->pin_count = pin_count;
3447
3448         if (check && !mark_irqflags(curr, hlock))
3449                 return 0;
3450
3451         /* mark it as used: */
3452         if (!mark_lock(curr, hlock, LOCK_USED))
3453                 return 0;
3454
3455         /*
3456          * Calculate the chain hash: it's the combined hash of all the
3457          * lock keys along the dependency chain. We save the hash value
3458          * at every step so that we can get the current hash easily
3459          * after unlock. The chain hash is then used to cache dependency
3460          * results.
3461          *
3462          * The 'key ID' (class_idx) is the most compact key value we can
3463          * use to drive the hash, rather than class->key.
3464          */
3465         /*
3466          * Whoops, we did it again.. ran straight out of our static allocation.
3467          */
3468         if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
3469                 return 0;
3470
3471         chain_key = curr->curr_chain_key;
3472         if (!depth) {
3473                 /*
3474                  * How can we have a chain hash when we ain't got no keys?!
3475                  */
3476                 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
3477                         return 0;
3478                 chain_head = 1;
3479         }
3480
3481         hlock->prev_chain_key = chain_key;
3482         if (separate_irq_context(curr, hlock)) {
3483                 chain_key = 0;
3484                 chain_head = 1;
3485         }
3486         chain_key = iterate_chain_key(chain_key, class_idx);
3487
3488         if (nest_lock && !__lock_is_held(nest_lock, -1))
3489                 return print_lock_nested_lock_not_held(curr, hlock, ip);
3490
3491         if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
3492                 return 0;
3493
3494         ret = lock_acquire_crosslock(hlock);
3495         /*
3496          * 2 means normal acquire operations are needed. Otherwise, it's
3497          * ok just to return with '0:fail, 1:success'.
3498          */
3499         if (ret != 2)
3500                 return ret;
3501
3502         curr->curr_chain_key = chain_key;
3503         curr->lockdep_depth++;
3504         check_chain_key(curr);
3505 #ifdef CONFIG_DEBUG_LOCKDEP
3506         if (unlikely(!debug_locks))
3507                 return 0;
3508 #endif
3509         if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
3510                 debug_locks_off();
3511                 print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
3512                 printk(KERN_DEBUG "depth: %i  max: %lu!\n",
3513                        curr->lockdep_depth, MAX_LOCK_DEPTH);
3514
3515                 lockdep_print_held_locks(current);
3516                 debug_show_all_locks();
3517                 dump_stack();
3518
3519                 return 0;
3520         }
3521
3522         if (unlikely(curr->lockdep_depth > max_lockdep_depth))
3523                 max_lockdep_depth = curr->lockdep_depth;
3524
3525         return 1;
3526 }
3527
3528 static int
3529 print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
3530                            unsigned long ip)
3531 {
3532         if (!debug_locks_off())
3533                 return 0;
3534         if (debug_locks_silent)
3535                 return 0;
3536
3537         pr_warn("\n");
3538         pr_warn("=====================================\n");
3539         pr_warn("WARNING: bad unlock balance detected!\n");
3540         print_kernel_ident();
3541         pr_warn("-------------------------------------\n");
3542         pr_warn("%s/%d is trying to release lock (",
3543                 curr->comm, task_pid_nr(curr));
3544         print_lockdep_cache(lock);
3545         pr_cont(") at:\n");
3546         print_ip_sym(ip);
3547         pr_warn("but there are no more locks to release!\n");
3548         pr_warn("\nother info that might help us debug this:\n");
3549         lockdep_print_held_locks(curr);
3550
3551         pr_warn("\nstack backtrace:\n");
3552         dump_stack();
3553
3554         return 0;
3555 }
3556
3557 static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
3558 {
3559         if (hlock->instance == lock)
3560                 return 1;
3561
3562         if (hlock->references) {
3563                 struct lock_class *class = lock->class_cache[0];
3564
3565                 if (!class)
3566                         class = look_up_lock_class(lock, 0);
3567
3568                 /*
3569                  * If look_up_lock_class() failed to find a class, we're trying
3570                  * to test if we hold a lock that has never yet been acquired.
3571                  * Clearly if the lock hasn't been acquired _ever_, we're not
3572                  * holding it either, so report failure.
3573                  */
3574                 if (IS_ERR_OR_NULL(class))
3575                         return 0;
3576
3577                 /*
3578                  * References, but not a lock we're actually ref-counting?
3579                  * State got messed up, follow the sites that change ->references
3580                  * and try to make sense of it.
3581                  */
3582                 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
3583                         return 0;
3584
3585                 if (hlock->class_idx == class - lock_classes + 1)
3586                         return 1;
3587         }
3588
3589         return 0;
3590 }
3591
3592 /* @depth must not be zero */
3593 static struct held_lock *find_held_lock(struct task_struct *curr,
3594                                         struct lockdep_map *lock,
3595                                         unsigned int depth, int *idx)
3596 {
3597         struct held_lock *ret, *hlock, *prev_hlock;
3598         int i;
3599
3600         i = depth - 1;
3601         hlock = curr->held_locks + i;
3602         ret = hlock;
3603         if (match_held_lock(hlock, lock))
3604                 goto out;
3605
3606         ret = NULL;
3607         for (i--, prev_hlock = hlock--;
3608              i >= 0;
3609              i--, prev_hlock = hlock--) {
3610                 /*
3611                  * We must not cross into another context:
3612                  */
3613                 if (prev_hlock->irq_context != hlock->irq_context) {
3614                         ret = NULL;
3615                         break;
3616                 }
3617                 if (match_held_lock(hlock, lock)) {
3618                         ret = hlock;
3619                         break;
3620                 }
3621         }
3622
3623 out:
3624         *idx = i;
3625         return ret;
3626 }
3627
3628 static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
3629                               int idx)
3630 {
3631         struct held_lock *hlock;
3632
3633         for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
3634                 if (!__lock_acquire(hlock->instance,
3635                                     hlock_class(hlock)->subclass,
3636                                     hlock->trylock,
3637                                     hlock->read, hlock->check,
3638                                     hlock->hardirqs_off,
3639                                     hlock->nest_lock, hlock->acquire_ip,
3640                                     hlock->references, hlock->pin_count))
3641                         return 1;
3642         }
3643         return 0;
3644 }
3645
3646 static int
3647 __lock_set_class(struct lockdep_map *lock, const char *name,
3648                  struct lock_class_key *key, unsigned int subclass,
3649                  unsigned long ip)
3650 {
3651         struct task_struct *curr = current;
3652         struct held_lock *hlock;
3653         struct lock_class *class;
3654         unsigned int depth;
3655         int i;
3656
3657         depth = curr->lockdep_depth;
3658         /*
3659          * This function is about (re)setting the class of a held lock,
3660          * yet we're not actually holding any locks. Naughty user!
3661          */
3662         if (DEBUG_LOCKS_WARN_ON(!depth))
3663                 return 0;
3664
3665         hlock = find_held_lock(curr, lock, depth, &i);
3666         if (!hlock)
3667                 return print_unlock_imbalance_bug(curr, lock, ip);
3668
3669         lockdep_init_map(lock, name, key, 0);
3670         class = register_lock_class(lock, subclass, 0);
3671         hlock->class_idx = class - lock_classes + 1;
3672
3673         curr->lockdep_depth = i;
3674         curr->curr_chain_key = hlock->prev_chain_key;
3675
3676         if (reacquire_held_locks(curr, depth, i))
3677                 return 0;
3678
3679         /*
3680          * I took it apart and put it back together again, except now I have
3681          * these 'spare' parts.. where shall I put them.
3682          */
3683         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3684                 return 0;
3685         return 1;
3686 }
3687
3688 static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
3689 {
3690         struct task_struct *curr = current;
3691         struct held_lock *hlock;
3692         unsigned int depth;
3693         int i;
3694
3695         if (unlikely(!debug_locks))
3696                 return 0;
3697
3698         depth = curr->lockdep_depth;
3699         /*
3700          * This function is about (re)setting the class of a held lock,
3701          * yet we're not actually holding any locks. Naughty user!
3702          */
3703         if (DEBUG_LOCKS_WARN_ON(!depth))
3704                 return 0;
3705
3706         hlock = find_held_lock(curr, lock, depth, &i);
3707         if (!hlock)
3708                 return print_unlock_imbalance_bug(curr, lock, ip);
3709
3710         curr->lockdep_depth = i;
3711         curr->curr_chain_key = hlock->prev_chain_key;
3712
3713         WARN(hlock->read, "downgrading a read lock");
3714         hlock->read = 1;
3715         hlock->acquire_ip = ip;
3716
3717         if (reacquire_held_locks(curr, depth, i))
3718                 return 0;
3719
3720         /*
3721          * I took it apart and put it back together again, except now I have
3722          * these 'spare' parts.. where shall I put them.
3723          */
3724         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3725                 return 0;
3726         return 1;
3727 }
3728
3729 /*
3730  * Remove the lock to the list of currently held locks - this gets
3731  * called on mutex_unlock()/spin_unlock*() (or on a failed
3732  * mutex_lock_interruptible()).
3733  *
3734  * @nested is an hysterical artifact, needs a tree wide cleanup.
3735  */
3736 static int
3737 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3738 {
3739         struct task_struct *curr = current;
3740         struct held_lock *hlock;
3741         unsigned int depth;
3742         int ret, i;
3743
3744         if (unlikely(!debug_locks))
3745                 return 0;
3746
3747         ret = lock_release_crosslock(lock);
3748         /*
3749          * 2 means normal release operations are needed. Otherwise, it's
3750          * ok just to return with '0:fail, 1:success'.
3751          */
3752         if (ret != 2)
3753                 return ret;
3754
3755         depth = curr->lockdep_depth;
3756         /*
3757          * So we're all set to release this lock.. wait what lock? We don't
3758          * own any locks, you've been drinking again?
3759          */
3760         if (DEBUG_LOCKS_WARN_ON(depth <= 0))
3761                  return print_unlock_imbalance_bug(curr, lock, ip);
3762
3763         /*
3764          * Check whether the lock exists in the current stack
3765          * of held locks:
3766          */
3767         hlock = find_held_lock(curr, lock, depth, &i);
3768         if (!hlock)
3769                 return print_unlock_imbalance_bug(curr, lock, ip);
3770
3771         if (hlock->instance == lock)
3772                 lock_release_holdtime(hlock);
3773
3774         WARN(hlock->pin_count, "releasing a pinned lock\n");
3775
3776         if (hlock->references) {
3777                 hlock->references--;
3778                 if (hlock->references) {
3779                         /*
3780                          * We had, and after removing one, still have
3781                          * references, the current lock stack is still
3782                          * valid. We're done!
3783                          */
3784                         return 1;
3785                 }
3786         }
3787
3788         /*
3789          * We have the right lock to unlock, 'hlock' points to it.
3790          * Now we remove it from the stack, and add back the other
3791          * entries (if any), recalculating the hash along the way:
3792          */
3793
3794         curr->lockdep_depth = i;
3795         curr->curr_chain_key = hlock->prev_chain_key;
3796
3797         if (reacquire_held_locks(curr, depth, i + 1))
3798                 return 0;
3799
3800         /*
3801          * We had N bottles of beer on the wall, we drank one, but now
3802          * there's not N-1 bottles of beer left on the wall...
3803          */
3804         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
3805                 return 0;
3806
3807         return 1;
3808 }
3809
3810 static int __lock_is_held(struct lockdep_map *lock, int read)
3811 {
3812         struct task_struct *curr = current;
3813         int i;
3814
3815         for (i = 0; i < curr->lockdep_depth; i++) {
3816                 struct held_lock *hlock = curr->held_locks + i;
3817
3818                 if (match_held_lock(hlock, lock)) {
3819                         if (read == -1 || hlock->read == read)
3820                                 return 1;
3821
3822                         return 0;
3823                 }
3824         }
3825
3826         return 0;
3827 }
3828
3829 static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
3830 {
3831         struct pin_cookie cookie = NIL_COOKIE;
3832         struct task_struct *curr = current;
3833         int i;
3834
3835         if (unlikely(!debug_locks))
3836                 return cookie;
3837
3838         for (i = 0; i < curr->lockdep_depth; i++) {
3839                 struct held_lock *hlock = curr->held_locks + i;
3840
3841                 if (match_held_lock(hlock, lock)) {
3842                         /*
3843                          * Grab 16bits of randomness; this is sufficient to not
3844                          * be guessable and still allows some pin nesting in
3845                          * our u32 pin_count.
3846                          */
3847                         cookie.val = 1 + (prandom_u32() >> 16);
3848                         hlock->pin_count += cookie.val;
3849                         return cookie;
3850                 }
3851         }
3852
3853         WARN(1, "pinning an unheld lock\n");
3854         return cookie;
3855 }
3856
3857 static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3858 {
3859         struct task_struct *curr = current;
3860         int i;
3861
3862         if (unlikely(!debug_locks))
3863                 return;
3864
3865         for (i = 0; i < curr->lockdep_depth; i++) {
3866                 struct held_lock *hlock = curr->held_locks + i;
3867
3868                 if (match_held_lock(hlock, lock)) {
3869                         hlock->pin_count += cookie.val;
3870                         return;
3871                 }
3872         }
3873
3874         WARN(1, "pinning an unheld lock\n");
3875 }
3876
3877 static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3878 {
3879         struct task_struct *curr = current;
3880         int i;
3881
3882         if (unlikely(!debug_locks))
3883                 return;
3884
3885         for (i = 0; i < curr->lockdep_depth; i++) {
3886                 struct held_lock *hlock = curr->held_locks + i;
3887
3888                 if (match_held_lock(hlock, lock)) {
3889                         if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
3890                                 return;
3891
3892                         hlock->pin_count -= cookie.val;
3893
3894                         if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
3895                                 hlock->pin_count = 0;
3896
3897                         return;
3898                 }
3899         }
3900
3901         WARN(1, "unpinning an unheld lock\n");
3902 }
3903
3904 /*
3905  * Check whether we follow the irq-flags state precisely:
3906  */
3907 static void check_flags(unsigned long flags)
3908 {
3909 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
3910     defined(CONFIG_TRACE_IRQFLAGS)
3911         if (!debug_locks)
3912                 return;
3913
3914         if (irqs_disabled_flags(flags)) {
3915                 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
3916                         printk("possible reason: unannotated irqs-off.\n");
3917                 }
3918         } else {
3919                 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
3920                         printk("possible reason: unannotated irqs-on.\n");
3921                 }
3922         }
3923
3924         /*
3925          * We dont accurately track softirq state in e.g.
3926          * hardirq contexts (such as on 4KSTACKS), so only
3927          * check if not in hardirq contexts:
3928          */
3929         if (!hardirq_count()) {
3930                 if (softirq_count()) {
3931                         /* like the above, but with softirqs */
3932                         DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
3933                 } else {
3934                         /* lick the above, does it taste good? */
3935                         DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
3936                 }
3937         }
3938
3939         if (!debug_locks)
3940                 print_irqtrace_events(current);
3941 #endif
3942 }
3943
3944 void lock_set_class(struct lockdep_map *lock, const char *name,
3945                     struct lock_class_key *key, unsigned int subclass,
3946                     unsigned long ip)
3947 {
3948         unsigned long flags;
3949
3950         if (unlikely(current->lockdep_recursion))
3951                 return;
3952
3953         raw_local_irq_save(flags);
3954         current->lockdep_recursion = 1;
3955         check_flags(flags);
3956         if (__lock_set_class(lock, name, key, subclass, ip))
3957                 check_chain_key(current);
3958         current->lockdep_recursion = 0;
3959         raw_local_irq_restore(flags);
3960 }
3961 EXPORT_SYMBOL_GPL(lock_set_class);
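/*
 * Editor's sketch: the usual consumer of lock_set_class() is the
 * lock_set_subclass() wrapper, which keeps the name and key but moves an
 * already-held lock into another subclass, e.g. (call site illustrative):
 *
 *	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 *
 * The held-lock stack above the modified entry is replayed via
 * reacquire_held_locks() so the chain keys stay consistent.
 */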
3962
3963 void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
3964 {
3965         unsigned long flags;
3966
3967         if (unlikely(current->lockdep_recursion))
3968                 return;
3969
3970         raw_local_irq_save(flags);
3971         current->lockdep_recursion = 1;
3972         check_flags(flags);
3973         if (__lock_downgrade(lock, ip))
3974                 check_chain_key(current);
3975         current->lockdep_recursion = 0;
3976         raw_local_irq_restore(flags);
3977 }
3978 EXPORT_SYMBOL_GPL(lock_downgrade);
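/*
 * Editor's sketch: lock_downgrade() is what rwsem's downgrade_write() uses
 * to turn an exclusive hold into a shared one without a release/re-acquire
 * pair:
 *
 *	down_write(&sem);
 *	... modify data ...
 *	downgrade_write(&sem);	-- only readers may run concurrently from here
 *	... read data ...
 *	up_read(&sem);
 *
 * Internally the held_lock entry is flipped to 'read' and the entries above
 * it are replayed, much like in __lock_set_class() above.
 */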
3979
3980 /*
3981  * We are not always called with irqs disabled - do that here,
3982  * and also avoid lockdep recursion:
3983  */
3984 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3985                           int trylock, int read, int check,
3986                           struct lockdep_map *nest_lock, unsigned long ip)
3987 {
3988         unsigned long flags;
3989
3990         if (unlikely(current->lockdep_recursion))
3991                 return;
3992
3993         raw_local_irq_save(flags);
3994         check_flags(flags);
3995
3996         current->lockdep_recursion = 1;
3997         trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
3998         __lock_acquire(lock, subclass, trylock, read, check,
3999                        irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
4000         current->lockdep_recursion = 0;
4001         raw_local_irq_restore(flags);
4002 }
4003 EXPORT_SYMBOL_GPL(lock_acquire);
4004
4005 void lock_release(struct lockdep_map *lock, int nested,
4006                           unsigned long ip)
4007 {
4008         unsigned long flags;
4009
4010         if (unlikely(current->lockdep_recursion))
4011                 return;
4012
4013         raw_local_irq_save(flags);
4014         check_flags(flags);
4015         current->lockdep_recursion = 1;
4016         trace_lock_release(lock, ip);
4017         if (__lock_release(lock, nested, ip))
4018                 check_chain_key(current);
4019         current->lockdep_recursion = 0;
4020         raw_local_irq_restore(flags);
4021 }
4022 EXPORT_SYMBOL_GPL(lock_release);
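/*
 * Editor's sketch of how the entry points above are normally reached: lock
 * implementations do not call lock_acquire()/lock_release() directly but go
 * through the wrapper macros in <linux/lockdep.h>, roughly:
 *
 *	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		-> lock_acquire(map, 0, 0, 0, 1, NULL, ip);	exclusive
 *	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
 *		-> lock_acquire(map, 0, 0, 2, 1, NULL, ip);	recursive read
 *	spin_release(&lock->dep_map, 1, _RET_IP_);
 *		-> lock_release(map, 1, ip);
 *
 * The read argument is 0 for an exclusive, 1 for a shared and 2 for a
 * recursive-read acquisition.
 */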
4023
4024 int lock_is_held_type(struct lockdep_map *lock, int read)
4025 {
4026         unsigned long flags;
4027         int ret = 0;
4028
4029         if (unlikely(current->lockdep_recursion))
4030                 return 1; /* avoid false negative lockdep_assert_held() */
4031
4032         raw_local_irq_save(flags);
4033         check_flags(flags);
4034
4035         current->lockdep_recursion = 1;
4036         ret = __lock_is_held(lock, read);
4037         current->lockdep_recursion = 0;
4038         raw_local_irq_restore(flags);
4039
4040         return ret;
4041 }
4042 EXPORT_SYMBOL_GPL(lock_is_held_type);
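/*
 * Editor's sketch: lock_is_held_type() is what the assertion helpers in
 * <linux/lockdep.h> boil down to, roughly:
 *
 *	lockdep_assert_held(&mylock);		-- any hold  (read == -1)
 *	lockdep_assert_held_exclusive(&mylock);	-- write hold (read == 0)
 *	lockdep_assert_held_read(&mylock);	-- read hold  (read == 1)
 *
 * Each expands to a WARN_ON() when debug_locks is on and the matching
 * lock_is_held_type() query returns false.
 */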
4043
4044 struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
4045 {
4046         struct pin_cookie cookie = NIL_COOKIE;
4047         unsigned long flags;
4048
4049         if (unlikely(current->lockdep_recursion))
4050                 return cookie;
4051
4052         raw_local_irq_save(flags);
4053         check_flags(flags);
4054
4055         current->lockdep_recursion = 1;
4056         cookie = __lock_pin_lock(lock);
4057         current->lockdep_recursion = 0;
4058         raw_local_irq_restore(flags);
4059
4060         return cookie;
4061 }
4062 EXPORT_SYMBOL_GPL(lock_pin_lock);
4063
4064 void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4065 {
4066         unsigned long flags;
4067
4068         if (unlikely(current->lockdep_recursion))
4069                 return;
4070
4071         raw_local_irq_save(flags);
4072         check_flags(flags);
4073
4074         current->lockdep_recursion = 1;
4075         __lock_repin_lock(lock, cookie);
4076         current->lockdep_recursion = 0;
4077         raw_local_irq_restore(flags);
4078 }
4079 EXPORT_SYMBOL_GPL(lock_repin_lock);
4080
4081 void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4082 {
4083         unsigned long flags;
4084
4085         if (unlikely(current->lockdep_recursion))
4086                 return;
4087
4088         raw_local_irq_save(flags);
4089         check_flags(flags);
4090
4091         current->lockdep_recursion = 1;
4092         __lock_unpin_lock(lock, cookie);
4093         current->lockdep_recursion = 0;
4094         raw_local_irq_restore(flags);
4095 }
4096 EXPORT_SYMBOL_GPL(lock_unpin_lock);
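/*
 * Editor's sketch: pinning is for code that must guarantee a lock is not
 * dropped behind its back across a call, via the lockdep_pin_lock() /
 * lockdep_unpin_lock() wrappers (rq->lock is the classic user; the shape is
 * illustrative):
 *
 *	struct pin_cookie cookie;
 *
 *	cookie = lockdep_pin_lock(&rq->lock);
 *	... call code that must not release rq->lock ...
 *	lockdep_unpin_lock(&rq->lock, cookie);
 *
 * Unlocking a pinned lock trips the "releasing a pinned lock" warning in
 * __lock_release() above; the random cookie makes bogus unpins detectable.
 */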
4097
4098 #ifdef CONFIG_LOCK_STAT
4099 static int
4100 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
4101                            unsigned long ip)
4102 {
4103         if (!debug_locks_off())
4104                 return 0;
4105         if (debug_locks_silent)
4106                 return 0;
4107
4108         pr_warn("\n");
4109         pr_warn("=================================\n");
4110         pr_warn("WARNING: bad contention detected!\n");
4111         print_kernel_ident();
4112         pr_warn("---------------------------------\n");
4113         pr_warn("%s/%d is trying to contend lock (",
4114                 curr->comm, task_pid_nr(curr));
4115         print_lockdep_cache(lock);
4116         pr_cont(") at:\n");
4117         print_ip_sym(ip);
4118         pr_warn("but there are no locks held!\n");
4119         pr_warn("\nother info that might help us debug this:\n");
4120         lockdep_print_held_locks(curr);
4121
4122         pr_warn("\nstack backtrace:\n");
4123         dump_stack();
4124
4125         return 0;
4126 }
4127
4128 static void
4129 __lock_contended(struct lockdep_map *lock, unsigned long ip)
4130 {
4131         struct task_struct *curr = current;
4132         struct held_lock *hlock;
4133         struct lock_class_stats *stats;
4134         unsigned int depth;
4135         int i, contention_point, contending_point;
4136
4137         depth = curr->lockdep_depth;
4138         /*
4139          * Whee, we contended on this lock, except it seems we're not
4140          * actually trying to acquire anything much at all..
4141          */
4142         if (DEBUG_LOCKS_WARN_ON(!depth))
4143                 return;
4144
4145         hlock = find_held_lock(curr, lock, depth, &i);
4146         if (!hlock) {
4147                 print_lock_contention_bug(curr, lock, ip);
4148                 return;
4149         }
4150
4151         if (hlock->instance != lock)
4152                 return;
4153
4154         hlock->waittime_stamp = lockstat_clock();
4155
4156         contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
4157         contending_point = lock_point(hlock_class(hlock)->contending_point,
4158                                       lock->ip);
4159
4160         stats = get_lock_stats(hlock_class(hlock));
4161         if (contention_point < LOCKSTAT_POINTS)
4162                 stats->contention_point[contention_point]++;
4163         if (contending_point < LOCKSTAT_POINTS)
4164                 stats->contending_point[contending_point]++;
4165         if (lock->cpu != smp_processor_id())
4166                 stats->bounces[bounce_contended + !!hlock->read]++;
4167         put_lock_stats(stats);
4168 }
4169
4170 static void
4171 __lock_acquired(struct lockdep_map *lock, unsigned long ip)
4172 {
4173         struct task_struct *curr = current;
4174         struct held_lock *hlock;
4175         struct lock_class_stats *stats;
4176         unsigned int depth;
4177         u64 now, waittime = 0;
4178         int i, cpu;
4179
4180         depth = curr->lockdep_depth;
4181         /*
4182          * Yay, we acquired ownership of this lock we didn't try to
4183          * acquire, how the heck did that happen?
4184          */
4185         if (DEBUG_LOCKS_WARN_ON(!depth))
4186                 return;
4187
4188         hlock = find_held_lock(curr, lock, depth, &i);
4189         if (!hlock) {
4190                 print_lock_contention_bug(curr, lock, _RET_IP_);
4191                 return;
4192         }
4193
4194         if (hlock->instance != lock)
4195                 return;
4196
4197         cpu = smp_processor_id();
4198         if (hlock->waittime_stamp) {
4199                 now = lockstat_clock();
4200                 waittime = now - hlock->waittime_stamp;
4201                 hlock->holdtime_stamp = now;
4202         }
4203
4204         trace_lock_acquired(lock, ip);
4205
4206         stats = get_lock_stats(hlock_class(hlock));
4207         if (waittime) {
4208                 if (hlock->read)
4209                         lock_time_inc(&stats->read_waittime, waittime);
4210                 else
4211                         lock_time_inc(&stats->write_waittime, waittime);
4212         }
4213         if (lock->cpu != cpu)
4214                 stats->bounces[bounce_acquired + !!hlock->read]++;
4215         put_lock_stats(stats);
4216
4217         lock->cpu = cpu;
4218         lock->ip = ip;
4219 }
4220
4221 void lock_contended(struct lockdep_map *lock, unsigned long ip)
4222 {
4223         unsigned long flags;
4224
4225         if (unlikely(!lock_stat || !debug_locks))
4226                 return;
4227
4228         if (unlikely(current->lockdep_recursion))
4229                 return;
4230
4231         raw_local_irq_save(flags);
4232         check_flags(flags);
4233         current->lockdep_recursion = 1;
4234         trace_lock_contended(lock, ip);
4235         __lock_contended(lock, ip);
4236         current->lockdep_recursion = 0;
4237         raw_local_irq_restore(flags);
4238 }
4239 EXPORT_SYMBOL_GPL(lock_contended);
4240
4241 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
4242 {
4243         unsigned long flags;
4244
4245         if (unlikely(!lock_stat || !debug_locks))
4246                 return;
4247
4248         if (unlikely(current->lockdep_recursion))
4249                 return;
4250
4251         raw_local_irq_save(flags);
4252         check_flags(flags);
4253         current->lockdep_recursion = 1;
4254         __lock_acquired(lock, ip);
4255         current->lockdep_recursion = 0;
4256         raw_local_irq_restore(flags);
4257 }
4258 EXPORT_SYMBOL_GPL(lock_acquired);
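/*
 * Editor's note (sketch): with CONFIG_LOCK_STAT the lock slow paths bracket
 * the blocking section with these hooks, roughly:
 *
 *	lock_contended(&lock->dep_map, ip);	-- about to block
 *	... wait for the owner ...
 *	lock_acquired(&lock->dep_map, ip);	-- got it, record the wait time
 *
 * The per-class statistics gathered here are exported via /proc/lock_stat.
 */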
4259 #endif
4260
4261 /*
4262  * Used by the testsuite, sanitize the validator state
4263  * after a simulated failure:
4264  */
4265
4266 void lockdep_reset(void)
4267 {
4268         unsigned long flags;
4269         int i;
4270
4271         raw_local_irq_save(flags);
4272         current->curr_chain_key = 0;
4273         current->lockdep_depth = 0;
4274         current->lockdep_recursion = 0;
4275         memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
4276         nr_hardirq_chains = 0;
4277         nr_softirq_chains = 0;
4278         nr_process_chains = 0;
4279         debug_locks = 1;
4280         for (i = 0; i < CHAINHASH_SIZE; i++)
4281                 INIT_HLIST_HEAD(chainhash_table + i);
4282         raw_local_irq_restore(flags);
4283 }
4284
4285 static void zap_class(struct lock_class *class)
4286 {
4287         int i;
4288
4289         /*
4290          * Remove all dependencies this lock is
4291          * involved in:
4292          */
4293         for (i = 0; i < nr_list_entries; i++) {
4294                 if (list_entries[i].class == class)
4295                         list_del_rcu(&list_entries[i].entry);
4296         }
4297         /*
4298          * Unhash the class and remove it from the all_lock_classes list:
4299          */
4300         hlist_del_rcu(&class->hash_entry);
4301         list_del_rcu(&class->lock_entry);
4302
4303         RCU_INIT_POINTER(class->key, NULL);
4304         RCU_INIT_POINTER(class->name, NULL);
4305 }
4306
4307 static inline int within(const void *addr, void *start, unsigned long size)
4308 {
4309         return addr >= start && addr < start + size;
4310 }
4311
4312 /*
4313  * Used in module.c to remove lock classes from memory that is going to be
4314  * freed; and possibly re-used by other modules.
4315  * freed, and possibly re-used by other modules.
4316  * We will have had one sync_sched() before getting here, so we're guaranteed
4317  * nobody will look up these exact classes -- they're properly dead but still
4318  * allocated.
4319  */
4320 void lockdep_free_key_range(void *start, unsigned long size)
4321 {
4322         struct lock_class *class;
4323         struct hlist_head *head;
4324         unsigned long flags;
4325         int i;
4326         int locked;
4327
4328         raw_local_irq_save(flags);
4329         locked = graph_lock();
4330
4331         /*
4332          * Unhash all classes that were created by this module:
4333          */
4334         for (i = 0; i < CLASSHASH_SIZE; i++) {
4335                 head = classhash_table + i;
4336                 hlist_for_each_entry_rcu(class, head, hash_entry) {
4337                         if (within(class->key, start, size))
4338                                 zap_class(class);
4339                         else if (within(class->name, start, size))
4340                                 zap_class(class);
4341                 }
4342         }
4343
4344         if (locked)
4345                 graph_unlock();
4346         raw_local_irq_restore(flags);
4347
4348         /*
4349          * Wait for any possible iterators from look_up_lock_class() to pass
4350          * before continuing to free the memory they refer to.
4351          *
4352          * sync_sched() is sufficient because the read-side runs with IRQs disabled.
4353          */
4354         synchronize_sched();
4355
4356         /*
4357          * XXX at this point we could return the resources to the pool;
4358          * instead we leak them. We would need to change to bitmap allocators
4359          * instead of the linear allocators we have now.
4360          */
4361 }
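/*
 * Editor's sketch: the module unload path is the main caller, zapping every
 * class whose key or name lived in the module image before that memory is
 * handed back, roughly (as called from kernel/module.c):
 *
 *	lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
 */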
4362
4363 void lockdep_reset_lock(struct lockdep_map *lock)
4364 {
4365         struct lock_class *class;
4366         struct hlist_head *head;
4367         unsigned long flags;
4368         int i, j;
4369         int locked;
4370
4371         raw_local_irq_save(flags);
4372
4373         /*
4374          * Remove all classes this lock might have:
4375          */
4376         for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
4377                 /*
4378                  * If the class exists we look it up and zap it:
4379                  */
4380                 class = look_up_lock_class(lock, j);
4381                 if (!IS_ERR_OR_NULL(class))
4382                         zap_class(class);
4383         }
4384         /*
4385          * Debug check: in the end all mapped classes should
4386          * be gone.
4387          */
4388         locked = graph_lock();
4389         for (i = 0; i < CLASSHASH_SIZE; i++) {
4390                 head = classhash_table + i;
4391                 hlist_for_each_entry_rcu(class, head, hash_entry) {
4392                         int match = 0;
4393
4394                         for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
4395                                 match |= class == lock->class_cache[j];
4396
4397                         if (unlikely(match)) {
4398                                 if (debug_locks_off_graph_unlock()) {
4399                                         /*
4400                                          * We all just reset everything, how did it match?
4401                                          */
4402                                         WARN_ON(1);
4403                                 }
4404                                 goto out_restore;
4405                         }
4406                 }
4407         }
4408         if (locked)
4409                 graph_unlock();
4410
4411 out_restore:
4412         raw_local_irq_restore(flags);
4413 }
4414
4415 void __init lockdep_info(void)
4416 {
4417         printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
4418
4419         printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
4420         printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
4421         printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
4422         printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
4423         printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
4424         printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
4425         printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
4426
4427         printk(" memory used by lock dependency info: %lu kB\n",
4428                 (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
4429                 sizeof(struct list_head) * CLASSHASH_SIZE +
4430                 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
4431                 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
4432                 sizeof(struct list_head) * CHAINHASH_SIZE
4433 #ifdef CONFIG_PROVE_LOCKING
4434                 + sizeof(struct circular_queue)
4435 #endif
4436                 ) / 1024
4437                 );
4438
4439         printk(" per task-struct memory footprint: %lu bytes\n",
4440                 sizeof(struct held_lock) * MAX_LOCK_DEPTH);
4441 }
4442
4443 static void
4444 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
4445                      const void *mem_to, struct held_lock *hlock)
4446 {
4447         if (!debug_locks_off())
4448                 return;
4449         if (debug_locks_silent)
4450                 return;
4451
4452         pr_warn("\n");
4453         pr_warn("=========================\n");
4454         pr_warn("WARNING: held lock freed!\n");
4455         print_kernel_ident();
4456         pr_warn("-------------------------\n");
4457         pr_warn("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
4458                 curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
4459         print_lock(hlock);
4460         lockdep_print_held_locks(curr);
4461
4462         pr_warn("\nstack backtrace:\n");
4463         dump_stack();
4464 }
4465
4466 static inline int not_in_range(const void* mem_from, unsigned long mem_len,
4467                                 const void* lock_from, unsigned long lock_len)
4468 {
4469         return lock_from + lock_len <= mem_from ||
4470                 mem_from + mem_len <= lock_from;
4471 }
4472
4473 /*
4474  * Called when kernel memory is freed (or unmapped), or if a lock
4475  * is destroyed or reinitialized - this code checks whether there is
4476  * any held lock in the memory range of <from> to <to>:
4477  */
4478 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
4479 {
4480         struct task_struct *curr = current;
4481         struct held_lock *hlock;
4482         unsigned long flags;
4483         int i;
4484
4485         if (unlikely(!debug_locks))
4486                 return;
4487
4488         raw_local_irq_save(flags);
4489         for (i = 0; i < curr->lockdep_depth; i++) {
4490                 hlock = curr->held_locks + i;
4491
4492                 if (not_in_range(mem_from, mem_len, hlock->instance,
4493                                         sizeof(*hlock->instance)))
4494                         continue;
4495
4496                 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
4497                 break;
4498         }
4499         raw_local_irq_restore(flags);
4500 }
4501 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
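/*
 * Editor's note (sketch): the allocators call this from their free paths, so
 * a pattern like the following gets caught while the lock is still live
 * (obj and its lock are illustrative names):
 *
 *	spin_lock(&obj->lock);
 *	kfree(obj);		<-- "held lock freed!" splat
 */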
4502
4503 static void print_held_locks_bug(void)
4504 {
4505         if (!debug_locks_off())
4506                 return;
4507         if (debug_locks_silent)
4508                 return;
4509
4510         pr_warn("\n");
4511         pr_warn("====================================\n");
4512         pr_warn("WARNING: %s/%d still has locks held!\n",
4513                current->comm, task_pid_nr(current));
4514         print_kernel_ident();
4515         pr_warn("------------------------------------\n");
4516         lockdep_print_held_locks(current);
4517         pr_warn("\nstack backtrace:\n");
4518         dump_stack();
4519 }
4520
4521 void debug_check_no_locks_held(void)
4522 {
4523         if (unlikely(current->lockdep_depth > 0))
4524                 print_held_locks_bug();
4525 }
4526 EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
4527
4528 #ifdef __KERNEL__
4529 void debug_show_all_locks(void)
4530 {
4531         struct task_struct *g, *p;
4532         int count = 10;
4533         int unlock = 1;
4534
4535         if (unlikely(!debug_locks)) {
4536                 pr_warn("INFO: lockdep is turned off.\n");
4537                 return;
4538         }
4539         pr_warn("\nShowing all locks held in the system:\n");
4540
4541         /*
4542          * Here we try to get the tasklist_lock as hard as possible;
4543          * if we are not successful after 2 seconds we ignore it (but
4544          * keep trying). This is to enable a debug printout even if a
4545          * tasklist_lock-holding task deadlocks or crashes.
4546          */
4547 retry:
4548         if (!read_trylock(&tasklist_lock)) {
4549                 if (count == 10)
4550                         pr_warn("hm, tasklist_lock locked, retrying... ");
4551                 if (count) {
4552                         count--;
4553                         pr_cont(" #%d", 10-count);
4554                         mdelay(200);
4555                         goto retry;
4556                 }
4557                 pr_cont(" ignoring it.\n");
4558                 unlock = 0;
4559         } else {
4560                 if (count != 10)
4561                         pr_cont(" locked it.\n");
4562         }
4563
4564         do_each_thread(g, p) {
4565                 /*
4566                  * It's not reliable to print a task's held locks
4567                  * if it's not sleeping (or if it's not the current
4568                  * task):
4569                  */
4570                 if (p->state == TASK_RUNNING && p != current)
4571                         continue;
4572                 if (p->lockdep_depth)
4573                         lockdep_print_held_locks(p);
4574                 if (!unlock)
4575                         if (read_trylock(&tasklist_lock))
4576                                 unlock = 1;
4577         } while_each_thread(g, p);
4578
4579         pr_warn("\n");
4580         pr_warn("=============================================\n\n");
4581
4582         if (unlock)
4583                 read_unlock(&tasklist_lock);
4584 }
4585 EXPORT_SYMBOL_GPL(debug_show_all_locks);
4586 #endif
4587
4588 /*
4589  * Careful: only use this function if you are sure that
4590  * the task cannot run in parallel!
4591  */
4592 void debug_show_held_locks(struct task_struct *task)
4593 {
4594         if (unlikely(!debug_locks)) {
4595                 printk("INFO: lockdep is turned off.\n");
4596                 return;
4597         }
4598         lockdep_print_held_locks(task);
4599 }
4600 EXPORT_SYMBOL_GPL(debug_show_held_locks);
4601
4602 asmlinkage __visible void lockdep_sys_exit(void)
4603 {
4604         struct task_struct *curr = current;
4605
4606         if (unlikely(curr->lockdep_depth)) {
4607                 if (!debug_locks_off())
4608                         return;
4609                 pr_warn("\n");
4610                 pr_warn("================================================\n");
4611                 pr_warn("WARNING: lock held when returning to user space!\n");
4612                 print_kernel_ident();
4613                 pr_warn("------------------------------------------------\n");
4614                 pr_warn("%s/%d is leaving the kernel with locks still held!\n",
4615                                 curr->comm, curr->pid);
4616                 lockdep_print_held_locks(curr);
4617         }
4618
4619         /*
4620          * The lock history for each syscall should be independent. So wipe the
4621          * slate clean on return to userspace.
4622          */
4623         lockdep_invariant_state(false);
4624 }
4625
4626 void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
4627 {
4628         struct task_struct *curr = current;
4629
4630         /* Note: the following can be executed concurrently, so be careful. */
4631         pr_warn("\n");
4632         pr_warn("=============================\n");
4633         pr_warn("WARNING: suspicious RCU usage\n");
4634         print_kernel_ident();
4635         pr_warn("-----------------------------\n");
4636         pr_warn("%s:%d %s!\n", file, line, s);
4637         pr_warn("\nother info that might help us debug this:\n\n");
4638         pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
4639                !rcu_lockdep_current_cpu_online()
4640                         ? "RCU used illegally from offline CPU!\n"
4641                         : !rcu_is_watching()
4642                                 ? "RCU used illegally from idle CPU!\n"
4643                                 : "",
4644                rcu_scheduler_active, debug_locks);
4645
4646         /*
4647          * If a CPU is in the RCU-free window in idle (ie: in the section
4648          * between rcu_idle_enter() and rcu_idle_exit(), then RCU
4649          * considers that CPU to be in an "extended quiescent state",
4650          * which means that RCU will be completely ignoring that CPU.
4651          * Therefore, rcu_read_lock() and friends have absolutely no
4652          * effect on a CPU running in that state. In other words, even if
4653          * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
4654          * delete data structures out from under it.  RCU really has no
4655          * choice here: we need to keep an RCU-free window in idle where
4656          * the CPU may possibly enter into low power mode. This way, other
4657          * CPUs that started a grace period can observe the extended quiescent
4658          * state; otherwise we would delay every grace period for as long as
4659          * we run in the idle task.
4660          *
4661          * So complain bitterly if someone does call rcu_read_lock(),
4662          * rcu_read_lock_bh() and so on from extended quiescent states.
4663          */
4664         if (!rcu_is_watching())
4665                 pr_warn("RCU used illegally from extended quiescent state!\n");
4666
4667         lockdep_print_held_locks(curr);
4668         pr_warn("\nstack backtrace:\n");
4669         dump_stack();
4670 }
4671 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
4672
4673 #ifdef CONFIG_LOCKDEP_CROSSRELEASE
4674
4675 /*
4676  * Crossrelease works by recording a lock history for each thread and
4677  * connecting those historic locks that were taken after the
4678  * wait_for_completion() in the complete() context.
4679  *
4680  * Task-A                               Task-B
4681  *
4682  *                                      mutex_lock(&A);
4683  *                                      mutex_unlock(&A);
4684  *
4685  * wait_for_completion(&C);
4686  *   lock_acquire_crosslock();
4687  *     atomic_inc_return(&cross_gen_id);
4688  *                                |
4689  *                                |     mutex_lock(&B);
4690  *                                |     mutex_unlock(&B);
4691  *                                |
4692  *                                |     complete(&C);
4693  *                                `--     lock_commit_crosslock();
4694  *
4695  * This then adds a dependency between B and C; see the sketch below.
4696  */
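/*
 * For illustration, a minimal sketch of the kind of inversion this machinery
 * lets lockdep see (the mutex 'm' and completion 'done' are hypothetical
 * names, not anything defined in this file):
 *
 *	static DEFINE_MUTEX(m);
 *	static DECLARE_COMPLETION(done);
 *
 *	Task-A                               Task-B
 *
 *	mutex_lock(&m);
 *	wait_for_completion(&done);          mutex_lock(&m);
 *	                                     complete(&done);
 *	                                     mutex_unlock(&m);
 *	mutex_unlock(&m);
 *
 * The commit at complete() connects 'done' to the locks recorded in Task-B's
 * history (here 'm'), while on the wait side 'done' was acquired with 'm'
 * held. If these paths are ever exercised in an order that happens not to
 * deadlock, the recorded cycle can still be reported.
 */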
4697
4698 #define xhlock(i)         (current->xhlocks[(i) % MAX_XHLOCKS_NR])
4699
4700 /*
4701  * Whenever a crosslock is acquired, cross_gen_id is increased.
4702  */
4703 static atomic_t cross_gen_id; /* Can be wrapped */
4704
4705 /*
4706  * Make an entry of the ring buffer invalid.
4707  */
4708 static inline void invalidate_xhlock(struct hist_lock *xhlock)
4709 {
4710         /*
4711          * Normally, xhlock->hlock.instance must be !NULL.
4712          */
4713         xhlock->hlock.instance = NULL;
4714 }
4715
4716 /*
4717  * Lock history stacks; we have 2 nested lock history stacks:
4718  *
4719  *   HARD(IRQ)
4720  *   SOFT(IRQ)
4721  *
4722  * Once we complete a HARD/SOFT IRQ, future task locks should not depend on
4723  * any of the locks observed while running that IRQ. So what we do is rewind
4724  * the history buffer and erase all our knowledge of that temporary
4725  * context.
4726  */
4727
4728 void crossrelease_hist_start(enum xhlock_context_t c)
4729 {
4730         struct task_struct *cur = current;
4731
4732         if (!cur->xhlocks)
4733                 return;
4734
4735         cur->xhlock_idx_hist[c] = cur->xhlock_idx;
4736         cur->hist_id_save[c]    = cur->hist_id;
4737 }
4738
4739 void crossrelease_hist_end(enum xhlock_context_t c)
4740 {
4741         struct task_struct *cur = current;
4742
4743         if (cur->xhlocks) {
4744                 unsigned int idx = cur->xhlock_idx_hist[c];
4745                 struct hist_lock *h = &xhlock(idx);
4746
4747                 cur->xhlock_idx = idx;
4748
4749                 /* Check if the ring was overwritten. */
4750                 if (h->hist_id != cur->hist_id_save[c])
4751                         invalidate_xhlock(h);
4752         }
4753 }
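/*
 * For illustration, the intended bracketing around an interrupt (a sketch
 * only; the real call sites live in the hardirq/softirq entry and exit
 * paths, and handle_the_irq() is a made-up placeholder):
 *
 *	crossrelease_hist_start(XHLOCK_HARD);
 *	handle_the_irq();                       <- locks taken here enter
 *	                                           the history
 *	crossrelease_hist_end(XHLOCK_HARD);     <- rewind xhlock_idx, so
 *	                                           future task locks do not
 *	                                           depend on the IRQ's locks
 *
 * Softirqs nest the same way using XHLOCK_SOFT.
 */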
4754
4755 /*
4756  * lockdep_invariant_state() is used to annotate independence inside a task, to
4757  * make one task look like multiple independent 'tasks'.
4758  *
4759  * Take for instance workqueues; each work is independent of the last. The
4760  * completion of a future work does not depend on the completion of a past work
4761  * (in general). Therefore we must not carry that (lock) dependency across
4762  * works.
4763  *
4764  * This is true for many things; pretty much all kthreads fall into this
4765  * pattern, where they have an invariant state and future completions do not
4766  * depend on past completions. It's just that since they all have the 'same'
4767  * form -- the kthread does the same over and over -- it doesn't typically
4768  * matter.
4769  *
4770  * The same is true for system-calls, once a system call is completed (we've
4771  * returned to userspace) the next system call does not depend on the lock
4772  * history of the previous system call.
4773  *
4774  * The key property for independence, this invariant state, is that it must be
4775  * a point where we hold no locks and have no history. Because if we were to
4776  * hold locks, the restore at _end() would not necessarily recover its history
4777  * entry. Similarly, independence by definition means it does not depend on
4778  * prior state.
4779  */
4780 void lockdep_invariant_state(bool force)
4781 {
4782         /*
4783          * We call this at an invariant point, no current state, no history.
4784          * Verify the former, enforce the latter.
4785          */
4786         WARN_ON_ONCE(!force && current->lockdep_depth);
4787         if (current->xhlocks)
4788                 invalidate_xhlock(&xhlock(current->xhlock_idx));
4789 }
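/*
 * For illustration, the intended call pattern for a worker-style loop (a
 * sketch only; get_next_work() is a made-up stand-in and the real workqueue
 * code is more involved):
 *
 *	for (;;) {
 *		struct work_struct *work = get_next_work();
 *
 *		lockdep_invariant_state(true);
 *		work->func(work);
 *	}
 *
 * 'force' suppresses the held-locks warning for callers that cannot
 * guarantee an empty held_lock stack at this point; the syscall-return
 * path passes 'false' so that returning to userspace with locks still
 * held keeps triggering the warning.
 */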
4790
4791 static int cross_lock(struct lockdep_map *lock)
4792 {
4793         return lock ? lock->cross : 0;
4794 }
4795
4796 /*
4797  * This is needed to decide the relationship between wrappable variables.
4798  */
4799 static inline int before(unsigned int a, unsigned int b)
4800 {
4801         return (int)(a - b) < 0;
4802 }
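/*
 * For example, right after the generation counter wraps:
 *
 *	before(0xfffffffeU, 0x00000001U) == 1      ((int)0xfffffffd == -3 < 0)
 *	before(0x00000001U, 0xfffffffeU) == 0      ((int)0x00000003 ==  3 >= 0)
 *
 * i.e. the pre-wrap value still orders before the post-wrap one, which holds
 * as long as the two values are less than 2^31 apart (the same trick as TCP
 * sequence number comparison).
 */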
4803
4804 static inline struct lock_class *xhlock_class(struct hist_lock *xhlock)
4805 {
4806         return hlock_class(&xhlock->hlock);
4807 }
4808
4809 static inline struct lock_class *xlock_class(struct cross_lock *xlock)
4810 {
4811         return hlock_class(&xlock->hlock);
4812 }
4813
4814 /*
4815  * Should we check a dependency with the previous one?
4816  */
4817 static inline int depend_before(struct held_lock *hlock)
4818 {
4819         return hlock->read != 2 && hlock->check && !hlock->trylock;
4820 }
4821
4822 /*
4823  * Should we check a dependency with the next one?
4824  */
4825 static inline int depend_after(struct held_lock *hlock)
4826 {
4827         return hlock->read != 2 && hlock->check;
4828 }
4829
4830 /*
4831  * Check if the xhlock is valid, which would be false if:
4832  *
4833  *    1. it has not been used since initialization, or
4834  *    2. it got invalidated.
4835  *
4836  * Remember that hist_lock is implemented as a ring buffer.
4837  */
4838 static inline int xhlock_valid(struct hist_lock *xhlock)
4839 {
4840         /*
4841          * xhlock->hlock.instance must be !NULL.
4842          */
4843         return !!xhlock->hlock.instance;
4844 }
4845
4846 /*
4847  * Record a hist_lock entry.
4848  *
4849  * The only requirement is that IRQs are disabled.
4850  */
4851 static void add_xhlock(struct held_lock *hlock)
4852 {
4853         unsigned int idx = ++current->xhlock_idx;
4854         struct hist_lock *xhlock = &xhlock(idx);
4855
4856 #ifdef CONFIG_DEBUG_LOCKDEP
4857         /*
4858          * This can be done locklessly because it is all task-local
4859          * state; we must, however, ensure IRQs are disabled.
4860          */
4861         WARN_ON_ONCE(!irqs_disabled());
4862 #endif
4863
4864         /* Initialize hist_lock's members */
4865         xhlock->hlock = *hlock;
4866         xhlock->hist_id = ++current->hist_id;
4867
4868         xhlock->trace.nr_entries = 0;
4869         xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;
4870         xhlock->trace.entries = xhlock->trace_entries;
4871         xhlock->trace.skip = 3;
4872         save_stack_trace(&xhlock->trace);
4873 }
4874
4875 static inline int same_context_xhlock(struct hist_lock *xhlock)
4876 {
4877         return xhlock->hlock.irq_context == task_irq_context(current);
4878 }
4879
4880 /*
4881  * This should be as lockless as possible because it is called
4882  * very frequently.
4883  */
4884 static void check_add_xhlock(struct held_lock *hlock)
4885 {
4886         /*
4887          * Record a hist_lock only if acquisitions ahead could depend on
4888          * the held_lock. For example, if the held_lock is a trylock then
4889          * acquisitions ahead never depend on it. In that case we don't
4890          * need to record it; just return.
4891          */
4892         if (!current->xhlocks || !depend_before(hlock))
4893                 return;
4894
4895         add_xhlock(hlock);
4896 }
4897
4898 /*
4899  * For crosslock.
4900  */
4901 static int add_xlock(struct held_lock *hlock)
4902 {
4903         struct cross_lock *xlock;
4904         unsigned int gen_id;
4905
4906         if (!graph_lock())
4907                 return 0;
4908
4909         xlock = &((struct lockdep_map_cross *)hlock->instance)->xlock;
4910
4911         /*
4912          * When acquisitions for a crosslock overlap, we use nr_acquire
4913          * to perform commit for them, based on the cross_gen_id of the
4914          * first acquisition, which allows additional dependencies to be
4915          * added.
4916          *
4917          * Moreover, when no acquisition of a crosslock is in progress,
4918          * we should not perform commit because the lock might not exist
4919          * any more, which might cause incorrect memory access. So we
4920          * have to track the number of acquisitions of a crosslock.
4921          *
4922          * depend_after() is necessary to initialize only the first
4923          * valid xlock so that the xlock can be used on its commit.
4924          */
4925         if (xlock->nr_acquire++ && depend_after(&xlock->hlock))
4926                 goto unlock;
4927
4928         gen_id = (unsigned int)atomic_inc_return(&cross_gen_id);
4929         xlock->hlock = *hlock;
4930         xlock->hlock.gen_id = gen_id;
4931 unlock:
4932         graph_unlock();
4933         return 1;
4934 }
4935
4936 /*
4937  * Called for both normal and crosslock acquires. Normal locks will be
4938  * pushed on the hist_lock queue. Cross locks will record state and
4939  * stop regular lock_acquire() to avoid being placed on the held_lock
4940  * stack.
4941  *
4942  * Return: 0 - failure;
4943  *         1 - crosslock, done;
4944  *         2 - normal lock, continue to held_lock[] ops.
4945  */
4946 static int lock_acquire_crosslock(struct held_lock *hlock)
4947 {
4948         /*
4949          *      CONTEXT 1               CONTEXT 2
4950          *      ---------               ---------
4951          *      lock A (cross)
4952          *      X = atomic_inc_return(&cross_gen_id)
4953          *      ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
4954          *                              Y = atomic_read_acquire(&cross_gen_id)
4955          *                              lock B
4956          *
4957          * atomic_read_acquire() is for ordering between A and B,
4958          * IOW, A happens before B when CONTEXT 2 sees Y >= X.
4959          *
4960          * Pairs with atomic_inc_return() in add_xlock().
4961          */
4962         hlock->gen_id = (unsigned int)atomic_read_acquire(&cross_gen_id);
4963
4964         if (cross_lock(hlock->instance))
4965                 return add_xlock(hlock);
4966
4967         check_add_xhlock(hlock);
4968         return 2;
4969 }
4970
4971 static int copy_trace(struct stack_trace *trace)
4972 {
4973         unsigned long *buf = stack_trace + nr_stack_trace_entries;
4974         unsigned int max_nr = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
4975         unsigned int nr = min(max_nr, trace->nr_entries);
4976
4977         trace->nr_entries = nr;
4978         memcpy(buf, trace->entries, nr * sizeof(trace->entries[0]));
4979         trace->entries = buf;
4980         nr_stack_trace_entries += nr;
4981
4982         if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
4983                 if (!debug_locks_off_graph_unlock())
4984                         return 0;
4985
4986                 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
4987                 dump_stack();
4988
4989                 return 0;
4990         }
4991
4992         return 1;
4993 }
4994
4995 static int commit_xhlock(struct cross_lock *xlock, struct hist_lock *xhlock)
4996 {
4997         unsigned int xid, pid;
4998         u64 chain_key;
4999
5000         xid = xlock_class(xlock) - lock_classes;
5001         chain_key = iterate_chain_key((u64)0, xid);
5002         pid = xhlock_class(xhlock) - lock_classes;
5003         chain_key = iterate_chain_key(chain_key, pid);
5004
5005         if (lookup_chain_cache(chain_key))
5006                 return 1;
5007
5008         if (!add_chain_cache_classes(xid, pid, xhlock->hlock.irq_context,
5009                                 chain_key))
5010                 return 0;
5011
5012         if (!check_prev_add(current, &xlock->hlock, &xhlock->hlock, 1,
5013                             &xhlock->trace, copy_trace))
5014                 return 0;
5015
5016         return 1;
5017 }
5018
5019 static void commit_xhlocks(struct cross_lock *xlock)
5020 {
5021         unsigned int cur = current->xhlock_idx;
5022         unsigned int prev_hist_id = xhlock(cur).hist_id;
5023         unsigned int i;
5024
5025         if (!graph_lock())
5026                 return;
5027
5028         if (xlock->nr_acquire) {
5029                 for (i = 0; i < MAX_XHLOCKS_NR; i++) {
5030                         struct hist_lock *xhlock = &xhlock(cur - i);
5031
5032                         if (!xhlock_valid(xhlock))
5033                                 break;
5034
5035                         if (before(xhlock->hlock.gen_id, xlock->hlock.gen_id))
5036                                 break;
5037
5038                         if (!same_context_xhlock(xhlock))
5039                                 break;
5040
5041                         /*
5042                          * Filter out the cases where the ring buffer was
5043                          * overwritten: walking backwards, an entry with a
5044                          * bigger hist_id than the previous one is impossible
5045                          * unless the ring wrapped and overwrote it:
5046                          */
5047                         if (unlikely(before(prev_hist_id, xhlock->hist_id)))
5048                                 break;
5049
5050                         prev_hist_id = xhlock->hist_id;
5051
5052                         /*
5053                          * commit_xhlock() returns 0 with graph_lock already
5054                          * released if it fails.
5055                          */
5056                         if (!commit_xhlock(xlock, xhlock))
5057                                 return;
5058                 }
5059         }
5060
5061         graph_unlock();
5062 }
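/*
 * A small worked example of the walk above, with made-up numbers: suppose
 * the crosslock was acquired at gen_id 100 and the newest history entries
 * of the task are
 *
 *	idx:      cur     cur-1   cur-2
 *	gen_id:   104     102     99
 *
 * The entries with gen_id 104 and 102 get committed, since they were
 * acquired after the crosslock and completing it may therefore depend on
 * them. The walk stops at 99 because before(99, 100) is true: that lock,
 * and everything older in the ring, predates the crosslock.
 */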
5063
5064 void lock_commit_crosslock(struct lockdep_map *lock)
5065 {
5066         struct cross_lock *xlock;
5067         unsigned long flags;
5068
5069         if (unlikely(!debug_locks || current->lockdep_recursion))
5070                 return;
5071
5072         if (!current->xhlocks)
5073                 return;
5074
5075         /*
5076          * Commit hist_locks with the cross_lock only if the cross_lock
5077          * could depend on acquisitions after it.
5078          *
5079          * For example, if the cross_lock does not have the 'check' flag
5080          * then we don't need to check dependencies and commit for it;
5081          * just skip it. In that case, of course, the cross_lock does
5082          * not depend on acquisitions ahead, either.
5083          *
5084          * WARNING: Don't do this check in add_xlock() in advance. When
5085          * the acquisition context is different from the commit context,
5086          * an invalid (skipped) cross_lock might be accessed.
5087          */
5088         if (!depend_after(&((struct lockdep_map_cross *)lock)->xlock.hlock))
5089                 return;
5090
5091         raw_local_irq_save(flags);
5092         check_flags(flags);
5093         current->lockdep_recursion = 1;
5094         xlock = &((struct lockdep_map_cross *)lock)->xlock;
5095         commit_xhlocks(xlock);
5096         current->lockdep_recursion = 0;
5097         raw_local_irq_restore(flags);
5098 }
5099 EXPORT_SYMBOL_GPL(lock_commit_crosslock);
5100
5101 /*
5102  * Return: 0 - failure;
5103  *         1 - crosslock, done;
5104  *         2 - normal lock, continue to held_lock[] ops.
5105  */
5106 static int lock_release_crosslock(struct lockdep_map *lock)
5107 {
5108         if (cross_lock(lock)) {
5109                 if (!graph_lock())
5110                         return 0;
5111                 ((struct lockdep_map_cross *)lock)->xlock.nr_acquire--;
5112                 graph_unlock();
5113                 return 1;
5114         }
5115         return 2;
5116 }
5117
5118 static void cross_init(struct lockdep_map *lock, int cross)
5119 {
5120         if (cross)
5121                 ((struct lockdep_map_cross *)lock)->xlock.nr_acquire = 0;
5122
5123         lock->cross = cross;
5124
5125         /*
5126          * Crossrelease assumes that the ring buffer size of xhlocks
5127          * is a power of 2, so enforce that at build time.
5128          */
5129         BUILD_BUG_ON(MAX_XHLOCKS_NR & (MAX_XHLOCKS_NR - 1));
5130 }
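/*
 * Why the power-of-2 size matters, sketched with small numbers: xhlock_idx
 * is a free-running unsigned int and xhlock(i) maps it into the ring with
 * '% MAX_XHLOCKS_NR', so the mapping is only continuous across the 2^32
 * wrap when MAX_XHLOCKS_NR divides 2^32:
 *
 *	size 8:   ..., 0xfffffffe -> 6, 0xffffffff -> 7, 0x0 -> 0, 0x1 -> 1, ...
 *	size 10:  ..., 0xfffffffe -> 4, 0xffffffff -> 5, 0x0 -> 0, ...
 *
 * A non-power-of-2 size would skip slots at the wrap and break the ring
 * ordering that commit_xhlocks() depends on. The power-of-2 size also lets
 * the compiler turn the '%' into a mask, and the UINT_MAX initial index set
 * up by lockdep_init_task() then maps to the last slot, so the first
 * increment starts the ring cleanly at slot 0.
 */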
5131
5132 void lockdep_init_task(struct task_struct *task)
5133 {
5134         int i;
5135
5136         task->xhlock_idx = UINT_MAX;
5137         task->hist_id = 0;
5138
5139         for (i = 0; i < XHLOCK_CTX_NR; i++) {
5140                 task->xhlock_idx_hist[i] = UINT_MAX;
5141                 task->hist_id_save[i] = 0;
5142         }
5143
5144         task->xhlocks = kzalloc(sizeof(struct hist_lock) * MAX_XHLOCKS_NR,
5145                                 GFP_KERNEL);
5146 }
5147
5148 void lockdep_free_task(struct task_struct *task)
5149 {
5150         if (task->xhlocks) {
5151                 void *tmp = task->xhlocks;
5152                 /* Disable crossrelease for current */
5153                 task->xhlocks = NULL;
5154                 kfree(tmp);
5155         }
5156 }
5157 #endif