1 /*
2  * Generic infrastructure for lifetime debugging of objects.
3  *
4  * Started by Thomas Gleixner
5  *
6  * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
7  *
8  * For licensing details see kernel-base/COPYING
9  */
10
11 #define pr_fmt(fmt) "ODEBUG: " fmt
12
13 #include <linux/debugobjects.h>
14 #include <linux/interrupt.h>
15 #include <linux/sched.h>
16 #include <linux/sched/task_stack.h>
17 #include <linux/seq_file.h>
18 #include <linux/debugfs.h>
19 #include <linux/slab.h>
20 #include <linux/hash.h>
21 #include <linux/kmemleak.h>
22
23 #define ODEBUG_HASH_BITS        14
24 #define ODEBUG_HASH_SIZE        (1 << ODEBUG_HASH_BITS)
25
26 #define ODEBUG_POOL_SIZE        1024
27 #define ODEBUG_POOL_MIN_LEVEL   256
28
29 #define ODEBUG_CHUNK_SHIFT      PAGE_SHIFT
30 #define ODEBUG_CHUNK_SIZE       (1 << ODEBUG_CHUNK_SHIFT)
31 #define ODEBUG_CHUNK_MASK       (~(ODEBUG_CHUNK_SIZE - 1))
32
33 struct debug_bucket {
34         struct hlist_head       list;
35         raw_spinlock_t          lock;
36 };
37
38 static struct debug_bucket      obj_hash[ODEBUG_HASH_SIZE];
39
40 static struct debug_obj         obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
41
42 static DEFINE_RAW_SPINLOCK(pool_lock);
43
44 static HLIST_HEAD(obj_pool);
45
46 static int                      obj_pool_min_free = ODEBUG_POOL_SIZE;
47 static int                      obj_pool_free = ODEBUG_POOL_SIZE;
48 static int                      obj_pool_used;
49 static int                      obj_pool_max_used;
50 static struct kmem_cache        *obj_cache;
51
52 static int                      debug_objects_maxchain __read_mostly;
53 static int                      debug_objects_fixups __read_mostly;
54 static int                      debug_objects_warnings __read_mostly;
55 static int                      debug_objects_enabled __read_mostly
56                                 = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
57 static int                      debug_objects_pool_size __read_mostly
58                                 = ODEBUG_POOL_SIZE;
59 static int                      debug_objects_pool_min_level __read_mostly
60                                 = ODEBUG_POOL_MIN_LEVEL;
61 static struct debug_obj_descr   *descr_test  __read_mostly;
62
63 /*
64  * Track the number of kmem_cache_alloc()/kmem_cache_free() calls made.
65  */
66 static int                      debug_objects_allocated;
67 static int                      debug_objects_freed;
68
69 static void free_obj_work(struct work_struct *work);
70 static DECLARE_WORK(debug_obj_work, free_obj_work);
71
72 static int __init enable_object_debug(char *str)
73 {
74         debug_objects_enabled = 1;
75         return 0;
76 }
77
78 static int __init disable_object_debug(char *str)
79 {
80         debug_objects_enabled = 0;
81         return 0;
82 }
83
84 early_param("debug_objects", enable_object_debug);
85 early_param("no_debug_objects", disable_object_debug);
86
87 static const char *obj_states[ODEBUG_STATE_MAX] = {
88         [ODEBUG_STATE_NONE]             = "none",
89         [ODEBUG_STATE_INIT]             = "initialized",
90         [ODEBUG_STATE_INACTIVE]         = "inactive",
91         [ODEBUG_STATE_ACTIVE]           = "active",
92         [ODEBUG_STATE_DESTROYED]        = "destroyed",
93         [ODEBUG_STATE_NOTAVAILABLE]     = "not available",
94 };
95
96 static void fill_pool(void)
97 {
98         gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
99         struct debug_obj *new;
100         unsigned long flags;
101
102         if (likely(obj_pool_free >= debug_objects_pool_min_level))
103                 return;
104
105         if (unlikely(!obj_cache))
106                 return;
107
108         while (obj_pool_free < debug_objects_pool_min_level) {
109
110                 new = kmem_cache_zalloc(obj_cache, gfp);
111                 if (!new)
112                         return;
113
114                 raw_spin_lock_irqsave(&pool_lock, flags);
115                 hlist_add_head(&new->node, &obj_pool);
116                 debug_objects_allocated++;
117                 obj_pool_free++;
118                 raw_spin_unlock_irqrestore(&pool_lock, flags);
119         }
120 }
121
122 /*
123  * Look up an object in the hash bucket.
124  */
125 static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
126 {
127         struct debug_obj *obj;
128         int cnt = 0;
129
130         hlist_for_each_entry(obj, &b->list, node) {
131                 cnt++;
132                 if (obj->object == addr)
133                         return obj;
134         }
135         if (cnt > debug_objects_maxchain)
136                 debug_objects_maxchain = cnt;
137
138         return NULL;
139 }
140
141 /*
142  * Allocate a new object. If the pool is empty, switch off the debugger.
143  * Must be called with interrupts disabled.
144  */
145 static struct debug_obj *
146 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
147 {
148         struct debug_obj *obj = NULL;
149
150         raw_spin_lock(&pool_lock);
151         if (obj_pool.first) {
152                 obj         = hlist_entry(obj_pool.first, typeof(*obj), node);
153
154                 obj->object = addr;
155                 obj->descr  = descr;
156                 obj->state  = ODEBUG_STATE_NONE;
157                 obj->astate = 0;
158                 hlist_del(&obj->node);
159
160                 hlist_add_head(&obj->node, &b->list);
161
162                 obj_pool_used++;
163                 if (obj_pool_used > obj_pool_max_used)
164                         obj_pool_max_used = obj_pool_used;
165
166                 obj_pool_free--;
167                 if (obj_pool_free < obj_pool_min_free)
168                         obj_pool_min_free = obj_pool_free;
169         }
170         raw_spin_unlock(&pool_lock);
171
172         return obj;
173 }
174
175 /*
176  * workqueue function to free objects.
177  *
178  * To reduce contention on the global pool_lock, the actual freeing of
179  * debug objects will be delayed if the pool_lock is busy. We also free
180  * the objects in batches of 4 for each lock/unlock cycle.
181  */
182 #define ODEBUG_FREE_BATCH       4
183
184 static void free_obj_work(struct work_struct *work)
185 {
186         struct debug_obj *objs[ODEBUG_FREE_BATCH];
187         unsigned long flags;
188         int i;
189
190         if (!raw_spin_trylock_irqsave(&pool_lock, flags))
191                 return;
192         while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
193                 for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
194                         objs[i] = hlist_entry(obj_pool.first,
195                                               typeof(*objs[0]), node);
196                         hlist_del(&objs[i]->node);
197                 }
198
199                 obj_pool_free -= ODEBUG_FREE_BATCH;
200                 debug_objects_freed += ODEBUG_FREE_BATCH;
201                 /*
202                  * We release pool_lock across kmem_cache_free() to
203                  * avoid contention on pool_lock.
204                  */
205                 raw_spin_unlock_irqrestore(&pool_lock, flags);
206                 for (i = 0; i < ODEBUG_FREE_BATCH; i++)
207                         kmem_cache_free(obj_cache, objs[i]);
208                 if (!raw_spin_trylock_irqsave(&pool_lock, flags))
209                         return;
210         }
211         raw_spin_unlock_irqrestore(&pool_lock, flags);
212 }
213
214 /*
215  * Put the object back into the pool and schedule work to free objects
216  * if necessary.
217  */
218 static void free_object(struct debug_obj *obj)
219 {
220         unsigned long flags;
221         int sched = 0;
222
223         raw_spin_lock_irqsave(&pool_lock, flags);
224         /*
225          * schedule work when the pool is filled and the cache is
226          * initialized:
227          */
228         if (obj_pool_free > debug_objects_pool_size && obj_cache)
229                 sched = 1;
230         hlist_add_head(&obj->node, &obj_pool);
231         obj_pool_free++;
232         obj_pool_used--;
233         raw_spin_unlock_irqrestore(&pool_lock, flags);
234         if (sched)
235                 schedule_work(&debug_obj_work);
236 }
237
238 /*
239  * We ran out of memory. That means we probably have tons of objects
240  * allocated.
241  */
242 static void debug_objects_oom(void)
243 {
244         struct debug_bucket *db = obj_hash;
245         struct hlist_node *tmp;
246         HLIST_HEAD(freelist);
247         struct debug_obj *obj;
248         unsigned long flags;
249         int i;
250
251         pr_warn("Out of memory. ODEBUG disabled\n");
252
253         for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
254                 raw_spin_lock_irqsave(&db->lock, flags);
255                 hlist_move_list(&db->list, &freelist);
256                 raw_spin_unlock_irqrestore(&db->lock, flags);
257
258                 /* Now free them */
259                 hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
260                         hlist_del(&obj->node);
261                         free_object(obj);
262                 }
263         }
264 }
265
266 /*
267  * We use the pfn of the address for the hash. That way we can check
268  * for freed objects simply by checking the affected bucket.
269  */
270 static struct debug_bucket *get_bucket(unsigned long addr)
271 {
272         unsigned long hash;
273
274         hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
275         return &obj_hash[hash];
276 }
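
/*
 * Worked example (illustrative): with PAGE_SHIFT == 12 an object at
 * 0xffff888012345678 lies in chunk 0xffff888012345 (addr >> ODEBUG_CHUNK_SHIFT),
 * and hash_long() folds that chunk number into one of the 2^14 buckets.
 * Every object within one page therefore hashes to the same bucket, which is
 * what allows a freed memory range to be scanned page by page.
 */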
277
278 static void debug_print_object(struct debug_obj *obj, char *msg)
279 {
280         struct debug_obj_descr *descr = obj->descr;
281         static int limit;
282
283         if (limit < 5 && descr != descr_test) {
284                 void *hint = descr->debug_hint ?
285                         descr->debug_hint(obj->object) : NULL;
286                 limit++;
287                 WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
288                                  "object type: %s hint: %pS\n",
289                         msg, obj_states[obj->state], obj->astate,
290                         descr->name, hint);
291         }
292         debug_objects_warnings++;
293 }
294
295 /*
296  * Try to repair the damage, so we have a better chance to get useful
297  * debug output.
298  */
299 static bool
300 debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
301                    void * addr, enum debug_obj_state state)
302 {
303         if (fixup && fixup(addr, state)) {
304                 debug_objects_fixups++;
305                 return true;
306         }
307         return false;
308 }
309
310 static void debug_object_is_on_stack(void *addr, int onstack)
311 {
312         int is_on_stack;
313         static int limit;
314
315         if (limit > 4)
316                 return;
317
318         is_on_stack = object_is_on_stack(addr);
319         if (is_on_stack == onstack)
320                 return;
321
322         limit++;
323         if (is_on_stack)
324                 pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
325                          task_stack_page(current));
326         else
327                 pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
328                          task_stack_page(current));
329
330         WARN_ON(1);
331 }
332
333 static void
334 __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
335 {
336         enum debug_obj_state state;
337         struct debug_bucket *db;
338         struct debug_obj *obj;
339         unsigned long flags;
340
341         fill_pool();
342
343         db = get_bucket((unsigned long) addr);
344
345         raw_spin_lock_irqsave(&db->lock, flags);
346
347         obj = lookup_object(addr, db);
348         if (!obj) {
349                 obj = alloc_object(addr, db, descr);
350                 if (!obj) {
351                         debug_objects_enabled = 0;
352                         raw_spin_unlock_irqrestore(&db->lock, flags);
353                         debug_objects_oom();
354                         return;
355                 }
356                 debug_object_is_on_stack(addr, onstack);
357         }
358
359         switch (obj->state) {
360         case ODEBUG_STATE_NONE:
361         case ODEBUG_STATE_INIT:
362         case ODEBUG_STATE_INACTIVE:
363                 obj->state = ODEBUG_STATE_INIT;
364                 break;
365
366         case ODEBUG_STATE_ACTIVE:
367                 debug_print_object(obj, "init");
368                 state = obj->state;
369                 raw_spin_unlock_irqrestore(&db->lock, flags);
370                 debug_object_fixup(descr->fixup_init, addr, state);
371                 return;
372
373         case ODEBUG_STATE_DESTROYED:
374                 debug_print_object(obj, "init");
375                 break;
376         default:
377                 break;
378         }
379
380         raw_spin_unlock_irqrestore(&db->lock, flags);
381 }
382
383 /**
384  * debug_object_init - debug checks when an object is initialized
385  * @addr:       address of the object
386  * @descr:      pointer to an object specific debug description structure
387  */
388 void debug_object_init(void *addr, struct debug_obj_descr *descr)
389 {
390         if (!debug_objects_enabled)
391                 return;
392
393         __debug_object_init(addr, descr, 0);
394 }
395 EXPORT_SYMBOL_GPL(debug_object_init);
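
/*
 * Usage sketch (illustrative only; "foo", foo_debug_descr and foo_fixup_init
 * are hypothetical and not defined in this file). A user describes its object
 * type with a debug_obj_descr and calls the debug_object_* hooks from its own
 * lifetime functions:
 *
 *	static struct debug_obj_descr foo_debug_descr = {
 *		.name		= "foo",
 *		.fixup_init	= foo_fixup_init,
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *		... normal initialization of *f ...
 *	}
 */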
396
397 /**
398  * debug_object_init_on_stack - debug checks when an object on stack is
399  *                              initialized
400  * @addr:       address of the object
401  * @descr:      pointer to an object specific debug description structure
402  */
403 void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
404 {
405         if (!debug_objects_enabled)
406                 return;
407
408         __debug_object_init(addr, descr, 1);
409 }
410 EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
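
/*
 * Illustrative sketch (same hypothetical "foo" type as above): objects that
 * live on the stack must use the _on_stack variant and must be removed from
 * the tracker before the stack frame is left, typically with
 * debug_object_free() or debug_object_destroy():
 *
 *	void foo_on_stack_user(void)
 *	{
 *		struct foo f;
 *
 *		debug_object_init_on_stack(&f, &foo_debug_descr);
 *		... hand &f to asynchronous code and wait for completion ...
 *		debug_object_free(&f, &foo_debug_descr);
 *	}
 */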
411
412 /**
413  * debug_object_activate - debug checks when an object is activated
414  * @addr:       address of the object
415  * @descr:      pointer to an object specific debug description structure
416  * Returns 0 on success, -EINVAL if the check failed.
417  */
418 int debug_object_activate(void *addr, struct debug_obj_descr *descr)
419 {
420         enum debug_obj_state state;
421         struct debug_bucket *db;
422         struct debug_obj *obj;
423         unsigned long flags;
424         int ret;
425         struct debug_obj o = { .object = addr,
426                                .state = ODEBUG_STATE_NOTAVAILABLE,
427                                .descr = descr };
428
429         if (!debug_objects_enabled)
430                 return 0;
431
432         db = get_bucket((unsigned long) addr);
433
434         raw_spin_lock_irqsave(&db->lock, flags);
435
436         obj = lookup_object(addr, db);
437         if (obj) {
438                 switch (obj->state) {
439                 case ODEBUG_STATE_INIT:
440                 case ODEBUG_STATE_INACTIVE:
441                         obj->state = ODEBUG_STATE_ACTIVE;
442                         ret = 0;
443                         break;
444
445                 case ODEBUG_STATE_ACTIVE:
446                         debug_print_object(obj, "activate");
447                         state = obj->state;
448                         raw_spin_unlock_irqrestore(&db->lock, flags);
449                         ret = debug_object_fixup(descr->fixup_activate, addr, state);
450                         return ret ? 0 : -EINVAL;
451
452                 case ODEBUG_STATE_DESTROYED:
453                         debug_print_object(obj, "activate");
454                         ret = -EINVAL;
455                         break;
456                 default:
457                         ret = 0;
458                         break;
459                 }
460                 raw_spin_unlock_irqrestore(&db->lock, flags);
461                 return ret;
462         }
463
464         raw_spin_unlock_irqrestore(&db->lock, flags);
465         /*
466          * We are here when a static object is activated. We
467          * let the type-specific code confirm whether this is
468          * true or not. If true, we just make sure that the
469          * static object is tracked in the object tracker. If
470          * not, this must be a bug, so we try to fix it up.
471          */
472         if (descr->is_static_object && descr->is_static_object(addr)) {
473                 /* track this static object */
474                 debug_object_init(addr, descr);
475                 debug_object_activate(addr, descr);
476         } else {
477                 debug_print_object(&o, "activate");
478                 ret = debug_object_fixup(descr->fixup_activate, addr,
479                                         ODEBUG_STATE_NOTAVAILABLE);
480                 return ret ? 0 : -EINVAL;
481         }
482         return 0;
483 }
484 EXPORT_SYMBOL_GPL(debug_object_activate);
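
/*
 * Illustrative pairing (hypothetical names as above): activate the object
 * right before handing it to asynchronous machinery, deactivate it when it
 * is no longer pending, and honour the return value:
 *
 *	if (debug_object_activate(f, &foo_debug_descr))
 *		return;
 *	foo_queue(f);
 *	...
 *	foo_complete(f);
 *	debug_object_deactivate(f, &foo_debug_descr);
 */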
485
486 /**
487  * debug_object_deactivate - debug checks when an object is deactivated
488  * @addr:       address of the object
489  * @descr:      pointer to an object specific debug description structure
490  */
491 void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
492 {
493         struct debug_bucket *db;
494         struct debug_obj *obj;
495         unsigned long flags;
496
497         if (!debug_objects_enabled)
498                 return;
499
500         db = get_bucket((unsigned long) addr);
501
502         raw_spin_lock_irqsave(&db->lock, flags);
503
504         obj = lookup_object(addr, db);
505         if (obj) {
506                 switch (obj->state) {
507                 case ODEBUG_STATE_INIT:
508                 case ODEBUG_STATE_INACTIVE:
509                 case ODEBUG_STATE_ACTIVE:
510                         if (!obj->astate)
511                                 obj->state = ODEBUG_STATE_INACTIVE;
512                         else
513                                 debug_print_object(obj, "deactivate");
514                         break;
515
516                 case ODEBUG_STATE_DESTROYED:
517                         debug_print_object(obj, "deactivate");
518                         break;
519                 default:
520                         break;
521                 }
522         } else {
523                 struct debug_obj o = { .object = addr,
524                                        .state = ODEBUG_STATE_NOTAVAILABLE,
525                                        .descr = descr };
526
527                 debug_print_object(&o, "deactivate");
528         }
529
530         raw_spin_unlock_irqrestore(&db->lock, flags);
531 }
532 EXPORT_SYMBOL_GPL(debug_object_deactivate);
533
534 /**
535  * debug_object_destroy - debug checks when an object is destroyed
536  * @addr:       address of the object
537  * @descr:      pointer to an object specific debug description structure
538  */
539 void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
540 {
541         enum debug_obj_state state;
542         struct debug_bucket *db;
543         struct debug_obj *obj;
544         unsigned long flags;
545
546         if (!debug_objects_enabled)
547                 return;
548
549         db = get_bucket((unsigned long) addr);
550
551         raw_spin_lock_irqsave(&db->lock, flags);
552
553         obj = lookup_object(addr, db);
554         if (!obj)
555                 goto out_unlock;
556
557         switch (obj->state) {
558         case ODEBUG_STATE_NONE:
559         case ODEBUG_STATE_INIT:
560         case ODEBUG_STATE_INACTIVE:
561                 obj->state = ODEBUG_STATE_DESTROYED;
562                 break;
563         case ODEBUG_STATE_ACTIVE:
564                 debug_print_object(obj, "destroy");
565                 state = obj->state;
566                 raw_spin_unlock_irqrestore(&db->lock, flags);
567                 debug_object_fixup(descr->fixup_destroy, addr, state);
568                 return;
569
570         case ODEBUG_STATE_DESTROYED:
571                 debug_print_object(obj, "destroy");
572                 break;
573         default:
574                 break;
575         }
576 out_unlock:
577         raw_spin_unlock_irqrestore(&db->lock, flags);
578 }
579 EXPORT_SYMBOL_GPL(debug_object_destroy);
580
581 /**
582  * debug_object_free - debug checks when an object is freed
583  * @addr:       address of the object
584  * @descr:      pointer to an object specific debug description structure
585  */
586 void debug_object_free(void *addr, struct debug_obj_descr *descr)
587 {
588         enum debug_obj_state state;
589         struct debug_bucket *db;
590         struct debug_obj *obj;
591         unsigned long flags;
592
593         if (!debug_objects_enabled)
594                 return;
595
596         db = get_bucket((unsigned long) addr);
597
598         raw_spin_lock_irqsave(&db->lock, flags);
599
600         obj = lookup_object(addr, db);
601         if (!obj)
602                 goto out_unlock;
603
604         switch (obj->state) {
605         case ODEBUG_STATE_ACTIVE:
606                 debug_print_object(obj, "free");
607                 state = obj->state;
608                 raw_spin_unlock_irqrestore(&db->lock, flags);
609                 debug_object_fixup(descr->fixup_free, addr, state);
610                 return;
611         default:
612                 hlist_del(&obj->node);
613                 raw_spin_unlock_irqrestore(&db->lock, flags);
614                 free_object(obj);
615                 return;
616         }
617 out_unlock:
618         raw_spin_unlock_irqrestore(&db->lock, flags);
619 }
620 EXPORT_SYMBOL_GPL(debug_object_free);
621
622 /**
623  * debug_object_assert_init - debug checks when object should be init-ed
624  * @addr:       address of the object
625  * @descr:      pointer to an object specific debug description structure
626  */
627 void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
628 {
629         struct debug_bucket *db;
630         struct debug_obj *obj;
631         unsigned long flags;
632
633         if (!debug_objects_enabled)
634                 return;
635
636         db = get_bucket((unsigned long) addr);
637
638         raw_spin_lock_irqsave(&db->lock, flags);
639
640         obj = lookup_object(addr, db);
641         if (!obj) {
642                 struct debug_obj o = { .object = addr,
643                                        .state = ODEBUG_STATE_NOTAVAILABLE,
644                                        .descr = descr };
645
646                 raw_spin_unlock_irqrestore(&db->lock, flags);
647                 /*
648                  * Maybe the object is static; let the type-specific code
649                  * confirm. If so, track the static object, otherwise
650                  * invoke the fixup callback.
651                  */
652                 if (descr->is_static_object && descr->is_static_object(addr)) {
653                         /* Track this static object */
654                         debug_object_init(addr, descr);
655                 } else {
656                         debug_print_object(&o, "assert_init");
657                         debug_object_fixup(descr->fixup_assert_init, addr,
658                                            ODEBUG_STATE_NOTAVAILABLE);
659                 }
660                 return;
661         }
662
663         raw_spin_unlock_irqrestore(&db->lock, flags);
664 }
665 EXPORT_SYMBOL_GPL(debug_object_assert_init);
666
667 /**
668  * debug_object_active_state - debug checks object usage state machine
669  * @addr:       address of the object
670  * @descr:      pointer to an object specific debug description structure
671  * @expect:     expected state
672  * @next:       state to move to if expected state is found
673  */
674 void
675 debug_object_active_state(void *addr, struct debug_obj_descr *descr,
676                           unsigned int expect, unsigned int next)
677 {
678         struct debug_bucket *db;
679         struct debug_obj *obj;
680         unsigned long flags;
681
682         if (!debug_objects_enabled)
683                 return;
684
685         db = get_bucket((unsigned long) addr);
686
687         raw_spin_lock_irqsave(&db->lock, flags);
688
689         obj = lookup_object(addr, db);
690         if (obj) {
691                 switch (obj->state) {
692                 case ODEBUG_STATE_ACTIVE:
693                         if (obj->astate == expect)
694                                 obj->astate = next;
695                         else
696                                 debug_print_object(obj, "active_state");
697                         break;
698
699                 default:
700                         debug_print_object(obj, "active_state");
701                         break;
702                 }
703         } else {
704                 struct debug_obj o = { .object = addr,
705                                        .state = ODEBUG_STATE_NOTAVAILABLE,
706                                        .descr = descr };
707
708                 debug_print_object(&o, "active_state");
709         }
710
711         raw_spin_unlock_irqrestore(&db->lock, flags);
712 }
713 EXPORT_SYMBOL_GPL(debug_object_active_state);
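
/*
 * Illustrative sketch: the caller-private "astate" tracks a sub-state of an
 * active object; RCU head debugging, for example, switches an rcu_head
 * between a "ready" and a "queued" state around call_rcu(). With
 * hypothetical FOO_READY/FOO_QUEUED constants a user would do:
 *
 *	debug_object_active_state(f, &foo_debug_descr, FOO_READY, FOO_QUEUED);
 *	... queue the object ...
 *	debug_object_active_state(f, &foo_debug_descr, FOO_QUEUED, FOO_READY);
 */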
714
715 #ifdef CONFIG_DEBUG_OBJECTS_FREE
716 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
717 {
718         unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
719         struct hlist_node *tmp;
720         HLIST_HEAD(freelist);
721         struct debug_obj_descr *descr;
722         enum debug_obj_state state;
723         struct debug_bucket *db;
724         struct debug_obj *obj;
725         int cnt;
726
727         saddr = (unsigned long) address;
728         eaddr = saddr + size;
729         paddr = saddr & ODEBUG_CHUNK_MASK;
730         chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
731         chunks >>= ODEBUG_CHUNK_SHIFT;
732
733         for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
734                 db = get_bucket(paddr);
735
736 repeat:
737                 cnt = 0;
738                 raw_spin_lock_irqsave(&db->lock, flags);
739                 hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
740                         cnt++;
741                         oaddr = (unsigned long) obj->object;
742                         if (oaddr < saddr || oaddr >= eaddr)
743                                 continue;
744
745                         switch (obj->state) {
746                         case ODEBUG_STATE_ACTIVE:
747                                 debug_print_object(obj, "free");
748                                 descr = obj->descr;
749                                 state = obj->state;
750                                 raw_spin_unlock_irqrestore(&db->lock, flags);
751                                 debug_object_fixup(descr->fixup_free,
752                                                    (void *) oaddr, state);
753                                 goto repeat;
754                         default:
755                                 hlist_del(&obj->node);
756                                 hlist_add_head(&obj->node, &freelist);
757                                 break;
758                         }
759                 }
760                 raw_spin_unlock_irqrestore(&db->lock, flags);
761
762                 /* Now free them */
763                 hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
764                         hlist_del(&obj->node);
765                         free_object(obj);
766                 }
767
768                 if (cnt > debug_objects_maxchain)
769                         debug_objects_maxchain = cnt;
770         }
771 }
772
773 void debug_check_no_obj_freed(const void *address, unsigned long size)
774 {
775         if (debug_objects_enabled)
776                 __debug_check_no_obj_freed(address, size);
777 }
778 #endif
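
/*
 * Illustrative call site (hypothetical my_free_region(); the real callers
 * live in the memory allocators): the hook is invoked when a memory range is
 * handed back, so objects still tracked inside that range are reported:
 *
 *	static void my_free_region(void *addr, unsigned long size)
 *	{
 *		debug_check_no_obj_freed(addr, size);
 *		... return the memory to the free lists ...
 *	}
 */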
779
780 #ifdef CONFIG_DEBUG_FS
781
782 static int debug_stats_show(struct seq_file *m, void *v)
783 {
784         seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
785         seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
786         seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
787         seq_printf(m, "pool_free     :%d\n", obj_pool_free);
788         seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
789         seq_printf(m, "pool_used     :%d\n", obj_pool_used);
790         seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
791         seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
792         seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
793         return 0;
794 }
795
796 static int debug_stats_open(struct inode *inode, struct file *filp)
797 {
798         return single_open(filp, debug_stats_show, NULL);
799 }
800
801 static const struct file_operations debug_stats_fops = {
802         .open           = debug_stats_open,
803         .read           = seq_read,
804         .llseek         = seq_lseek,
805         .release        = single_release,
806 };
807
808 static int __init debug_objects_init_debugfs(void)
809 {
810         struct dentry *dbgdir, *dbgstats;
811
812         if (!debug_objects_enabled)
813                 return 0;
814
815         dbgdir = debugfs_create_dir("debug_objects", NULL);
816         if (!dbgdir)
817                 return -ENOMEM;
818
819         dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
820                                        &debug_stats_fops);
821         if (!dbgstats)
822                 goto err;
823
824         return 0;
825
826 err:
827         debugfs_remove(dbgdir);
828
829         return -ENOMEM;
830 }
831 __initcall(debug_objects_init_debugfs);
832
833 #else
834 static inline void debug_objects_init_debugfs(void) { }
835 #endif
836
837 #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
838
839 /* Random data structure for the self test */
840 struct self_test {
841         unsigned long   dummy1[6];
842         int             static_init;
843         unsigned long   dummy2[3];
844 };
845
846 static __initdata struct debug_obj_descr descr_type_test;
847
848 static bool __init is_static_object(void *addr)
849 {
850         struct self_test *obj = addr;
851
852         return obj->static_init;
853 }
854
855 /*
856  * fixup_init is called when:
857  * - an active object is initialized
858  */
859 static bool __init fixup_init(void *addr, enum debug_obj_state state)
860 {
861         struct self_test *obj = addr;
862
863         switch (state) {
864         case ODEBUG_STATE_ACTIVE:
865                 debug_object_deactivate(obj, &descr_type_test);
866                 debug_object_init(obj, &descr_type_test);
867                 return true;
868         default:
869                 return false;
870         }
871 }
872
873 /*
874  * fixup_activate is called when:
875  * - an active object is activated
876  * - an unknown non-static object is activated
877  */
878 static bool __init fixup_activate(void *addr, enum debug_obj_state state)
879 {
880         struct self_test *obj = addr;
881
882         switch (state) {
883         case ODEBUG_STATE_NOTAVAILABLE:
884                 return true;
885         case ODEBUG_STATE_ACTIVE:
886                 debug_object_deactivate(obj, &descr_type_test);
887                 debug_object_activate(obj, &descr_type_test);
888                 return true;
889
890         default:
891                 return false;
892         }
893 }
894
895 /*
896  * fixup_destroy is called when:
897  * - an active object is destroyed
898  */
899 static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
900 {
901         struct self_test *obj = addr;
902
903         switch (state) {
904         case ODEBUG_STATE_ACTIVE:
905                 debug_object_deactivate(obj, &descr_type_test);
906                 debug_object_destroy(obj, &descr_type_test);
907                 return true;
908         default:
909                 return false;
910         }
911 }
912
913 /*
914  * fixup_free is called when:
915  * - an active object is freed
916  */
917 static bool __init fixup_free(void *addr, enum debug_obj_state state)
918 {
919         struct self_test *obj = addr;
920
921         switch (state) {
922         case ODEBUG_STATE_ACTIVE:
923                 debug_object_deactivate(obj, &descr_type_test);
924                 debug_object_free(obj, &descr_type_test);
925                 return true;
926         default:
927                 return false;
928         }
929 }
930
931 static int __init
932 check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
933 {
934         struct debug_bucket *db;
935         struct debug_obj *obj;
936         unsigned long flags;
937         int res = -EINVAL;
938
939         db = get_bucket((unsigned long) addr);
940
941         raw_spin_lock_irqsave(&db->lock, flags);
942
943         obj = lookup_object(addr, db);
944         if (!obj && state != ODEBUG_STATE_NONE) {
945                 WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
946                 goto out;
947         }
948         if (obj && obj->state != state) {
949                 WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
950                        obj->state, state);
951                 goto out;
952         }
953         if (fixups != debug_objects_fixups) {
954                 WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
955                        fixups, debug_objects_fixups);
956                 goto out;
957         }
958         if (warnings != debug_objects_warnings) {
959                 WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
960                        warnings, debug_objects_warnings);
961                 goto out;
962         }
963         res = 0;
964 out:
965         raw_spin_unlock_irqrestore(&db->lock, flags);
966         if (res)
967                 debug_objects_enabled = 0;
968         return res;
969 }
970
971 static __initdata struct debug_obj_descr descr_type_test = {
972         .name                   = "selftest",
973         .is_static_object       = is_static_object,
974         .fixup_init             = fixup_init,
975         .fixup_activate         = fixup_activate,
976         .fixup_destroy          = fixup_destroy,
977         .fixup_free             = fixup_free,
978 };
979
980 static __initdata struct self_test obj = { .static_init = 0 };
981
982 static void __init debug_objects_selftest(void)
983 {
984         int fixups, oldfixups, warnings, oldwarnings;
985         unsigned long flags;
986
987         local_irq_save(flags);
988
989         fixups = oldfixups = debug_objects_fixups;
990         warnings = oldwarnings = debug_objects_warnings;
991         descr_test = &descr_type_test;
992
993         debug_object_init(&obj, &descr_type_test);
994         if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
995                 goto out;
996         debug_object_activate(&obj, &descr_type_test);
997         if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
998                 goto out;
999         debug_object_activate(&obj, &descr_type_test);
1000         if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
1001                 goto out;
1002         debug_object_deactivate(&obj, &descr_type_test);
1003         if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
1004                 goto out;
1005         debug_object_destroy(&obj, &descr_type_test);
1006         if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
1007                 goto out;
1008         debug_object_init(&obj, &descr_type_test);
1009         if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1010                 goto out;
1011         debug_object_activate(&obj, &descr_type_test);
1012         if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1013                 goto out;
1014         debug_object_deactivate(&obj, &descr_type_test);
1015         if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1016                 goto out;
1017         debug_object_free(&obj, &descr_type_test);
1018         if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1019                 goto out;
1020
1021         obj.static_init = 1;
1022         debug_object_activate(&obj, &descr_type_test);
1023         if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1024                 goto out;
1025         debug_object_init(&obj, &descr_type_test);
1026         if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
1027                 goto out;
1028         debug_object_free(&obj, &descr_type_test);
1029         if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1030                 goto out;
1031
1032 #ifdef CONFIG_DEBUG_OBJECTS_FREE
1033         debug_object_init(&obj, &descr_type_test);
1034         if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1035                 goto out;
1036         debug_object_activate(&obj, &descr_type_test);
1037         if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1038                 goto out;
1039         __debug_check_no_obj_freed(&obj, sizeof(obj));
1040         if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
1041                 goto out;
1042 #endif
1043         pr_info("selftest passed\n");
1044
1045 out:
1046         debug_objects_fixups = oldfixups;
1047         debug_objects_warnings = oldwarnings;
1048         descr_test = NULL;
1049
1050         local_irq_restore(flags);
1051 }
1052 #else
1053 static inline void debug_objects_selftest(void) { }
1054 #endif
1055
1056 /*
1057  * Called during early boot to initialize the hash buckets and link
1058  * the static object pool objects into the pool list. After this call
1059  * the object tracker is fully operational.
1060  */
1061 void __init debug_objects_early_init(void)
1062 {
1063         int i;
1064
1065         for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1066                 raw_spin_lock_init(&obj_hash[i].lock);
1067
1068         for (i = 0; i < ODEBUG_POOL_SIZE; i++)
1069                 hlist_add_head(&obj_static_pool[i].node, &obj_pool);
1070 }
1071
1072 /*
1073  * Convert the statically allocated objects to dynamic ones:
1074  */
1075 static int __init debug_objects_replace_static_objects(void)
1076 {
1077         struct debug_bucket *db = obj_hash;
1078         struct hlist_node *tmp;
1079         struct debug_obj *obj, *new;
1080         HLIST_HEAD(objects);
1081         int i, cnt = 0;
1082
1083         for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
1084                 obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
1085                 if (!obj)
1086                         goto free;
1087                 hlist_add_head(&obj->node, &objects);
1088         }
1089
1090         /*
1091          * When debug_objects_mem_init() is called we know that only
1092          * one CPU is up, so disabling interrupts is enough
1093          * protection. This avoids the lockdep hell of lock ordering.
1094          */
1095         local_irq_disable();
1096
1097         /* Remove the statically allocated objects from the pool */
1098         hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
1099                 hlist_del(&obj->node);
1100         /* Move the allocated objects to the pool */
1101         hlist_move_list(&objects, &obj_pool);
1102
1103         /* Replace the active object references */
1104         for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
1105                 hlist_move_list(&db->list, &objects);
1106
1107                 hlist_for_each_entry(obj, &objects, node) {
1108                         new = hlist_entry(obj_pool.first, typeof(*obj), node);
1109                         hlist_del(&new->node);
1110                         /* copy object data */
1111                         *new = *obj;
1112                         hlist_add_head(&new->node, &db->list);
1113                         cnt++;
1114                 }
1115         }
1116         local_irq_enable();
1117
1118         pr_debug("%d of %d active objects replaced\n",
1119                  cnt, obj_pool_used);
1120         return 0;
1121 free:
1122         hlist_for_each_entry_safe(obj, tmp, &objects, node) {
1123                 hlist_del(&obj->node);
1124                 kmem_cache_free(obj_cache, obj);
1125         }
1126         return -ENOMEM;
1127 }
1128
1129 /*
1130  * Called after the kmem_caches are functional to set up a dedicated
1131  * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
1132  * prevents the debug code from being called on kmem_cache_free() for
1133  * the debug tracker objects themselves, avoiding recursive calls.
1134  */
1135 void __init debug_objects_mem_init(void)
1136 {
1137         if (!debug_objects_enabled)
1138                 return;
1139
1140         obj_cache = kmem_cache_create("debug_objects_cache",
1141                                       sizeof (struct debug_obj), 0,
1142                                       SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
1143                                       NULL);
1144
1145         if (!obj_cache || debug_objects_replace_static_objects()) {
1146                 debug_objects_enabled = 0;
1147                 if (obj_cache)
1148                         kmem_cache_destroy(obj_cache);
1149                 pr_warn("out of memory.\n");
1150         } else
1151                 debug_objects_selftest();
1152
1153         /*
1154          * Increase the thresholds for allocating and freeing objects
1155          * according to the number of possible CPUs available in the system.
1156          */
1157         debug_objects_pool_size += num_possible_cpus() * 32;
1158         debug_objects_pool_min_level += num_possible_cpus() * 4;
1159 }
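
/*
 * Worked example for the scaling above, using the constants from this file:
 * on a system with 8 possible CPUs the free pool target becomes
 * 1024 + 8 * 32 = 1280 objects and the refill threshold
 * 256 + 8 * 4 = 288 objects.
 */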