debugobjects: Use global free list in __debug_check_no_obj_freed()
[linux-block.git] / lib / debugobjects.c
1 /*
2  * Generic infrastructure for lifetime debugging of objects.
3  *
4  * Started by Thomas Gleixner
5  *
6  * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
7  *
8  * For licencing details see kernel-base/COPYING
9  */
10
11 #define pr_fmt(fmt) "ODEBUG: " fmt
12
13 #include <linux/debugobjects.h>
14 #include <linux/interrupt.h>
15 #include <linux/sched.h>
16 #include <linux/sched/task_stack.h>
17 #include <linux/seq_file.h>
18 #include <linux/debugfs.h>
19 #include <linux/slab.h>
20 #include <linux/hash.h>
21 #include <linux/kmemleak.h>
22
/* Hash table size: 2^14 buckets for tracked objects. */
#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

/* Initial pool size and the low-water mark that triggers a refill. */
#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256

/* Objects are hashed by the page-sized chunk containing their address. */
#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
32
/*
 * One hash bucket: the list of tracked objects hashed to it, plus the
 * raw spinlock protecting that list.
 */
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};
37
/* Hash table of buckets holding all currently tracked objects. */
static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/*
 * Boot-time object pool (__initdata). NOTE(review): the code that seeds
 * obj_pool from this array is not visible in this chunk — confirm in the
 * init path.
 */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* Protects obj_pool, obj_to_free and the pool counters below. */
static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);	/* objects available for allocation */
static HLIST_HEAD(obj_to_free);	/* surplus objects queued for kmem_cache_free() */

/* Statistics: lowest number of free pool objects ever observed. */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
/* Current number of objects on obj_pool. */
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
/* The number of objs on the global free list */
static int			obj_nr_tofree;
static struct kmem_cache	*obj_cache;

static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
/* Selftest descriptor; debug_print_object() suppresses warnings for it. */
static struct debug_obj_descr	*descr_test  __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

/* Deferred freeing of surplus objects, see free_obj_work(). */
static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
75
/* "debug_objects" boot parameter: force object debugging on. */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

/* "no_debug_objects" boot parameter: force object debugging off. */
static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
90
/* Human readable names for the object states, indexed by obj->state. */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
99
/*
 * Refill the allocator pool up to debug_objects_pool_min_level: first by
 * recycling objects parked on the global free list, then by allocating
 * fresh ones from the slab cache. GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN
 * keeps the allocation safe from the contexts the debug hooks run in.
 */
static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new, *obj;
	unsigned long flags;

	/* Fast path: pool is sufficiently filled. */
	if (likely(obj_pool_free >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 */
	while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		if (obj_nr_tofree) {
			/* Move one object from the free list to the pool. */
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			obj_nr_tofree--;
			hlist_add_head(&obj->node, &obj_pool);
			obj_pool_free++;
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	/* Before the slab cache is set up nothing can be allocated. */
	if (unlikely(!obj_cache))
		return;

	while (obj_pool_free < debug_objects_pool_min_level) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return;

		/* Pool objects are long lived; hide them from kmemleak. */
		kmemleak_ignore(new);
		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		debug_objects_allocated++;
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}
146
147 /*
148  * Lookup an object in the hash bucket.
149  */
150 static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
151 {
152         struct debug_obj *obj;
153         int cnt = 0;
154
155         hlist_for_each_entry(obj, &b->list, node) {
156                 cnt++;
157                 if (obj->object == addr)
158                         return obj;
159         }
160         if (cnt > debug_objects_maxchain)
161                 debug_objects_maxchain = cnt;
162
163         return NULL;
164 }
165
/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 *
 * Takes the first entry off obj_pool, initializes it for @addr/@descr,
 * hooks it into bucket @b and updates the pool statistics. Returns NULL
 * when the pool is empty.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj	    = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		/* Track the object in the hash bucket of its address. */
		hlist_add_head(&obj->node, &b->list);

		/* Statistics: high water mark of used objects. */
		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		/* Statistics: low water mark of the free pool. */
		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}
199
200 /*
201  * workqueue function to free objects.
202  *
203  * To reduce contention on the global pool_lock, the actual freeing of
204  * debug objects will be delayed if the pool_lock is busy.
205  */
206 static void free_obj_work(struct work_struct *work)
207 {
208         struct hlist_node *tmp;
209         struct debug_obj *obj;
210         unsigned long flags;
211         HLIST_HEAD(tofree);
212
213         if (!raw_spin_trylock_irqsave(&pool_lock, flags))
214                 return;
215
216         /*
217          * The objs on the pool list might be allocated before the work is
218          * run, so recheck if pool list it full or not, if not fill pool
219          * list from the global free list
220          */
221         while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
222                 obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
223                 hlist_del(&obj->node);
224                 hlist_add_head(&obj->node, &obj_pool);
225                 obj_pool_free++;
226                 obj_nr_tofree--;
227         }
228
229         /*
230          * Pool list is already full and there are still objs on the free
231          * list. Move remaining free objs to a temporary list to free the
232          * memory outside the pool_lock held region.
233          */
234         if (obj_nr_tofree) {
235                 hlist_move_list(&obj_to_free, &tofree);
236                 obj_nr_tofree = 0;
237         }
238         raw_spin_unlock_irqrestore(&pool_lock, flags);
239
240         hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
241                 hlist_del(&obj->node);
242                 kmem_cache_free(obj_cache, obj);
243         }
244 }
245
246 static bool __free_object(struct debug_obj *obj)
247 {
248         unsigned long flags;
249         bool work;
250
251         raw_spin_lock_irqsave(&pool_lock, flags);
252         work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
253         obj_pool_used--;
254
255         if (work) {
256                 obj_nr_tofree++;
257                 hlist_add_head(&obj->node, &obj_to_free);
258         } else {
259                 obj_pool_free++;
260                 hlist_add_head(&obj->node, &obj_pool);
261         }
262         raw_spin_unlock_irqrestore(&pool_lock, flags);
263         return work;
264 }
265
266 /*
267  * Put the object back into the pool and schedule work to free objects
268  * if necessary.
269  */
270 static void free_object(struct debug_obj *obj)
271 {
272         if (__free_object(obj))
273                 schedule_work(&debug_obj_work);
274 }
275
276 /*
277  * We run out of memory. That means we probably have tons of objects
278  * allocated.
279  */
280 static void debug_objects_oom(void)
281 {
282         struct debug_bucket *db = obj_hash;
283         struct hlist_node *tmp;
284         HLIST_HEAD(freelist);
285         struct debug_obj *obj;
286         unsigned long flags;
287         int i;
288
289         pr_warn("Out of memory. ODEBUG disabled\n");
290
291         for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
292                 raw_spin_lock_irqsave(&db->lock, flags);
293                 hlist_move_list(&db->list, &freelist);
294                 raw_spin_unlock_irqrestore(&db->lock, flags);
295
296                 /* Now free them */
297                 hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
298                         hlist_del(&obj->node);
299                         free_object(obj);
300                 }
301         }
302 }
303
304 /*
305  * We use the pfn of the address for the hash. That way we can check
306  * for freed objects simply by checking the affected bucket.
307  */
308 static struct debug_bucket *get_bucket(unsigned long addr)
309 {
310         unsigned long hash;
311
312         hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
313         return &obj_hash[hash];
314 }
315
/*
 * Report a state violation on @obj. The WARN is rate limited to five
 * reports and suppressed for the selftest descriptor; the warnings
 * counter is bumped unconditionally.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		/* Let the type specific code provide a pointer hint. */
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}
332
333 /*
334  * Try to repair the damage, so we have a better chance to get useful
335  * debug output.
336  */
337 static bool
338 debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
339                    void * addr, enum debug_obj_state state)
340 {
341         if (fixup && fixup(addr, state)) {
342                 debug_objects_fixups++;
343                 return true;
344         }
345         return false;
346 }
347
/*
 * Warn (at most five times) when the caller's on-stack annotation of an
 * object disagrees with where the object actually lives.
 */
static void debug_object_is_on_stack(void *addr, int onstack)
{
	static int limit;
	int on_stack;

	if (limit > 4)
		return;

	on_stack = object_is_on_stack(addr);
	if (on_stack == onstack)
		return;

	limit++;
	if (!on_stack)
		pr_warn("object is not on stack, but annotated\n");
	else
		pr_warn("object is on stack, but not annotated\n");
	WARN_ON(1);
}
367
/*
 * Common implementation of debug_object_init() and
 * debug_object_init_on_stack(): track the object at @addr and move it
 * to ODEBUG_STATE_INIT. Initializing an active object is reported and
 * handed to the fixup_init callback.
 * @onstack: 1 when the caller annotated the object as living on the stack.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Top up the object pool before taking the bucket lock. */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: switch debugging off for good. */
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		/* Newly tracked: verify the on-stack annotation. */
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/* Init of an active object: report and try to repair. */
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
417
418 /**
419  * debug_object_init - debug checks when an object is initialized
420  * @addr:       address of the object
421  * @descr:      pointer to an object specific debug description structure
422  */
423 void debug_object_init(void *addr, struct debug_obj_descr *descr)
424 {
425         if (!debug_objects_enabled)
426                 return;
427
428         __debug_object_init(addr, descr, 0);
429 }
430 EXPORT_SYMBOL_GPL(debug_object_init);
431
432 /**
433  * debug_object_init_on_stack - debug checks when an object on stack is
434  *                              initialized
435  * @addr:       address of the object
436  * @descr:      pointer to an object specific debug description structure
437  */
438 void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
439 {
440         if (!debug_objects_enabled)
441                 return;
442
443         __debug_object_init(addr, descr, 1);
444 }
445 EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
446
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	/* On-stack dummy used for reporting untracked objects. */
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			/* Double activation: report, let the fixup decide. */
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * We are here when a static object is activated. We
	 * let the type specific code confirm whether this is
	 * true or not. if true, we just make sure that the
	 * static object is tracked in the object tracker. If
	 * not, this must be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
520
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			/* A pending extra state (astate) blocks deactivation. */
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/* Untracked object: report via an on-stack dummy. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);
568
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		/* Destroy of an active object: report and try to repair. */
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		/* Double destroy. */
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);
615
/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		/* Freeing an active object: report and try to repair. */
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		/* Untrack the object and recycle the tracking entry. */
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);
656
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		/* On-stack dummy used for reporting untracked objects. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static, and we let the type specific
		 * code confirm. Track this static object if true, else invoke
		 * fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
701
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			/* Advance astate only from the expected value. */
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		/* Untracked object: report via an on-stack dummy. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
749
750 #ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Scan the chunk-sized ranges overlapping [@address, @address + @size)
 * for objects still being tracked. Active objects are reported and run
 * through their fixup_free callback; all others are untracked and put
 * back via __free_object(), with the actual slab free deferred to the
 * workqueue.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;
	bool work = false;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	/* Round the range out to whole chunks for the bucket walk. */
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			/* The bucket can hold objects outside the range. */
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				/*
				 * Drop the lock for the fixup callback and
				 * restart the bucket scan from scratch.
				 */
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				work |= __free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (work)
		schedule_work(&debug_obj_work);
}
810
811 void debug_check_no_obj_freed(const void *address, unsigned long size)
812 {
813         if (debug_objects_enabled)
814                 __debug_check_no_obj_freed(address, size);
815 }
816 #endif
817
818 #ifdef CONFIG_DEBUG_FS
819
/* debugfs "stats" file: dump all debugobjects counters. */
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", obj_nr_tofree);
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
835
/* debugfs open: wire the stats file up to debug_stats_show(). */
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}
840
/* File operations of the debugfs "stats" file (single-shot seq file). */
static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
847
848 static int __init debug_objects_init_debugfs(void)
849 {
850         struct dentry *dbgdir, *dbgstats;
851
852         if (!debug_objects_enabled)
853                 return 0;
854
855         dbgdir = debugfs_create_dir("debug_objects", NULL);
856         if (!dbgdir)
857                 return -ENOMEM;
858
859         dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
860                                        &debug_stats_fops);
861         if (!dbgstats)
862                 goto err;
863
864         return 0;
865
866 err:
867         debugfs_remove(dbgdir);
868
869         return -ENOMEM;
870 }
871 __initcall(debug_objects_init_debugfs);
872
#else
/* debugfs not configured: the stats interface compiles away. */
static inline void debug_objects_init_debugfs(void) { }
#endif
876
877 #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
878
/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;	/* non-zero marks the object as static */
	unsigned long	dummy2[3];
};

/* Descriptor used by all selftest objects; filled in by the test code. */
static __initdata struct debug_obj_descr descr_type_test;
887
888 static bool __init is_static_object(void *addr)
889 {
890         struct self_test *obj = addr;
891
892         return obj->static_init;
893 }
894
895 /*
896  * fixup_init is called when:
897  * - an active object is initialized
898  */
899 static bool __init fixup_init(void *addr, enum debug_obj_state state)
900 {
901         struct self_test *obj = addr;
902
903         switch (state) {
904         case ODEBUG_STATE_ACTIVE:
905                 debug_object_deactivate(obj, &descr_type_test);
906                 debug_object_init(obj, &descr_type_test);
907                 return true;
908         default:
909                 return false;
910         }
911 }
912
913 /*
914  * fixup_activate is called when:
915  * - an active object is activated
916  * - an unknown non-static object is activated
917  */
918 static bool __init fixup_activate(void *addr, enum debug_obj_state state)
919 {
920         struct self_test *obj = addr;
921
922         switch (state) {
923         case ODEBUG_STATE_NOTAVAILABLE:
924                 return true;
925         case ODEBUG_STATE_ACTIVE:
926                 debug_object_deactivate(obj, &descr_type_test);
927                 debug_object_activate(obj, &descr_type_test);
928                 return true;
929
930         default:
931                 return false;
932         }
933 }
934
935 /*
936  * fixup_destroy is called when:
937  * - an active object is destroyed
938  */
939 static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
940 {
941         struct self_test *obj = addr;
942
943         switch (state) {
944         case ODEBUG_STATE_ACTIVE:
945                 debug_object_deactivate(obj, &descr_type_test);
946                 debug_object_destroy(obj, &descr_type_test);
947                 return true;
948         default:
949                 return false;
950         }
951 }
952
953 /*
954  * fixup_free is called when:
955  * - an active object is freed
956  */
957 static bool __init fixup_free(void *addr, enum debug_obj_state state)
958 {
959         struct self_test *obj = addr;
960
961         switch (state) {
962         case ODEBUG_STATE_ACTIVE:
963                 debug_object_deactivate(obj, &descr_type_test);
964                 debug_object_free(obj, &descr_type_test);
965                 return true;
966         default:
967                 return false;
968         }
969 }
970
971 static int __init
972 check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
973 {
974         struct debug_bucket *db;
975         struct debug_obj *obj;
976         unsigned long flags;
977         int res = -EINVAL;
978
979         db = get_bucket((unsigned long) addr);
980
981         raw_spin_lock_irqsave(&db->lock, flags);
982
983         obj = lookup_object(addr, db);
984         if (!obj && state != ODEBUG_STATE_NONE) {
985                 WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
986                 goto out;
987         }
988         if (obj && obj->state != state) {
989                 WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
990                        obj->state, state);
991                 goto out;
992         }
993         if (fixups != debug_objects_fixups) {
994                 WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
995                        fixups, debug_objects_fixups);
996                 goto out;
997         }
998         if (warnings != debug_objects_warnings) {
999                 WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
1000                        warnings, debug_objects_warnings);
1001                 goto out;
1002         }
1003         res = 0;
1004 out:
1005         raw_spin_unlock_irqrestore(&db->lock, flags);
1006         if (res)
1007                 debug_objects_enabled = 0;
1008         return res;
1009 }
1010
/* Descriptor wiring the selftest callbacks into the debugobjects core */
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};
1019
1020 static __initdata struct self_test obj = { .static_init = 0 };
1021
/*
 * Exercise the tracker state machine: each transition below has a known
 * expected state and fixup/warning counter delta, verified by
 * check_results(). Runs with interrupts disabled so nothing else can
 * disturb the counters; any failure disables debugobjects via
 * check_results().
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	/* Route the core's selftest hook to our descriptor */
	descr_test = &descr_type_test;

	/* Normal lifecycle: init -> activate */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* Double activation must trigger one fixup and one warning */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	/* Operations on a destroyed object only warn; state stays DESTROYED */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Activating an untracked static object is silently accepted */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* Re-init of an active object: fixup deactivates and re-inits */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	/* Freeing memory that still holds an active object must fix it up */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	/* Restore the counters so the selftest does not skew the stats */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif
1095
1096 /*
1097  * Called during early boot to initialize the hash buckets and link
1098  * the static object pool objects into the poll list. After this call
1099  * the object tracker is fully operational.
1100  */
1101 void __init debug_objects_early_init(void)
1102 {
1103         int i;
1104
1105         for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1106                 raw_spin_lock_init(&obj_hash[i].lock);
1107
1108         for (i = 0; i < ODEBUG_POOL_SIZE; i++)
1109                 hlist_add_head(&obj_static_pool[i].node, &obj_pool);
1110 }
1111
1112 /*
1113  * Convert the statically allocated objects to dynamic ones:
1114  */
1115 static int __init debug_objects_replace_static_objects(void)
1116 {
1117         struct debug_bucket *db = obj_hash;
1118         struct hlist_node *tmp;
1119         struct debug_obj *obj, *new;
1120         HLIST_HEAD(objects);
1121         int i, cnt = 0;
1122
1123         for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
1124                 obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
1125                 if (!obj)
1126                         goto free;
1127                 kmemleak_ignore(obj);
1128                 hlist_add_head(&obj->node, &objects);
1129         }
1130
1131         /*
1132          * When debug_objects_mem_init() is called we know that only
1133          * one CPU is up, so disabling interrupts is enough
1134          * protection. This avoids the lockdep hell of lock ordering.
1135          */
1136         local_irq_disable();
1137
1138         /* Remove the statically allocated objects from the pool */
1139         hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
1140                 hlist_del(&obj->node);
1141         /* Move the allocated objects to the pool */
1142         hlist_move_list(&objects, &obj_pool);
1143
1144         /* Replace the active object references */
1145         for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
1146                 hlist_move_list(&db->list, &objects);
1147
1148                 hlist_for_each_entry(obj, &objects, node) {
1149                         new = hlist_entry(obj_pool.first, typeof(*obj), node);
1150                         hlist_del(&new->node);
1151                         /* copy object data */
1152                         *new = *obj;
1153                         hlist_add_head(&new->node, &db->list);
1154                         cnt++;
1155                 }
1156         }
1157         local_irq_enable();
1158
1159         pr_debug("%d of %d active objects replaced\n",
1160                  cnt, obj_pool_used);
1161         return 0;
1162 free:
1163         hlist_for_each_entry_safe(obj, tmp, &objects, node) {
1164                 hlist_del(&obj->node);
1165                 kmem_cache_free(obj_cache, obj);
1166         }
1167         return -ENOMEM;
1168 }
1169
1170 /*
1171  * Called after the kmem_caches are functional to setup a dedicated
1172  * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
1173  * prevents that the debug code is called on kmem_cache_free() for the
1174  * debug tracker objects to avoid recursive calls.
1175  */
1176 void __init debug_objects_mem_init(void)
1177 {
1178         if (!debug_objects_enabled)
1179                 return;
1180
1181         obj_cache = kmem_cache_create("debug_objects_cache",
1182                                       sizeof (struct debug_obj), 0,
1183                                       SLAB_DEBUG_OBJECTS, NULL);
1184
1185         if (!obj_cache || debug_objects_replace_static_objects()) {
1186                 debug_objects_enabled = 0;
1187                 if (obj_cache)
1188                         kmem_cache_destroy(obj_cache);
1189                 pr_warn("out of memory.\n");
1190         } else
1191                 debug_objects_selftest();
1192
1193         /*
1194          * Increase the thresholds for allocating and freeing objects
1195          * according to the number of possible CPUs available in the system.
1196          */
1197         debug_objects_pool_size += num_possible_cpus() * 32;
1198         debug_objects_pool_min_level += num_possible_cpus() * 4;
1199 }