/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */
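
/*
 * Illustrative usage sketch (not part of the original file): a subsystem
 * that wants its objects tracked declares a struct debug_obj_descr and
 * brackets the object's life cycle with the debug_object_* calls below.
 * All names in this sketch (struct foo, foo_descr, foo_init, foo_start,
 * foo_release) are hypothetical.
 *
 *	static struct debug_obj_descr foo_descr = {
 *		.name = "foo",
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_descr);
 *	}
 *
 *	void foo_start(struct foo *f)
 *	{
 *		debug_object_activate(f, &foo_descr);
 *	}
 *
 *	void foo_release(struct foo *f)
 *	{
 *		debug_object_deactivate(f, &foo_descr);
 *		debug_object_free(f, &foo_descr);
 *	}
 */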

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static struct debug_obj_descr	*descr_test __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
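
/*
 * Overview of the state transitions implemented below (summary derived
 * from the switch statements in this file):
 *
 *	debug_object_init():		none/init/inactive	-> init
 *	debug_object_activate():	init/inactive		-> active
 *	debug_object_deactivate():	init/inactive/active	-> inactive
 *	debug_object_destroy():		none/init/inactive	-> destroyed
 *	debug_object_free():		drops the tracking object entirely
 *
 * Any other transition is reported via debug_print_object() and, where
 * the descriptor provides a fixup callback, handed to debug_object_fixup().
 */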

static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return;

	if (unlikely(!obj_cache))
		return;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Look up an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}

/*
 * workqueue function to free objects.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = keventd_up();
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	if (sched)
		schedule_work(&debug_obj_work);
}

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
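
/*
 * Worked example for get_bucket() above: with ODEBUG_CHUNK_SHIFT equal to
 * PAGE_SHIFT (4096-byte chunks on most configurations), the addresses
 * 0x1000 and 0x1ff0 both lie in chunk 1 and therefore hash to the same
 * bucket, while 0x2000 falls into chunk 2. This chunk granularity is what
 * lets __debug_check_no_obj_freed() scan a freed memory range bucket by
 * bucket.
 */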

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}
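
/*
 * A fixup callback returns true when it was able to repair the reported
 * situation (only then is the fixup counter above bumped) and false when
 * there was nothing it could do. The fixup_* functions in the selftest
 * section below can serve as minimal examples of such callbacks.
 */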

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object is on stack, but not annotated\n");
	else
		pr_warn("object is not on stack, but annotated\n");
	WARN_ON(1);
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * We get here when a static object is activated. We let the
	 * type specific code confirm whether this really is a static
	 * object. If so, we just make sure that the static object is
	 * tracked in the object tracker. If not, this must be a bug,
	 * so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					 ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
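
/*
 * Callers that care about the activate result typically just warn on
 * failure. Illustrative only; "obj" and "obj_debug_descr" below are
 * made-up names:
 *
 *	WARN_ON_ONCE(debug_object_activate(obj, &obj_debug_descr));
 */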

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static, and we let the type specific
		 * code confirm. Track this static object if true, else invoke
		 * fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
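
/*
 * debug_object_active_state() lets a user subdivide the ACTIVE state via
 * obj->astate. An illustrative (hypothetical) pattern: mark an object as
 * queued before handing it to another context and verify/clear that mark
 * when it comes back. STATE_IDLE and STATE_QUEUED are made-up names:
 *
 *	debug_object_active_state(obj, &descr, STATE_IDLE, STATE_QUEUED);
 *	...
 *	debug_object_active_state(obj, &descr, STATE_QUEUED, STATE_IDLE);
 *
 * A deactivate while astate is non-zero is reported, see
 * debug_object_deactivate() above.
 */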

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
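
/*
 * Note on boot ordering: debug_objects_early_init() above runs before any
 * allocator is available and therefore uses the static pool, while
 * debug_objects_mem_init() below, called once the kmem_caches work, swaps
 * those static entries for dynamically allocated ones via
 * debug_objects_replace_static_objects().
 */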

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}
	local_irq_enable();

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects themselves, which would recurse.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
	} else
		debug_objects_selftest();
}