/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static struct debug_obj_descr	*descr_test  __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);

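/*
 * Kernel command line options: "debug_objects" force-enables the
 * tracker, "no_debug_objects" disables it, overriding
 * CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT (see the early_param() hooks
 * below).
 */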
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

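/*
 * Refill the object pool from the slab cache until it reaches
 * ODEBUG_POOL_MIN_LEVEL again. Allocations are atomic and best effort
 * (__GFP_NORETRY | __GFP_NOWARN); before obj_cache exists the pool
 * consists of the static boot-time objects only.
 */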
static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return;

	if (unlikely(!obj_cache))
		return;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr = descr;
		obj->state = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}

/*
 * workqueue function to free objects.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = keventd_up();
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	if (sched)
		schedule_work(&debug_obj_work);
}

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

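/*
 * Print a rate limited warning (at most five, and never for the
 * selftest descriptor) about an object caught in an unexpected state,
 * including the descriptor's debug_hint() if one is provided, and
 * count it in debug_objects_warnings.
 */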
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

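/*
 * Warn (at most five times) when the on-stack annotation of an object
 * does not match where it actually lives: either an object on the
 * current task's stack was initialized via debug_object_init(), or an
 * object elsewhere was initialized via debug_object_init_on_stack().
 */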
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));

	WARN_ON(1);
}

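/*
 * Core of debug_object_init() and debug_object_init_on_stack(): look
 * up (or allocate) the tracking object for @addr in its hash bucket,
 * check the stack annotation for newly allocated entries and run the
 * "init" state transition, invoking fixup_init() if the object is
 * still active.
 */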
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * We are here when a static object is activated. We
	 * let the type specific code confirm whether this is
	 * true or not. If true, we just make sure that the
	 * static object is tracked in the object tracker. If
	 * not, this must be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					 ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * The object may be statically allocated. Let the type
		 * specific code confirm it; if so, track this static
		 * object, otherwise invoke the fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_active_state);

#ifdef CONFIG_DEBUG_OBJECTS_FREE
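/*
 * Scan all ODEBUG_CHUNK_SIZE chunks covered by the freed memory range
 * and remove any tracking objects that fall inside it. Objects still
 * in the ACTIVE state are reported and the descriptor's fixup_free()
 * hook gets a chance to clean them up; the bucket scan is then
 * restarted because the lock was dropped for the fixup.
 */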
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

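/*
 * Expose the tracker statistics under <debugfs>/debug_objects/stats:
 * maximum hash chain length, warning and fixup counters, and the
 * object pool watermarks.
 */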
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

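/*
 * is_static_object() callback for the selftest descriptor: the
 * selftest marks its object as static by setting ->static_init.
 */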
static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

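/*
 * Compare the tracked state of the selftest object and the global
 * fixup/warning counters against the expected values; on any mismatch
 * warn and disable the object tracker.
 */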
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		     obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		     fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		     warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

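/*
 * Walk the selftest object through legal and deliberately illegal
 * state transitions (with interrupts disabled) and verify after each
 * step that the tracker recorded the expected state, fixups and
 * warnings. The counters are restored afterwards so the selftest does
 * not pollute the statistics.
 */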
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}
	local_irq_enable();

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects themselves, avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
	} else
		debug_objects_selftest();
}