// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
/*
 * The freeing of debug objects via the workqueue is limited to a maximum
 * frequency of 10Hz and about 1024 objects per freeing operation, so at
 * most ~10k debug objects are freed per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
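/*
 * Worked example of the delay above: with HZ=250,
 * ODEBUG_FREE_WORK_DELAY = DIV_ROUND_UP(250, 10) = 25 jiffies, i.e. the
 * free worker runs at most once every 100ms.
 */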

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache	*obj_cache __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
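
/*
 * A minimal usage sketch (names below are hypothetical, not part of this
 * file): a subsystem wires a debug_obj_descr into its object lifetime
 * calls and lets this tracker check the state transitions.
 *
 *	static const struct debug_obj_descr my_obj_debug_descr = {
 *		.name		= "my_obj",
 *		.fixup_init	= my_obj_fixup_init,
 *	};
 *
 *	void my_obj_setup(struct my_obj *o)
 *	{
 *		debug_object_init(o, &my_obj_debug_descr);
 *	}
 *
 *	void my_obj_start(struct my_obj *o)
 *	{
 *		debug_object_activate(o, &my_obj_debug_descr);
 *	}
 */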
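/*
 * Refill the global object pool: first recycle objects parked on the
 * global free list, then, once the slab cache is available, allocate
 * fresh objects in batches of ODEBUG_BATCH_SIZE with GFP_ATOMIC.
 */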
static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;
	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full. If not, refill
	 * the pool list from the global free list. As it is likely that a
	 * workload may be gearing up to use more and more objects, don't
	 * free any of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

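/*
 * Return an object to the pools: prefer the percpu free list; when it
 * is full, pull a lookahead batch out of it and hand everything to the
 * global pool, queueing objects for the free worker once the global
 * pool exceeds debug_objects_pool_size.
 */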
static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
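/*
 * CPU hotplug callback: drain the dead CPU's percpu free list back to
 * the slab cache and fix up the global accounting.
 */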
static int object_cpu_offline(unsigned int cpu)
{
	struct debug_percpu_free *percpu_pool;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;

	/* Remote access is safe as the CPU is dead already */
	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}

	raw_spin_lock_irqsave(&pool_lock, flags);
	obj_pool_used -= percpu_pool->obj_free;
	debug_objects_freed += percpu_pool->obj_free;
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	percpu_pool->obj_free = 0;

	return 0;
}
#endif

/*
 * We ran out of memory. That means there are probably tons of objects
 * allocated; drop all of them and leave the tracker disabled.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

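/*
 * Emit a rate-limited warning (at most five per boot) about an object
 * in an unexpected state, including the descriptor's debug_hint() if
 * one is provided.
 */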
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void * addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

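/*
 * Warn (rate-limited) when an object's actual stack placement does not
 * match its annotation: on-stack objects must be initialized via
 * debug_object_init_on_stack(), all others via debug_object_init().
 */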
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));

	WARN_ON(1);
}

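/*
 * Core of debug_object_init()/debug_object_init_on_stack(): look the
 * object up (allocating a tracker entry on first sight), then apply
 * the init-time state machine checks.
 */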
static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	bool check_stack = false;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context:
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
		fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		check_stack = true;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		return;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (check_stack)
		debug_object_is_on_stack(addr, onstack);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		bool print_object = false;

		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_print_object(obj, "activate");
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		if (print_object)
			debug_print_object(obj, "activate");
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);

	/*
	 * We are here when a static object is activated. We let the type
	 * specific code confirm whether this is true or not. If true, we
	 * just make sure that the static object is tracked in the object
	 * tracker. If not, this must be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					 ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				print_object = true;
			break;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			break;
		default:
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	} else if (print_object) {
		debug_print_object(obj, "deactivate");
	}
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "destroy");
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		print_object = true;
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (print_object)
		debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "free");
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static, and we let the type specific
		 * code confirm. Track this static object if true, else invoke
		 * fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				print_object = true;
			break;

		default:
			print_object = true;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	} else if (print_object) {
		debug_print_object(obj, "active_state");
	}
}
EXPORT_SYMBOL_GPL(debug_object_active_state);

#ifdef CONFIG_DEBUG_OBJECTS_FREE
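/*
 * Walk all hash buckets covering the freed memory range [address,
 * address + size) chunk by chunk; active objects found in the range
 * are reported and run through fixup_free(), everything else is
 * untracked and returned to the pools.
 */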
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	const struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(obj, "free");
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

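/*
 * Show the pool and warning statistics via debugfs. The percpu free
 * counts are folded in here to correct the skew described above for
 * obj_pool_free and obj_pool_used.
 */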
static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

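/*
 * Verify that the object under test reached the expected tracking state
 * and that the fixup/warning counters advanced as expected; any mismatch
 * disables the tracker.
 */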
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

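/*
 * Drive the selftest object through the legal and illegal state
 * transitions with interrupts disabled and check the outcome after
 * every step; the counters and the test descriptor are restored on
 * exit.
 */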
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated += i;

	/*
	 * debug_objects_mem_init() is now called early, when only one CPU
	 * is up and interrupts have been disabled, so it is safe to replace
	 * the active object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools
	 *
	 * Initialization is not strictly necessary, but is done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
		return;
	} else
		debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
}