1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Generic infrastructure for lifetime debugging of objects.
4  *
5  * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
6  */
7 
8 #define pr_fmt(fmt) "ODEBUG: " fmt
9 
10 #include <linux/debugobjects.h>
11 #include <linux/interrupt.h>
12 #include <linux/sched.h>
13 #include <linux/sched/task_stack.h>
14 #include <linux/seq_file.h>
15 #include <linux/debugfs.h>
16 #include <linux/slab.h>
17 #include <linux/hash.h>
18 #include <linux/kmemleak.h>
19 #include <linux/cpu.h>
20 
21 #define ODEBUG_HASH_BITS	14
22 #define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)
23 
24 #define ODEBUG_POOL_SIZE	1024
25 #define ODEBUG_POOL_MIN_LEVEL	256
26 #define ODEBUG_POOL_PERCPU_SIZE	64
27 #define ODEBUG_BATCH_SIZE	16
28 
29 #define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
30 #define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
31 #define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
32 
33 /*
34  * We limit the freeing of debug objects via workqueue to a maximum
35  * frequency of 10Hz, freeing at most 1024 objects per operation, so
36  * at most about 10k debug objects are freed per second.
37  */
38 #define ODEBUG_FREE_WORK_MAX	1024
39 #define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
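/*
 * A worked example of the throttle above, assuming HZ == 1000 purely for
 * illustration:
 *
 *	ODEBUG_FREE_WORK_DELAY = DIV_ROUND_UP(1000, 10) = 100 jiffies = 100ms
 *
 * i.e. free_obj_work() runs at most ~10 times per second, and each run
 * frees at most ODEBUG_FREE_WORK_MAX = 1024 objects, which gives the
 * roughly 10k objects per second mentioned above.
 */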
40 
41 struct debug_bucket {
42 	struct hlist_head	list;
43 	raw_spinlock_t		lock;
44 };
45 
46 /*
47  * Debug object percpu free list
48  * Access is protected by disabling interrupts
49  */
50 struct debug_percpu_free {
51 	struct hlist_head	free_objs;
52 	int			obj_free;
53 };
54 
55 static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);
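/*
 * Illustrative access pattern for the percpu pool, mirroring what
 * alloc_object() and __free_object() below actually do; "obj" and "flags"
 * are placeholders:
 *
 *	local_irq_save(flags);
 *	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
 *	hlist_add_head(&obj->node, &percpu_pool->free_objs);
 *	percpu_pool->obj_free++;
 *	local_irq_restore(flags);
 */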
56 
57 static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
58 
59 static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
60 
61 static DEFINE_RAW_SPINLOCK(pool_lock);
62 
63 static HLIST_HEAD(obj_pool);
64 static HLIST_HEAD(obj_to_free);
65 
66 /*
67  * Because of the presence of percpu free pools, obj_pool_free does not
68  * count objects sitting in the percpu free pools, and obj_pool_used
69  * counts them as used. Adjustments are made in debug_stats_show().
70  * Both obj_pool_min_free and obj_pool_max_used can therefore be
71  * slightly off.
72  */
73 static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
74 static int			obj_pool_free = ODEBUG_POOL_SIZE;
75 static int			obj_pool_used;
76 static int			obj_pool_max_used;
77 static bool			obj_freeing;
78 /* The number of objs on the global free list */
79 static int			obj_nr_tofree;
80 
81 static int			debug_objects_maxchain __read_mostly;
82 static int __maybe_unused	debug_objects_maxchecked __read_mostly;
83 static int			debug_objects_fixups __read_mostly;
84 static int			debug_objects_warnings __read_mostly;
85 static int			debug_objects_enabled __read_mostly
86 				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
87 static int			debug_objects_pool_size __read_mostly
88 				= ODEBUG_POOL_SIZE;
89 static int			debug_objects_pool_min_level __read_mostly
90 				= ODEBUG_POOL_MIN_LEVEL;
91 static const struct debug_obj_descr *descr_test  __read_mostly;
92 static struct kmem_cache	*obj_cache __read_mostly;
93 
94 /*
95  * Track the number of kmem_cache_alloc()/kmem_cache_free() calls done.
96  */
97 static int			debug_objects_allocated;
98 static int			debug_objects_freed;
99 
100 static void free_obj_work(struct work_struct *work);
101 static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
102 
103 static int __init enable_object_debug(char *str)
104 {
105 	debug_objects_enabled = 1;
106 	return 0;
107 }
108 
109 static int __init disable_object_debug(char *str)
110 {
111 	debug_objects_enabled = 0;
112 	return 0;
113 }
114 
115 early_param("debug_objects", enable_object_debug);
116 early_param("no_debug_objects", disable_object_debug);
117 
118 static const char *obj_states[ODEBUG_STATE_MAX] = {
119 	[ODEBUG_STATE_NONE]		= "none",
120 	[ODEBUG_STATE_INIT]		= "initialized",
121 	[ODEBUG_STATE_INACTIVE]		= "inactive",
122 	[ODEBUG_STATE_ACTIVE]		= "active",
123 	[ODEBUG_STATE_DESTROYED]	= "destroyed",
124 	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
125 };
126 
127 static void fill_pool(void)
128 {
129 	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
130 	struct debug_obj *obj;
131 	unsigned long flags;
132 
133 	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
134 		return;
135 
136 	/*
137 	 * Reuse objs from the global free list; they will be reinitialized
138 	 * when allocating.
139 	 *
140 	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
141 	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
142 	 * sections.
143 	 */
144 	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
145 		raw_spin_lock_irqsave(&pool_lock, flags);
146 		/*
147 		 * Recheck with the lock held as the worker thread might have
148 		 * won the race and freed the global free list already.
149 		 */
150 		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
151 			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
152 			hlist_del(&obj->node);
153 			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
154 			hlist_add_head(&obj->node, &obj_pool);
155 			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
156 		}
157 		raw_spin_unlock_irqrestore(&pool_lock, flags);
158 	}
159 
160 	if (unlikely(!obj_cache))
161 		return;
162 
163 	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
164 		struct debug_obj *new[ODEBUG_BATCH_SIZE];
165 		int cnt;
166 
167 		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
168 			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
169 			if (!new[cnt])
170 				break;
171 		}
172 		if (!cnt)
173 			return;
174 
175 		raw_spin_lock_irqsave(&pool_lock, flags);
176 		while (cnt) {
177 			hlist_add_head(&new[--cnt]->node, &obj_pool);
178 			debug_objects_allocated++;
179 			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
180 		}
181 		raw_spin_unlock_irqrestore(&pool_lock, flags);
182 	}
183 }
184 
185 /*
186  * Look up an object in the hash bucket.
187  */
188 static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
189 {
190 	struct debug_obj *obj;
191 	int cnt = 0;
192 
193 	hlist_for_each_entry(obj, &b->list, node) {
194 		cnt++;
195 		if (obj->object == addr)
196 			return obj;
197 	}
198 	if (cnt > debug_objects_maxchain)
199 		debug_objects_maxchain = cnt;
200 
201 	return NULL;
202 }
203 
204 /*
205  * Allocate a new object from the hlist
206  */
207 static struct debug_obj *__alloc_object(struct hlist_head *list)
208 {
209 	struct debug_obj *obj = NULL;
210 
211 	if (list->first) {
212 		obj = hlist_entry(list->first, typeof(*obj), node);
213 		hlist_del(&obj->node);
214 	}
215 
216 	return obj;
217 }
218 
219 /*
220  * Allocate a new object. If the pool is empty, switch off the debugger.
221  * Must be called with interrupts disabled.
222  */
223 static struct debug_obj *
224 alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
225 {
226 	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
227 	struct debug_obj *obj;
228 
229 	if (likely(obj_cache)) {
230 		obj = __alloc_object(&percpu_pool->free_objs);
231 		if (obj) {
232 			percpu_pool->obj_free--;
233 			goto init_obj;
234 		}
235 	}
236 
237 	raw_spin_lock(&pool_lock);
238 	obj = __alloc_object(&obj_pool);
239 	if (obj) {
240 		obj_pool_used++;
241 		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
242 
243 		/*
244 		 * Looking ahead, allocate one batch of debug objects and
245 		 * put them into the percpu free pool.
246 		 */
247 		if (likely(obj_cache)) {
248 			int i;
249 
250 			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
251 				struct debug_obj *obj2;
252 
253 				obj2 = __alloc_object(&obj_pool);
254 				if (!obj2)
255 					break;
256 				hlist_add_head(&obj2->node,
257 					       &percpu_pool->free_objs);
258 				percpu_pool->obj_free++;
259 				obj_pool_used++;
260 				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
261 			}
262 		}
263 
264 		if (obj_pool_used > obj_pool_max_used)
265 			obj_pool_max_used = obj_pool_used;
266 
267 		if (obj_pool_free < obj_pool_min_free)
268 			obj_pool_min_free = obj_pool_free;
269 	}
270 	raw_spin_unlock(&pool_lock);
271 
272 init_obj:
273 	if (obj) {
274 		obj->object = addr;
275 		obj->descr  = descr;
276 		obj->state  = ODEBUG_STATE_NONE;
277 		obj->astate = 0;
278 		hlist_add_head(&obj->node, &b->list);
279 	}
280 	return obj;
281 }
282 
283 /*
284  * workqueue function to free objects.
285  *
286  * To reduce contention on the global pool_lock, the actual freeing of
287  * debug objects will be delayed if the pool_lock is busy.
288  */
289 static void free_obj_work(struct work_struct *work)
290 {
291 	struct hlist_node *tmp;
292 	struct debug_obj *obj;
293 	unsigned long flags;
294 	HLIST_HEAD(tofree);
295 
296 	WRITE_ONCE(obj_freeing, false);
297 	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
298 		return;
299 
300 	if (obj_pool_free >= debug_objects_pool_size)
301 		goto free_objs;
302 
303 	/*
304 	 * The objs on the pool list might be allocated before the work is
305 	 * run, so recheck whether the pool list is full. If not, refill the
306 	 * pool list from the global free list. As it is likely that a
307 	 * workload may be gearing up to use more and more objects, don't
308 	 * free any of them until the next round.
309 	 */
310 	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
311 		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
312 		hlist_del(&obj->node);
313 		hlist_add_head(&obj->node, &obj_pool);
314 		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
315 		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
316 	}
317 	raw_spin_unlock_irqrestore(&pool_lock, flags);
318 	return;
319 
320 free_objs:
321 	/*
322 	 * Pool list is already full and there are still objs on the free
323 	 * list. Move remaining free objs to a temporary list to free the
324 	 * memory outside the pool_lock held region.
325 	 */
326 	if (obj_nr_tofree) {
327 		hlist_move_list(&obj_to_free, &tofree);
328 		debug_objects_freed += obj_nr_tofree;
329 		WRITE_ONCE(obj_nr_tofree, 0);
330 	}
331 	raw_spin_unlock_irqrestore(&pool_lock, flags);
332 
333 	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
334 		hlist_del(&obj->node);
335 		kmem_cache_free(obj_cache, obj);
336 	}
337 }
338 
339 static void __free_object(struct debug_obj *obj)
340 {
341 	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
342 	struct debug_percpu_free *percpu_pool;
343 	int lookahead_count = 0;
344 	unsigned long flags;
345 	bool work;
346 
347 	local_irq_save(flags);
348 	if (!obj_cache)
349 		goto free_to_obj_pool;
350 
351 	/*
352 	 * Try to free it into the percpu pool first.
353 	 */
354 	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
355 	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
356 		hlist_add_head(&obj->node, &percpu_pool->free_objs);
357 		percpu_pool->obj_free++;
358 		local_irq_restore(flags);
359 		return;
360 	}
361 
362 	/*
363 	 * As the percpu pool is full, look ahead and pull out a batch
364 	 * of objects from the percpu pool and free them as well.
365 	 */
366 	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
367 		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
368 		if (!objs[lookahead_count])
369 			break;
370 		percpu_pool->obj_free--;
371 	}
372 
373 free_to_obj_pool:
374 	raw_spin_lock(&pool_lock);
375 	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
376 	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
377 	obj_pool_used--;
378 
379 	if (work) {
380 		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
381 		hlist_add_head(&obj->node, &obj_to_free);
382 		if (lookahead_count) {
383 			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
384 			obj_pool_used -= lookahead_count;
385 			while (lookahead_count) {
386 				hlist_add_head(&objs[--lookahead_count]->node,
387 					       &obj_to_free);
388 			}
389 		}
390 
391 		if ((obj_pool_free > debug_objects_pool_size) &&
392 		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
393 			int i;
394 
395 			/*
396 			 * Free one more batch of objects from obj_pool.
397 			 */
398 			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
399 				obj = __alloc_object(&obj_pool);
400 				hlist_add_head(&obj->node, &obj_to_free);
401 				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
402 				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
403 			}
404 		}
405 	} else {
406 		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
407 		hlist_add_head(&obj->node, &obj_pool);
408 		if (lookahead_count) {
409 			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
410 			obj_pool_used -= lookahead_count;
411 			while (lookahead_count) {
412 				hlist_add_head(&objs[--lookahead_count]->node,
413 					       &obj_pool);
414 			}
415 		}
416 	}
417 	raw_spin_unlock(&pool_lock);
418 	local_irq_restore(flags);
419 }
420 
421 /*
422  * Put the object back into the pool and schedule work to free objects
423  * if necessary.
424  */
425 static void free_object(struct debug_obj *obj)
426 {
427 	__free_object(obj);
428 	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
429 		WRITE_ONCE(obj_freeing, true);
430 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
431 	}
432 }
433 
434 #ifdef CONFIG_HOTPLUG_CPU
435 static int object_cpu_offline(unsigned int cpu)
436 {
437 	struct debug_percpu_free *percpu_pool;
438 	struct hlist_node *tmp;
439 	struct debug_obj *obj;
440 
441 	/* Remote access is safe as the CPU is dead already */
442 	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
443 	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
444 		hlist_del(&obj->node);
445 		kmem_cache_free(obj_cache, obj);
446 	}
447 	percpu_pool->obj_free = 0;
448 
449 	return 0;
450 }
451 #endif
452 
453 /*
454  * We ran out of memory. That probably means we have tons of objects
455  * allocated.
456  */
457 static void debug_objects_oom(void)
458 {
459 	struct debug_bucket *db = obj_hash;
460 	struct hlist_node *tmp;
461 	HLIST_HEAD(freelist);
462 	struct debug_obj *obj;
463 	unsigned long flags;
464 	int i;
465 
466 	pr_warn("Out of memory. ODEBUG disabled\n");
467 
468 	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
469 		raw_spin_lock_irqsave(&db->lock, flags);
470 		hlist_move_list(&db->list, &freelist);
471 		raw_spin_unlock_irqrestore(&db->lock, flags);
472 
473 		/* Now free them */
474 		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
475 			hlist_del(&obj->node);
476 			free_object(obj);
477 		}
478 	}
479 }
480 
481 /*
482  * We use the page-sized chunk of the address for the hash. That way we
483  * can check for freed objects simply by checking the affected bucket.
484  */
485 static struct debug_bucket *get_bucket(unsigned long addr)
486 {
487 	unsigned long hash;
488 
489 	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
490 	return &obj_hash[hash];
491 }
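/*
 * A minimal sketch of the bucket selection above, with a made-up address
 * and assuming PAGE_SHIFT == 12 (4K chunks), purely for illustration:
 *
 *	addr  = 0xffff888012345678
 *	chunk = addr >> ODEBUG_CHUNK_SHIFT	   -> 0xffff888012345
 *	hash  = hash_long(chunk, ODEBUG_HASH_BITS) -> one of 16384 buckets
 *
 * All objects located in the same 4K chunk hash to the same bucket, which
 * is what lets __debug_check_no_obj_freed() walk the affected buckets
 * chunk by chunk.
 */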
492 
493 static void debug_print_object(struct debug_obj *obj, char *msg)
494 {
495 	const struct debug_obj_descr *descr = obj->descr;
496 	static int limit;
497 
498 	if (limit < 5 && descr != descr_test) {
499 		void *hint = descr->debug_hint ?
500 			descr->debug_hint(obj->object) : NULL;
501 		limit++;
502 		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
503 				 "object type: %s hint: %pS\n",
504 			msg, obj_states[obj->state], obj->astate,
505 			descr->name, hint);
506 	}
507 	debug_objects_warnings++;
508 }
509 
510 /*
511  * Try to repair the damage, so we have a better chance to get useful
512  * debug output.
513  */
514 static bool
515 debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
516 		   void * addr, enum debug_obj_state state)
517 {
518 	if (fixup && fixup(addr, state)) {
519 		debug_objects_fixups++;
520 		return true;
521 	}
522 	return false;
523 }
524 
525 static void debug_object_is_on_stack(void *addr, int onstack)
526 {
527 	int is_on_stack;
528 	static int limit;
529 
530 	if (limit > 4)
531 		return;
532 
533 	is_on_stack = object_is_on_stack(addr);
534 	if (is_on_stack == onstack)
535 		return;
536 
537 	limit++;
538 	if (is_on_stack)
539 		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
540 			 task_stack_page(current));
541 	else
542 		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
543 			 task_stack_page(current));
544 
545 	WARN_ON(1);
546 }
547 
548 static void
549 __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
550 {
551 	enum debug_obj_state state;
552 	bool check_stack = false;
553 	struct debug_bucket *db;
554 	struct debug_obj *obj;
555 	unsigned long flags;
556 
557 	/*
558 	 * On RT enabled kernels the pool refill must happen in preemptible
559 	 * context:
560 	 */
561 	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
562 		fill_pool();
563 
564 	db = get_bucket((unsigned long) addr);
565 
566 	raw_spin_lock_irqsave(&db->lock, flags);
567 
568 	obj = lookup_object(addr, db);
569 	if (!obj) {
570 		obj = alloc_object(addr, db, descr);
571 		if (!obj) {
572 			debug_objects_enabled = 0;
573 			raw_spin_unlock_irqrestore(&db->lock, flags);
574 			debug_objects_oom();
575 			return;
576 		}
577 		check_stack = true;
578 	}
579 
580 	switch (obj->state) {
581 	case ODEBUG_STATE_NONE:
582 	case ODEBUG_STATE_INIT:
583 	case ODEBUG_STATE_INACTIVE:
584 		obj->state = ODEBUG_STATE_INIT;
585 		break;
586 
587 	case ODEBUG_STATE_ACTIVE:
588 		state = obj->state;
589 		raw_spin_unlock_irqrestore(&db->lock, flags);
590 		debug_print_object(obj, "init");
591 		debug_object_fixup(descr->fixup_init, addr, state);
592 		return;
593 
594 	case ODEBUG_STATE_DESTROYED:
595 		raw_spin_unlock_irqrestore(&db->lock, flags);
596 		debug_print_object(obj, "init");
597 		return;
598 	default:
599 		break;
600 	}
601 
602 	raw_spin_unlock_irqrestore(&db->lock, flags);
603 	if (check_stack)
604 		debug_object_is_on_stack(addr, onstack);
605 }
606 
607 /**
608  * debug_object_init - debug checks when an object is initialized
609  * @addr:	address of the object
610  * @descr:	pointer to an object specific debug description structure
611  */
612 void debug_object_init(void *addr, const struct debug_obj_descr *descr)
613 {
614 	if (!debug_objects_enabled)
615 		return;
616 
617 	__debug_object_init(addr, descr, 0);
618 }
619 EXPORT_SYMBOL_GPL(debug_object_init);
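/*
 * Hypothetical usage sketch (not part of this file): a subsystem tracking
 * its own "struct my_obj" would provide a descriptor and wrap the object
 * lifetime with the calls exported here. The names my_obj,
 * my_obj_debug_descr and my_obj_is_static are illustrative only:
 *
 *	static const struct debug_obj_descr my_obj_debug_descr = {
 *		.name			= "my_obj",
 *		.is_static_object	= my_obj_is_static,
 *	};
 *
 *	void my_obj_init(struct my_obj *o)
 *	{
 *		debug_object_init(o, &my_obj_debug_descr);
 *	}
 *
 *	void my_obj_start(struct my_obj *o)
 *	{
 *		debug_object_activate(o, &my_obj_debug_descr);
 *	}
 *
 *	void my_obj_stop(struct my_obj *o)
 *	{
 *		debug_object_deactivate(o, &my_obj_debug_descr);
 *	}
 *
 *	void my_obj_release(struct my_obj *o)
 *	{
 *		debug_object_free(o, &my_obj_debug_descr);
 *		kfree(o);
 *	}
 */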
620 
621 /**
622  * debug_object_init_on_stack - debug checks when an object on stack is
623  *				initialized
624  * @addr:	address of the object
625  * @descr:	pointer to an object specific debug description structure
626  */
627 void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
628 {
629 	if (!debug_objects_enabled)
630 		return;
631 
632 	__debug_object_init(addr, descr, 1);
633 }
634 EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
635 
636 /**
637  * debug_object_activate - debug checks when an object is activated
638  * @addr:	address of the object
639  * @descr:	pointer to an object specific debug description structure
640  * Returns 0 on success, -EINVAL if the check failed.
641  */
642 int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
643 {
644 	enum debug_obj_state state;
645 	struct debug_bucket *db;
646 	struct debug_obj *obj;
647 	unsigned long flags;
648 	int ret;
649 	struct debug_obj o = { .object = addr,
650 			       .state = ODEBUG_STATE_NOTAVAILABLE,
651 			       .descr = descr };
652 
653 	if (!debug_objects_enabled)
654 		return 0;
655 
656 	db = get_bucket((unsigned long) addr);
657 
658 	raw_spin_lock_irqsave(&db->lock, flags);
659 
660 	obj = lookup_object(addr, db);
661 	if (obj) {
662 		bool print_object = false;
663 
664 		switch (obj->state) {
665 		case ODEBUG_STATE_INIT:
666 		case ODEBUG_STATE_INACTIVE:
667 			obj->state = ODEBUG_STATE_ACTIVE;
668 			ret = 0;
669 			break;
670 
671 		case ODEBUG_STATE_ACTIVE:
672 			state = obj->state;
673 			raw_spin_unlock_irqrestore(&db->lock, flags);
674 			debug_print_object(obj, "activate");
675 			ret = debug_object_fixup(descr->fixup_activate, addr, state);
676 			return ret ? 0 : -EINVAL;
677 
678 		case ODEBUG_STATE_DESTROYED:
679 			print_object = true;
680 			ret = -EINVAL;
681 			break;
682 		default:
683 			ret = 0;
684 			break;
685 		}
686 		raw_spin_unlock_irqrestore(&db->lock, flags);
687 		if (print_object)
688 			debug_print_object(obj, "activate");
689 		return ret;
690 	}
691 
692 	raw_spin_unlock_irqrestore(&db->lock, flags);
693 
694 	/*
695 	 * We are here when a static object is activated. We
696 	 * let the type specific code confirm whether this is
697 	 * really the case. If so, we just make sure that the
698 	 * static object is tracked in the object tracker. If
699 	 * not, this must be a bug, so we try to fix it up.
700 	 */
701 	if (descr->is_static_object && descr->is_static_object(addr)) {
702 		/* track this static object */
703 		debug_object_init(addr, descr);
704 		debug_object_activate(addr, descr);
705 	} else {
706 		debug_print_object(&o, "activate");
707 		ret = debug_object_fixup(descr->fixup_activate, addr,
708 					ODEBUG_STATE_NOTAVAILABLE);
709 		return ret ? 0 : -EINVAL;
710 	}
711 	return 0;
712 }
713 EXPORT_SYMBOL_GPL(debug_object_activate);
714 
715 /**
716  * debug_object_deactivate - debug checks when an object is deactivated
717  * @addr:	address of the object
718  * @descr:	pointer to an object specific debug description structure
719  */
720 void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
721 {
722 	struct debug_bucket *db;
723 	struct debug_obj *obj;
724 	unsigned long flags;
725 	bool print_object = false;
726 
727 	if (!debug_objects_enabled)
728 		return;
729 
730 	db = get_bucket((unsigned long) addr);
731 
732 	raw_spin_lock_irqsave(&db->lock, flags);
733 
734 	obj = lookup_object(addr, db);
735 	if (obj) {
736 		switch (obj->state) {
737 		case ODEBUG_STATE_INIT:
738 		case ODEBUG_STATE_INACTIVE:
739 		case ODEBUG_STATE_ACTIVE:
740 			if (!obj->astate)
741 				obj->state = ODEBUG_STATE_INACTIVE;
742 			else
743 				print_object = true;
744 			break;
745 
746 		case ODEBUG_STATE_DESTROYED:
747 			print_object = true;
748 			break;
749 		default:
750 			break;
751 		}
752 	}
753 
754 	raw_spin_unlock_irqrestore(&db->lock, flags);
755 	if (!obj) {
756 		struct debug_obj o = { .object = addr,
757 				       .state = ODEBUG_STATE_NOTAVAILABLE,
758 				       .descr = descr };
759 
760 		debug_print_object(&o, "deactivate");
761 	} else if (print_object) {
762 		debug_print_object(obj, "deactivate");
763 	}
764 }
765 EXPORT_SYMBOL_GPL(debug_object_deactivate);
766 
767 /**
768  * debug_object_destroy - debug checks when an object is destroyed
769  * @addr:	address of the object
770  * @descr:	pointer to an object specific debug description structure
771  */
772 void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
773 {
774 	enum debug_obj_state state;
775 	struct debug_bucket *db;
776 	struct debug_obj *obj;
777 	unsigned long flags;
778 	bool print_object = false;
779 
780 	if (!debug_objects_enabled)
781 		return;
782 
783 	db = get_bucket((unsigned long) addr);
784 
785 	raw_spin_lock_irqsave(&db->lock, flags);
786 
787 	obj = lookup_object(addr, db);
788 	if (!obj)
789 		goto out_unlock;
790 
791 	switch (obj->state) {
792 	case ODEBUG_STATE_NONE:
793 	case ODEBUG_STATE_INIT:
794 	case ODEBUG_STATE_INACTIVE:
795 		obj->state = ODEBUG_STATE_DESTROYED;
796 		break;
797 	case ODEBUG_STATE_ACTIVE:
798 		state = obj->state;
799 		raw_spin_unlock_irqrestore(&db->lock, flags);
800 		debug_print_object(obj, "destroy");
801 		debug_object_fixup(descr->fixup_destroy, addr, state);
802 		return;
803 
804 	case ODEBUG_STATE_DESTROYED:
805 		print_object = true;
806 		break;
807 	default:
808 		break;
809 	}
810 out_unlock:
811 	raw_spin_unlock_irqrestore(&db->lock, flags);
812 	if (print_object)
813 		debug_print_object(obj, "destroy");
814 }
815 EXPORT_SYMBOL_GPL(debug_object_destroy);
816 
817 /**
818  * debug_object_free - debug checks when an object is freed
819  * @addr:	address of the object
820  * @descr:	pointer to an object specific debug description structure
821  */
822 void debug_object_free(void *addr, const struct debug_obj_descr *descr)
823 {
824 	enum debug_obj_state state;
825 	struct debug_bucket *db;
826 	struct debug_obj *obj;
827 	unsigned long flags;
828 
829 	if (!debug_objects_enabled)
830 		return;
831 
832 	db = get_bucket((unsigned long) addr);
833 
834 	raw_spin_lock_irqsave(&db->lock, flags);
835 
836 	obj = lookup_object(addr, db);
837 	if (!obj)
838 		goto out_unlock;
839 
840 	switch (obj->state) {
841 	case ODEBUG_STATE_ACTIVE:
842 		state = obj->state;
843 		raw_spin_unlock_irqrestore(&db->lock, flags);
844 		debug_print_object(obj, "free");
845 		debug_object_fixup(descr->fixup_free, addr, state);
846 		return;
847 	default:
848 		hlist_del(&obj->node);
849 		raw_spin_unlock_irqrestore(&db->lock, flags);
850 		free_object(obj);
851 		return;
852 	}
853 out_unlock:
854 	raw_spin_unlock_irqrestore(&db->lock, flags);
855 }
856 EXPORT_SYMBOL_GPL(debug_object_free);
857 
858 /**
859  * debug_object_assert_init - debug checks when object should be initialized
860  * @addr:	address of the object
861  * @descr:	pointer to an object specific debug description structure
862  */
863 void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
864 {
865 	struct debug_bucket *db;
866 	struct debug_obj *obj;
867 	unsigned long flags;
868 
869 	if (!debug_objects_enabled)
870 		return;
871 
872 	db = get_bucket((unsigned long) addr);
873 
874 	raw_spin_lock_irqsave(&db->lock, flags);
875 
876 	obj = lookup_object(addr, db);
877 	if (!obj) {
878 		struct debug_obj o = { .object = addr,
879 				       .state = ODEBUG_STATE_NOTAVAILABLE,
880 				       .descr = descr };
881 
882 		raw_spin_unlock_irqrestore(&db->lock, flags);
883 		/*
884 		 * Maybe the object is static; let the type specific code
885 		 * confirm. If so, track the static object, otherwise invoke
886 		 * the fixup handler.
887 		 */
888 		if (descr->is_static_object && descr->is_static_object(addr)) {
889 			/* Track this static object */
890 			debug_object_init(addr, descr);
891 		} else {
892 			debug_print_object(&o, "assert_init");
893 			debug_object_fixup(descr->fixup_assert_init, addr,
894 					   ODEBUG_STATE_NOTAVAILABLE);
895 		}
896 		return;
897 	}
898 
899 	raw_spin_unlock_irqrestore(&db->lock, flags);
900 }
901 EXPORT_SYMBOL_GPL(debug_object_assert_init);
902 
903 /**
904  * debug_object_active_state - debug checks object usage state machine
905  * @addr:	address of the object
906  * @descr:	pointer to an object specific debug description structure
907  * @expect:	expected state
908  * @next:	state to move to if expected state is found
909  */
910 void
911 debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
912 			  unsigned int expect, unsigned int next)
913 {
914 	struct debug_bucket *db;
915 	struct debug_obj *obj;
916 	unsigned long flags;
917 	bool print_object = false;
918 
919 	if (!debug_objects_enabled)
920 		return;
921 
922 	db = get_bucket((unsigned long) addr);
923 
924 	raw_spin_lock_irqsave(&db->lock, flags);
925 
926 	obj = lookup_object(addr, db);
927 	if (obj) {
928 		switch (obj->state) {
929 		case ODEBUG_STATE_ACTIVE:
930 			if (obj->astate == expect)
931 				obj->astate = next;
932 			else
933 				print_object = true;
934 			break;
935 
936 		default:
937 			print_object = true;
938 			break;
939 		}
940 	}
941 
942 	raw_spin_unlock_irqrestore(&db->lock, flags);
943 	if (!obj) {
944 		struct debug_obj o = { .object = addr,
945 				       .state = ODEBUG_STATE_NOTAVAILABLE,
946 				       .descr = descr };
947 
948 		debug_print_object(&o, "active_state");
949 	} else if (print_object) {
950 		debug_print_object(obj, "active_state");
951 	}
952 }
953 EXPORT_SYMBOL_GPL(debug_object_active_state);
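/*
 * A hedged sketch of the expect/next usage above; the DEBUG_MY_OBJ_*
 * values and my_obj_debug_descr are made up for illustration:
 *
 *	enum { DEBUG_MY_OBJ_IDLE = 0, DEBUG_MY_OBJ_QUEUED = 1 };
 *
 *	debug_object_active_state(o, &my_obj_debug_descr,
 *				  DEBUG_MY_OBJ_IDLE, DEBUG_MY_OBJ_QUEUED);
 *
 * The call silently moves obj->astate to the next value when it matches
 * the expected one; any mismatch is reported via debug_print_object().
 */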
954 
955 #ifdef CONFIG_DEBUG_OBJECTS_FREE
956 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
957 {
958 	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
959 	const struct debug_obj_descr *descr;
960 	enum debug_obj_state state;
961 	struct debug_bucket *db;
962 	struct hlist_node *tmp;
963 	struct debug_obj *obj;
964 	int cnt, objs_checked = 0;
965 
966 	saddr = (unsigned long) address;
967 	eaddr = saddr + size;
968 	paddr = saddr & ODEBUG_CHUNK_MASK;
969 	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
970 	chunks >>= ODEBUG_CHUNK_SHIFT;
971 
972 	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
973 		db = get_bucket(paddr);
974 
975 repeat:
976 		cnt = 0;
977 		raw_spin_lock_irqsave(&db->lock, flags);
978 		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
979 			cnt++;
980 			oaddr = (unsigned long) obj->object;
981 			if (oaddr < saddr || oaddr >= eaddr)
982 				continue;
983 
984 			switch (obj->state) {
985 			case ODEBUG_STATE_ACTIVE:
986 				descr = obj->descr;
987 				state = obj->state;
988 				raw_spin_unlock_irqrestore(&db->lock, flags);
989 				debug_print_object(obj, "free");
990 				debug_object_fixup(descr->fixup_free,
991 						   (void *) oaddr, state);
992 				goto repeat;
993 			default:
994 				hlist_del(&obj->node);
995 				__free_object(obj);
996 				break;
997 			}
998 		}
999 		raw_spin_unlock_irqrestore(&db->lock, flags);
1000 
1001 		if (cnt > debug_objects_maxchain)
1002 			debug_objects_maxchain = cnt;
1003 
1004 		objs_checked += cnt;
1005 	}
1006 
1007 	if (objs_checked > debug_objects_maxchecked)
1008 		debug_objects_maxchecked = objs_checked;
1009 
1010 	/* Schedule work to actually kmem_cache_free() objects */
1011 	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
1012 		WRITE_ONCE(obj_freeing, true);
1013 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
1014 	}
1015 }
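/*
 * Worked example of the chunk arithmetic above, with made-up numbers and
 * 4K chunks assumed: freeing size = 0x1800 bytes at
 * saddr = 0xffff888000001c00 gives
 *
 *	eaddr  = saddr + size			= 0xffff888000003400
 *	paddr  = saddr & ODEBUG_CHUNK_MASK	= 0xffff888000001000
 *	chunks = (eaddr - paddr + 0xfff) >> 12	= 3
 *
 * so three hash buckets are scanned for objects lying in the freed range.
 */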
1016 
1017 void debug_check_no_obj_freed(const void *address, unsigned long size)
1018 {
1019 	if (debug_objects_enabled)
1020 		__debug_check_no_obj_freed(address, size);
1021 }
1022 #endif
1023 
1024 #ifdef CONFIG_DEBUG_FS
1025 
1026 static int debug_stats_show(struct seq_file *m, void *v)
1027 {
1028 	int cpu, obj_percpu_free = 0;
1029 
1030 	for_each_possible_cpu(cpu)
1031 		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
1032 
1033 	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
1034 	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
1035 	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
1036 	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
1037 	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
1038 	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
1039 	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
1040 	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
1041 	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
1042 	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
1043 	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
1044 	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
1045 	return 0;
1046 }
1047 DEFINE_SHOW_ATTRIBUTE(debug_stats);
1048 
1049 static int __init debug_objects_init_debugfs(void)
1050 {
1051 	struct dentry *dbgdir;
1052 
1053 	if (!debug_objects_enabled)
1054 		return 0;
1055 
1056 	dbgdir = debugfs_create_dir("debug_objects", NULL);
1057 
1058 	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
1059 
1060 	return 0;
1061 }
1062 __initcall(debug_objects_init_debugfs);
1063 
1064 #else
1065 static inline void debug_objects_init_debugfs(void) { }
1066 #endif
1067 
1068 #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
1069 
1070 /* Random data structure for the self test */
1071 struct self_test {
1072 	unsigned long	dummy1[6];
1073 	int		static_init;
1074 	unsigned long	dummy2[3];
1075 };
1076 
1077 static __initconst const struct debug_obj_descr descr_type_test;
1078 
1079 static bool __init is_static_object(void *addr)
1080 {
1081 	struct self_test *obj = addr;
1082 
1083 	return obj->static_init;
1084 }
1085 
1086 /*
1087  * fixup_init is called when:
1088  * - an active object is initialized
1089  */
1090 static bool __init fixup_init(void *addr, enum debug_obj_state state)
1091 {
1092 	struct self_test *obj = addr;
1093 
1094 	switch (state) {
1095 	case ODEBUG_STATE_ACTIVE:
1096 		debug_object_deactivate(obj, &descr_type_test);
1097 		debug_object_init(obj, &descr_type_test);
1098 		return true;
1099 	default:
1100 		return false;
1101 	}
1102 }
1103 
1104 /*
1105  * fixup_activate is called when:
1106  * - an active object is activated
1107  * - an unknown non-static object is activated
1108  */
1109 static bool __init fixup_activate(void *addr, enum debug_obj_state state)
1110 {
1111 	struct self_test *obj = addr;
1112 
1113 	switch (state) {
1114 	case ODEBUG_STATE_NOTAVAILABLE:
1115 		return true;
1116 	case ODEBUG_STATE_ACTIVE:
1117 		debug_object_deactivate(obj, &descr_type_test);
1118 		debug_object_activate(obj, &descr_type_test);
1119 		return true;
1120 
1121 	default:
1122 		return false;
1123 	}
1124 }
1125 
1126 /*
1127  * fixup_destroy is called when:
1128  * - an active object is destroyed
1129  */
1130 static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
1131 {
1132 	struct self_test *obj = addr;
1133 
1134 	switch (state) {
1135 	case ODEBUG_STATE_ACTIVE:
1136 		debug_object_deactivate(obj, &descr_type_test);
1137 		debug_object_destroy(obj, &descr_type_test);
1138 		return true;
1139 	default:
1140 		return false;
1141 	}
1142 }
1143 
1144 /*
1145  * fixup_free is called when:
1146  * - an active object is freed
1147  */
1148 static bool __init fixup_free(void *addr, enum debug_obj_state state)
1149 {
1150 	struct self_test *obj = addr;
1151 
1152 	switch (state) {
1153 	case ODEBUG_STATE_ACTIVE:
1154 		debug_object_deactivate(obj, &descr_type_test);
1155 		debug_object_free(obj, &descr_type_test);
1156 		return true;
1157 	default:
1158 		return false;
1159 	}
1160 }
1161 
1162 static int __init
1163 check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
1164 {
1165 	struct debug_bucket *db;
1166 	struct debug_obj *obj;
1167 	unsigned long flags;
1168 	int res = -EINVAL;
1169 
1170 	db = get_bucket((unsigned long) addr);
1171 
1172 	raw_spin_lock_irqsave(&db->lock, flags);
1173 
1174 	obj = lookup_object(addr, db);
1175 	if (!obj && state != ODEBUG_STATE_NONE) {
1176 		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
1177 		goto out;
1178 	}
1179 	if (obj && obj->state != state) {
1180 		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
1181 		       obj->state, state);
1182 		goto out;
1183 	}
1184 	if (fixups != debug_objects_fixups) {
1185 		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
1186 		       fixups, debug_objects_fixups);
1187 		goto out;
1188 	}
1189 	if (warnings != debug_objects_warnings) {
1190 		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
1191 		       warnings, debug_objects_warnings);
1192 		goto out;
1193 	}
1194 	res = 0;
1195 out:
1196 	raw_spin_unlock_irqrestore(&db->lock, flags);
1197 	if (res)
1198 		debug_objects_enabled = 0;
1199 	return res;
1200 }
1201 
1202 static __initconst const struct debug_obj_descr descr_type_test = {
1203 	.name			= "selftest",
1204 	.is_static_object	= is_static_object,
1205 	.fixup_init		= fixup_init,
1206 	.fixup_activate		= fixup_activate,
1207 	.fixup_destroy		= fixup_destroy,
1208 	.fixup_free		= fixup_free,
1209 };
1210 
1211 static __initdata struct self_test obj = { .static_init = 0 };
1212 
1213 static void __init debug_objects_selftest(void)
1214 {
1215 	int fixups, oldfixups, warnings, oldwarnings;
1216 	unsigned long flags;
1217 
1218 	local_irq_save(flags);
1219 
1220 	fixups = oldfixups = debug_objects_fixups;
1221 	warnings = oldwarnings = debug_objects_warnings;
1222 	descr_test = &descr_type_test;
1223 
1224 	debug_object_init(&obj, &descr_type_test);
1225 	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1226 		goto out;
1227 	debug_object_activate(&obj, &descr_type_test);
1228 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1229 		goto out;
1230 	debug_object_activate(&obj, &descr_type_test);
1231 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
1232 		goto out;
1233 	debug_object_deactivate(&obj, &descr_type_test);
1234 	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
1235 		goto out;
1236 	debug_object_destroy(&obj, &descr_type_test);
1237 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
1238 		goto out;
1239 	debug_object_init(&obj, &descr_type_test);
1240 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1241 		goto out;
1242 	debug_object_activate(&obj, &descr_type_test);
1243 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1244 		goto out;
1245 	debug_object_deactivate(&obj, &descr_type_test);
1246 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1247 		goto out;
1248 	debug_object_free(&obj, &descr_type_test);
1249 	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1250 		goto out;
1251 
1252 	obj.static_init = 1;
1253 	debug_object_activate(&obj, &descr_type_test);
1254 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1255 		goto out;
1256 	debug_object_init(&obj, &descr_type_test);
1257 	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
1258 		goto out;
1259 	debug_object_free(&obj, &descr_type_test);
1260 	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1261 		goto out;
1262 
1263 #ifdef CONFIG_DEBUG_OBJECTS_FREE
1264 	debug_object_init(&obj, &descr_type_test);
1265 	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1266 		goto out;
1267 	debug_object_activate(&obj, &descr_type_test);
1268 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1269 		goto out;
1270 	__debug_check_no_obj_freed(&obj, sizeof(obj));
1271 	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
1272 		goto out;
1273 #endif
1274 	pr_info("selftest passed\n");
1275 
1276 out:
1277 	debug_objects_fixups = oldfixups;
1278 	debug_objects_warnings = oldwarnings;
1279 	descr_test = NULL;
1280 
1281 	local_irq_restore(flags);
1282 }
1283 #else
1284 static inline void debug_objects_selftest(void) { }
1285 #endif
1286 
1287 /*
1288  * Called during early boot to initialize the hash buckets and link
1289  * the static object pool objects into the pool list. After this call
1290  * the object tracker is fully operational.
1291  */
1292 void __init debug_objects_early_init(void)
1293 {
1294 	int i;
1295 
1296 	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1297 		raw_spin_lock_init(&obj_hash[i].lock);
1298 
1299 	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
1300 		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
1301 }
1302 
1303 /*
1304  * Convert the statically allocated objects to dynamic ones:
1305  */
1306 static int __init debug_objects_replace_static_objects(void)
1307 {
1308 	struct debug_bucket *db = obj_hash;
1309 	struct hlist_node *tmp;
1310 	struct debug_obj *obj, *new;
1311 	HLIST_HEAD(objects);
1312 	int i, cnt = 0;
1313 
1314 	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
1315 		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
1316 		if (!obj)
1317 			goto free;
1318 		hlist_add_head(&obj->node, &objects);
1319 	}
1320 
1321 	/*
1322 	 * debug_objects_mem_init() is now called early enough that only one
1323 	 * CPU is up and interrupts have been disabled, so it is safe to
1324 	 * replace the active object references.
1325 	 */
1326 
1327 	/* Remove the statically allocated objects from the pool */
1328 	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
1329 		hlist_del(&obj->node);
1330 	/* Move the allocated objects to the pool */
1331 	hlist_move_list(&objects, &obj_pool);
1332 
1333 	/* Replace the active object references */
1334 	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
1335 		hlist_move_list(&db->list, &objects);
1336 
1337 		hlist_for_each_entry(obj, &objects, node) {
1338 			new = hlist_entry(obj_pool.first, typeof(*obj), node);
1339 			hlist_del(&new->node);
1340 			/* copy object data */
1341 			*new = *obj;
1342 			hlist_add_head(&new->node, &db->list);
1343 			cnt++;
1344 		}
1345 	}
1346 
1347 	pr_debug("%d of %d active objects replaced\n",
1348 		 cnt, obj_pool_used);
1349 	return 0;
1350 free:
1351 	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
1352 		hlist_del(&obj->node);
1353 		kmem_cache_free(obj_cache, obj);
1354 	}
1355 	return -ENOMEM;
1356 }
1357 
1358 /*
1359  * Called after the kmem_caches are functional to set up a dedicated
1360  * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
1361  * prevents the debug code from being called on kmem_cache_free() for
1362  * the debug tracker objects, which avoids recursive calls.
1363  */
1364 void __init debug_objects_mem_init(void)
1365 {
1366 	int cpu, extras;
1367 
1368 	if (!debug_objects_enabled)
1369 		return;
1370 
1371 	/*
1372 	 * Initialize the percpu object pools
1373 	 *
1374 	 * Initialization is not strictly necessary, but is done for
1375 	 * completeness.
1376 	 */
1377 	for_each_possible_cpu(cpu)
1378 		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));
1379 
1380 	obj_cache = kmem_cache_create("debug_objects_cache",
1381 				      sizeof (struct debug_obj), 0,
1382 				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
1383 				      NULL);
1384 
1385 	if (!obj_cache || debug_objects_replace_static_objects()) {
1386 		debug_objects_enabled = 0;
1387 		kmem_cache_destroy(obj_cache);
1388 		pr_warn("out of memory.\n");
1389 	} else
1390 		debug_objects_selftest();
1391 
1392 #ifdef CONFIG_HOTPLUG_CPU
1393 	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
1394 					object_cpu_offline);
1395 #endif
1396 
1397 	/*
1398 	 * Increase the thresholds for allocating and freeing objects
1399 	 * according to the number of possible CPUs available in the system.
1400 	 */
1401 	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
1402 	debug_objects_pool_size += extras;
1403 	debug_objects_pool_min_level += extras;
1404 }
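/*
 * Worked example for the CPU scaling above, numbers purely illustrative:
 * on a machine with 64 possible CPUs,
 *
 *	extras = 64 * ODEBUG_BATCH_SIZE = 64 * 16 = 1024
 *
 * so debug_objects_pool_size grows from 1024 to 2048 and
 * debug_objects_pool_min_level from 256 to 1280.
 */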
1405