// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
 *   Accesses to the metadata (e.g. count) are protected by this lock. Note
 *   that some members of this structure may be protected by other means
 *   (atomic or kmemleak_lock). This lock is also held when scanning the
 *   corresponding memory block to avoid the kernel freeing it via the
 *   kmemleak_free() callback. This is less heavyweight than holding a global
 *   lock like kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
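/*
 * Illustrative sketch of the nesting order described above (a simplified
 * sketch, not a verbatim call sequence from this file):
 *
 *	mutex_lock(&scan_mutex);
 *	raw_spin_lock_irqsave(&object->lock, flags);	<- scanned object
 *	raw_spin_lock_irqsave(&kmemleak_lock, flags2);	<- look-up tree access
 *	raw_spin_lock_nested(&other_object->lock, SINGLE_DEPTH_NESTING);
 *
 * with the locks released in the reverse order.
 */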

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
					   __GFP_NOLOCKDEP)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)
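/*
 * For example (illustrative expansion only), the metadata for an object
 * allocated with GFP_KERNEL is itself allocated with:
 *
 *	kmem_cache_alloc(object_cache,
 *			 GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC |
 *			 __GFP_NOWARN);
 *
 * so that kmemleak's own allocations never retry hard or warn on failure.
 */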

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	raw_spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before the object is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN	(1 << 3)

#define HEX_PREFIX		"    "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* protecting the access to object_list and object_tree_root */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static unsigned long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* set if there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)	do {	\
	if (seq)					\
		seq_printf(seq, fmt, ##__VA_ARGS__);	\
	else						\
		pr_warn(fmt, ##__VA_ARGS__);		\
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
				 int rowsize, int groupsize, const void *buf,
				 size_t len, bool ascii)
{
	if (seq)
		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
			     buf, len, ascii);
	else
		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
			       rowsize, groupsize, buf, len, ascii);
}

/*
 * Printing of the object's hex dump to the seq file. The number of lines to
 * be printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
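
/*
 * Worked example (for illustration): an object created with min_count == 1
 * starts with count == -1 (no color yet), is whitened to count == 0 at the
 * start of a scan, and turns gray once at least one reference is found:
 *
 *	count == 0, min_count == 1	-> color_white() == true
 *	count == 1, min_count == 1	-> color_gray() == true
 *	min_count == KMEMLEAK_BLACK	-> neither; the object is ignored
 */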

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
			   object->pointer, object->size);
	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
			   object->comm, object->pid, object->jiffies,
			   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	warn_or_seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];

		warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	stack_trace_print(object->trace, object->trace_len, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;
	unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);

	while (rb) {
		struct kmemleak_object *object;
		unsigned long untagged_objp;

		object = rb_entry(rb, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

		if (untagged_ptr < untagged_objp)
			rb = object->rb_node.rb_left;
		else if (untagged_objp + object->size <= untagged_ptr)
			rb = object->rb_node.rb_right;
		else if (untagged_objp == untagged_ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}
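
/*
 * Sketch of the intended get_object()/put_object() pairing (illustrative,
 * mirroring what find_and_get_object() below does):
 *
 *	rcu_read_lock();
 *	object = lookup_object(ptr, alias);	// under kmemleak_lock
 *	if (object && !get_object(object))	// use_count already hit 0
 *		object = NULL;
 *	rcu_read_unlock();
 *	...
 *	put_object(object);			// when done with the metadata
 */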

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;

	/* try the slab allocator first */
	if (object_cache) {
		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
		if (object)
			return object;
	}

	/* slab allocation failed, try the memory pool */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = list_first_entry_or_null(&mem_pool_free_list,
					  typeof(*object), object_list);
	if (object)
		list_del(&object->object_list);
	else if (mem_pool_free_count)
		object = &mem_pool[--mem_pool_free_count];
	else
		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	mem_pool_free(object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/*
	 * It may be too early for the RCU callbacks, however, there is no
	 * concurrent object_list traversal when !object_cache and all objects
	 * came from the memory pool. Free the object directly.
	 */
	if (object_cache)
		call_rcu(&object->rcu, free_object_rcu);
	else
		free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Remove an object from the object_tree_root and object_list. Must be called
 * with the kmemleak_lock held _if_ kmemleak is still enabled.
 */
static void __remove_object(struct kmemleak_object *object)
{
	rb_erase(&object->rb_node, &object_tree_root);
	list_del_rcu(&object->object_list);
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object)
		__remove_object(object);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	return stack_trace_save(trace, MAX_TRACE, 2);
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = mem_pool_alloc(gfp);
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	raw_spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = kfence_ksize((void *)ptr) ?: size;
	object->excess_ref = 0;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_hardirq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	raw_spin_lock_irqsave(&kmemleak_lock, flags);

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	min_addr = min(min_addr, untagged_ptr);
	max_addr = max(max_addr, untagged_ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
		if (untagged_ptr + size <= untagged_objp)
			link = &parent->rb_node.rb_left;
		else if (untagged_objp + parent->size <= untagged_ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}
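
/*
 * Sketch of the split above, for an object covering [start, end) with the
 * range [ptr, ptr + size) freed out of its middle:
 *
 *	start            ptr       ptr + size             end
 *	  |<-- new object -->|<-- freed -->|<-- new object -->|
 *
 * Either new object is skipped when its range would be empty.
 */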

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area = NULL;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

	if (scan_area_cache)
		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));

	raw_spin_lock_irqsave(&object->lock, flags);
	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false positives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}
	if (size == SIZE_MAX) {
		size = untagged_objp + object->size - untagged_ptr;
	} else if (untagged_ptr + size > untagged_objp + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references
 * to it are still searched for.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
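
/*
 * Illustrative only: the slab and vmalloc allocators already call these
 * hooks, so a plain kmalloc() is tracked automatically. Explicit calls are
 * only needed from custom allocators, e.g. (hypothetical allocator names):
 *
 *	obj = my_pool_alloc(size);			// hypothetical
 *	kmemleak_alloc(obj, size, 1, GFP_KERNEL);	// report if unreferenced
 *	...
 *	kmemleak_free(obj);				// before recycling it
 *	my_pool_free(obj);				// hypothetical
 */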

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);
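
/*
 * Hypothetical usage sketch: if only the 'next' field of a structure can
 * hold references to other tracked objects, scanning can be restricted to it:
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *	kmemleak_scan_area(&f->next, sizeof(f->next), GFP_KERNEL);
 */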

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object.
 *		See kmemleak_alloc()
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
			       gfp_t gfp)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address of the beginning or inside an object. This
 *		also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *			    address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);
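
/*
 * Illustrative sketch of how the phys variants are typically used from early
 * boot code (e.g. memblock), where only a physical address is at hand; the
 * lowmem check above skips highmem pages that lack a lowmem virtual mapping:
 *
 *	phys_addr_t phys = memblock_phys_alloc(size, align);
 *
 *	kmemleak_alloc_phys(phys, size, 0, 0);	// scanned but never reported
 */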

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	kasan_disable_current();
	kcsan_disable_current();
	object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
	kasan_enable_current();
	kcsan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;
	unsigned long untagged_ptr;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;
		unsigned long excess_ref;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
		kasan_enable_current();

		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		/* only pass surplus references (object already gray) */
		if (color_gray(object)) {
			excess_ref = object->excess_ref;
			/* no need for update_refs() if object already gray */
		} else {
			excess_ref = 0;
			update_refs(object);
		}
		raw_spin_unlock(&object->lock);

		if (excess_ref) {
			object = lookup_object(excess_ref, 0);
			if (!object)
				continue;
			if (object == scanned)
				/* circular reference, ignore */
				continue;
			raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
			update_refs(object);
			raw_spin_unlock(&object->lock);
		}
	}
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
#endif

/*
 * Scan a memory block corresponding to a kmemleak_object. A precondition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list) ||
	    object->flags & OBJECT_FULL_SCAN) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct zone *zone;
	int __maybe_unused i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_populated_zone(zone) {
		unsigned long start_pfn = zone->zone_start_pfn;
		unsigned long end_pfn = zone_end_pfn(zone);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			/* only scan pages belonging to this zone */
			if (page_zone(page) != zone)
				continue;
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
			if (!(pfn & 63))
				cond_resched();
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		rcu_read_lock();
		for_each_process_thread(g, p) {
			void *stack = try_get_task_stack(p);

			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		}
		rcu_read_unlock();
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);

		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = READ_ONCE(jiffies_scan_wait);

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}
1625
1626 /*
1627 * Iterate over the object_list and return the first valid object at or after
1628 * the required position with its use_count incremented. The function triggers
1629 * a memory scanning when the pos argument points to the first position.
1630 */
kmemleak_seq_start(struct seq_file * seq,loff_t * pos)1631 static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1632 {
1633 struct kmemleak_object *object;
1634 loff_t n = *pos;
1635 int err;
1636
1637 err = mutex_lock_interruptible(&scan_mutex);
1638 if (err < 0)
1639 return ERR_PTR(err);
1640
1641 rcu_read_lock();
1642 list_for_each_entry_rcu(object, &object_list, object_list) {
1643 if (n-- > 0)
1644 continue;
1645 if (get_object(object))
1646 goto out;
1647 }
1648 object = NULL;
1649 out:
1650 return object;
1651 }
1652
/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	raw_spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

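/*
 * Example (the address below is hypothetical; in practice it is taken from
 * a previously reported leak in the kmemleak output):
 *
 *   # echo dump=0xffff8880081bd200 > /sys/kernel/debug/kmemleak
 *
 * The object information is printed to the kernel log, not to the file.
 */
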
/*
 * We paint the cleared objects grey instead of black so that they are still
 * scanned in the future. If they were painted black and excluded from
 * scanning, any references they hold to objects allocated later would be
 * missed and we would end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

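/*
 * A short illustration of the intended workflow (hypothetical session):
 *
 *   # cat /sys/kernel/debug/kmemleak		# inspect the reported leaks
 *   # echo clear > /sys/kernel/debug/kmemleak	# acknowledge them
 *
 * Subsequent reads only report leaks detected after the "clear".
 */
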
static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the tasks stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all current reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled.
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EPERM;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned int secs;
		unsigned long msecs;

		ret = kstrtouint(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;

		msecs = secs * MSEC_PER_SEC;
		if (msecs > UINT_MAX)
			msecs = UINT_MAX;

		stop_scan_thread();
		if (msecs) {
			WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}
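
/*
 * Example of adjusting the scan period (hypothetical session): the write
 * below stops the current scan thread and restarts it with a 600 second
 * period; "scan=0" would leave it stopped:
 *
 *   # echo scan=600 > /sys/kernel/debug/kmemleak
 */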

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * Kmemleak has already been disabled, no need for RCU list traversal
	 * or kmemleak_lock held.
	 */
	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
		__remove_object(object);
		__delete_object(object);
	}
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects,
 * but only if no leaks have been reported (otherwise the metadata may still
 * hold useful information about the suspected leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once the scan thread is guaranteed to have stopped, it is safe to
	 * stop tracking object freeing. The ordering between the scan thread
	 * stopping and the memory accesses below is provided by
	 * kthread_stop().
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
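
/*
 * Example kernel command line usage:
 *
 *   kmemleak=off	- disable kmemleak at boot
 *   kmemleak=on	- keep kmemleak enabled even when the kernel was built
 *			  with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
 */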

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif

	if (kmemleak_error)
		return;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	/* register the data/bss sections */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	kmemleak_initialized = 1;

	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
		mutex_lock(&scan_mutex);
		start_scan_thread();
		mutex_unlock(&scan_mutex);
	}

	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
		mem_pool_free_count);

	return 0;
}
late_initcall(kmemleak_late_init);

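/*
 * A minimal usage sketch once the detector is initialized (hypothetical
 * session; debugfs must be mounted first):
 *
 *   # mount -t debugfs nodev /sys/kernel/debug/
 *   # echo scan > /sys/kernel/debug/kmemleak	# trigger an immediate scan
 *   # cat /sys/kernel/debug/kmemleak		# list suspected leaks
 */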