// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 *
 * Copyright (C) 2020, Google LLC.
 */

#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/irq_work.h>
#include <linux/jhash.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"

/* Disables KFENCE on the first warning assuming an irrecoverable error. */
#define KFENCE_WARN_ON(cond)						\
	({								\
		const bool __cond = WARN_ON(cond);			\
		if (unlikely(__cond)) {					\
			WRITE_ONCE(kfence_enabled, false);		\
			disabled_by_warn = true;			\
		}							\
		__cond;							\
	})

/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;
static bool disabled_by_warn __read_mostly;

unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."

static int kfence_enable_late(void);
static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
	unsigned long num;
	int ret = kstrtoul(val, 0, &num);

	if (ret < 0)
		return ret;

	/* Using 0 to indicate KFENCE is disabled. */
	if (!num && READ_ONCE(kfence_enabled)) {
		pr_info("disabled\n");
		WRITE_ONCE(kfence_enabled, false);
	}

	*((unsigned long *)kp->arg) = num;

	if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
		return disabled_by_warn ? -EINVAL : kfence_enable_late();
	return 0;
}

static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
	if (!READ_ONCE(kfence_enabled))
		return sprintf(buffer, "0\n");

	return param_get_ulong(buffer, kp);
}

static const struct kernel_param_ops sample_interval_param_ops = {
	.set = param_set_sample_interval,
	.get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
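/*
 * Example usage (see Documentation/dev-tools/kfence.rst): the sample interval
 * can be set at boot via "kfence.sample_interval=100", or adjusted at runtime
 * with:
 *
 *	echo 100 > /sys/module/kfence/parameters/sample_interval
 *
 * Writing 0 disables KFENCE; writing a non-zero value re-enables it via
 * kfence_enable_late(), unless KFENCE was disabled by a warning.
 */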

/* Pool usage% threshold when currently covered allocations are skipped. */
static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);

/* If true, use a deferrable timer. */
static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
module_param_named(deferrable, kfence_deferrable, bool, 0444);

/* If true, check all canary bytes on panic. */
static bool kfence_check_on_panic __read_mostly;
module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444);

/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __read_mostly;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */

/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata *kfence_metadata __read_mostly;

/*
 * If kfence_metadata is not NULL, it may be accessed by kfence_shutdown_cache().
 * So introduce kfence_metadata_init to initialize metadata, and then make
 * kfence_metadata visible after initialization is successful. This prevents
 * potential UAF or access to uninitialized metadata.
 */
static struct kfence_metadata *kfence_metadata_init __read_mostly;

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

/*
 * The static key to set up a KFENCE allocation; or if static keys are not used
 * to gate allocations, to avoid a load and compare if KFENCE is disabled.
 */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);

/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/*
 * A Counting Bloom filter of allocation coverage: limits currently covered
 * allocations of the same source filling up the pool.
 *
 * Assuming a range of 15%-85% unique allocations in the pool at any point in
 * time, the below parameters provide a probability of 0.02-0.33 for false
 * positive hits respectively:
 *
 *	P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)))^HNUM
 */
#define ALLOC_COVERED_HNUM	2
#define ALLOC_COVERED_ORDER	(const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
#define ALLOC_COVERED_SIZE	(1 << ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_HNEXT(h)	hash_32(h, ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_MASK	(ALLOC_COVERED_SIZE - 1)
static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
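/*
 * Worked example, assuming the default CONFIG_KFENCE_NUM_OBJECTS of 255:
 * ALLOC_COVERED_ORDER = const_ilog2(255) + 2 = 9, so ALLOC_COVERED_SIZE = 512.
 * At 15% unique allocations (~38 traces), P ~= (1 - e^(-2*38/512))^2 ~= 0.02;
 * at 85% (~217 traces), P ~= (1 - e^(-2*217/512))^2 ~= 0.33.
 */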

/* Stack depth used to determine uniqueness of an allocation. */
#define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)

/*
 * Randomness for stack hashes, making the same collisions across reboots and
 * different machines less likely.
 */
static u32 stack_hash_seed __ro_after_init;

/* Statistics counters for debugfs. */
enum kfence_counter_id {
	KFENCE_COUNTER_ALLOCATED,
	KFENCE_COUNTER_ALLOCS,
	KFENCE_COUNTER_FREES,
	KFENCE_COUNTER_ZOMBIES,
	KFENCE_COUNTER_BUGS,
	KFENCE_COUNTER_SKIP_INCOMPAT,
	KFENCE_COUNTER_SKIP_CAPACITY,
	KFENCE_COUNTER_SKIP_COVERED,
	KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
	[KFENCE_COUNTER_ALLOCATED]	= "currently allocated",
	[KFENCE_COUNTER_ALLOCS]		= "total allocations",
	[KFENCE_COUNTER_FREES]		= "total frees",
	[KFENCE_COUNTER_ZOMBIES]	= "zombie allocations",
	[KFENCE_COUNTER_BUGS]		= "total bugs",
	[KFENCE_COUNTER_SKIP_INCOMPAT]	= "skipped allocations (incompatible)",
	[KFENCE_COUNTER_SKIP_CAPACITY]	= "skipped allocations (capacity)",
	[KFENCE_COUNTER_SKIP_COVERED]	= "skipped allocations (covered)",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */

static inline bool should_skip_covered(void)
{
	unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;

	return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
}
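/*
 * Example: with the default skip_covered_thresh of 75 and
 * CONFIG_KFENCE_NUM_OBJECTS of 255, should_skip_covered() returns true once
 * more than 191 objects are currently allocated.
 */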

static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
{
	num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
	num_entries = filter_irq_stacks(stack_entries, num_entries);
	return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
}

/*
 * Adds (or subtracts) count @val for allocation stack trace hash
 * @alloc_stack_hash from Counting Bloom filter.
 */
static void alloc_covered_add(u32 alloc_stack_hash, int val)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}
}

/*
 * Returns true if the allocation stack trace hash @alloc_stack_hash is
 * currently contained (non-zero count) in Counting Bloom filter.
 */
static bool alloc_covered_contains(u32 alloc_stack_hash)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
			return false;
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}

	return true;
}
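/*
 * Usage of the filter in this file: kfence_guarded_alloc() calls
 * alloc_covered_add(hash, 1) on allocation and kfence_guarded_free() calls
 * alloc_covered_add(hash, -1) on free; __kfence_alloc() skips an allocation
 * when should_skip_covered() && alloc_covered_contains(hash).
 */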

static bool kfence_protect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}
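/*
 * Pool layout (set up in kfence_init_pool()): two leading guard pages,
 * followed by object pages alternating with guard pages, so the data page of
 * object i lives at __kfence_pool + (i + 1) * 2 * PAGE_SIZE:
 *
 *	+---+---+-------+---+-------+---+-----
 *	| G | G | obj 0 | G | obj 1 | G | ...
 *	+---+---+-------+---+-------+---+-----
 */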

static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

	/* The checks do not affect performance; only called from slow-paths. */

	/* Only call with a pointer into kfence_metadata. */
	if (KFENCE_WARN_ON(meta < kfence_metadata ||
			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
		return 0;

	/*
	 * This metadata object only ever maps to 1 page; verify that the stored
	 * address is in the expected range.
	 */
	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
		return 0;

	return pageaddr;
}

/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void
metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
		      unsigned long *stack_entries, size_t num_stack_entries)
{
	struct kfence_track *track =
		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

	lockdep_assert_held(&meta->lock);

	if (stack_entries) {
		memcpy(track->stack_entries, stack_entries,
		       num_stack_entries * sizeof(stack_entries[0]));
	} else {
		/*
		 * Skip over 1 (this) function; noinline ensures we do not
		 * accidentally skip over the caller by never inlining.
		 */
		num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
	}
	track->num_stack_entries = num_stack_entries;
	track->pid = task_pid_nr(current);
	track->cpu = raw_smp_processor_id();
	track->ts_nsec = local_clock(); /* Same source as printk timestamps. */

	/*
	 * Pairs with READ_ONCE() in
	 * kfence_shutdown_cache(),
	 * kfence_handle_page_fault().
	 */
	WRITE_ONCE(meta->state, next);
}

/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
	struct kfence_metadata *meta;
	unsigned long flags;

	if (likely(*addr == KFENCE_CANARY_PATTERN_U8(addr)))
		return true;

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	meta = addr_to_metadata((unsigned long)addr);
	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	return false;
}

static inline void set_canary(const struct kfence_metadata *meta)
{
	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
	unsigned long addr = pageaddr;

	/*
	 * The canary may overlap the last partial word of the object; this is
	 * fine, since the user is expected to initialize the object before use.
	 */
	for (; addr < meta->addr; addr += sizeof(u64))
		*((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;

	addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64));
	for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64))
		*((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
}
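/*
 * Resulting data-page layout (approximate; the right canary may start within
 * the object's last partial u64): bytes outside
 * [meta->addr, meta->addr + meta->size) carry the address-derived pattern:
 *
 *	pageaddr     meta->addr     meta->addr + meta->size      pageaddr + PAGE_SIZE
 *	|<- canary ->|<-- object -->|<--------- canary --------->|
 */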

static inline void check_canary(const struct kfence_metadata *meta)
{
	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
	unsigned long addr = pageaddr;

	/*
	 * We'll iterate over each canary byte per-side until a corrupted byte
	 * is found. However, we'll still iterate over the canary bytes to the
	 * right of the object even if there was an error in the canary bytes
	 * to the left of the object: if check_canary_byte() generates an
	 * error, showing both sides gives more clues as to what the error is
	 * about when displaying which bytes were corrupted.
	 */

	/* Apply to left of object. */
	for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) {
		if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64))
			break;
	}

	/*
	 * If a corruption was found within a u64-sized word, or the remaining
	 * canary bytes do not fill a whole u64, fall back to checking byte by
	 * byte.
	 */
	for (; addr < meta->addr; addr++) {
		if (unlikely(!check_canary_byte((u8 *)addr)))
			break;
	}

	/* Apply to right of object. */
	for (addr = meta->addr + meta->size; addr % sizeof(u64) != 0; addr++) {
		if (unlikely(!check_canary_byte((u8 *)addr)))
			return;
	}
	for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) {
		if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64)) {

			for (; addr - pageaddr < PAGE_SIZE; addr++) {
				if (!check_canary_byte((u8 *)addr))
					return;
			}
		}
	}
}

static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
				  unsigned long *stack_entries, size_t num_stack_entries,
				  u32 alloc_stack_hash)
{
	struct kfence_metadata *meta = NULL;
	unsigned long flags;
	struct slab *slab;
	void *addr;
	const bool random_right_allocate = get_random_u32_below(2);
	const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
				  !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS);

	/* Try to obtain a free object. */
	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
	if (!list_empty(&kfence_freelist)) {
		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
		list_del_init(&meta->list);
	}
	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
	if (!meta) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
		return NULL;
	}

	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
		/*
		 * This is extremely unlikely -- we are reporting on a
		 * use-after-free, which locked meta->lock, and the reporting
		 * code via printk calls kmalloc() which ends up in
		 * kfence_alloc() and tries to grab the same object that we're
		 * reporting on. While it has never been observed, lockdep does
		 * report that there is a possibility of deadlock. Fix it by
		 * using trylock and bailing out gracefully.
		 */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		/* Put the object back on the freelist. */
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		return NULL;
	}

	meta->addr = metadata_to_pageaddr(meta);
	/* Unprotect if we're reusing this page. */
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_unprotect(meta->addr);

	/*
	 * Note: for allocations made before RNG initialization,
	 * get_random_u32_below() will always return zero. We still benefit
	 * from enabling KFENCE as early as possible, even when the RNG is not
	 * yet available, as this will allow KFENCE to detect bugs due to
	 * earlier allocations. The only downside is that the out-of-bounds
	 * accesses detected are deterministic for such allocations.
	 */
	if (random_right_allocate) {
		/* Allocate on the "right" side, re-calculate address. */
		meta->addr += PAGE_SIZE - size;
		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
	}

	addr = (void *)meta->addr;

	/* Update remaining metadata. */
	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
	WRITE_ONCE(meta->cache, cache);
	meta->size = size;
	meta->alloc_stack_hash = alloc_stack_hash;
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(alloc_stack_hash, 1);

	/* Set required slab fields. */
	slab = virt_to_slab((void *)meta->addr);
	slab->slab_cache = cache;
#if defined(CONFIG_SLUB)
	slab->objects = 1;
#elif defined(CONFIG_SLAB)
	slab->s_mem = addr;
#endif

	/* Memory initialization. */
	set_canary(meta);

	/*
	 * We check slab_want_init_on_alloc() ourselves, rather than letting
	 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
	 * redzone.
	 */
	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
		memzero_explicit(addr, size);
	if (cache->ctor)
		cache->ctor(addr);

	if (random_fault)
		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

	return addr;
}

static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
	struct kcsan_scoped_access assert_page_exclusive;
	unsigned long flags;
	bool init;

	raw_spin_lock_irqsave(&meta->lock, flags);

	if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
		/* Invalid or double-free, bail out. */
		atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
		kfence_report_error((unsigned long)addr, false, NULL, meta,
				    KFENCE_ERROR_INVALID_FREE);
		raw_spin_unlock_irqrestore(&meta->lock, flags);
		return;
	}

	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
				  &assert_page_exclusive);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

	/* Restore page protection if there was an OOB access. */
	if (meta->unprotected_page) {
		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
		kfence_protect(meta->unprotected_page);
		meta->unprotected_page = 0;
	}

	/* Mark the object as freed. */
	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
	init = slab_want_init_on_free(meta->cache);
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(meta->alloc_stack_hash, -1);

	/* Check canary bytes for memory corruption. */
	check_canary(meta);

	/*
	 * Clear memory if init-on-free is set. While we protect the page, the
	 * data is still there, and after a use-after-free is detected, we
	 * unprotect the page, so the data is still accessible.
	 */
	if (!zombie && unlikely(init))
		memzero_explicit(addr, meta->size);

	/* Protect to detect use-after-frees. */
	kfence_protect((unsigned long)addr);

	kcsan_end_scoped_access(&assert_page_exclusive);
	if (!zombie) {
		/* Add it to the tail of the freelist for reuse. */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		KFENCE_WARN_ON(!list_empty(&meta->list));
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
	} else {
		/* See kfence_shutdown_cache(). */
		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
	}
}

static void rcu_guarded_free(struct rcu_head *h)
{
	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

	kfence_guarded_free((void *)meta->addr, meta, false);
}

/*
 * Initialization of the KFENCE pool after its allocation.
 * Returns 0 on success; otherwise returns the address up to
 * which partial initialization succeeded.
 */
static unsigned long kfence_init_pool(void)
{
	unsigned long addr;
	struct page *pages;
	int i;

	if (!arch_kfence_init_pool())
		return (unsigned long)__kfence_pool;

	addr = (unsigned long)__kfence_pool;
	pages = virt_to_page(__kfence_pool);

	/*
	 * Set up object pages: they must have PG_slab set, to avoid freeing
	 * these as real pages.
	 *
	 * We also want to avoid inserting kfence_free() in the kfree()
	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
	 * enters __slab_free() slow-path.
	 */
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		struct slab *slab = page_slab(nth_page(pages, i));

		if (!i || (i % 2))
			continue;

		__folio_set_slab(slab_folio(slab));
#ifdef CONFIG_MEMCG
		slab->memcg_data = (unsigned long)&kfence_metadata_init[i / 2 - 1].objcg |
				   MEMCG_DATA_OBJCGS;
#endif
	}

	/*
	 * Protect the first 2 pages. The first page is mostly unnecessary, and
	 * merely serves as an extended guard page. However, adding one
	 * additional page in the beginning gives us an even number of pages,
	 * which simplifies the mapping of address to metadata index.
	 */
	for (i = 0; i < 2; i++) {
		if (unlikely(!kfence_protect(addr)))
			return addr;

		addr += PAGE_SIZE;
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata_init[i];

		/* Initialize metadata. */
		INIT_LIST_HEAD(&meta->list);
		raw_spin_lock_init(&meta->lock);
		meta->state = KFENCE_OBJECT_UNUSED;
		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
		list_add_tail(&meta->list, &kfence_freelist);

		/* Protect the right redzone. */
		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
			goto reset_slab;

		addr += 2 * PAGE_SIZE;
	}

	/*
	 * Make kfence_metadata visible only when initialization is successful.
	 * Otherwise, if the initialization fails and kfence_metadata is freed,
	 * it may cause UAF in kfence_shutdown_cache().
	 */
	smp_store_release(&kfence_metadata, kfence_metadata_init);
	return 0;

reset_slab:
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		struct slab *slab = page_slab(nth_page(pages, i));

		if (!i || (i % 2))
			continue;
#ifdef CONFIG_MEMCG
		slab->memcg_data = 0;
#endif
		__folio_clear_slab(slab_folio(slab));
	}

	return addr;
}

static bool __init kfence_init_pool_early(void)
{
	unsigned long addr;

	if (!__kfence_pool)
		return false;

	addr = kfence_init_pool();

	if (!addr) {
		/*
		 * The pool is live and will never be deallocated from this point on.
		 * Ignore the pool object from the kmemleak phys object tree, as it would
		 * otherwise overlap with allocations returned by kfence_alloc(), which
		 * are registered with kmemleak through the slab post-alloc hook.
		 */
		kmemleak_ignore_phys(__pa(__kfence_pool));
		return true;
	}

	/*
	 * Only release unprotected pages, and do not try to go back and change
	 * page attributes due to risk of failing to do so as well. If changing
	 * page attributes for some pages fails, it is very likely that it also
	 * fails for the first page, and therefore expect addr==__kfence_pool in
	 * most failure cases.
	 */
	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
	__kfence_pool = NULL;

	memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE);
	kfence_metadata_init = NULL;

	return false;
}

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
	int i;

	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);

/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is used
 * to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
	struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_print_object(seq, meta);
	raw_spin_unlock_irqrestore(&meta->lock, flags);
	seq_puts(seq, "---------------------------------\n");

	return 0;
}

static const struct seq_operations objects_sops = {
	.start = start_object,
	.next = next_object,
	.stop = stop_object,
	.show = show_object,
};
DEFINE_SEQ_ATTRIBUTE(objects);

static int kfence_debugfs_init(void)
{
	struct dentry *kfence_dir;

	if (!READ_ONCE(kfence_enabled))
		return 0;

	kfence_dir = debugfs_create_dir("kfence", NULL);
	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
	return 0;
}

late_initcall(kfence_debugfs_init);
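/*
 * Example (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/kfence/stats	# the counters above, one per line
 *	cat /sys/kernel/debug/kfence/objects	# per-object state and stack traces
 */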

/* === Panic Notifier ====================================================== */

static void kfence_check_all_canary(void)
{
	int i;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata[i];

		if (meta->state == KFENCE_OBJECT_ALLOCATED)
			check_canary(meta);
	}
}

static int kfence_check_canary_callback(struct notifier_block *nb,
					unsigned long reason, void *arg)
{
	kfence_check_all_canary();
	return NOTIFY_OK;
}

static struct notifier_block kfence_check_canary_notifier = {
	.notifier_call = kfence_check_canary_callback,
};

/* === Allocation Gate Timer ================================================ */

static struct delayed_work kfence_timer;

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);

static void wake_up_kfence_timer(struct irq_work *work)
{
	wake_up(&allocation_wait);
}
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif

/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
 * more aggressive sampling intervals), we could get away with a variant that
 * avoids IPIs, at the cost of not immediately capturing allocations if the
 * instructions remain cached.
 */
static void toggle_allocation_gate(struct work_struct *work)
{
	if (!READ_ONCE(kfence_enabled))
		return;

	atomic_set(&kfence_allocation_gate, 0);
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/* Enable static key, and await allocation to happen. */
	static_branch_enable(&kfence_allocation_key);

	wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));

	/* Disable static key and reset timer. */
	static_branch_disable(&kfence_allocation_key);
#endif
	queue_delayed_work(system_unbound_wq, &kfence_timer,
			   msecs_to_jiffies(kfence_sample_interval));
}
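/*
 * For reference, a simplified sketch of the allocator-side fast path gated by
 * the timer above (the real inline lives in include/linux/kfence.h; details
 * such as the static-key polarity depend on CONFIG_KFENCE_STATIC_KEYS):
 *
 *	static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 *	{
 *		if (!static_branch_unlikely(&kfence_allocation_key))
 *			return NULL;
 *		// Gate already claimed for this sample period?
 *		if (likely(atomic_read(&kfence_allocation_gate)))
 *			return NULL;
 *		return __kfence_alloc(s, size, flags);
 *	}
 */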

/* === Public interface ===================================================== */

void __init kfence_alloc_pool_and_metadata(void)
{
	if (!kfence_sample_interval)
		return;

	/*
	 * If the pool has already been initialized by arch, there is no need to
	 * re-allocate the memory pool.
	 */
	if (!__kfence_pool)
		__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

	if (!__kfence_pool) {
		pr_err("failed to allocate pool\n");
		return;
	}

	/* The memory allocated by memblock has been zeroed out. */
	kfence_metadata_init = memblock_alloc(KFENCE_METADATA_SIZE, PAGE_SIZE);
	if (!kfence_metadata_init) {
		pr_err("failed to allocate metadata\n");
		memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
		__kfence_pool = NULL;
	}
}

static void kfence_init_enable(void)
{
	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
		static_branch_enable(&kfence_allocation_key);

	if (kfence_deferrable)
		INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
	else
		INIT_DELAYED_WORK(&kfence_timer, toggle_allocation_gate);

	if (kfence_check_on_panic)
		atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);

	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);

	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
}

void __init kfence_init(void)
{
	stack_hash_seed = get_random_u32();

	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
	if (!kfence_sample_interval)
		return;

	if (!kfence_init_pool_early()) {
		pr_err("%s failed\n", __func__);
		return;
	}

	kfence_init_enable();
}

static int kfence_init_late(void)
{
	const unsigned long nr_pages_pool = KFENCE_POOL_SIZE / PAGE_SIZE;
	const unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE;
	unsigned long addr = (unsigned long)__kfence_pool;
	unsigned long free_size = KFENCE_POOL_SIZE;
	int err = -ENOMEM;

#ifdef CONFIG_CONTIG_ALLOC
	struct page *pages;

	pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node,
				   NULL);
	if (!pages)
		return -ENOMEM;

	__kfence_pool = page_to_virt(pages);
	pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node,
				   NULL);
	if (pages)
		kfence_metadata_init = page_to_virt(pages);
#else
	if (nr_pages_pool > MAX_ORDER_NR_PAGES ||
	    nr_pages_meta > MAX_ORDER_NR_PAGES) {
		pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
		return -EINVAL;
	}

	__kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
	if (!__kfence_pool)
		return -ENOMEM;

	kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL);
#endif

	if (!kfence_metadata_init)
		goto free_pool;

	memzero_explicit(kfence_metadata_init, KFENCE_METADATA_SIZE);
	addr = kfence_init_pool();
	if (!addr) {
		kfence_init_enable();
		kfence_debugfs_init();
		return 0;
	}

	pr_err("%s failed\n", __func__);
	free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
	err = -EBUSY;

#ifdef CONFIG_CONTIG_ALLOC
	free_contig_range(page_to_pfn(virt_to_page((void *)kfence_metadata_init)),
			  nr_pages_meta);
free_pool:
	free_contig_range(page_to_pfn(virt_to_page((void *)addr)),
			  free_size / PAGE_SIZE);
#else
	free_pages_exact((void *)kfence_metadata_init, KFENCE_METADATA_SIZE);
free_pool:
	free_pages_exact((void *)addr, free_size);
#endif

	kfence_metadata_init = NULL;
	__kfence_pool = NULL;
	return err;
}

static int kfence_enable_late(void)
{
	if (!__kfence_pool)
		return kfence_init_late();

	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
	pr_info("re-enabled\n");
	return 0;
}

void kfence_shutdown_cache(struct kmem_cache *s)
{
	unsigned long flags;
	struct kfence_metadata *meta;
	int i;

	/* Pairs with release in kfence_init_pool(). */
	if (!smp_load_acquire(&kfence_metadata))
		return;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		bool in_use;

		meta = &kfence_metadata[i];

		/*
		 * If we observe some inconsistent cache and state pair where we
		 * should have returned false here, cache destruction is racing
		 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
		 * the lock will not help, as different critical section
		 * serialization will have the same outcome.
		 */
		if (READ_ONCE(meta->cache) != s ||
		    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
		raw_spin_unlock_irqrestore(&meta->lock, flags);

		if (in_use) {
			/*
			 * This cache still has allocations, and we should not
			 * release them back into the freelist so they can still
			 * safely be used and retain the kernel's default
			 * behaviour of keeping the allocations alive (leak the
			 * cache); however, they effectively become "zombie
			 * allocations" as the KFENCE objects are the only ones
			 * still in use and the owning cache is being destroyed.
			 *
			 * We mark them freed, so that any subsequent use shows
			 * more useful error messages that will include stack
			 * traces of the user of the object, the original
			 * allocation, and caller to shutdown_cache().
			 */
			kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
		}
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		meta = &kfence_metadata[i];

		/* See above. */
		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
			meta->cache = NULL;
		raw_spin_unlock_irqrestore(&meta->lock, flags);
	}
}

void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
	unsigned long stack_entries[KFENCE_STACK_DEPTH];
	size_t num_stack_entries;
	u32 alloc_stack_hash;

	/*
	 * Perform size check before switching kfence_allocation_gate, so that
	 * we don't disable KFENCE without making an allocation.
	 */
	if (size > PAGE_SIZE) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	/*
	 * Skip allocations from non-default zones, including DMA. We cannot
	 * guarantee that pages in the KFENCE pool will have the requested
	 * properties (e.g. reside in DMAable memory).
	 */
	if ((flags & GFP_ZONEMASK) ||
	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	/*
	 * Skip allocations for this slab, if KFENCE has been disabled for
	 * this slab.
	 */
	if (s->flags & SLAB_SKIP_KFENCE)
		return NULL;

	if (atomic_inc_return(&kfence_allocation_gate) > 1)
		return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/*
	 * waitqueue_active() is fully ordered after the update of
	 * kfence_allocation_gate per atomic_inc_return().
	 */
	if (waitqueue_active(&allocation_wait)) {
		/*
		 * Calling wake_up() here may deadlock when allocations happen
		 * from within timer code. Use an irq_work to defer it.
		 */
		irq_work_queue(&wake_up_kfence_timer_work);
	}
#endif

	if (!READ_ONCE(kfence_enabled))
		return NULL;

	num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);

	/*
	 * Do expensive check for coverage of allocation in slow-path after
	 * allocation_gate has already become non-zero, even though it might
	 * mean not making any allocation within a given sample interval.
	 *
	 * This ensures reasonable allocation coverage when the pool is almost
	 * full, including avoiding long-lived allocations of the same source
	 * filling up the pool (e.g. pagecache allocations).
	 */
	alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
	if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
		return NULL;
	}

	return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries,
				    alloc_stack_hash);
}

size_t kfence_ksize(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? (void *)meta->addr : NULL;
}

void __kfence_free(void *addr)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

#ifdef CONFIG_MEMCG
	KFENCE_WARN_ON(meta->objcg);
#endif
	/*
	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
	 * the object, as the object page may be recycled for other-typed
	 * objects once it has been freed. meta->cache may be NULL if the cache
	 * was destroyed.
	 */
	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
		call_rcu(&meta->rcu_head, rcu_guarded_free);
	else
		kfence_guarded_free(addr, meta, false);
}
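/*
 * Note on fault classification below: within the pool, object pages sit at
 * even page indices (the first two pages are guards), so a faulting address
 * with an odd page index must be a redzone hit (OOB), while an even index
 * points into an object page (likely UAF) -- or into the leading guards, in
 * which case no metadata is found and the access is reported as
 * KFENCE_ERROR_INVALID.
 */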

bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
	struct kfence_metadata *to_report = NULL;
	enum kfence_error_type error_type;
	unsigned long flags;

	if (!is_kfence_address((void *)addr))
		return false;

	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
		return kfence_unprotect(addr); /* ... unprotect and proceed. */

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	if (page_index % 2) {
		/* This is a redzone, report a buffer overflow. */
		struct kfence_metadata *meta;
		int distance = 0;

		meta = addr_to_metadata(addr - PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			to_report = meta;
			/* Data race ok; distance calculation approximate. */
			distance = addr - data_race(meta->addr + meta->size);
		}

		meta = addr_to_metadata(addr + PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			/* Data race ok; distance calculation approximate. */
			if (!to_report || distance > data_race(meta->addr) - addr)
				to_report = meta;
		}

		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		to_report->unprotected_page = addr;
		error_type = KFENCE_ERROR_OOB;

		/*
		 * If the object was freed before we took the lock we can still
		 * report this as an OOB -- the report will simply show the
		 * stacktrace of the free as well.
		 */
	} else {
		to_report = addr_to_metadata(addr);
		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		error_type = KFENCE_ERROR_UAF;
		/*
		 * We may race with __kfence_alloc(), and it is possible that a
		 * freed object may be reallocated. We simply report this as a
		 * use-after-free, with the stack trace showing the place where
		 * the object was re-allocated.
		 */
	}

out:
	if (to_report) {
		kfence_report_error(addr, is_write, regs, to_report, error_type);
		raw_spin_unlock_irqrestore(&to_report->lock, flags);
	} else {
		/* This may be a UAF or OOB access, but we can't be sure. */
		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
	}

	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}