// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for the KFENCE memory safety error detector. Since KFENCE's
 * reports are obtained via the console, it is the console output that we
 * must verify. Each test case checks for the presence (or absence) of the
 * generated reports, relying on the 'console' tracepoint to capture reports
 * as they appear in the kernel log.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Alexander Potapenko <glider@google.com>
 *         Marco Elver <elver@google.com>
 */

#include <kunit/test.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <trace/events/printk.h>

#include <asm/kfence.h>

#include "kfence.h"

/* May be overridden by <asm/kfence.h>. */
#ifndef arch_kfence_test_address
#define arch_kfence_test_address(addr) (addr)
#endif

#define KFENCE_TEST_REQUIRES(test, cond) do {				\
	if (!(cond))							\
		kunit_skip((test), "Test requires: " #cond);		\
} while (0)
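
/*
 * For example, test_init_on_free() below skips itself unless init-on-free is
 * enabled by default:
 *
 *	KFENCE_TEST_REQUIRES(test, IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON));
 */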

/* Report as observed from console. */
static struct {
	spinlock_t lock;
	int nlines;
	char lines[2][256];
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};

/* Probe for console output: obtains observed lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;
	int nlines;

	spin_lock_irqsave(&observed.lock, flags);
	nlines = observed.nlines;

	if (strnstr(buf, "BUG: KFENCE: ", len) && strnstr(buf, "test_", len)) {
		/*
		 * KFENCE report and related to the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
		nlines = 1;
	} else if (nlines == 1 && (strnstr(buf, "at 0x", len) || strnstr(buf, "of 0x", len))) {
		strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));
	}

	WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
	spin_unlock_irqrestore(&observed.lock, flags);
}
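
/*
 * A captured report thus spans two console lines; for example (offsets and
 * addresses are illustrative only, and %p output is typically hashed):
 *
 *	BUG: KFENCE: out-of-bounds read in test_out_of_bounds_read+0x...
 *	Out-of-bounds read at 0x... (...)
 */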

/* Check if a report related to the test exists. */
static bool report_available(void)
{
	return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}

/* Information we expect in a report. */
struct expect_report {
	enum kfence_error_type type; /* The type of error. */
	void *fn; /* Function pointer to expected function where access occurred. */
	char *addr; /* Address at which the bad access occurred. */
	bool is_write; /* Whether the access is a write. */
};

static const char *get_access_type(const struct expect_report *r)
{
	return r->is_write ? "write" : "read";
}

/* Check observed report matches information in @r. */
static bool report_matches(const struct expect_report *r)
{
	unsigned long addr = (unsigned long)r->addr;
	bool ret = false;
	unsigned long flags;
	typeof(observed.lines) expect;
	const char *end;
	char *cur;

	/* Double-checked locking. */
	if (!report_available())
		return false;

	/* Generate expected report contents. */

	/* Title */
	cur = expect[0];
	end = &expect[0][sizeof(expect[0]) - 1];
	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: out-of-bounds %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: use-after-free %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: memory corruption");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid free");
		break;
	}

	scnprintf(cur, end - cur, " in %pS", r->fn);
	/* The exact offset won't match, remove it; also strip module name. */
	cur = strchr(expect[0], '+');
	if (cur)
		*cur = '\0';

	/* Access information */
	cur = expect[1];
	end = &expect[1][sizeof(expect[1]) - 1];

	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "Corrupted memory at");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "Invalid free of");
		break;
	}

	cur += scnprintf(cur, end - cur, " 0x%p", (void *)addr);

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.lines[0], expect[0]) && strstr(observed.lines[1], expect[1]);
out:
	spin_unlock_irqrestore(&observed.lock, flags);
	return ret;
}

/* ===== Test cases ===== */

#define TEST_PRIV_WANT_MEMCACHE ((void *)1)

/* Cache used by tests; if NULL, allocate from kmalloc instead. */
static struct kmem_cache *test_cache;

static size_t setup_test_cache(struct kunit *test, size_t size, slab_flags_t flags,
			       void (*ctor)(void *))
{
	if (test->priv != TEST_PRIV_WANT_MEMCACHE)
		return size;

	kunit_info(test, "%s: size=%zu, ctor=%ps\n", __func__, size, ctor);

	/*
	 * Use SLAB_NO_MERGE to prevent merging with existing caches.
	 * Use SLAB_ACCOUNT to allocate via memcg, if enabled.
	 */
	flags |= SLAB_NO_MERGE | SLAB_ACCOUNT;
	test_cache = kmem_cache_create("test", size, 1, flags, ctor);
	KUNIT_ASSERT_TRUE_MSG(test, test_cache, "could not create cache");

	return size;
}

static void test_cache_destroy(void)
{
	if (!test_cache)
		return;

	kmem_cache_destroy(test_cache);
	test_cache = NULL;
}

static inline size_t kmalloc_cache_alignment(size_t size)
{
	/* We only need ->align here, so there is no need to pass the real caller. */
	enum kmalloc_cache_type type = kmalloc_type(GFP_KERNEL, 0);
	return kmalloc_caches[type][__kmalloc_index(size, false)]->align;
}
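
/*
 * For example, kmalloc_cache_alignment(32) returns the ->align of the
 * kmalloc-32 cache, typically ARCH_KMALLOC_MINALIGN (architecture-dependent).
 */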

/* Must always inline to match stack trace against caller. */
static __always_inline void test_free(void *ptr)
{
	if (test_cache)
		kmem_cache_free(test_cache, ptr);
	else
		kfree(ptr);
}

/*
 * Whether this should be a KFENCE allocation, and on which side of the
 * allocation the closest guard page should be.
 */
enum allocation_policy {
	ALLOCATE_ANY,   /* KFENCE, any side. */
	ALLOCATE_LEFT,  /* KFENCE, left side of page. */
	ALLOCATE_RIGHT, /* KFENCE, right side of page. */
	ALLOCATE_NONE,  /* No KFENCE allocation. */
};
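
/*
 * In test_alloc() below, ALLOCATE_LEFT is satisfied when PAGE_ALIGNED(alloc)
 * holds (guard page immediately to the left), and ALLOCATE_RIGHT when it does
 * not (object placed towards the end of the page, guard page to the right,
 * possibly behind an alignment gap).
 */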

/*
 * Try to get a guarded allocation from KFENCE. Uses either kmalloc() or the
 * current test_cache if set up.
 */
static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy)
{
	void *alloc;
	unsigned long timeout, resched_after;
	const char *policy_name;

	switch (policy) {
	case ALLOCATE_ANY:
		policy_name = "any";
		break;
	case ALLOCATE_LEFT:
		policy_name = "left";
		break;
	case ALLOCATE_RIGHT:
		policy_name = "right";
		break;
	case ALLOCATE_NONE:
		policy_name = "none";
		break;
	}

	kunit_info(test, "%s: size=%zu, gfp=%x, policy=%s, cache=%i\n", __func__, size, gfp,
		   policy_name, !!test_cache);

	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
	/*
	 * Especially for non-preemption kernels, ensure the allocation-gate
	 * timer can catch up: after @resched_after, every failed allocation
	 * attempt yields, to ensure the allocation-gate timer is scheduled.
	 */
	resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval);
	do {
		if (test_cache)
			alloc = kmem_cache_alloc(test_cache, gfp);
		else
			alloc = kmalloc(size, gfp);

		if (is_kfence_address(alloc)) {
			struct slab *slab = virt_to_slab(alloc);
			enum kmalloc_cache_type type = kmalloc_type(GFP_KERNEL, _RET_IP_);
			struct kmem_cache *s = test_cache ?:
					kmalloc_caches[type][__kmalloc_index(size, false)];

			/*
			 * Verify that various helpers return the right values
			 * even for KFENCE objects; these are required so that
			 * memcg accounting works correctly.
			 */
			KUNIT_EXPECT_EQ(test, obj_to_index(s, slab, alloc), 0U);
			KUNIT_EXPECT_EQ(test, objs_per_slab(s, slab), 1);

			if (policy == ALLOCATE_ANY)
				return alloc;
			if (policy == ALLOCATE_LEFT && PAGE_ALIGNED(alloc))
				return alloc;
			if (policy == ALLOCATE_RIGHT && !PAGE_ALIGNED(alloc))
				return alloc;
		} else if (policy == ALLOCATE_NONE)
			return alloc;

		test_free(alloc);

		if (time_after(jiffies, resched_after))
			cond_resched();
	} while (time_before(jiffies, timeout));

	KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate from KFENCE");
	return NULL; /* Unreachable. */
}

static void test_out_of_bounds_read(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_read,
		.is_write = false,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/*
	 * If we don't have our own cache, adjust based on alignment, so that we
	 * actually access guard pages on either side.
	 */
	if (!test_cache)
		size = kmalloc_cache_alignment(size);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf + size;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

static void test_out_of_bounds_write(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_write,
		.is_write = true,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

static void test_use_after_free_read(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_use_after_free_read,
		.is_write = false,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_double_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_double_free,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	test_free(expect.addr); /* Double-free. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_invalid_addr_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_invalid_addr_free,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	expect.addr = buf + 1; /* Free on invalid address. */
	test_free(expect.addr); /* Invalid address free. */
	test_free(buf); /* No error. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_corruption(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_corruption,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * KFENCE is unable to detect an OOB if the allocation's alignment requirements
 * leave a gap between the object and the guard page. Specifically, an
 * allocation of e.g. 73 bytes is aligned on 8 and 128 bytes for SLUB or SLAB
 * respectively. Therefore it is impossible for the allocated object to
 * contiguously line up with the right guard page.
 *
 * However, we test that an access to memory beyond the gap results in KFENCE
 * detecting an OOB access.
 */
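/*
 * Concretely (illustrative; exact numbers depend on the allocator): with
 * size == 73 and align == 8, the right-aligned object occupies a
 * round_up(73, 8) == 80 byte slot ending at the page boundary, leaving a
 * 7-byte gap after buf + 73 that KFENCE cannot trap; buf + 73 + 8 is
 * guaranteed to land in the guard page.
 */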
static void test_kmalloc_aligned_oob_read(struct kunit *test)
{
	const size_t size = 73;
	const size_t align = kmalloc_cache_alignment(size);
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_kmalloc_aligned_oob_read,
		.is_write = false,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);

	/*
	 * The object is offset to the right, so there won't be an OOB to the
	 * left of it.
	 */
	READ_ONCE(*(buf - 1));
	KUNIT_EXPECT_FALSE(test, report_available());

	/*
	 * @buf must be aligned on @align, therefore buf + size belongs to the
	 * same page -> no OOB.
	 */
	READ_ONCE(*(buf + size));
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Overflowing by @align bytes will result in an OOB. */
	expect.addr = buf + size + align;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	test_free(buf);
}

static void test_kmalloc_aligned_oob_write(struct kunit *test)
{
	const size_t size = 73;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_kmalloc_aligned_oob_write,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	/*
	 * The object is offset to the right, so we won't get a page
	 * fault immediately after it.
	 */
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, READ_ONCE(*expect.addr) + 1);
	KUNIT_EXPECT_FALSE(test, report_available());
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
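
/*
 * Note: the write above lands in the alignment gap rather than the guard
 * page, so it cannot fault; KFENCE only detects it as memory corruption when
 * it validates the canary bytes on free, which is why report_matches() is
 * only checked after test_free().
 */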

/* Test cache shrinking and destroying with KFENCE. */
static void test_shrink_memcache(struct kunit *test)
{
	const size_t size = 32;
	void *buf;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	kmem_cache_shrink(test_cache);
	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void ctor_set_x(void *obj)
{
	/* Every object has at least 8 bytes. */
	memset(obj, 'x', 8);
}

/* Ensure that SL*B does not modify KFENCE objects on bulk free. */
static void test_free_bulk(struct kunit *test)
{
	int iter;

	for (iter = 0; iter < 5; iter++) {
		const size_t size = setup_test_cache(test, get_random_u32_inclusive(8, 307),
						     0, (iter & 1) ? ctor_set_x : NULL);
		void *objects[] = {
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
		};

		kmem_cache_free_bulk(test_cache, ARRAY_SIZE(objects), objects);
		KUNIT_ASSERT_FALSE(test, report_available());
		test_cache_destroy();
	}
}

/* Test init-on-free works. */
static void test_init_on_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_init_on_free,
		.is_write = false,
	};
	int i;

	KFENCE_TEST_REQUIRES(test, IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON));
	/* Assume it hasn't been disabled on the command line. */

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		expect.addr[i] = i + 1;
	test_free(expect.addr);

	for (i = 0; i < size; i++) {
		/*
		 * This may fail if the page was recycled by KFENCE and then
		 * written to again -- this, however, is near impossible with a
		 * default config.
		 */
		KUNIT_EXPECT_EQ(test, expect.addr[i], (char)0);

		if (!i) /* Only check first access to not fail test if page is ever re-protected. */
			KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	}
}

/* Ensure that constructors work properly. */
static void test_memcache_ctor(struct kunit *test)
{
	const size_t size = 32;
	char *buf;
	int i;

	setup_test_cache(test, size, 0, ctor_set_x);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);

	for (i = 0; i < 8; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)'x');

	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

/* Test that memory is zeroed if requested. */
static void test_gfpzero(struct kunit *test)
{
	const size_t size = PAGE_SIZE; /* PAGE_SIZE so we can use ALLOCATE_ANY. */
	char *buf1, *buf2;
	int i;

	/* Skip if we think it'd take too long. */
	KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100);

	setup_test_cache(test, size, 0, NULL);
	buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		buf1[i] = i + 1;
	test_free(buf1);

	/* Try to get the same address again -- this can take a while. */
	for (i = 0;; i++) {
		buf2 = test_alloc(test, size, GFP_KERNEL | __GFP_ZERO, ALLOCATE_ANY);
		if (buf1 == buf2)
			break;
		test_free(buf2);

		if (kthread_should_stop() || (i == CONFIG_KFENCE_NUM_OBJECTS)) {
			kunit_warn(test, "giving up ... cannot get same object back\n");
			return;
		}
		cond_resched();
	}

	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf2[i], (char)0);

	test_free(buf2);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void test_invalid_access(struct kunit *test)
{
	const struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID,
		.fn = test_invalid_access,
		.addr = &__kfence_pool[10],
		.is_write = false,
	};

	READ_ONCE(__kfence_pool[10]);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test SLAB_TYPESAFE_BY_RCU works. */
static void test_memcache_typesafe_by_rcu(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_memcache_typesafe_by_rcu,
		.is_write = false,
	};

	setup_test_cache(test, size, SLAB_TYPESAFE_BY_RCU, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */

	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	*expect.addr = 42;

	rcu_read_lock();
	test_free(expect.addr);
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	/*
	 * Up to this point, memory should not have been freed yet, and
	 * therefore there should be no KFENCE report from the above access.
	 */
	rcu_read_unlock();

	/* Above access to @expect.addr should not have generated a report! */
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Only after rcu_barrier() is the memory guaranteed to be freed. */
	rcu_barrier();

	/* Expect use-after-free. */
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test krealloc(). */
static void test_krealloc(struct kunit *test)
{
	const size_t size = 32;
	const struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_krealloc,
		.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
		.is_write = false,
	};
	char *buf = expect.addr;
	int i;

	KUNIT_EXPECT_FALSE(test, test_cache);
	KUNIT_EXPECT_EQ(test, ksize(buf), size); /* Precise size match after KFENCE alloc. */
	for (i = 0; i < size; i++)
		buf[i] = i + 1;

	/* Check that we successfully change the size. */
	buf = krealloc(buf, size * 3, GFP_KERNEL); /* Grow. */
	/* Note: Might no longer be a KFENCE alloc. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 3);
	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));
	for (; i < size * 3; i++) /* Fill the extra bytes. */
		buf[i] = i + 1;

	buf = krealloc(buf, size * 2, GFP_KERNEL); /* Shrink. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 2);
	for (i = 0; i < size * 2; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));

	buf = krealloc(buf, 0, GFP_KERNEL); /* Free. */
	KUNIT_EXPECT_EQ(test, (unsigned long)buf, (unsigned long)ZERO_SIZE_PTR);
	KUNIT_ASSERT_FALSE(test, report_available()); /* No reports yet! */

	READ_ONCE(*expect.addr); /* Ensure krealloc() actually freed earlier KFENCE object. */
	KUNIT_ASSERT_TRUE(test, report_matches(&expect));
}
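
/*
 * Note: growing to size * 3 exceeds ksize() of the KFENCE object, so
 * krealloc() must move the data and free the original KFENCE allocation;
 * the final read of the old address is what is expected to produce the
 * use-after-free report.
 */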

/* Test that some objects from a bulk allocation belong to KFENCE pool. */
static void test_memcache_alloc_bulk(struct kunit *test)
{
	const size_t size = 32;
	bool pass = false;
	unsigned long timeout;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */
	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
	do {
		void *objects[100];
		int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
						   objects);
		if (!num)
			continue;
		for (i = 0; i < ARRAY_SIZE(objects); i++) {
			if (is_kfence_address(objects[i])) {
				pass = true;
				break;
			}
		}
		kmem_cache_free_bulk(test_cache, num, objects);
		/*
		 * kmem_cache_alloc_bulk() disables interrupts, and calling it
		 * in a tight loop may not give KFENCE a chance to switch the
		 * static branch. Call cond_resched() to let KFENCE chime in.
		 */
		cond_resched();
	} while (!pass && time_before(jiffies, timeout));

	KUNIT_EXPECT_TRUE(test, pass);
	KUNIT_EXPECT_FALSE(test, report_available());
}

/*
 * KUnit does not provide a way to pass arguments to tests, so we encode
 * additional info in the test name. Set up two tests per test case: one using
 * the default allocator, and another using a custom memcache (suffix
 * '-memcache').
 */
#define KFENCE_KUNIT_CASE(test_name)						\
	{ .run_case = test_name, .name = #test_name },				\
	{ .run_case = test_name, .name = #test_name "-memcache" }
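
/*
 * For example, KFENCE_KUNIT_CASE(test_double_free) registers both
 * "test_double_free" and "test_double_free-memcache"; test_init() below
 * checks for the "memcache" substring in the test name to decide whether a
 * test cache should be set up.
 */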

static struct kunit_case kfence_test_cases[] = {
	KFENCE_KUNIT_CASE(test_out_of_bounds_read),
	KFENCE_KUNIT_CASE(test_out_of_bounds_write),
	KFENCE_KUNIT_CASE(test_use_after_free_read),
	KFENCE_KUNIT_CASE(test_double_free),
	KFENCE_KUNIT_CASE(test_invalid_addr_free),
	KFENCE_KUNIT_CASE(test_corruption),
	KFENCE_KUNIT_CASE(test_free_bulk),
	KFENCE_KUNIT_CASE(test_init_on_free),
	KUNIT_CASE(test_kmalloc_aligned_oob_read),
	KUNIT_CASE(test_kmalloc_aligned_oob_write),
	KUNIT_CASE(test_shrink_memcache),
	KUNIT_CASE(test_memcache_ctor),
	KUNIT_CASE(test_invalid_access),
	KUNIT_CASE(test_gfpzero),
	KUNIT_CASE(test_memcache_typesafe_by_rcu),
	KUNIT_CASE(test_krealloc),
	KUNIT_CASE(test_memcache_alloc_bulk),
	{},
};

/* ===== End test cases ===== */

static int test_init(struct kunit *test)
{
	unsigned long flags;
	int i;

	if (!__kfence_pool)
		return -EINVAL;

	spin_lock_irqsave(&observed.lock, flags);
	for (i = 0; i < ARRAY_SIZE(observed.lines); i++)
		observed.lines[i][0] = '\0';
	observed.nlines = 0;
	spin_unlock_irqrestore(&observed.lock, flags);

	/* Any test with 'memcache' in its name will want a memcache. */
	if (strstr(test->name, "memcache"))
		test->priv = TEST_PRIV_WANT_MEMCACHE;
	else
		test->priv = NULL;

	return 0;
}

static void test_exit(struct kunit *test)
{
	test_cache_destroy();
}

static int kfence_suite_init(struct kunit_suite *suite)
{
	register_trace_console(probe_console, NULL);
	return 0;
}

static void kfence_suite_exit(struct kunit_suite *suite)
{
	unregister_trace_console(probe_console, NULL);
	tracepoint_synchronize_unregister();
}
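
/*
 * Note: tracepoint_synchronize_unregister() above waits for in-flight calls
 * to probe_console() to finish, so the probe is never invoked after the
 * suite (and its observed state) is gone.
 */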

static struct kunit_suite kfence_test_suite = {
	.name = "kfence",
	.test_cases = kfence_test_cases,
	.init = test_init,
	.exit = test_exit,
	.suite_init = kfence_suite_init,
	.suite_exit = kfence_suite_exit,
};

kunit_test_suites(&kfence_test_suite);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alexander Potapenko <glider@google.com>, Marco Elver <elver@google.com>");