// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

#include <asm/page.h>

#include <kunit/test.h>

#include "kasan.h"

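/*
 * Tag-based KASAN modes detect an out-of-bounds access only once it crosses
 * a granule boundary, so several tests below offset their accesses by
 * OOB_TAG_OFF to land past the granule-aligned object. Generic KASAN tracks
 * object sizes precisely and needs no extra offset.
 */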
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
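 * For example, the string tests below assign "kasan_int_result = strlen(ptr)"
 * inside KUNIT_EXPECT_KASAN_FAIL() so that the call is not optimized away.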
 */
void *kasan_ptr_result;
int kasan_int_result;

static struct kunit_resource resource;
static struct kunit_kasan_status test_status;
static bool multishot;

/*
 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
 * first detected bug and panic the kernel if panic_on_warn is enabled. For
 * hardware tag-based KASAN, also allow tag checking to be reenabled for each
 * test; see the comment for KUNIT_EXPECT_KASAN_FAIL().
 */
static int kasan_test_init(struct kunit *test)
{
	if (!kasan_enabled()) {
		kunit_err(test, "can't run KASAN tests with KASAN disabled");
		return -1;
	}

	multishot = kasan_save_enable_multi_shot();
	test_status.report_found = false;
	test_status.sync_fault = false;
	kunit_add_named_resource(test, NULL, NULL, &resource,
					"kasan_status", &test_status);
	return 0;
}

static void kasan_test_exit(struct kunit *test)
{
	kasan_restore_multi_shot(multishot);
	KUNIT_EXPECT_FALSE(test, test_status.report_found);
}

/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_status". Do not use this name for KUnit resources
 * outside of KASAN tests.
 *
 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can only be disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the test_status
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 *
 * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
 * as false. This allows detecting KASAN reports that happen outside of the
 * checks by asserting !test_status.report_found at the start of
 * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
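 *
 * Example (mirroring the kmalloc tests below):
 *
 *	ptr = kmalloc(size, GFP_KERNEL);
 *	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');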
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    kasan_sync_fault_possible())				\
		migrate_disable();					\
	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));	\
	barrier();							\
	expression;							\
	barrier();							\
	if (kasan_async_fault_possible())				\
		kasan_force_async_fault();				\
	if (!READ_ONCE(test_status.report_found)) {			\
		KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "	\
				"expected in \"" #expression		\
				"\", but none occurred");		\
	}								\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    kasan_sync_fault_possible()) {				\
		if (READ_ONCE(test_status.report_found) &&		\
		    READ_ONCE(test_status.sync_fault))			\
			kasan_enable_tagging();				\
		migrate_enable();					\
	}								\
	WRITE_ONCE(test_status.report_found, false);			\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config))					\
		kunit_skip((test), "Test requires " #config "=y");	\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config))						\
		kunit_skip((test), "Test requires " #config "=n");	\
} while (0)

static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	/*
	 * An unaligned access past the requested kmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

	/*
	 * An aligned access into the first out-of-bounds granule that falls
	 * within the aligned kmalloc object.
	 */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');

	/* Out-of-bounds access past the aligned kmalloc object. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
					ptr[size + KASAN_GRANULE_SIZE + 5]);

	kfree(ptr);
}

static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}

static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	kfree(ptr);
}

/*
 * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
 * fit into a slab cache and therefore is allocated via the page allocator
 * fallback. Since this kind of fallback is only implemented for SLUB, these
 * tests are limited to that allocator.
 */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);

	kfree(ptr);
}

static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;
	size_t size = (1UL << (PAGE_SHIFT + order));

	/*
	 * With generic KASAN, page allocations have no redzones, thus
	 * out-of-bounds detection is not guaranteed.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	free_pages((unsigned long)ptr, order);
}

static void pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	free_pages((unsigned long)ptr, order);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	/*
	 * Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

static void krealloc_more_oob_helper(struct kunit *test,
				     size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size1, size2);
	middle = size1 + (size2 - size1) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Suppress -Warray-bounds warnings. */
	OPTIMIZER_HIDE_VAR(ptr2);

	/* All offsets up to size2 must be accessible. */
	ptr2[size1 - 1] = 'x';
	ptr2[size1] = 'x';
	ptr2[middle] = 'x';
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes, the first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	kfree(ptr2);
}

static void krealloc_less_oob_helper(struct kunit *test,
				     size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size2, size1);
	middle = size2 + (size1 - size2) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Suppress -Warray-bounds warnings. */
	OPTIMIZER_HIDE_VAR(ptr2);

	/* Must be accessible for all modes. */
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes, the first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	/*
	 * For all modes, size2, middle, and size1 should land in separate
	 * granules and thus the latter two offsets should be inaccessible.
	 */
	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
				round_down(middle, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
				round_down(size1, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

	kfree(ptr2);
}

static void krealloc_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, 201, 235);
}

static void krealloc_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, 235, 201);
}

static void krealloc_pagealloc_more_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
					KMALLOC_MAX_CACHE_SIZE + 235);
}

static void krealloc_pagealloc_less_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
					KMALLOC_MAX_CACHE_SIZE + 201);
}

/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
	char *ptr1, *ptr2;
	int size1 = 201;
	int size2 = 235;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
	KUNIT_ASSERT_NULL(test, ptr2);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}

static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	OPTIMIZER_HIDE_VAR(ptr1);
	OPTIMIZER_HIDE_VAR(ptr2);
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}

static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}

/*
 * Note: in the memset tests below, the written range touches both valid and
 * invalid memory. This makes sure that the instrumentation checks the whole
 * range and not only the starting address.
 */

static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2));
	kfree(ptr);
}

static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4));
	kfree(ptr);
}

static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8));
	kfree(ptr);
}

static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16));
	kfree(ptr);
}

static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
				memset(ptr, 0, size + KASAN_GRANULE_SIZE));
	kfree(ptr);
}

static void kmalloc_memmove_negative_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	size_t invalid_size = -2;

	/*
	 * Hardware tag-based mode doesn't check memmove for negative size.
	 * As a result, this test introduces memory corruption as a side
	 * effect, which can result in a crash.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	size_t invalid_size = size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
}

static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	/*
	 * Only generic KASAN uses quarantine, which is required to avoid a
	 * kernel memory corruption this test causes.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}

static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN, ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}

/*
 * Check that KASAN detects use-after-free when another object was allocated in
 * the same slot. Relevant for the tag-based modes, which do not use quarantine.
 */
static void kmalloc_uaf3(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 100;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
}

static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}

static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}

static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per-memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}

static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}

static char global_array[10];

static void kasan_global_oob_right(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicking the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * refers to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_global_oob_left(struct kunit *test)
{
	char *volatile array = global_array;
	char *p = array - 3;

	/*
	 * GCC is known to fail this test, skip it.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=215051.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

/* Check that ksize() makes the whole object accessible. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 123, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	real_size = ksize(ptr);

	OPTIMIZER_HIDE_VAR(ptr);

	/* This access shouldn't trigger a KASAN report. */
	ptr[size] = 'x';

	/* This one must. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);

	kfree(ptr);
}

/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
}

static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}

static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger an invalid free; the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

static void empty_cache_ctor(void *object) { }

static void kmem_cache_double_destroy(struct kunit *test)
{
	struct kmem_cache *cache;

	/* Provide a constructor to prevent cache merging. */
	cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
	kmem_cache_destroy(cache);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
}

static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}

static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size + 1));
	kfree(ptr);
}

static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only one invalid access (less spam in dmesg).
	 * For that, ptr needs to point to a zeroed byte.
	 * Skip the metadata that could be stored in the freed object, so that
	 * ptr will likely point to a zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}

static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}

static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
		clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}

static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * The calls below try to access a bit within the allocated memory;
	 * however, they are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * The calls below try to access a bit beyond the allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}

static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}

static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}

static void vmalloc_helpers_tags(struct kunit *test)
{
	void *ptr;

	/* This test is intended for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	ptr = vmalloc(PAGE_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Check that the returned pointer is tagged. */
	KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure exported vmalloc helpers handle tagged pointers. */
	KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));

#if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
	{
		int rv;

		/* Make sure vmalloc'ed memory permissions can be changed. */
		rv = set_memory_ro((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
		rv = set_memory_rw((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
	}
#endif

	vfree(ptr);
}

static void vmalloc_oob(struct kunit *test)
{
	char *v_ptr, *p_ptr;
	struct page *page;
	size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	v_ptr = vmalloc(size);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	OPTIMIZER_HIDE_VAR(v_ptr);

	/*
	 * We have to be careful not to hit the guard page in vmalloc tests.
	 * The MMU will catch that and crash us.
	 */

	/* Make sure in-bounds accesses are valid. */
	v_ptr[0] = 0;
	v_ptr[size - 1] = 0;

	/*
	 * An unaligned access past the requested vmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);

	/* An aligned access into the first out-of-bounds granule. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);

	/* Check that in-bounds accesses to the physical page are valid. */
	page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
	p_ptr[0] = 0;

	vfree(v_ptr);

	/*
	 * We can't check for use-after-unmap bugs in this or the following
	 * vmalloc tests, as the page might be fully unmapped and accessing it
	 * will crash the kernel.
	 */
}

static void vmap_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *p_page, *v_page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vmap mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	p_page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
	p_ptr = page_address(p_page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	/*
	 * We can't check for out-of-bounds bugs in this or the following
	 * vmalloc tests, as allocations have page granularity and accessing
	 * the guard page will crash the kernel.
	 */

	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	/* Make sure vmalloc_to_page() correctly recovers the page pointer. */
	v_page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
	KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);

	vunmap(v_ptr);
	free_pages((unsigned long)p_ptr, 1);
}

static void vm_map_ram_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vm_map_ram mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vm_map_ram(&page, 1, -1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	vm_unmap_ram(v_ptr, 1);
	free_pages((unsigned long)p_ptr, 1);
}

static void vmalloc_percpu(struct kunit *test)
{
	char __percpu *ptr;
	int cpu;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons percpu mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);

	for_each_possible_cpu(cpu) {
		char *c_ptr = per_cpu_ptr(ptr, cpu);

		KUNIT_EXPECT_GE(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_KERNEL);

		/* Make sure that in-bounds accesses don't crash the kernel. */
		*c_ptr = 0;
	}

	free_percpu(ptr);
}

/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	for (i = 0; i < 256; i++) {
		size = prandom_u32_max(1024) + 1;
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	for (i = 0; i < 256; i++) {
		order = prandom_u32_max(4) + 1;
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}

	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
		return;

	for (i = 0; i < 256; i++) {
		size = prandom_u32_max(1024) + 1;
		ptr = vmalloc(size);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		vfree(ptr);
	}
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Back up the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff. */
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}

static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(pagealloc_oob_right),
	KUNIT_CASE(pagealloc_uaf),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_pagealloc_more_oob),
	KUNIT_CASE(krealloc_pagealloc_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_negative_size),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kmalloc_uaf3),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(kasan_global_oob_right),
	KUNIT_CASE(kasan_global_oob_left),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kmem_cache_double_destroy),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(vmalloc_helpers_tags),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(vmap_tags),
	KUNIT_CASE(vm_map_ram_tags),
	KUNIT_CASE(vmalloc_percpu),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}
};

static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.init = kasan_test_init,
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");