// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests relating directly to heap memory, including
 * page allocation and slab allocations.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>

static struct kmem_cache *double_free_cache;
static struct kmem_cache *a_cache;
static struct kmem_cache *b_cache;

/*
 * Using volatile here means the compiler cannot ever make assumptions
 * about this value. This means compile-time length checks involving
 * this variable cannot be performed; only run-time checks.
 */
static volatile int __offset = 1;
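/*
 * For example, were the overflow length a compile-time constant
 * ("PAGE_SIZE + 1"), compile-time bounds checking (e.g. FORTIFY_SOURCE
 * combined with the allocators' __alloc_size hints) could flag or
 * elide the bad memset() before the test ever ran; forcing a run-time
 * read of __offset defeats that.
 */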

/*
 * If there aren't guard pages, it's likely that a consecutive allocation will
 * let us overflow into the second allocation without overwriting something real.
 *
 * This should always be caught because there is an unconditional unmapped
 * page after vmap allocations.
 */
static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
{
	char *one, *two;

	one = vzalloc(PAGE_SIZE);
	two = vzalloc(PAGE_SIZE);
	if (!one || !two) {
		pr_info("Unable to allocate vmalloc buffers.\n");
		goto out;
	}

	pr_info("Attempting vmalloc linear overflow ...\n");
	memset(one, 0xAA, PAGE_SIZE + __offset);

out:
	vfree(two);
	vfree(one);
}

/*
 * This tries to stay within the next largest power-of-2 kmalloc cache
 * to avoid actually overwriting anything important if it's not detected
 * correctly.
 *
 * This should get caught by either memory tagging, KASan, or by using
 * CONFIG_SLUB_DEBUG=y and slub_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
 */
static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
{
	size_t len = 1020;
	u32 *data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return;

	pr_info("Attempting slab linear overflow ...\n");
	OPTIMIZER_HIDE_VAR(data);
	data[1024 / sizeof(u32)] = 0x12345678;
	kfree(data);
}

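/*
 * Write to a slab allocation after it has been freed. A poisoning or
 * tagging mechanism (e.g. KASAN, or slub_debug=P) should detect the
 * stale write; the reallocation below also tries to observe it.
 */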
static void lkdtm_WRITE_AFTER_FREE(void)
{
	int *base, *again;
	size_t len = 1024;
	/*
	 * The slub allocator uses the first word to store the free
	 * pointer in some configurations. Use the middle of the
	 * allocation to avoid running into the freelist.
	 */
	size_t offset = (len / sizeof(*base)) / 2;

	base = kmalloc(len, GFP_KERNEL);
	if (!base)
		return;
	pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
	pr_info("Attempting bad write to freed memory at %p\n",
		&base[offset]);
	kfree(base);
	base[offset] = 0x0abcdef0;
	/* Attempt to notice the overwrite. */
	again = kmalloc(len, GFP_KERNEL);
	kfree(again);
	if (again != base)
		pr_info("Hmm, didn't get the same memory range.\n");
}

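/*
 * Read from a slab allocation after it has been freed. With
 * init_on_free (or poisoning), the value stored before the kfree()
 * should no longer be visible afterwards; seeing it back is a failure.
 */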
static void lkdtm_READ_AFTER_FREE(void)
{
	int *base, *val, saw;
	size_t len = 1024;
	/*
	 * The slub allocator will use either the first word or
	 * the middle of the allocation to store the free pointer,
	 * depending on configuration. Store a few words into the
	 * allocation to avoid running into the freelist.
	 */
	size_t offset = sizeof(*base); /* used as an index, i.e. word 4 */

	base = kmalloc(len, GFP_KERNEL);
	if (!base) {
		pr_info("Unable to allocate base memory.\n");
		return;
	}

	val = kmalloc(len, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		kfree(base);
		return;
	}

	*val = 0x12345678;
	base[offset] = *val;
	pr_info("Value in memory before free: %x\n", base[offset]);

	kfree(base);

	pr_info("Attempting bad read from freed memory\n");
	saw = base[offset];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Memory was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}

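/*
 * Write to a page allocator ("buddy") page after it has been freed
 * back. The schedule() calls give the rest of the system a chance to
 * reuse or check the page, making the stomp more likely to be noticed.
 */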
static void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	pr_info("Writing to the buddy page before free\n");
	memset((void *)p, 0x3, PAGE_SIZE);
	free_page(p);
	schedule();
	pr_info("Attempting bad write to the buddy page after free\n");
	memset((void *)p, 0x78, PAGE_SIZE);
	/* Attempt to notice the overwrite. */
	p = __get_free_page(GFP_KERNEL);
	free_page(p);
	schedule();
}

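/*
 * Read from a buddy page after it has been freed. As with
 * READ_AFTER_FREE, seeing the original value back means no poisoning
 * or init_on_free wiping took place.
 */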
static void lkdtm_READ_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	int saw, *val;
	int *base;

	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	val = kmalloc(1024, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		free_page(p);
		return;
	}

	base = (int *)p;

	*val = 0x12345678;
	base[0] = *val;
	pr_info("Value in memory before free: %x\n", base[0]);
	free_page(p);
	pr_info("Attempting to read from freed memory\n");
	saw = base[0];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Buddy page was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}

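/*
 * Check that freshly kmalloc()ed memory does not still hold a previous
 * user's contents, i.e. that init_on_alloc (or equivalent) wipes slab
 * objects on allocation.
 */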
static void lkdtm_SLAB_INIT_ON_ALLOC(void)
{
	u8 *first;
	u8 *val;

	first = kmalloc(512, GFP_KERNEL);
	if (!first) {
		pr_info("Unable to allocate 512 bytes the first time.\n");
		return;
	}

	memset(first, 0xAB, 512);
	kfree(first);

	val = kmalloc(512, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate 512 bytes the second time.\n");
		return;
	}
	if (val != first)
		pr_warn("Reallocation missed clobbered memory.\n");

	if (memchr(val, 0xAB, 512) == NULL) {
		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
	} else {
		pr_err("FAIL: Slab was not initialized\n");
		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
	}
	kfree(val);
}

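/*
 * Same check as SLAB_INIT_ON_ALLOC, but for whole pages from the
 * page ("buddy") allocator.
 */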
static void lkdtm_BUDDY_INIT_ON_ALLOC(void)
{
	u8 *first;
	u8 *val;

	first = (u8 *)__get_free_page(GFP_KERNEL);
	if (!first) {
		pr_info("Unable to allocate first free page\n");
		return;
	}

	memset(first, 0xAB, PAGE_SIZE);
	free_page((unsigned long)first);

	val = (u8 *)__get_free_page(GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate second free page\n");
		return;
	}

	if (val != first)
		pr_warn("Reallocation missed clobbered memory.\n");

	if (memchr(val, 0xAB, PAGE_SIZE) == NULL) {
		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
	} else {
		pr_err("FAIL: Page was not initialized\n");
		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
	}
	free_page((unsigned long)val);
}

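/*
 * Free the same slab object twice. A dedicated, unmergeable cache
 * keeps the damage away from unrelated allocations and should make
 * detection (e.g. CONFIG_SLAB_FREELIST_HARDENED's double-free check)
 * more reliable.
 */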
static void lkdtm_SLAB_FREE_DOUBLE(void)
{
	int *val;

	val = kmem_cache_alloc(double_free_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate double_free_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345678;
	pr_info("Attempting double slab free ...\n");
	kmem_cache_free(double_free_cache, val);
	kmem_cache_free(double_free_cache, val);
}

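/*
 * Allocate from one cache and free the object to a different one. The
 * mismatch should be caught at free time when sanity checking is
 * enabled (e.g. CONFIG_SLAB_FREELIST_HARDENED or slub_debug).
 */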
static void lkdtm_SLAB_FREE_CROSS(void)
{
	int *val;

	val = kmem_cache_alloc(a_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate a_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345679;
	pr_info("Attempting cross-cache slab free ...\n");
	kmem_cache_free(b_cache, val);
}

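/*
 * Hand a page allocator page to kmem_cache_free(). The allocator
 * should notice that the address does not belong to a slab at all.
 */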
static void lkdtm_SLAB_FREE_PAGE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);

	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	pr_info("Attempting non-Slab slab free ...\n");
	kmem_cache_free(NULL, (void *)p);
	free_page(p);
}

/*
 * We have constructors to keep the caches distinctly separated without
 * needing to boot with "slab_nomerge".
 */
static void ctor_double_free(void *region)
{ }
static void ctor_a(void *region)
{ }
static void ctor_b(void *region)
{ }

void __init lkdtm_heap_init(void)
{
	double_free_cache = kmem_cache_create("lkdtm-heap-double_free",
					      64, 0, 0, ctor_double_free);
	a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a);
	b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b);
}

void __exit lkdtm_heap_exit(void)
{
	kmem_cache_destroy(double_free_cache);
	kmem_cache_destroy(a_cache);
	kmem_cache_destroy(b_cache);
}

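/*
 * Table of heap crash types; the lkdtm core walks this to expose each
 * test as a selectable crash type.
 */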
static struct crashtype crashtypes[] = {
	CRASHTYPE(SLAB_LINEAR_OVERFLOW),
	CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
	CRASHTYPE(WRITE_AFTER_FREE),
	CRASHTYPE(READ_AFTER_FREE),
	CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
	CRASHTYPE(READ_BUDDY_AFTER_FREE),
	CRASHTYPE(SLAB_INIT_ON_ALLOC),
	CRASHTYPE(BUDDY_INIT_ON_ALLOC),
	CRASHTYPE(SLAB_FREE_DOUBLE),
	CRASHTYPE(SLAB_FREE_CROSS),
	CRASHTYPE(SLAB_FREE_PAGE),
};

struct crashtype_category heap_crashtypes = {
	.crashtypes = crashtypes,
	.len	    = ARRAY_SIZE(crashtypes),
};