// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

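/*
 * Return the slab that an address points into, or NULL if the address does
 * not belong to a slab page.
 */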
struct slab *kasan_addr_to_slab(const void *addr)
{
	if (virt_addr_valid(addr))
		return virt_to_slab(addr);
	return NULL;
}

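/*
 * Capture the current stack trace and save it in the stack depot. Depot
 * memory is allocated with @flags, and only when @can_alloc is true.
 */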
depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return __stack_depot_save(entries, nr_entries, flags, can_alloc);
}

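/* Record the current task's PID and stack trace in @track. */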
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags, true);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}
EXPORT_SYMBOL(kasan_enable_current);

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
EXPORT_SYMBOL(kasan_disable_current);

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

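/* Mark a range of memory as accessible (unpoisoned). */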
void __kasan_unpoison_range(const void *address, size_t size)
{
	kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */

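/*
 * Unpoison freshly allocated pages and, in the tag-based modes, assign them a
 * random tag. Returns false if the pages were skipped (highmem or not picked
 * by page_alloc sampling), true otherwise.
 */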
bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return false;

	if (!kasan_sample_page_alloc(order))
		return false;

	tag = kasan_random_tag();
	kasan_unpoison(set_tag(page_address(page), tag),
		       PAGE_SIZE << order, init);
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);

	return true;
}

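/* Poison pages that are being freed back to the page allocator. */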
void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_PAGE_FREE, init);
}

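/*
 * Reset the KASAN tag of every page backing a new slab and poison the whole
 * slab as a redzone; objects are unpoisoned individually when allocated.
 */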
void __kasan_poison_slab(struct slab *slab)
{
	struct page *page = slab_page(slab);
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_SLAB_REDZONE, false);
}

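/* Unpoison the payload of a single slab object. */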
void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}

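/* Poison the payload of a single slab object, rounded up to granule size. */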
void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator, tags can't be preassigned randomly, since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on object indexes, so that objects that are next to
 *    each other get different tags.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
					const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB, assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_slab(object), (void *)object);
#else
	/*
	 * For SLUB, assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
#endif
}

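/* Initialize a new slab object's KASAN metadata and preassign its tag. */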
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	/* Initialize per-object metadata if it is present. */
	if (kasan_requires_meta())
		kasan_init_object_meta(cache, object);

	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}

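/*
 * Common slab free hook: report invalid and double frees, poison the object,
 * and return true if the object must not be passed on to the slab freeing
 * path, either because the free was invalid or because the object was placed
 * into the KASAN quarantine.
 */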
static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool quarantine, bool init)
{
	void *tagged_object;

	if (!kasan_arch_is_ready())
		return false;

	tagged_object = object;
	object = kasan_reset_tag(object);

	if (is_kfence_address(object))
		return false;

	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	/* RCU slabs can legally be used after free within the RCU grace period. */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_FREE, init);

	if (IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine)
		return false;

	if (kasan_stack_collection_enabled())
		kasan_save_free_info(cache, tagged_object);

	return kasan_quarantine_put(cache, object);
}

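/* Slab free hook; unlike the mempool path below, quarantining is allowed. */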
bool __kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool init)
{
	return ____kasan_slab_free(cache, object, ip, true, init);
}

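/*
 * Check a large (page_alloc-backed) allocation that is being freed. Returns
 * true if the free is invalid (wrong pointer or double-free) and has been
 * reported.
 */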
static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (!kasan_arch_is_ready())
		return false;

	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	if (!kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	/*
	 * The object will be poisoned by kasan_poison_pages() or
	 * kasan_slab_free_mempool().
	 */

	return false;
}

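/* Free hook for large (page_alloc-backed) kmalloc allocations. */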
void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	____kasan_kfree_large(ptr, ip);
}

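/*
 * Free hook for mempool elements, which can be backed either by slab objects
 * or, for large kmalloc sizes, directly by page_alloc.
 */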
void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	struct folio *folio;

	folio = virt_to_folio(ptr);

	/*
	 * Even though this function is only called for kmem_cache_alloc and
	 * kmalloc backed mempool allocations, those allocations can still be
	 * !PageSlab() when the size provided to kmalloc is larger than
	 * KMALLOC_MAX_CACHE_SIZE, and kmalloc falls back onto page_alloc.
	 */
	if (unlikely(!folio_test_slab(folio))) {
		if (____kasan_kfree_large(ptr, ip))
			return;
		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
	} else {
		struct slab *slab = folio_slab(folio);

		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
	}
}

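/*
 * Allocation hook for slab objects: assign a tag (in the tag-based modes),
 * unpoison the object, and save the alloc stack trace for non-kmalloc caches.
 */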
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
					void *object, gfp_t flags, bool init)
{
	u8 tag;
	void *tagged_object;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Generate and assign random tag for tag-based modes.
	 * Tag is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/*
	 * Unpoison the whole object.
	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
	 */
	kasan_unpoison(tagged_object, cache->object_size, init);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
		kasan_save_alloc_info(cache, tagged_object, flags);

	return tagged_object;
}

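/*
 * Common kmalloc()/krealloc() hook: poison the redzone between the requested
 * size and the end of the object, and save the alloc stack trace for kmalloc
 * caches.
 */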
static inline void *____kasan_kmalloc(struct kmem_cache *cache,
				const void *object, size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(kasan_reset_tag(object)))
		return (void *)object;

	/*
	 * The object has already been unpoisoned by kasan_slab_alloc() for
	 * kmalloc() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
				KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
			   KASAN_SLAB_REDZONE, false);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
		kasan_save_alloc_info(cache, (void *)object, flags);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
					size_t size, gfp_t flags)
{
	return ____kasan_kmalloc(cache, object, size, flags);
}
EXPORT_SYMBOL(__kasan_kmalloc);

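/*
 * kmalloc() hook for large (page_alloc-backed) allocations: poison the unused
 * tail of the pages beyond the requested size as a redzone.
 */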
void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/*
	 * The object has already been unpoisoned by kasan_unpoison_pages() for
	 * alloc_pages() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE, false);

	return (void *)ptr;
}

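/*
 * krealloc() hook: unpoison the object up to its new size and redo redzone
 * poisoning via the kmalloc hooks above.
 */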
void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct slab *slab;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	/*
	 * Unpoison the object's data.
	 * Part of it might already have been unpoisoned, but it's unknown
	 * how big that part is.
	 */
	kasan_unpoison(object, size, false);

	slab = virt_to_slab(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!slab))
		return __kasan_kmalloc_large(object, size, flags);
	else
		return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
}

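/*
 * Check that the byte at @address is accessible; if not, report the bad
 * access and return false.
 */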
bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report(address, 1, false, ip);
		return false;
	}
	return true;
}