// SPDX-License-Identifier: GPL-2.0
/*
 * kaslr.c
 *
 * This contains the routines needed to generate a reasonable level of
 * entropy to choose a randomized kernel base address offset in support
 * of Kernel Address Space Layout Randomization (KASLR). Additionally
 * handles walking the physical memory maps (and tracking memory regions
 * to avoid) in order to select a physical memory location that can
 * contain the entire properly aligned running kernel image.
 *
 */

/*
 * next_arg() expects isspace() from linux/ctype.h to filter out
 * space/LF/tab. boot/ctype.h conflicts with linux/ctype.h, since
 * isdigit() is implemented in both of them, so disable the boot
 * version here.
 */
#define BOOT_CTYPE_H

#include "misc.h"
#include "error.h"
#include "../string.h"
#include "efi.h"

#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <linux/ctype.h>
#include <generated/utsversion.h>
#include <generated/utsrelease.h>

#define _SETUP
#include <asm/setup.h>	/* For COMMAND_LINE_SIZE */
#undef _SETUP

extern unsigned long get_cmd_line_ptr(void);

/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;

static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	size_t i;
	unsigned long *ptr = (unsigned long *)area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}
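
/*
 * Worked example (illustrative, assuming a 64-bit build where
 * sizeof(hash) * 8 == 64): each iteration computes
 * hash = (hash << 57) | (hash >> 7), i.e. a right-rotate by 7 bits,
 * before XORing in the next word. Rotating by an amount coprime to
 * the word size lets every input bit influence every hash bit as
 * successive words are mixed in.
 */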

/* Attempt to create a simple but unpredictable starting entropy. */
static unsigned long get_boot_seed(void)
{
	unsigned long hash = 0;

	hash = rotate_xor(hash, build_str, sizeof(build_str));
	hash = rotate_xor(hash, boot_params, sizeof(*boot_params));

	return hash;
}

#define KASLR_COMPRESSED_BOOT
#include "../../lib/kaslr.c"

/* Support at most 4 unusable memmap= regions with KASLR */
#define MAX_MEMMAP_REGIONS	4

static bool memmap_too_large;

/*
 * Store memory limit: MAXMEM on 64-bit and KERNEL_IMAGE_SIZE on 32-bit.
 * It may be reduced by "mem=nn[KMG]" or "memmap=nn[KMG]" command line options.
 */
static u64 mem_limit;

/* Number of immovable memory regions */
static int num_immovable_mem;

enum mem_avoid_index {
	MEM_AVOID_ZO_RANGE = 0,
	MEM_AVOID_INITRD,
	MEM_AVOID_CMDLINE,
	MEM_AVOID_BOOTPARAMS,
	MEM_AVOID_MEMMAP_BEGIN,
	MEM_AVOID_MEMMAP_END = MEM_AVOID_MEMMAP_BEGIN + MAX_MEMMAP_REGIONS - 1,
	MEM_AVOID_MAX,
};

static struct mem_vector mem_avoid[MEM_AVOID_MAX];

static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
{
	/* Item one is entirely before item two. */
	if (one->start + one->size <= two->start)
		return false;
	/* Item one is entirely after item two. */
	if (one->start >= two->start + two->size)
		return false;
	return true;
}
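
/*
 * Example: [0x1000, 0x2000) and [0x1800, 0x2800) overlap, while
 * [0x1000, 0x2000) and [0x2000, 0x3000) do not -- the vectors are
 * treated as half-open ranges, so merely touching is not an overlap.
 */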

char *skip_spaces(const char *str)
{
	while (isspace(*str))
		++str;
	return (char *)str;
}
#include "../../../../lib/ctype.c"
#include "../../../../lib/cmdline.c"

enum parse_mode {
	PARSE_MEMMAP,
	PARSE_EFI,
};

static int
parse_memmap(char *p, u64 *start, u64 *size, enum parse_mode mode)
{
	char *oldp;

	if (!p)
		return -EINVAL;

	/* We don't care about this option here */
	if (!strncmp(p, "exactmap", 8))
		return -EINVAL;

	oldp = p;
	*size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	switch (*p) {
	case '#':
	case '$':
	case '!':
		*start = memparse(p + 1, &p);
		return 0;
	case '@':
		if (mode == PARSE_MEMMAP) {
			/*
			 * memmap=nn@ss specifies a usable region and
			 * should be skipped.
			 */
			*size = 0;
		} else {
			u64 flags;

			/*
			 * For efi_fake_mem=nn@ss:attr, the attr
			 * specifies flags that may imply a soft
			 * reservation.
			 */
			*start = memparse(p + 1, &p);
			if (p && *p == ':') {
				p++;
				if (kstrtoull(p, 0, &flags) < 0)
					*size = 0;
				else if (flags & EFI_MEMORY_SP)
					return 0;
			}
			*size = 0;
		}
		fallthrough;
	default:
		/*
		 * Without an offset (only a size specified),
		 * memmap=nn[KMG] behaves like mem=nn[KMG]: it limits
		 * the maximum address the system can use, and any
		 * region above the limit must be avoided.
		 */
		*start = 0;
		return 0;
	}

	return -EINVAL;
}
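
/*
 * Illustrative inputs (values hypothetical):
 *
 *   "512M@0x10000000" - usable region; under PARSE_MEMMAP it is
 *                       skipped (*size is cleared, *start = 0).
 *   "64M$0x20000000"  - reserved region; *size = 64M and
 *                       *start = 0x20000000, which the caller records
 *                       in mem_avoid[].
 *   "128M"            - size without offset; behaves like mem=128M,
 *                       so *start = 0 and *size acts as a memory limit.
 */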

static void mem_avoid_memmap(enum parse_mode mode, char *str)
{
	static int i;

	if (i >= MAX_MEMMAP_REGIONS)
		return;

	while (str && (i < MAX_MEMMAP_REGIONS)) {
		int rc;
		u64 start, size;
		char *k = strchr(str, ',');

		if (k)
			*k++ = 0;

		rc = parse_memmap(str, &start, &size, mode);
		if (rc < 0)
			break;
		str = k;

		if (start == 0) {
			/* Store the specified memory limit if size > 0 */
			if (size > 0 && size < mem_limit)
				mem_limit = size;

			continue;
		}

		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].start = start;
		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].size = size;
		i++;
	}

	/* More than MAX_MEMMAP_REGIONS memmap= regions: fail KASLR */
	if ((i >= MAX_MEMMAP_REGIONS) && str)
		memmap_too_large = true;
}

/* Store the number of 1GB huge pages which users specified: */
static unsigned long max_gb_huge_pages;

static void parse_gb_huge_pages(char *param, char *val)
{
	static bool gbpage_sz;
	char *p;

	if (!strcmp(param, "hugepagesz")) {
		p = val;
		if (memparse(p, &p) != PUD_SIZE) {
			gbpage_sz = false;
			return;
		}

		if (gbpage_sz)
			warn("Repeatedly set hugeTLB page size of 1G!\n");
		gbpage_sz = true;
		return;
	}

	if (!strcmp(param, "hugepages") && gbpage_sz) {
		p = val;
		max_gb_huge_pages = simple_strtoull(p, &p, 0);
		return;
	}
}
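
/*
 * Example (hypothetical cmdline): "hugepagesz=1G hugepages=4" arrives
 * here as two param/val pairs. The first sets gbpage_sz because
 * memparse("1G") == PUD_SIZE; the second then records
 * max_gb_huge_pages = 4. With "hugepagesz=2M hugepages=4" instead,
 * gbpage_sz is cleared and the page count is ignored here.
 */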

static void handle_mem_options(void)
{
	char *args = (char *)get_cmd_line_ptr();
	size_t len;
	char *tmp_cmdline;
	char *param, *val;
	u64 mem_size;

	if (!args)
		return;

	len = strnlen(args, COMMAND_LINE_SIZE-1);
	tmp_cmdline = malloc(len + 1);
	if (!tmp_cmdline)
		error("Failed to allocate space for tmp_cmdline");

	memcpy(tmp_cmdline, args, len);
	tmp_cmdline[len] = 0;
	args = tmp_cmdline;

	/* Chew leading spaces */
	args = skip_spaces(args);

	while (*args) {
		args = next_arg(args, &param, &val);
		/* Stop at -- */
		if (!val && strcmp(param, "--") == 0)
			break;

		if (!strcmp(param, "memmap")) {
			mem_avoid_memmap(PARSE_MEMMAP, val);
		} else if (IS_ENABLED(CONFIG_X86_64) && strstr(param, "hugepages")) {
			parse_gb_huge_pages(param, val);
		} else if (!strcmp(param, "mem")) {
			char *p = val;

			if (!strcmp(p, "nopentium"))
				continue;
			mem_size = memparse(p, &p);
			if (mem_size == 0)
				break;

			if (mem_size < mem_limit)
				mem_limit = mem_size;
		} else if (!strcmp(param, "efi_fake_mem")) {
			mem_avoid_memmap(PARSE_EFI, val);
		}
	}

	free(tmp_cmdline);
}

/*
 * In theory, KASLR can put the kernel anywhere in the range of [16M, MAXMEM)
 * on 64-bit, and [16M, KERNEL_IMAGE_SIZE) on 32-bit.
 *
 * The mem_avoid array is used to store the ranges that need to be avoided
 * when KASLR searches for an appropriate random address. We must avoid any
 * regions that are unsafe to overlap with during decompression, and other
 * things like the initrd, cmdline and boot_params. This comment seeks to
 * explain mem_avoid as clearly as possible since incorrect mem_avoid
 * memory ranges lead to really hard to debug boot failures.
 *
 * The initrd, cmdline, and boot_params are trivial to identify for
 * avoiding. They are MEM_AVOID_INITRD, MEM_AVOID_CMDLINE, and
 * MEM_AVOID_BOOTPARAMS respectively below.
 *
 * What is not so obvious is how to avoid the range of memory that is
 * used during decompression (MEM_AVOID_ZO_RANGE below). This range must
 * cover the compressed kernel (ZO) and its run space, which is used to
 * extract the uncompressed kernel (VO) and relocs.
 *
 * ZO's full run size sits against the end of the decompression buffer, so
 * we can calculate where text, data, bss, etc of ZO are positioned more
 * easily.
 *
 * For additional background, the decompression calculations can be found
 * in header.S, and the memory diagram is based on the one found in misc.c.
 *
 * The following conditions are already enforced by the image layouts and
 * associated code:
 *  - input + input_size >= output + output_size
 *  - kernel_total_size <= init_size
 *  - kernel_total_size <= output_size (see Note below)
 *  - output + init_size >= output + output_size
 *
 * (Note that kernel_total_size and output_size have no fundamental
 * relationship, but output_size is passed to choose_random_location
 * as a maximum of the two. The diagram is showing a case where
 * kernel_total_size is larger than output_size, but this case is
 * handled by bumping output_size.)
 *
 * The above conditions can be illustrated by a diagram:
 *
 * 0   output            input            input+input_size    output+init_size
 * |     |                 |                             |             |
 * |     |                 |                             |             |
 * |-----|--------|--------|--------------|-----------|--|-------------|
 *                |                       |           |
 *                |                       |           |
 * output+init_size-ZO_INIT_SIZE  output+output_size  output+kernel_total_size
 *
 * [output, output+init_size) is the entire memory range used for
 * extracting the compressed image.
 *
 * [output, output+kernel_total_size) is the range needed for the
 * uncompressed kernel (VO) and its run size (bss, brk, etc).
 *
 * [output, output+output_size) is VO plus relocs (i.e. the entire
 * uncompressed payload contained by ZO). This is the area of the buffer
 * written to during decompression.
 *
 * [output+init_size-ZO_INIT_SIZE, output+init_size) is the worst-case
 * range of the copied ZO and decompression code. (i.e. the range
 * covered backwards of size ZO_INIT_SIZE, starting from output+init_size.)
 *
 * [input, input+input_size) is the original copied compressed image (ZO)
 * (i.e. it does not include its run size). This range must be avoided
 * because it contains the data used for decompression.
 *
 * [input+input_size, output+init_size) is [_text, _end) for ZO. This
 * range includes ZO's heap and stack, and must be avoided since it
 * performs the decompression.
 *
 * Since the above two ranges need to be avoided and they are adjacent,
 * they can be merged, resulting in: [input, output+init_size) which
 * becomes the MEM_AVOID_ZO_RANGE below.
 */
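
/*
 * Worked example (numbers purely illustrative): with output = 16M,
 * init_size = 32M, input = 40M and input_size = 6M, the two ranges to
 * avoid are [40M, 46M) (the copied ZO image) and [46M, 48M) (ZO's
 * text/heap/stack), which merge into MEM_AVOID_ZO_RANGE = [40M, 48M):
 * start = input, size = output + init_size - input = 8M.
 */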
static void mem_avoid_init(unsigned long input, unsigned long input_size,
			   unsigned long output)
{
	unsigned long init_size = boot_params->hdr.init_size;
	u64 initrd_start, initrd_size;
	unsigned long cmd_line, cmd_line_size;

	/*
	 * Avoid the region that is unsafe to overlap during
	 * decompression.
	 */
	mem_avoid[MEM_AVOID_ZO_RANGE].start = input;
	mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;

	/* Avoid initrd. */
	initrd_start  = (u64)boot_params->ext_ramdisk_image << 32;
	initrd_start |= boot_params->hdr.ramdisk_image;
	initrd_size  = (u64)boot_params->ext_ramdisk_size << 32;
	initrd_size |= boot_params->hdr.ramdisk_size;
	mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
	mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
	/* No need to set mapping for initrd, it will be handled in VO. */

	/* Avoid kernel command line. */
	cmd_line = get_cmd_line_ptr();
	/* Calculate size of cmd_line. */
	if (cmd_line) {
		cmd_line_size = strnlen((char *)cmd_line, COMMAND_LINE_SIZE-1) + 1;
		mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
		mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
	}

	/* Avoid boot parameters. */
	mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
	mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);

	/* We don't need to set a mapping for setup_data. */

	/* Mark the memmap regions we need to avoid */
	handle_mem_options();

	/* Enumerate the immovable memory regions */
	num_immovable_mem = count_immovable_mem_regions();
}

/*
 * Does this memory vector overlap a known avoided area? If so, record the
 * overlap region with the lowest address.
 */
static bool mem_avoid_overlap(struct mem_vector *img,
			      struct mem_vector *overlap)
{
	int i;
	struct setup_data *ptr;
	u64 earliest = img->start + img->size;
	bool is_overlapping = false;

	for (i = 0; i < MEM_AVOID_MAX; i++) {
		if (mem_overlaps(img, &mem_avoid[i]) &&
		    mem_avoid[i].start < earliest) {
			*overlap = mem_avoid[i];
			earliest = overlap->start;
			is_overlapping = true;
		}
	}

	/* Avoid all entries in the setup_data linked list. */
	ptr = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
	while (ptr) {
		struct mem_vector avoid;

		avoid.start = (unsigned long)ptr;
		avoid.size = sizeof(*ptr) + ptr->len;

		if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
			*overlap = avoid;
			earliest = overlap->start;
			is_overlapping = true;
		}

		if (ptr->type == SETUP_INDIRECT &&
		    ((struct setup_indirect *)ptr->data)->type != SETUP_INDIRECT) {
			avoid.start = ((struct setup_indirect *)ptr->data)->addr;
			avoid.size = ((struct setup_indirect *)ptr->data)->len;

			if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
				*overlap = avoid;
				earliest = overlap->start;
				is_overlapping = true;
			}
		}

		ptr = (struct setup_data *)(unsigned long)ptr->next;
	}

	return is_overlapping;
}

struct slot_area {
	u64 addr;
	unsigned long num;
};

#define MAX_SLOT_AREA 100

static struct slot_area slot_areas[MAX_SLOT_AREA];
static unsigned int slot_area_index;
static unsigned long slot_max;

static void store_slot_info(struct mem_vector *region, unsigned long image_size)
{
	struct slot_area slot_area;

	if (slot_area_index == MAX_SLOT_AREA)
		return;

	slot_area.addr = region->start;
	slot_area.num = 1 + (region->size - image_size) / CONFIG_PHYSICAL_ALIGN;

	slot_areas[slot_area_index++] = slot_area;
	slot_max += slot_area.num;
}
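
/*
 * Example (illustrative): with CONFIG_PHYSICAL_ALIGN = 2M, a 100M
 * region holding a 16M image yields 1 + (100M - 16M) / 2M = 43
 * candidate starting addresses (slots). Callers guarantee that
 * region->start is aligned and region->size >= image_size.
 */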

/*
 * Skip as many 1GB huge pages as possible in the passed region
 * according to the number the user specified:
 */
static void
process_gb_huge_pages(struct mem_vector *region, unsigned long image_size)
{
	u64 pud_start, pud_end;
	unsigned long gb_huge_pages;
	struct mem_vector tmp;

	if (!IS_ENABLED(CONFIG_X86_64) || !max_gb_huge_pages) {
		store_slot_info(region, image_size);
		return;
	}

	/* Are there any 1GB pages in the region? */
	pud_start = ALIGN(region->start, PUD_SIZE);
	pud_end = ALIGN_DOWN(region->start + region->size, PUD_SIZE);

	/* No good 1GB huge pages found: */
	if (pud_start >= pud_end) {
		store_slot_info(region, image_size);
		return;
	}

	/* Check if the head part of the region is usable. */
	if (pud_start >= region->start + image_size) {
		tmp.start = region->start;
		tmp.size = pud_start - region->start;
		store_slot_info(&tmp, image_size);
	}

	/* Skip the good 1GB pages. */
	gb_huge_pages = (pud_end - pud_start) >> PUD_SHIFT;
	if (gb_huge_pages > max_gb_huge_pages) {
		pud_end = pud_start + (max_gb_huge_pages << PUD_SHIFT);
		max_gb_huge_pages = 0;
	} else {
		max_gb_huge_pages -= gb_huge_pages;
	}

	/* Check if the tail part of the region is usable. */
	if (region->start + region->size >= pud_end + image_size) {
		tmp.start = pud_end;
		tmp.size = region->start + region->size - pud_end;
		store_slot_info(&tmp, image_size);
	}
}
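
/*
 * Example (illustrative): for a region [900M, 3300M) with
 * max_gb_huge_pages = 1, the 1GB-aligned span is [1G, 3G). One page,
 * [1G, 2G), is reserved for hugeTLB, so the head [900M, 1G) and the
 * tail [2G, 3300M) are handed to store_slot_info(), each only if it
 * can still hold image_size.
 */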

static u64 slots_fetch_random(void)
{
	unsigned long slot;
	unsigned int i;

	/* Handle case of no slots stored. */
	if (slot_max == 0)
		return 0;

	slot = kaslr_get_random_long("Physical") % slot_max;

	for (i = 0; i < slot_area_index; i++) {
		if (slot >= slot_areas[i].num) {
			slot -= slot_areas[i].num;
			continue;
		}
		return slot_areas[i].addr + ((u64)slot * CONFIG_PHYSICAL_ALIGN);
	}

	if (i == slot_area_index)
		debug_putstr("slots_fetch_random() failed!?\n");
	return 0;
}
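
/*
 * Example: with two stored areas of 10 and 30 slots (slot_max = 40),
 * a draw of 25 skips the first area (25 - 10 = 15) and resolves to
 * slot 15 of the second, i.e. addr + 15 * CONFIG_PHYSICAL_ALIGN.
 * Every slot across all areas is equally likely.
 */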

static void __process_mem_region(struct mem_vector *entry,
				 unsigned long minimum,
				 unsigned long image_size)
{
	struct mem_vector region, overlap;
	u64 region_end;

	/* Enforce minimum and memory limit. */
	region.start = max_t(u64, entry->start, minimum);
	region_end = min(entry->start + entry->size, mem_limit);

	/* Give up if slot area array is full. */
	while (slot_area_index < MAX_SLOT_AREA) {
		/* Potentially raise address to meet alignment needs. */
		region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);

		/* Did we raise the address above the passed-in memory entry? */
		if (region.start > region_end)
			return;

		/* Reduce size by any delta from the original address. */
		region.size = region_end - region.start;

		/* Return if region can't contain decompressed kernel */
		if (region.size < image_size)
			return;

		/* If nothing overlaps, store the region and return. */
		if (!mem_avoid_overlap(&region, &overlap)) {
			process_gb_huge_pages(&region, image_size);
			return;
		}

		/* Store beginning of region if it holds at least image_size. */
		if (overlap.start >= region.start + image_size) {
			region.size = overlap.start - region.start;
			process_gb_huge_pages(&region, image_size);
		}

		/* Clip off the overlapping region and start over. */
		region.start = overlap.start + overlap.size;
	}
}
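
/*
 * Example of the clipping loop (illustrative): if the region is
 * [16M, 128M) and mem_avoid_overlap() reports an overlap at
 * [40M, 50M), the head [16M, 40M) is stored (it holds image_size),
 * then the walk resumes at 50M (re-aligned) until the region is
 * exhausted or the slot array fills up.
 */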

static bool process_mem_region(struct mem_vector *region,
			       unsigned long minimum,
			       unsigned long image_size)
{
	int i;
	/*
	 * If no immovable memory is found (or MEMORY_HOTREMOVE is
	 * disabled), use @region directly.
	 */
	if (!num_immovable_mem) {
		__process_mem_region(region, minimum, image_size);

		if (slot_area_index == MAX_SLOT_AREA) {
			debug_putstr("Aborted e820/efi memmap scan (slot_areas full)!\n");
			return true;
		}
		return false;
	}

#if defined(CONFIG_MEMORY_HOTREMOVE) && defined(CONFIG_ACPI)
	/*
	 * If immovable memory is found, filter the intersection between
	 * immovable memory and @region.
	 */
	for (i = 0; i < num_immovable_mem; i++) {
		u64 start, end, entry_end, region_end;
		struct mem_vector entry;

		if (!mem_overlaps(region, &immovable_mem[i]))
			continue;

		start = immovable_mem[i].start;
		end = start + immovable_mem[i].size;
		region_end = region->start + region->size;

		entry.start = clamp(region->start, start, end);
		entry_end = clamp(region_end, start, end);
		entry.size = entry_end - entry.start;

		__process_mem_region(&entry, minimum, image_size);

		if (slot_area_index == MAX_SLOT_AREA) {
			debug_putstr("Aborted e820/efi memmap scan when walking immovable regions (slot_areas full)!\n");
			return true;
		}
	}
#endif
	return false;
}
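
/*
 * Example of the intersection filter (illustrative): for a candidate
 * region [1G, 4G) and an immovable range [2G, 3G), the two clamp()
 * calls produce entry = [2G, 3G); only that intersection is offered
 * as slot space, so the randomized kernel never lands in memory that
 * could later be hot-removed.
 */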

#ifdef CONFIG_EFI
/*
 * Returns true if we processed the EFI memmap, which we prefer over the E820
 * table if it is available.
 */
static bool
process_efi_entries(unsigned long minimum, unsigned long image_size)
{
	struct efi_info *e = &boot_params->efi_info;
	bool efi_mirror_found = false;
	struct mem_vector region;
	efi_memory_desc_t *md;
	unsigned long pmap;
	char *signature;
	u32 nr_desc;
	int i;

	signature = (char *)&e->efi_loader_signature;
	if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
	    strncmp(signature, EFI64_LOADER_SIGNATURE, 4))
		return false;

#ifdef CONFIG_X86_32
	/* Can't handle data above 4GB at this time */
	if (e->efi_memmap_hi) {
		warn("EFI memmap is above 4GB, can't be handled now on x86_32. EFI should be disabled.\n");
		return false;
	}
	pmap = e->efi_memmap;
#else
	pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
#endif

	nr_desc = e->efi_memmap_size / e->efi_memdesc_size;
	for (i = 0; i < nr_desc; i++) {
		md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);
		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
			efi_mirror_found = true;
			break;
		}
	}

	for (i = 0; i < nr_desc; i++) {
		md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);

		/*
		 * Here we are more conservative in picking free memory than
		 * the EFI spec allows:
		 *
		 * According to the spec, EFI_BOOT_SERVICES_{CODE|DATA} are also
		 * free memory and thus available to place the kernel image into,
		 * but in practice there's firmware where using that memory leads
		 * to crashes.
		 *
		 * Only EFI_CONVENTIONAL_MEMORY is guaranteed to be free.
		 */
		if (md->type != EFI_CONVENTIONAL_MEMORY)
			continue;

		if (efi_soft_reserve_enabled() &&
		    (md->attribute & EFI_MEMORY_SP))
			continue;

		if (efi_mirror_found &&
		    !(md->attribute & EFI_MEMORY_MORE_RELIABLE))
			continue;

		region.start = md->phys_addr;
		region.size = md->num_pages << EFI_PAGE_SHIFT;
		if (process_mem_region(&region, minimum, image_size))
			break;
	}
	return true;
}
#else
static inline bool
process_efi_entries(unsigned long minimum, unsigned long image_size)
{
	return false;
}
#endif

static void process_e820_entries(unsigned long minimum,
				 unsigned long image_size)
{
	int i;
	struct mem_vector region;
	struct boot_e820_entry *entry;

	/* Verify potential e820 positions, appending to slots list. */
	for (i = 0; i < boot_params->e820_entries; i++) {
		entry = &boot_params->e820_table[i];
		/* Skip non-RAM entries. */
		if (entry->type != E820_TYPE_RAM)
			continue;
		region.start = entry->addr;
		region.size = entry->size;
		if (process_mem_region(&region, minimum, image_size))
			break;
	}
}

static unsigned long find_random_phys_addr(unsigned long minimum,
					   unsigned long image_size)
{
	u64 phys_addr;

	/* Bail out early if it's impossible to succeed. */
	if (minimum + image_size > mem_limit)
		return 0;

	/* Check if we had too many memmaps. */
	if (memmap_too_large) {
		debug_putstr("Aborted memory entries scan (more than 4 memmap= args)!\n");
		return 0;
	}

	if (!process_efi_entries(minimum, image_size))
		process_e820_entries(minimum, image_size);

	phys_addr = slots_fetch_random();

	/* Perform a final check to make sure the address is in range. */
	if (phys_addr < minimum || phys_addr + image_size > mem_limit) {
		warn("Invalid physical address chosen!\n");
		return 0;
	}

	return (unsigned long)phys_addr;
}

static unsigned long find_random_virt_addr(unsigned long minimum,
					   unsigned long image_size)
{
	unsigned long slots, random_addr;

	/*
	 * How many CONFIG_PHYSICAL_ALIGN-sized slots can hold image_size
	 * within the range [minimum, KERNEL_IMAGE_SIZE)?
	 */
	slots = 1 + (KERNEL_IMAGE_SIZE - minimum - image_size) / CONFIG_PHYSICAL_ALIGN;

	random_addr = kaslr_get_random_long("Virtual") % slots;

	return random_addr * CONFIG_PHYSICAL_ALIGN + minimum;
}
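
/*
 * Example (assuming typical defaults: KERNEL_IMAGE_SIZE = 1G,
 * minimum = LOAD_PHYSICAL_ADDR = 16M, CONFIG_PHYSICAL_ALIGN = 2M):
 * for image_size = 16M there are 1 + (1024M - 16M - 16M) / 2M = 497
 * candidate virtual offsets.
 */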

/*
 * Since this function examines addresses much more numerically,
 * it takes the input and output pointers as 'unsigned long'.
 */
void choose_random_location(unsigned long input,
			    unsigned long input_size,
			    unsigned long *output,
			    unsigned long output_size,
			    unsigned long *virt_addr)
{
	unsigned long random_addr, min_addr;

	if (cmdline_find_option_bool("nokaslr")) {
		warn("KASLR disabled: 'nokaslr' on cmdline.");
		return;
	}

	boot_params->hdr.loadflags |= KASLR_FLAG;

	if (IS_ENABLED(CONFIG_X86_32))
		mem_limit = KERNEL_IMAGE_SIZE;
	else
		mem_limit = MAXMEM;

	/* Record the various known unsafe memory ranges. */
	mem_avoid_init(input, input_size, *output);

	/*
	 * Low end of the randomization range should be the
	 * smaller of 512M or the initial kernel image
	 * location:
	 */
	min_addr = min(*output, 512UL << 20);
	/* Make sure minimum is aligned. */
	min_addr = ALIGN(min_addr, CONFIG_PHYSICAL_ALIGN);

	/* Walk available memory entries to find a random address. */
	random_addr = find_random_phys_addr(min_addr, output_size);
	if (!random_addr) {
		warn("Physical KASLR disabled: no suitable memory region!");
	} else {
		/* Update the new physical address location. */
		if (*output != random_addr)
			*output = random_addr;
	}

	/* Pick random virtual address starting from LOAD_PHYSICAL_ADDR. */
	if (IS_ENABLED(CONFIG_X86_64))
		random_addr = find_random_virt_addr(LOAD_PHYSICAL_ADDR, output_size);
	*virt_addr = random_addr;
}