// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/boot_data.h>
#include <asm/sections.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"

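/*
 * Variables tagged __bootdata()/__bootdata_preserved() live in the
 * .boot.data/.boot.preserved.data sections and are handed over to the
 * decompressed kernel by copy_bootdata() below. The .boot.data variant
 * is presumably discarded together with init memory, while the preserved
 * section stays valid after boot.
 */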
unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
unsigned long __bootdata(__amode31_base);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata(ident_map_size);
int __bootdata(is_full_image) = 1;
struct initrd_data __bootdata(initrd_data);

u64 __bootdata_preserved(stfle_fac_list[16]);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

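/* Print an error message via the early SCLP console and stop in disabled wait. */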
void error(char *x)
{
	sclp_early_printk("\n\n");
	sclp_early_printk(x);
	sclp_early_printk("\n\n -- System halted");

	disabled_wait();
}

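/*
 * Set the load-program-parameter (LPP) if the LPP facility (facility bit 40)
 * is installed; the magic value tags this image, e.g. in CPU measurement
 * sampling data.
 */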
static void setup_lpp(void)
{
	S390_lowcore.current_pid = 0;
	S390_lowcore.lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&S390_lowcore.lpp);
}

#ifdef CONFIG_KERNEL_UNCOMPRESSED
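/*
 * First address after the kernel image that is safe to use; for compressed
 * kernels the decompressor code provides its own implementation.
 */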
unsigned long mem_safe_offset(void)
{
	return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
}
#endif

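/* Move the initrd up to addr if it would otherwise be overwritten. */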
static void rescue_initrd(unsigned long addr)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!initrd_data.start || !initrd_data.size)
		return;
	if (addr <= initrd_data.start)
		return;
	memmove((void *)addr, (void *)initrd_data.start, initrd_data.size);
	initrd_data.start = addr;
}

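/*
 * Copy the .boot.data and .boot.preserved.data sections into the decompressed
 * kernel image, after checking that their sizes match what the vmlinux info
 * recorded at build time.
 */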
static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

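/*
 * Apply the dynamic relocations (.rela.dyn) of the decompressed kernel,
 * adjusting each target by the KASLR offset. The actual fixup is done by
 * arch_kexec_do_relocs(), which is shared with the kexec code.
 */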
static void handle_relocs(unsigned long offset)
{
	Elf64_Rela *rela_start, *rela_end, *rela;
	int r_type, r_sym, rc;
	Elf64_Addr loc, val;
	Elf64_Sym *dynsym;

	rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
	rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
	dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
	for (rela = rela_start; rela < rela_end; rela++) {
		loc = rela->r_offset + offset;
		val = rela->r_addend;
		r_sym = ELF64_R_SYM(rela->r_info);
		if (r_sym) {
			if (dynsym[r_sym].st_shndx != SHN_UNDEF)
				val += dynsym[r_sym].st_value + offset;
		} else {
			/*
			 * 0 == undefined symbol table index (STN_UNDEF),
			 * used for R_390_RELATIVE, only add KASLR offset
			 */
			val += offset;
		}
		r_type = ELF64_R_TYPE(rela->r_info);
		rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
		if (rc)
			error("Unknown relocation type");
	}
}

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It might not be all online memory, but may also include standby
 * (offline) memory. "ident_map_size" can be lower than the actual standby or
 * even online memory present, due to limiting factors. We should never go
 * above this limit. It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online or standby.
 *    Always <= end of the last online memory block (get_mem_detect_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
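/*
 * For example (illustrative numbers only): with 8 GiB online plus 8 GiB
 * standby memory and "mem=4G" on the command line, ident_map_size ends up
 * as min(16 GiB, 4 GiB, 1 << MAX_PHYSMEM_BITS) = 4 GiB.
 */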
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
		kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
			ident_map_size = min(ident_map_size, hsa_size);
	}
#endif
}

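/*
 * Compute the virtual kernel address space layout, carving areas out from
 * the top of the address space (vmax) downwards:
 *
 *	vmax
 *	  __memcpy_real_area  (one page)
 *	  __abs_lowcore       (ABS_LOWCORE_MAP_SIZE)
 *	  MODULES_VADDR .. MODULES_END  (MODULES_LEN)
 *	  VMALLOC_START .. VMALLOC_END
 *	  vmemmap             (struct page array)
 *	  identity mapping of physical memory [0, ident_map_size)
 *
 * Four levels of page tables (vmax = _REGION1_SIZE) are chosen when KASAN
 * is enabled or when everything would not fit below _REGION2_SIZE;
 * otherwise three levels (vmax = _REGION2_SIZE) suffice.
 */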
static void setup_kernel_memory_layout(void)
{
	unsigned long vmemmap_start;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	vmemmap_start = round_up(ident_map_size, _REGION3_SIZE);
	if (IS_ENABLED(CONFIG_KASAN) ||
	    vmalloc_size > _REGION2_SIZE ||
	    vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN >
		    _REGION2_SIZE) {
		vmax = _REGION1_SIZE;
		rte_size = _REGION2_SIZE;
	} else {
		vmax = _REGION2_SIZE;
		rte_size = _REGION3_SIZE;
	}
	/*
	 * Force modules and the vmalloc area below the ultravisor secure
	 * storage limit, so that any vmalloc allocation we do can be used
	 * to back secure guest storage.
	 */
	vmax = adjust_to_uv_max(vmax);
#ifdef CONFIG_KASAN
	/* force vmalloc and modules below kasan shadow */
	vmax = min(vmax, KASAN_SHADOW_START);
#endif
	__memcpy_real_area = round_down(vmax - PAGE_SIZE, PAGE_SIZE);
	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
				   sizeof(struct lowcore));
	MODULES_END = round_down(__abs_lowcore, _SEGMENT_SIZE);
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;

	/* allow the vmalloc area to occupy up to about half of the remaining virtual space */
	vmalloc_size = min(vmalloc_size, round_down(VMALLOC_END / 2, _REGION3_SIZE));
	VMALLOC_START = VMALLOC_END - vmalloc_size;

	/* split remaining virtual space between 1:1 mapping & vmemmap array */
	pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	/* keep vmemmap_start aligned to a top level region table entry */
	vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
	/* vmemmap_start is the future VMEM_MAX_PHYS, make sure it is within MAX_PHYSMEM */
	vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS);
	/* make sure the identity map doesn't overlap the vmemmap array */
	ident_map_size = min(ident_map_size, vmemmap_start);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	/* make sure vmemmap doesn't overlap the vmalloc area */
	VMALLOC_START = max(vmemmap_start + vmemmap_size, VMALLOC_START);
	vmemmap = (struct page *)vmemmap_start;
}

/*
 * This function clears the BSS section of the decompressed Linux kernel,
 * NOT the decompressor's.
 */
static void clear_bss_section(void)
{
	memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set the vmalloc area size to one eighth of (potential) physical memory
 * size, unless the size has been set by a kernel command line parameter.
 */
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}

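/* Shift all addresses recorded in the vmlinux info by the KASLR offset. */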
static void offset_vmlinux_info(unsigned long offset)
{
	vmlinux.default_lma += offset;
	*(unsigned long *)(&vmlinux.entry) += offset;
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
	vmlinux.rela_dyn_start += offset;
	vmlinux.rela_dyn_end += offset;
	vmlinux.dynsym_start += offset;
}

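/*
 * Reserve room for the kernel's .amode31 section (code and data that must
 * stay below 2 GB) at the current safe address and return the new safe
 * address behind it.
 */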
static unsigned long reserve_amode31(unsigned long safe_addr)
{
	__amode31_base = PAGE_ALIGN(safe_addr);
	return __amode31_base + vmlinux.amode31_size;
}

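/*
 * Main C entry point of the boot code: collect IPL, memory and UV info,
 * set up the memory layout, pick a randomized base address if KASLR is
 * enabled, decompress and/or relocate the kernel image, apply its
 * relocations and finally jump to its entry point.
 */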
void startup_kernel(void)
{
	unsigned long random_lma;
	unsigned long safe_addr;
	void *img;

	initrd_data.start = parmarea.initrd_start;
	initrd_data.size = parmarea.initrd_size;
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;

	setup_lpp();
	store_ipl_parmblock();
	safe_addr = mem_safe_offset();
	safe_addr = reserve_amode31(safe_addr);
	safe_addr = read_ipl_report(safe_addr);
	uv_query_info();
	rescue_initrd(safe_addr);
	sclp_early_read_info();
	setup_boot_command_line();
	parse_boot_command_line();
	sanitize_prot_virt_host();
	setup_ident_map_size(detect_memory());
	setup_vmalloc_size();
	setup_kernel_memory_layout();

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
		random_lma = get_random_base(safe_addr);
		if (random_lma) {
			__kaslr_offset = random_lma - vmlinux.default_lma;
			img = (void *)vmlinux.default_lma;
			offset_vmlinux_info(__kaslr_offset);
		}
	}

	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
		img = decompress_kernel();
		memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
	} else if (__kaslr_offset)
		memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);

	clear_bss_section();
	copy_bootdata();
	handle_relocs(__kaslr_offset);

	if (__kaslr_offset) {
		/*
		 * Save KASLR offset for early dumps, before vmcore_info is set.
		 * Set the low bit to distinguish it from a real vmcore_info pointer.
		 */
		S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL;
		/* Clear non-relocated kernel */
		if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
			memset(img, 0, vmlinux.image_size);
	}
	vmlinux.entry();
}