// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 FORTH-ICS/CARV
 *  Nick Kossifidis <mick@ics.forth.gr>
 */

#include <linux/kexec.h>
#include <asm/kexec.h>		/* For riscv_kexec_* symbol defines */
#include <linux/smp.h>		/* For smp_send_stop() */
#include <asm/cacheflush.h>	/* For local_flush_icache_all() */
#include <asm/barrier.h>	/* For smp_wmb() */
#include <asm/page.h>		/* For PAGE_MASK */
#include <linux/libfdt.h>	/* For fdt_check_header() */
#include <asm/set_memory.h>	/* For set_memory_x() */
#include <linux/compiler.h>	/* For unreachable() */
#include <linux/cpu.h>		/* For cpu_down() */
#include <linux/reboot.h>

/*
 * kexec_image_info - Print received image details
 */
static void
kexec_image_info(const struct kimage *image)
{
	unsigned long i;

	pr_debug("Kexec image info:\n");
	pr_debug("\ttype:        %d\n", image->type);
	pr_debug("\tstart:       %lx\n", image->start);
	pr_debug("\thead:        %lx\n", image->head);
	pr_debug("\tnr_segments: %lu\n", image->nr_segments);

	for (i = 0; i < image->nr_segments; i++) {
		pr_debug("\t    segment[%lu]: %016lx - %016lx", i,
			image->segment[i].mem,
			image->segment[i].mem + image->segment[i].memsz);
		pr_debug("\t\t0x%lx bytes, %lu pages\n",
			(unsigned long) image->segment[i].memsz,
			(unsigned long) image->segment[i].memsz / PAGE_SIZE);
	}
}

/*
 * machine_kexec_prepare - Initialize kexec
 *
 * This function is called from do_kexec_load when the user has
 * provided us with an image to be loaded. Its goal is to validate
 * the image and prepare the control code buffer as needed.
 * Note that kimage_alloc_init has already been called and the
 * control buffer has already been allocated.
 */
int
machine_kexec_prepare(struct kimage *image)
{
	struct kimage_arch *internal = &image->arch;
	struct fdt_header fdt = {0};
	void *control_code_buffer = NULL;
	unsigned int control_code_buffer_sz = 0;
	int i = 0;

	kexec_image_info(image);

	/* Find the Flattened Device Tree and save its physical address */
	for (i = 0; i < image->nr_segments; i++) {
		if (image->segment[i].memsz <= sizeof(fdt))
			continue;

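		/*
		 * For file-mode kexec (kexec_file_load) the segment buffer is
		 * a kernel pointer, so we can read the FDT header directly;
		 * for the regular kexec_load syscall it points to userspace
		 * and has to be copied in first.
		 */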
		if (image->file_mode)
			memcpy(&fdt, image->segment[i].buf, sizeof(fdt));
		else if (copy_from_user(&fdt, image->segment[i].buf, sizeof(fdt)))
			continue;

		if (fdt_check_header(&fdt))
			continue;

		internal->fdt_addr = (unsigned long) image->segment[i].mem;
		break;
	}

	if (!internal->fdt_addr) {
		pr_err("Device tree not included in the provided image\n");
		return -EINVAL;
	}

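	/*
	 * Crash kernels are loaded straight into the reserved crashkernel
	 * region, so no relocation (and hence no executable control page)
	 * is needed for them; see the use of riscv_kexec_norelocate in
	 * machine_kexec() below.
	 */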
	/* Copy the assembler code for relocation to the control page */
	if (image->type != KEXEC_TYPE_CRASH) {
		control_code_buffer = page_address(image->control_code_page);
		control_code_buffer_sz = page_size(image->control_code_page);

		if (unlikely(riscv_kexec_relocate_size > control_code_buffer_sz)) {
			pr_err("Relocation code doesn't fit within a control page\n");
			return -EINVAL;
		}

		memcpy(control_code_buffer, riscv_kexec_relocate,
			riscv_kexec_relocate_size);

		/* Mark the control page executable */
		set_memory_x((unsigned long) control_code_buffer, 1);
	}

	return 0;
}


/*
 * machine_kexec_cleanup - Cleanup any leftovers from
 *			   machine_kexec_prepare
 *
 * This function is called by kimage_free to handle any arch-specific
 * allocations done in machine_kexec_prepare. Since we didn't do any
 * allocations there, this is just an empty function. Note that the
 * control buffer is freed by kimage_free.
 */
void
machine_kexec_cleanup(struct kimage *image)
{
}


/*
 * machine_shutdown - Prepare for a kexec reboot
 *
 * This function is called by kernel_kexec just before machine_kexec
 * below. Its goal is to prepare the rest of the system (the other
 * harts and possibly devices, etc.) for a kexec reboot.
 */
void machine_shutdown(void)
{
	/*
	 * No more interrupts on this hart
	 * until we are back up.
	 */
	local_irq_disable();

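	/* Take every hart other than the current one offline */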
#if defined(CONFIG_HOTPLUG_CPU)
	smp_shutdown_nonboot_cpus(smp_processor_id());
#endif
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	/*
	 * This function can be called twice in the panic path, but we
	 * obviously execute it only once.
	 */
	if (cpus_stopped)
		return;

	smp_send_stop();
	cpus_stopped = 1;
}

/*
 * machine_crash_shutdown - Prepare to kexec after a kernel crash
 *
 * This function is called by crash_kexec just before machine_kexec
 * and its goal is to shut down the non-crashing cpus and save registers.
 */
void
machine_crash_shutdown(struct pt_regs *regs)
{
	local_irq_disable();

	/* shut down the non-crashing cpus */
	crash_smp_send_stop();

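	/* Record this hart's registers in its crash note for the vmcore */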
	crash_save_cpu(regs, smp_processor_id());
	pr_info("Starting crashdump kernel...\n");
}

/*
 * machine_kexec - Jump to the loaded kimage
 *
 * This function is called by kernel_kexec which is called by the
 * reboot system call when the reboot cmd is LINUX_REBOOT_CMD_KEXEC,
 * or by crash_kexec which is called by the kernel's arch-specific
 * trap handler in case of a kernel panic. It's the final stage of
 * the kexec process where the pre-loaded kimage is ready to be
 * executed. We assume at this point that all other harts are
 * suspended and this hart will be the new boot hart.
 */
void __noreturn
machine_kexec(struct kimage *image)
{
	struct kimage_arch *internal = &image->arch;
	unsigned long jump_addr = (unsigned long) image->start;
	unsigned long first_ind_entry = (unsigned long) &image->head;
	unsigned long this_cpu_id = __smp_processor_id();
	unsigned long this_hart_id = cpuid_to_hartid_map(this_cpu_id);
	unsigned long fdt_addr = internal->fdt_addr;
	void *control_code_buffer = page_address(image->control_code_page);
	riscv_kexec_method kexec_method = NULL;

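	/*
	 * For a normal kexec we jump through the relocation code copied to
	 * the control page; for a crash kexec the image already sits at its
	 * final physical location, so the in-kernel norelocate stub is
	 * enough.
	 */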
	if (image->type != KEXEC_TYPE_CRASH)
		kexec_method = control_code_buffer;
	else
		kexec_method = (riscv_kexec_method) &riscv_kexec_norelocate;

	pr_notice("Will call new kernel at %08lx from hart id %lx\n",
		  jump_addr, this_hart_id);
	pr_notice("FDT image at %08lx\n", fdt_addr);

	/* Make sure the relocation code is visible to the hart */
	local_flush_icache_all();

	/* Jump to the relocation code */
	pr_notice("Bye...\n");
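	/*
	 * Hand over control: pass the location of the first indirection
	 * entry (&image->head), the entry point, the FDT's physical
	 * address, this hart's id and the kernel's VA->PA offset to the
	 * relocation code.
	 */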
	kexec_method(first_ind_entry, jump_addr, fdt_addr,
		     this_hart_id, kernel_map.va_pa_offset);
	unreachable();
}