/*
 * PPC64 code to handle Linux booting another kernel.
 *
 * Copyright (C) 2004-2005, IBM Corp.
 *
 * Created by: Milton D Miller II
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */


#include <linux/kexec.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/init_task.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/cpu.h>

#include <asm/page.h>
#include <asm/current.h>
#include <asm/machdep.h>
#include <asm/cacheflush.h>
#include <asm/paca.h>
#include <asm/mmu.h>
#include <asm/sections.h>	/* _end */
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/hw_breakpoint.h>

int default_machine_kexec_prepare(struct kimage *image)
{
	int i;
	unsigned long begin, end;	/* limits of segment */
	unsigned long low, high;	/* limits of blocked memory range */
	struct device_node *node;
	const unsigned long *basep;
	const unsigned int *sizep;

	if (!ppc_md.hpte_clear_all)
		return -ENOENT;

	/*
	 * Since we use the kernel fault handlers and paging code to
	 * handle the virtual mode, we must make sure no destination
	 * overlaps kernel static data or bss.
	 */
	for (i = 0; i < image->nr_segments; i++)
		if (image->segment[i].mem < __pa(_end))
			return -ETXTBSY;

	/*
	 * For non-LPAR, we absolutely cannot overwrite the mmu hash
	 * table, since we are still using the bolted entries in it to
	 * do the copy. Check that here.
	 *
	 * It is safe if the end is below the start of the blocked
	 * region (end <= low), or if the beginning is after the
	 * end of the blocked region (begin >= high). Use the
	 * boolean identity !(a || b) === (!a && !b).
	 */
	if (htab_address) {
		low = __pa(htab_address);
		high = low + htab_size_bytes;

		for (i = 0; i < image->nr_segments; i++) {
			begin = image->segment[i].mem;
			end = begin + image->segment[i].memsz;

			if ((begin < high) && (end > low))
				return -ETXTBSY;
		}
	}

	/* We also should not overwrite the TCE tables */
	for_each_node_by_type(node, "pci") {
		basep = of_get_property(node, "linux,tce-base", NULL);
		sizep = of_get_property(node, "linux,tce-size", NULL);
		if (basep == NULL || sizep == NULL)
			continue;

		low = *basep;
		high = low + (*sizep);

		for (i = 0; i < image->nr_segments; i++) {
			begin = image->segment[i].mem;
			end = begin + image->segment[i].memsz;

			if ((begin < high) && (end > low))
				return -ETXTBSY;
		}
	}

	return 0;
}

#define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE)

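/*
 * The image head points at a list of indirection entries. The low bits
 * of each entry carry an IND_* flag and the remaining bits hold the
 * physical address of a page: IND_DESTINATION sets the current
 * destination page, IND_INDIRECTION continues the list in another
 * page, IND_SOURCE names a page to copy to the current destination,
 * and IND_DONE terminates the list.
 */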
static void copy_segments(unsigned long ind)
{
	unsigned long entry;
	unsigned long *ptr;
	void *dest;
	void *addr;

	/*
	 * We rely on kexec_load to create a list that properly
	 * initializes these pointers before they are used.
	 * We will still crash if the list is wrong, but at least
	 * the compiler will be quiet.
	 */
	ptr = NULL;
	dest = NULL;

	for (entry = ind; !(entry & IND_DONE); entry = *ptr++) {
		addr = __va(entry & PAGE_MASK);

		switch (entry & IND_FLAGS) {
		case IND_DESTINATION:
			dest = addr;
			break;
		case IND_INDIRECTION:
			ptr = addr;
			break;
		case IND_SOURCE:
			copy_page(dest, addr);
			dest += PAGE_SIZE;
		}
	}
}

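/*
 * Copy the kexec segments into place and flush the icache for every
 * destination page. The segment list is copied to the stack first,
 * because once copy_segments() runs, dynamically allocated memory
 * (including *image itself) may have been overwritten.
 */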
void kexec_copy_flush(struct kimage *image)
{
	long i, nr_segments = image->nr_segments;
	struct kexec_segment ranges[KEXEC_SEGMENT_MAX];

	/* save the ranges on the stack to efficiently flush the icache */
	memcpy(ranges, image->segment, sizeof(ranges));

	/*
	 * After this call we may not use anything allocated in dynamic
	 * memory, including *image.
	 *
	 * Only globals and the stack are allowed.
	 */
	copy_segments(image->head);

	/*
	 * We need to clear the icache for all destination pages at some
	 * point, including ones that were already in place on the
	 * original copy.
	 */
	for (i = 0; i < nr_segments; i++)
		flush_icache_range((unsigned long)__va(ranges[i].mem),
			(unsigned long)__va(ranges[i].mem + ranges[i].memsz));
}

#ifdef CONFIG_SMP

static int kexec_all_irq_disabled = 0;

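/*
 * Runs on every other CPU via smp_call_function(): disable interrupts,
 * report KEXEC_STATE_IRQS_OFF through the paca, wait for the master to
 * confirm all CPUs are down, then park in kexec_smp_wait().
 */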
static void kexec_smp_down(void *arg)
{
	local_irq_disable();
	hard_irq_disable();

	mb(); /* make sure our irqs are disabled before we say they are */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
	while (kexec_all_irq_disabled == 0)
		cpu_relax();
	mb(); /* make sure all irqs are disabled before this */
	hw_breakpoint_disable();
	/*
	 * Now every CPU has IRQs off, we can clear out any pending
	 * IPIs and be sure that no more will come in after this.
	 */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);

	kexec_smp_wait();
	/* NOTREACHED */
}

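/*
 * Spin until every other online CPU has advanced its paca kexec_state
 * at least to wait_state, printing a note the first time we find a CPU
 * that is lagging.
 */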
static void kexec_prepare_cpus_wait(int wait_state)
{
	int my_cpu, i, notified = -1;

	hw_breakpoint_disable();
	my_cpu = get_cpu();
	/* Make sure each CPU has at least made it to the state we need.
	 *
	 * FIXME: There is a (slim) chance of a problem if not all of the CPUs
	 * are correctly onlined. If somehow we start a CPU on boot with RTAS
	 * start-cpu, but somehow that CPU doesn't write callin_cpu_map[] in
	 * time, the boot CPU will time out. If it does eventually execute
	 * stuff, the secondary will start up (paca[].cpu_start was written) and
	 * get into a peculiar state. If the platform supports
	 * smp_ops->take_timebase(), the secondary CPU will probably be spinning
	 * in there. If not (i.e. pseries), the secondary will continue on and
	 * try to online itself/idle/etc. If it survives that, we need to find
	 * these possible-but-not-online-but-should-be CPUs and chaperone them
	 * into kexec_smp_wait().
	 */
	for_each_online_cpu(i) {
		if (i == my_cpu)
			continue;

		while (paca[i].kexec_state < wait_state) {
			barrier();
			if (i != notified) {
				printk(KERN_INFO "kexec: waiting for cpu %d "
				       "(physical %d) to enter %i state\n",
				       i, paca[i].hw_cpu_id, wait_state);
				notified = i;
			}
		}
	}
	mb();
}

/*
 * We need to make sure each present CPU is online. The next kernel will scan
 * the device tree and assume primary threads are online and query secondary
 * threads via RTAS to online them if required. If we don't online primary
 * threads, they will be stuck. However, we also online secondary threads as we
 * may be using 'cede offline'. In this case RTAS doesn't see the secondary
 * threads as offline -- and again, these CPUs will be stuck.
 *
 * So, we online all CPUs that should be running, including secondary threads.
 */
static void wake_offline_cpus(void)
{
	int cpu = 0;

	for_each_present_cpu(cpu) {
		if (!cpu_online(cpu)) {
			printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
			       cpu);
			cpu_up(cpu);
		}
	}
}

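/*
 * Bring every other CPU down into kexec_smp_wait(): online anything
 * present but offline, IPI the rest into kexec_smp_down(), wait until
 * all of them report IRQs off, and finally wait for them to reach real
 * mode before the MMU mappings go away.
 */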
static void kexec_prepare_cpus(void)
{
	wake_offline_cpus();
	smp_call_function(kexec_smp_down, NULL, /* wait */0);
	local_irq_disable();
	hard_irq_disable();

	mb(); /* make sure IRQs are disabled before we say they are */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;

	kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);
	/* we are sure every CPU has IRQs off at this point */
	kexec_all_irq_disabled = 1;

	/* after we tell the others to go down */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);

	/*
	 * Before removing MMU mappings make sure all CPUs have entered real
	 * mode:
	 */
	kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);

	put_cpu();
}

#else /* ! SMP */

static void kexec_prepare_cpus(void)
{
	/*
	 * Move the secondaries to us so that we can copy
	 * the new kernel 0-0x100 safely.
	 *
	 * Do this if kexec in setup.c ?
	 *
	 * We need to release the cpus if we are ever going from an
	 * UP to an SMP kernel.
	 */
	smp_release_cpus();
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);
	local_irq_disable();
	hard_irq_disable();
}

#endif /* SMP */

/*
 * kexec thread structure and stack.
 *
 * We need to make sure that this is 16384-byte aligned due to the
 * way process stacks are handled. It also must be statically allocated
 * or allocated as part of the kimage, because everything else may be
 * overwritten when we copy the kexec image. We piggyback on the
 * "init_task" linker section here to statically allocate a stack.
 *
 * We could use a smaller stack if we don't care about anything using
 * current, but that audit has not been performed.
 */
static union thread_union kexec_stack __init_task_data =
	{ };

/*
 * For similar reasons to the stack above, the kexecing CPU needs to be on a
 * static PACA; we switch to kexec_paca.
 */
struct paca_struct kexec_paca;

/* Our assembly helper, in kexec_stub.S */
extern void kexec_sequence(void *newstack, unsigned long start,
			   void *image, void *control,
			   void (*clear_all)(void)) __noreturn;
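/*
 * Broadly, kexec_sequence() switches to the new stack, copies the
 * image into place (via kexec_copy_flush()), drops to real mode,
 * clears the hash table through clear_all(), and transfers control to
 * the new kernel via the control page. It does not return.
 */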

/* too late to fail here */
void default_machine_kexec(struct kimage *image)
{
	/* prepare control code if any */

	/*
	 * If this is a normal kexec boot, we need to shut down the other
	 * CPUs into our wait loop and quiesce interrupts.
	 * Otherwise, in the crash case (crashing_cpu >= 0), stopping the
	 * other CPUs and collecting their pt_regs was already done, via
	 * the debugger IPI.
	 */

	if (crashing_cpu == -1)
		kexec_prepare_cpus();

	pr_debug("kexec: Starting switchover sequence.\n");

	/* Switch to a statically allocated stack. Based on irq stack code.
	 * XXX: the task struct will likely be invalid once we do the copy!
	 */
	kexec_stack.thread_info.task = current_thread_info()->task;
	kexec_stack.thread_info.flags = 0;

	/* We need a static PACA, too; copy this CPU's PACA over and switch to
	 * it. Also poison per_cpu_offset to catch anyone using non-static
	 * data.
	 */
	memcpy(&kexec_paca, get_paca(), sizeof(struct paca_struct));
	kexec_paca.data_offset = 0xedeaddeadeeeeeeeUL;
	paca = (struct paca_struct *)RELOC_HIDE(&kexec_paca, 0) -
		kexec_paca.paca_index;
	setup_paca(&kexec_paca);

	/* XXX: If anyone does 'dynamic lppacas' this will also need to be
	 * switched to a static version!
	 */

	/* Some things are best done in assembly. Finding globals with
	 * a toc is easier in C, so pass in what we can.
	 */
	kexec_sequence(&kexec_stack, image->start, image,
		       page_address(image->control_code_page),
		       ppc_md.hpte_clear_all);
	/* NOTREACHED */
}

/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base;

static struct property htab_base_prop = {
	.name = "linux,htab-base",
	.length = sizeof(unsigned long),
	.value = &htab_base,
};

static struct property htab_size_prop = {
	.name = "linux,htab-size",
	.length = sizeof(unsigned long),
	.value = &htab_size_bytes,
};

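/*
 * Publish the hash table base and size under /chosen so the next
 * kernel can find them. Any stale copies of the properties are removed
 * first so that ours are the ones a lookup finds.
 */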
static int __init export_htab_values(void)
{
	struct device_node *node;
	struct property *prop;

	/* On machines with no htab htab_address is NULL */
	if (!htab_address)
		return -ENODEV;

	node = of_find_node_by_path("/chosen");
	if (!node)
		return -ENODEV;

	/* remove any stale properties so ours can be found */
	prop = of_find_property(node, htab_base_prop.name, NULL);
	if (prop)
		prom_remove_property(node, prop);
	prop = of_find_property(node, htab_size_prop.name, NULL);
	if (prop)
		prom_remove_property(node, prop);

	htab_base = __pa(htab_address);
	prom_add_property(node, &htab_base_prop);
	prom_add_property(node, &htab_size_prop);

	of_node_put(node);
	return 0;
}
late_initcall(export_htab_values);
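
/*
 * For illustration only (not part of this file): a consumer in the
 * next kernel could read the exported value back roughly as sketched
 * below. The function name is hypothetical; the of_* helpers are the
 * standard device-tree accessors.
 */
#if 0
static unsigned long example_read_htab_base(void)
{
	struct device_node *chosen;
	const unsigned long *basep;
	unsigned long base = 0;

	chosen = of_find_node_by_path("/chosen");
	if (!chosen)
		return 0;

	/* property was written by export_htab_values() above */
	basep = of_get_property(chosen, "linux,htab-base", NULL);
	if (basep)
		base = *basep;

	of_node_put(chosen);
	return base;
}
#endif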