/*
 * PPC64 code to handle Linux booting another kernel.
 *
 * Copyright (C) 2004-2005, IBM Corp.
 *
 * Created by: Milton D Miller II
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */


#include <linux/kexec.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/init_task.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/cpu.h>

#include <asm/page.h>
#include <asm/current.h>
#include <asm/machdep.h>
#include <asm/cacheflush.h>
#include <asm/paca.h>
#include <asm/mmu.h>
#include <asm/sections.h>	/* _end */
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/hw_breakpoint.h>

int default_machine_kexec_prepare(struct kimage *image)
{
	int i;
	unsigned long begin, end;	/* limits of segment */
	unsigned long low, high;	/* limits of blocked memory range */
	struct device_node *node;
	const unsigned long *basep;
	const unsigned int *sizep;

	if (!ppc_md.hpte_clear_all)
		return -ENOENT;

	/*
	 * Since we use the kernel fault handlers and paging code to
	 * handle the virtual mode, we must make sure no destination
	 * overlaps kernel static data or bss.
	 */
	for (i = 0; i < image->nr_segments; i++)
		if (image->segment[i].mem < __pa(_end))
			return -ETXTBSY;

	/*
	 * For non-LPAR, we absolutely cannot overwrite the mmu hash
	 * table, since we are still using the bolted entries in it to
	 * do the copy. Check that here.
	 *
	 * It is safe if the end is below the start of the blocked
	 * region (end <= low), or if the beginning is after the
	 * end of the blocked region (begin >= high). Use the
	 * boolean identity !(a || b) === (!a && !b).
	 */
	if (htab_address) {
		low = __pa(htab_address);
		high = low + htab_size_bytes;

		for (i = 0; i < image->nr_segments; i++) {
			begin = image->segment[i].mem;
			end = begin + image->segment[i].memsz;

			if ((begin < high) && (end > low))
				return -ETXTBSY;
		}
	}

	/* We also must not overwrite the TCE tables */
	for (node = of_find_node_by_type(NULL, "pci"); node != NULL;
	     node = of_find_node_by_type(node, "pci")) {
		basep = of_get_property(node, "linux,tce-base", NULL);
		sizep = of_get_property(node, "linux,tce-size", NULL);
		if (basep == NULL || sizep == NULL)
			continue;

		low = *basep;
		high = low + (*sizep);

		for (i = 0; i < image->nr_segments; i++) {
			begin = image->segment[i].mem;
			end = begin + image->segment[i].memsz;

			if ((begin < high) && (end > low))
				return -ETXTBSY;
		}
	}

	return 0;
}
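
/*
 * The same half-open interval overlap test is used for both the hash
 * table and the TCE tables above. A minimal sketch of it factored out
 * as a helper (illustrative only, not used by this file):
 */
static inline bool __maybe_unused ranges_overlap(unsigned long begin,
						 unsigned long end,
						 unsigned long low,
						 unsigned long high)
{
	/* overlap iff neither (end <= low) nor (begin >= high) */
	return (begin < high) && (end > low);
}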

#define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE)

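/*
 * Each entry in the kexec indirection list carries a physical address
 * in its PAGE_MASK bits and one of the IND_* type flags in its low
 * bits; copy_segments() below walks that list.
 */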
static void copy_segments(unsigned long ind)
{
	unsigned long entry;
	unsigned long *ptr;
	void *dest;
	void *addr;

	/*
	 * We rely on kexec_load to create a list that properly
	 * initializes these pointers before they are used.
	 * We will still crash if the list is wrong, but at least
	 * the compiler will be quiet.
	 */
	ptr = NULL;
	dest = NULL;

	for (entry = ind; !(entry & IND_DONE); entry = *ptr++) {
		addr = __va(entry & PAGE_MASK);

		switch (entry & IND_FLAGS) {
		case IND_DESTINATION:	/* start of a new destination range */
			dest = addr;
			break;
		case IND_INDIRECTION:	/* next page of list entries */
			ptr = addr;
			break;
		case IND_SOURCE:	/* one page to copy to *dest */
			copy_page(dest, addr);
			dest += PAGE_SIZE;
		}
	}
}
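
/*
 * For illustration only: a minimal indirection list of the shape
 * copy_segments() consumes, built by hand. kexec_load normally
 * constructs this; the function below is a hypothetical sketch and
 * is not used anywhere in this file.
 */
static void __maybe_unused example_build_list(unsigned long *list,
					      unsigned long dest_phys,
					      unsigned long src_phys)
{
	list[0] = dest_phys | IND_DESTINATION;	/* copy target */
	list[1] = src_phys | IND_SOURCE;	/* one source page */
	list[2] = IND_DONE;			/* terminator */
}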

void kexec_copy_flush(struct kimage *image)
{
	long i, nr_segments = image->nr_segments;
	struct kexec_segment ranges[KEXEC_SEGMENT_MAX];

	/* save the ranges on the stack to efficiently flush the icache */
	memcpy(ranges, image->segment, sizeof(ranges));

	/*
	 * After this call we may not use anything allocated in dynamic
	 * memory, including *image.
	 *
	 * Only globals and the stack are allowed.
	 */
	copy_segments(image->head);

	/*
	 * we need to clear the icache for all dest pages sometime,
	 * including ones that were in place on the original copy
	 */
	for (i = 0; i < nr_segments; i++)
		flush_icache_range((unsigned long)__va(ranges[i].mem),
			(unsigned long)__va(ranges[i].mem + ranges[i].memsz));
}

#ifdef CONFIG_SMP

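/*
 * Handshake flag between the CPU driving the kexec and the others:
 * each secondary disables interrupts, reports KEXEC_STATE_IRQS_OFF in
 * its paca, then spins until the kexecing CPU sets this flag.
 */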
static int kexec_all_irq_disabled = 0;

static void kexec_smp_down(void *arg)
{
	local_irq_disable();
	mb(); /* make sure our irqs are disabled before we say they are */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
	while (kexec_all_irq_disabled == 0)
		cpu_relax();
	mb(); /* make sure all irqs are disabled before this */
	hw_breakpoint_disable();
	/*
	 * Now every CPU has IRQs off, we can clear out any pending
	 * IPIs and be sure that no more will come in after this.
	 */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);

	kexec_smp_wait();
	/* NOTREACHED */
}

static void kexec_prepare_cpus_wait(int wait_state)
{
	int my_cpu, i, notified = -1;

	hw_breakpoint_disable();
	my_cpu = get_cpu();
	/* Make sure each CPU has at least made it to the state we need.
	 *
	 * FIXME: There is a (slim) chance of a problem if not all of the CPUs
	 * are correctly onlined. If somehow we start a CPU on boot with RTAS
	 * start-cpu, but somehow that CPU doesn't write callin_cpu_map[] in
	 * time, the boot CPU will time out. If it does eventually execute
	 * stuff, the secondary will start up (paca[].cpu_start was written) and
	 * get into a peculiar state. If the platform supports
	 * smp_ops->take_timebase(), the secondary CPU will probably be spinning
	 * in there. If not (i.e. pseries), the secondary will continue on and
	 * try to online itself/idle/etc. If it survives that, we need to find
	 * these possible-but-not-online-but-should-be CPUs and chaperone them
	 * into kexec_smp_wait().
	 */
	for_each_online_cpu(i) {
		if (i == my_cpu)
			continue;

		while (paca[i].kexec_state < wait_state) {
			barrier();
			if (i != notified) {
				printk(KERN_INFO "kexec: waiting for cpu %d "
				       "(physical %d) to enter %i state\n",
				       i, paca[i].hw_cpu_id, wait_state);
				notified = i;
			}
		}
	}
	mb();
}

/*
 * We need to make sure each present CPU is online. The next kernel will scan
 * the device tree and assume primary threads are online and query secondary
 * threads via RTAS to online them if required. If we don't online primary
 * threads, they will be stuck. However, we also online secondary threads as we
 * may be using 'cede offline'. In this case RTAS doesn't see the secondary
 * threads as offline -- and again, these CPUs will be stuck.
 *
 * So, we online all CPUs that should be running, including secondary threads.
 */
static void wake_offline_cpus(void)
{
	int cpu = 0;

	for_each_present_cpu(cpu) {
		if (!cpu_online(cpu)) {
			printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
			       cpu);
			cpu_up(cpu);
		}
	}
}

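/*
 * Bring every present CPU online, funnel the others into
 * kexec_smp_down(), and wait until they have all reached real mode
 * before the MMU mappings go away.
 */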
static void kexec_prepare_cpus(void)
{
	wake_offline_cpus();
	smp_call_function(kexec_smp_down, NULL, /* wait */0);
	local_irq_disable();
	mb(); /* make sure IRQs are disabled before we say they are */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;

	kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);
	/* we are sure every CPU has IRQs off at this point */
	kexec_all_irq_disabled = 1;

	/* after we tell the others to go down */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);

	/*
	 * Before removing MMU mappings make sure all CPUs have entered real
	 * mode:
	 */
	kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);

	put_cpu();
}

#else /* ! SMP */

static void kexec_prepare_cpus(void)
{
	/*
	 * move the secondaries to us so that we can copy
	 * the new kernel 0-0x100 safely
	 *
	 * do this if kexec in setup.c ?
	 *
	 * We need to release the cpus if we are ever going from a
	 * UP to an SMP kernel.
	 */
	smp_release_cpus();
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);
	local_irq_disable();
}

#endif /* SMP */

/*
 * kexec thread structure and stack.
 *
 * We need to make sure that this is 16384-byte aligned due to the
 * way process stacks are handled. It also must be statically allocated
 * or allocated as part of the kimage, because everything else may be
 * overwritten when we copy the kexec image. We piggyback on the
 * "init_task" linker section here to statically allocate a stack.
 *
 * We could use a smaller stack if we don't care about anything using
 * current, but that audit has not been performed.
 */
static union thread_union kexec_stack __init_task_data =
	{ };

/*
 * For similar reasons to the stack above, the kexecing CPU needs to be on a
 * static PACA; we switch to kexec_paca.
 */
struct paca_struct kexec_paca;

/* Our assembly helper, in kexec_stub.S */
extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start,
					void *image, void *control,
					void (*clear_all)(void)) ATTRIB_NORET;

/* too late to fail here */
void default_machine_kexec(struct kimage *image)
{
	/* prepare control code if any */

	/*
	 * For a normal kexec boot we need to shut the other CPUs down
	 * into our wait loop and quiesce interrupts.
	 * For a crash kexec (crashing_cpu >= 0), stopping the other
	 * CPUs and collecting their pt_regs has already been done via
	 * the debugger IPI.
	 */

	if (crashing_cpu == -1)
		kexec_prepare_cpus();

	pr_debug("kexec: Starting switchover sequence.\n");

	/* switch to a statically allocated stack. Based on irq stack code.
	 * XXX: the task struct will likely be invalid once we do the copy!
	 */
	kexec_stack.thread_info.task = current_thread_info()->task;
	kexec_stack.thread_info.flags = 0;

	/* We need a static PACA, too; copy this CPU's PACA over and switch to
	 * it. Also poison per_cpu_offset to catch anyone using non-static
	 * data.
	 */
	memcpy(&kexec_paca, get_paca(), sizeof(struct paca_struct));
	kexec_paca.data_offset = 0xedeaddeadeeeeeeeUL;
	paca = (struct paca_struct *)RELOC_HIDE(&kexec_paca, 0) -
		kexec_paca.paca_index;
	setup_paca(&kexec_paca);

	/* XXX: If anyone does 'dynamic lppacas' this will also need to be
	 * switched to a static version!
	 */

	/* Some things are best done in assembly. Finding globals with
	 * a toc is easier in C, so pass in what we can.
	 */
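	/*
	 * Arguments, in order: the static stack to run on, the physical
	 * entry point of the new kernel, the kimage (for its indirection
	 * list), the control code page the final copy runs from, and the
	 * hook that clears the hash table once we are in real mode.
	 */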
	kexec_sequence(&kexec_stack, image->start, image,
			page_address(image->control_code_page),
			ppc_md.hpte_clear_all);
	/* NOTREACHED */
}

/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base;

static struct property htab_base_prop = {
	.name = "linux,htab-base",
	.length = sizeof(unsigned long),
	.value = &htab_base,
};

static struct property htab_size_prop = {
	.name = "linux,htab-size",
	.length = sizeof(unsigned long),
	.value = &htab_size_bytes,
};

static int __init export_htab_values(void)
{
	struct device_node *node;
	struct property *prop;

	/* On machines with no htab htab_address is NULL */
	if (!htab_address)
		return -ENODEV;

	node = of_find_node_by_path("/chosen");
	if (!node)
		return -ENODEV;

	/* remove any stale properties so ours can be found */
	prop = of_find_property(node, htab_base_prop.name, NULL);
	if (prop)
		prom_remove_property(node, prop);
	prop = of_find_property(node, htab_size_prop.name, NULL);
	if (prop)
		prom_remove_property(node, prop);

	htab_base = __pa(htab_address);
	prom_add_property(node, &htab_base_prop);
	prom_add_property(node, &htab_size_prop);

	of_node_put(node);
	return 0;
}
late_initcall(export_htab_values);
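
/*
 * A minimal sketch (not part of this file) of how the next kernel
 * could read these values back out of /chosen; error handling is
 * elided and the function name is hypothetical.
 */
static unsigned long __init read_htab_base_from_chosen(void)
{
	struct device_node *node = of_find_node_by_path("/chosen");
	const unsigned long *basep;

	if (!node)
		return 0;
	basep = of_get_property(node, "linux,htab-base", NULL);
	of_node_put(node);
	return basep ? *basep : 0;
}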