/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/* Store all idle threads; these can be reused instead of creating
 * new threads. This also avoids complicated thread-destroy
 * functionality for idle threads.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
 * removed after init for !CONFIG_HOTPLUG_CPU.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x)	(per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p)	(per_cpu(idle_thread_array, x) = (p))
#else
static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)	(idle_thread_array[(x)] = (p))
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

#ifdef CONFIG_PPC64
void __devinit smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	paca[nr].cpu_start = 1;
	smp_mb();
}
#endif

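/*
 * Demultiplex an incoming IPI message: the platform's IPI interrupt
 * handler (or debug_ipi_action() below) passes in the message number
 * it received, and we dispatch to the matching action.
 */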
void smp_message_recv(int msg)
{
	switch (msg) {
	case PPC_MSG_CALL_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		/* we notice need_resched on exit */
		break;
	case PPC_MSG_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case PPC_MSG_DEBUGGER_BREAK:
		if (crash_ipi_function_ptr) {
			crash_ipi_function_ptr(get_irq_regs());
			break;
		}
#ifdef CONFIG_DEBUGGER
		debugger_ipi(get_irq_regs());
		break;
#endif /* CONFIG_DEBUGGER */
		/* FALLTHROUGH */
	default:
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	/* we just need the return path side effect of checking need_resched */
	return IRQ_HANDLED;
}

static irqreturn_t call_function_single_action(int irq, void *data)
{
	generic_smp_call_function_single_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
	smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
	return IRQ_HANDLED;
}

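/*
 * Per-message interrupt handlers, used when the interrupt controller
 * provides a separate hardware IPI per message; they are wired up by
 * smp_request_message_ipi() below.
 */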
static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
		return -EINVAL;
	}
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
	if (msg == PPC_MSG_DEBUGGER_BREAK) {
		return 1;
	}
#endif
	err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU,
			  smp_ipi_name[msg], 0);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
	if (likely(smp_ops))
		smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
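/*
 * Used on the kexec crash path: record the crash callback, then send a
 * debugger-break IPI to all other CPUs so that each enters the
 * callback with its register state.
 */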
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback && smp_ops) {
		mb();
		smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
	}
}
#endif

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void __devinit smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up yet, but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops) {
		if (smp_ops->probe)
			max_cpus = smp_ops->probe();
		else
			max_cpus = NR_CPUS;
	} else {
		max_cpus = 1;
	}
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	migrate_irqs();
	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

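/*
 * Default "die" handshake run on the dying CPU itself: mark this CPU's
 * state as CPU_DEAD (generic_cpu_die() above polls for that), then
 * spin until the CPU is asked to come back up with CPU_UP_PREPARE.
 */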
void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}
#endif

struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}

static int __cpuinit create_idle(unsigned int cpu)
{
	struct thread_info *ti;
	struct create_idle c_idle = {
		.cpu	= cpu,
		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
	};
	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);

	c_idle.idle = get_idle_for_cpu(cpu);

	/* We can't use kernel_thread since we must avoid rescheduling
	 * the child. We use a workqueue because we want to fork from
	 * a kernel thread, not whatever userspace process happens to
	 * be trying to online us.
	 */
	if (!c_idle.idle) {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	} else
		init_idle(c_idle.idle, cpu);
	if (IS_ERR(c_idle.idle)) {
		pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
		return PTR_ERR(c_idle.idle);
	}
	ti = task_thread_info(c_idle.idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = c_idle.idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	current_set[cpu] = ti;

	return 0;
}

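/*
 * Bring one secondary CPU online: make sure it has an idle thread,
 * kick it via smp_ops->kick_cpu(), then wait for it to check in
 * through cpu_callin_map and mark itself online.
 */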
int __cpuinit __cpu_up(unsigned int cpu)
{
	int rc, c;

	secondary_ti = current_set[cpu];

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	/* Make sure we have an idle thread */
	rc = create_idle(cpu);
	if (rc)
		return rc;

	/* Make sure callin-map entry is 0 (can be leftover from a
	 * CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	smp_ops->kick_cpu(cpu);

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const int *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = *reg;
out:
	of_node_put(np);
	return id;
}

/* Helper routines for cpu to core mapping */
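/*
 * These convert between a logical CPU (hardware thread) number and its
 * core index by shifting with threads_shift, where threads_per_core ==
 * 1 << threads_shift. For example, with 4 threads per core
 * (threads_shift == 2), CPUs 4-7 are the threads of core 1, and
 * cpu_first_thread_of_core(1) == 4.
 */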
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

/* Must be called when no change can occur to cpu_present_map,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

/* Activate a secondary processor. */
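/*
 * Entry point for a freshly kicked secondary: record its CPU info,
 * program the decrementer, check in via cpu_callin_map, synchronize
 * the timebase if the platform requires it, add itself to the sibling
 * and core masks, and finally enter the idle loop.
 */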
void __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	struct device_node *l2_cache;
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;
#endif
	ipi_call_lock();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	l2_cache = cpu_to_l2cache(cpu);
	for_each_online_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_set_cpu(cpu, cpu_core_mask(i));
			cpumask_set_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
	ipi_call_unlock();

	local_irq_enable();

	cpu_idle();

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the
	 * meantime, so we pin ourselves to CPU 0 for a short while.
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, &current->cpus_allowed);
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();
}

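/*
 * Tell the scheduler that, on CPUs advertising CPU_FTR_ASYM_SMT, tasks
 * should be packed onto the lower-numbered SMT threads of each core
 * (SD_ASYM_PACKING).
 */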
int arch_sd_sibling_asym_packing(void)
{
	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		return SD_ASYM_PACKING;
	}
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
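/*
 * Tear-down counterpart of the map updates in start_secondary(): let
 * the platform disable the CPU, then drop it from the sibling, core
 * and shared-L2 masks.
 */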
int __cpu_disable(void)
{
	struct device_node *l2_cache;
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}

	l2_cache = cpu_to_l2cache(cpu);
	for_each_present_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_clear_cpu(cpu, cpu_core_mask(i));
			cpumask_clear_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);

void cpu_hotplug_driver_lock(void)
{
	mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
}

void cpu_hotplug_driver_unlock(void)
{
	mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
}

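/*
 * Run on the CPU that is going offline once it has finished its
 * teardown; hands control to the platform's cpu_die() hook.
 */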
void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif