// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>
#include <linux/processor.h>
#include <linux/random.h>
#include <linux/stackprotector.h>
#include <linux/pgtable.h>
#include <linux/clockchips.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/cpu_has_feature.h>
#include <asm/ftrace.h>
#include <asm/kup.h>
#include <asm/fadump.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct task_struct *secondary_current;
bool has_big_cores;
bool coregroup_enabled;
bool thread_group_shares_l2;
bool thread_group_shares_l3;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
static DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
EXPORT_SYMBOL_GPL(has_big_cores);

enum {
#ifdef CONFIG_SCHED_SMT
	smt_idx,
#endif
	cache_idx,
	mc_idx,
	die_idx,
};

#define MAX_THREAD_LIST_SIZE	8
#define THREAD_GROUP_SHARE_L1	1
#define THREAD_GROUP_SHARE_L2_L3 2
struct thread_groups {
	unsigned int property;
	unsigned int nr_groups;
	unsigned int threads_per_group;
	unsigned int thread_list[MAX_THREAD_LIST_SIZE];
};

/* Maximum number of properties that groups of threads within a core can share */
#define MAX_THREAD_GROUP_PROPERTIES 2

struct thread_groups_list {
	unsigned int nr_properties;
	struct thread_groups property_tgs[MAX_THREAD_GROUP_PROPERTIES];
};

static struct thread_groups_list tgl[NR_CPUS] __initdata;
/*
 * On big-core systems, thread_group_l1_cache_map for each CPU corresponds to
 * the set of its siblings that share the L1-cache.
 */
DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);

/*
 * On some big-core systems, thread_group_l2_cache_map for each CPU
 * corresponds to the set of its siblings within the core that share the
 * L2-cache.
 */
DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);

/*
 * On P10, thread_group_l3_cache_map for each CPU is equal to the
 * thread_group_l2_cache_map
 */
DEFINE_PER_CPU(cpumask_var_t, thread_group_l3_cache_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}
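
/*
 * Example (illustrative): with threads_per_core = 8 and SMT limited to 4
 * threads at boot (smt_enabled_at_boot = 4), threads 0-3 of each core are
 * bootable and threads 4-7 are held back; with SMT disabled at boot
 * (smt_enabled_at_boot = 0), only thread 0 of each core is brought up.
 */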

#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca_ptrs[nr]->cpu_start) {
		paca_ptrs[nr]->cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	timer_broadcast_interrupt();
	return IRQ_HANDLED;
}
#endif

#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
{
	smp_handle_nmi_ipi(get_irq_regs());
	return IRQ_HANDLED;
}
#endif

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#endif
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
#endif
};

/*
 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 * than going through the call function infrastructure, and strongly
 * serialized, so it is more appropriate for debugging.
 */
const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
#endif
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = "nmi ipi",
#endif
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
		return -EINVAL;
#ifndef CONFIG_NMI_IPI
	if (msg == PPC_MSG_NMI_IPI)
		return 1;
#endif

	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	smp_muxed_ipi_set_message(cpu, msg);

	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
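
/*
 * IPI_MESSAGE(A) selects the byte lane of info->messages that
 * smp_muxed_ipi_set_message() writes with message[msg] = 1: byte A of the
 * long is bit 8*A on little-endian and bit (BITS_PER_LONG - 8) - 8*A on
 * big-endian. For example, on 64-bit big-endian,
 * IPI_MESSAGE(PPC_MSG_CALL_FUNCTION) is 1UL << 56, the most significant
 * byte, which is where message[0] lands in memory.
 */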

irqreturn_t smp_ipi_demux(void)
{
	mb();	/* order any irq clear */

	return smp_ipi_demux_relaxed();
}

/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
	struct cpu_messages *info;
	unsigned long all;

	info = this_cpu_ptr(&ipi_message);
	do {
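		/*
		 * xchg() atomically snapshots and clears every pending
		 * message byte; anything posted concurrently is either seen
		 * in this pass or caught by the loop condition below.
		 */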
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			timer_broadcast_interrupt();
#endif
#ifdef CONFIG_NMI_IPI
		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
			nmi_ipi_action(0, NULL);
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_NMI_IPI

/*
 * "NMI IPI" system.
 *
 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 * a running system. They can be used for crash, debug, halt/reboot, etc.
 *
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then returns. Subsequent IPIs can be issued before targets
 * have returned from their handlers, so there is no guarantee about
 * concurrency or re-entrancy.
 *
 * A new NMI can be issued before all targets exit the handler.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which may cause a crash.
 */

static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static bool nmi_ipi_busy = false;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;

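/*
 * These locking helpers use a bare arch_atomic_cmpxchg() test-and-set rather
 * than a regular spinlock so that, like the rest of this noinstr NMI path,
 * they avoid lock instrumentation and can safely be taken from the NMI
 * handler itself.
 */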
noinstr static void nmi_ipi_lock_start(unsigned long *flags)
{
	raw_local_irq_save(*flags);
	hard_irq_disable();
	while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
		raw_local_irq_restore(*flags);
		spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

noinstr static void nmi_ipi_lock(void)
{
	while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
		spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
}

noinstr static void nmi_ipi_unlock(void)
{
	smp_mb();
	WARN_ON(arch_atomic_read(&__nmi_ipi_lock) != 1);
	arch_atomic_set(&__nmi_ipi_lock, 0);
}

noinstr static void nmi_ipi_unlock_end(unsigned long *flags)
{
	nmi_ipi_unlock();
	raw_local_irq_restore(*flags);
}

/*
 * Platform NMI handler calls this to ack
 */
noinstr int smp_handle_nmi_ipi(struct pt_regs *regs)
{
	void (*fn)(struct pt_regs *) = NULL;
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 0;

	/*
	 * Unexpected NMIs are possible here because the interrupt may not
	 * be able to distinguish NMI IPIs from other types of NMIs, or
	 * because the caller may have timed out.
	 */
	nmi_ipi_lock_start(&flags);
	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
		fn = READ_ONCE(nmi_ipi_function);
		WARN_ON_ONCE(!fn);
		ret = 1;
	}
	nmi_ipi_unlock_end(&flags);

	if (fn)
		fn(regs);

	return ret;
}

static void do_smp_send_nmi_ipi(int cpu, bool safe)
{
	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
		return;

	if (cpu >= 0) {
		do_message_pass(cpu, PPC_MSG_NMI_IPI);
	} else {
		int c;

		for_each_online_cpu(c) {
			if (c == raw_smp_processor_id())
				continue;
			do_message_pass(c, PPC_MSG_NMI_IPI);
		}
	}
}

/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   begin executing the handler, == 0 specifies indefinite delay.
 */
static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
			      u64 delay_us, bool safe)
{
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 1;

	BUG_ON(cpu == me);
	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);

	if (unlikely(!smp_ops))
		return 0;

	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy) {
		nmi_ipi_unlock_end(&flags);
		spin_until_cond(!nmi_ipi_busy);
		nmi_ipi_lock_start(&flags);
	}
	nmi_ipi_busy = true;
	nmi_ipi_function = fn;

	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));

	if (cpu < 0) {
		/* ALL_OTHERS */
		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	} else {
		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
	}

	nmi_ipi_unlock();

	/* Interrupts remain hard disabled */

	do_smp_send_nmi_ipi(cpu, safe);

	nmi_ipi_lock();
	/* nmi_ipi_busy is set here, so unlock/lock is okay */
	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
		nmi_ipi_unlock();
		udelay(1);
		nmi_ipi_lock();
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				break;
		}
	}

	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
		ret = 0;
		cpumask_clear(&nmi_ipi_pending_mask);
	}

	nmi_ipi_function = NULL;
	nmi_ipi_busy = false;

	nmi_ipi_unlock_end(&flags);

	return ret;
}

int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
}
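
/*
 * For example, smp_send_debugger_break() below issues
 * smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000),
 * i.e. it waits up to one second (delay_us = 1000000) for all other CPUs
 * to enter the NMI handler.
 */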

int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
}
#endif /* CONFIG_NMI_IPI */

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#ifdef CONFIG_DEBUGGER
static void debugger_ipi_callback(struct pt_regs *regs)
{
	debugger_ipi(regs);
}

void smp_send_debugger_break(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	int cpu;

	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
	if (kdump_in_progress() && crash_wake_offline) {
		for_each_present_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			/*
			 * crash_ipi_callback will wait for
			 * all cpus, including offline CPUs.
			 * We don't care about nmi_ipi_function.
			 * Offline cpus will jump straight into
			 * crash_ipi_callback, we can skip the
			 * entire NMI dance and waiting for
			 * cpus to clear pending mask, etc.
			 */
			do_smp_send_nmi_ipi(cpu, false);
		}
	}
}
#endif

#ifdef CONFIG_NMI_IPI
static void crash_stop_this_cpu(struct pt_regs *regs)
#else
static void crash_stop_this_cpu(void *dummy)
#endif
{
	/*
	 * Just busy wait here and avoid marking CPU as offline to ensure
	 * register data is captured appropriately.
	 */
	while (1)
		cpu_relax();
}

void crash_smp_send_stop(void)
{
	static bool stopped = false;

	/*
	 * In case of fadump, register data for all CPUs is captured by f/w
	 * on ibm,os-term rtas call. Skip IPI callbacks to other CPUs before
	 * this rtas call to avoid tricky post processing of those CPUs'
	 * backtraces.
	 */
	if (should_fadump_crash())
		return;

	if (stopped)
		return;

	stopped = true;

#ifdef CONFIG_NMI_IPI
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_stop_this_cpu, 1000000);
#else
	smp_call_function(crash_stop_this_cpu, NULL, 0);
#endif /* CONFIG_NMI_IPI */
}

#ifdef CONFIG_NMI_IPI
static void nmi_stop_this_cpu(struct pt_regs *regs)
{
	/*
	 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
	 */
	set_cpu_online(smp_processor_id(), false);

	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
}

#else /* CONFIG_NMI_IPI */

static void stop_this_cpu(void *dummy)
{
	hard_irq_disable();

	/*
	 * Offlining CPUs in stop_this_cpu can result in scheduler warnings
	 * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
	 * to know other CPUs are offline before it breaks locks to flush
	 * printk buffers, in case we panic()ed while holding the lock.
	 */
	set_cpu_online(smp_processor_id(), false);

	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	static bool stopped = false;

	/*
	 * Prevent waiting on csd lock from a previous smp_send_stop.
	 * This is racy, but in general callers try to do the right
	 * thing and only fire off one smp_send_stop (e.g., see
	 * kernel/panic.c)
	 */
	if (stopped)
		return;

	stopped = true;

	smp_call_function(stop_this_cpu, NULL, 0);
}
#endif /* CONFIG_NMI_IPI */

static struct task_struct *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

/*
 * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
 * rather than just passing around the cpumask we pass around a function that
 * returns the cpumask for the given CPU.
 */
static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
{
	cpumask_set_cpu(i, get_cpumask(j));
	cpumask_set_cpu(j, get_cpumask(i));
}

#ifdef CONFIG_HOTPLUG_CPU
static void set_cpus_unrelated(int i, int j,
			       struct cpumask *(*get_cpumask)(int))
{
	cpumask_clear_cpu(i, get_cpumask(j));
	cpumask_clear_cpu(j, get_cpumask(i));
}
#endif

/*
 * Extends set_cpus_related. Instead of setting one CPU at a time in
 * dstmask, set srcmask in one shot. dstmask should be a superset of srcmask.
 */
static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int),
				struct cpumask *(*dstmask)(int))
{
	struct cpumask *mask;
	int k;

	mask = srcmask(j);
	for_each_cpu(k, srcmask(i))
		cpumask_or(dstmask(k), dstmask(k), mask);

	if (i == j)
		return;

	mask = srcmask(i);
	for_each_cpu(k, srcmask(j))
		cpumask_or(dstmask(k), dstmask(k), mask);
}
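
/*
 * Worked example: if srcmask(i) = {4,5} and srcmask(j) = {6,7}, then after
 * or_cpumasks_related(i, j, srcmask, dstmask), every k in {4,5} has had
 * {6,7} OR-ed into dstmask(k) and every k in {6,7} has had {4,5} OR-ed in.
 */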

/*
 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
 *                      property for the CPU device node @dn and stores
 *                      the parsed output in the thread_groups_list
 *                      structure @tglp.
 *
 * @dn: The device node of the CPU device.
 * @tglp: Pointer to a thread group list structure into which the parsed
 *      output of "ibm,thread-groups" is stored.
 *
 * ibm,thread-groups[0..N-1] array defines which group of threads in
 * the CPU-device node can be grouped together based on the property.
 *
 * This array can represent thread groupings for multiple properties.
 *
 * ibm,thread-groups[i + 0] tells us the property based on which the
 * threads are being grouped together. If this value is 1, it implies
 * that the threads in the same group share the L1 and translation
 * cache. If the value is 2, it implies that the threads in the same
 * group share the same L2 cache.
 *
 * ibm,thread-groups[i+1] tells us how many such thread groups exist for the
 * property ibm,thread-groups[i]
 *
 * ibm,thread-groups[i+2] tells us the number of threads in each such
 * group.
 * Suppose k = (ibm,thread-groups[i+1] * ibm,thread-groups[i+2]), then,
 *
 * ibm,thread-groups[i+3..i+k+2] is the list of threads identified by
 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
 * the grouping.
 *
 * Example:
 * If "ibm,thread-groups" = [1,2,4,8,10,12,14,9,11,13,15,2,2,4,8,10,12,14,9,11,13,15]
 * This can be decomposed into two consecutive arrays:
 * a) [1,2,4,8,10,12,14,9,11,13,15]
 * b) [2,2,4,8,10,12,14,9,11,13,15]
 *
 * wherein,
 *
 * a) provides information of Property "1" being shared by "2" groups,
 *    each with "4" threads. The "ibm,ppc-interrupt-server#s" of
 *    the first group is {8,10,12,14} and the
 *    "ibm,ppc-interrupt-server#s" of the second group is
 *    {9,11,13,15}. Property "1" is indicative of the threads in the
 *    group sharing L1 cache, translation cache and Instruction Data
 *    flow.
 *
 * b) provides information of Property "2" being shared by "2" groups,
 *    each group with "4" threads. The "ibm,ppc-interrupt-server#s" of
 *    the first group is {8,10,12,14} and the
 *    "ibm,ppc-interrupt-server#s" of the second group is
 *    {9,11,13,15}. Property "2" indicates that the threads in each
 *    group share the L2-cache.
 *
 * Returns 0 on success, -EINVAL if the property does not exist,
 * -ENODATA if property does not have a value, and -EOVERFLOW if the
 * property data isn't large enough.
 */
static int parse_thread_groups(struct device_node *dn,
			       struct thread_groups_list *tglp)
{
	unsigned int property_idx = 0;
	u32 *thread_group_array;
	size_t total_threads;
	int ret = 0, count;
	u32 *thread_list;
	int i = 0;

	count = of_property_count_u32_elems(dn, "ibm,thread-groups");
	thread_group_array = kcalloc(count, sizeof(u32), GFP_KERNEL);
	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
					 thread_group_array, count);
	if (ret)
		goto out_free;

	while (i < count && property_idx < MAX_THREAD_GROUP_PROPERTIES) {
		int j;
		struct thread_groups *tg = &tglp->property_tgs[property_idx++];

		tg->property = thread_group_array[i];
		tg->nr_groups = thread_group_array[i + 1];
		tg->threads_per_group = thread_group_array[i + 2];
		total_threads = tg->nr_groups * tg->threads_per_group;

		thread_list = &thread_group_array[i + 3];

		for (j = 0; j < total_threads; j++)
			tg->thread_list[j] = thread_list[j];
		i = i + 3 + total_threads;
	}

	tglp->nr_properties = property_idx;

out_free:
	kfree(thread_group_array);
	return ret;
}

/*
 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
 *                              that @cpu belongs to.
 *
 * @cpu : The logical CPU whose thread group is being searched.
 * @tg : The thread-group structure of the CPU node which @cpu belongs
 *       to.
 *
 * Returns the index to tg->thread_list that points to the start
 * of the thread_group that @cpu belongs to.
 *
 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
 * tg->thread_list.
 */
static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
{
	int hw_cpu_id = get_hard_smp_processor_id(cpu);
	int i, j;

	for (i = 0; i < tg->nr_groups; i++) {
		int group_start = i * tg->threads_per_group;

		for (j = 0; j < tg->threads_per_group; j++) {
			int idx = group_start + j;

			if (tg->thread_list[idx] == hw_cpu_id)
				return group_start;
		}
	}

	return -1;
}
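
/*
 * For example, with thread_list = {8,10,12,14,9,11,13,15} and
 * threads_per_group = 4, a CPU whose hardware id is 11 sits at index 5,
 * so this returns group_start = 4, i.e. the {9,11,13,15} group.
 */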

static struct thread_groups *__init get_thread_groups(int cpu,
						      int group_property,
						      int *err)
{
	struct device_node *dn = of_get_cpu_node(cpu, NULL);
	struct thread_groups_list *cpu_tgl = &tgl[cpu];
	struct thread_groups *tg = NULL;
	int i;
	*err = 0;

	if (!dn) {
		*err = -ENODATA;
		return NULL;
	}

	if (!cpu_tgl->nr_properties) {
		*err = parse_thread_groups(dn, cpu_tgl);
		if (*err)
			goto out;
	}

	for (i = 0; i < cpu_tgl->nr_properties; i++) {
		if (cpu_tgl->property_tgs[i].property == group_property) {
			tg = &cpu_tgl->property_tgs[i];
			break;
		}
	}

	if (!tg)
		*err = -EINVAL;
out:
	of_node_put(dn);
	return tg;
}

static int __init update_mask_from_threadgroup(cpumask_var_t *mask, struct thread_groups *tg,
					       int cpu, int cpu_group_start)
{
	int first_thread = cpu_first_thread_sibling(cpu);
	int i;

	zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++) {
		int i_group_start = get_cpu_thread_group_start(i, tg);

		if (unlikely(i_group_start == -1)) {
			WARN_ON_ONCE(1);
			return -ENODATA;
		}

		if (i_group_start == cpu_group_start)
			cpumask_set_cpu(i, *mask);
	}

	return 0;
}

static int __init init_thread_group_cache_map(int cpu, int cache_property)
{
	int cpu_group_start = -1, err = 0;
	struct thread_groups *tg = NULL;
	cpumask_var_t *mask = NULL;

	if (cache_property != THREAD_GROUP_SHARE_L1 &&
	    cache_property != THREAD_GROUP_SHARE_L2_L3)
		return -EINVAL;

	tg = get_thread_groups(cpu, cache_property, &err);

	if (!tg)
		return err;

	cpu_group_start = get_cpu_thread_group_start(cpu, tg);

	if (unlikely(cpu_group_start == -1)) {
		WARN_ON_ONCE(1);
		return -ENODATA;
	}

	if (cache_property == THREAD_GROUP_SHARE_L1) {
		mask = &per_cpu(thread_group_l1_cache_map, cpu);
		update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
	} else if (cache_property == THREAD_GROUP_SHARE_L2_L3) {
		mask = &per_cpu(thread_group_l2_cache_map, cpu);
		update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
		mask = &per_cpu(thread_group_l3_cache_map, cpu);
		update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
	}

	return 0;
}

static bool shared_caches;

#ifdef CONFIG_SCHED_SMT
/* Scheduler domain flags for SMT; handles CPUs with an asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}
	return flags;
}
#endif

/*
 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
 * since the migrated task remains cache hot. We want to take advantage of this
 * at the scheduler level so an extra topology level is required.
 */
static int powerpc_shared_cache_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}

/*
 * We can't just pass cpu_l2_cache_mask() directly because it returns
 * a non-const pointer and the compiler barfs on that.
 */
static const struct cpumask *shared_cache_mask(int cpu)
{
	return per_cpu(cpu_l2_cache_map, cpu);
}

#ifdef CONFIG_SCHED_SMT
static const struct cpumask *smallcore_smt_mask(int cpu)
{
	return cpu_smallcore_mask(cpu);
}
#endif

static struct cpumask *cpu_coregroup_mask(int cpu)
{
	return per_cpu(cpu_coregroup_map, cpu);
}

static bool has_coregroup_support(void)
{
	return coregroup_enabled;
}

static const struct cpumask *cpu_mc_mask(int cpu)
{
	return cpu_coregroup_mask(cpu);
}

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
	{ cpu_mc_mask, SD_INIT_NAME(MC) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
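
/*
 * Note: the order of powerpc_topology[] entries must match the
 * smt_idx/cache_idx/mc_idx/die_idx enum above, since fixup_topology()
 * indexes this array with those constants when patching masks and flags.
 */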

static int __init init_big_cores(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);

		if (err)
			return err;

		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
					GFP_KERNEL,
					cpu_to_node(cpu));
	}

	has_big_cores = true;

	for_each_possible_cpu(cpu) {
		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2_L3);

		if (err)
			return err;
	}

	thread_group_shares_l2 = true;
	thread_group_shares_l3 = true;
	pr_debug("L2/L3 cache only shared by the threads in the small core\n");

	return 0;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		if (has_coregroup_support())
			zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
						GFP_KERNEL, cpu_to_node(cpu));

#ifdef CONFIG_NUMA
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
#endif
	}

	/* Init the cpumasks so the boot CPU is related to itself */
	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (has_coregroup_support())
		cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));

	init_big_cores();
	if (has_big_cores) {
		cpumask_set_cpu(boot_cpuid,
				cpu_smallcore_mask(boot_cpuid));
	}

	if (cpu_to_chip_id(boot_cpuid) != -1) {
		int idx = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);

		/*
		 * All threads of a core belong to the same core;
		 * chip_id_lookup_table will have one entry per core.
		 * Assumption: if boot_cpuid doesn't have a chip-id, then no
		 * other CPU will have one either.
		 */
		chip_id_lookup_table = kcalloc(idx, sizeof(int), GFP_KERNEL);
		if (chip_id_lookup_table)
			memset(chip_id_lookup_table, -1, sizeof(int) * idx);
	}

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca_ptrs[boot_cpuid]->__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = current;
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	/* Update affinity of all IRQs previously aimed at this CPU */
	irq_migrate_all_off_this_cpu();

	/*
	 * Depending on the details of the interrupt controller, it's possible
	 * that one of the interrupts we just migrated away from this CPU is
	 * actually already pending on this CPU. If we leave it in that state
	 * the interrupt will never be EOI'ed, and will never fire again. So
	 * temporarily enable interrupts here, to allow any pending interrupt to
	 * be received (and EOI'ed), before we take this CPU offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
#ifdef CONFIG_PPC64
	paca_ptrs[cpu]->__current = idle;
	paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
				 THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	task_thread_info(idle)->cpu = cpu;
	secondary_current = current_set[cpu] = idle;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * The platform might need to allocate resources prior to bringing
	 * up the CPU
	 */
	if (smp_ops->prepare_cpu) {
		rc = smp_ops->prepare_cpu(cpu);
		if (rc)
			return rc;
	}

	/* Make sure callin-map entry is 0 (can be left over from a
	 * CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	spin_until_cond(cpu_online(cpu));

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	id = of_get_cpu_hwid(np, 0);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
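
/*
 * For example, with threads_shift = 3 (8 threads per core), CPU 13 is in
 * core 1 (cpu_core_index_of_thread(13) == 1), and that core's first thread
 * is CPU 8 (cpu_first_thread_of_core(1) == 8).
 */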

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

static bool update_mask_by_l2(int cpu, cpumask_var_t *mask)
{
	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
	struct device_node *l2_cache, *np;
	int i;

	if (has_big_cores)
		submask_fn = cpu_smallcore_mask;

	/*
	 * If the threads in a thread-group share L2 cache, then the
	 * L2-mask can be obtained from thread_group_l2_cache_map.
	 */
	if (thread_group_shares_l2) {
		cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));

		for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) {
			if (cpu_online(i))
				set_cpus_related(i, cpu, cpu_l2_cache_mask);
		}

		/* Verify that L1-cache siblings are a subset of L2 cache-siblings */
		if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) &&
		    !cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) {
			pr_warn_once("CPU %d : Inconsistent L1 and L2 cache siblings\n",
				     cpu);
		}

		return true;
	}

	l2_cache = cpu_to_l2cache(cpu);
	if (!l2_cache || !*mask) {
		/* Assume only core siblings share cache with this CPU */
		for_each_cpu(i, cpu_sibling_mask(cpu))
			set_cpus_related(cpu, i, cpu_l2_cache_mask);

		return false;
	}

	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));

	/* Update l2-cache mask with all the CPUs that are part of submask */
	or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);

	/* Skip all CPUs already part of current CPU l2-cache mask */
	cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu));

	for_each_cpu(i, *mask) {
		/*
		 * When updating the masks, the current CPU has not yet been
		 * marked online, but we need to update the cache masks.
		 */
		np = cpu_to_l2cache(i);

		/* Skip all CPUs already part of current CPU l2-cache */
		if (np == l2_cache) {
			or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
			cpumask_andnot(*mask, *mask, submask_fn(i));
		} else {
			cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(i));
		}

		of_node_put(np);
	}
	of_node_put(l2_cache);

	return true;
}

#ifdef CONFIG_HOTPLUG_CPU
static void remove_cpu_from_masks(int cpu)
{
	struct cpumask *(*mask_fn)(int) = cpu_sibling_mask;
	int i;

	unmap_cpu_from_node(cpu);

	if (shared_caches)
		mask_fn = cpu_l2_cache_mask;

	for_each_cpu(i, mask_fn(cpu)) {
		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
		if (has_big_cores)
			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
	}

	for_each_cpu(i, cpu_core_mask(cpu))
		set_cpus_unrelated(cpu, i, cpu_core_mask);

	if (has_coregroup_support()) {
		for_each_cpu(i, cpu_coregroup_mask(cpu))
			set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
	}
}
#endif

static inline void add_cpu_to_smallcore_masks(int cpu)
{
	int i;

	if (!has_big_cores)
		return;

	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));

	for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) {
		if (cpu_online(i))
			set_cpus_related(i, cpu, cpu_smallcore_mask);
	}
}

static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
{
	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
	int coregroup_id = cpu_to_coregroup_id(cpu);
	int i;

	if (shared_caches)
		submask_fn = cpu_l2_cache_mask;

	if (!*mask) {
		/* Assume only siblings are part of this CPU's coregroup */
		for_each_cpu(i, submask_fn(cpu))
			set_cpus_related(cpu, i, cpu_coregroup_mask);

		return;
	}

	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));

	/* Update coregroup mask with all the CPUs that are part of submask */
	or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);

	/* Skip all CPUs already part of coregroup mask */
	cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));

	for_each_cpu(i, *mask) {
		/* Skip all CPUs not part of this coregroup */
		if (coregroup_id == cpu_to_coregroup_id(i)) {
			or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
			cpumask_andnot(*mask, *mask, submask_fn(i));
		} else {
			cpumask_andnot(*mask, *mask, cpu_coregroup_mask(i));
		}
	}
}

static void add_cpu_to_masks(int cpu)
{
	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
	int first_thread = cpu_first_thread_sibling(cpu);
	cpumask_var_t mask;
	int chip_id = -1;
	bool ret;
	int i;

	/*
	 * This CPU will not be in the online mask yet so we need to manually
	 * add it to its own thread sibling mask.
	 */
	map_cpu_to_node(cpu, cpu_to_node(cpu));
	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
	cpumask_set_cpu(cpu, cpu_core_mask(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++)
		if (cpu_online(i))
			set_cpus_related(i, cpu, cpu_sibling_mask);

	add_cpu_to_smallcore_masks(cpu);

	/* In CPU-hotplug path, hence use GFP_ATOMIC */
	ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
	update_mask_by_l2(cpu, &mask);

	if (has_coregroup_support())
		update_coregroup_mask(cpu, &mask);

	if (chip_id_lookup_table && ret)
		chip_id = cpu_to_chip_id(cpu);

	if (shared_caches)
		submask_fn = cpu_l2_cache_mask;

	/* Update core_mask with all the CPUs that are part of submask */
	or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);

	/* Skip all CPUs already part of current CPU core mask */
	cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));

	/* If chip_id is -1, limit the cpu_core_mask to within the DIE */
	if (chip_id == -1)
		cpumask_and(mask, mask, cpu_cpu_mask(cpu));

	for_each_cpu(i, mask) {
		if (chip_id == cpu_to_chip_id(i)) {
			or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
			cpumask_andnot(mask, mask, submask_fn(i));
		} else {
			cpumask_andnot(mask, mask, cpu_core_mask(i));
		}
	}

	free_cpumask_var(mask);
}

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = raw_smp_processor_id();

	/* PPC64 calls setup_kup() in early_setup_secondary() */
	if (IS_ENABLED(CONFIG_PPC32))
		setup_kup();

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	rcu_cpu_starting(cpu);
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	/* Update topology CPU masks */
	add_cpu_to_masks(cpu);

	/*
	 * Check for any shared caches. Note that this must be done on a
	 * per-core basis because one core in the pair might be disabled.
	 */
	if (!shared_caches) {
		struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
		struct cpumask *mask = cpu_l2_cache_mask(cpu);

		if (has_big_cores)
			sibling_mask = cpu_smallcore_mask;

		if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
			shared_caches = true;
	}

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	boot_init_stack_canary();

	local_irq_enable();

	/* We can enable ftrace for secondary cpus now */
	this_cpu_enable_ftrace();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

#ifdef CONFIG_PROFILING
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#endif

static void __init fixup_topology(void)
{
	int i;

#ifdef CONFIG_SCHED_SMT
	if (has_big_cores) {
		pr_info("Big cores detected but using small core scheduling\n");
		powerpc_topology[smt_idx].mask = smallcore_smt_mask;
	}
#endif

	if (!has_coregroup_support())
		powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;

	/*
	 * Try to consolidate topology levels here instead of
	 * allowing the scheduler to degenerate them.
	 * - Don't consolidate if masks are different.
	 * - Don't consolidate if sd_flags exist and are different.
	 */
	for (i = 1; i <= die_idx; i++) {
		if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask)
			continue;

		if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags &&
		    powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags)
			continue;

		if (!powerpc_topology[i - 1].sd_flags)
			powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags;

		powerpc_topology[i].mask = powerpc_topology[i + 1].mask;
		powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags;
#ifdef CONFIG_SCHED_DEBUG
		powerpc_topology[i].name = powerpc_topology[i + 1].name;
#endif
	}
}
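
/*
 * For example, when coregroup support is absent, fixup_topology() gives the
 * MC level the same mask as CACHE, so the consolidation loop above merges
 * the duplicate level (propagating sd_flags and, under CONFIG_SCHED_DEBUG,
 * the level name) instead of leaving the scheduler to degenerate it later.
 */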

void __init smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * We are running pinned to the boot CPU, see rest_init().
	 */
	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	fixup_topology();
	set_sched_topology(powerpc_topology);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	this_cpu_disable_ftrace();

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	remove_cpu_from_masks(cpu);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void arch_cpu_idle_dead(void)
{
	/*
	 * Disable on the down path. This will be re-enabled by
	 * start_secondary() via start_secondary_resume() below.
	 */
	this_cpu_disable_ftrace();

	if (smp_ops->cpu_offline_self)
		smp_ops->cpu_offline_self();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif