/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>
#include <asm/pcr.h>

#include "cpumap.h"

int sparc64_multi_core __read_mostly;

DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);

static cpumask_t smp_commenced_mask;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

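/* Entry point for secondary cpus: establish the per-cpu base offset,
 * register kernel TSBs with the hypervisor on sun4v, flush the TLB,
 * start the local timer, then announce arrival via callin_flag and
 * wait for the master to admit us through smp_commenced_mask before
 * marking ourselves online.
 */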
void __cpuinit smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	local_irq_enable();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		rmb();

	ipi_call_lock_irq();
	set_cpu_online(cpuid, true);
	ipi_call_unlock_irq();

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave. -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

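/* One measurement pass of the slave/master handshake: the slave sets
 * go[MASTER], the master replies by publishing its %tick through
 * go[SLAVE].  Over NUM_ITERS tries we keep the round trip with the
 * smallest t1 - t0 window and return the estimated offset of the
 * master's tick from the midpoint of that window.
 */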
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_safe("#StoreLoad");
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}

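/* Slave side of tick synchronization: signal readiness via
 * go[MASTER], then run NUM_ROUNDS measurement rounds, applying a
 * damped correction (adjust_latency/4) to the local tick until the
 * measured delta reaches zero.
 */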
void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0)
				done = 1;	/* let's lock on to this... */

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

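/* Master side: kick the target cpu into its sync loop, wait for it
 * to signal readiness, then serve NUM_ROUNDS*NUM_ITERS handshakes,
 * publishing our %tick through go[SLAVE] each time.  itc_sync_lock
 * serializes concurrent cpu bringup.
 */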
static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_safe("#StoreLoad");

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_safe("#StoreLoad");
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
/* XXX Put this in some common place. XXX */
static unsigned long kimage_addr_to_ra(void *p)
{
	unsigned long val = (unsigned long) p;

	return kern_base + (val - KERNBASE);
}

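/* Build an hvtramp_descr describing this cpu's trap block fault area
 * and the kernel image's locked TTE mappings (one entry per 4MB
 * mapping), then ask the hypervisor to start the cpu at the
 * hv_cpu_startup trampoline.
 */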
static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	hdesc = kzalloc(sizeof(*hdesc) +
			(sizeof(struct hvtramp_mapping) *
			 num_kernel_image_mappings - 1),
			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}
	*descrp = hdesc;

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte   = tte_data;
		tte_vaddr += 0x400000;
		tte_data  += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

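/* Fork an idle task for the target cpu, start the cpu via the LDOM
 * hypervisor path or the OBP callback as appropriate, then poll
 * callin_flag for up to ~5 seconds (50000 * 100us) for the cpu to
 * check in.
 */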
static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	void *descr = NULL;
	int timeout, ret;

	p = fork_idle(cpu);
	if (IS_ERR(p))
		return PTR_ERR(p);
	callin_flag = 0;
	cpu_new_thread = task_thread_info(p);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread,
					    &descr);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->phandle, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	kfree(descr);

	return ret;
}

static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

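/* One-at-a-time delivery: walk the cpu list and write the three
 * mondo words to each target's UPA interrupt dispatch registers.
 */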
static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	u64 *mondo, data0, data1, data2;
	u16 *cpu_list;
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);
	data0 = mondo[0];
	data1 = mondo[1];
	data2 = mondo[2];
	for (i = 0; i < cnt; i++)
		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}

/* Cheetah now allows sending the whole 64 bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int nack_busy_id, is_jbus, need_more;
	u64 *mondo, pstate, ver, busy_mask;
	u16 *cpu_list;

	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
			     "stxa	%1, [%4] %6\n\t"
			     "stxa	%2, [%5] %6\n\t"
			     "membar	#Sync\n\t"
			     : /* no outputs */
			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for (i = 0; i < cnt; i++) {
			u64 target, nr;

			nr = cpu_list[i];
			if (nr == 0xffff)
				continue;

			target = (nr << 14) | 0x70;
			if (is_jbus) {
				busy_mask |= (0x1UL << (nr * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, this_cnt = 0;
					for (i = 0; i < cnt; i++) {
						if (cpu_list[i] == 0xffff)
							continue;
						cpu_list[i] = 0xffff;
						this_cnt++;
						if (this_cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for (i = 0; i < cnt; i++) {
				u64 check_mask, nr;

				nr = cpu_list[i];
				if (nr == 0xffff)
					continue;

				if (is_jbus)
					check_mask = (0x2UL << (2*nr));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_list[i] = 0xffff;
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}

/* Multi-cpu list version.  */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int retries, this_cpu, prev_sent, i, saw_cpu_error;
	unsigned long status;
	u16 *cpu_list;

	this_cpu = smp_processor_id();

	cpu_list = __va(tb->cpu_list_pa);

	saw_cpu_error = 0;
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done.  */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err == HV_CPU_STATE_ERROR) {
					saw_cpu_error = (cpu + 1);
					cpu_list[i] = 0xffff;
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	if (unlikely(saw_cpu_error))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "(including %d) were in error state\n",
	       this_cpu, saw_cpu_error - 1);
	return;

fatal_mondo_timeout:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       "progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}

static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);

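/* Fill in this cpu's mondo block (three u64 data words) and cpu
 * list, then hand off to the platform-specific delivery routine
 * chosen in smp_setup_processor_id().
 */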
static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	struct trap_per_cpu *tb;
	int this_cpu, i, cnt;
	unsigned long flags;
	u16 *cpu_list;
	u64 *mondo;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list.  */
	cnt = 0;
	for_each_cpu(i, mask) {
		if (i == this_cpu || !cpu_online(i))
			continue;
		cpu_list[cnt++] = i;
	}

	if (cnt)
		xcall_deliver_impl(tb, cnt);

	local_irq_restore(flags);
}

/* Send cross call to all processors mentioned in MASK except
 * self.  Really, there are only two cases currently,
 * "cpu_online_mask" and "mm_cpumask(mm)".
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

	xcall_deliver(data0, data1, data2, mask);
}

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      cpumask_of(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      cpumask_of(cpu));
}

void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_interrupt();
}

void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_single_interrupt();
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif

static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0 = 0;

		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
		}
		if (data0) {
			xcall_deliver(data0, __pa(pg_addr),
				      (u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr;
	u64 data0;

	if (tlb_type == hypervisor)
		return;

	preempt_disable();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	data0 = 0;
	pg_addr = page_address(page);
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
			      (u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}
	__local_flush_dcache_page(page);

	preempt_enable();
}

void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus on which to run the
 *    TLB flush.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 *    The performance gain from "optimizing" away the cross call for threads is
 *    questionable (in theory the big win for threads is the massive sharing of
 *    address space state across processors).
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm_cpumask(mm));

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

struct tlb_pending_info {
	unsigned long ctx;
	unsigned long nr;
	unsigned long *vaddrs;
};

static void tlb_pending_func(void *info)
{
	struct tlb_pending_info *t = info;

	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	struct tlb_pending_info info;
	int cpu = get_cpu();

	info.ctx = ctx;
	info.nr = nr;
	info.vaddrs = vaddrs;

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
				       &info, 1);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long context = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_cross_call_masked(&xcall_flush_tlb_page,
				      context, vaddr, 0,
				      mm_cpumask(mm));
	__flush_tlb_page(context, vaddr);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end    = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

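/* First caller in raises penguins_are_doing_time, cross-calls
 * xcall_capture, and spins until every other online cpu has checked
 * into smp_capture_registry (the caller itself accounts for one
 * slot).  Nested captures just bump the depth count.
 */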
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
extern void prom_world(int);

void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void __devinit smp_prepare_boot_cpu(void)
{
}

void __init smp_setup_processor_id(void)
{
	if (tlb_type == spitfire)
		xcall_deliver_impl = spitfire_xcall_deliver;
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		xcall_deliver_impl = cheetah_xcall_deliver;
	else
		xcall_deliver_impl = hypervisor_xcall_deliver;
}

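/* Rebuild cpu_core_map (cpus sharing a core_id) and cpu_sibling_map
 * (cpus sharing a proc_id).  CPUs without topology information
 * (core_id 0, proc_id -1) get singleton masks containing only
 * themselves.
 */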
void __devinit smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpumask_set_cpu(i, &cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpumask_set_cpu(j, &cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
		}
	}
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
		if (!cpu_online(cpu)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
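/* Run on the dying cpu itself: tear down its sun4v mondo queues,
 * drop out of smp_commenced_mask so __cpu_die() can observe the
 * exit, then spin with interrupts disabled until stopped (via
 * sun4v_cpu_stop() on LDOM domains).
 */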
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpumask_clear_cpu(cpu, &smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr	%%pstate, %0\n\t"
		"wrpr	%0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu(i, &cpu_core_map[cpu])
		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
	cpumask_clear(&cpu_core_map[cpu]);

	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	smp_wmb();

	/* Make sure no interrupts point to this cpu.  */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	ipi_call_lock();
	set_cpu_online(cpu, false);
	ipi_call_unlock();

	cpu_map_rebuild();

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				set_cpu_present(cpu, false);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
	pcr_arch_init();
}

void smp_send_reschedule(int cpu)
{
	xcall_deliver((u64) &xcall_receive_signal, 0, 0,
		      cpumask_of(cpu));
}

void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	scheduler_ipi();
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
					size_t align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node(NODE_DATA(node),
					   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem(size, align, goal);
#endif
}

static void __init pcpu_free_bootmem(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

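/* Populate the kernel page tables for one page of the percpu
 * first-chunk vmalloc area, allocating intermediate levels from
 * bootmem as needed.
 */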
static void __init pcpu_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, new);
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, new);
	}
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
					    pcpu_cpu_distance,
					    pcpu_alloc_bootmem,
					    pcpu_free_bootmem);
		if (rc)
			pr_warning("PERCPU: %s allocator failed (%d), "
				   "falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
					   pcpu_alloc_bootmem,
					   pcpu_free_bootmem,
					   pcpu_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

	/* Setup %g5 for the boot cpu.  */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());

	of_fill_in_cpu_data();
	if (tlb_type == hypervisor)
		mdesc_fill_in_cpu_data(cpu_all_mask);
}