/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/timer.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>

extern int linux_num_cpus;
extern void calibrate_delay(void);
extern unsigned prom_cpu_nodes[];

cpuinfo_sparc cpu_data[NR_CPUS];

volatile int __cpu_number_map[NR_CPUS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
volatile int __cpu_logical_map[NR_CPUS] __attribute__ ((aligned (SMP_CACHE_BYTES)));

/* Please don't make this stuff initdata!!! --DaveM */
static unsigned char boot_cpu_id;
static int smp_activated;

/* Kernel spinlock */
spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

volatile int smp_processors_ready = 0;
unsigned long cpu_present_map = 0;
int smp_num_cpus = 1;
int smp_threads_ready = 0;

void __init smp_setup(char *str, int *ints)
{
	/* XXX implement me XXX */
}

static int max_cpus = NR_CPUS;
static int __init maxcpus(char *str)
{
	get_option(&str, &max_cpus);
	return 1;
}

__setup("maxcpus=", maxcpus);

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_present_map & (1UL << i))
			seq_printf(m, "CPU%d:\t\tonline\n", i);
	}
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpu_present_map & (1UL << i))
			seq_printf(m,
				   "Cpu%dBogo\t: %lu.%02lu\n"
				   "Cpu%dClkTck\t: %016lx\n",
				   i, cpu_data[i].udelay_val / (500000/HZ),
				   (cpu_data[i].udelay_val / (5000/HZ)) % 100,
				   i, cpu_data[i].clock_tick);
}

void __init smp_store_cpu_info(int id)
{
	int i, no;

	/* multiplier and counter set by
	   smp_setup_percpu_timer() */
	cpu_data[id].udelay_val = loops_per_jiffy;

	for (no = 0; no < linux_num_cpus; no++)
		if (linux_cpus[no].mid == id)
			break;

	cpu_data[id].clock_tick = prom_getintdefault(linux_cpus[no].prom_node,
						     "clock-frequency", 0);

	cpu_data[id].pgcache_size = 0;
	cpu_data[id].pte_cache[0] = NULL;
	cpu_data[id].pte_cache[1] = NULL;
	cpu_data[id].pgdcache_size = 0;
	cpu_data[id].pgd_cache = NULL;
	cpu_data[id].idle_volume = 1;

	for (i = 0; i < 16; i++)
		cpu_data[id].irq_worklists[i] = 0;
}

void __init smp_commence(void)
{
}

static void smp_setup_percpu_timer(void);

static volatile unsigned long callin_flag = 0;

extern void inherit_locked_prom_mappings(int save_p);

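/* Per-cpu entry point once the PROM has dropped a secondary cpu into
 * the kernel: inherit the PROM's locked TLB entries, flush caches and
 * TLB, start the local tick timer, calibrate the delay loop and publish
 * our cpu_data, then raise callin_flag so smp_boot_cpus() knows we made
 * it.  We borrow init_mm and spin until smp_threads_ready before
 * returning to the startup path.
 */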
void __init smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	inherit_locked_prom_mappings(0);

	__flush_cache_all();
	__flush_tlb_all();

	smp_setup_percpu_timer();

	__sti();

	calibrate_delay();
	smp_store_cpu_info(cpuid);
	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current->thread.flags &= ~(SPARC_FLAG_NEWCHILD);

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!smp_threads_ready)
		rmb();
}

extern int cpu_idle(void);
extern void init_IRQ(void);

int start_secondary(void *unused)
{
	trap_init();
	init_IRQ();
	return cpu_idle();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

static unsigned long current_tick_offset;

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave. -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static spinlock_t itc_sync_lock = SPIN_LOCK_UNLOCKED;
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

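/* One sampling round, run on the slave: stamp our tick (t0), signal the
 * master through go[MASTER], wait for the master's tick value to appear
 * in go[SLAVE] (tm), then stamp our tick again (t1).  Over NUM_ITERS
 * samples we keep the pair with the smallest round-trip time and return
 * the estimated offset between the midpoint of [t0, t1] and the
 * master's timestamp; *rt and *master report the round trip and master
 * delta of that best sample.
 */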
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_safe("#StoreLoad");
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		membar_safe("#StoreStore");
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}

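/* Slave side of tick synchronization: signal readiness by raising
 * go[MASTER], wait for the master to clear it, then run NUM_ROUNDS
 * rounds of get_delta(), nudging our %tick via tick_ops->add_tick() by
 * the negated delta plus a running latency estimate (adjust_latency/4).
 * Once a round measures delta == 0 we stop adjusting but keep sampling,
 * so the master's fixed-length loop stays in step with ours.
 */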
void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj, current_tick_offset);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles, "
	       "maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

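/* Master side of tick synchronization for one slave cpu: kick it with
 * the xcall_sync_tick softint, wait for it to raise go[MASTER] (ready),
 * clear go[MASTER] to release it, then feed it NUM_ROUNDS*NUM_ITERS
 * tick samples through go[SLAVE], one each time the slave raises
 * go[MASTER] again from inside get_delta().
 */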
static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_safe("#StoreLoad");

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			membar_safe("#StoreStore");
			go[SLAVE] = tick_ops->get_tick();
			membar_safe("#StoreLoad");
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}

static void smp_synchronize_tick(void)
{
	int cpu = smp_processor_id();
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_present_map & (1UL << i)) {
			if (i == cpu)
				continue;
			smp_synchronize_one_tick(i);
		}
	}
}

extern struct prom_cpuinfo linux_cpus[64];

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct task_struct *cpu_new_task = NULL;

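/* Bring up each cpu in cpu_present_map other than the boot cpu: fork an
 * idle thread with kernel_thread(), take it off the runqueue and the
 * pid hash, publish it through cpu_new_task, then have OBP start the
 * cpu at sparc64_cpu_startup.  We poll callin_flag (udelay(100) per
 * iteration) until the new cpu checks in via smp_callin(), or give up
 * and report it stuck.  Successful cpus get entries in __cpu_number_map
 * and __cpu_logical_map; failures are cleared from cpu_present_map.
 */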
void __init smp_boot_cpus(void)
{
	int cpucount = 0, i;

	printk("Entering UltraSMPenguin Mode...\n");
	__sti();
	smp_store_cpu_info(boot_cpu_id);
	init_idle();

	if (linux_num_cpus == 1)
		return;

	for (i = 0; i < NR_CPUS; i++) {
		if (i == boot_cpu_id)
			continue;

		if ((cpucount + 1) == max_cpus)
			goto ignorecpu;
		if (cpu_present_map & (1UL << i)) {
			unsigned long entry = (unsigned long)(&sparc64_cpu_startup);
			unsigned long cookie = (unsigned long)(&cpu_new_task);
			struct task_struct *p;
			int timeout;
			int no;

			prom_printf("Starting CPU %d... ", i);
			kernel_thread(start_secondary, NULL, CLONE_PID);
			cpucount++;

			p = init_task.prev_task;
			init_tasks[cpucount] = p;

			p->processor = i;
			p->cpus_runnable = 1UL << i; /* we schedule the first task manually */

			del_from_runqueue(p);
			unhash_process(p);

			callin_flag = 0;
			for (no = 0; no < linux_num_cpus; no++)
				if (linux_cpus[no].mid == i)
					break;
			cpu_new_task = p;
			prom_startcpu(linux_cpus[no].prom_node,
				      entry, cookie);
			for (timeout = 0; timeout < 5000000; timeout++) {
				if (callin_flag)
					break;
				udelay(100);
			}
			if (callin_flag) {
				__cpu_number_map[i] = cpucount;
				__cpu_logical_map[cpucount] = i;
				prom_cpu_nodes[i] = linux_cpus[no].prom_node;
				prom_printf("OK\n");
			} else {
				cpucount--;
				printk("Processor %d is stuck.\n", i);
				prom_printf("FAILED\n");
			}
		}
		if (!callin_flag) {
ignorecpu:
			cpu_present_map &= ~(1UL << i);
			__cpu_number_map[i] = -1;
		}
	}
	cpu_new_task = NULL;
	if (cpucount == 0) {
		if (max_cpus != 1)
			printk("Error: only one processor found.\n");
		cpu_present_map = (1UL << smp_processor_id());
	} else {
		unsigned long bogosum = 0;

		for (i = 0; i < NR_CPUS; i++) {
			if (cpu_present_map & (1UL << i))
				bogosum += cpu_data[i].udelay_val;
		}
		printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		       cpucount + 1,
		       bogosum/(500000/HZ),
		       (bogosum/(5000/HZ))%100);
		smp_activated = 1;
		smp_num_cpus = cpucount + 1;
	}
	smp_processors_ready = 1;
	membar_safe("#StoreStore | #StoreLoad");

	smp_synchronize_tick();
}

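/* Deliver one cross-call to a single cpu on Spitfire: with PSTATE_IE
 * cleared, write the three mondo data words into the UDB interrupt
 * dispatch data registers and then hit the dispatch vector for the
 * target (upa id << 14 | 0x70), working around Spitfire Errata #54 as
 * described below.  We then poll ASI_INTR_DISPATCH_STAT: a NACK makes
 * us retry after a short delay, while a dispatch whose busy bit never
 * clears is reported as "mondo stuckage".
 */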
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
		       ((cpu & 0x40) >> 4) |
		       (cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr %1, %2, %%pstate\n\t"
	"stxa %4, [%0] %3\n\t"
	"stxa %5, [%0+%8] %3\n\t"
	"add %0, %8, %0\n\t"
	"stxa %6, [%0+%8] %3\n\t"
	"membar #Sync\n\t"
	"stxa %%g0, [%7] %3\n\t"
	"membar #Sync\n\t"
	"mov 0x20, %%g1\n\t"
	"ldxa [%%g1] 0x7f, %%g0\n\t"
	"membar #Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target), "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (result)
				     : "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, unsigned long mask)
{
	int ncpus = smp_num_cpus - 1;
	int i;
	u64 pstate;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for (i = 0; (i < NR_CPUS) && ncpus; i++) {
		if (mask & (1UL << i)) {
			spitfire_xcall_helper(data0, data1, data2, pstate, i);
			ncpus--;
		}
	}
}

/* Cheetah now allows the whole 64 bytes of data to be sent in the
 * interrupt packet, but we have no use for that.  However we do take
 * advantage of the new pipelining feature (ie. dispatch to multiple
 * cpus simultaneously).
 */
#if NR_CPUS > 32
#error Fixup cheetah_xcall_deliver Dave...
#endif
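/* Cheetah cross-call delivery: the three data words are written to the
 * dispatch data registers once, then a dispatch is issued for every cpu
 * in the mask (on non-Jalapeno parts the busy/nack slot index goes into
 * bits 24+ of the dispatch address).  We then poll the dispatch status
 * word, where the low bit of each 2-bit slot is "busy" and the high bit
 * is "nack".  If only busy bits remain stuck we give up with a "mondo
 * stuckage" message; if some cpus NACKed we trim the mask down to just
 * those cpus and retry after a short delay.
 */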
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, unsigned long mask)
{
	u64 pstate, ver;
	int nack_busy_id, is_jalapeno;

	if (!mask)
		return;

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jalapeno = ((ver >> 32) == 0x003e0016);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     "membar #Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	{
		int i, ncpus = smp_num_cpus - 1;

		for (i = 0; (i < NR_CPUS) && ncpus; i++) {
			if (mask & (1UL << i)) {
				u64 target = (i << 14) | 0x70;

				if (!is_jalapeno)
					target |= (nack_busy_id << 24);
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync\n\t"
						     : /* no outputs */
						     : "r" (target), "i" (ASI_INTR_W));
				nack_busy_id++;
				ncpus--;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat;
		long stuck;

		stuck = 100000 * nack_busy_id;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (dispatch_stat == 0UL) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & 0x5555555555555555UL);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for (i = 0; i < NR_CPUS; i++) {
				if (mask & (1UL << i)) {
					u64 check_mask;

					if (is_jalapeno)
						check_mask = (0x2UL << (2*i));
					else
						check_mask = (0x2UL <<
							      this_busy_nack);
					if ((dispatch_stat & check_mask) == 0)
						mask &= ~(1UL << i);
					this_busy_nack += 2;
				}
			}

			goto retry;
		}
	}
}

/* Send cross call to all processors mentioned in MASK
 * except self.
 */
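/* data0 packs the mmu context into its upper 32 bits and the low 32
 * bits of the xcall handler's address (one of the xcall_* entry points)
 * into its lower half; data1 and data2 are handler-specific arguments.
 */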
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, unsigned long mask)
{
	if (smp_processors_ready) {
		u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

		mask &= ~(1UL<<smp_processor_id());

		if (tlb_type == spitfire)
			spitfire_xcall_deliver(data0, data1, data2, mask);
		else
			cheetah_xcall_deliver(data0, data1, data2, mask);

		/* NOTE: Caller runs local copy on master. */
	}
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	smp_cross_call_masked(&xcall_sync_tick,
			      0, 0, 0,
			      (1UL << cpu));
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_present_map)

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t finished;
	int wait;
};

static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;

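/* Run func(info) on every other cpu.  The initiator publishes the
 * request through call_data under call_lock, fires the
 * xcall_call_function softint at everyone, then spins (with a timeout)
 * until all other cpus have bumped data.finished.  When wait is set the
 * clients call func() before incrementing the counter, so the initiator
 * also waits for completion; otherwise the increment only means the
 * client has picked up the call data.
 */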
int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = smp_num_cpus - 1;
	long timeout;

	if (!cpus)
		return 0;

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock_bh(&call_lock);

	call_data = &data;

	smp_cross_call(&xcall_call_function, 0, 0, 0);

	/*
	 * Wait for other cpus to complete function or at
	 * least snap the call data.
	 */
	timeout = 1000000;
	while (atomic_read(&data.finished) != cpus) {
		if (--timeout <= 0)
			goto out_timeout;
		barrier();
		udelay(1);
	}

	spin_unlock_bh(&call_lock);

	return 0;

out_timeout:
	spin_unlock_bh(&call_lock);
	printk("XCALL: Remote cpus not responding, ncpus=%d finished=%d\n",
	       smp_num_cpus - 1, atomic_read(&data.finished));
	return 0;
}

void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}

extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_range;
extern unsigned long xcall_flush_tlb_all_spitfire;
extern unsigned long xcall_flush_tlb_all_cheetah;
extern unsigned long xcall_flush_cache_all_spitfire;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_flush_dcache_page_cheetah;
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif

static __inline__ void __local_flush_dcache_page(struct page *page)
{
#if (L1DCACHE_SIZE > PAGE_SIZE)
	__flush_dcache_page(page->virtual,
			    ((tlb_type == spitfire) &&
			     page->mapping != NULL));
#else
	if (page->mapping != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page->virtual));
#endif
}

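/* Flush the D-cache copy of page on cpu.  If that is the local cpu we
 * flush directly; otherwise we cross-call the per-chip flush handler,
 * passing the page's physical address (and, for Spitfire, its kernel
 * virtual address too).  Bit 32 of data0 is set when the page has a
 * mapping, presumably so the Spitfire handler also flushes the I-cache,
 * mirroring what __local_flush_dcache_page() does locally.
 */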
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	if (smp_processors_ready) {
		unsigned long mask = 1UL << cpu;

#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes);
#endif
		if (cpu == smp_processor_id()) {
			__local_flush_dcache_page(page);
		} else if ((cpu_present_map & mask) != 0) {
			u64 data0;

			if (tlb_type == spitfire) {
				data0 = ((u64)&xcall_flush_dcache_page_spitfire);
				if (page->mapping != NULL)
					data0 |= ((u64)1 << 32);
				spitfire_xcall_deliver(data0,
						       __pa(page->virtual),
						       (u64) page->virtual,
						       mask);
			} else {
				data0 = ((u64)&xcall_flush_dcache_page_cheetah);
				cheetah_xcall_deliver(data0,
						      __pa(page->virtual),
						      0, mask);
			}
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	if (smp_processors_ready) {
		unsigned long mask = cpu_present_map & ~(1UL << smp_processor_id());
		u64 data0;

#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes);
#endif
		if (mask == 0UL)
			goto flush_self;
		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page->mapping != NULL)
				data0 |= ((u64)1 << 32);
			spitfire_xcall_deliver(data0,
					       __pa(page->virtual),
					       (u64) page->virtual,
					       mask);
		} else {
			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
					      __pa(page->virtual),
					      0, mask);
		}
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	flush_self:
		__local_flush_dcache_page(page);
	}
}

void smp_receive_signal(int cpu)
{
	if (smp_processors_ready) {
		unsigned long mask = 1UL << cpu;

		if ((cpu_present_map & mask) != 0) {
			u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);

			if (tlb_type == spitfire)
				spitfire_xcall_deliver(data0, 0, 0, mask);
			else
				cheetah_xcall_deliver(data0, 0, 0, mask);
		}
	}
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	/* Just return, rtrap takes care of the rest. */
	clear_softint(1 << irq);
}

void smp_report_regs(void)
{
	smp_cross_call(&xcall_report_regs, 0, 0, 0);
}

void smp_flush_cache_all(void)
{
	/* Cheetah need do nothing. */
	if (tlb_type == spitfire) {
		smp_cross_call(&xcall_flush_cache_all_spitfire, 0, 0, 0);
		__flush_cache_all();
	}
}

void smp_flush_tlb_all(void)
{
	if (tlb_type == spitfire)
		smp_cross_call(&xcall_flush_tlb_all_spitfire, 0, 0, 0);
	else
		smp_cross_call(&xcall_flush_tlb_all_cheetah, 0, 0, 0);
	__flush_tlb_all();
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on; this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system; this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * This code is called from two places, dup_mmap and exit_mmap.  In the
	 * former case, we really need a flush.  In the latter case, the callers
	 * are single threaded exec_mmap (really need a flush), multithreaded
	 * exec_mmap case (do not need to flush, since the caller gets a new
	 * context via activate_mm), and all other callers of mmput() whence
	 * the flush can be optimized since the associated threads are dead and
	 * the mm is being torn down (__exit_mm and other mmput callers) or the
	 * owning thread is dissociating itself from the mm.  The
	 * (atomic_read(&mm->mm_users) == 0) check ensures real work is done
	 * for single thread exec and dup_mmap cases.  An alternate check might
	 * have been (current->mm != mm).
	 *						Kanoj Sarcar
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	{
		u32 ctx = CTX_HWBITS(mm->context);
		int cpu = smp_processor_id();

		if (atomic_read(&mm->mm_users) == 1) {
			/* See smp_flush_tlb_page for info about this. */
			mm->cpu_vm_mask = (1UL << cpu);
			goto local_flush_and_out;
		}

		smp_cross_call_masked(&xcall_flush_tlb_mm,
				      ctx, 0, 0,
				      mm->cpu_vm_mask);

	local_flush_and_out:
		__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
	}
}

void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
			 unsigned long end)
{
	{
		u32 ctx = CTX_HWBITS(mm->context);
		int cpu = smp_processor_id();

		start &= PAGE_MASK;
		end = PAGE_ALIGN(end);

		if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
			mm->cpu_vm_mask = (1UL << cpu);
			goto local_flush_and_out;
		}

		smp_cross_call_masked(&xcall_flush_tlb_range,
				      ctx, start, end,
				      mm->cpu_vm_mask);

	local_flush_and_out:
		__flush_tlb_range(ctx, start, SECONDARY_CONTEXT, end, PAGE_SIZE, (end-start));
	}
}

void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
{
	{
		u32 ctx = CTX_HWBITS(mm->context);
		int cpu = smp_processor_id();

		page &= PAGE_MASK;
		if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
			/* By virtue of being the current address space, and
			 * having the only reference to it, the following operation
			 * is safe.
			 *
			 * It would not be a win to perform the xcall tlb flush in
			 * this case, because even if we switch back to one of the
			 * other processors in cpu_vm_mask it is almost certain that
			 * all TLB entries for this context will be replaced by the
			 * time that happens.
			 */
			mm->cpu_vm_mask = (1UL << cpu);
			goto local_flush_and_out;
		} else {
			/* By virtue of running under the mm->page_table_lock,
			 * and mmu_context.h:switch_mm doing the same, the following
			 * operation is safe.
			 */
			if (mm->cpu_vm_mask == (1UL << cpu))
				goto local_flush_and_out;
		}

		/* OK, we have to actually perform the cross call.  Most likely
		 * this is a cloned mm or kswapd is kicking out pages for a task
		 * which has run recently on another cpu.
		 */
		smp_cross_call_masked(&xcall_flush_tlb_page,
				      ctx, page, 0,
				      mm->cpu_vm_mask);
		if (!(mm->cpu_vm_mask & (1UL << cpu)))
			return;

	local_flush_and_out:
		__flush_tlb_page(ctx, page, SECONDARY_CONTEXT);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

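/* smp_capture()/smp_release() corral all other cpus while something
 * delicate (e.g. firmware interaction) happens.  The first capturer
 * raises penguins_are_doing_time, cross-calls xcall_capture, and spins
 * until every cpu has checked into smp_capture_registry.  Captured cpus
 * sit in smp_penguin_jailcell() with their alternate globals saved and
 * the PROM's world view active until smp_release() drops the flag.  The
 * depth counter lets capture/release nest.
 */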
void smp_capture(void)
{
	if (smp_processors_ready) {
		int result = atomic_add_ret(1, &smp_capture_depth);

		membar_safe("#StoreStore | #LoadStore");
		if (result == 1) {
			int ncpus = smp_num_cpus;

#ifdef CAPTURE_DEBUG
			printk("CPU[%d]: Sending penguins to jail...",
			       smp_processor_id());
#endif
			penguins_are_doing_time = 1;
			membar_safe("#StoreStore | #LoadStore");
			atomic_inc(&smp_capture_registry);
			smp_cross_call(&xcall_capture, 0, 0, 0);
			while (atomic_read(&smp_capture_registry) != ncpus)
				rmb();
#ifdef CAPTURE_DEBUG
			printk("done\n");
#endif
		}
	}
}

void smp_release(void)
{
	if (smp_processors_ready) {
		if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
			printk("CPU[%d]: Giving pardon to imprisoned penguins\n",
			       smp_processor_id());
#endif
			penguins_are_doing_time = 0;
			membar_safe("#StoreStore | #StoreLoad");
			atomic_dec(&smp_capture_registry);
		}
	}
}

/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);
extern void save_alternate_globals(unsigned long *);
extern void restore_alternate_globals(unsigned long *);
void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	unsigned long global_save[24];

	clear_softint(1 << irq);

	__asm__ __volatile__("flushw");
	save_alternate_globals(global_save);
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad | #StoreStore");
	while (penguins_are_doing_time)
		rmb();
	restore_alternate_globals(global_save);
	atomic_dec(&smp_capture_registry);
	prom_world(0);
}

extern unsigned long xcall_promstop;

void smp_promstop_others(void)
{
	if (smp_processors_ready)
		smp_cross_call(&xcall_promstop, 0, 0, 0);
}

extern void sparc64_do_profile(unsigned long pc, unsigned long o7);

#define prof_multiplier(__cpu)	cpu_data[(__cpu)].multiplier
#define prof_counter(__cpu)	cpu_data[(__cpu)].counter

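/* Per-cpu tick interrupt.  Every tick we run the kernel profiler (for
 * kernel-mode samples); once prof_counter ticks have elapsed we do the
 * per-jiffy work: the boot cpu accounts the global timer interrupt and
 * every cpu runs update_process_times().  setup_profiling_timer()
 * shrinks current_tick_offset and raises prof_multiplier so the tick
 * can fire faster than HZ for profiling while the jiffy work still
 * happens at the normal rate.  The loop re-arms the tick compare
 * register and repeats if we are already past the new compare value.
 */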
void smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	unsigned long compare, tick, pstate;
	int cpu = smp_processor_id();
	int user = user_mode(regs);

	/*
	 * Check for level 14 softint.
	 */
	{
		unsigned long tick_mask = tick_ops->softint_mask;

		if (!(get_softint() & tick_mask)) {
			extern void handler_irq(int, struct pt_regs *);

			handler_irq(14, regs);
			return;
		}
		clear_softint(tick_mask);
	}

	do {
		if (!user)
			sparc64_do_profile(regs->tpc, regs->u_regs[UREG_RETPC]);
		if (!--prof_counter(cpu)) {
			irq_enter(cpu, 0);

			if (cpu == boot_cpu_id) {
				kstat.irqs[cpu][0]++;
				timer_tick_interrupt(regs);
			}

			update_process_times(user);

			irq_exit(cpu, 0);

			prof_counter(cpu) = prof_multiplier(cpu);
		}

		/* Guarantee that the following sequences execute
		 * uninterrupted.
		 */
		__asm__ __volatile__("rdpr %%pstate, %0\n\t"
				     "wrpr %0, %1, %%pstate"
				     : "=r" (pstate)
				     : "i" (PSTATE_IE));

		compare = tick_ops->add_compare(current_tick_offset);
		tick = tick_ops->get_tick();

		/* Restore PSTATE_IE. */
		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : /* no outputs */
				     : "r" (pstate));
	} while (time_after_eq(tick, compare));
}

static void __init smp_setup_percpu_timer(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	prof_counter(cpu) = prof_multiplier(cpu) = 1;

	/* Guarantee that the following sequences execute
	 * uninterrupted.
	 */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	tick_ops->init_tick(current_tick_offset);

	/* Restore PSTATE_IE. */
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : /* no outputs */
			     : "r" (pstate));
}

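/* Called early on the boot cpu: record boot_cpu_id, seed
 * current_tick_offset from timer_tick_offset, build cpu_present_map
 * from the PROM's cpu list, and initialize the number/logical cpu maps
 * with the boot cpu in slot 0.
 */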
void __init smp_tick_init(void)
{
	int i;

	boot_cpu_id = hard_smp_processor_id();
	current_tick_offset = timer_tick_offset;
	cpu_present_map = 0;
	for (i = 0; i < linux_num_cpus; i++)
		cpu_present_map |= (1UL << linux_cpus[i].mid);
	for (i = 0; i < NR_CPUS; i++) {
		__cpu_number_map[i] = -1;
		__cpu_logical_map[i] = -1;
	}
	__cpu_number_map[boot_cpu_id] = 0;
	prom_cpu_nodes[boot_cpu_id] = linux_cpus[0].prom_node;
	__cpu_logical_map[0] = boot_cpu_id;
	current->processor = boot_cpu_id;
	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}

static inline unsigned long find_flush_base(unsigned long size)
{
	struct page *p = mem_map;
	unsigned long found, base;

	size = PAGE_ALIGN(size);
	found = size;
	base = (unsigned long) page_address(p);
	while (found != 0) {
		/* Failure. */
		if (p >= (mem_map + max_mapnr))
			return 0UL;
		if (PageReserved(p)) {
			found = size;
			base = (unsigned long) page_address(p);
		} else {
			found -= PAGE_SIZE;
		}
		p++;
	}
	return base;
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	unsigned long flags;
	int i;

	if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
		return -EINVAL;

	save_and_cli(flags);
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_present_map & (1UL << i))
			prof_multiplier(i) = multiplier;
	}
	current_tick_offset = (timer_tick_offset / multiplier);
	restore_flags(flags);

	return 0;
}