/* smp.h: Sparc specific SMP stuff.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */
5
6 #ifndef _SPARC_SMP_H
7 #define _SPARC_SMP_H
8
9 #include <linux/threads.h>
10 #include <asm/head.h>
11 #include <asm/btfixup.h>
12
13 #ifndef __ASSEMBLY__
14
15 #include <linux/cpumask.h>
16
17 #endif /* __ASSEMBLY__ */
18
19 #ifdef CONFIG_SMP
20
21 #ifndef __ASSEMBLY__
22
23 #include <asm/ptrace.h>
24 #include <asm/asi.h>
25 #include <linux/atomic.h>
26
/*
 * Private routines/data
 */

/* CPU id of the processor that booted the kernel. */
extern unsigned char boot_cpu_id;
/* Per-cpu entries, presumably marked by secondaries in smp_callin() — confirm against callers. */
extern volatile unsigned long cpu_callin_map[NR_CPUS];
/* Mask of CPUs released to run by the boot processor. */
extern cpumask_t smp_commenced_mask;
/* PROM register description used while bringing up secondary CPUs. */
extern struct linux_prom_registers smp_penguin_ctable;

/* Signature of functions dispatched to remote CPUs via smp_cross_call(). */
typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
		       unsigned long, unsigned long);

void cpu_panic(void);
extern void smp4m_irq_rotate(int cpu);
41
/*
 *	General functions that each host system must provide.
 */

/* Per-platform SMP bring-up entry points. */
void sun4m_init_smp(void);
void sun4d_init_smp(void);

void smp_callin(void);
void smp_boot_cpus(void);
void smp_store_cpu_info(int);

/* IPI handlers (wired up by platform interrupt code). */
void smp_resched_interrupt(void);
void smp_call_function_single_interrupt(void);
void smp_call_function_interrupt(void);

struct seq_file;
/* NOTE(review): presumably /proc/cpuinfo output helpers — verify against callers. */
void smp_bogo(struct seq_file *);
void smp_info(struct seq_file *);
60
/*
 * Boot-time fixup (btfixup) entry points: the concrete implementation is
 * patched in at boot depending on the platform (see asm/btfixup.h).
 */
BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, cpumask_t, unsigned long, unsigned long, unsigned long, unsigned long)
BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
BTFIXUPDEF_CALL(void, smp_ipi_resched, int);
BTFIXUPDEF_CALL(void, smp_ipi_single, int);
BTFIXUPDEF_CALL(void, smp_ipi_mask_one, int);
BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
BTFIXUPDEF_BLACKBOX(load_current)

#define smp_cross_call(func,mask,arg1,arg2,arg3,arg4) BTFIXUP_CALL(smp_cross_call)(func,mask,arg1,arg2,arg3,arg4)
70
/*
 * Convenience wrappers that cross-call @func on every online CPU with
 * zero to four arguments; unused argument slots are passed as 0.
 */
static inline void xc0(smpfunc_t func)
{
	smp_cross_call(func, *cpu_online_mask, 0, 0, 0, 0);
}

static inline void xc1(smpfunc_t func, unsigned long arg1)
{
	smp_cross_call(func, *cpu_online_mask, arg1, 0, 0, 0);
}

static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
{
	smp_cross_call(func, *cpu_online_mask, arg1, arg2, 0, 0);
}

static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
		       unsigned long arg3)
{
	smp_cross_call(func, *cpu_online_mask, arg1, arg2, arg3, 0);
}

static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
		       unsigned long arg3, unsigned long arg4)
{
	smp_cross_call(func, *cpu_online_mask, arg1, arg2, arg3, arg4);
}
82
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

/*
 * Logical-to-physical CPU mapping is the identity on sparc32.
 */
static inline int cpu_logical_map(int cpu)
{
	return cpu;
}
90
/*
 * sun4m: the CPU id occupies bits [13:12] of the Trap Base Register,
 * so read %tbr, shift right by 12 and mask with 3.
 */
static inline int hard_smp4m_processor_id(void)
{
	int cpuid;

	__asm__ __volatile__("rd %%tbr, %0\n\t"
			     "srl %0, 12, %0\n\t"
			     "and %0, 3, %0\n\t" :
			     "=&r" (cpuid));
	return cpuid;
}
101
/*
 * sun4d: fetch the CPU id with an alternate-space load from ASI
 * ASI_M_VIKING_TMP1 (presumably stashed there by boot code — see
 * the sun4d bring-up path).
 */
static inline int hard_smp4d_processor_id(void)
{
	int cpuid;

	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
			     "=&r" (cpuid) : "i" (ASI_M_VIKING_TMP1));
	return cpuid;
}
110
/*
 * LEON: %asr17 holds the processor index in its top four bits
 * (bits [31:28]), so shift the register right by 28.
 *
 * Fix: this was declared "extern inline".  Under GNU89 inline
 * semantics that emits no out-of-line definition, so any call the
 * compiler chooses not to inline fails at link time.  Use
 * "static inline" like the other hard_smp*_processor_id() helpers
 * in this header; behavior for inlined callers is unchanged.
 */
static inline int hard_smpleon_processor_id(void)
{
	int cpuid;
	__asm__ __volatile__("rd %%asr17,%0\n\t"
			     "srl %0,28,%0" :
			     "=&r" (cpuid) : );
	return cpuid;
}
119
120 #ifndef MODULE
/*
 * Built-in kernel code: hard_smp_processor_id() is a btfixup
 * "blackbox" — the instructions following the magic
 * ___b_hard_smp_processor_id sethi are rewritten at boot with the
 * proper per-platform sequence (see btfixup.h/btfixupprep.c); the
 * unpatched fallback below simply loads boot_cpu_id.
 */
static inline int hard_smp_processor_id(void)
{
	int cpuid;

	/* Black box - sun4m
		__asm__ __volatile__("rd %%tbr, %0\n\t"
				     "srl %0, 12, %0\n\t"
				     "and %0, 3, %0\n\t" :
				     "=&r" (cpuid));
	   - sun4d
		__asm__ __volatile__("lda [%g0] ASI_M_VIKING_TMP1, %0\n\t"
				     "nop; nop" :
				     "=&r" (cpuid));
	   - leon
		__asm__ __volatile__( "rd %asr17, %0\n\t"
				      "srl %0, 0x1c, %0\n\t"
				      "nop\n\t" :
				      "=&r" (cpuid));
	   See btfixup.h and btfixupprep.c to understand how a blackbox works.
	 */
	__asm__ __volatile__("sethi %%hi(___b_hard_smp_processor_id), %0\n\t"
			     "sethi %%hi(boot_cpu_id), %0\n\t"
			     "ldub [%0 + %%lo(boot_cpu_id)], %0\n\t" :
			     "=&r" (cpuid));
	return cpuid;
}
147 #else
/*
 * Module code cannot be btfixup-patched in place, so call the
 * out-of-line ___f___hard_smp_processor_id trampoline instead: the
 * return address is handed over in %g1 (copied from %o7) and the
 * result comes back in %g2.
 */
static inline int hard_smp_processor_id(void)
{
	int cpuid;

	__asm__ __volatile__("mov %%o7, %%g1\n\t"
			     "call ___f___hard_smp_processor_id\n\t"
			     " nop\n\t"
			     "mov %%g2, %0\n\t" : "=r"(cpuid) : : "g1", "g2");
	return cpuid;
}
158 #endif
159
/* CPU number of the executing processor, cached in thread_info. */
#define raw_smp_processor_id()		(current_thread_info()->cpu)

/* Per-cpu profiling tick bookkeeping (fields of cpu_data). */
#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
#define prof_counter(__cpu)		cpu_data(__cpu).counter

/* Boot-time setup of the set of possible CPUs. */
void smp_setup_cpu_possible_map(void);
166
167 #endif /* !(__ASSEMBLY__) */
168
/* Sparc specific messages. */
#define MSG_CROSS_CALL         0x0005       /* run func on cpus */

/* Empirical PROM processor mailbox constants.  If the per-cpu mailbox
 * contains something other than one of these then the ipi is from
 * Linux's active_kernel_processor.  This facility exists so that
 * the boot monitor can capture all the other cpus when one catches
 * a watchdog reset or the user enters the monitor using L1-A keys.
 */
#define MBOX_STOPCPU          0xFB
#define MBOX_IDLECPU          0xFC
#define MBOX_IDLECPU2         0xFD
#define MBOX_STOPCPU2         0xFE
182
183 #else /* SMP */
184
/* Uniprocessor stubs: the only CPU is id 0 and there is no map to set up. */
#define hard_smp_processor_id()		0
#define smp_setup_cpu_possible_map() do { } while (0)
187
188 #endif /* !(SMP) */
189 #endif /* !(_SPARC_SMP_H) */
190