/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>

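/* Interrupt levels used for inter-processor signalling (IPIs);
 * the reschedule and stop handlers for the first two are below.
 */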
#define IRQ_RESCHEDULE 13
#define IRQ_STOP_CPU 14
#define IRQ_CROSS_CALL 15

volatile int smp_processors_ready = 0;
unsigned long cpu_present_map = 0;
int smp_num_cpus = 1;
int smp_threads_ready = 0;
unsigned char mid_xlate[NR_CPUS] = { 0, 0, 0, 0, };
volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,};
#ifdef NOTUSED
volatile unsigned long smp_spinning[NR_CPUS] = { 0, };
#endif
unsigned long smp_proc_in_lock[NR_CPUS] = { 0, };
struct cpuinfo_sparc cpu_data[NR_CPUS];
unsigned long cpu_offset[NR_CPUS];
unsigned char boot_cpu_id = 0;
unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
int smp_activated = 0;
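/* Translate between hardware cpu ids and logical cpu numbers. */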
volatile int __cpu_number_map[NR_CPUS];
volatile int __cpu_logical_map[NR_CPUS];
cycles_t cacheflush_time = 0; /* XXX */

/* The only locking primitive guaranteed to be available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg', which atomically
 * loads the byte at the effective address into dest_reg and
 * stores 0xff there afterwards. A pretty lame locking primitive
 * compared to the Alpha and the Intel, no? Most Sparcs have the 'swap'
 * instruction, which is much better...
 */
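
/* A minimal sketch of the ldstub technique described above; the
 * example_* names are illustrative and not part of this file.  The
 * loop spins until the value read back is zero, i.e. until this CPU
 * is the one that flipped the byte from 0x00 to 0xff.
 */
static inline void example_ldstub_lock(volatile unsigned char *lock)
{
	unsigned char val;

	do {
		__asm__ __volatile__("ldstub [%1], %0"
				     : "=r" (val)
				     : "r" (lock)
				     : "memory");
	} while (val);	/* nonzero means another CPU held the lock */
}

static inline void example_ldstub_unlock(volatile unsigned char *lock)
{
	/* A plain byte store of %g0 (always zero) releases the lock. */
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}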

/* Kernel spinlock */
spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

/* Used to make bitops atomic */
unsigned char bitops_spinlock = 0;

volatile unsigned long ipi_count;

volatile int smp_process_available = 0;
volatile int smp_commenced = 0;

/* Not supported on Sparc yet. */
void __init smp_setup(char *str, int *ints)
{
}

/*
 * The bootstrap kernel entry code has set these up. Save them
 * for a given CPU.
 */

void __init smp_store_cpu_info(int id)
{
	cpu_data[id].udelay_val = loops_per_jiffy; /* this is it on sparc. */
}

void __init smp_commence(void)
{
	/*
	 * Let the callins below out of their loop.
	 */
	local_flush_cache_all();
	local_flush_tlb_all();
	smp_commenced = 1;
	local_flush_cache_all();
	local_flush_tlb_all();
}

extern int cpu_idle(void);

/* Activate a secondary processor. */
int start_secondary(void *unused)
{
	prom_printf("Start secondary called. Should not happen\n");
	return cpu_idle();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/*
 * Cycle through the processors asking the PROM to start each one.
 */

extern struct prom_cpuinfo linux_cpus[NR_CPUS];
struct linux_prom_registers smp_penguin_ctable __initdata = { 0 };

void __init smp_boot_cpus(void)
{
	extern void smp4m_boot_cpus(void);
	extern void smp4d_boot_cpus(void);

	if (sparc_cpu_model == sun4m)
		smp4m_boot_cpus();
	else
		smp4d_boot_cpus();
}

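/* The xcN() cross-call helpers broadcast a function call to the other
 * online CPUs, passing N arguments; each flush below then runs the
 * local variant on the calling CPU as well.
 */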
void smp_flush_cache_all(void)
{
	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
	local_flush_cache_all();
}

void smp_flush_tlb_all(void)
{
	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
	local_flush_tlb_all();
}

void smp_flush_cache_mm(struct mm_struct *mm)
{
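	/* Cross-call only when some other CPU has run this mm; if
	 * cpu_vm_mask is just our own bit, the local flush suffices.
	 */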
	if(mm->context != NO_CONTEXT) {
		if(mm->cpu_vm_mask != (1 << smp_processor_id()))
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
		local_flush_cache_mm(mm);
	}
}

void smp_flush_tlb_mm(struct mm_struct *mm)
{
	if(mm->context != NO_CONTEXT) {
		if(mm->cpu_vm_mask != (1 << smp_processor_id())) {
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
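			/* If we are the last user of this mm, shrink
			 * cpu_vm_mask back to just this CPU so future
			 * flushes can stay local.
			 */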
			if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
				mm->cpu_vm_mask = (1 << smp_processor_id());
		}
		local_flush_tlb_mm(mm);
	}
}

void smp_flush_cache_range(struct mm_struct *mm, unsigned long start,
			   unsigned long end)
{
	if(mm->context != NO_CONTEXT) {
		if(mm->cpu_vm_mask != (1 << smp_processor_id()))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) mm, start, end);
		local_flush_cache_range(mm, start, end);
	}
}

void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
			 unsigned long end)
{
	if(mm->context != NO_CONTEXT) {
		if(mm->cpu_vm_mask != (1 << smp_processor_id()))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) mm, start, end);
		local_flush_tlb_range(mm, start, end);
	}
}

void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if(mm->context != NO_CONTEXT) {
		if(mm->cpu_vm_mask != (1 << smp_processor_id()))
			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
		local_flush_cache_page(vma, page);
	}
}

void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if(mm->context != NO_CONTEXT) {
		if(mm->cpu_vm_mask != (1 << smp_processor_id()))
			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
		local_flush_tlb_page(vma, page);
	}
}

void smp_flush_page_to_ram(unsigned long page)
{
	/* Current theory is that those who call this are the ones
	 * who have just dirtied their cache with the page's contents
	 * in kernel space, therefore we only run this on the local cpu.
	 *
	 * XXX This experiment failed, research further... -DaveM
	 */
#if 1
	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
#endif
	local_flush_page_to_ram(page);
}

void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	if(mm->cpu_vm_mask != (1 << smp_processor_id()))
		xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
	local_flush_sig_insns(mm, insn_addr);
}


/* Reschedule callback. */
void smp_reschedule_irq(void)
{
	current->need_resched = 1;
}

/* Stopping processors. */
void smp_stop_cpu_irq(void)
{
	__sti();
	while(1)
		barrier();
}

unsigned int prof_multiplier[NR_CPUS];
unsigned int prof_counter[NR_CPUS];
extern unsigned int lvl14_resolution;

int setup_profiling_timer(unsigned int multiplier)
{
	int i;
	unsigned long flags;

	/* Prevent level14 ticker IRQ flooding. */
	if((!multiplier) || (lvl14_resolution / multiplier) < 500)
		return -EINVAL;

	save_and_cli(flags);
	for(i = 0; i < NR_CPUS; i++) {
		if(cpu_present_map & (1 << i)) {
			load_profile_irq(mid_xlate[i], lvl14_resolution / multiplier);
			prof_multiplier[i] = multiplier;
		}
	}
	restore_flags(flags);

	return 0;
}

void smp_bogo_info(struct seq_file *m)
{
	int i;

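	/* udelay_val holds loops_per_jiffy; multiplying by HZ and
	 * dividing by 500000 yields BogoMIPS, printed here with two
	 * decimal places.
	 */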
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_present_map & (1 << i))
			seq_printf(m,
				   "Cpu%dBogo\t: %lu.%02lu\n",
				   i,
				   cpu_data[i].udelay_val/(500000/HZ),
				   (cpu_data[i].udelay_val/(5000/HZ))%100);
	}
}

void smp_info(struct seq_file *m)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_present_map & (1 << i))
			seq_printf(m, "CPU%d\t\t: online\n", i);
	}
}