/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *	(c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>

#include <asm/mtrr.h>
#include <asm/pgalloc.h>

/*
 *	Some notes on x86 processor bugs affecting SMP operation:
 *
 *	Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 *	The Linux implications for SMP are handled as follows:
 *
 *	Pentium III / [Xeon]
 *		None of the E1AP-E3AP errata are visible to the user.
 *
 *	E1AP.	see PII A1AP
 *	E2AP.	see PII A2AP
 *	E3AP.	see PII A3AP
 *
 *	Pentium II / [Xeon]
 *		None of the A1AP-A3AP errata are visible to the user.
 *
 *	A1AP.	see PPro 1AP
 *	A2AP.	see PPro 2AP
 *	A3AP.	see PPro 7AP
 *
 *	Pentium Pro
 *		None of the 1AP-9AP errata are visible to the normal user,
 *	except occasional delivery of a 'spurious interrupt' as trap #15.
 *	This is very rare and a non-problem.
 *
 *	1AP.	Linux maps APIC as non-cacheable
 *	2AP.	worked around in hardware
 *	3AP.	fixed in C0 and later steppings by microcode update.
 *		Linux does not use excessive STARTUP_IPIs.
 *	4AP.	worked around in hardware
 *	5AP.	symmetric IO mode (normal Linux operation) not affected.
 *		'noapic' mode has vector 0xf filled out properly.
 *	6AP.	'noapic' mode might be affected - fixed in later steppings
 *	7AP.	We do not assume writes to the LVT deassert IRQs
 *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
 *	9AP.	We do not use mixed mode
 *
 *	Pentium
 *		There is a marginal case where REP MOVS on 100MHz SMP
 *	machines with B stepping processors can fail. XXX should provide
 *	an L1cache=Writethrough or L1cache=off option.
 *
 *		B stepping CPUs may hang. There are hardware workarounds
 *	for this. We warn about it in case your board doesn't have the
 *	workarounds. Basically that's so I can tell anyone with a B stepping
 *	CPU and SMP problems "tough".
 *
 *	Specific items [From Pentium Processor Specification Update]
 *
 *	1AP.	Linux doesn't use remote read
 *	2AP.	Linux doesn't trust APIC errors
 *	3AP.	We work around this
 *	4AP.	Linux never generates 3 interrupts of the same priority
 *		to cause a lost local interrupt.
 *	5AP.	Remote read is never used
 *	6AP.	not affected - worked around in hardware
 *	7AP.	not affected - worked around in hardware
 *	8AP.	worked around in hardware - we get explicit CS errors if not
 *	9AP.	only 'noapic' mode affected. Might generate spurious
 *		interrupts, we log only the first one and count the
 *		rest silently.
 *	10AP.	not affected - worked around in hardware
 *	11AP.	Linux reads the APIC between writes to avoid this, as per
 *		the documentation. Make sure you preserve this as it affects
 *		the C stepping chips too.
 *	12AP.	not affected - worked around in hardware
 *	13AP.	not affected - worked around in hardware
 *	14AP.	we always deassert INIT during bootup
 *	15AP.	not affected - worked around in hardware
 *	16AP.	not affected - worked around in hardware
 *	17AP.	not affected - worked around in hardware
 *	18AP.	not affected - worked around in hardware
 *	19AP.	not affected - worked around in BIOS
 *
 *	If this sounds worrying, believe me these bugs are either ___RARE___
 *	or are signal timing bugs worked around in hardware, and there is
 *	nothing of note from the C stepping onwards.
 */

/* The 'big kernel lock' */
spinlock_cacheline_t kernel_flag_cacheline = {SPIN_LOCK_UNLOCKED};

struct tlb_state cpu_tlbstate[NR_CPUS] __cacheline_aligned = {[0 ... NR_CPUS-1] = { &init_mm, 0, }};

/*
 * the following functions deal with sending IPIs between CPUs.
 *
 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
 */

static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector)
{
	unsigned int icr = APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL;
	return icr;
}

static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}

static inline void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe.  As we don't care
	 * about the value read we use an atomic rmw access to avoid costly
	 * cli/sti.  Otherwise we use an even cheaper single atomic write
	 * to the APIC.  (An illustrative sketch of such an access follows
	 * this function.)
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	/*
	 * No need to touch the target chip field
	 */
	cfg = __prepare_ICR(shortcut, vector);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);
}
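
/*
 * For illustration only: a sketch of the 'atomic rmw' write mentioned
 * above, as one plausible definition of apic_write_around() when the
 * double-write workaround is needed.  This is an assumption; the real
 * definition lives in the APIC headers and may simply be a plain write
 * on this architecture.
 */
#if 0
static inline void apic_write_around_sketch(unsigned long reg, unsigned int v)
{
	/* xchg is a locked rmw access: no back-to-back write, no cli/sti */
	(void)xchg((volatile unsigned int *)(APIC_BASE + reg), v);
}
#endif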

static inline void send_IPI_allbutself(int vector)
{
	/*
	 * if there are no other CPUs in the system then
	 * we get an APIC send error if we try to broadcast.
	 * thus we have to avoid sending IPIs in this case.
	 */
	if (smp_num_cpus > 1)
		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}

static inline void send_IPI_all(int vector)
{
	__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}

void send_IPI_self(int vector)
{
	__send_IPI_shortcut(APIC_DEST_SELF, vector);
}

static inline void send_IPI_mask(int mask, int vector)
{
	unsigned long cfg;
	unsigned long flags;

	__save_flags(flags);
	__cli();

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	apic_write_around(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);
	__restore_flags(flags);
}

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway).
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 */

static volatile unsigned long flush_cpumask;
static struct mm_struct * flush_mm;
static unsigned long flush_va;
static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
#define FLUSH_ALL	0xffffffff

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
static inline void leave_mm(unsigned long cpu)
{
	if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
		BUG();
	clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
	/* flush TLB before it goes away. this stops speculative prefetches */
	*read_pda(level4_pgt) = __pa(init_mm.pgd) | _PAGE_TABLE;
	__flush_tlb();
}

/*
 *
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) clear_bit(cpu, &old_mm->cpu_vm_mask);
 * 	Stop ipi delivery for the old mm. This is not synchronized with
 * 	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 * 	for the wrong mm, and in the worst case we perform a superfluous
 * 	tlb flush.
 * 1a2) set cpu_tlbstate to TLBSTATE_OK
 * 	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu_tlbstate[].active_mm
 * 	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) set_bit(cpu, &new_mm->cpu_vm_mask);
 * 	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu_tlbstate[].active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 * 	Atomically set the bit [other cpus will start sending flush ipis],
 * 	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.  A sketch of the switch_mm() side of
 * this protocol follows below.
 */
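
/*
 * For illustration only, a simplified sketch of the switch_mm() side of
 * the protocol above (steps 1a1-1a5 and 1b1-1b3).  This is an assumption
 * of how the real implementation (in the mmu_context headers) is shaped,
 * not a drop-in replacement; load_new_cr3() stands in for the actual
 * cr3 reload.
 */
#if 0
static inline void switch_mm_sketch(struct mm_struct *prev,
				    struct mm_struct *next, unsigned long cpu)
{
	if (prev != next) {
		/* 1a1) stop flush ipis for the old mm */
		clear_bit(cpu, &prev->cpu_vm_mask);
		/* 1a2 + 1a3) accept flushes for the new mm */
		cpu_tlbstate[cpu].state = TLBSTATE_OK;
		cpu_tlbstate[cpu].active_mm = next;
		/* 1a4) other cpus now send us flush ipis for next */
		set_bit(cpu, &next->cpu_vm_mask);
		/* 1a5) switch page tables */
		load_new_cr3(next);
	} else {
		/* 1b1) we keep handling flush ipis for this mm */
		cpu_tlbstate[cpu].state = TLBSTATE_OK;
		/* 1b2 + 1b3) if the bit was clear, leave_mm() ran: reload */
		if (!test_and_set_bit(cpu, &next->cpu_vm_mask))
			load_new_cr3(next);
	}
}
#endif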

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */

asmlinkage void smp_invalidate_interrupt(void)
{
	unsigned long cpu = smp_processor_id();

	if (!test_bit(cpu, &flush_cpumask))
		return;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the Intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */

	if (flush_mm == cpu_tlbstate[cpu].active_mm) {
		if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	ack_APIC_irq();
	clear_bit(cpu, &flush_cpumask);
}

static void flush_tlb_others(unsigned long cpumask, struct mm_struct *mm,
						unsigned long va)
{
	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - we do not send IPIs to not-yet booted CPUs.
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	if (!cpumask)
		BUG();
	if ((cpumask & cpu_online_map) != cpumask)
		BUG();
	if (cpumask & (1 << smp_processor_id()))
		BUG();
	if (!mm)
		BUG();

	/*
	 * I'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
	atomic_set_mask(cpumask, &flush_cpumask);
	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

	while (flush_cpumask)
		/* nothing. lockup detection does not belong here */;

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());

	local_flush_tlb();
	if (cpu_mask)
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (cpu_mask)
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (cpu_mask)
		flush_tlb_others(cpu_mask, mm, va);
}

static inline void do_flush_tlb_all_local(void)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY)
		leave_mm(cpu);
}

static void flush_tlb_all_ipi(void *info)
{
	do_flush_tlb_all_local();
}

void flush_tlb_all(void)
{
	smp_call_function(flush_tlb_all_ipi, 0, 1, 1);

	do_flush_tlb_all_local();
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */

void smp_send_reschedule(int cpu)
{
	send_IPI_mask(1 << cpu, RESCHEDULE_VECTOR);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct * call_data;

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */

int smp_call_function(void (*func)(void *info), void *info, int nonatomic,
			int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 * A usage sketch follows the function body below.
 */
{
	struct call_data_struct data;
	int cpus = smp_num_cpus-1;

	if (!cpus)
		return 0;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	call_data = &data;
	wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	spin_unlock(&call_lock);

	return 0;
}
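
/*
 * Usage sketch (illustration only): run a fast, non-blocking function on
 * every CPU and wait for completion.  print_cpu_id() is a hypothetical
 * helper made up for this example; the pattern mirrors flush_tlb_all()
 * above.
 */
#if 0
static void print_cpu_id(void *unused)
{
	printk("hello from CPU %d\n", smp_processor_id());
}

static void greet_all_cpus(void)
{
	/* run on all other CPUs, waiting until they have finished ... */
	smp_call_function(print_cpu_id, NULL, 1, 1);
	/* ... and then on this CPU as well */
	print_cpu_id(NULL);
}
#endif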

void smp_stop_cpu(void)
{
	/*
	 * Remove this CPU:
	 */
	clear_bit(smp_processor_id(), &cpu_online_map);
	__cli();
	disable_local_APIC();
	__sti();
}

static void smp_really_stop_cpu(void *dummy)
{
	smp_stop_cpu();
	for (;;)
		asm("hlt");
}

/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */

void smp_send_stop(void)
{
	smp_call_function(smp_really_stop_cpu, NULL, 1, 0);
	smp_stop_cpu();
}

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
asmlinkage void smp_reschedule_interrupt(void)
{
	ack_APIC_irq();
}

asmlinkage void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	(*func)(info);
	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}