/*
 *	Local APIC handling, local APIC timers
 *
 *	(c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *	Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					and Rolf G. Tews
 *					for testing these extensively.
 *	Maciej W. Rozycki	:	Various updates and fixes.
 *	Mikael Pettersson	:	Power Management for UP-APIC.
 */

#include <linux/config.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/mc146818rtc.h>
#include <linux/kernel_stat.h>

#include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/pgalloc.h>
#include <asm/smpboot.h>

/* Using APIC to generate smp_local_timer_interrupt? */
int using_apic_timer = 0;

int prof_multiplier[NR_CPUS] = { 1, };
int prof_old_multiplier[NR_CPUS] = { 1, };
int prof_counter[NR_CPUS] = { 1, };

static int enabled_via_apicbase;

int get_maxlvt(void)
{
	unsigned int v, ver, maxlvt;

	v = apic_read(APIC_LVR);
	ver = GET_APIC_VERSION(v);
	/* 82489DXs do not report # of LVT entries. */
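	/*
	 * Integrated APICs report the index of their highest LVT entry
	 * in the version register; 2 is assumed for the external 82489DX
	 * (timer, LINT0, LINT1).
	 */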
	maxlvt = APIC_INTEGRATED(ver) ? GET_APIC_MAXLVT(v) : 2;
	return maxlvt;
}

void clear_local_APIC(void)
{
	int maxlvt;
	unsigned long v;

	maxlvt = get_maxlvt();

	/*
	 * Masking an LVT entry on a P6 can trigger a local APIC error
	 * if the vector is zero. Mask LVTERR first to prevent this.
	 */
	if (maxlvt >= 3) {
		v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
		apic_write_around(APIC_LVTERR, v | APIC_LVT_MASKED);
	}
	/*
	 * Careful: we have to set only the mask bits first, to deassert
	 * any level-triggered sources.
	 */
	v = apic_read(APIC_LVTT);
	apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
	v = apic_read(APIC_LVT0);
	apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
	v = apic_read(APIC_LVT1);
	apic_write_around(APIC_LVT1, v | APIC_LVT_MASKED);
	if (maxlvt >= 4) {
		v = apic_read(APIC_LVTPC);
		apic_write_around(APIC_LVTPC, v | APIC_LVT_MASKED);
	}

	/*
	 * Clean APIC state for other OSs:
	 */
	apic_write_around(APIC_LVTT, APIC_LVT_MASKED);
	apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
	apic_write_around(APIC_LVT1, APIC_LVT_MASKED);
	if (maxlvt >= 3)
		apic_write_around(APIC_LVTERR, APIC_LVT_MASKED);
	if (maxlvt >= 4)
		apic_write_around(APIC_LVTPC, APIC_LVT_MASKED);
	v = GET_APIC_VERSION(apic_read(APIC_LVR));
	if (APIC_INTEGRATED(v)) {	/* !82489DX */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}
}

void __init connect_bsp_APIC(void)
{
	if (pic_mode) {
		/*
		 * Do not trust the local APIC being empty at bootup.
		 */
		clear_local_APIC();
		/*
		 * PIC mode, enable APIC mode in the IMCR, i.e.
		 * connect BSP's local APIC to INT and NMI lines.
		 */
		printk("leaving PIC mode, enabling APIC mode.\n");
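		/*
		 * The IMCR is programmed through ports 0x22/0x23: writing
		 * 0x70 to port 0x22 selects the IMCR, and the value written
		 * to port 0x23 picks where INTR/NMI go (0x01 = through the
		 * local APIC, 0x00 = directly to the BSP).
		 */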
		outb(0x70, 0x22);
		outb(0x01, 0x23);
	}
}

void disconnect_bsp_APIC(void)
{
	if (pic_mode) {
		/*
		 * Put the board back into PIC mode (has an effect
		 * only on certain older boards).  Note that APIC
		 * interrupts, including IPIs, won't work beyond
		 * this point!  The only exception are INIT IPIs.
		 */
		printk("disabling APIC mode, entering PIC mode.\n");
		outb(0x70, 0x22);
		outb(0x00, 0x23);
	}
}

void disable_local_APIC(void)
{
	unsigned long value;

	clear_local_APIC();

	/*
	 * Disable APIC (implies clearing of registers
	 * for 82489DX!).
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_SPIV_APIC_ENABLED;
	apic_write_around(APIC_SPIV, value);

	if (enabled_via_apicbase) {
		unsigned int l, h;
		rdmsr(MSR_IA32_APICBASE, l, h);
		l &= ~MSR_IA32_APICBASE_ENABLE;
		wrmsr(MSR_IA32_APICBASE, l, h);
	}
}

/*
 * This is to verify that we're looking at a real local APIC.
 * Check these against your board if the CPUs aren't getting
 * started for no apparent reason.
 */
int __init verify_local_APIC(void)
{
	unsigned int reg0, reg1;

	/*
	 * The version register is read-only in a real APIC.
	 */
	reg0 = apic_read(APIC_LVR);
	Dprintk("Getting VERSION: %x\n", reg0);
	apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
	reg1 = apic_read(APIC_LVR);
	Dprintk("Getting VERSION: %x\n", reg1);

	/*
	 * The two version reads above should return the same
	 * number.  If the second one differs, we are poking
	 * at something that is not an APIC.
	 */
	if (reg1 != reg0)
		return 0;

	/*
	 * Check if the version looks reasonable.
	 */
	reg1 = GET_APIC_VERSION(reg0);
	if (reg1 == 0x00 || reg1 == 0xff)
		return 0;
	reg1 = get_maxlvt();
	if (reg1 < 0x02 || reg1 == 0xff)
		return 0;

	/*
	 * The ID register is read/write in a real APIC.
	 */
	reg0 = apic_read(APIC_ID);
	Dprintk("Getting ID: %x\n", reg0);
	apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
	reg1 = apic_read(APIC_ID);
	Dprintk("Getting ID: %x\n", reg1);
	apic_write(APIC_ID, reg0);
	if (reg1 != (reg0 ^ APIC_ID_MASK))
		return 0;

	/*
	 * The next two are just to see if we have sane values.
	 * They're only really relevant if we're in Virtual Wire
	 * compatibility mode, but most boxes are these days.
	 */
	reg0 = apic_read(APIC_LVT0);
	Dprintk("Getting LVT0: %x\n", reg0);
	reg1 = apic_read(APIC_LVT1);
	Dprintk("Getting LVT1: %x\n", reg1);

	return 1;
}

void __init sync_Arb_IDs(void)
{
	/*
	 * Wait for the ICR to go idle.
	 */
	apic_wait_icr_idle();

	Dprintk("Synchronizing Arb IDs.\n");
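	/*
	 * Broadcasting an INIT level de-assert to all APICs (ourselves
	 * included) resets the APIC bus arbitration IDs back to the
	 * APIC IDs.
	 */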
	apic_write_around(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
				| APIC_DM_INIT);
}

extern void __error_in_apic_c (void);

/*
 * An initial setup of the virtual wire mode.
 */
void __init init_bsp_APIC(void)
{
	unsigned long value, ver;

	/*
	 * Don't do the setup now if we have an SMP BIOS as the
	 * through-I/O-APIC virtual wire mode might be active.
	 */
	if (smp_found_config || !cpu_has_apic)
		return;

	value = apic_read(APIC_LVR);
	ver = GET_APIC_VERSION(value);

	/*
	 * Do not trust the local APIC being empty at bootup.
	 */
	clear_local_APIC();

	/*
	 * Enable APIC.
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	value |= APIC_SPIV_APIC_ENABLED;
	value |= APIC_SPIV_FOCUS_DISABLED;
	value |= SPURIOUS_APIC_VECTOR;
	apic_write_around(APIC_SPIV, value);

	/*
	 * Set up the virtual wire mode.
	 */
	apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
	value = APIC_DM_NMI;
	if (!APIC_INTEGRATED(ver))		/* 82489DX */
		value |= APIC_LVT_LEVEL_TRIGGER;
	apic_write_around(APIC_LVT1, value);
}

static unsigned long calculate_ldr(unsigned long old)
{
	unsigned long id;
	if (clustered_apic_mode == CLUSTERED_APIC_XAPIC)
		id = physical_to_logical_apicid(hard_smp_processor_id());
	else
		id = 1UL << smp_processor_id();
	return (old & ~APIC_LDR_MASK) | SET_APIC_LOGICAL_ID(id);
}

void __init setup_local_APIC (void)
{
	unsigned long value, ver, maxlvt;

	/* Pound the ESR really hard over the head with a big hammer - mbligh */
	if (esr_disable) {
		apic_write(APIC_ESR, 0);
		apic_write(APIC_ESR, 0);
		apic_write(APIC_ESR, 0);
		apic_write(APIC_ESR, 0);
	}

	value = apic_read(APIC_LVR);
	ver = GET_APIC_VERSION(value);

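	/*
	 * Older local APICs hardwire the low four bits of the spurious
	 * vector to 1, so the vector we pick must end in 0xf; the
	 * undefined __error_in_apic_c() reference below turns a wrong
	 * choice into a link error.
	 */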
	if ((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f)
		__error_in_apic_c();

	/*
	 * Double-check whether this APIC is really registered.
	 * This is meaningless in clustered apic mode, so we skip it.
	 */
	if (!clustered_apic_mode &&
	    !test_bit(GET_APIC_ID(apic_read(APIC_ID)), &phys_cpu_present_map))
		BUG();

	/*
	 * Intel recommends setting DFR, LDR and TPR before enabling
	 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
	 * document number 292116).  So here it goes...
	 */
	if (clustered_apic_mode != CLUSTERED_APIC_NUMAQ) {
		/*
		 * For NUMA-Q (clustered apic logical), the firmware does this
		 * for us. Otherwise put the APIC into clustered or flat
		 * delivery mode. Must be "all ones" explicitly for 82489DX.
		 */
		if (clustered_apic_mode == CLUSTERED_APIC_XAPIC)
			apic_write_around(APIC_DFR, APIC_DFR_CLUSTER);
		else
			apic_write_around(APIC_DFR, APIC_DFR_FLAT);

		/*
		 * Set up the logical destination ID.
		 */
		value = apic_read(APIC_LDR);
		apic_write_around(APIC_LDR, calculate_ldr(value));
	}

	/*
	 * Set Task Priority to 'accept all'. We never change this
	 * later on.
	 */
	value = apic_read(APIC_TASKPRI);
	value &= ~APIC_TPRI_MASK;
	apic_write_around(APIC_TASKPRI, value);

	/*
	 * Now that we are all set up, enable the APIC
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	/*
	 * Enable APIC
	 */
	value |= APIC_SPIV_APIC_ENABLED;

	/*
	 * Some unknown Intel IO/APIC (or APIC) erratum is biting us with
	 * certain networking cards. If high frequency interrupts are
	 * happening on a particular IOAPIC pin, and the IOAPIC routing
	 * entry is masked/unmasked at a high rate as well, then sooner or
	 * later the IOAPIC line gets 'stuck': no more interrupts are
	 * received from the device. If the focus CPU is disabled then the
	 * hang goes away, oh well :-(
	 *
	 * [ This bug can be reproduced easily with a level-triggered
	 *   PCI Ne2000 networking card and PII/PIII processors, dual
	 *   BX chipset. ]
	 */
	/*
	 * Actually disabling the focus CPU check just makes the hang less
	 * frequent as it makes the interrupt distribution model more
	 * LRU-like than MRU-like (the short-term load is more even across
	 * CPUs).  See also the comment in end_level_ioapic_irq().  --macro
	 */
#if 1
	/* Enable focus processor (bit==0) */
	value &= ~APIC_SPIV_FOCUS_DISABLED;
#else
	/* Disable focus processor (bit==1) */
	value |= APIC_SPIV_FOCUS_DISABLED;
#endif
	/*
	 * Set spurious IRQ vector
	 */
	value |= SPURIOUS_APIC_VECTOR;
	apic_write_around(APIC_SPIV, value);

	/*
	 * Set up LVT0, LVT1:
	 *
	 * Set up through-local-APIC on the BP's LINT0. This is not
	 * strictly necessary in pure symmetric-IO mode, but sometimes
	 * we delegate interrupts to the 8259A.
	 */
	/*
	 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
	 */
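	/* Keep only the mask bit of the current LVT0 setting: zero means LVT0 is unmasked. */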
	value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
	if (!smp_processor_id() && (pic_mode || !value)) {
		value = APIC_DM_EXTINT;
		printk("enabled ExtINT on CPU#%d\n", smp_processor_id());
	} else {
		value = APIC_DM_EXTINT | APIC_LVT_MASKED;
		printk("masked ExtINT on CPU#%d\n", smp_processor_id());
	}
	apic_write_around(APIC_LVT0, value);

	/*
	 * only the BP should see the LINT1 NMI signal, obviously.
	 */
	if (!smp_processor_id())
		value = APIC_DM_NMI;
	else
		value = APIC_DM_NMI | APIC_LVT_MASKED;
	if (!APIC_INTEGRATED(ver))		/* 82489DX */
		value |= APIC_LVT_LEVEL_TRIGGER;
	apic_write_around(APIC_LVT1, value);

	if (APIC_INTEGRATED(ver) && !esr_disable) {		/* !82489DX */
		maxlvt = get_maxlvt();
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		value = apic_read(APIC_ESR);
		printk("ESR value before enabling vector: %08lx\n", value);

		value = ERROR_APIC_VECTOR;	/* enables sending errors */
		apic_write_around(APIC_LVTERR, value);
		/*
		 * spec says clear errors after enabling vector.
		 */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);
		value = apic_read(APIC_ESR);
		printk("ESR value after enabling vector: %08lx\n", value);
	} else {
		if (esr_disable)
			/*
			 * Something untraceable is creating bad interrupts on
			 * secondary quads ... for the moment, just leave the
			 * ESR disabled - we can't do anything useful with the
			 * errors anyway - mbligh
			 */
			printk("Leaving ESR disabled.\n");
		else
			printk("No ESR for 82489DX.\n");
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		setup_apic_nmi_watchdog();
}

#ifdef CONFIG_PM

#include <linux/slab.h>
#include <linux/pm.h>

static struct {
	/* 'active' is true if the local APIC was enabled by us and
	   not the BIOS; this signifies that we are also responsible
	   for disabling it before entering apm/acpi suspend */
	int active;
	/* 'perfctr_pmdev' is here because the current (2.4.1) PM
	   callback system doesn't handle hierarchical dependencies */
	struct pm_dev *perfctr_pmdev;
	/* r/w apic fields */
	unsigned int apic_id;
	unsigned int apic_taskpri;
	unsigned int apic_ldr;
	unsigned int apic_dfr;
	unsigned int apic_spiv;
	unsigned int apic_lvtt;
	unsigned int apic_lvtpc;
	unsigned int apic_lvt0;
	unsigned int apic_lvt1;
	unsigned int apic_lvterr;
	unsigned int apic_tmict;
	unsigned int apic_tdcr;
} apic_pm_state;

static void apic_pm_suspend(void *data)
{
	unsigned long flags;

	if (apic_pm_state.perfctr_pmdev)
		pm_send(apic_pm_state.perfctr_pmdev, PM_SUSPEND, data);
	apic_pm_state.apic_id = apic_read(APIC_ID);
	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
	apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
	__save_flags(flags);
	__cli();
	disable_local_APIC();
	__restore_flags(flags);
}

static void apic_pm_resume(void *data)
{
	unsigned int l, h;
	unsigned long flags;

	__save_flags(flags);
	__cli();

	/*
	 * Make sure the APICBASE points to the right address
	 *
	 * FIXME! This will be wrong if we ever support suspend on
	 * SMP! We'll need to do this as part of the CPU restore!
	 */
	rdmsr(MSR_IA32_APICBASE, l, h);
	l &= ~MSR_IA32_APICBASE_BASE;
	l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
	wrmsr(MSR_IA32_APICBASE, l, h);

	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
	apic_write(APIC_ID, apic_pm_state.apic_id);
	apic_write(APIC_DFR, apic_pm_state.apic_dfr);
	apic_write(APIC_LDR, apic_pm_state.apic_ldr);
	apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
	apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
	apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
	apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
	apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
	apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
	apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
	apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	__restore_flags(flags);
	if (apic_pm_state.perfctr_pmdev)
		pm_send(apic_pm_state.perfctr_pmdev, PM_RESUME, data);
}

static int apic_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
{
	switch (rqst) {
	case PM_SUSPEND:
		apic_pm_suspend(data);
		break;
	case PM_RESUME:
		apic_pm_resume(data);
		break;
	}
	return 0;
}

/* perfctr driver should call this instead of pm_register() */
struct pm_dev *apic_pm_register(pm_dev_t type,
				unsigned long id,
				pm_callback callback)
{
	struct pm_dev *dev;

	if (!apic_pm_state.active)
		return pm_register(type, id, callback);
	if (apic_pm_state.perfctr_pmdev)
		return NULL;	/* we're busy */
	dev = kmalloc(sizeof(struct pm_dev), GFP_KERNEL);
	if (dev) {
		memset(dev, 0, sizeof(*dev));
		dev->type = type;
		dev->id = id;
		dev->callback = callback;
		apic_pm_state.perfctr_pmdev = dev;
	}
	return dev;
}

/* perfctr driver should call this instead of pm_unregister() */
void apic_pm_unregister(struct pm_dev *dev)
{
	if (!apic_pm_state.active) {
		pm_unregister(dev);
	} else if (dev == apic_pm_state.perfctr_pmdev) {
		apic_pm_state.perfctr_pmdev = NULL;
		kfree(dev);
	}
}

static void __init apic_pm_init1(void)
{
	/* can't pm_register() at this early stage in the boot process
	   (causes an immediate reboot), so just set the flag */
	apic_pm_state.active = 1;
}

static void __init apic_pm_init2(void)
{
	if (apic_pm_state.active)
		pm_register(PM_SYS_DEV, 0, apic_pm_callback);
}

#else	/* CONFIG_PM */

static inline void apic_pm_init1(void) { }
static inline void apic_pm_init2(void) { }

#endif	/* CONFIG_PM */

/*
 * Detect and enable local APICs on non-SMP boards.
 * Original code written by Keir Fraser.
 */

/*
 * Knob to control our willingness to enable the local APIC.
 */
int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */

static int __init lapic_disable(char *str)
{
	enable_local_apic = -1;
	clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
	return 0;
}
__setup("nolapic", lapic_disable);

static int __init lapic_enable(char *str)
{
	enable_local_apic = 1;
	return 0;
}
__setup("lapic", lapic_enable);

static int __init detect_init_APIC (void)
{
	u32 h, l, features;
	extern void get_cpu_vendor(struct cpuinfo_x86*);

	/* Disabled by DMI scan or kernel option? */
	if (enable_local_apic < 0)
		return -1;

	/* Workaround for us being called before identify_cpu(). */
	get_cpu_vendor(&boot_cpu_data);

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1)
			break;
		if (boot_cpu_data.x86 == 15 && cpu_has_apic)
			break;
		goto no_apic;
	case X86_VENDOR_INTEL:
		if (boot_cpu_data.x86 == 6 ||
		    (boot_cpu_data.x86 == 15 && (cpu_has_apic || enable_local_apic > 0)) ||
		    (boot_cpu_data.x86 == 5 && cpu_has_apic))
			break;
		goto no_apic;
	default:
		goto no_apic;
	}

	if (!cpu_has_apic) {
		/*
		 * Override the BIOS and try to enable the local APIC
		 * only if "lapic" was specified.
		 */
		if (enable_local_apic != 1)
			goto no_apic;
		/*
		 * Some BIOSes disable the local APIC in the
		 * APIC_BASE MSR. This can only be done in
		 * software for Intel P6 and AMD K7 (Model > 1).
		 */
		rdmsr(MSR_IA32_APICBASE, l, h);
		if (!(l & MSR_IA32_APICBASE_ENABLE)) {
			printk("Local APIC disabled by BIOS -- reenabling.\n");
			l &= ~MSR_IA32_APICBASE_BASE;
			l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
			wrmsr(MSR_IA32_APICBASE, l, h);
			enabled_via_apicbase = 1;
		}
	}
	/*
	 * The APIC feature bit should now be enabled
	 * in `cpuid'
	 */
	features = cpuid_edx(1);
	if (!(features & (1 << X86_FEATURE_APIC))) {
		printk("Could not enable APIC!\n");
		return -1;
	}
	set_bit(X86_FEATURE_APIC, &boot_cpu_data.x86_capability);
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/* The BIOS may have set up the APIC at some other address */
	rdmsr(MSR_IA32_APICBASE, l, h);
	if (l & MSR_IA32_APICBASE_ENABLE)
		mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;

	if (nmi_watchdog != NMI_NONE)
		nmi_watchdog = NMI_LOCAL_APIC;

	printk("Found and enabled local APIC!\n");

	apic_pm_init1();

	return 0;

no_apic:
	printk("No local APIC present or hardware disabled\n");
	return -1;
}

void __init init_apic_mappings(void)
{
	unsigned long apic_phys;

	/*
	 * If no local APIC can be found then set up a fake all
	 * zeroes page to simulate the local APIC and another
	 * one for the IO-APIC.
	 */
	if (!smp_found_config && detect_init_APIC()) {
		apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
		apic_phys = __pa(apic_phys);
	} else
		apic_phys = mp_lapic_addr;

	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
	Dprintk("mapped APIC to %08lx (%08lx)\n", APIC_BASE, apic_phys);

	/*
	 * Fetch the APIC ID of the BSP in case we have a
	 * default configuration (or the MP table is broken).
	 */
	if (boot_cpu_physical_apicid == -1U)
		boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));

#ifdef CONFIG_X86_IO_APIC
	{
		unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
		int i;

		for (i = 0; i < nr_ioapics; i++) {
			if (smp_found_config) {
				ioapic_phys = mp_ioapics[i].mpc_apicaddr;
				if (!ioapic_phys) {
					printk(KERN_ERR "WARNING: bogus zero IO-APIC address found in MPTABLE, disabling IO/APIC support!\n");

					smp_found_config = 0;
					skip_ioapic_setup = 1;
					goto fake_ioapic_page;
				}
			} else {
fake_ioapic_page:
				ioapic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
				ioapic_phys = __pa(ioapic_phys);
			}
			set_fixmap_nocache(idx, ioapic_phys);
			Dprintk("mapped IOAPIC to %08lx (%08lx)\n",
					__fix_to_virt(idx), ioapic_phys);
			idx++;
		}
	}
#endif
}

/*
 * This part sets up the APIC 32 bit clock in LVTT1, with HZ interrupts
 * per second. We assume that the caller has already set up the local
 * APIC.
 *
 * The APIC timer is not exactly in sync with the external timer chip,
 * it closely follows bus clocks.
 */

/*
 * The timer chip is already set up at HZ interrupts per second here,
 * but we do not accept timer interrupts yet. We only allow the BP
 * to calibrate.
 */
static unsigned int __init get_8254_timer_count(void)
{
	extern spinlock_t i8253_lock;
	unsigned long flags;

	unsigned int count;

	spin_lock_irqsave(&i8253_lock, flags);

	outb_p(0x00, 0x43);
	count = inb_p(0x40);
	count |= inb_p(0x40) << 8;

	spin_unlock_irqrestore(&i8253_lock, flags);

	return count;
}

void __init wait_8254_wraparound(void)
{
	unsigned int curr_count, prev_count=~0;
	int delta;

	curr_count = get_8254_timer_count();

	do {
		prev_count = curr_count;
		curr_count = get_8254_timer_count();
		delta = curr_count-prev_count;

	/*
	 * This limit for delta seems arbitrary, but it isn't: it's
	 * slightly above the level of error a buggy Mercury/Neptune
	 * chipset timer can cause.
	 */

	} while (delta < 300);
}

/*
 * This function sets up the local APIC timer, with a timeout of
 * 'clocks' APIC bus clocks. During calibration we actually call
 * this function twice on the boot CPU, once with a bogus timeout
 * value, the second time for real. The other (noncalibrating) CPUs
 * call this function only once, with the real, calibrated value.
 *
 * We do reads before writes even if unnecessary, to get around the
 * P5 APIC double write bug.
 */

#define APIC_DIVISOR 16
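
/*
 * TMICT is programmed in units of the divided clock: __setup_APIC_LVTT()
 * selects divide-by-16 in TDCR, so writing clocks/APIC_DIVISOR gives a
 * period of 'clocks' undivided bus clocks.
 */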

void __setup_APIC_LVTT(unsigned int clocks)
{
	unsigned int lvtt1_value, tmp_value;

	lvtt1_value = SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV) |
			APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
	apic_write_around(APIC_LVTT, lvtt1_value);

	/*
	 * Divide PICLK by 16
	 */
	tmp_value = apic_read(APIC_TDCR);
	apic_write_around(APIC_TDCR, (tmp_value
				& ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
				| APIC_TDR_DIV_16);

	apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR);
}

void setup_APIC_timer(void * data)
{
	unsigned int clocks = (unsigned int) data, slice, t0, t1;
	unsigned long flags;
	int delta;

	__save_flags(flags);
	__sti();
	/*
	 * ok, Intel has some smart code in their APIC that knows
	 * if a CPU was in 'hlt' lowpower mode, and this increases
	 * its APIC arbitration priority. To avoid the external timer
	 * IRQ APIC event being synchronous with the APIC clock we
	 * introduce an interrupt skew to spread out timer events.
	 *
	 * The number of slices within a 'big' timeslice is smp_num_cpus+1
	 */

	slice = clocks / (smp_num_cpus+1);
	printk("cpu: %d, clocks: %d, slice: %d\n", smp_processor_id(), clocks, slice);

	/*
	 * Wait for IRQ0's slice:
	 */
	wait_8254_wraparound();

	__setup_APIC_LVTT(clocks);

	t0 = apic_read(APIC_TMICT)*APIC_DIVISOR;
	/* Wait till TMCCT gets reloaded from TMICT... */
	do {
		t1 = apic_read(APIC_TMCCT)*APIC_DIVISOR;
		delta = (int)(t0 - t1 - slice*(smp_processor_id()+1));
	} while (delta >= 0);
	/* Now wait for our slice for real. */
	do {
		t1 = apic_read(APIC_TMCCT)*APIC_DIVISOR;
		delta = (int)(t0 - t1 - slice*(smp_processor_id()+1));
	} while (delta < 0);

	__setup_APIC_LVTT(clocks);

	printk("CPU%d<T0:%d,T1:%d,D:%d,S:%d,C:%d>\n", smp_processor_id(), t0, t1, delta, slice, clocks);

	__restore_flags(flags);
}

/*
 * In this function we calibrate the APIC bus clocks to the external
 * timer. Unfortunately we cannot use jiffies and the timer irq
 * to calibrate, since some later bootup code depends on getting
 * the first irq. Ugh.
 *
 * We want to do the calibration only once since we
 * want to have local timer irqs synchronized. CPUs connected
 * by the same APIC bus have the very same bus frequency.
 * And we want to have irqs off anyway, no accidental
 * APIC irq that way.
 */

int __init calibrate_APIC_clock(void)
{
	unsigned long long t1 = 0, t2 = 0;
	long tt1, tt2;
	long result;
	int i;
	const int LOOPS = HZ/10;

	printk("calibrating APIC timer ...\n");

	/*
	 * Put whatever arbitrary (but long enough) timeout
	 * value into the APIC clock, we just want to get the
	 * counter running for calibration.
	 */
	__setup_APIC_LVTT(1000000000);

	/*
	 * The timer chip counts down to zero. Let's wait
	 * for a wraparound to start exact measurement
	 * (the current tick might already be half done):
	 */

	wait_8254_wraparound();

	/*
	 * We wrapped around just now. Let's start:
	 */
	if (cpu_has_tsc)
		rdtscll(t1);
	tt1 = apic_read(APIC_TMCCT);

	/*
	 * Let's wait LOOPS wraparounds:
	 */
	for (i = 0; i < LOOPS; i++)
		wait_8254_wraparound();

	tt2 = apic_read(APIC_TMCCT);
	if (cpu_has_tsc)
		rdtscll(t2);

	/*
	 * The APIC bus clock counter is 32 bits only, it
	 * might have overflowed, but note that we use signed
	 * longs, thus no extra care needed.
	 *
	 * underflowed to be exact, as the timer counts down ;)
	 */

	result = (tt1-tt2)*APIC_DIVISOR/LOOPS;

	if (cpu_has_tsc)
		printk("..... CPU clock speed is %ld.%04ld MHz.\n",
			((long)(t2-t1)/LOOPS)/(1000000/HZ),
			((long)(t2-t1)/LOOPS)%(1000000/HZ));

	printk("..... host bus clock speed is %ld.%04ld MHz.\n",
		result/(1000000/HZ),
		result%(1000000/HZ));

	return result;
}

static unsigned int calibration_result;

void __init setup_APIC_clocks (void)
{
	printk("Using local APIC timer interrupts.\n");
	using_apic_timer = 1;

	__cli();

	calibration_result = calibrate_APIC_clock();
	/*
	 * Now set up the timer for real.
	 */
	setup_APIC_timer((void *)calibration_result);

	__sti();

	/* and update all other cpus */
	smp_call_function(setup_APIC_timer, (void *)calibration_result, 1, 1);
}

void __init disable_APIC_timer(void)
{
	if (using_apic_timer) {
		unsigned long v;

		v = apic_read(APIC_LVTT);
		apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
	}
}

void enable_APIC_timer(void)
{
	if (using_apic_timer) {
		unsigned long v;

		v = apic_read(APIC_LVTT);
		apic_write_around(APIC_LVTT, v & ~APIC_LVT_MASKED);
	}
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	int i;

	/*
	 * Sanity check. [at least 500 APIC cycles should be
	 * between APIC interrupts as a rule of thumb, to avoid
	 * irqs flooding us]
	 */
	if ( (!multiplier) || (calibration_result/multiplier < 500))
		return -EINVAL;

	/*
	 * Set the new multiplier for each CPU. CPUs don't start using the
	 * new values until the next timer interrupt in which they do process
	 * accounting. At that time they also adjust their APIC timers
	 * accordingly.
	 */
	for (i = 0; i < NR_CPUS; ++i)
		prof_multiplier[i] = multiplier;

	return 0;
}

#undef APIC_DIVISOR

/*
 * Local timer interrupt handler. It does both profiling and
 * process statistics/rescheduling.
 *
 * We do profiling in every local tick, statistics/rescheduling
 * happen only every 'profiling multiplier' ticks. The default
 * multiplier is 1 and it can be changed by writing the new multiplier
 * value into /proc/profile.
 */

inline void smp_local_timer_interrupt(struct pt_regs * regs)
{
	int user = user_mode(regs);
	int cpu = smp_processor_id();

	/*
	 * The profiling function is SMP safe. (nothing can mess
	 * around with "current", and the profiling counters are
	 * updated with atomic operations). This is especially
	 * useful with a profiling multiplier != 1
	 */
	if (!user)
		x86_do_profile(regs->eip);

	if (--prof_counter[cpu] <= 0) {
		/*
		 * The multiplier may have changed since the last time we got
		 * to this point as a result of the user writing to
		 * /proc/profile. In this case we need to adjust the APIC
		 * timer accordingly.
		 *
		 * Interrupts are already masked off at this point.
		 */
		prof_counter[cpu] = prof_multiplier[cpu];
		if (prof_counter[cpu] != prof_old_multiplier[cpu]) {
			__setup_APIC_LVTT(calibration_result/prof_counter[cpu]);
			prof_old_multiplier[cpu] = prof_counter[cpu];
		}

#ifdef CONFIG_SMP
		update_process_times(user);
#endif
	}

	/*
	 * We take the 'long' return path, and there every subsystem
	 * grabs the appropriate locks (kernel lock/ irq lock).
	 *
	 * We might want to decouple profiling from the 'long path',
	 * and do the profiling totally in assembly.
	 *
	 * Currently this isn't too much of an issue (performance wise),
	 * we can take more than 100K local irqs per second on a 100 MHz P5.
	 */
}

/*
 * Local APIC timer interrupt. This is the most natural way for doing
 * local interrupts, but local timer interrupts can be emulated by
 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
 *
 * [ if a single-CPU system runs an SMP kernel then we call the local
 *   interrupt as well. Thus we cannot inline the local irq ... ]
 */
unsigned int apic_timer_irqs [NR_CPUS];

void smp_apic_timer_interrupt(struct pt_regs * regs)
{
	int cpu = smp_processor_id();

	/*
	 * the NMI deadlock-detector uses this.
	 */
	apic_timer_irqs[cpu]++;

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();
	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't, timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	irq_enter(cpu, 0);
	smp_local_timer_interrupt(regs);
	irq_exit(cpu, 0);

	if (softirq_pending(cpu))
		do_softirq();
}

/*
 * This interrupt should _never_ happen with our APIC/SMP architecture
 */
asmlinkage void smp_spurious_interrupt(void)
{
	unsigned long v;

	/*
	 * Check if this really is a spurious interrupt and ACK it
	 * if it is a vectored one.  Just in case...
	 * Spurious interrupts should not be ACKed.
	 */
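	/*
	 * The 256-bit ISR is laid out as eight 32-bit registers spaced
	 * 0x10 apart; (vector & ~0x1f) >> 1 is the offset of the word
	 * holding our vector's bit.
	 */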
	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
		ack_APIC_irq();

	/* see sw-dev-man vol 3, chapter 7.4.13.5 */
	printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should never happen.\n",
			smp_processor_id());
}

/*
 * This interrupt should never happen with our APIC/SMP architecture
 */

asmlinkage void smp_error_interrupt(void)
{
	unsigned long v, v1;

	/* First tickle the hardware, only then report what went on. -- REW */
	v = apic_read(APIC_ESR);
	apic_write(APIC_ESR, 0);
	v1 = apic_read(APIC_ESR);
	ack_APIC_irq();
	atomic_inc(&irq_err_count);

	/* Here is what the APIC error bits mean:
	   0: Send CS error
	   1: Receive CS error
	   2: Send accept error
	   3: Receive accept error
	   4: Reserved
	   5: Send illegal vector
	   6: Received illegal vector
	   7: Illegal register address
	*/
	printk (KERN_ERR "APIC error on CPU%d: %02lx(%02lx)\n",
	        smp_processor_id(), v, v1);
}

/*
 * This initializes the IO-APIC and APIC hardware if this is
 * a UP kernel.
 */
int __init APIC_init_uniprocessor (void)
{
	if (enable_local_apic < 0)
		clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);

	if (!smp_found_config && !cpu_has_apic)
		return -1;

	/*
	 * Complain if the BIOS pretends there is one.
	 */
	if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_physical_apicid);
		return -1;
	}

	verify_local_APIC();

	connect_bsp_APIC();

	phys_cpu_present_map = 1 << boot_cpu_physical_apicid;

	apic_pm_init2();

	setup_local_APIC();

	if (nmi_watchdog == NMI_LOCAL_APIC)
		check_nmi_watchdog();
#ifdef CONFIG_X86_IO_APIC
	if (smp_found_config)
		if (!skip_ioapic_setup && nr_ioapics)
			setup_IO_APIC();
#endif
	setup_APIC_clocks();

	return 0;
}