1 /*
2  *	Intel Multiprocessor Specification 1.1 and 1.4
3  *	compliant MP-table parsing routines.
4  *
5  *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
6  *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
7  *
8  *	Fixes
9  *		Erich Boleyn	:	MP v1.4 and additional changes.
10  *		Alan Cox	:	Added EBDA scanning
11  *		Ingo Molnar	:	various cleanups and rewrites
12  *		Maciej W. Rozycki:	Bits for default MP configurations
13  *		Paul Diefenbaugh:	Added full ACPI support
14  */
15 
16 #include <linux/mm.h>
17 #include <linux/irq.h>
18 #include <linux/init.h>
19 #include <linux/acpi.h>
20 #include <linux/delay.h>
21 #include <linux/config.h>
22 #include <linux/bootmem.h>
23 #include <linux/smp_lock.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/mc146818rtc.h>
26 
27 #include <asm/smp.h>
28 #include <asm/acpi.h>
29 #include <asm/mtrr.h>
30 #include <asm/mpspec.h>
31 #include <asm/pgalloc.h>
32 #include <asm/smpboot.h>
33 #include <asm/io_apic.h>
34 
35 /* Have we found an MP table */
36 int smp_found_config;
37 #ifdef	CONFIG_SMP
38 extern unsigned int max_cpus;
39 #endif
40 
41 /*
42  * Various Linux-internal data structures created from the
43  * MP-table.
44  */
45 int apic_version [MAX_APICS];
46 int quad_local_to_mp_bus_id [NR_CPUS/4][4];
47 int mp_current_pci_id;
48 int *mp_bus_id_to_type;
49 int *mp_bus_id_to_node;
50 int *mp_bus_id_to_local;
51 int *mp_bus_id_to_pci_bus;
52 int max_mp_busses;
53 int max_irq_sources;
54 
55 /* I/O APIC entries */
56 struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
57 
58 /* MP IRQ source entries */
59 struct mpc_config_intsrc *mp_irqs;
60 
61 /* # of MP IRQ source entries */
62 int mp_irq_entries;
63 
64 int nr_ioapics;
65 
66 int pic_mode;
67 unsigned long mp_lapic_addr;
68 
69 /* Processor that is doing the boot up */
70 unsigned int boot_cpu_physical_apicid = -1U;
71 unsigned int boot_cpu_logical_apicid = -1U;
72 /* Internal processor count */
73 static unsigned int num_processors;
74 
75 /* Bitmask of physically existing CPUs */
76 unsigned long phys_cpu_present_map;
77 unsigned long logical_cpu_present_map;
78 
79 #ifdef CONFIG_X86_CLUSTERED_APIC
80 unsigned char esr_disable = 0;
81 unsigned char clustered_apic_mode = CLUSTERED_APIC_NONE;
82 unsigned int apic_broadcast_id = APIC_BROADCAST_ID_APIC;
83 #endif
84 unsigned char raw_phys_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
85 
86 /*
87  * Intel MP BIOS table parsing routines:
88  */
89 
90 #ifndef CONFIG_X86_VISWS_APIC
91 /*
92  * Checksum an MP configuration block.
93  */
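/*
 * Per the MP specification, every byte of a table (including the
 * checksum byte itself) must sum to zero modulo 256, so callers treat
 * a non-zero return from mpf_checksum() as a corrupt table.
 */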
94 
95 static int __init mpf_checksum(unsigned char *mp, int len)
96 {
97 	int sum = 0;
98 
99 	while (len--)
100 		sum += *mp++;
101 
102 	return sum & 0xFF;
103 }
104 
105 /*
106  * Processor encoding in an MP configuration block
107  */
108 
109 static char __init *mpc_family(int family,int model)
110 {
111 	static char n[32];
112 	static char *model_defs[]=
113 	{
114 		"80486DX","80486DX",
115 		"80486SX","80486DX/2 or 80487",
116 		"80486SL","80486SX/2",
117 		"Unknown","80486DX/2-WB",
118 		"80486DX/4","80486DX/4-WB"
119 	};
120 
121 	switch (family) {
122 		case 0x04:
123 			if (model < 10)
124 				return model_defs[model];
125 			break;
126 
127 		case 0x05:
128 			return("Pentium(tm)");
129 
130 		case 0x06:
131 			return("Pentium(tm) Pro");
132 
133 		case 0x0F:
134 			if (model == 0x00)
135 				return("Pentium 4(tm)");
136 			if (model == 0x01)
137 				return("Pentium 4(tm)");
138 			if (model == 0x02)
139 				return("Pentium 4(tm) XEON(tm)");
140 			if (model == 0x0F)
141 				return("Special controller");
142 	}
143 	sprintf(n,"Unknown CPU [%d:%d]",family, model);
144 	return n;
145 }
146 
147 /*
148  * Have to match translation table entries to main table entries by counter
149  * hence the mpc_record variable .... can't see a less disgusting way of
150  * doing this ....
151  */
152 
153 static int mpc_record;
154 static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata;
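/*
 * mpc_record is reset before each pass and advanced once per OEM
 * translation entry in smp_read_mpc_oem() and once per main-table
 * entry in smp_read_mpc(), so translation_table[N] (stashed by
 * MP_translation_info()) lines up with main-table entry N when the
 * processor and bus handlers look it up.
 */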
155 
156 void __init MP_processor_info (struct mpc_config_processor *m)
157 {
158  	int ver, quad, logical_apicid;
159 
160 	if (!(m->mpc_cpuflag & CPU_ENABLED))
161 		return;
162 
163 	logical_apicid = m->mpc_apicid;
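	/*
	 * NUMA-Q encodes the quad number in the high nibble of the logical
	 * APIC ID; within a quad, physical APIC ID 0 maps to 1 and non-zero
	 * IDs are shifted left by one bit, as computed below.
	 */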
164 	if (clustered_apic_mode == CLUSTERED_APIC_NUMAQ) {
165 		quad = translation_table[mpc_record]->trans_quad;
166 		logical_apicid = (quad << 4) +
167 			(m->mpc_apicid ? m->mpc_apicid << 1 : 1);
168 		printk("Processor #%d %s APIC version %d (quad %d, apic %d)\n",
169 			m->mpc_apicid,
170 			mpc_family((m->mpc_cpufeature & CPU_FAMILY_MASK)>>8 ,
171 				   (m->mpc_cpufeature & CPU_MODEL_MASK)>>4),
172 			m->mpc_apicver, quad, logical_apicid);
173 	} else {
174 		printk("Processor #%d %s APIC version %d\n",
175 			m->mpc_apicid,
176 			mpc_family((m->mpc_cpufeature & CPU_FAMILY_MASK)>>8 ,
177 				   (m->mpc_cpufeature & CPU_MODEL_MASK)>>4),
178 			m->mpc_apicver);
179 	}
180 
181 	if (m->mpc_featureflag&(1<<0))
182 		Dprintk("    Floating point unit present.\n");
183 	if (m->mpc_featureflag&(1<<7))
184 		Dprintk("    Machine Exception supported.\n");
185 	if (m->mpc_featureflag&(1<<8))
186 		Dprintk("    64 bit compare & exchange supported.\n");
187 	if (m->mpc_featureflag&(1<<9))
188 		Dprintk("    Internal APIC present.\n");
189 	if (m->mpc_featureflag&(1<<11))
190 		Dprintk("    SEP present.\n");
191 	if (m->mpc_featureflag&(1<<12))
192 		Dprintk("    MTRR  present.\n");
193 	if (m->mpc_featureflag&(1<<13))
194 		Dprintk("    PGE  present.\n");
195 	if (m->mpc_featureflag&(1<<14))
196 		Dprintk("    MCA  present.\n");
197 	if (m->mpc_featureflag&(1<<15))
198 		Dprintk("    CMOV  present.\n");
199 	if (m->mpc_featureflag&(1<<16))
200 		Dprintk("    PAT  present.\n");
201 	if (m->mpc_featureflag&(1<<17))
202 		Dprintk("    PSE  present.\n");
203 	if (m->mpc_featureflag&(1<<18))
204 		Dprintk("    PSN  present.\n");
205 	if (m->mpc_featureflag&(1<<19))
206 		Dprintk("    Cache Line Flush Instruction present.\n");
207 	/* 20 Reserved */
208 	if (m->mpc_featureflag&(1<<21))
209 		Dprintk("    Debug Trace and EMON Store present.\n");
210 	if (m->mpc_featureflag&(1<<22))
211 		Dprintk("    ACPI Thermal Throttle Registers  present.\n");
212 	if (m->mpc_featureflag&(1<<23))
213 		Dprintk("    MMX  present.\n");
214 	if (m->mpc_featureflag&(1<<24))
215 		Dprintk("    FXSR  present.\n");
216 	if (m->mpc_featureflag&(1<<25))
217 		Dprintk("    XMM  present.\n");
218 	if (m->mpc_featureflag&(1<<26))
219 		Dprintk("    Willamette New Instructions  present.\n");
220 	if (m->mpc_featureflag&(1<<27))
221 		Dprintk("    Self Snoop  present.\n");
222 	if (m->mpc_featureflag&(1<<28))
223 		Dprintk("    HT  present.\n");
224 	if (m->mpc_featureflag&(1<<29))
225 		Dprintk("    Thermal Monitor present.\n");
226 	/* 30, 31 Reserved */
227 
228 
229 	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
230 		Dprintk("    Bootup CPU\n");
231 		boot_cpu_physical_apicid = m->mpc_apicid;
232 		boot_cpu_logical_apicid = logical_apicid;
233 	}
234 
235 #ifdef	CONFIG_SMP
236 	if (num_processors >= NR_CPUS) {
237 		printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
238 			" Processor ignored.\n", NR_CPUS);
239 		return;
240 	}
241 	if (num_processors >= max_cpus) {
242 		printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
243 			" Processor ignored.\n", max_cpus);
244 		return;
245 	}
246 #endif
247 
248 	num_processors++;
249 
250 	if (m->mpc_apicid >= MAX_APICS) {
251 		printk("Processor #%d INVALID. (Max ID: %d).\n",
252 			m->mpc_apicid, MAX_APICS - 1);
253 		--num_processors;
254 		return;
255 	}
256 	ver = m->mpc_apicver;
257 
258 	logical_cpu_present_map |= 1 << (num_processors-1);
259  	phys_cpu_present_map |= apicid_to_phys_cpu_present(m->mpc_apicid);
260 
261 	/*
262 	 * Validate version
263 	 */
264 	if (ver == 0x0) {
265 		printk("BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
266 		ver = 0x10;
267 	}
268 	apic_version[m->mpc_apicid] = ver;
269 	raw_phys_apicid[num_processors - 1] = m->mpc_apicid;
270 }
271 
272 static void __init MP_bus_info (struct mpc_config_bus *m)
273 {
274 	char str[7];
275 	int quad;
276 
277 	memcpy(str, m->mpc_bustype, 6);
278 	str[6] = 0;
279 
280 	if (clustered_apic_mode == CLUSTERED_APIC_NUMAQ) {
281 		quad = translation_table[mpc_record]->trans_quad;
282 		mp_bus_id_to_node[m->mpc_busid] = quad;
283 		mp_bus_id_to_local[m->mpc_busid] = translation_table[mpc_record]->trans_local;
284 		quad_local_to_mp_bus_id[quad][translation_table[mpc_record]->trans_local] = m->mpc_busid;
285 		printk("Bus #%d is %s (node %d)\n", m->mpc_busid, str, quad);
286 	} else {
287 		Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
288 	}
289 
290 	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
291 		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
292 	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
293 		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
294 	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
295 		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
296 		mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
297 		mp_current_pci_id++;
298 	} else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
299 		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
300 	} else {
301 		printk("Unknown bustype %s - ignoring\n", str);
302 	}
303 }
304 
305 static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
306 {
307 	if (!(m->mpc_flags & MPC_APIC_USABLE))
308 		return;
309 
310 	printk("I/O APIC #%d Version %d at 0x%lX.\n",
311 		m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
312 	if (nr_ioapics >= MAX_IO_APICS) {
313 		printk("Max # of I/O APICs (%d) exceeded (found %d).\n",
314 			MAX_IO_APICS, nr_ioapics);
315 		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
316 	}
317 	if (!m->mpc_apicaddr) {
318 		printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
319 			" found in MP table, skipping!\n");
320 		return;
321 	}
322 	mp_ioapics[nr_ioapics] = *m;
323 	nr_ioapics++;
324 }
325 
326 static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
327 {
328 	mp_irqs [mp_irq_entries] = *m;
329 	Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
330 		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
331 			m->mpc_irqtype, m->mpc_irqflag & 3,
332 			(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
333 			m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
334 	if (++mp_irq_entries == max_irq_sources)
335 		panic("Max # of irq sources exceeded!!\n");
336 }
337 
338 static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
339 {
340 	Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
341 		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
342 			m->mpc_irqtype, m->mpc_irqflag & 3,
343 			(m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
344 			m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
345 	/*
346 	 * Well it seems all SMP boards in existence
347 	 * use ExtINT/LVT1 == LINT0 and
348 	 * NMI/LVT2 == LINT1 - the following check
349 	 * will show us if this assumption is false.
350 	 * Until then we do not have to add baggage.
351 	 */
352 	if ((m->mpc_irqtype == mp_ExtINT) &&
353 		(m->mpc_destapiclint != 0))
354 			BUG();
355 	if ((m->mpc_irqtype == mp_NMI) &&
356 		(m->mpc_destapiclint != 1))
357 			BUG();
358 }
359 
360 static void __init MP_translation_info (struct mpc_config_translation *m)
361 {
362 	printk("Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);
363 
364 	if (mpc_record >= MAX_MPC_ENTRY)
365 		printk("MAX_MPC_ENTRY exceeded!\n");
366 	else
367 		translation_table[mpc_record] = m; /* stash this for later */
368 	if (m->trans_quad+1 > numnodes)
369 		numnodes = m->trans_quad+1;
370 }
371 
372 /*
373  * Read/parse the MPC oem tables
374  */
375 
376 static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
377 	unsigned short oemsize)
378 {
379 	int count = sizeof (*oemtable); /* the header size */
380 	unsigned char *oemptr = ((unsigned char *)oemtable)+count;
381 
382 	printk("Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
383 	if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
384 	{
385 		printk("SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
386 			oemtable->oem_signature[0],
387 			oemtable->oem_signature[1],
388 			oemtable->oem_signature[2],
389 			oemtable->oem_signature[3]);
390 		return;
391 	}
392 	if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
393 	{
394 		printk("SMP oem mptable: checksum error!\n");
395 		return;
396 	}
397 	while (count < oemtable->oem_length) {
398 		switch (*oemptr) {
399 			case MP_TRANSLATION:
400 			{
401 				struct mpc_config_translation *m=
402 					(struct mpc_config_translation *)oemptr;
403 				MP_translation_info(m);
404 				oemptr += sizeof(*m);
405 				count += sizeof(*m);
406 				++mpc_record;
407 				break;
408 			}
409 			default:
410 			{
411 				printk("Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
412 				return;
413 			}
414 		}
415 	}
416 }
417 
418 /*
419  * Read/parse the MPC
420  */
421 
422 static int __init smp_read_mpc(struct mp_config_table *mpc)
423 {
424 	char oem[16], prod[14];
425 	int count=sizeof(*mpc);
426 	unsigned char *mpt=((unsigned char *)mpc)+count;
427 	int num_bus = 0;
428 	int num_irq = 0;
429 	unsigned char *bus_data;
430 
431 	if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
432 		panic("SMP mptable: bad signature [%c%c%c%c]!\n",
433 			mpc->mpc_signature[0],
434 			mpc->mpc_signature[1],
435 			mpc->mpc_signature[2],
436 			mpc->mpc_signature[3]);
437 		return 0;
438 	}
439 	if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
440 		panic("SMP mptable: checksum error!\n");
441 		return 0;
442 	}
443 	if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
444 		printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
445 			mpc->mpc_spec);
446 		return 0;
447 	}
448 	if (!mpc->mpc_lapic) {
449 		printk(KERN_ERR "SMP mptable: null local APIC address!\n");
450 		return 0;
451 	}
452 	memcpy(oem,mpc->mpc_oem,8);
453 	oem[8]=0;
454 	printk("OEM ID: %s ",oem);
455 
456 	memcpy(prod,mpc->mpc_productid,12);
457 	prod[12]=0;
458 	printk("Product ID: %s ",prod);
459 
460 	detect_clustered_apic(oem, prod);
461 
462 	printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
463 
464 	/*
465 	 * Save the local APIC address (it might be non-default) -- but only
466 	 * if we're not using ACPI.
467 	 */
468 	if (!acpi_lapic)
469 		mp_lapic_addr = mpc->mpc_lapic;
470 
471 	if ((clustered_apic_mode == CLUSTERED_APIC_NUMAQ) && mpc->mpc_oemptr) {
472 		/* We need to process the oem mpc tables to tell us which quad things are in ... */
473 		mpc_record = 0;
474 		smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr, mpc->mpc_oemsize);
475 		mpc_record = 0;
476 	}
477 
478 	/* Pre-scan to determine the number of bus and
479 	 * interrupt records we have
480 	 */
481 	while (count < mpc->mpc_length) {
482 		switch (*mpt) {
483 			case MP_PROCESSOR:
484 				mpt += sizeof(struct mpc_config_processor);
485 				count += sizeof(struct mpc_config_processor);
486 				break;
487 			case MP_BUS:
488 				++num_bus;
489 				mpt += sizeof(struct mpc_config_bus);
490 				count += sizeof(struct mpc_config_bus);
491 				break;
492 			case MP_INTSRC:
493 				++num_irq;
494 				mpt += sizeof(struct mpc_config_intsrc);
495 				count += sizeof(struct mpc_config_intsrc);
496 				break;
497 			case MP_IOAPIC:
498 				mpt += sizeof(struct mpc_config_ioapic);
499 				count += sizeof(struct mpc_config_ioapic);
500 				break;
501 			case MP_LINTSRC:
502 				mpt += sizeof(struct mpc_config_lintsrc);
503 				count += sizeof(struct mpc_config_lintsrc);
504 				break;
505 			default:
506 				count = mpc->mpc_length;
507 				break;
508 		}
509 	}
510 	/*
511 	 * Paranoia: Allocate one extra of both the number of busses and number
512 	 * of irqs, and make sure that we have at least 4 interrupts per PCI
513 	 * slot.  But some machines do not report very many busses, so we need
514 	 * to fall back on the older defaults.
515 	 */
516 	++num_bus;
517 	max_mp_busses = max(num_bus, MAX_MP_BUSSES);
518 	if (num_irq < (4 * max_mp_busses))
519 		num_irq = 4 * num_bus;	/* 4 intr/PCI slot */
520 	++num_irq;
521 	max_irq_sources = max(num_irq, MAX_IRQ_SOURCES);
522 
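	/*
	 * Carve one bootmem allocation into the four per-bus int arrays
	 * (type, node, local and pci_bus) followed by the mp_irqs[] array.
	 */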
523 	count = (max_mp_busses * sizeof(int)) * 4;
524 	count += (max_irq_sources * sizeof(struct mpc_config_intsrc));
525 	bus_data = alloc_bootmem(count);
526 	if (!bus_data) {
527 		printk(KERN_ERR "SMP mptable: out of memory!\n");
528 		return 0;
529 	}
530 	mp_bus_id_to_type = (int *)&bus_data[0];
531 	mp_bus_id_to_node = (int *)&bus_data[(max_mp_busses * sizeof(int))];
532 	mp_bus_id_to_local = (int *)&bus_data[(max_mp_busses * sizeof(int)) * 2];
533 	mp_bus_id_to_pci_bus = (int *)&bus_data[(max_mp_busses * sizeof(int)) * 3];
534 	mp_irqs = (struct mpc_config_intsrc *)&bus_data[(max_mp_busses * sizeof(int)) * 4];
535 	memset(mp_bus_id_to_pci_bus, -1, max_mp_busses * sizeof(int));
536 
537 	/*
538 	 *	Now process the configuration blocks.
539 	 */
540 	count = sizeof(*mpc);
541 	mpt = ((unsigned char *)mpc)+count;
542 	while (count < mpc->mpc_length) {
543 		switch(*mpt) {
544 			case MP_PROCESSOR:
545 			{
546 				struct mpc_config_processor *m=
547 					(struct mpc_config_processor *)mpt;
548 				/* ACPI may have already provided this data */
549 				if (!acpi_lapic)
550 					MP_processor_info(m);
551 				mpt += sizeof(*m);
552 				count += sizeof(*m);
553 				break;
554 			}
555 			case MP_BUS:
556 			{
557 				struct mpc_config_bus *m=
558 					(struct mpc_config_bus *)mpt;
559 				MP_bus_info(m);
560 				mpt += sizeof(*m);
561 				count += sizeof(*m);
562 				break;
563 			}
564 			case MP_IOAPIC:
565 			{
566 				struct mpc_config_ioapic *m=
567 					(struct mpc_config_ioapic *)mpt;
568 				MP_ioapic_info(m);
569 				mpt+=sizeof(*m);
570 				count+=sizeof(*m);
571 				break;
572 			}
573 			case MP_INTSRC:
574 			{
575 				struct mpc_config_intsrc *m=
576 					(struct mpc_config_intsrc *)mpt;
577 
578 				MP_intsrc_info(m);
579 				mpt+=sizeof(*m);
580 				count+=sizeof(*m);
581 				break;
582 			}
583 			case MP_LINTSRC:
584 			{
585 				struct mpc_config_lintsrc *m=
586 					(struct mpc_config_lintsrc *)mpt;
587 				MP_lintsrc_info(m);
588 				mpt+=sizeof(*m);
589 				count+=sizeof(*m);
590 				break;
591 			}
592 			default:
593 			{
594 				count = mpc->mpc_length;
595 				break;
596 			}
597 		}
598 		++mpc_record;
599 	}
600 
601 
602 	printk("Enabling APIC mode: ");
603 	if(clustered_apic_mode == CLUSTERED_APIC_NUMAQ)
604 		printk("Clustered Logical.	");
605 	else if(clustered_apic_mode == CLUSTERED_APIC_XAPIC)
606 		printk("Physical.	");
607 	else
608 		printk("Flat.	");
609 	printk("Using %d I/O APICs\n",nr_ioapics);
610 
611 	if (!num_processors)
612 		printk(KERN_ERR "SMP mptable: no processors registered!\n");
613 	return num_processors;
614 }
615 
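/*
 * The ELCR (edge/level control registers) of ELCR-capable chipsets sit
 * at I/O ports 0x4d0/0x4d1, one bit per ISA IRQ; a set bit means the
 * IRQ is level-triggered.
 */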
616 static int __init ELCR_trigger(unsigned int irq)
617 {
618 	unsigned int port;
619 
620 	port = 0x4d0 + (irq >> 3);
621 	return (inb(port) >> (irq & 7)) & 1;
622 }
623 
624 static void __init construct_default_ioirq_mptable(int mpc_default_type)
625 {
626 	struct mpc_config_intsrc intsrc;
627 	int i;
628 	int ELCR_fallback = 0;
629 
630 	intsrc.mpc_type = MP_INTSRC;
631 	intsrc.mpc_irqflag = 0;			/* conforming */
632 	intsrc.mpc_srcbus = 0;
633 	intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
634 
635 	intsrc.mpc_irqtype = mp_INT;
636 
637 	/*
638 	 *  If true, we have an ISA/PCI system with no IRQ entries
639 	 *  in the MP table. To prevent the PCI interrupts from being set up
640 	 *  incorrectly, we try to use the ELCR. The sanity check to see if
641 	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
642 	 *  never be level sensitive, so we simply see if the ELCR agrees.
643 	 *  If it does, we assume it's valid.
644 	 */
645 	if (mpc_default_type == 5) {
646 		printk("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
647 
648 		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
649 			printk("ELCR contains invalid data... not using ELCR\n");
650 		else {
651 			printk("Using ELCR to identify PCI interrupts\n");
652 			ELCR_fallback = 1;
653 		}
654 	}
655 
656 	for (i = 0; i < 16; i++) {
657 		switch (mpc_default_type) {
658 		case 2:
659 			if (i == 0 || i == 13)
660 				continue;	/* IRQ0 & IRQ13 not connected */
661 			/* fall through */
662 		default:
663 			if (i == 2)
664 				continue;	/* IRQ2 is never connected */
665 		}
666 
667 		if (ELCR_fallback) {
668 			/*
669 			 *  If the ELCR indicates a level-sensitive interrupt, we
670 			 *  copy that information over to the MP table in the
671 			 *  irqflag field (level sensitive, active high polarity).
672 			 */
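			/*
			 * irqflag 13 == trigger 11b (level) in bits 3:2 plus
			 * polarity 01b (active high) in bits 1:0, matching the
			 * decode in MP_intsrc_info().
			 */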
673 			if (ELCR_trigger(i))
674 				intsrc.mpc_irqflag = 13;
675 			else
676 				intsrc.mpc_irqflag = 0;
677 		}
678 
679 		intsrc.mpc_srcbusirq = i;
680 		intsrc.mpc_dstirq = i ? i : 2;		/* IRQ0 to INTIN2 */
681 		MP_intsrc_info(&intsrc);
682 	}
683 
684 	intsrc.mpc_irqtype = mp_ExtINT;
685 	intsrc.mpc_srcbusirq = 0;
686 	intsrc.mpc_dstirq = 0;				/* 8259A to INTIN0 */
687 	MP_intsrc_info(&intsrc);
688 }
689 
690 static inline void __init construct_default_ISA_mptable(int mpc_default_type)
691 {
692 	struct mpc_config_processor processor;
693 	struct mpc_config_bus bus;
694 	struct mpc_config_ioapic ioapic;
695 	struct mpc_config_lintsrc lintsrc;
696 	int linttypes[2] = { mp_ExtINT, mp_NMI };
697 	int i;
698 	struct {
699 		int mp_bus_id_to_type[MAX_MP_BUSSES];
700 		int mp_bus_id_to_node[MAX_MP_BUSSES];
701 		int mp_bus_id_to_local[MAX_MP_BUSSES];
702 		int mp_bus_id_to_pci_bus[MAX_MP_BUSSES];
703 		struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
704 	} *bus_data;
705 
706 	bus_data = alloc_bootmem(sizeof(*bus_data));
707 	if (!bus_data)
708 		panic("SMP mptable: out of memory!\n");
709 	mp_bus_id_to_type = bus_data->mp_bus_id_to_type;
710 	mp_bus_id_to_node = bus_data->mp_bus_id_to_node;
711 	mp_bus_id_to_local = bus_data->mp_bus_id_to_local;
712 	mp_bus_id_to_pci_bus = bus_data->mp_bus_id_to_pci_bus;
713 	mp_irqs = bus_data->mp_irqs;
714 	for (i = 0; i < MAX_MP_BUSSES; ++i)
715 		mp_bus_id_to_pci_bus[i] = -1;
716 
717 	/*
718 	 * local APIC has default address
719 	 */
720 	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
721 
722 	/*
723 	 * 2 CPUs, numbered 0 & 1.
724 	 */
725 	processor.mpc_type = MP_PROCESSOR;
726 	/* Either an integrated APIC or a discrete 82489DX. */
727 	processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
728 	processor.mpc_cpuflag = CPU_ENABLED;
729 	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
730 				   (boot_cpu_data.x86_model << 4) |
731 				   boot_cpu_data.x86_mask;
732 	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
733 	processor.mpc_reserved[0] = 0;
734 	processor.mpc_reserved[1] = 0;
735 	for (i = 0; i < 2; i++) {
736 		processor.mpc_apicid = i;
737 		MP_processor_info(&processor);
738 	}
739 
740 	bus.mpc_type = MP_BUS;
741 	bus.mpc_busid = 0;
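	/*
	 * MP spec default configurations: types 1 and 5 use an ISA bus,
	 * types 2, 3 and 6 EISA, types 4 and 7 MCA; types 5-7 additionally
	 * carry a PCI bus, added by the mpc_default_type > 4 check below.
	 */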
742 	switch (mpc_default_type) {
743 		default:
744 			printk("???\nUnknown standard configuration %d\n",
745 				mpc_default_type);
746 			/* fall through */
747 		case 1:
748 		case 5:
749 			memcpy(bus.mpc_bustype, "ISA   ", 6);
750 			break;
751 		case 2:
752 		case 6:
753 		case 3:
754 			memcpy(bus.mpc_bustype, "EISA  ", 6);
755 			break;
756 		case 4:
757 		case 7:
758 			memcpy(bus.mpc_bustype, "MCA   ", 6);
759 	}
760 	MP_bus_info(&bus);
761 	if (mpc_default_type > 4) {
762 		bus.mpc_busid = 1;
763 		memcpy(bus.mpc_bustype, "PCI   ", 6);
764 		MP_bus_info(&bus);
765 	}
766 
767 	ioapic.mpc_type = MP_IOAPIC;
768 	ioapic.mpc_apicid = 2;
769 	ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
770 	ioapic.mpc_flags = MPC_APIC_USABLE;
771 	ioapic.mpc_apicaddr = 0xFEC00000;
772 	MP_ioapic_info(&ioapic);
773 
774 	/*
775 	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
776 	 */
777 	construct_default_ioirq_mptable(mpc_default_type);
778 
779 	lintsrc.mpc_type = MP_LINTSRC;
780 	lintsrc.mpc_irqflag = 0;		/* conforming */
781 	lintsrc.mpc_srcbusid = 0;
782 	lintsrc.mpc_srcbusirq = 0;
783 	lintsrc.mpc_destapic = MP_APIC_ALL;
784 	for (i = 0; i < 2; i++) {
785 		lintsrc.mpc_irqtype = linttypes[i];
786 		lintsrc.mpc_destapiclint = i;
787 		MP_lintsrc_info(&lintsrc);
788 	}
789 }
790 
791 static struct intel_mp_floating *mpf_found;
792 
793 /*
794  * Scan the memory blocks for an SMP configuration block.
795  */
796 void __init get_smp_config (void)
797 {
798 	struct intel_mp_floating *mpf = mpf_found;
799 
800 	/*
801 	 * ACPI may be used to obtain the entire SMP configuration or just to
802 	 * enumerate/configure processors (CONFIG_ACPI_HT_ONLY).  Note that
803 	 * ACPI supports both logical (e.g. Hyper-Threading) and physical
804 	 * processors, whereas MPS only supports physical.
805 	 */
806 	if (acpi_lapic && acpi_ioapic) {
807 		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
808 		return;
809 	}
810 	else if (acpi_lapic)
811 		printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
812 
813 	printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
814 	if (mpf->mpf_feature2 & (1<<7)) {
815 		printk("    IMCR and PIC compatibility mode.\n");
816 		pic_mode = 1;
817 	} else {
818 		printk("    Virtual Wire compatibility mode.\n");
819 		pic_mode = 0;
820 	}
821 
822 	/*
823 	 * Now see if we need to read further.
824 	 */
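	/*
	 * A non-zero mpf_feature1 byte selects one of the MP spec default
	 * configurations; otherwise mpf_physptr points at a full MP
	 * configuration table (one of the two must be present).
	 */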
825 	if (mpf->mpf_feature1 != 0) {
826 
827 		printk("Default MP configuration #%d\n", mpf->mpf_feature1);
828 		construct_default_ISA_mptable(mpf->mpf_feature1);
829 
830 	} else if (mpf->mpf_physptr) {
831 
832 		/*
833 		 * Read the physical hardware table.  Anything here will
834 		 * override the defaults.
835 		 */
836 		if (!smp_read_mpc((void *)mpf->mpf_physptr)) {
837 			smp_found_config = 0;
838 			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
839 			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
840 			return;
841 		}
842 		/*
843 		 * If there are no explicit MP IRQ entries, then we are
844 		 * broken.  We set up most of the low 16 IO-APIC pins to
845 		 * ISA defaults and hope it will work.
846 		 */
847 		if (!mp_irq_entries) {
848 			struct mpc_config_bus bus;
849 
850 			printk("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
851 
852 			bus.mpc_type = MP_BUS;
853 			bus.mpc_busid = 0;
854 			memcpy(bus.mpc_bustype, "ISA   ", 6);
855 			MP_bus_info(&bus);
856 
857 			construct_default_ioirq_mptable(0);
858 		}
859 
860 	} else
861 		BUG();
862 
863 	printk("Processors: %d\n", num_processors);
864 	/*
865 	 * Only use the first configuration found.
866 	 */
867 }
868 
869 static int __init smp_scan_config (unsigned long base, unsigned long length)
870 {
871 	unsigned long *bp = phys_to_virt(base);
872 	struct intel_mp_floating *mpf;
873 
874 	Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
875 	if (sizeof(*mpf) != 16)
876 		printk("Error: MPF size\n");
877 
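	/*
	 * The MP floating pointer structure is 16 bytes long and paragraph
	 * (16-byte) aligned: SMP_MAGIC_IDENT is its "_MP_" signature and
	 * mpf_length counts 16-byte units, hence the 16-byte stride below
	 * (bp advances by four 4-byte longs per iteration).
	 */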
878 	while (length > 0) {
879 		mpf = (struct intel_mp_floating *)bp;
880 		if ((*bp == SMP_MAGIC_IDENT) &&
881 			(mpf->mpf_length == 1) &&
882 			!mpf_checksum((unsigned char *)bp, 16) &&
883 			((mpf->mpf_specification == 1)
884 				|| (mpf->mpf_specification == 4)) ) {
885 
886 			smp_found_config = 1;
887 			printk("found SMP MP-table at %08lx\n",
888 						virt_to_phys(mpf));
889 			reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
890 			if (mpf->mpf_physptr)
891 				reserve_bootmem(mpf->mpf_physptr, PAGE_SIZE);
892 			mpf_found = mpf;
893 			return 1;
894 		}
895 		bp += 4;
896 		length -= 16;
897 	}
898 	return 0;
899 }
900 
901 void __init find_intel_smp (void)
902 {
903 	unsigned int address;
904 
905 	/*
906 	 * FIXME: Linux assumes you have 640K of base ram..
907 	 * this continues the error...
908 	 *
909 	 * 1) Scan the bottom 1K for a signature
910 	 * 2) Scan the top 1K of base RAM
911 	 * 3) Scan the 64K of bios
912 	 */
913 	if (smp_scan_config(0x0,0x400) ||
914 		smp_scan_config(639*0x400,0x400) ||
915 			smp_scan_config(0xF0000,0x10000))
916 		return;
917 	/*
918 	 * If it is an SMP machine we should know now, unless the
919 	 * configuration is in an EISA/MCA bus machine with an
920 	 * extended bios data area.
921 	 *
922 	 * there is a real-mode segmented pointer pointing to the
923 	 * 4K EBDA area at 0x40E, calculate and scan it here.
924 	 *
925 	 * NOTE! There were Linux loaders that would corrupt the EBDA
926 	 * area, and as such this kind of SMP config may be less
927 	 * trustworthy, simply because the SMP table may have been
928 	 * stomped on during early boot.  Thankfully the bootloaders
929 	 * now honour the EBDA.
930 	 */
931 
932 	address = *(unsigned short *)phys_to_virt(0x40E);
933 	address <<= 4;
934 	smp_scan_config(address, 0x1000);
935 }
936 
937 #else
938 
939 /*
940  * The Visual Workstation is Intel MP compliant in the hardware
941  * sense, but it doesn't have a BIOS(-configuration table).
942  * No problem for Linux.
943  */
944 void __init find_visws_smp(void)
945 {
946 	smp_found_config = 1;
947 
948 	phys_cpu_present_map |= 2; /* or in id 1 */
949 	apic_version[1] |= 0x10; /* integrated APIC */
950 	apic_version[0] |= 0x10;
951 
952 	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
953 }
954 
955 #endif
956 
957 /*
958  * - Intel MP Configuration Table
959  * - or SGI Visual Workstation configuration
960  */
961 void __init find_smp_config (void)
962 {
963 #ifdef CONFIG_X86_LOCAL_APIC
964 	find_intel_smp();
965 #endif
966 #ifdef CONFIG_VISWS
967 	find_visws_smp();
968 #endif
969 }
970 
971 
972 /* --------------------------------------------------------------------------
973                             ACPI-based MP Configuration
974    -------------------------------------------------------------------------- */
975 
976 #ifdef CONFIG_ACPI_BOOT
977 
978 void __init mp_register_lapic_address (
979 	u64			address)
980 {
981 	mp_lapic_addr = (unsigned long) address;
982 
983 	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
984 
985 	if (boot_cpu_physical_apicid == -1U)
986 		boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
987 
988 	Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
989 }
990 
991 
992 void __init mp_register_lapic (
993 	u8			id,
994 	u8			enabled)
995 {
996 	struct mpc_config_processor processor;
997 	int			boot_cpu = 0;
998 
999 	if (id >= MAX_APICS) {
1000 		printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
1001 			id, MAX_APICS);
1002 		return;
1003 	}
1004 
1005 	if (id == boot_cpu_physical_apicid)
1006 		boot_cpu = 1;
1007 
1008 	processor.mpc_type = MP_PROCESSOR;
1009 	processor.mpc_apicid = id;
1010 
1011 	/*
1012 	 * mp_register_lapic_address(), which is called before this
1013 	 * function, sets up the FIX_APIC_BASE fixmap.
1014 	 * Read the correct APIC version from there.
1015 	 */
1016 	processor.mpc_apicver = apic_read(APIC_LVR);
1017 
1018 	processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
1019 	processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
1020 	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
1021 		(boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
1022 	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
1023 	processor.mpc_reserved[0] = 0;
1024 	processor.mpc_reserved[1] = 0;
1025 
1026 	MP_processor_info(&processor);
1027 }
1028 
1029 #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
1030 
1031 #define MP_ISA_BUS		0
1032 #define MP_MAX_IOAPIC_PIN	127
1033 
1034 struct mp_ioapic_routing {
1035 	int			apic_id;
1036 	int			irq_start;
1037 	int			irq_end;
1038 	u32			pin_programmed[4];
1039 } mp_ioapic_routing[MAX_IO_APICS];
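/*
 * pin_programmed[] is a bitmap with one bit per IOAPIC pin (up to
 * MP_MAX_IOAPIC_PIN); mp_parse_prt() sets a pin's bit the first time it
 * is routed and skips redundant PRT entries afterwards.
 */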
1040 
1041 
1042 static int __init mp_find_ioapic (
1043 	int			irq)
1044 {
1045 	int			i = 0;
1046 
1047 	/* Find the IOAPIC that manages this IRQ. */
1048 	for (i = 0; i < nr_ioapics; i++) {
1049 		if ((irq >= mp_ioapic_routing[i].irq_start)
1050 			&& (irq <= mp_ioapic_routing[i].irq_end))
1051 			return i;
1052 	}
1053 
1054 	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for IRQ %d\n", irq);
1055 
1056 	return -1;
1057 }
1058 
1059 
1060 void __init mp_register_ioapic (
1061 	u8			id,
1062 	u32			address,
1063 	u32			irq_base)
1064 {
1065 	int			idx = 0;
1066 
1067 	if (nr_ioapics >= MAX_IO_APICS) {
1068 		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
1069 			"(found %d)\n", MAX_IO_APICS, nr_ioapics);
1070 		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
1071 	}
1072 	if (!address) {
1073 		printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
1074 			" found in MADT table, skipping!\n");
1075 		return;
1076 	}
1077 
1078 	idx = nr_ioapics++;
1079 
1080 	mp_ioapics[idx].mpc_type = MP_IOAPIC;
1081 	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
1082 	mp_ioapics[idx].mpc_apicaddr = address;
1083 
1084 	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
1085 	mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id);
1086 	mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
1087 
1088 	/*
1089 	 * Build basic IRQ lookup table to facilitate irq->io_apic lookups
1090 	 * and to prevent reprogramming of IOAPIC pins (PCI IRQs).
1091 	 */
1092 	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
1093 	mp_ioapic_routing[idx].irq_start = irq_base;
1094 	mp_ioapic_routing[idx].irq_end = irq_base +
1095 		io_apic_get_redir_entries(idx);
1096 
1097 	printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
1098 		"IRQ %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
1099 		mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
1100 		mp_ioapic_routing[idx].irq_start,
1101 		mp_ioapic_routing[idx].irq_end);
1102 
1103 	return;
1104 }
1105 
1106 
1107 /* allocate mp_irqs[] for ACPI table parsing */
1108 int __init mp_irqs_alloc(void)
1109 {
1110 	int size = (MAX_IRQ_SOURCES * sizeof(int)) * 4;
1111 
1112 	mp_irqs = (struct mpc_config_intsrc *)alloc_bootmem(size);
1113 	if (!mp_irqs) {
1114 		printk(KERN_ERR "mp_irqs_alloc(): alloc_bootmem(%d) failed!\n", size);
1115 		return -ENOMEM;
1116 	}
1117 	return 0;
1118 }
1119 
1120 
1121 void __init mp_override_legacy_irq (
1122 	u8			bus_irq,
1123 	u8			polarity,
1124 	u8			trigger,
1125 	u32			global_irq)
1126 {
1127 	struct mpc_config_intsrc intsrc;
1128 	int			ioapic = -1;
1129 	int			pin = -1;
1130 
1131 	/*
1132 	 * Convert 'global_irq' to 'ioapic.pin'.
1133 	 */
1134 	ioapic = mp_find_ioapic(global_irq);
1135 	if (ioapic < 0)
1136 		return;
1137 	pin = global_irq - mp_ioapic_routing[ioapic].irq_start;
1138 
1139 	/*
1140 	 * TBD: This check is for faulty timer entries, where the override
1141 	 *      erroneously sets the trigger to level, resulting in a HUGE
1142 	 *      increase of timer interrupts!
1143 	 */
1144 	if ((bus_irq == 0) && (trigger == 3))
1145 		trigger = 1;
1146 
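	/*
	 * mpc_irqflag packs polarity into bits 1:0 and trigger mode into
	 * bits 3:2, the same encoding decoded in MP_intsrc_info().
	 */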
1147 	intsrc.mpc_type = MP_INTSRC;
1148 	intsrc.mpc_irqtype = mp_INT;
1149 	intsrc.mpc_irqflag = (trigger << 2) | polarity;
1150 	intsrc.mpc_srcbus = MP_ISA_BUS;
1151 	intsrc.mpc_srcbusirq = bus_irq;				       /* IRQ */
1152 	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;	   /* APIC ID */
1153 	intsrc.mpc_dstirq = pin;				    /* INTIN# */
1154 
1155 	Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
1156 		intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
1157 		(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
1158 		intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
1159 
1160 	mp_irqs[mp_irq_entries] = intsrc;
1161 	if (++mp_irq_entries == MAX_IRQ_SOURCES)
1162 		panic("Max # of irq sources exceeded!\n");
1163 
1164 	return;
1165 }
1166 
1167 
1168 void __init mp_config_acpi_legacy_irqs (void)
1169 {
1170 	int			i = 0;
1171 	int			ioapic = -1;
1172 
1173 	/*
1174 	 * Initialize mp_irqs for IRQ configuration.
1175 	 */
1176 	unsigned char *bus_data;
1177 	int count;
1178 
1179 	count = (MAX_MP_BUSSES * sizeof(int)) * 4;
1180 	bus_data = alloc_bootmem(count);
1181 	if (!bus_data) {
1182 		panic("Fatal: can't allocate bus memory for ACPI legacy IRQ!");
1183 	}
1184 	mp_bus_id_to_type = (int *)&bus_data[0];
1185 	mp_bus_id_to_node = (int *)&bus_data[(MAX_MP_BUSSES * sizeof(int))];
1186 	mp_bus_id_to_local = (int *)&bus_data[(MAX_MP_BUSSES * sizeof(int)) * 2];
1187 	mp_bus_id_to_pci_bus = (int *)&bus_data[(MAX_MP_BUSSES * sizeof(int)) * 3];
1188 
1189 	for (i = 0; i < MAX_MP_BUSSES; ++i)
1190 	  mp_bus_id_to_pci_bus[i] = -1;
1191 
1192 	/*
1193 	 * Fabricate the legacy ISA bus (MP_ISA_BUS, i.e. bus #0).
1194 	 */
1195 	mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
1196 	Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
1197 
1198 	/*
1199 	 * Locate the IOAPIC that manages the ISA IRQs (0-15).
1200 	 */
1201 	ioapic = mp_find_ioapic(0);
1202 	if (ioapic < 0)
1203 		return;
1204 
1205 	/*
1206 	 * Use the default configuration for IRQs 0-15, unless
1207 	 * overridden by (MADT) interrupt source override entries.
1208 	 */
1209 	for (i = 0; i < 16; i++) {
1210 		int idx;
1211 
1212 		for (idx = 0; idx < mp_irq_entries; idx++)
1213 			if (mp_irqs[idx].mpc_srcbus == MP_ISA_BUS &&
1214 				(mp_irqs[idx].mpc_dstapic == mp_ioapics[ioapic].mpc_apicid) &&
1215 				(mp_irqs[idx].mpc_srcbusirq == i ||
1216 				mp_irqs[idx].mpc_dstirq == i))
1217 					break;
1218 		if (idx != mp_irq_entries)
1219 			continue;			  /* IRQ already used */
1220 
1221 		mp_irqs[mp_irq_entries].mpc_type = MP_INTSRC;
1222 		mp_irqs[mp_irq_entries].mpc_irqflag = 0;	/* Conforming */
1223 		mp_irqs[mp_irq_entries].mpc_srcbus = MP_ISA_BUS;
1224 		mp_irqs[mp_irq_entries].mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
1225 		mp_irqs[mp_irq_entries].mpc_irqtype = mp_INT;
1226 		mp_irqs[mp_irq_entries].mpc_srcbusirq = i;	   /* Identity mapped */
1227 		mp_irqs[mp_irq_entries].mpc_dstirq = i;
1228 
1229 		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
1230 			"%d-%d\n",
1231 			mp_irqs[mp_irq_entries].mpc_irqtype,
1232 			mp_irqs[mp_irq_entries].mpc_irqflag & 3,
1233 			(mp_irqs[mp_irq_entries].mpc_irqflag >> 2) & 3,
1234 			mp_irqs[mp_irq_entries].mpc_srcbus,
1235 			mp_irqs[mp_irq_entries].mpc_srcbusirq,
1236 			mp_irqs[mp_irq_entries].mpc_dstapic,
1237 			mp_irqs[mp_irq_entries].mpc_dstirq);
1238 
1239 		if (++mp_irq_entries == MAX_IRQ_SOURCES)
1240 			panic("Max # of irq sources exceeded!\n");
1241 	}
1242 }
1243 
1244 extern FADT_DESCRIPTOR acpi_fadt;
1245 
1246 #ifdef CONFIG_ACPI_PCI
1247 
1248 void __init mp_parse_prt (void)
1249 {
1250 	struct list_head	*node = NULL;
1251 	struct acpi_prt_entry	*entry = NULL;
1252 	int			ioapic = -1;
1253 	int			ioapic_pin = 0;
1254 	int			irq = 0;
1255 	int			idx, bit = 0;
1256 	int			edge_level = 0;
1257 	int			active_high_low = 0;
1258 
1259 	/*
1260 	 * Parse the PCI Interrupt Routing Table (PRT) and program
1261 	 * routing for all entries.
1262 	 */
1263 	list_for_each(node, &acpi_prt.entries) {
1264 		entry = list_entry(node, struct acpi_prt_entry, node);
1265 
1266 		/* Need to get irq for dynamic entry */
1267 		if (entry->link.handle) {
1268 			irq = acpi_pci_link_get_irq(entry->link.handle, entry->link.index, &edge_level, &active_high_low);
1269 			if (!irq)
1270 				continue;
1271 		}
1272 		else {
1273 			/* Hardwired IRQ. Assume PCI standard settings */
1274 			irq = entry->link.index;
1275 			edge_level = 1;
1276 			active_high_low = 1;
1277 		}
1278 
1279 		/* Don't set up the ACPI SCI because it's already set up */
1280 		if (acpi_fadt.sci_int == irq) {
1281 			entry->irq = irq; /* we still need to set entry's irq */
1282 			continue;
1283 		}
1284 
1285 		ioapic = mp_find_ioapic(irq);
1286 		if (ioapic < 0)
1287 			continue;
1288 		ioapic_pin = irq - mp_ioapic_routing[ioapic].irq_start;
1289 
1290 		/*
1291 		 * Avoid pin reprogramming.  PRTs typically include entries
1292 		 * with redundant pin->irq mappings (but unique PCI devices);
1293 		 * we only program the IOAPIC on the first.
1294 		 */
1295 		bit = ioapic_pin % 32;
1296 		idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
1297 		if (idx > 3) {
1298 			printk(KERN_ERR "Invalid reference to IOAPIC pin "
1299 				"%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
1300 				ioapic_pin);
1301 			continue;
1302 		}
1303 		if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
1304 			Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
1305 				mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
1306 			entry->irq = irq;
1307 			continue;
1308 		}
1309 
1310 		mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
1311 
1312 		if (!io_apic_set_pci_routing(ioapic, ioapic_pin, irq, edge_level, active_high_low))
1313 			entry->irq = irq;
1314 
1315 		printk(KERN_DEBUG "%02x:%02x:%02x[%c] -> %d-%d"
1316 			" -> IRQ %d %s %s\n", entry->id.segment, entry->id.bus,
1317 			entry->id.device, ('A' + entry->pin),
1318 			mp_ioapic_routing[ioapic].apic_id, ioapic_pin,
1319 			entry->irq, edge_level ? "level" : "edge",
1320 			active_high_low ? "low" : "high");
1321 
1322 	}
1323 
1324 	print_IO_APIC();
1325 
1326 	return;
1327 }
1328 
1329 #endif /*CONFIG_ACPI_PCI*/
1330 
1331 #endif /*CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER*/
1332 
1333 #endif /*CONFIG_ACPI_BOOT*/
1334