/*
 *  acpi.c - Architecture-Specific Low-Level ACPI Support
 *
 *  Copyright (C) 1999 VA Linux Systems
 *  Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
 *  Copyright (C) 2000, 2002-2003 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *  Copyright (C) 2000 Intel Corp.
 *  Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
 *  Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
 *  Copyright (C) 2001 Takayoshi Kochi <t-kouchi@cq.jp.nec.com>
 *  Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/io.h>
#include <asm/iosapic.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/numa.h>


#define PREFIX			"ACPI: "

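/*
 * The IOSAPIC routines below are referenced as weak symbols so that this
 * file still links on configurations that provide no IOSAPIC support.
 * Unresolved weak references evaluate to NULL, which is why every use is
 * guarded by an explicit pointer check.
 */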
asm (".weak iosapic_register_intr");
asm (".weak iosapic_override_isa_irq");
asm (".weak iosapic_register_platform_intr");
asm (".weak iosapic_init");
asm (".weak iosapic_system_init");
asm (".weak iosapic_version");

void (*pm_idle) (void);
void (*pm_power_off) (void);

unsigned char acpi_kbd_controller_present = 1;

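/*
 * For CONFIG_IA64_GENERIC kernels, pick the machine-vector name at boot by
 * looking at the OEM ID in the ACPI 2.0 XSDT; otherwise the name is fixed
 * at compile time by the platform config option.
 */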
const char *
acpi_get_sysname (void)
{
#ifdef CONFIG_IA64_GENERIC
	unsigned long rsdp_phys;
	struct acpi20_table_rsdp *rsdp;
	struct acpi_table_xsdt *xsdt;
	struct acpi_table_header *hdr;

	rsdp_phys = acpi_find_rsdp();
	if (!rsdp_phys) {
		printk(KERN_ERR "ACPI 2.0 RSDP not found, default to \"dig\"\n");
		return "dig";
	}

	rsdp = (struct acpi20_table_rsdp *) __va(rsdp_phys);
	if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) {
		printk(KERN_ERR "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
		return "dig";
	}

	xsdt = (struct acpi_table_xsdt *) __va(rsdp->xsdt_address);
	hdr = &xsdt->header;
	if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) {
		printk(KERN_ERR "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
		return "dig";
	}

	if (!strcmp(hdr->oem_id, "HP")) {
		return "hp";
	}
	else if (!strcmp(hdr->oem_id, "SGI")) {
		return "sn2";
	}

	return "dig";
#else
# if defined (CONFIG_IA64_HP_SIM)
	return "hpsim";
# elif defined (CONFIG_IA64_HP_ZX1)
	return "hp";
# elif defined (CONFIG_IA64_SGI_SN2)
	return "sn2";
# elif defined (CONFIG_IA64_DIG)
	return "dig";
# else
#	error Unknown platform.  Fix acpi.c.
# endif
#endif
}

#ifdef CONFIG_ACPI

struct acpi_vendor_descriptor {
	u8				guid_id;
	efi_guid_t			guid;
};

struct acpi_vendor_info {
	struct acpi_vendor_descriptor	*descriptor;
	u8				*data;
	u32				length;
};

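/*
 * acpi_walk_resources() callback: look for a vendor-specific resource whose
 * leading descriptor matches the GUID we are after, and copy out the payload
 * that follows the descriptor.
 */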
acpi_status
acpi_vendor_resource_match (struct acpi_resource *resource, void *context)
{
	struct acpi_vendor_info *info = (struct acpi_vendor_info *) context;
	struct acpi_resource_vendor *vendor;
	struct acpi_vendor_descriptor *descriptor;
	u32 length;

	if (resource->id != ACPI_RSTYPE_VENDOR)
		return AE_OK;

	vendor = (struct acpi_resource_vendor *) &resource->data;
	descriptor = (struct acpi_vendor_descriptor *) vendor->reserved;
	if (vendor->length <= sizeof(*info->descriptor) ||
	    descriptor->guid_id != info->descriptor->guid_id ||
	    efi_guidcmp(descriptor->guid, info->descriptor->guid))
		return AE_OK;

	length = vendor->length - sizeof(struct acpi_vendor_descriptor);
	info->data = acpi_os_allocate(length);
	if (!info->data)
		return AE_NO_MEMORY;

	memcpy(info->data, vendor->reserved + sizeof(struct acpi_vendor_descriptor), length);
	info->length = length;
	return AE_CTRL_TERMINATE;
}

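/*
 * Walk the _CRS resources of @obj and return the payload of the vendor
 * resource identified by @id.  The buffer is allocated with
 * acpi_os_allocate() and must be freed by the caller.
 */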
acpi_status
acpi_find_vendor_resource (acpi_handle obj, struct acpi_vendor_descriptor *id,
		u8 **data, u32 *length)
{
	struct acpi_vendor_info info;

	info.descriptor = id;
	info.data = 0;

	acpi_walk_resources(obj, METHOD_NAME__CRS, acpi_vendor_resource_match, &info);
	if (!info.data)
		return AE_NOT_FOUND;

	*data = info.data;
	*length = info.length;
	return AE_OK;
}

struct acpi_vendor_descriptor hp_ccsr_descriptor = {
	.guid_id = 2,
	.guid    = EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, 0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad)
};

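/*
 * Extract the CSR space base address and length for an HP device: the
 * vendor resource payload is 16 bytes, the base followed by the length.
 */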
acpi_status
acpi_hp_csr_space (acpi_handle obj, u64 *csr_base, u64 *csr_length)
{
	acpi_status status;
	u8 *data;
	u32 length;

	status = acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length);

	if (ACPI_FAILURE(status) || length != 16)
		return AE_NOT_FOUND;

	memcpy(csr_base, data, sizeof(*csr_base));
	memcpy(csr_length, data + 8, sizeof(*csr_length));
	acpi_os_free(data);

	return AE_OK;
}

#endif /* CONFIG_ACPI */

#ifdef CONFIG_ACPI_BOOT

#define ACPI_MAX_PLATFORM_INTERRUPTS	256

/* Array to record platform interrupt vectors for generic interrupt routing. */
int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = {
	[0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1
};

enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;

/*
 * Interrupt routing API for device drivers.  Provides interrupt vector for
 * a generic platform event.  Currently only CPEI is implemented.
 */
int
acpi_request_vector (u32 int_type)
{
	int vector = -1;

	if (int_type < ACPI_MAX_PLATFORM_INTERRUPTS) {
		/* corrected platform error interrupt */
		vector = platform_intr_list[int_type];
	} else
		printk(KERN_ERR "acpi_request_vector(): invalid interrupt type\n");
	return vector;
}

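/*
 * On ia64 the ACPI tables live in EFI-mapped memory, so "mapping" a table
 * is just a physical-to-virtual address translation.
 */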
char *
__acpi_map_table (unsigned long phys_addr, unsigned long size)
{
	return __va(phys_addr);
}

/* --------------------------------------------------------------------------
                            Boot-time Table Parsing
   -------------------------------------------------------------------------- */

static int			total_cpus __initdata;
static int			available_cpus __initdata;
struct acpi_table_madt *	acpi_madt __initdata;
static u8			has_8259;


static int __init
acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header)
{
	struct acpi_table_lapic_addr_ovr *lapic;

	lapic = (struct acpi_table_lapic_addr_ovr *) header;
	if (!lapic)
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	if (lapic->address) {
		iounmap((void *) ipi_base_addr);
		ipi_base_addr = (unsigned long) ioremap(lapic->address, 0);
	}
	return 0;
}


static int __init
acpi_parse_lsapic (acpi_table_entry_header *header)
{
	struct acpi_table_lsapic *lsapic;

	lsapic = (struct acpi_table_lsapic *) header;
	if (!lsapic)
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	printk(KERN_INFO "CPU %d (0x%04x)", total_cpus, (lsapic->id << 8) | lsapic->eid);

	if (!lsapic->flags.enabled)
		printk(" disabled");
	else if (available_cpus >= NR_CPUS)
		printk(" ignored (increase NR_CPUS)");
	else {
		printk(" enabled");
#ifdef CONFIG_SMP
		smp_boot_data.cpu_phys_id[available_cpus] = (lsapic->id << 8) | lsapic->eid;
		if (hard_smp_processor_id()
		    == (unsigned int) smp_boot_data.cpu_phys_id[available_cpus])
			printk(" (BSP)");
#endif
		++available_cpus;
	}

	printk("\n");

	total_cpus++;
	return 0;
}


static int __init
acpi_parse_lapic_nmi (acpi_table_entry_header *header)
{
	struct acpi_table_lapic_nmi *lacpi_nmi;

	lacpi_nmi = (struct acpi_table_lapic_nmi*) header;
	if (!lacpi_nmi)
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	/* TBD: Support lapic_nmi entries */
	return 0;
}


static int __init
acpi_parse_iosapic (acpi_table_entry_header *header)
{
	struct acpi_table_iosapic *iosapic;

	iosapic = (struct acpi_table_iosapic *) header;
	if (!iosapic)
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	if (iosapic_init)
		iosapic_init(iosapic->address, iosapic->global_irq_base);

	return 0;
}


static int __init
acpi_parse_plat_int_src (acpi_table_entry_header *header)
{
	struct acpi_table_plat_int_src *plintsrc;
	int vector;

	plintsrc = (struct acpi_table_plat_int_src *) header;
	if (!plintsrc)
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	if (!iosapic_register_platform_intr) {
		printk(KERN_WARNING PREFIX "No ACPI platform interrupt support\n");
		return -ENODEV;
	}

	/*
	 * Get vector assignment for this interrupt, set attributes,
	 * and program the IOSAPIC routing table.
	 */
	vector = iosapic_register_platform_intr(plintsrc->type,
						plintsrc->global_irq,
						plintsrc->iosapic_vector,
						plintsrc->eid,
						plintsrc->id,
						(plintsrc->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
						(plintsrc->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);

	platform_intr_list[plintsrc->type] = vector;
	return 0;
}


static int __init
acpi_parse_int_src_ovr (acpi_table_entry_header *header)
{
	struct acpi_table_int_src_ovr *p;

	p = (struct acpi_table_int_src_ovr *) header;
	if (!p)
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	/* Ignore if the platform doesn't support overrides */
	if (!iosapic_override_isa_irq)
		return 0;

	iosapic_override_isa_irq(p->bus_irq, p->global_irq,
				 (p->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
				 (p->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
	return 0;
}


static int __init
acpi_parse_nmi_src (acpi_table_entry_header *header)
{
	struct acpi_table_nmi_src *nmi_src;

	nmi_src = (struct acpi_table_nmi_src*) header;
	if (!nmi_src)
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	/* TBD: Support nmi_src entries */
	return 0;
}


static int __init
acpi_parse_madt (unsigned long phys_addr, unsigned long size)
{
	if (!phys_addr || !size)
		return -EINVAL;

	acpi_madt = (struct acpi_table_madt *) __va(phys_addr);

	/* remember the value for reference after free_initmem() */
#ifdef CONFIG_ITANIUM
	has_8259 = 1; /* Firmware on old Itanium systems is broken */
#else
	has_8259 = acpi_madt->flags.pcat_compat;
#endif
	if (iosapic_system_init)
		iosapic_system_init(has_8259);

	/* Get base address of IPI Message Block */

	if (acpi_madt->lapic_address)
		ipi_base_addr = (unsigned long) ioremap(acpi_madt->lapic_address, 0);

	printk(KERN_INFO PREFIX "Local APIC address 0x%lx\n", ipi_base_addr);
	return 0;
}


#ifdef CONFIG_ACPI_NUMA

#define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)

static int __initdata srat_num_cpus;			/* number of cpus */
static u32 __initdata pxm_flag[PXM_FLAG_LEN];
#define pxm_bit_set(bit)	(set_bit(bit,(void *)pxm_flag))
#define pxm_bit_test(bit)	(test_bit(bit,(void *)pxm_flag))
/* maps to convert between proximity domain and logical node ID */
int __initdata pxm_to_nid_map[MAX_PXM_DOMAINS];
int __initdata nid_to_pxm_map[NR_NODES];
struct acpi_table_slit __initdata *slit_table;

/*
 * ACPI 2.0 SLIT (System Locality Information Table)
 * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
 */
void __init
acpi_numa_slit_init (struct acpi_table_slit *slit)
{
	u32 len;

	len = sizeof(struct acpi_table_header) + 8
		+ slit->localities * slit->localities;
	if (slit->header.length != len) {
		printk(KERN_INFO "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
		       len, slit->header.length);
		memset(numa_slit, 10, sizeof(numa_slit));
		return;
	}
	slit_table = slit;
}

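/*
 * Record one SRAT processor affinity entry: remember the physical CPU id
 * and its proximity domain; the domain is remapped to a logical node id
 * later in acpi_numa_arch_fixup().
 */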
void __init
acpi_numa_processor_affinity_init (struct acpi_table_processor_affinity *pa)
{
	/* record this node in proximity bitmap */
	pxm_bit_set(pa->proximity_domain);

	node_cpuid[srat_num_cpus].phys_id = (pa->apic_id << 8) | (pa->lsapic_eid);
	/* nid should be overridden as logical node id later */
	node_cpuid[srat_num_cpus].nid = pa->proximity_domain;
	srat_num_cpus++;
}

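/*
 * Record one SRAT memory affinity entry in node_memblk[], keeping the
 * array sorted by base address.
 */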
void __init
acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma)
{
	unsigned long paddr, size, hole_size, min_hole_size;
	u8 pxm;
	struct node_memblk_s *p, *q, *pend;

	pxm = ma->proximity_domain;

	/* fill node memory chunk structure */
	paddr = ma->base_addr_hi;
	paddr = (paddr << 32) | ma->base_addr_lo;
	size = ma->length_hi;
	size = (size << 32) | ma->length_lo;

	if (num_memblks >= NR_MEMBLKS) {
		printk(KERN_ERR "Too many mem chunks in SRAT. Ignoring %ld MBytes at %lx\n",
			size/(1024*1024), paddr);
		return;
	}

	/* Ignore disabled entries */
	if (!ma->flags.enabled)
		return;

	/*
	 * When the chunk is not the first one in the node, check distance
	 * from the other chunks. When the hole is too huge ignore the chunk.
	 * This restriction should be removed when multiple chunks per node
	 * is supported.
	 */
	pend = &node_memblk[num_memblks];
	min_hole_size = 0;
	for (p = &node_memblk[0]; p < pend; p++) {
		if (p->nid != pxm)
			continue;
		if (p->start_paddr < paddr)
			hole_size = paddr - (p->start_paddr + p->size);
		else
			hole_size = p->start_paddr - (paddr + size);

		if (!min_hole_size || hole_size < min_hole_size)
			min_hole_size = hole_size;
	}

#if 0	/* test */
	if (min_hole_size) {
		if (min_hole_size > size) {
			printk(KERN_ERR "Too huge memory hole. Ignoring %ld MBytes at %lx\n",
				size/(1024*1024), paddr);
			return;
		}
	}
#endif

	/* record this node in proximity bitmap */
	pxm_bit_set(pxm);

	/* Insertion sort based on base address */
	pend = &node_memblk[num_memblks];
	for (p = &node_memblk[0]; p < pend; p++) {
		if (paddr < p->start_paddr)
			break;
	}
	if (p < pend) {
		for (q = pend - 1; q >= p; q--)
			*(q + 1) = *q;
	}
	p->start_paddr = paddr;
	p->size = size;
	p->nid = pxm;
	num_memblks++;
}

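/*
 * Once all SRAT entries have been parsed, collapse the sparse proximity
 * domains into dense logical node ids and, if a SLIT was found, copy its
 * distances into the node_distance() table.
 */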
void __init
acpi_numa_arch_fixup (void)
{
	int i, j, node_from, node_to;

	if (srat_num_cpus == 0) {
		node_cpuid[0].phys_id = hard_smp_processor_id();
		return;
	}

	/* calculate total number of nodes in system from PXM bitmap */
	numnodes = 0;		/* init total nodes in system */

	memset(pxm_to_nid_map, -1, sizeof(pxm_to_nid_map));
	memset(nid_to_pxm_map, -1, sizeof(nid_to_pxm_map));
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (pxm_bit_test(i)) {
			pxm_to_nid_map[i] = numnodes;
			nid_to_pxm_map[numnodes++] = i;
		}
	}

	/* set logical node id in memory chunk structure */
	for (i = 0; i < num_memblks; i++)
		node_memblk[i].nid = pxm_to_nid_map[node_memblk[i].nid];

	/* assign memory bank numbers for each chunk on each node */
	for (i = 0; i < numnodes; i++) {
		int bank;

		bank = 0;
		for (j = 0; j < num_memblks; j++)
			if (node_memblk[j].nid == i)
				node_memblk[j].bank = bank++;
	}

	/* set logical node id in cpu structure */
	for (i = 0; i < srat_num_cpus; i++)
		node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid];

	printk(KERN_INFO "Number of logical nodes in system = %d\n", numnodes);
	printk(KERN_INFO "Number of memory chunks in system = %d\n", num_memblks);

	if (!slit_table) return;
	memset(numa_slit, -1, sizeof(numa_slit));
	for (i = 0; i < slit_table->localities; i++) {
		if (!pxm_bit_test(i))
			continue;
		node_from = pxm_to_nid_map[i];
		for (j = 0; j < slit_table->localities; j++) {
			if (!pxm_bit_test(j))
				continue;
			node_to = pxm_to_nid_map[j];
			node_distance(node_from, node_to) =
				slit_table->entry[i*slit_table->localities + j];
		}
	}

#ifdef SLIT_DEBUG
	printk(KERN_DEBUG "ACPI 2.0 SLIT locality table:\n");
	for (i = 0; i < numnodes; i++) {
		for (j = 0; j < numnodes; j++)
			printk(KERN_DEBUG "%03d ", node_distance(i,j));
		printk("\n");
	}
#endif
}
#endif /* CONFIG_ACPI_NUMA */

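/*
 * The FADT tells us whether an i8042 keyboard controller is present and
 * which interrupt the ACPI SCI is wired to; if the SCI is not a legacy ISA
 * IRQ, route it through the IOSAPIC here.
 */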
static int __init
acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
{
	struct acpi_table_header *fadt_header;
	struct fadt_descriptor_rev2 *fadt;
	u32 sci_irq;

	if (!phys_addr || !size)
		return -EINVAL;

	fadt_header = (struct acpi_table_header *) __va(phys_addr);
	if (fadt_header->revision != 3)
		return -ENODEV;		/* Only deal with ACPI 2.0 FADT */

	fadt = (struct fadt_descriptor_rev2 *) fadt_header;

	if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
		acpi_kbd_controller_present = 0;

	sci_irq = fadt->sci_int;

	if (has_8259 && sci_irq < 16)
		return 0;	/* legacy, no setup required */

	if (!iosapic_register_intr)
		return -ENODEV;

	iosapic_register_intr(sci_irq, IOSAPIC_POL_LOW, IOSAPIC_LEVEL);
	return 0;
}


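/*
 * Locate the ACPI 2.0 RSDP via the EFI configuration table; ACPI 1.0 and
 * r0.71 tables are no longer supported.
 */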
unsigned long __init
acpi_find_rsdp (void)
{
	unsigned long rsdp_phys = 0;

	if (efi.acpi20)
		rsdp_phys = __pa(efi.acpi20);
	else if (efi.acpi)
		printk(KERN_WARNING PREFIX "v1.0/r0.71 tables no longer supported\n");
	return rsdp_phys;
}


int __init
acpi_boot_init (void)
{

	/*
	 * MADT
	 * ----
	 * Parse the Multiple APIC Description Table (MADT), if it exists.
	 * Note that this table provides platform SMP configuration
	 * information -- the successor to MPS tables.
	 */

	if (acpi_table_parse(ACPI_APIC, acpi_parse_madt) < 1) {
		printk(KERN_ERR PREFIX "Can't find MADT\n");
		goto skip_madt;
	}

	/* Local APIC */

	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr) < 0)
		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");

	if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic) < 1)
		printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries\n");

	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi) < 0)
		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");

	/* I/O APIC */

	if (acpi_table_parse_madt(ACPI_MADT_IOSAPIC, acpi_parse_iosapic) < 1)
		printk(KERN_ERR PREFIX "Error parsing MADT - no IOSAPIC entries\n");

	/* System-Level Interrupt Routing */

	if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src) < 0)
		printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n");

	if (acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr) < 0)
		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");

	if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src) < 0)
		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
  skip_madt:

	/*
	 * The FADT says whether a legacy keyboard controller is present.
	 * The FADT also contains an SCI_INT line, by which the system
	 * gets interrupts such as power and sleep button events.  If it's
	 * not on a legacy interrupt, it needs to be set up here.
	 */
	if (acpi_table_parse(ACPI_FADT, acpi_parse_fadt) < 1)
		printk(KERN_ERR PREFIX "Can't find FADT\n");

#ifdef CONFIG_SMP
	if (available_cpus == 0) {
		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
		smp_boot_data.cpu_phys_id[available_cpus] = hard_smp_processor_id();
		available_cpus = 1; /* We've got at least one of these, no? */
	}
	smp_boot_data.cpu_count = available_cpus;

	smp_build_cpu_map();
# ifdef CONFIG_NUMA
	/* If the platform did not have an SRAT table, initialize the
	 * node_cpuid table from the smp_boot_data array.  All CPUs
	 * will be on node 0.
	 */
	if (srat_num_cpus == 0) {
		int cpu, i = 1;
		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
			if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id())
				node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu];
	}
	build_cpu_to_node_map();
# endif

#endif
	/* Make boot-up look pretty */
	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
	return 0;
}

/*
 * PCI Interrupt Routing
 */

#ifdef CONFIG_PCI
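/*
 * Convert the ACPI PRT entries cached in acpi_prt into an array of
 * pci_vector_struct, one entry per (segment, bus, device, pin).  The
 * caller is responsible for freeing the array.
 */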
int __init
acpi_get_prt (struct pci_vector_struct **vectors, int *count)
{
	struct pci_vector_struct *vector;
	struct list_head *node;
	struct acpi_prt_entry *entry;
	int i = 0;

	if (!vectors || !count)
		return -EINVAL;

	*vectors = NULL;
	*count = 0;

	if (acpi_prt.count < 0) {
		printk(KERN_ERR PREFIX "No PCI interrupt routing entries\n");
		return -ENODEV;
	}

	/* Allocate vectors */

	*vectors = kmalloc(sizeof(struct pci_vector_struct) * acpi_prt.count, GFP_KERNEL);
	if (!(*vectors))
		return -ENOMEM;

	/* Convert PRT entries to IOSAPIC PCI vectors */

	vector = *vectors;

	list_for_each(node, &acpi_prt.entries) {
		entry = (struct acpi_prt_entry *)node;
		vector[i].segment = entry->id.segment;
		vector[i].bus    = entry->id.bus;
		vector[i].pci_id = ((u32) entry->id.device << 16) | 0xffff;
		vector[i].pin    = entry->pin;
		vector[i].irq    = entry->link.index;
		i++;
	}
	*count = acpi_prt.count;
	return 0;
}
#endif /* CONFIG_PCI */

/* Assume IA64 always uses an I/O SAPIC */

int __init
acpi_get_interrupt_model (int *type)
{
	if (!type)
		return -EINVAL;

	*type = ACPI_IRQ_MODEL_IOSAPIC;
	return 0;
}

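/*
 * Translate a GSI into an interrupt vector; legacy ISA IRQs bypass the
 * IOSAPIC when an 8259 is present.
 */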
int
acpi_irq_to_vector (u32 irq)
{
	if (has_8259 && irq < 16)
		return isa_irq_to_vector(irq);

	return gsi_to_vector(irq);
}

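/*
 * Program a GSI into the IOSAPIC with the requested polarity and trigger
 * mode and return the vector it was assigned.
 */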
int
acpi_register_irq (u32 gsi, u32 polarity, u32 trigger)
{
	int vector = 0;

	if (has_8259 && gsi < 16)
		return isa_irq_to_vector(gsi);

	if (!iosapic_register_intr)
		return 0;

	/* Turn it on */
	vector = iosapic_register_intr(gsi,
			(polarity == ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
			(trigger == ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
	return vector;
}

#endif /* CONFIG_ACPI_BOOT */