/*
 *  linux/arch/i386/kernel/setup.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Enhanced CPU type detection by Mike Jagdis, Patrick St. Jean
 *  and Martin Mares, November 1997.
 *
 *  Force Cyrix 6x86(MX) and M II processors to report MTRR capability
 *  and Cyrix "coma bug" recognition by
 *      Zoltán Böszörményi <zboszor@mail.externet.hu>, February 1999.
 *
 *  Force Centaur C6 processors to report MTRR capability.
 *      Bart Hartgers <bart@etpmod.phys.tue.nl>, May 1999.
 *
 *  Intel Mobile Pentium II detection fix. Sean Gilley, June 1999.
 *
 *  IDT WinChip tweaks, misc clean ups.
 *	Dave Jones <davej@suse.de>, August 1999
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *
 *  Better detection of Centaur/IDT WinChip models.
 *      Bart Hartgers <bart@etpmod.phys.tue.nl>, August 1999.
 *
 *  Memory region support
 *	David Parsons <orc@pell.chi.il.us>, July-August 1999
 *
 *  Cleaned up cache-detection code
 *	Dave Jones <davej@suse.de>, October 1999
 *
 *	Added proper L2 cache detection for Coppermine
 *	Dragan Stancevic <visitor@valinux.com>, October 1999
 *
 *  Added the original array for capability flags but forgot to credit
 *  myself :) (~1998)  Fixed/cleaned up some cpu_model_info and other stuff.
 *  	Jauder Ho <jauderho@carumba.com>, January 2000
 *
 *  Detection for Celeron Coppermine, identify_cpu() overhauled,
 *  and a few other clean ups.
 *  Dave Jones <davej@suse.de>, April 2000
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  Added proper Cascades CPU and L2 cache detection for Cascades
 *  and the other 8-way-cache happy bunch from Intel :^)
 *  Dragan Stancevic <visitor@valinux.com>, May 2000
 *
 *  Forward port AMD Duron erratum T13 from 2.2.17pre
 *  Dave Jones <davej@suse.de>, August 2000
 *
 *  Forward port lots of fixes/improvements from 2.2.18pre
 *  Cyrix III, Pentium IV support.
 *  Dave Jones <davej@suse.de>, October 2000
 *
 *  Massive cleanup of CPU detection and bug handling;
 *  Transmeta CPU detection,
 *  H. Peter Anvin <hpa@zytor.com>, November 2000
 *
 *  Added E820 sanitization routine (removes overlapping memory regions);
 *  Brian Moyle <bmoyle@mvista.com>, February 2001
 *
 *  VIA C3 Support.
 *  Dave Jones <davej@suse.de>, March 2001
 *
 *  AMD Athlon/Duron/Thunderbird bluesmoke support.
 *  Dave Jones <davej@suse.de>, April 2001.
 *
 *  CacheSize bug workaround updates for AMD, Intel & VIA Cyrix.
 *  Dave Jones <davej@suse.de>, September, October 2001.
 *
 *  Provisions for empty E820 memory regions (reported by certain BIOSes).
 *  Alex Achenbach <xela@slit.de>, December 2002.
 *
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/apm_bios.h>
#ifdef CONFIG_BLK_DEV_RAM
#include <linux/blk.h>
#endif
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/seq_file.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/module.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/cobalt.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/io_apic.h>
#include <asm/edd.h>
/*
 * Machine setup..
 */

char ignore_irq13;		/* set if exception 16 works */
struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };

unsigned long mmu_cr4_features;
EXPORT_SYMBOL(mmu_cr4_features);

/*
 * Bus types ..
 */
#ifdef CONFIG_EISA
int EISA_bus;
#endif
int MCA_bus;

/* for MCA, but anyone else can use it if they want */
unsigned int machine_id;
unsigned int machine_submodel_id;
unsigned int BIOS_revision;
unsigned int mca_pentium_flag;

/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0x10000000;

/* user-defined highmem size */
static unsigned int highmem_pages __initdata = -1;

/*
 * Setup options
 */
struct drive_info_struct { char dummy[32]; } drive_info;
struct screen_info screen_info;
struct apm_info apm_info;
struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};

struct e820map e820;

unsigned char aux_device_present;

extern void mcheck_init(struct cpuinfo_x86 *c);
extern void dmi_scan_machine(void);
extern int root_mountflags;
extern char _text, _etext, _edata, _end;

static int have_cpuid_p(void) __init;

static int disable_x86_serial_nr __initdata = 1;
static u32 disabled_x86_caps[NCAPINTS] __initdata = { 0 };

#ifdef	CONFIG_ACPI_INTERPRETER
	int acpi_disabled = 0;
#else
	int acpi_disabled = 1;
#endif
EXPORT_SYMBOL(acpi_disabled);

#ifdef	CONFIG_ACPI_BOOT
extern	int __initdata acpi_ht;
int acpi_force __initdata = 0;
extern acpi_interrupt_flags	acpi_sci_flags;
#endif

extern int blk_nohighio;

/*
 * This is set up by the setup-routine at boot-time
 */
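/*
 * These macros index into the "zero page": the boot-parameter block
 * that the 16-bit setup code leaves at empty_zero_page (see
 * Documentation/i386/zero-page.txt).  E.g. EXT_MEM_K at offset 2 is
 * the extended-memory size in KB from int 0x15/0x88, and the e820
 * entries and their count live at E820MAP/E820NR.
 */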
#define PARAM	((unsigned char *)empty_zero_page)
#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
#define E820_MAP_NR (*(char*) (PARAM+E820NR))
#define E820_MAP    ((struct e820entry *) (PARAM+E820MAP))
#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
#define INITRD_START (*(unsigned long *) (PARAM+0x218))
#define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c))
#define DISK80_SIGNATURE_BUFFER (*(unsigned int*) (PARAM+DISK80_SIG_BUFFER))
#define EDD_NR     (*(unsigned char *) (PARAM+EDDNR))
#define EDD_BUF     ((struct edd_info *) (PARAM+EDDBUF))
#define COMMAND_LINE ((char *) (PARAM+2048))
#define COMMAND_LINE_SIZE 256

#define RAMDISK_IMAGE_START_MASK  	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

#ifdef	CONFIG_VISWS
char visws_board_type = -1;
char visws_board_rev = -1;

#define	PIIX_PM_START		0x0F80

#define	SIO_GPIO_START		0x0FC0

#define	SIO_PM_START		0x0FC8

#define	PMBASE			PIIX_PM_START
#define	GPIREG0			(PMBASE+0x30)
#define	GPIREG(x)		(GPIREG0+((x)/8))
#define	PIIX_GPI_BD_ID1		18
#define	PIIX_GPI_BD_REG		GPIREG(PIIX_GPI_BD_ID1)

#define	PIIX_GPI_BD_SHIFT	(PIIX_GPI_BD_ID1 % 8)

#define	SIO_INDEX	0x2e
#define	SIO_DATA	0x2f

#define	SIO_DEV_SEL	0x7
#define	SIO_DEV_ENB	0x30
#define	SIO_DEV_MSB	0x60
#define	SIO_DEV_LSB	0x61

#define	SIO_GP_DEV	0x7

#define	SIO_GP_BASE	SIO_GPIO_START
#define	SIO_GP_MSB	(SIO_GP_BASE>>8)
#define	SIO_GP_LSB	(SIO_GP_BASE&0xff)

#define	SIO_GP_DATA1	(SIO_GP_BASE+0)

#define	SIO_PM_DEV	0x8

#define	SIO_PM_BASE	SIO_PM_START
#define	SIO_PM_MSB	(SIO_PM_BASE>>8)
#define	SIO_PM_LSB	(SIO_PM_BASE&0xff)
#define	SIO_PM_INDEX	(SIO_PM_BASE+0)
#define	SIO_PM_DATA	(SIO_PM_BASE+1)

#define	SIO_PM_FER2	0x1

#define	SIO_PM_GP_EN	0x80
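
/*
 * The SuperI/O part is driven through an index/data register pair:
 * write a register index to SIO_INDEX (0x2e), then read or write that
 * register's value through SIO_DATA (0x2f).  Writing a logical device
 * number into register SIO_DEV_SEL first selects which on-chip device
 * the SIO_DEV_* registers refer to, which is the dance performed below.
 */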

static void __init visws_get_board_type_and_rev(void)
{
	int raw;

	visws_board_type = (char)(inb_p(PIIX_GPI_BD_REG) & PIIX_GPI_BD_REG)
							 >> PIIX_GPI_BD_SHIFT;
/*
 * Get Board rev.
 * First, we have to initialize the 307 part to allow us access
 * to the GPIO registers.  Let's map them at 0x0fc0 which is right
 * after the PIIX4 PM section.
 */
	outb_p(SIO_DEV_SEL, SIO_INDEX);
	outb_p(SIO_GP_DEV, SIO_DATA);	/* Talk to GPIO regs. */

	outb_p(SIO_DEV_MSB, SIO_INDEX);
	outb_p(SIO_GP_MSB, SIO_DATA);	/* MSB of GPIO base address */

	outb_p(SIO_DEV_LSB, SIO_INDEX);
	outb_p(SIO_GP_LSB, SIO_DATA);	/* LSB of GPIO base address */

	outb_p(SIO_DEV_ENB, SIO_INDEX);
	outb_p(1, SIO_DATA);		/* Enable GPIO registers. */

/*
 * Now, we have to map the power management section to write
 * a bit which enables access to the GPIO registers.
 * What lunatic came up with this shit?
 */
	outb_p(SIO_DEV_SEL, SIO_INDEX);
	outb_p(SIO_PM_DEV, SIO_DATA);	/* Talk to PM regs. */

	outb_p(SIO_DEV_MSB, SIO_INDEX);
	outb_p(SIO_PM_MSB, SIO_DATA);	/* MSB of PM base address */

	outb_p(SIO_DEV_LSB, SIO_INDEX);
	outb_p(SIO_PM_LSB, SIO_DATA);	/* LSB of PM base address */

	outb_p(SIO_DEV_ENB, SIO_INDEX);
	outb_p(1, SIO_DATA);		/* Enable PM registers. */

/*
 * Now, write the PM register which enables the GPIO registers.
 */
	outb_p(SIO_PM_FER2, SIO_PM_INDEX);
	outb_p(SIO_PM_GP_EN, SIO_PM_DATA);

/*
 * Now, initialize the GPIO registers.
 * We want them all to be inputs which is the
 * power on default, so let's leave them alone.
 * So, let's just read the board rev!
 */
	raw = inb_p(SIO_GP_DATA1);
	raw &= 0x7f;	/* 7 bits of valid board revision ID. */

	if (visws_board_type == VISWS_320) {
		if (raw < 0x6) {
			visws_board_rev = 4;
		} else if (raw < 0xc) {
			visws_board_rev = 5;
		} else {
			visws_board_rev = 6;
		}
	} else if (visws_board_type == VISWS_540) {
		visws_board_rev = 2;
	} else {
		visws_board_rev = raw;
	}

	printk(KERN_INFO "Silicon Graphics %s (rev %d)\n",
		visws_board_type == VISWS_320 ? "320" :
		(visws_board_type == VISWS_540 ? "540" : "unknown"),
		visws_board_rev);
}
#endif


static char command_line[COMMAND_LINE_SIZE];
       char saved_command_line[COMMAND_LINE_SIZE];

struct resource standard_io_resources[] = {
	{ "dma1", 0x00, 0x1f, IORESOURCE_BUSY },
	{ "pic1", 0x20, 0x3f, IORESOURCE_BUSY },
	{ "timer0", 0x40, 0x43, IORESOURCE_BUSY },
	{ "timer1", 0x50, 0x53, IORESOURCE_BUSY },
	{ "keyboard", 0x60, 0x6f, IORESOURCE_BUSY },
	{ "dma page reg", 0x80, 0x8f, IORESOURCE_BUSY },
	{ "pic2", 0xa0, 0xbf, IORESOURCE_BUSY },
	{ "dma2", 0xc0, 0xdf, IORESOURCE_BUSY },
	{ "fpu", 0xf0, 0xff, IORESOURCE_BUSY }
};

#define STANDARD_IO_RESOURCES (sizeof(standard_io_resources)/sizeof(struct resource))

static struct resource code_resource = { "Kernel code", 0x100000, 0 };
static struct resource data_resource = { "Kernel data", 0, 0 };
static struct resource vram_resource = { "Video RAM area", 0xa0000, 0xbffff, IORESOURCE_BUSY };

/* System ROM resources */
#define MAXROMS 6
static struct resource rom_resources[MAXROMS] = {
	{ "System ROM", 0xF0000, 0xFFFFF, IORESOURCE_BUSY },
	{ "Video ROM", 0xc0000, 0xc7fff, IORESOURCE_BUSY }
};

#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
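/*
 * A BIOS expansion ROM starts with the 0xaa55 signature above; byte 2
 * of the header holds the image length in 512-byte units, and all
 * bytes of a valid image sum to zero modulo 256, which is the checksum
 * tested below.
 */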

static void __init probe_roms(void)
{
	int roms = 1;
	unsigned long base;
	unsigned char *romstart;

	request_resource(&iomem_resource, rom_resources+0);

	/* Video ROM is standard at C000:0000 - C7FF:0000, check signature */
	for (base = 0xC0000; base < 0xE0000; base += 2048) {
		romstart = bus_to_virt(base);
		if (!romsignature(romstart))
			continue;
		request_resource(&iomem_resource, rom_resources + roms);
		roms++;
		break;
	}

	/* Extension roms at C800:0000 - DFFF:0000 */
	for (base = 0xC8000; base < 0xE0000; base += 2048) {
		unsigned long length;

		romstart = bus_to_virt(base);
		if (!romsignature(romstart))
			continue;
		length = romstart[2] * 512;
		if (length) {
			unsigned int i;
			unsigned char chksum;

			chksum = 0;
			for (i = 0; i < length; i++)
				chksum += romstart[i];

			/* Good checksum? */
			if (!chksum) {
				rom_resources[roms].start = base;
				rom_resources[roms].end = base + length - 1;
				rom_resources[roms].name = "Extension ROM";
				rom_resources[roms].flags = IORESOURCE_BUSY;

				request_resource(&iomem_resource, rom_resources + roms);
				roms++;
				if (roms >= MAXROMS)
					return;
			}
		}
	}

	/* Final check for motherboard extension rom at E000:0000 */
	base = 0xE0000;
	romstart = bus_to_virt(base);

	if (romsignature(romstart)) {
		rom_resources[roms].start = base;
		rom_resources[roms].end = base + 65535;
		rom_resources[roms].name = "Extension ROM";
		rom_resources[roms].flags = IORESOURCE_BUSY;

		request_resource(&iomem_resource, rom_resources + roms);
	}
}

static void __init limit_regions(unsigned long long size)
{
	unsigned long long current_addr = 0;
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		if (e820.map[i].type == E820_RAM) {
			current_addr = e820.map[i].addr + e820.map[i].size;
			if (current_addr >= size) {
				e820.map[i].size -= current_addr-size;
				e820.nr_map = i + 1;
				return;
			}
		}
	}
}

static void __init add_memory_region(unsigned long long start,
				  unsigned long long size, int type)
{
	int x = e820.nr_map;

	if (x == E820MAX) {
	    printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
	    return;
	}

	e820.map[x].addr = start;
	e820.map[x].size = size;
	e820.map[x].type = type;
	e820.nr_map++;
} /* add_memory_region */

#define E820_DEBUG	1

static void __init print_memory_map(char *who)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		printk(" %s: %016Lx - %016Lx ", who,
			e820.map[i].addr,
			e820.map[i].addr + e820.map[i].size);
		switch (e820.map[i].type) {
		case E820_RAM:	printk("(usable)\n");
				break;
		case E820_RESERVED:
				printk("(reserved)\n");
				break;
		case E820_ACPI:
				printk("(ACPI data)\n");
				break;
		case E820_NVS:
				printk("(ACPI NVS)\n");
				break;
		default:	printk("type %lu\n", e820.map[i].type);
				break;
		}
	}
}

/*
 * Sanitize the BIOS e820 map.
 *
 * Some e820 responses include overlapping entries.  The following
 * replaces the original e820 map with a new one, removing overlaps.
 *
 */
static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
{
	struct change_member {
		struct e820entry *pbios; /* pointer to original bios entry */
		unsigned long long addr; /* address for this change point */
	};
	struct change_member change_point_list[2*E820MAX];
	struct change_member *change_point[2*E820MAX];
	struct e820entry *overlap_list[E820MAX];
	struct e820entry new_bios[E820MAX];
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_bios_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/*
		Visually we're performing the following (1,2,3,4 = memory types)...

		Sample memory map (w/overlaps):
		   ____22__________________
		   ______________________4_
		   ____1111________________
		   _44_____________________
		   11111111________________
		   ____________________33__
		   ___________44___________
		   __________33333_________
		   ______________22________
		   ___________________2222_
		   _________111111111______
		   _____________________11_
		   _________________4______

		Sanitized equivalent (no overlap):
		   1_______________________
		   _44_____________________
		   ___1____________________
		   ____22__________________
		   ______11________________
		   _________1______________
		   __________3_____________
		   ___________44___________
		   _____________33_________
		   _______________2________
		   ________________1_______
		   _________________4______
		   ___________________2____
		   ____________________33__
		   ______________________4_
	*/
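	/*
	 * In effect this is a sweep over sorted interval end-points:
	 * each region contributes a "start" and an "end" change-point,
	 * an overlap list tracks which regions cover the current
	 * address, and whenever the effective type (the maximum over
	 * that list) changes, the preceding run is emitted as one
	 * entry of the new map.
	 */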

	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in bios map */
	for (i=0; i<old_nr; i++)
		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i=0; i < 2*old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i=0; i < old_nr; i++)	{
		if (biosmap[i].size != 0) {
			change_point[chgidx]->addr = biosmap[i].addr;
			change_point[chgidx++]->pbios = &biosmap[i];
			change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
			change_point[chgidx++]->pbios = &biosmap[i];
		}
	}
	chg_nr = chgidx;    	/* true number of change-points */

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing)	{
		still_changing = 0;
		for (i=1; i < chg_nr; i++)  {
			/* if <current_addr> > <last_addr>, swap */
			/* or, if current=<start_addr> & last=<end_addr>, swap */
			if ((change_point[i]->addr < change_point[i-1]->addr) ||
				((change_point[i]->addr == change_point[i-1]->addr) &&
				 (change_point[i]->addr == change_point[i]->pbios->addr) &&
				 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
			   )
			{
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing=1;
			}
		}
	}

	/* create a new bios memory map, removing overlaps */
	overlap_entries=0;	 /* number of entries in the overlap table */
	new_bios_entry=0;	 /* index for creating new bios map entries */
	last_type = 0;		 /* start with undefined memory type */
	last_addr = 0;		 /* start with 0 as last starting address */
	/* loop through change-points, determining effect on the new bios map */
	for (chgidx=0; chgidx < chg_nr; chgidx++)
	{
		/* keep track of all overlapping bios entries */
		if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
		{
			/* add map entry to overlap list (> 1 entry implies an overlap) */
			overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
		}
		else
		{
			/* remove entry from list (order independent, so swap with last) */
			for (i=0; i<overlap_entries; i++)
			{
				if (overlap_list[i] == change_point[chgidx]->pbios)
					overlap_list[i] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/* if there are overlapping entries, decide which "type" to use */
		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
		current_type = 0;
		for (i=0; i<overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/* continue building up new bios map based on this information */
		if (current_type != last_type)	{
			if (last_type != 0)	 {
				new_bios[new_bios_entry].size =
					change_point[chgidx]->addr - last_addr;
				/* move forward only if the new size was non-zero */
				if (new_bios[new_bios_entry].size != 0)
					if (++new_bios_entry >= E820MAX)
						break; 	/* no more space left for new bios entries */
			}
			if (current_type != 0)	{
				new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
				new_bios[new_bios_entry].type = current_type;
				last_addr=change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	new_nr = new_bios_entry;   /* retain count for new bios entries */

	/* copy new bios mapping into original location */
	memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
	*pnr_map = new_nr;

	return 0;
}

/*
 * Copy the BIOS e820 map into a safe place.
 *
 * Sanity-check it while we're at it..
 *
 * If we're lucky and live on a modern system, the setup code
 * will have given us a memory map that we can use to properly
 * set up memory.  If we aren't, we'll fake a memory map.
 *
 * We check to see that the memory map contains at least 2 elements
 * before we'll use it, because the detection code in setup.S may
 * not be perfect and most every PC known to man has two memory
 * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
 * thinkpad 560x, for example, does not cooperate with the memory
 * detection code.)
 */
static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
{
	/* Only one memory region (or negative)? Ignore it */
	if (nr_map < 2)
		return -1;

	do {
		unsigned long long start = biosmap->addr;
		unsigned long long size = biosmap->size;
		unsigned long long end = start + size;
		unsigned long type = biosmap->type;

		/* Overflow in 64 bits? Ignore the memory map. */
		if (start > end)
			return -1;

		/*
		 * Some BIOSes claim RAM in the 640k - 1M region.
		 * Not right. Fix it up.
		 */
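		/*
		 * E.g. a claimed RAM range 0x90000-0x110000 becomes RAM at
		 * 0x90000-0xa0000 plus RAM at 0x100000-0x110000; the
		 * 0xa0000-0x100000 hole (VGA and BIOS ROM) is dropped.
		 */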
		if (type == E820_RAM) {
			if (start < 0x100000ULL && end > 0xA0000ULL) {
				if (start < 0xA0000ULL)
					add_memory_region(start, 0xA0000ULL-start, type);
				if (end <= 0x100000ULL)
					continue;
				start = 0x100000ULL;
				size = end - start;
			}
		}
		add_memory_region(start, size, type);
	} while (biosmap++,--nr_map);
	return 0;
}

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
unsigned char eddnr;
struct edd_info edd[EDDMAXNR];
unsigned int edd_disk80_sig;
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from empty_zero_page into a safe place.
 *
 */
static inline void copy_edd(void)
{
	eddnr = EDD_NR;
	memcpy(edd, EDD_BUF, sizeof(edd));
	edd_disk80_sig = DISK80_SIGNATURE_BUFFER;
}
#else
static inline void copy_edd(void) {}
#endif

/*
 * Do NOT EVER look at the BIOS memory size location.
 * It does not work on many machines.
 */
#define LOWMEMSIZE()	(0x9f000)

static void __init setup_memory_region(void)
{
	char *who = "BIOS-e820";

	/*
	 * Try to copy the BIOS-supplied E820-map.
	 *
	 * Otherwise fake a memory map; one section from 0k->640k,
	 * the next section from 1mb->appropriate_mem_k
	 */
	sanitize_e820_map(E820_MAP, &E820_MAP_NR);
	if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
		unsigned long mem_size;

		/* compare results from other methods and take the greater */
		if (ALT_MEM_K < EXT_MEM_K) {
			mem_size = EXT_MEM_K;
			who = "BIOS-88";
		} else {
			mem_size = ALT_MEM_K;
			who = "BIOS-e801";
		}

		e820.nr_map = 0;
		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
	}
	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
	print_memory_map(who);
} /* setup_memory_region */


static void __init parse_cmdline_early(char **cmdline_p)
{
	char c = ' ', *to = command_line, *from = COMMAND_LINE;
	int len = 0;
	int userdef = 0;

	/* Save unparsed command line copy for /proc/cmdline */
	memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';

	for (;;) {
		if (c != ' ')
			goto nextchar;
		/*
		 * "mem=nopentium" disables the 4MB page tables.
		 * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
		 * to <mem>, overriding the bios size.
		 * "mem=XXX[KkmM]@XXX[KkmM]" defines a memory region from
		 * <start> to <start>+<mem>, overriding the bios size.
		 */
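		/*
		 * E.g. "mem=512M" clips the e820 map at 512MB, while
		 * "mem=exactmap mem=640K@0 mem=511M@1M" throws the BIOS
		 * map away and rebuilds it from scratch.
		 */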
		if (!memcmp(from, "mem=", 4)) {
			if (to != command_line)
				to--;
			if (!memcmp(from+4, "nopentium", 9)) {
				from += 9+4;
				clear_bit(X86_FEATURE_PSE, &boot_cpu_data.x86_capability);
				set_bit(X86_FEATURE_PSE, &disabled_x86_caps);
			} else if (!memcmp(from+4, "exactmap", 8)) {
				from += 8+4;
				e820.nr_map = 0;
				userdef = 1;
			} else {
				/* If the user specifies memory size, we
				 * limit the BIOS-provided memory map to
				 * that size. exactmap can be used to specify
				 * the exact map. mem=number can be used to
				 * trim the existing memory map.
				 */
				unsigned long long start_at, mem_size;

				mem_size = memparse(from+4, &from);
				if (*from == '@') {
					start_at = memparse(from+1, &from);
					add_memory_region(start_at, mem_size, E820_RAM);
				} else if (*from == '#') {
					start_at = memparse(from+1, &from);
					add_memory_region(start_at, mem_size, E820_ACPI);
				} else if (*from == '$') {
					start_at = memparse(from+1, &from);
					add_memory_region(start_at, mem_size, E820_RESERVED);
				} else {
					limit_regions(mem_size);
					userdef=1;
				}
			}
		}
#ifdef	CONFIG_SMP
		/*
		 * If the BIOS enumerates physical processors before logical,
		 * maxcpus=N at enumeration-time can be used to disable HT.
		 */
		else if (!memcmp(from, "maxcpus=", 8)) {
			extern unsigned int max_cpus;

			max_cpus = simple_strtoul(from + 8, NULL, 0);
		}
#endif

#ifdef CONFIG_ACPI_BOOT
		/* "acpi=off" disables both ACPI table parsing and interpreter */
		else if (!memcmp(from, "acpi=off", 8)) {
			disable_acpi();
		}

		/* acpi=force to over-ride black-list */
		else if (!memcmp(from, "acpi=force", 10)) {
			acpi_force = 1;
			acpi_ht = 1;
			acpi_disabled = 0;
		}

		/* Limit ACPI to boot-time only, but still enable HT */
		else if (!memcmp(from, "acpi=ht", 7)) {
			if (!acpi_force)
				disable_acpi();
			acpi_ht = 1;
		}

		/* acpi=strict disables out-of-spec workarounds */
		else if (!memcmp(from, "acpi=strict", 11)) {
			acpi_strict = 1;
		}

		else if (!memcmp(from, "pci=noacpi", 10)) {
			acpi_noirq_set();
		}

		/* disable IO-APIC */
		else if (!memcmp(from, "noapic", 6))
			disable_ioapic_setup();

		else if (!memcmp(from, "acpi_sci=edge", 13))
			acpi_sci_flags.trigger = 1;
		else if (!memcmp(from, "acpi_sci=level", 14))
			acpi_sci_flags.trigger = 3;
		else if (!memcmp(from, "acpi_sci=high", 13))
			acpi_sci_flags.polarity = 1;
		else if (!memcmp(from, "acpi_sci=low", 12))
			acpi_sci_flags.polarity = 3;

#endif
		/*
		 * highmem=size forces highmem to be exactly 'size' bytes.
		 * This works even on boxes that have no highmem otherwise.
		 * This also works to reduce highmem size on bigger boxes.
		 */
		else if (!memcmp(from, "highmem=", 8))
			highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
nextchar:
		c = *(from++);
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*(to++) = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
	if (userdef) {
		printk(KERN_INFO "user-defined physical RAM map:\n");
		print_memory_map("user");
	}
}

#define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((x) << PAGE_SHIFT)
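/*
 * With 4K pages: PFN_UP(0x1001) == 2, PFN_DOWN(0x1fff) == 1 and
 * PFN_PHYS(2) == 0x2000, i.e. partial pages round up or down to
 * whole page frames.
 */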

/*
 * Reserved space for vmalloc and iomap - defined in asm/page.h
 */
#define MAXMEM_PFN	PFN_DOWN(MAXMEM)
#define MAX_NONPAE_PFN	(1 << 20)
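/* 2^20 page frames * 4K/page = 4GB, the physical limit without PAE. */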

/*
 * Find the highest page frame number we have available
 */
static void __init find_max_pfn(void)
{
	int i;

	max_pfn = 0;
	for (i = 0; i < e820.nr_map; i++) {
		unsigned long start, end;
		/* RAM? */
		if (e820.map[i].type != E820_RAM)
			continue;
		start = PFN_UP(e820.map[i].addr);
		end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
		if (start >= end)
			continue;
		if (end > max_pfn)
			max_pfn = end;
	}
}

/*
 * Determine low and high memory ranges:
 */
static unsigned long __init find_max_low_pfn(void)
{
	unsigned long max_low_pfn;

	max_low_pfn = max_pfn;
	if (max_low_pfn > MAXMEM_PFN) {
		if (highmem_pages == -1)
			highmem_pages = max_pfn - MAXMEM_PFN;
		if (highmem_pages + MAXMEM_PFN < max_pfn)
			max_pfn = MAXMEM_PFN + highmem_pages;
		if (highmem_pages + MAXMEM_PFN > max_pfn) {
			printk(KERN_WARNING "only %luMB highmem pages available, ignoring highmem size of %uMB.\n",
				pages_to_mb(max_pfn - MAXMEM_PFN),
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
		/* Maximum memory usable is what is directly addressable */
		printk(KERN_WARNING "Warning: only %ldMB will be used.\n",
					MAXMEM>>20);
		if (max_pfn > MAX_NONPAE_PFN)
			printk(KERN_WARNING "Use a PAE enabled kernel.\n");
		else
			printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_X86_PAE
		if (max_pfn > MAX_NONPAE_PFN) {
			max_pfn = MAX_NONPAE_PFN;
			printk(KERN_WARNING "Warning: only 4GB will be used.\n");
			printk(KERN_WARNING "Use a PAE enabled kernel.\n");
		}
#endif /* !CONFIG_X86_PAE */
#endif /* !CONFIG_HIGHMEM */
	} else {
		if (highmem_pages == -1)
			highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
		if (highmem_pages >= max_pfn) {
			printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!\n",
				pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
			highmem_pages = 0;
		}
		if (highmem_pages) {
			if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
				printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n",
					pages_to_mb(highmem_pages));
				highmem_pages = 0;
			}
			max_low_pfn -= highmem_pages;
		}
#else
		if (highmem_pages)
			printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
	}

	return max_low_pfn;
}

/*
 * Register fully available low RAM pages with the bootmem allocator.
 */
static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		unsigned long curr_pfn, last_pfn, size;
		/*
		 * Reserve usable low memory
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(e820.map[i].addr);
		if (curr_pfn >= max_low_pfn)
			continue;
		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);

		if (last_pfn > max_low_pfn)
			last_pfn = max_low_pfn;

		/*
		 * .. finally, did all the rounding and playing
		 * around just make the area go away?
		 */
		if (last_pfn <= curr_pfn)
			continue;

		size = last_pfn - curr_pfn;
		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
	}
}

static unsigned long __init setup_memory(void)
{
	unsigned long bootmap_size, start_pfn, max_low_pfn;

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(&_end));

	find_max_pfn();

	max_low_pfn = find_max_low_pfn();

#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn) {
		highstart_pfn = max_low_pfn;
	}
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
#endif
	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));
	/*
	 * Initialize the boot-time allocator (with low memory only):
	 */
	bootmap_size = init_bootmem(start_pfn, max_low_pfn);

	register_bootmem_low_pages(max_low_pfn);

	/*
	 * Reserve the bootmem bitmap itself as well. We do this in two
	 * steps (first step was init_bootmem()) because this catches
	 * the (very unlikely) case of us accidentally initializing the
	 * bootmem allocator with an invalid RAM area.
	 */
	reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(start_pfn) +
			 bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem(0, PAGE_SIZE);

#ifdef CONFIG_SMP
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
#endif
#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration.
	 */
	find_smp_config();
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
			reserve_bootmem(INITRD_START, INITRD_SIZE);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start+INITRD_SIZE;
		}
		else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			    INITRD_START + INITRD_SIZE,
			    max_low_pfn << PAGE_SHIFT);
			initrd_start = 0;
		}
	}
#endif

	return max_low_pfn;
}

/*
 * Request address space for all standard RAM and ROM resources
 * and also for regions reported as reserved by the e820.
 */
static void __init register_memory(unsigned long max_low_pfn)
{
	unsigned long low_mem_size;
	int i;

	probe_roms();
	for (i = 0; i < e820.nr_map; i++) {
		struct resource *res;
		if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
			continue;
		res = alloc_bootmem_low(sizeof(struct resource));
		switch (e820.map[i].type) {
		case E820_RAM:	res->name = "System RAM"; break;
		case E820_ACPI:	res->name = "ACPI Tables"; break;
		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
		default:	res->name = "reserved";
		}
		res->start = e820.map[i].addr;
		res->end = res->start + e820.map[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
		if (e820.map[i].type == E820_RAM) {
			/*
			 *  We don't know which RAM region contains kernel data,
			 *  so we try it repeatedly and let the resource manager
			 *  test it.
			 */
			request_resource(res, &code_resource);
			request_resource(res, &data_resource);
		}
	}
	request_resource(&iomem_resource, &vram_resource);

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, standard_io_resources+i);

	/* Tell the PCI layer not to allocate too close to the RAM area.. */
	low_mem_size = ((max_low_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
	if (low_mem_size > pci_mem_start)
		pci_mem_start = low_mem_size;
}

void __init setup_arch(char **cmdline_p)
{
	unsigned long max_low_pfn;

#ifdef CONFIG_VISWS
	visws_get_board_type_and_rev();
#endif

#ifndef CONFIG_HIGHIO
	blk_nohighio = 1;
#endif

	ROOT_DEV = to_kdev_t(ORIG_ROOT_DEV);
	drive_info = DRIVE_INFO;
	screen_info = SCREEN_INFO;
	apm_info.bios = APM_BIOS_INFO;
	if (SYS_DESC_TABLE.length != 0) {
		MCA_bus = SYS_DESC_TABLE.table[3] & 0x2;
		machine_id = SYS_DESC_TABLE.table[0];
		machine_submodel_id = SYS_DESC_TABLE.table[1];
		BIOS_revision = SYS_DESC_TABLE.table[2];
	}
	aux_device_present = AUX_DEVICE_INFO;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();
	copy_edd();

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_bus(&_text);
	code_resource.end = virt_to_bus(&_etext)-1;
	data_resource.start = virt_to_bus(&_etext);
	data_resource.end = virt_to_bus(&_edata)-1;

	parse_cmdline_early(cmdline_p);

	max_low_pfn = setup_memory();

	/*
	 * NOTE: before this point _nobody_ is allowed to allocate
	 * any memory using the bootmem allocator.
	 */

#ifdef CONFIG_SMP
	smp_alloc_memory(); /* AP processor realmode stacks in low memory */
#endif
	paging_init();

	dmi_scan_machine();

	/*
	 * Parse the ACPI tables for possible boot-time SMP configuration.
	 */
	acpi_boot_init();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
#endif

	register_memory(max_low_pfn);

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}

static int cachesize_override __initdata = -1;
static int __init cachesize_setup(char *str)
{
	get_option (&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);


#ifndef CONFIG_X86_TSC
static int tsc_disable __initdata = 0;

static int __init notsc_setup(char *str)
{
	tsc_disable = 1;
	return 1;
}
#else
static int __init notsc_setup(char *str)
{
	printk("notsc: Kernel compiled with CONFIG_X86_TSC, cannot disable TSC.\n");
	return 1;
}
#endif
__setup("notsc", notsc_setup);

static int __init highio_setup(char *str)
{
	printk("i386: disabling HIGHMEM block I/O\n");
	blk_nohighio = 1;
	return 1;
}
__setup("nohighio", highio_setup);

static int __init get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while ( *p == ' ' )
	     p++;
	if ( p != q ) {
	     while ( *p )
		  *q++ = *p++;
	     while ( q <= &c->x86_model_id[48] )
		  *q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}


static void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size=(ecx>>24)+(edx>>24);
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

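	/*
	 * Extended CPUID leaf 0x80000006: ECX bits 31-16 give the L2
	 * size in KB, bits 7-0 the L2 line size in bytes.
	 */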
	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;

	/* AMD erratum T13 (order #21922) */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
		if (c->x86_model == 3 && c->x86_mask == 0)	/* Duron Rev A0 */
			l2size = 64;
		if (c->x86_model == 4 &&
			(c->x86_mask==0 || c->x86_mask==1))	/* Tbird rev A1/A2 */
			l2size = 256;
	}

	if (c->x86_vendor == X86_VENDOR_CENTAUR) {
		/* VIA C3 CPUs (670-68F) need further shifting. */
		if ((c->x86 == 6) &&
		    ((c->x86_model == 7) || (c->x86_model == 8))) {
			l2size >>= 8;
		}

		/* VIA also screwed up Nehemiah stepping 1, and made
		   it return '65KB' instead of '64KB'
		   - Note, it seems this may only be in engineering samples. */
		if ((c->x86==6) && (c->x86_model==9) &&
		    (c->x86_mask==1) && (l2size==65))
			l2size -= 1;
	}

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if ( l2size == 0 )
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}

/*
 *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 *	misexecution of code under Linux. Owners of such processors should
 *	contact AMD for precise details and a CPU swap.
 *
 *	See	http://www.multimania.com/poulot/k6bug.html
 *		http://www.amd.com/K6/k6docs/revgd.html
 *
 *	The following test is erm.. interesting. AMD neglected to up
 *	the chip stepping when fixing the bug but they also tweaked some
 *	performance at the same time..
 */

extern void vide(void);
__asm__(".align 4\nvide: ret");
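/*
 * vide() is a deliberately empty indirect-call target: the timing loop
 * in init_amd() below runs a burst of indirect calls under the TSC,
 * since the fixed (post-B9730xxxx) parts appear to execute them
 * measurably faster than the buggy steppings.
 */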

static int __init init_amd(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = max_mapnr >> (20-PAGE_SHIFT);
	int r;

	/*
	 *	FIXME: We should handle the K5 here. Set up the write
	 *	range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 *	no bus pipeline)
	 */

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	r = get_model_name(c);

	switch(c->x86)
	{
		case 5:
			if( c->x86_model < 6 )
			{
				/* Based on AMD doc 20734R - June 2000 */
				if ( c->x86_model == 0 ) {
					clear_bit(X86_FEATURE_APIC, &c->x86_capability);
					set_bit(X86_FEATURE_PGE, &c->x86_capability);
				}
				break;
			}

			if ( c->x86_model == 6 && c->x86_mask == 1 ) {
				const int K6_BUG_LOOP = 1000000;
				int n;
				void (*f_vide)(void);
				unsigned long d, d2;

				printk(KERN_INFO "AMD K6 stepping B detected - ");

				/*
				 * It looks like AMD fixed the 2.6.2 bug and improved indirect
				 * calls at the same time.
				 */

				n = K6_BUG_LOOP;
				f_vide = vide;
				rdtscl(d);
				while (n--)
					f_vide();
				rdtscl(d2);
				d = d2-d;

				/* Knock these two lines out if it debugs out ok */
				printk(KERN_INFO "K6 BUG %ld %d (Report these if test report is incorrect)\n", d, 20*K6_BUG_LOOP);
				printk(KERN_INFO "AMD K6 stepping B detected - ");
				/* -- cut here -- */
				if (d > 20*K6_BUG_LOOP)
					printk("system stability may be impaired when more than 32 MB are used.\n");
				else
					printk("probably OK (after B9730xxxx).\n");
				printk(KERN_INFO "Please see http://www.mygale.com/~poulot/k6bug.html\n");
			}

			/* K6 with old style WHCR */
			if (c->x86_model < 8 ||
			   (c->x86_model== 8 && c->x86_mask < 8)) {
				/* We can only write allocate on the low 508Mb */
				if(mbytes>508)
					mbytes=508;

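				/*
				 * Old-style WHCR as programmed below:
				 * bit 0 enables write allocation, and
				 * the limit in 4MB units sits in the
				 * bits above it (hence the 508MB cap).
				 */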
				rdmsr(MSR_K6_WHCR, l, h);
				if ((l&0x0000FFFF)==0) {
					unsigned long flags;
					l=(1<<0)|((mbytes/4)<<1);
					local_irq_save(flags);
					wbinvd();
					wrmsr(MSR_K6_WHCR, l, h);
					local_irq_restore(flags);
					printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
						mbytes);
				}
				break;
			}

			if ((c->x86_model == 8 && c->x86_mask > 7) ||
			     c->x86_model == 9 || c->x86_model == 13) {
				/* The more serious chips .. */

				if(mbytes>4092)
					mbytes=4092;

				rdmsr(MSR_K6_WHCR, l, h);
				if ((l&0xFFFF0000)==0) {
					unsigned long flags;
					l=((mbytes>>2)<<22)|(1<<16);
					local_irq_save(flags);
					wbinvd();
					wrmsr(MSR_K6_WHCR, l, h);
					local_irq_restore(flags);
					printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
						mbytes);
				}

				/*  Set MTRR capability flag if appropriate */
				if (c->x86_model == 13 || c->x86_model == 9 ||
				   (c->x86_model == 8 && c->x86_mask >= 8))
					set_bit(X86_FEATURE_K6_MTRR, &c->x86_capability);
				break;
			}
			break;

		case 6: /* An Athlon/Duron */

			/* Bit 15 of Athlon-specific MSR 15 needs to be 0
			 * to enable SSE on Palomino/Morgan CPUs.
			 * If the BIOS didn't enable it already, enable it
			 * here.
			 */
			if (c->x86_model >= 6 && c->x86_model <= 10) {
				if (!test_bit(X86_FEATURE_XMM,
					      &c->x86_capability)) {
					printk(KERN_INFO
					       "Enabling Disabled K7/SSE Support...\n");
					rdmsr(MSR_K7_HWCR, l, h);
					l &= ~0x00008000;
					wrmsr(MSR_K7_HWCR, l, h);
					set_bit(X86_FEATURE_XMM,
						&c->x86_capability);
				}
			}

			/* It's been determined by AMD that Athlons since model 8 stepping 1
			 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
			 * as per AMD technical note 27212 0.2.
			 */
			if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
				rdmsr(MSR_K7_CLK_CTL, l, h);
				if ((l & 0xfff00000) != 0x20000000) {
					printk("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
						((l & 0x000fffff)|0x20000000));
					wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
				}
			}
			break;
	}

	display_cacheinfo(c);
	return r;
}

/*
 * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info about the CPU
 */
static void __init do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
	unsigned char ccr2, ccr3;
	unsigned long flags;

	/* we test for DEVID by checking whether CCR3 is writable */
	local_irq_save(flags);
	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, ccr3 ^ 0x80);
	getCx86(0xc0);   /* dummy to change bus */

	if (getCx86(CX86_CCR3) == ccr3) {       /* no DEVID regs. */
		ccr2 = getCx86(CX86_CCR2);
		setCx86(CX86_CCR2, ccr2 ^ 0x04);
		getCx86(0xc0);  /* dummy */

		if (getCx86(CX86_CCR2) == ccr2) /* old Cx486SLC/DLC */
			*dir0 = 0xfd;
		else {                          /* Cx486S A step */
			setCx86(CX86_CCR2, ccr2);
			*dir0 = 0xfe;
		}
	}
	else {
		setCx86(CX86_CCR3, ccr3);  /* restore CCR3 */

		/* read DIR0 and DIR1 CPU registers */
		*dir0 = getCx86(CX86_DIR0);
		*dir1 = getCx86(CX86_DIR1);
	}
	local_irq_restore(flags);
}

/*
 * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in
 * order to identify the Cyrix CPU model after we're out of the
 * initial setup.
 */
static unsigned char Cx86_dir0_msb __initdata = 0;

static char Cx86_model[][9] __initdata = {
	"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
	"M II ", "Unknown"
};
static char Cx486_name[][5] __initdata = {
	"SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
	"SRx2", "DRx2"
};
static char Cx486S_name[][4] __initdata = {
	"S", "S2", "Se", "S2e"
};
static char Cx486D_name[][4] __initdata = {
	"DX", "DX2", "?", "?", "?", "DX4"
};
static char Cx86_cb[] __initdata = "?.5x Core/Bus Clock";
static char cyrix_model_mult1[] __initdata = "12??43";
static char cyrix_model_mult2[] __initdata = "12233445";

/*
 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
 * BIOSes for compatibility with DOS games.  This makes the udelay loop
 * work correctly, and improves performance.
 *
 * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP
 */

extern void calibrate_delay(void) __init;

static void __init check_cx686_slop(struct cpuinfo_x86 *c)
{
	unsigned long flags;

	if (Cx86_dir0_msb == 3) {
		unsigned char ccr3, ccr5;

		local_irq_save(flags);
		ccr3 = getCx86(CX86_CCR3);
		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
		ccr5 = getCx86(CX86_CCR5);
		if (ccr5 & 2)
			setCx86(CX86_CCR5, ccr5 & 0xfd);  /* reset SLOP */
		setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
		local_irq_restore(flags);

		if (ccr5 & 2) { /* possible wrong calibration done */
			printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
			calibrate_delay();
			c->loops_per_jiffy = loops_per_jiffy;
		}
	}
}
1642 
init_cyrix(struct cpuinfo_x86 * c)1643 static void __init init_cyrix(struct cpuinfo_x86 *c)
1644 {
1645 	unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
1646 	char *buf = c->x86_model_id;
1647 	const char *p = NULL;
1648 
1649 	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
1650 	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
1651 	clear_bit(0*32+31, &c->x86_capability);
1652 
1653 	/* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
1654 	if ( test_bit(1*32+24, &c->x86_capability) ) {
1655 		clear_bit(1*32+24, &c->x86_capability);
1656 		set_bit(X86_FEATURE_CXMMX, &c->x86_capability);
1657 	}
1658 
1659 	do_cyrix_devid(&dir0, &dir1);
1660 
1661 	check_cx686_slop(c);
1662 
1663 	Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family"   */
1664 	dir0_lsn = dir0 & 0xf;                /* model or clock multiplier */
1665 
1666 	/* common case step number/rev -- exceptions handled below */
1667 	c->x86_model = (dir1 >> 4) + 1;
1668 	c->x86_mask = dir1 & 0xf;
1669 
1670 	/* Now cook; the original recipe is by Channing Corn, from Cyrix.
1671 	 * We do the same thing for each generation: we work out
1672 	 * the model, multiplier and stepping.  Black magic included,
1673 	 * to make the silicon step/rev numbers match the printed ones.
1674 	 */
1675 
1676 	switch (dir0_msn) {
1677 		unsigned char tmp;
1678 
1679 	case 0: /* Cx486SLC/DLC/SRx/DRx */
1680 		p = Cx486_name[dir0_lsn & 7];
1681 		break;
1682 
1683 	case 1: /* Cx486S/DX/DX2/DX4 */
1684 		p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
1685 			: Cx486S_name[dir0_lsn & 3];
1686 		break;
1687 
1688 	case 2: /* 5x86 */
1689 		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
1690 		p = Cx86_cb+2;
1691 		break;
1692 
1693 	case 3: /* 6x86/6x86L */
1694 		Cx86_cb[1] = ' ';
1695 		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
1696 		if (dir1 > 0x21) { /* 686L */
1697 			Cx86_cb[0] = 'L';
1698 			p = Cx86_cb;
1699 			(c->x86_model)++;
1700 		} else             /* 686 */
1701 			p = Cx86_cb+1;
1702 		/* Emulate MTRRs using Cyrix's ARRs. */
1703 		set_bit(X86_FEATURE_CYRIX_ARR, &c->x86_capability);
1704 		/* 6x86's contain this bug */
1705 		c->coma_bug = 1;
1706 		break;
1707 
1708 	case 4: /* MediaGX/GXm */
1709 #ifdef CONFIG_PCI
1710 		/* It isnt really a PCI quirk directly, but the cure is the
1711 		   same. The MediaGX has deep magic SMM stuff that handles the
1712 		   SB emulation. It thows away the fifo on disable_dma() which
1713 		   is wrong and ruins the audio.
1714 
1715 		   Bug2: VSA1 has a wrap bug so that using maximum sized DMA
1716 		   causes bad things. According to NatSemi VSA2 has another
1717 		   bug to do with 'hlt'. I've not seen any boards using VSA2
1718 		   and X doesn't seem to support it either so who cares 8).
1719 		   VSA1 we work around however.
1720 		*/
1721 
1722 		printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
1723 		isa_dma_bridge_buggy = 2;
1724 #endif
1725 		c->x86_cache_size=16;	/* Yep 16K integrated cache thats it */
1726 
1727 		/* GXm supports extended cpuid levels 'ala' AMD */
1728 		if (c->cpuid_level == 2) {
1729 			get_model_name(c);  /* get CPU marketing name */
1730 			/*
1731 	 		 *	The 5510/5520 companion chips have a funky PIT
1732 			 *	that breaks the TSC synchronizing, so turn it off
1733 			 */
1734 			if(pci_find_device(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, NULL) ||
1735 			   pci_find_device(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, NULL))
1736 				clear_bit(X86_FEATURE_TSC, c->x86_capability);
1737 			return;
1738 		}
1739 		else {  /* MediaGX */
1740 			Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
1741 			p = Cx86_cb+2;
1742 			c->x86_model = (dir1 & 0x20) ? 1 : 2;
1743 			if(pci_find_device(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, NULL) ||
1744 			   pci_find_device(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, NULL))
1745 				clear_bit(X86_FEATURE_TSC, &c->x86_capability);
1746 		}
1747 		break;
1748 
1749 	case 5: /* 6x86MX/M II */
1750 		if (dir1 > 7)
1751 		{
1752 			dir0_msn++;  /* M II */
1753 			/* Enable MMX extensions (App note 108) */
1754 			setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
1755 		}
1756 		else
1757 		{
1758 			c->coma_bug = 1;      /* 6x86MX, it has the bug. */
1759 		}
1760 		tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
1761 		Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
1762 		p = Cx86_cb+tmp;
1763 		if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
1764 			(c->x86_model)++;
1765 		/* Emulate MTRRs using Cyrix's ARRs. */
1766 		set_bit(X86_FEATURE_CYRIX_ARR, &c->x86_capability);
1767 		break;
1768 
1769 	case 0xf:  /* Cyrix 486 without DEVID registers */
1770 		switch (dir0_lsn) {
1771 		case 0xd:  /* either a 486SLC or DLC w/o DEVID */
1772 			dir0_msn = 0;
1773 			p = Cx486_name[(c->hard_math) ? 1 : 0];
1774 			break;
1775 
1776 		case 0xe:  /* a 486S A step */
1777 			dir0_msn = 0;
1778 			p = Cx486S_name[0];
1779 			break;
1780 		}
1781 		break;
1782 
1783 	default:  /* unknown (shouldn't happen, we know everyone ;-) */
1784 		dir0_msn = 7;
1785 		break;
1786 	}
1787 	strcpy(buf, Cx86_model[dir0_msn & 7]);
1788 	if (p) strcat(buf, p);
1789 	return;
1790 }
1791 
1792 #ifdef CONFIG_X86_OOSTORE
1793 
1794 static u32 __init power2(u32 x)
1795 {
1796 	u32 s=1;
1797 	while(s<=x)
1798 		s<<=1;
1799 	return s>>=1;
1800 }
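
/*
 *	Editor's note: power2() rounds down to the nearest power of two
 *	(power2(96*1024*1024) == 64*1024*1024, power2(0) == 0).  The MCR
 *	code below relies on this, since each MCR describes a
 *	power-of-two sized block.
 */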
1801 
1802 /*
1803  *	Set up an actual MCR
1804  */
1805 
1806 static void __init winchip_mcr_insert(int reg, u32 base, u32 size, int key)
1807 {
1808 	u32 lo, hi;
1809 
1810 	hi = base & ~0xFFF;
1811 	lo = ~(size-1);		/* Size is a power of 2 so this makes a mask */
1812 	lo &= ~0xFFF;		/* Remove the ctrl value bits */
1813 	lo |= key;		/* Attribute we wish to set */
1814 	wrmsr(reg+MSR_IDT_MCR0, lo, hi);
1815 	mtrr_centaur_report_mcr(reg, lo, hi);	/* Tell the mtrr driver */
1816 }
1817 
1818 /*
1819  *	Figure out what we can cover with MCRs
1820  *
1821  *	Shortcut: we know you can't put 4GB of RAM on a WinChip
1822  */
1823 
1824 static u32 __init ramtop(void)		/* 16388 */
1825 {
1826 	int i;
1827 	u32 top = 0;
1828 	u32 clip = 0xFFFFFFFFUL;
1829 
1830 	for (i = 0; i < e820.nr_map; i++) {
1831 		unsigned long start, end;
1832 
1833 		if (e820.map[i].addr > 0xFFFFFFFFUL)
1834 			continue;
1835 		/*
1836 		 *	Don't MCR over reserved space. Ignore the ISA hole;
1837 		 *	we frob around that catastrophe already.
1838 		 */
1839 
1840 		if (e820.map[i].type == E820_RESERVED)
1841 		{
1842 			if(e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip)
1843 				clip = e820.map[i].addr;
1844 			continue;
1845 		}
1846 		start = e820.map[i].addr;
1847 		end = e820.map[i].addr + e820.map[i].size;
1848 		if (start >= end)
1849 			continue;
1850 		if (end > top)
1851 			top = end;
1852 	}
1853 	/* Everything below 'top' should be RAM except for the ISA hole.
1854 	   Because of the limited number of MCRs, NV/ACPI gunk in RAM
1855 	   would otherwise end up inside our MCR range; 'clip' avoids that.
1856 
1857 	   Clip might cause us to MCR insufficient RAM, but that is an
1858 	   acceptable failure mode and should only bite obscure boxes with
1859 	   a VESA hole at 15MB.
1860 
1861 	   The second case where clip kicks in is when the EBDA is marked
1862 	   as reserved. Again we fail safe with reasonable results.
1863 	*/
1864 
1865 	if(top>clip)
1866 		top=clip;
1867 
1868 	return top;
1869 }
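
/*
 *	Editor's note: a worked example of the clipping above.  With RAM
 *	at 0-640K and 1MB-127MB plus a reserved region starting at 127MB,
 *	'top' and 'clip' both end up at 127MB and the MCRs can cover all
 *	usable RAM.  If instead the BIOS reports a reserved VESA hole at
 *	15MB, 'clip' drops to 15MB and we deliberately under-cover RAM
 *	rather than mark reserved space write-combining.
 */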
1870 
1871 /*
1872  *	Compute a set of MCRs to give maximum coverage
1873  */
1874 
1875 static int __init winchip_mcr_compute(int nr, int key)
1876 {
1877 	u32 mem = ramtop();
1878 	u32 root = power2(mem);
1879 	u32 base = root;
1880 	u32 top = root;
1881 	u32 floor = 0;
1882 	int ct = 0;
1883 
1884 	while(ct<nr)
1885 	{
1886 		u32 fspace = 0;
1887 
1888 		/*
1889 		 *	Find the largest block we will fill going upwards
1890 		 */
1891 
1892 		u32 high = power2(mem-top);
1893 
1894 		/*
1895 		 *	Find the largest block we will fill going downwards
1896 		 */
1897 
1898 		u32 low = base/2;
1899 
1900 		/*
1901 		 *	Don't fill below 1Mb going downwards as there
1902 		 *	is an ISA hole in the way.
1903 		 */
1904 
1905 		if(base <= 1024*1024)
1906 			low = 0;
1907 
1908 		/*
1909 		 *	See how much space we could cover by filling below
1910 		 *	the ISA hole
1911 		 */
1912 
1913 		if(floor == 0)
1914 			fspace = 512*1024;
1915 		else if(floor ==512*1024)
1916 			fspace = 128*1024;
1917 
1918 		/* And forget ROM space */
1919 
1920 		/*
1921 		 *	Now install the largest coverage we get
1922 		 */
1923 
1924 		if(fspace > high && fspace > low)
1925 		{
1926 			winchip_mcr_insert(ct, floor, fspace, key);
1927 			floor += fspace;
1928 		}
1929 		else if(high > low)
1930 		{
1931 			winchip_mcr_insert(ct, top, high, key);
1932 			top += high;
1933 		}
1934 		else if(low > 0)
1935 		{
1936 			base -= low;
1937 			winchip_mcr_insert(ct, base, low, key);
1938 		}
1939 		else break;
1940 		ct++;
1941 	}
1942 	/*
1943 	 *	We loaded ct values. We now need to set the mask. The caller
1944 	 *	must do this bit.
1945 	 */
1946 
1947 	return ct;
1948 }
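
/*
 *	Editor's note: a minimal user-space sketch of the greedy fill
 *	above, with a hypothetical print-only mcr_insert() standing in
 *	for winchip_mcr_insert().  For mem = 96MB and nr = 6 it selects
 *	32MB@32MB, 32MB@64MB, 16MB@16MB, 8MB@8MB, 4MB@4MB and 2MB@2MB,
 *	leaving only 0-2MB uncovered.  Illustration only:
 */
#if 0
#include <stdio.h>

typedef unsigned int u32;

static u32 power2(u32 x)
{
	u32 s = 1;
	while (s <= x)
		s <<= 1;
	return s >> 1;
}

/* Hypothetical stand-in: just report the range an MCR would cover */
static void mcr_insert(int reg, u32 base, u32 size)
{
	printf("MCR%d: %uMB at %uMB\n", reg, size >> 20, base >> 20);
}

int main(void)
{
	u32 mem = 96U << 20;	/* pretend ramtop() returned 96MB */
	u32 base = power2(mem), top = base, floor = 0;
	int ct = 0;

	while (ct < 6) {
		u32 fspace = 0;
		u32 high = power2(mem - top);	/* largest fill upwards */
		u32 low = base / 2;		/* largest fill downwards */

		if (base <= 1024 * 1024)	/* don't cross the ISA hole */
			low = 0;
		if (floor == 0)			/* space below the ISA hole */
			fspace = 512 * 1024;
		else if (floor == 512 * 1024)
			fspace = 128 * 1024;

		if (fspace > high && fspace > low) {
			mcr_insert(ct, floor, fspace);
			floor += fspace;
		} else if (high > low) {
			mcr_insert(ct, top, high);
			top += high;
		} else if (low > 0) {
			base -= low;
			mcr_insert(ct, base, low);
		} else
			break;
		ct++;
	}
	return 0;
}
#endif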
1949 
1950 static void __init winchip_create_optimal_mcr(void)
1951 {
1952 	int i;
1953 	/*
1954 	 *	Allocate up to 6 MCRs to mark as much of RAM as possible
1955 	 *	as write combining and weak write ordered.
1956 	 *
1957 	 *	To experiment with: Linux never uses stack operations for
1958 	 *	mmio spaces so we could globally enable stack operation wc
1959 	 *
1960 	 *	Load the registers with type 31 - full write combining, all
1961 	 *	writes weakly ordered.
1962 	 */
1963 	int used = winchip_mcr_compute(6, 31);
1964 
1965 	/*
1966 	 *	Wipe unused MCRs
1967 	 */
1968 
1969 	for(i=used;i<8;i++)
1970 		wrmsr(MSR_IDT_MCR0+i, 0, 0);
1971 }
1972 
1973 static void __init winchip2_create_optimal_mcr(void)
1974 {
1975 	u32 lo, hi;
1976 	int i;
1977 
1978 	/*
1979 	 *	Allocate up to 6 MCRs to mark as much of RAM as possible
1980 	 *	as write combining, weak store ordered.
1981 	 *
1982 	 *	Load the registers with type 25
1983 	 *		8	-	weak write ordering
1984 	 *		16	-	weak read ordering
1985 	 *		1	-	write combining
1986 	 */
1987 
1988 	int used = winchip_mcr_compute(6, 25);
1989 
1990 	/*
1991 	 *	Mark the registers we are using.
1992 	 */
1993 
1994 	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
1995 	for(i=0;i<used;i++)
1996 		lo|=1<<(9+i);
1997 	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
1998 
1999 	/*
2000 	 *	Wipe unused MCRs
2001 	 */
2002 
2003 	for(i=used;i<8;i++)
2004 		wrmsr(MSR_IDT_MCR0+i, 0, 0);
2005 }
2006 
2007 /*
2008  *	Handle the MCR key on the Winchip 2.
2009  */
2010 
2011 static void __init winchip2_unprotect_mcr(void)
2012 {
2013 	u32 lo, hi;
2014 	u32 key;
2015 
2016 	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
2017 	lo&=~0x1C0;	/* blank bits 8-6 */
2018 	key = (lo>>17) & 7;
2019 	lo |= key<<6;	/* replace with unlock key */
2020 	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
2021 }
2022 
2023 static void __init winchip2_protect_mcr(void)
2024 {
2025 	u32 lo, hi;
2026 
2027 	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
2028 	lo&=~0x1C0;	/* blank bits 8-6 */
2029 	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
2030 }
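
/*
 *	Editor's note: the 3-bit MCR key is readable in bits 19-17 of
 *	MSR_IDT_MCR_CTRL; copying it into bits 8-6 unlocks the register
 *	and clearing bits 8-6 locks it again, which is all the two
 *	helpers above do.
 */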
2031 
2032 #endif
2033 
2034 static void __init init_c3(struct cpuinfo_x86 *c)
2035 {
2036 	u32  lo, hi;
2037 
2038 	/* Test for Centaur Extended Feature Flags presence */
2039 	if (cpuid_eax(0xC0000000) >= 0xC0000001) {
2040 		/* store Centaur Extended Feature Flags as
2041 		 * word 5 of the CPU capability bit array
2042 		 */
2043 		c->x86_capability[5] = cpuid_edx(0xC0000001);
2044 	}
2045 
2046 	switch (c->x86_model) {
2047 		case 6 ... 8:		/* Cyrix III family */
2048 			rdmsr (MSR_VIA_FCR, lo, hi);
2049 			lo |= (1<<1 | 1<<7);	/* Report CX8 & enable PGE */
2050 			wrmsr (MSR_VIA_FCR, lo, hi);
2051 
2052 			set_bit(X86_FEATURE_CX8, &c->x86_capability);
2053 			set_bit(X86_FEATURE_3DNOW, &c->x86_capability);
2054 
2055 			/* fall through */
2056 
2057 		case 9:	/* Nehemiah */
2058 		default:
2059 			get_model_name(c);
2060 			display_cacheinfo(c);
2061 			break;
2062 	}
2063 }
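
/*
 *	Editor's note: a user-space sketch of the Centaur extended-leaf
 *	probe in init_c3() above (GCC inline asm; illustration only):
 */
#if 0
#include <stdio.h>

static unsigned int cpuid_reg(unsigned int op, int want_edx)
{
	unsigned int eax, ebx, ecx, edx;

	__asm__("cpuid"
		: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
		: "a" (op));
	return want_edx ? edx : eax;
}

int main(void)
{
	/* Centaur/VIA vendor leaves live at 0xC0000000 */
	if (cpuid_reg(0xC0000000, 0) >= 0xC0000001)
		printf("Centaur extended flags: %08x\n",
		       cpuid_reg(0xC0000001, 1));
	else
		printf("no Centaur extended leaves\n");
	return 0;
}
#endif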
2064 
2065 static void __init init_centaur(struct cpuinfo_x86 *c)
2066 {
2067 	enum {
2068 		ECX8=1<<1,
2069 		EIERRINT=1<<2,
2070 		DPM=1<<3,
2071 		DMCE=1<<4,
2072 		DSTPCLK=1<<5,
2073 		ELINEAR=1<<6,
2074 		DSMC=1<<7,
2075 		DTLOCK=1<<8,
2076 		EDCTLB=1<<8,
2077 		EMMX=1<<9,
2078 		DPDC=1<<11,
2079 		EBRPRED=1<<12,
2080 		DIC=1<<13,
2081 		DDC=1<<14,
2082 		DNA=1<<15,
2083 		ERETSTK=1<<16,
2084 		E2MMX=1<<19,
2085 		EAMD3D=1<<20,
2086 	};
2087 
2088 	char *name;
2089 	u32  fcr_set=0;
2090 	u32  fcr_clr=0;
2091 	u32  lo,hi,newlo;
2092 	u32  aa,bb,cc,dd;
2093 
2094 	/* Bit 31 in normal CPUID is used for a nonstandard 3DNow! ID;
2095 	   3DNow! is identified by bit 31 in extended CPUID (1*32+31) anyway */
2096 	clear_bit(0*32+31, &c->x86_capability);
2097 
2098 	switch (c->x86) {
2099 
2100 		case 5:
2101 			switch(c->x86_model) {
2102 			case 4:
2103 				name="C6";
2104 				fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
2105 				fcr_clr=DPDC;
2106 				printk(KERN_NOTICE "Disabling bugged TSC.\n");
2107 				clear_bit(X86_FEATURE_TSC, &c->x86_capability);
2108 #ifdef CONFIG_X86_OOSTORE
2109 				winchip_create_optimal_mcr();
2110 				/* Enable
2111 					write combining on non-stack, non-string
2112 					write combining on string, all types
2113 					weak write ordering
2114 
2115 				   The C6 original lacks weak read order
2116 
2117 				   Note 0x120 is write only on Winchip 1 */
2118 
2119 				wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
2120 #endif
2121 				break;
2122 			case 8:
2123 				switch(c->x86_mask) {
2124 				default:
2125 					name="2";
2126 					break;
2127 				case 7 ... 9:
2128 					name="2A";
2129 					break;
2130 				case 10 ... 15:
2131 					name="2B";
2132 					break;
2133 				}
2134 				fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
2135 				fcr_clr=DPDC;
2136 #ifdef CONFIG_X86_OOSTORE
2137 				winchip2_unprotect_mcr();
2138 				winchip2_create_optimal_mcr();
2139 				rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
2140 				/* Enable
2141 					write combining on non-stack, non-string
2142 					write combining on string, all types
2143 					weak write ordering
2144 				*/
2145 				lo|=31;
2146 				wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
2147 				winchip2_protect_mcr();
2148 #endif
2149 				break;
2150 			case 9:
2151 				name="3";
2152 				fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
2153 				fcr_clr=DPDC;
2154 #ifdef CONFIG_X86_OOSTORE
2155 				winchip2_unprotect_mcr();
2156 				winchip2_create_optimal_mcr();
2157 				rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
2158 				/* Enable
2159 					write combining on non-stack, non-string
2160 					write combining on string, all types
2161 					weak write ordering
2162 				*/
2163 				lo|=31;
2164 				wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
2165 				winchip2_protect_mcr();
2166 #endif
2167 				break;
2168 			case 10:
2169 				name="4";
2170 				/* no info on the WC4 yet */
2171 				break;
2172 			default:
2173 				name="??";
2174 			}
2175 
2176 			rdmsr(MSR_IDT_FCR1, lo, hi);
2177 			newlo=(lo|fcr_set) & (~fcr_clr);
2178 
2179 			if (newlo!=lo) {
2180 				printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo );
2181 				wrmsr(MSR_IDT_FCR1, newlo, hi );
2182 			} else {
2183 				printk(KERN_INFO "Centaur FCR is 0x%X\n",lo);
2184 			}
2185 			/* Emulate MTRRs using Centaur's MCR. */
2186 			set_bit(X86_FEATURE_CENTAUR_MCR, &c->x86_capability);
2187 			/* Report CX8 */
2188 			set_bit(X86_FEATURE_CX8, &c->x86_capability);
2189 			/* Set 3DNow! on Winchip 2 and above. */
2190 			if (c->x86_model >=8)
2191 				set_bit(X86_FEATURE_3DNOW, &c->x86_capability);
2192 			/* See if we can find out some more. */
2193 			if ( cpuid_eax(0x80000000) >= 0x80000005 ) {
2194 				/* Yes, we can. */
2195 				cpuid(0x80000005,&aa,&bb,&cc,&dd);
2196 				/* Add L1 data and code cache sizes. */
2197 				c->x86_cache_size = (cc>>24)+(dd>>24);
2198 			}
2199 			sprintf( c->x86_model_id, "WinChip %s", name );
2200 			break;
2201 
2202 		case 6:
2203 			init_c3(c);
2204 			break;
2205 	}
2206 }
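
/*
 *	Editor's note: the FCR update above is a plain set/clear mask,
 *	newlo = (lo | fcr_set) & ~fcr_clr.  For a hypothetical C6 with
 *	lo = 0x00000800 (only DPDC set), fcr_set = ECX8|DSMC|EDCTLB|
 *	EMMX|ERETSTK = 0x00010382 and fcr_clr = DPDC = 0x00000800, so
 *	newlo = 0x00010382: the wanted features on, the broken one off.
 */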
2207 
2208 
2209 static void __init init_transmeta(struct cpuinfo_x86 *c)
2210 {
2211 	unsigned int cap_mask, uk, max, dummy;
2212 	unsigned int cms_rev1, cms_rev2;
2213 	unsigned int cpu_rev, cpu_freq, cpu_flags;
2214 	char cpu_info[65];
2215 
2216 	get_model_name(c);	/* Same as AMD/Cyrix */
2217 	display_cacheinfo(c);
2218 
2219 	/* Print CMS and CPU revision */
2220 	max = cpuid_eax(0x80860000);
2221 	if ( max >= 0x80860001 ) {
2222 		cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
2223 		printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
2224 		       (cpu_rev >> 24) & 0xff,
2225 		       (cpu_rev >> 16) & 0xff,
2226 		       (cpu_rev >> 8) & 0xff,
2227 		       cpu_rev & 0xff,
2228 		       cpu_freq);
2229 	}
2230 	if ( max >= 0x80860002 ) {
2231 		cpuid(0x80860002, &dummy, &cms_rev1, &cms_rev2, &dummy);
2232 		printk(KERN_INFO "CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
2233 		       (cms_rev1 >> 24) & 0xff,
2234 		       (cms_rev1 >> 16) & 0xff,
2235 		       (cms_rev1 >> 8) & 0xff,
2236 		       cms_rev1 & 0xff,
2237 		       cms_rev2);
2238 	}
2239 	if ( max >= 0x80860006 ) {
2240 		cpuid(0x80860003,
2241 		      (void *)&cpu_info[0],
2242 		      (void *)&cpu_info[4],
2243 		      (void *)&cpu_info[8],
2244 		      (void *)&cpu_info[12]);
2245 		cpuid(0x80860004,
2246 		      (void *)&cpu_info[16],
2247 		      (void *)&cpu_info[20],
2248 		      (void *)&cpu_info[24],
2249 		      (void *)&cpu_info[28]);
2250 		cpuid(0x80860005,
2251 		      (void *)&cpu_info[32],
2252 		      (void *)&cpu_info[36],
2253 		      (void *)&cpu_info[40],
2254 		      (void *)&cpu_info[44]);
2255 		cpuid(0x80860006,
2256 		      (void *)&cpu_info[48],
2257 		      (void *)&cpu_info[52],
2258 		      (void *)&cpu_info[56],
2259 		      (void *)&cpu_info[60]);
2260 		cpu_info[64] = '\0';
2261 		printk(KERN_INFO "CPU: %s\n", cpu_info);
2262 	}
2263 
2264 	/* Unhide possibly hidden capability flags */
2265 	rdmsr(0x80860004, cap_mask, uk);
2266 	wrmsr(0x80860004, ~0, uk);
2267 	c->x86_capability[0] = cpuid_edx(0x00000001);
2268 	wrmsr(0x80860004, cap_mask, uk);
2269 
2270 	/* If we can run i686 user-space code, call us an i686 */
2271 #define USER686 ((1<<X86_FEATURE_TSC)|(1<<X86_FEATURE_CX8)|(1<<X86_FEATURE_CMOV))
2272 	if ( c->x86 == 5 && (c->x86_capability[0] & USER686) == USER686 )
2273 	     c->x86 = 6;
2274 }
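
/*
 *	Editor's note: leaves 0x80860003..6 each return 16 ASCII bytes in
 *	EAX/EBX/ECX/EDX, so the four cpuid() calls above assemble the
 *	64-byte information string.  A user-space sketch of the same
 *	trick (illustration only):
 */
#if 0
#include <stdio.h>

static void cpuid4(unsigned int op, unsigned int *a, unsigned int *b,
		   unsigned int *c, unsigned int *d)
{
	__asm__("cpuid"
		: "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
		: "a" (op));
}

int main(void)
{
	char info[65];
	unsigned int i;

	for (i = 0; i < 4; i++)
		cpuid4(0x80860003 + i,
		       (unsigned int *)&info[16 * i + 0],
		       (unsigned int *)&info[16 * i + 4],
		       (unsigned int *)&info[16 * i + 8],
		       (unsigned int *)&info[16 * i + 12]);
	info[64] = '\0';
	printf("%s\n", info);
	return 0;
}
#endif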
2275 
2276 
2277 static void __init init_rise(struct cpuinfo_x86 *c)
2278 {
2279 	printk("CPU: Rise iDragon");
2280 	if (c->x86_model > 2)
2281 		printk(" II");
2282 	printk("\n");
2283 
2284 	/* Unhide possibly hidden capability flags.
2285 	   The mp6 iDragon family doesn't have MSRs;
2286 	   we switch on extra features with this cpuid weirdness: */
2287 	__asm__ (
2288 		"movl $0x6363452a, %%eax\n\t"
2289 		"movl $0x3231206c, %%ecx\n\t"
2290 		"movl $0x2a32313a, %%edx\n\t"
2291 		"cpuid\n\t"
2292 		"movl $0x63634523, %%eax\n\t"
2293 		"movl $0x32315f6c, %%ecx\n\t"
2294 		"movl $0x2333313a, %%edx\n\t"
2295 		"cpuid\n\t" : : : "eax", "ebx", "ecx", "edx"
2296 	);
2297 	set_bit(X86_FEATURE_CX8, &c->x86_capability);
2298 }
2299 
2300 
2301 extern void trap_init_f00f_bug(void);
2302 
2303 #define LVL_1_INST      1
2304 #define LVL_1_DATA      2
2305 #define LVL_2           3
2306 #define LVL_3           4
2307 #define LVL_TRACE       5
2308 
2309 struct _cache_table
2310 {
2311         unsigned char descriptor;
2312         char cache_type;
2313         short size;
2314 };
2315 
2316 /* all the cache descriptor types we care about (no TLB entries) */
2317 static struct _cache_table cache_table[] __initdata =
2318 {
2319 	{ 0x06, LVL_1_INST, 8 },
2320 	{ 0x08, LVL_1_INST, 16 },
2321 	{ 0x0A, LVL_1_DATA, 8 },
2322 	{ 0x0C, LVL_1_DATA, 16 },
2323 	{ 0x22, LVL_3,      512 },
2324 	{ 0x23, LVL_3,      1024 },
2325 	{ 0x25, LVL_3,      2048 },
2326 	{ 0x29, LVL_3,      4096 },
2327 	{ 0x2c, LVL_1_DATA, 32 },
2328 	{ 0x30, LVL_1_INST, 32 },
2329 	{ 0x39, LVL_2,      128 },
2330 	{ 0x3b, LVL_2,      128 },
2331 	{ 0x3C, LVL_2,      256 },
2332 	{ 0x41, LVL_2,      128 },
2333 	{ 0x42, LVL_2,      256 },
2334 	{ 0x43, LVL_2,      512 },
2335 	{ 0x44, LVL_2,      1024 },
2336 	{ 0x45, LVL_2,      2048 },
2337 	{ 0x60, LVL_1_DATA, 16 },
2338 	{ 0x66, LVL_1_DATA, 8 },
2339 	{ 0x67, LVL_1_DATA, 16 },
2340 	{ 0x68, LVL_1_DATA, 32 },
2341 	{ 0x70, LVL_TRACE,  12 },
2342 	{ 0x71, LVL_TRACE,  16 },
2343 	{ 0x72, LVL_TRACE,  32 },
2344 	{ 0x79, LVL_2,      128 },
2345 	{ 0x7A, LVL_2,      256 },
2346 	{ 0x7B, LVL_2,      512 },
2347 	{ 0x7C, LVL_2,      1024 },
2348 	{ 0x82, LVL_2,      256 },
2349 	{ 0x83, LVL_2,      512 },
2350 	{ 0x84, LVL_2,      1024 },
2351 	{ 0x85, LVL_2,      2048 },
2352 	{ 0x86, LVL_2,      512 },
2353 	{ 0x87, LVL_2,      1024 },
2354 	{ 0x00, 0, 0}
2355 };
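
/*
 *	Editor's note: a user-space sketch of the leaf-2 walk that
 *	init_intel() below performs against this table.  AL of the first
 *	call says how many times the leaf must be read; each register
 *	with bit 31 clear then carries four descriptor bytes, and byte 0
 *	of EAX is the iteration count rather than a descriptor.
 *	Illustration only:
 */
#if 0
#include <stdio.h>

static void cpuid4(unsigned int op, unsigned int *a, unsigned int *b,
		   unsigned int *c, unsigned int *d)
{
	__asm__("cpuid"
		: "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
		: "a" (op));
}

int main(void)
{
	unsigned int regs[4];
	unsigned char *dp = (unsigned char *)regs;
	int i, j, n;

	cpuid4(2, &regs[0], &regs[1], &regs[2], &regs[3]);
	n = regs[0] & 0xFF;

	for (i = 0; i < n; i++) {
		cpuid4(2, &regs[0], &regs[1], &regs[2], &regs[3]);
		for (j = 0; j < 4; j++)		/* bit 31 set: unknown format */
			if (regs[j] & 0x80000000)
				regs[j] = 0;
		for (j = 1; j < 16; j++)	/* byte 0 is the level count */
			if (dp[j])
				printf("descriptor 0x%02x\n", dp[j]);
	}
	return 0;
}
#endif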
2356 
2357 static void __init init_intel(struct cpuinfo_x86 *c)
2358 {
2359 	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
2360 	char *p = NULL;
2361 #ifndef CONFIG_X86_F00F_WORKS_OK
2362 	static int f00f_workaround_enabled = 0;
2363 
2364 	/*
2365 	 * All current models of Pentium and Pentium with MMX technology CPUs
2366 	 * have the F0 0F bug, which lets nonprivileged users lock up the system.
2367 	 * Note that the workaround should only be initialized once...
2368 	 */
2369 	c->f00f_bug = 0;
2370 	if (c->x86 == 5) {
2371 		c->f00f_bug = 1;
2372 		if (!f00f_workaround_enabled) {
2373 			trap_init_f00f_bug();
2374 			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
2375 			f00f_workaround_enabled = 1;
2376 		}
2377 	}
2378 #endif /* CONFIG_X86_F00F_WORKS_OK */
2379 
2380 	if (c->cpuid_level > 1) {
2381 		/* supports eax=2  call */
2382 		int i, j, n;
2383 		int regs[4];
2384 		unsigned char *dp = (unsigned char *)regs;
2385 
2386 		/* Number of times to iterate */
2387 		n = cpuid_eax(2) & 0xFF;
2388 
2389 		for ( i = 0 ; i < n ; i++ ) {
2390 			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
2391 
2392 			/* If bit 31 is set, this is an unknown format */
2393 			for ( j = 0 ; j < 4 ; j++ ) {
2394 				if ( regs[j] < 0 ) regs[j] = 0;
2395 			}
2396 
2397 			/* Byte 0 is level count, not a descriptor */
2398 			for ( j = 1 ; j < 16 ; j++ ) {
2399 				unsigned char des = dp[j];
2400 				unsigned char k = 0;
2401 
2402 				/* look up this descriptor in the table */
2403 				while (cache_table[k].descriptor != 0)
2404 				{
2405 					if (cache_table[k].descriptor == des) {
2406 						switch (cache_table[k].cache_type) {
2407 						case LVL_1_INST:
2408 							l1i += cache_table[k].size;
2409 							break;
2410 						case LVL_1_DATA:
2411 							l1d += cache_table[k].size;
2412 							break;
2413 						case LVL_2:
2414 							l2 += cache_table[k].size;
2415 							break;
2416 						case LVL_3:
2417 							l3 += cache_table[k].size;
2418 							break;
2419 						case LVL_TRACE:
2420 							trace += cache_table[k].size;
2421 							break;
2422 						}
2423 						break;
2424 					}
2425 
2426 					k++;
2427 				}
2428 			}
2429 		}
2430 
2431 		/* Intel PIII Tualatin. This comes in two flavours.
2432 		 * One has 256KB of L2 cache, the other 512KB. We have no
2433 		 * way to determine which, so we use a boot-time override
2434 		 * for the 512KB model, and assume 256KB otherwise.
2435 		 */
2436 		if ((c->x86 == 6) && (c->x86_model == 11) && (l2 == 0))
2437 			l2 = 256;
2438 		/* Allow user to override all this if necessary. */
2439 		if (cachesize_override != -1)
2440 			l2 = cachesize_override;
2441 
2442 		if ( trace )
2443 			printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
2444 		else if ( l1i )
2445 			printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
2446 		if ( l1d )
2447 			printk(", L1 D cache: %dK\n", l1d);
2448 		else
2449 			printk("\n");
2450 
2451 		if ( l2 )
2452 			printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
2453 		if ( l3 )
2454 			printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
2455 
2456 		/*
2457 		 * This assumes the L3 cache is shared; it typically lives in
2458 		 * the northbridge.  The L1 caches are included in the L2
2459 		 * cache, and so should not be included for the purpose of
2460 		 * SMP switching weights.
2461 		 */
2462 		c->x86_cache_size = l2 ? l2 : (l1i+l1d);
2463 	}
2464 
2465 	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */
2466 	if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
2467 		clear_bit(X86_FEATURE_SEP, &c->x86_capability);
2468 
2469 	/* Names for the Pentium II/Celeron processors
2470 	   detectable only by also checking the cache size.
2471 	   Dixon is NOT a Celeron. */
2472 	if (c->x86 == 6) {
2473 		switch (c->x86_model) {
2474 		case 5:
2475 			if (l2 == 0)
2476 				p = "Celeron (Covington)";
2477 			if (l2 == 256)
2478 				p = "Mobile Pentium II (Dixon)";
2479 			break;
2480 
2481 		case 6:
2482 			if (l2 == 128)
2483 				p = "Celeron (Mendocino)";
2484 			break;
2485 
2486 		case 8:
2487 			if (l2 == 128)
2488 				p = "Celeron (Coppermine)";
2489 			break;
2490 		}
2491 	}
2492 
2493 	if ( p )
2494 		strcpy(c->x86_model_id, p);
2495 
2496 #ifdef CONFIG_SMP
2497 	if (test_bit(X86_FEATURE_HT, &c->x86_capability)) {
2498 		extern	int phys_proc_id[NR_CPUS];
2499 
2500 		u32 	eax, ebx, ecx, edx;
2501 		int 	index_lsb, index_msb, tmp;
2502 		int	initial_apic_id;
2503 		int 	cpu = smp_processor_id();
2504 
2505 		cpuid(1, &eax, &ebx, &ecx, &edx);
2506 		smp_num_siblings = (ebx & 0xff0000) >> 16;
2507 
2508 		if (smp_num_siblings == 1) {
2509 			printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
2510 		} else if (smp_num_siblings > 1 ) {
2511 			index_lsb = 0;
2512 			index_msb = 31;
2513 			/*
2514 			 * At this point we only support two siblings per
2515 			 * processor package.
2516 			 */
2517 #define NR_SIBLINGS	2
2518 			if (smp_num_siblings != NR_SIBLINGS) {
2519 				printk(KERN_WARNING "CPU: Unsupported number of siblings %d\n", smp_num_siblings);
2520 				smp_num_siblings = 1;
2521 				return;
2522 			}
2523 			tmp = smp_num_siblings;
2524 			while ((tmp & 1) == 0) {
2525 				tmp >>=1 ;
2526 				index_lsb++;
2527 			}
2528 			tmp = smp_num_siblings;
2529 			while ((tmp & 0x80000000 ) == 0) {
2530 				tmp <<=1 ;
2531 				index_msb--;
2532 			}
2533 			if (index_lsb != index_msb )
2534 				index_msb++;
2535 			initial_apic_id = ebx >> 24 & 0xff;
2536 			phys_proc_id[cpu] = initial_apic_id >> index_msb;
2537 
2538 			printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
2539                                phys_proc_id[cpu]);
2540 		}
2541 
2542 	}
2543 #endif
2544 }
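
/*
 *	Editor's note: a worked example of the HT decode above.  With two
 *	siblings per package EBX[23:16] = 2, the loops leave index_lsb =
 *	index_msb = 1, and the physical package ID is the initial APIC ID
 *	shifted right by one: APIC IDs 0 and 1 map to package 0, IDs 2
 *	and 3 to package 1.
 */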
2545 
2546 void __init get_cpu_vendor(struct cpuinfo_x86 *c)
2547 {
2548 	char *v = c->x86_vendor_id;
2549 
2550 	if (!strcmp(v, "GenuineIntel"))
2551 		c->x86_vendor = X86_VENDOR_INTEL;
2552 	else if (!strcmp(v, "AuthenticAMD"))
2553 		c->x86_vendor = X86_VENDOR_AMD;
2554 	else if (!strcmp(v, "CyrixInstead"))
2555 		c->x86_vendor = X86_VENDOR_CYRIX;
2556 	else if (!strcmp(v, "Geode by NSC"))
2557 		c->x86_vendor = X86_VENDOR_NSC;
2558 	else if (!strcmp(v, "UMC UMC UMC "))
2559 		c->x86_vendor = X86_VENDOR_UMC;
2560 	else if (!strcmp(v, "CentaurHauls"))
2561 		c->x86_vendor = X86_VENDOR_CENTAUR;
2562 	else if (!strcmp(v, "NexGenDriven"))
2563 		c->x86_vendor = X86_VENDOR_NEXGEN;
2564 	else if (!strcmp(v, "RiseRiseRise"))
2565 		c->x86_vendor = X86_VENDOR_RISE;
2566 	else if (!strcmp(v, "GenuineTMx86") ||
2567 		 !strcmp(v, "TransmetaCPU"))
2568 		c->x86_vendor = X86_VENDOR_TRANSMETA;
2569 	else if (!strcmp(v, "SiS SiS SiS "))
2570 		c->x86_vendor = X86_VENDOR_SIS;
2571 	else
2572 		c->x86_vendor = X86_VENDOR_UNKNOWN;
2573 }
2574 
2575 struct cpu_model_info {
2576 	int vendor;
2577 	int family;
2578 	char *model_names[16];
2579 };
2580 
2581 /* Naming convention should be: <Name> [(<Codename>)] */
2582 /* This table is only used if init_<vendor>() below doesn't set the model name; */
2583 /* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
2584 static struct cpu_model_info cpu_models[] __initdata = {
2585 	{ X86_VENDOR_INTEL,	4,
2586 	  { "486 DX-25/33", "486 DX-50", "486 SX", "486 DX/2", "486 SL",
2587 	    "486 SX/2", NULL, "486 DX/2-WB", "486 DX/4", "486 DX/4-WB", NULL,
2588 	    NULL, NULL, NULL, NULL, NULL }},
2589 	{ X86_VENDOR_INTEL,	5,
2590 	  { "Pentium 60/66 A-step", "Pentium 60/66", "Pentium 75 - 200",
2591 	    "OverDrive PODP5V83", "Pentium MMX", NULL, NULL,
2592 	    "Mobile Pentium 75 - 200", "Mobile Pentium MMX", NULL, NULL, NULL,
2593 	    NULL, NULL, NULL, NULL }},
2594 	{ X86_VENDOR_INTEL,	6,
2595 	  { "Pentium Pro A-step", "Pentium Pro", NULL, "Pentium II (Klamath)",
2596 	    NULL, "Pentium II (Deschutes)", "Mobile Pentium II",
2597 	    "Pentium III (Katmai)", "Pentium III (Coppermine)", NULL,
2598 	    "Pentium III (Cascades)", NULL, NULL, NULL, NULL }},
2599 	{ X86_VENDOR_AMD,	4,
2600 	  { NULL, NULL, NULL, "486 DX/2", NULL, NULL, NULL, "486 DX/2-WB",
2601 	    "486 DX/4", "486 DX/4-WB", NULL, NULL, NULL, NULL, "Am5x86-WT",
2602 	    "Am5x86-WB" }},
2603 	{ X86_VENDOR_AMD,	5, /* Is this really necessary?? */
2604 	  { "K5/SSA5", "K5",
2605 	    "K5", "K5", NULL, NULL,
2606 	    "K6", "K6", "K6-2",
2607 	    "K6-3", NULL, NULL, NULL, NULL, NULL, NULL }},
2608 	{ X86_VENDOR_AMD,	6, /* Is this really necessary?? */
2609 	  { "Athlon", "Athlon",
2610 	    "Athlon", NULL, "Athlon", NULL,
2611 	    NULL, NULL, NULL,
2612 	    NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
2613 	{ X86_VENDOR_UMC,	4,
2614 	  { NULL, "U5D", "U5S", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2615 	    NULL, NULL, NULL, NULL, NULL, NULL }},
2616 	{ X86_VENDOR_NEXGEN,	5,
2617 	  { "Nx586", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2618 	    NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
2619 	{ X86_VENDOR_RISE,	5,
2620 	  { "iDragon", NULL, "iDragon", NULL, NULL, NULL, NULL,
2621 	    NULL, "iDragon II", "iDragon II", NULL, NULL, NULL, NULL, NULL, NULL }},
2622 	{ X86_VENDOR_SIS,	5,
2623 	  { NULL, NULL, NULL, NULL, "SiS55x", NULL, NULL,
2624 	    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
2625 };
2626 
2627 /* Look up CPU names by table lookup. */
2628 static char __init *table_lookup_model(struct cpuinfo_x86 *c)
2629 {
2630 	struct cpu_model_info *info = cpu_models;
2631 	int i;
2632 
2633 	if ( c->x86_model >= 16 )
2634 		return NULL;	/* Range check */
2635 
2636 	for ( i = 0 ; i < sizeof(cpu_models)/sizeof(struct cpu_model_info) ; i++ ) {
2637 		if ( info->vendor == c->x86_vendor &&
2638 		     info->family == c->x86 ) {
2639 			return info->model_names[c->x86_model];
2640 		}
2641 		info++;
2642 	}
2643 	return NULL;		/* Not found */
2644 }
2645 
2646 /*
2647  *	Detect a NexGen CPU running without BIOS hypercode new enough
2648  *	to have CPUID. (Thanks to Herbert Oppmann)
2649  */
2650 
2651 static int __init deep_magic_nexgen_probe(void)
2652 {
2653 	int ret;
2654 
2655 	__asm__ __volatile__ (
2656 		"	movw	$0x5555, %%ax\n"
2657 		"	xorw	%%dx,%%dx\n"
2658 		"	movw	$2, %%cx\n"
2659 		"	divw	%%cx\n"
2660 		"	movl	$0, %%eax\n"
2661 		"	jnz	1f\n"
2662 		"	movl	$1, %%eax\n"
2663 		"1:\n"
2664 		: "=a" (ret) : : "cx", "dx" );
2665 	return  ret;
2666 }
2667 
2668 static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
2669 {
2670 	if( test_bit(X86_FEATURE_PN, &c->x86_capability) &&
2671 	    disable_x86_serial_nr ) {
2672 		/* Disable processor serial number */
2673 		unsigned long lo,hi;
2674 		rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
2675 		lo |= 0x200000;
2676 		wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
2677 		printk(KERN_NOTICE "CPU serial number disabled.\n");
2678 		clear_bit(X86_FEATURE_PN, &c->x86_capability);
2679 
2680 		/* Disabling the serial number may affect the cpuid level */
2681 		c->cpuid_level = cpuid_eax(0);
2682 	}
2683 }
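
/*
 *	Editor's note: bit 21 (0x200000) of MSR_IA32_BBL_CR_CTL is the
 *	PSN-disable bit on PIII-class parts.  Disabling the PSN can drop
 *	the maximum standard CPUID leaf from 3 to 2, which is why
 *	cpuid_level is re-read above.
 */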
2684 
2685 
2686 static int __init x86_serial_nr_setup(char *s)
2687 {
2688 	disable_x86_serial_nr = 0;
2689 	return 1;
2690 }
2691 __setup("serialnumber", x86_serial_nr_setup);
2692 
2693 static int __init x86_fxsr_setup(char * s)
2694 {
2695 	set_bit(X86_FEATURE_XMM, disabled_x86_caps);
2696 	set_bit(X86_FEATURE_FXSR, disabled_x86_caps);
2697 	return 1;
2698 }
2699 __setup("nofxsr", x86_fxsr_setup);
2700 
2701 
2702 /* Standard macro to see if a specific flag is changeable */
2703 static inline int flag_is_changeable_p(u32 flag)
2704 {
2705 	u32 f1, f2;
2706 
2707 	asm("pushfl\n\t"
2708 	    "pushfl\n\t"
2709 	    "popl %0\n\t"
2710 	    "movl %0,%1\n\t"
2711 	    "xorl %2,%0\n\t"
2712 	    "pushl %0\n\t"
2713 	    "popfl\n\t"
2714 	    "pushfl\n\t"
2715 	    "popl %0\n\t"
2716 	    "popfl\n\t"
2717 	    : "=&r" (f1), "=&r" (f2)
2718 	    : "ir" (flag));
2719 
2720 	return ((f1^f2) & flag) != 0;
2721 }
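
/*
 *	Editor's note: the same EFLAGS-toggle trick works unchanged in
 *	user space.  A minimal 32-bit x86 sketch (illustration only):
 */
#if 0
#include <stdio.h>

#define X86_EFLAGS_ID 0x00200000	/* bit 21: CPUID detection flag */

static int flag_is_changeable(unsigned int flag)
{
	unsigned int f1, f2;

	asm("pushfl\n\t"
	    "pushfl\n\t"
	    "popl %0\n\t"
	    "movl %0,%1\n\t"
	    "xorl %2,%0\n\t"
	    "pushl %0\n\t"
	    "popfl\n\t"
	    "pushfl\n\t"
	    "popl %0\n\t"
	    "popfl\n\t"
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));
	return ((f1 ^ f2) & flag) != 0;
}

int main(void)
{
	printf("CPUID is %savailable\n",
	       flag_is_changeable(X86_EFLAGS_ID) ? "" : "not ");
	return 0;
}
#endif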
2722 
2723 
2724 /* Probe for the CPUID instruction */
2725 static int __init have_cpuid_p(void)
2726 {
2727 	return flag_is_changeable_p(X86_EFLAGS_ID);
2728 }
2729 
2730 /*
2731  * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
2732  * by the fact that they preserve the flags across the division of 5/2.
2733  * PII and PPro exhibit this behavior too, but they have cpuid available.
2734  */
2735 
2736 /*
2737  * Perform the Cyrix 5/2 test. A Cyrix won't change
2738  * the flags, while other 486 chips will.
2739  */
2740 static inline int test_cyrix_52div(void)
2741 {
2742 	unsigned int test;
2743 
2744 	__asm__ __volatile__(
2745 	     "sahf\n\t"		/* clear flags (%eax = 0x0005) */
2746 	     "div %b2\n\t"	/* divide 5 by 2 */
2747 	     "lahf"		/* store flags into %ah */
2748 	     : "=a" (test)
2749 	     : "0" (5), "q" (2)
2750 	     : "cc");
2751 
2752 	/* AH is 0x02 on Cyrix after the divide.. */
2753 	return (unsigned char) (test >> 8) == 0x02;
2754 }
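
/*
 *	Editor's note: why 0x02 -- with %eax = 0x0005, sahf loads AH
 *	(0x00) into the low flag byte, div computes 5/2 (AL = 2, AH = 1)
 *	and on everything but a Cyrix clobbers the flags, then lahf reads
 *	the flag byte back into AH.  Bit 1 of EFLAGS always reads as 1,
 *	so an untouched flag byte is exactly 0x02; any other value means
 *	the divide modified the flags and the CPU is not a Cyrix.
 */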
2755 
2756 /* Try to detect a CPU with disabled CPUID, and if so, enable it.  This routine
2757    may also be used to detect non-CPUID processors and fill in some of
2758    the information manually. */
2759 static int __init id_and_try_enable_cpuid(struct cpuinfo_x86 *c)
2760 {
2761 	/* First of all, decide if this is a 486 or higher */
2762 	/* It's a 486 if we can modify the AC flag */
2763 	if ( flag_is_changeable_p(X86_EFLAGS_AC) )
2764 		c->x86 = 4;
2765 	else
2766 		c->x86 = 3;
2767 
2768 	/* Detect Cyrix with disabled CPUID */
2769 	if ( c->x86 == 4 && test_cyrix_52div() ) {
2770 		unsigned char dir0, dir1;
2771 
2772 		strcpy(c->x86_vendor_id, "CyrixInstead");
2773 		c->x86_vendor = X86_VENDOR_CYRIX;
2774 
2775 		/* Actually enable cpuid on the older Cyrix */
2776 
2777 	    	/* Retrieve CPU revisions */
2778 
2779 		do_cyrix_devid(&dir0, &dir1);
2780 
2781 		dir0>>=4;
2782 
2783 		/* Check it is an affected model */
2784 
2785 		if (dir0 == 5 || dir0 == 3)
2786 		{
2787 			unsigned char ccr3, ccr4;
2788 			unsigned long flags;
2789 			printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
2790 			local_irq_save(flags);
2791 			ccr3 = getCx86(CX86_CCR3);
2792 			setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
2793 			ccr4 = getCx86(CX86_CCR4);
2794 			setCx86(CX86_CCR4, ccr4 | 0x80);          /* enable cpuid  */
2795 			setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
2796 			local_irq_restore(flags);
2797 		}
2798 	} else
2799 
2800 	/* Detect NexGen with old hypercode */
2801 	if ( deep_magic_nexgen_probe() ) {
2802 		strcpy(c->x86_vendor_id, "NexGenDriven");
2803 	}
2804 
2805 	return have_cpuid_p();	/* Check whether CPUID is now enabled */
2806 }
2807 
2808 /*
2809  * This does the hard work of actually picking apart the CPU stuff...
2810  */
2811 void __init identify_cpu(struct cpuinfo_x86 *c)
2812 {
2813 	int junk, i;
2814 	u32 xlvl, tfms;
2815 
2816 	c->loops_per_jiffy = loops_per_jiffy;
2817 	c->x86_cache_size = -1;
2818 	c->x86_vendor = X86_VENDOR_UNKNOWN;
2819 	c->cpuid_level = -1;	/* CPUID not detected */
2820 	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
2821 	c->x86_vendor_id[0] = '\0'; /* Unset */
2822 	c->x86_model_id[0] = '\0';  /* Unset */
2823 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
2824 
2825 	if ( !have_cpuid_p() && !id_and_try_enable_cpuid(c) ) {
2826 		/* CPU doesn't have CPUID */
2827 
2828 		/* If there are any capabilities, they're vendor-specific */
2829 		/* id_and_try_enable_cpuid() would have set c->x86 for us. */
2830 	} else {
2831 		/* CPU does have CPUID */
2832 
2833 		/* Get vendor name */
2834 		cpuid(0x00000000, &c->cpuid_level,
2835 		      (int *)&c->x86_vendor_id[0],
2836 		      (int *)&c->x86_vendor_id[8],
2837 		      (int *)&c->x86_vendor_id[4]);
2838 
2839 		get_cpu_vendor(c);
2840 		/* Initialize the standard set of capabilities */
2841 		/* Note that the vendor-specific code below might override */
2842 
2843 		/* Intel-defined flags: level 0x00000001 */
2844 		if ( c->cpuid_level >= 0x00000001 ) {
2845 			u32 capability, excap;
2846 			cpuid(0x00000001, &tfms, &junk, &excap, &capability);
2847 			c->x86_capability[0] = capability;
2848 			c->x86_capability[4] = excap;
2849 			c->x86 = (tfms >> 8) & 15;
2850 			c->x86_model = (tfms >> 4) & 15;
2851 			if (c->x86 == 0xf) {
2852 				c->x86 += (tfms >> 20) & 0xff;
2853 				c->x86_model += ((tfms >> 16) & 0xF) << 4;
2854 			}
2855 			c->x86_mask = tfms & 15;
2856 		} else {
2857 			/* Have CPUID level 0 only - unheard of */
2858 			c->x86 = 4;
2859 		}
2860 
2861 		/* AMD-defined flags: level 0x80000001 */
2862 		xlvl = cpuid_eax(0x80000000);
2863 		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
2864 			if ( xlvl >= 0x80000001 )
2865 				c->x86_capability[1] = cpuid_edx(0x80000001);
2866 			if ( xlvl >= 0x80000004 )
2867 				get_model_name(c); /* Default name */
2868 		}
2869 
2870 		/* Transmeta-defined flags: level 0x80860001 */
2871 		xlvl = cpuid_eax(0x80860000);
2872 		if ( (xlvl & 0xffff0000) == 0x80860000 ) {
2873 			if (  xlvl >= 0x80860001 )
2874 				c->x86_capability[2] = cpuid_edx(0x80860001);
2875 		}
2876 	}
2877 
2878 	/*
2879 	 * Vendor-specific initialization.  In this section we
2880 	 * canonicalize the feature flags: if a CPU supports features
2881 	 * that CPUID doesn't tell us about, if CPUID claims flags
2882 	 * incorrectly, or if there are other bugs, we handle them
2883 	 * here.
2884 	 *
2885 	 * At the end of this section, c->x86_capability better
2886 	 * indicate the features this CPU genuinely supports!
2887 	 */
2888 	switch ( c->x86_vendor ) {
2889 	case X86_VENDOR_UNKNOWN:
2890 	default:
2891 		/* Not much we can do here... */
2892 		/* Check if at least it has cpuid */
2893 		if (c->cpuid_level == -1)
2894 		{
2895 			/* No cpuid. It must be an ancient CPU */
2896 			if (c->x86 == 4)
2897 				strcpy(c->x86_model_id, "486");
2898 			else if (c->x86 == 3)
2899 				strcpy(c->x86_model_id, "386");
2900 		}
2901 		break;
2902 
2903 	case X86_VENDOR_CYRIX:
2904 		init_cyrix(c);
2905 		break;
2906 
2907 	case X86_VENDOR_NSC:
2908 	        init_cyrix(c);
2909 		break;
2910 
2911 	case X86_VENDOR_AMD:
2912 		init_amd(c);
2913 		break;
2914 
2915 	case X86_VENDOR_CENTAUR:
2916 		init_centaur(c);
2917 		break;
2918 
2919 	case X86_VENDOR_INTEL:
2920 		init_intel(c);
2921 		break;
2922 
2923 	case X86_VENDOR_NEXGEN:
2924 		c->x86_cache_size = 256; /* A few had 1 MB... */
2925 		break;
2926 
2927 	case X86_VENDOR_TRANSMETA:
2928 		init_transmeta(c);
2929 		break;
2930 
2931 	case X86_VENDOR_RISE:
2932 		init_rise(c);
2933 		break;
2934 	}
2935 
2936 	/*
2937 	 * The vendor-specific functions might have changed features.  Now
2938 	 * we do "generic changes."
2939 	 */
2940 
2941 	/* TSC disabled? */
2942 #ifndef CONFIG_X86_TSC
2943 	if ( tsc_disable )
2944 		clear_bit(X86_FEATURE_TSC, &c->x86_capability);
2945 #endif
2946 
2947 	/* check for caps that have been disabled earlier */
2948 	for (i = 0; i < NCAPINTS; i++) {
2949 	     c->x86_capability[i] &= ~disabled_x86_caps[i];
2950 	}
2951 
2952 	/* Disable the PN if appropriate */
2953 	squash_the_stupid_serial_number(c);
2954 
2955 	/* Init Machine Check Exception if available. */
2956 	mcheck_init(c);
2957 
2958 	/* If the model name is still unset, do table lookup. */
2959 	if ( !c->x86_model_id[0] ) {
2960 		char *p;
2961 		p = table_lookup_model(c);
2962 		if ( p )
2963 			strcpy(c->x86_model_id, p);
2964 		else
2965 			/* Last resort... */
2966 			sprintf(c->x86_model_id, "%02x/%02x",
2967 				c->x86_vendor, c->x86_model);
2968 	}
2969 
2970 	/* Now the feature flags better reflect actual CPU features! */
2971 
2972 	printk(KERN_DEBUG "CPU:     After generic, caps: %08x %08x %08x %08x\n",
2973 	       c->x86_capability[0],
2974 	       c->x86_capability[1],
2975 	       c->x86_capability[2],
2976 	       c->x86_capability[3]);
2977 
2978 	/*
2979 	 * On SMP, boot_cpu_data holds the common feature set between
2980 	 * all CPUs; so make sure that we indicate which features are
2981 	 * common between the CPUs.  The first time this routine gets
2982 	 * executed, c == &boot_cpu_data.
2983 	 */
2984 	if ( c != &boot_cpu_data ) {
2985 		/* AND the already accumulated flags with these */
2986 		for ( i = 0 ; i < NCAPINTS ; i++ )
2987 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
2988 	}
2989 
2990 	printk(KERN_DEBUG "CPU:             Common caps: %08x %08x %08x %08x\n",
2991 	       boot_cpu_data.x86_capability[0],
2992 	       boot_cpu_data.x86_capability[1],
2993 	       boot_cpu_data.x86_capability[2],
2994 	       boot_cpu_data.x86_capability[3]);
2995 }
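
/*
 *	Editor's note: a worked example of the signature decode in
 *	identify_cpu().  For tfms = 0x00000F27 (a Pentium 4):
 *	family = (tfms >> 8) & 15 = 0xf, so the extended fields apply:
 *	family += (tfms >> 20) & 0xff, still 15; model = ((tfms >> 16)
 *	& 0xf) << 4 plus (tfms >> 4) & 15, giving 2; stepping =
 *	tfms & 15 = 7.
 */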
2996 /*
2997  *	Perform early boot-up checks for a valid TSC. See arch/i386/kernel/time.c
2998  */
2999 
3000 void __init dodgy_tsc(void)
3001 {
3002 	get_cpu_vendor(&boot_cpu_data);
3003 
3004 	if ( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ||
3005 	     boot_cpu_data.x86_vendor == X86_VENDOR_NSC )
3006 		init_cyrix(&boot_cpu_data);
3007 }
3008 
3009 
3010 /* These need to match <asm/processor.h> */
3011 static char *cpu_vendor_names[] __initdata = {
3012 	"Intel", "Cyrix", "AMD", "UMC", "NexGen",
3013 	"Centaur", "Rise", "Transmeta", "NSC"
3014 };
3015 
3016 
3017 void __init print_cpu_info(struct cpuinfo_x86 *c)
3018 {
3019 	char *vendor = NULL;
3020 
3021 	if (c->x86_vendor < sizeof(cpu_vendor_names)/sizeof(char *))
3022 		vendor = cpu_vendor_names[c->x86_vendor];
3023 	else if (c->cpuid_level >= 0)
3024 		vendor = c->x86_vendor_id;
3025 
3026 	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
3027 		printk("%s ", vendor);
3028 
3029 	if (!c->x86_model_id[0])
3030 		printk("%d86", c->x86);
3031 	else
3032 		printk("%s", c->x86_model_id);
3033 
3034 	if (c->x86_mask || c->cpuid_level >= 0)
3035 		printk(" stepping %02x\n", c->x86_mask);
3036 	else
3037 		printk("\n");
3038 }
3039 
3040 /*
3041  *	Get CPU information for use by the procfs.
3042  */
3043 static int show_cpuinfo(struct seq_file *m, void *v)
3044 {
3045 	/*
3046 	 * These flag bits must match the definitions in <asm/cpufeature.h>.
3047 	 * NULL means this bit is undefined or reserved; either way it doesn't
3048 	 * have meaning as far as Linux is concerned.  Note that it's important
3049 	 * to realize there is a difference between this table and CPUID -- if
3050 	 * applications want to get the raw CPUID data, they should access
3051 	 * /dev/cpu/<cpu_nr>/cpuid instead.
3052 	 */
3053 	static char *x86_cap_flags[] = {
3054 		/* Intel-defined */
3055 	        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
3056 	        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
3057 	        "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
3058 	        "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
3059 
3060 		/* AMD-defined */
3061 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3062 		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
3063 		NULL, NULL, NULL, "mp", NULL, NULL, "mmxext", NULL,
3064 		NULL, NULL, NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
3065 
3066 		/* Transmeta-defined */
3067 		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
3068 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3069 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3070 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3071 
3072 		/* Other (Linux-defined) */
3073 		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
3074 		NULL, NULL, NULL, NULL,
3075 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3076 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3077 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3078 
3079 		/* Intel-defined (#2) */
3080 		"pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "tm2",
3081 		"est", NULL, "cid", NULL, NULL, NULL, NULL, NULL,
3082 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3083 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3084 
3085 		/* VIA/Cyrix/Centaur-defined */
3086 		NULL, NULL, "xstore", NULL, NULL, NULL, NULL, NULL,
3087 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3088 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3089 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3090 	};
3091 	struct cpuinfo_x86 *c = v;
3092 	int i, n = c - cpu_data;
3093 	int fpu_exception;
3094 
3095 #ifdef CONFIG_SMP
3096 	if (!(cpu_online_map & (1<<n)))
3097 		return 0;
3098 #endif
3099 	seq_printf(m, "processor\t: %d\n"
3100 		"vendor_id\t: %s\n"
3101 		"cpu family\t: %d\n"
3102 		"model\t\t: %d\n"
3103 		"model name\t: %s\n",
3104 		n,
3105 		c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
3106 		c->x86,
3107 		c->x86_model,
3108 		c->x86_model_id[0] ? c->x86_model_id : "unknown");
3109 
3110 	if (c->x86_mask || c->cpuid_level >= 0)
3111 		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
3112 	else
3113 		seq_printf(m, "stepping\t: unknown\n");
3114 
3115 	if ( test_bit(X86_FEATURE_TSC, &c->x86_capability) ) {
3116 		seq_printf(m, "cpu MHz\t\t: %lu.%03lu\n",
3117 			cpu_khz / 1000, (cpu_khz % 1000));
3118 	}
3119 
3120 	/* Cache size */
3121 	if (c->x86_cache_size >= 0)
3122 		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
3123 
3124 	/* We use exception 16 if we have hardware math and we've either seen it or the CPU claims it is internal */
3125 	fpu_exception = c->hard_math && (ignore_irq13 || cpu_has_fpu);
3126 	seq_printf(m, "fdiv_bug\t: %s\n"
3127 			"hlt_bug\t\t: %s\n"
3128 			"f00f_bug\t: %s\n"
3129 			"coma_bug\t: %s\n"
3130 			"fpu\t\t: %s\n"
3131 			"fpu_exception\t: %s\n"
3132 			"cpuid level\t: %d\n"
3133 			"wp\t\t: %s\n"
3134 			"flags\t\t:",
3135 		     c->fdiv_bug ? "yes" : "no",
3136 		     c->hlt_works_ok ? "no" : "yes",
3137 		     c->f00f_bug ? "yes" : "no",
3138 		     c->coma_bug ? "yes" : "no",
3139 		     c->hard_math ? "yes" : "no",
3140 		     fpu_exception ? "yes" : "no",
3141 		     c->cpuid_level,
3142 		     c->wp_works_ok ? "yes" : "no");
3143 
3144 	for ( i = 0 ; i < 32*NCAPINTS ; i++ )
3145 		if ( test_bit(i, &c->x86_capability) &&
3146 		     x86_cap_flags[i] != NULL )
3147 			seq_printf(m, " %s", x86_cap_flags[i]);
3148 
3149 	seq_printf(m, "\nbogomips\t: %lu.%02lu\n\n",
3150 		     c->loops_per_jiffy/(500000/HZ),
3151 		     (c->loops_per_jiffy/(5000/HZ)) % 100);
3152 	return 0;
3153 }
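
/*
 *	Editor's note: the bogomips output above is loops_per_jiffy * HZ
 *	/ 500000 printed with two decimals.  E.g. with HZ = 100 and
 *	loops_per_jiffy = 2495000: 2495000/(500000/100) = 499 whole units
 *	and (2495000/(5000/100)) % 100 = 0 hundredths, printed "499.00".
 */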
3154 
3155 static void *c_start(struct seq_file *m, loff_t *pos)
3156 {
3157 	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
3158 }
3159 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
3160 {
3161 	++*pos;
3162 	return c_start(m, pos);
3163 }
3164 static void c_stop(struct seq_file *m, void *v)
3165 {
3166 }
3167 struct seq_operations cpuinfo_op = {
3168 	start:	c_start,
3169 	next:	c_next,
3170 	stop:	c_stop,
3171 	show:	show_cpuinfo,
3172 };
3173 
3174 unsigned long cpu_initialized __initdata = 0;
3175 
3176 /*
3177  * cpu_init() initializes state that is per-CPU. Some data is already
3178  * initialized (naturally) in the bootstrap process, such as the GDT
3179  * and IDT. We reload them nevertheless, this function acts as a
3180  * 'CPU state barrier', nothing should get across.
3181  */
3182 void __init cpu_init (void)
3183 {
3184 	int nr = smp_processor_id();
3185 	struct tss_struct * t = &init_tss[nr];
3186 
3187 	if (test_and_set_bit(nr, &cpu_initialized)) {
3188 		printk(KERN_WARNING "CPU#%d already initialized!\n", nr);
3189 		for (;;) __sti();
3190 	}
3191 	printk(KERN_INFO "Initializing CPU#%d\n", nr);
3192 
3193 	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
3194 		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
3195 #ifndef CONFIG_X86_TSC
3196 	if (tsc_disable && cpu_has_tsc) {
3197 		printk(KERN_NOTICE "Disabling TSC...\n");
3198 		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
3199 		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
3200 		set_in_cr4(X86_CR4_TSD);
3201 	}
3202 #endif
3203 
3204 	__asm__ __volatile__("lgdt %0": "=m" (gdt_descr));
3205 	__asm__ __volatile__("lidt %0": "=m" (idt_descr));
3206 
3207 	/*
3208 	 * Delete NT
3209 	 */
3210 	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
3211 
3212 	/*
3213 	 * set up and load the per-CPU TSS and LDT
3214 	 */
3215 	atomic_inc(&init_mm.mm_count);
3216 	current->active_mm = &init_mm;
3217 	if(current->mm)
3218 		BUG();
3219 	enter_lazy_tlb(&init_mm, current, nr);
3220 
3221 	t->esp0 = current->thread.esp0;
3222 	set_tss_desc(nr,t);
3223 	gdt_table[__TSS(nr)].b &= 0xfffffdff;
3224 	load_TR(nr);
3225 	load_LDT(&init_mm.context);
3226 
3227 	/*
3228 	 * Clear all 6 debug registers:
3229 	 */
3230 
3231 #define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );
3232 
3233 	CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
3234 
3235 #undef CD
3236 
3237 	/*
3238 	 * Force FPU initialization:
3239 	 */
3240 	current->flags &= ~PF_USEDFPU;
3241 	current->used_math = 0;
3242 	stts();
3243 }
3244 
3245 /*
3246  *	Early probe support logic for ppro memory erratum #50
3247  *
3248  *	This is called before we do cpu ident work
3249  */
3250 
3251 int __init ppro_with_ram_bug(void)
3252 {
3253 	char vendor_id[16];
3254 	int ident;
3255 
3256 	/* Must have CPUID */
3257 	if(!have_cpuid_p())
3258 		return 0;
3259 	if(cpuid_eax(0)<1)
3260 		return 0;
3261 
3262 	/* Must be Intel */
3263 	cpuid(0, &ident,
3264 		(int *)&vendor_id[0],
3265 		(int *)&vendor_id[8],
3266 		(int *)&vendor_id[4]);
3267 
3268 	if(memcmp(vendor_id, "GenuineIntel", 12))
3269 		return 0;
3270 
3271 	ident = cpuid_eax(1);
3272 
3273 	/* Model 6 */
3274 
3275 	if(((ident>>8)&15)!=6)
3276 		return 0;
3277 
3278 	/* Pentium Pro */
3279 
3280 	if(((ident>>4)&15)!=1)
3281 		return 0;
3282 
3283 	if((ident&15) < 8)
3284 	{
3285 		printk(KERN_INFO "Pentium Pro with Erratum #50 detected. Taking evasive action.\n");
3286 		return 1;
3287 	}
3288 	printk(KERN_INFO "Your Pentium Pro seems ok.\n");
3289 	return 0;
3290 }
3291 
3292 /*
3293  * Local Variables:
3294  * mode:c
3295  * c-file-style:"k&r"
3296  * c-basic-offset:8
3297  * End:
3298  */
3299