/*
 * linux/arch/x86-64/kernel/setup.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Nov 2001 Dave Jones <davej@suse.de>
 * Forked from i386 setup code.
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/blk.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/bootsetup.h>
#include <asm/proto.h>

int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);

int swiotlb;
EXPORT_SYMBOL(swiotlb);

extern int phys_proc_id[NR_CPUS];

/*
 * Machine setup..
 */

struct cpuinfo_x86 boot_cpu_data = {
	cpuid_level: -1,
};

unsigned long mmu_cr4_features;
EXPORT_SYMBOL(mmu_cr4_features);

/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0x10000000;

/*
 * Setup options
 */
struct drive_info_struct { char dummy[32]; } drive_info;
struct screen_info screen_info;
struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};

struct e820map e820;

unsigned char aux_device_present;

extern int root_mountflags;
extern char _text, _etext, _edata, _end;

char command_line[COMMAND_LINE_SIZE];
char saved_command_line[COMMAND_LINE_SIZE];

struct resource standard_io_resources[] = {
	{ "dma1", 0x00, 0x1f, IORESOURCE_BUSY },
	{ "pic1", 0x20, 0x3f, IORESOURCE_BUSY },
	{ "timer0", 0x40, 0x43, IORESOURCE_BUSY },
	{ "timer1", 0x50, 0x53, IORESOURCE_BUSY },
	{ "keyboard", 0x60, 0x6f, IORESOURCE_BUSY },
	{ "dma page reg", 0x80, 0x8f, IORESOURCE_BUSY },
	{ "pic2", 0xa0, 0xbf, IORESOURCE_BUSY },
	{ "dma2", 0xc0, 0xdf, IORESOURCE_BUSY },
	{ "fpu", 0xf0, 0xff, IORESOURCE_BUSY }
};

#define STANDARD_IO_RESOURCES (sizeof(standard_io_resources)/sizeof(struct resource))

struct resource code_resource = { "Kernel code", 0x100000, 0 };
struct resource data_resource = { "Kernel data", 0, 0 };
struct resource vram_resource = { "Video RAM area", 0xa0000, 0xbffff, IORESOURCE_BUSY };


/* System ROM resources */
#define MAXROMS 6
static struct resource rom_resources[MAXROMS] = {
	{ "System ROM", 0xF0000, 0xFFFFF, IORESOURCE_BUSY },
	{ "Video ROM", 0xc0000, 0xc7fff, IORESOURCE_BUSY }
};

#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
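
/*
 * A PC option ROM begins with the 16-bit signature 0xAA55; byte 2 gives
 * the image length in 512-byte units, and the image bytes must sum to
 * zero mod 256 for the checksum to pass.
 */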

static void __init probe_roms(void)
{
	int roms = 1;
	unsigned long base;
	unsigned char *romstart;

	request_resource(&iomem_resource, rom_resources+0);

	/* Video ROM is standard at C000:0000 - C7FF:0000, check signature */
	for (base = 0xC0000; base < 0xE0000; base += 2048) {
		romstart = bus_to_virt(base);
		if (!romsignature(romstart))
			continue;
		request_resource(&iomem_resource, rom_resources + roms);
		roms++;
		break;
	}

	/* Extension roms at C800:0000 - DFFF:0000 */
	for (base = 0xC8000; base < 0xE0000; base += 2048) {
		unsigned long length;

		romstart = bus_to_virt(base);
		if (!romsignature(romstart))
			continue;
		length = romstart[2] * 512;
		if (length) {
			unsigned int i;
			unsigned char chksum;

			chksum = 0;
			for (i = 0; i < length; i++)
				chksum += romstart[i];

			/* Good checksum? */
			if (!chksum) {
				rom_resources[roms].start = base;
				rom_resources[roms].end = base + length - 1;
				rom_resources[roms].name = "Extension ROM";
				rom_resources[roms].flags = IORESOURCE_BUSY;

				request_resource(&iomem_resource, rom_resources + roms);
				roms++;
				if (roms >= MAXROMS)
					return;
			}
		}
	}

	/* Final check for motherboard extension rom at E000:0000 */
	base = 0xE0000;
	romstart = bus_to_virt(base);

	if (romsignature(romstart)) {
		rom_resources[roms].start = base;
		rom_resources[roms].end = base + 65535;
		rom_resources[roms].name = "Extension ROM";
		rom_resources[roms].flags = IORESOURCE_BUSY;

		request_resource(&iomem_resource, rom_resources + roms);
	}
}

unsigned long start_pfn, end_pfn;
extern unsigned long table_start, table_end;

#ifndef CONFIG_DISCONTIGMEM
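/*
 * Bootstrap the bootmem allocator: find room for its bitmap via the
 * e820 map, mark all e820 RAM as free, then reserve the bitmap itself
 * so it is never handed out as free pages.
 */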
static void __init contig_initmem_init(void)
{
	unsigned long bootmap_size, bootmap;
	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_bootmem_free(&contig_page_data, 0, end_pfn << PAGE_SHIFT);
	reserve_bootmem(bootmap, bootmap_size);
}
#endif

void __init setup_arch(char **cmdline_p)
{
	int i;
	unsigned long kernel_end;

	ROOT_DEV = to_kdev_t(ORIG_ROOT_DEV);
	drive_info = DRIVE_INFO;
	screen_info = SCREEN_INFO;
	aux_device_present = AUX_DEVICE_INFO;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_bus(&_text);
	code_resource.end = virt_to_bus(&_etext)-1;
	data_resource.start = virt_to_bus(&_etext);
	data_resource.end = virt_to_bus(&_edata)-1;

	parse_mem_cmdline(cmdline_p);

	e820_end_of_ram();

	check_efer();

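	/*
	 * Build the direct mapping of all physical RAM at PAGE_OFFSET; the
	 * page tables this allocates are recorded in table_start..table_end
	 * and reserved from bootmem further down.
	 */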
	init_memory_mapping();

#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start+INITRD_SIZE;
		}
		else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       (unsigned long)INITRD_START + INITRD_SIZE,
			       (unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif

#ifdef CONFIG_DISCONTIGMEM
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init();
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT);

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start)
		reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
#endif

	/* Reserve BIOS data page. Some things still need it */
	reserve_bootmem_generic(0, PAGE_SIZE);

#ifdef CONFIG_SMP
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

	/* Reserve SMP trampoline */
	reserve_bootmem_generic(0x6000, PAGE_SIZE);
#endif
	/* Reserve Kernel */
	kernel_end = round_up(__pa_symbol(&_end), PAGE_SIZE);
	reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#endif

#ifdef CONFIG_SMP
	/* AP processor realmode stacks in low memory */
	smp_alloc_memory();
#endif

	paging_init();
#if defined(CONFIG_X86_IO_APIC)
	{
		extern void check_ioapic(void);
		check_ioapic();
	}
#endif

#ifdef CONFIG_ACPI_BOOT
	/*
	 * Parse the ACPI tables for possible boot-time SMP configuration.
	 */
	acpi_boot_init();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
#endif

	/*
	 * Request address space for all standard RAM and ROM resources
	 * and also for regions reported as reserved by the e820.
	 */
	probe_roms();
	e820_reserve_resources();
	request_resource(&iomem_resource, &vram_resource);

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, standard_io_resources+i);
	/* Put PCI memory high so that VALID_PAGE with DISCONTIGMEM
	   never returns true for it */

	/* Tell the PCI layer not to allocate too close to the RAM area.. */
	pci_mem_start = IOMAP_START;

#ifdef CONFIG_GART_IOMMU
	iommu_hole_init();
#endif
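	/*
	 * Without a hardware IOMMU aperture, devices limited to 32-bit DMA
	 * cannot reach memory above 4GB directly, so bounce-buffer through
	 * the software I/O TLB instead.
	 */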
#ifdef CONFIG_SWIOTLB
	if (!iommu_aperture && end_pfn >= 0xffffffff>>PAGE_SHIFT) {
		swiotlb_init();
		swiotlb = 1;
	}
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	num_mappedpages = end_pfn;
}

static int __init get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
	return 1;
}


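/*
 * AMD's extended CPUID leaves report cache geometry directly:
 * leaf 0x80000005 returns the L1 D-cache in ECX and the L1 I-cache in
 * EDX (size in KB in bits 31:24, associativity in 23:16, line size in
 * 7:0); leaf 0x80000006 returns the L2 size in KB in ECX bits 31:16.
 */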
static void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, eax, ebx, eax_2, ebx_2, ecx_2;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		if (n >= 0x80000006)
			cpuid(0x80000006, &eax_2, &ebx_2, &ecx_2, &dummy);

		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line/%d way), D cache %dK (%d bytes/line/%d way)\n",
		       edx>>24, edx&0xFF, (edx>>16)&0xff,
		       ecx>>24, ecx&0xFF, (ecx>>16)&0xff);
		c->x86_cache_size = (ecx>>24)+(edx>>24);
		if (n >= 0x80000006) {
			printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line/%d way)\n",
			       ecx_2>>16, ecx_2&0xFF,
			       /* use bits[15:13] as power of 2 for # of ways */
			       1 << ((ecx_2>>13) & 0x7)
			       /* Direct and Full associative L2 are very unlikely */);
			c->x86_cache_size = ecx_2 >> 16;
			c->x86_tlbsize = ((ebx>>16)&0xff) + ((ebx_2>>16)&0xfff) +
				(ebx&0xff) + ((ebx_2)&0xfff);
		}
		if (n >= 0x80000007)
			cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
		if (n >= 0x80000008) {
			cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
			c->x86_virt_bits = (eax >> 8) & 0xff;
			c->x86_phys_bits = eax & 0xff;
		}
	}
}

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table
{
	unsigned char descriptor;
	char cache_type;
	short size;
};

/* all the cache descriptor types we care about (no TLB or trace cache entries) */
static struct _cache_table cache_table[] __initdata =
{
	{ 0x06, LVL_1_INST, 8 },
	{ 0x08, LVL_1_INST, 16 },
	{ 0x0A, LVL_1_DATA, 8 },
	{ 0x0C, LVL_1_DATA, 16 },
	{ 0x22, LVL_3, 512 },
	{ 0x23, LVL_3, 1024 },
	{ 0x25, LVL_3, 2048 },
	{ 0x29, LVL_3, 4096 },
	{ 0x39, LVL_2, 128 },
	{ 0x3C, LVL_2, 256 },
	{ 0x41, LVL_2, 128 },
	{ 0x42, LVL_2, 256 },
	{ 0x43, LVL_2, 512 },
	{ 0x44, LVL_2, 1024 },
	{ 0x45, LVL_2, 2048 },
	{ 0x66, LVL_1_DATA, 8 },
	{ 0x67, LVL_1_DATA, 16 },
	{ 0x68, LVL_1_DATA, 32 },
	{ 0x70, LVL_TRACE, 12 },
	{ 0x71, LVL_TRACE, 16 },
	{ 0x72, LVL_TRACE, 32 },
	{ 0x79, LVL_2, 128 },
	{ 0x7A, LVL_2, 256 },
	{ 0x7B, LVL_2, 512 },
	{ 0x7C, LVL_2, 1024 },
	{ 0x82, LVL_2, 256 },
	{ 0x83, LVL_2, 512 },
	{ 0x84, LVL_2, 1024 },
	{ 0x85, LVL_2, 2048 },
	{ 0x00, 0, 0 }
};

int select_idle_routine(struct cpuinfo_x86 *c);

static void __init init_intel(struct cpuinfo_x86 *c)
{
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	char *p = NULL;
	u32 eax, dummy;

	unsigned int n;


	select_idle_routine(c);
	if (c->cpuid_level > 1) {
		/* supports eax=2 call */
		int i, j, n;
		int regs[4];
		unsigned char *dp = (unsigned char *)regs;
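
		/* CPUID leaf 2 returns up to 16 one-byte cache/TLB
		   descriptors per call, packed into EAX..EDX; scan them
		   as a flat byte array. */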

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for ( i = 0 ; i < n ; i++ ) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for ( j = 0 ; j < 3 ; j++ ) {
				if ( regs[j] < 0 ) regs[j] = 0;
			}

			/* Byte 0 is level count, not a descriptor */
			for ( j = 1 ; j < 16 ; j++ ) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0)
				{
					if (cache_table[k].descriptor == des) {
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}
						break;
					}

					k++;
				}
			}
		}

		if ( trace )
			printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
		else if ( l1i )
			printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);
		if ( l1d )
			printk(", L1 D cache: %dK\n", l1d);
		else
			printk("\n");
		if ( l2 )
			printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
		if ( l3 )
			printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

		/*
		 * This assumes the L3 cache is shared; it typically lives in
		 * the northbridge. The L1 caches are included by the L2
		 * cache, and so should not be included for the purpose of
		 * SMP switching weights.
		 */
		c->x86_cache_size = l2 ? l2 : (l1i+l1d);
	}

	if ( p )
		strcpy(c->x86_model_id, p);

#ifdef CONFIG_SMP
	if (test_bit(X86_FEATURE_HT, &c->x86_capability)) {
		int index_lsb, index_msb, tmp;
		int initial_apic_id;
		int cpu = smp_processor_id();
		u32 ebx, ecx, edx;

		cpuid(1, &eax, &ebx, &ecx, &edx);
		smp_num_siblings = (ebx & 0xff0000) >> 16;

		if (smp_num_siblings == 1) {
			printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
		} else if (smp_num_siblings > 1) {
			index_lsb = 0;
			index_msb = 31;
			/*
			 * At this point we only support two siblings per
			 * processor package.
			 */
#define NR_SIBLINGS	2
			if (smp_num_siblings != NR_SIBLINGS) {
				printk(KERN_WARNING "CPU: Unsupported number of siblings %d\n", smp_num_siblings);
				smp_num_siblings = 1;
				return;
			}
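			/*
			 * index_msb ends up as ceil(log2(smp_num_siblings)),
			 * so the physical package ID is the initial APIC ID
			 * with the sibling bits shifted out.
			 */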
			tmp = smp_num_siblings;
			while ((tmp & 1) == 0) {
				tmp >>= 1;
				index_lsb++;
			}
			tmp = smp_num_siblings;
			while ((tmp & 0x80000000) == 0) {
				tmp <<= 1;
				index_msb--;
			}
			if (index_lsb != index_msb)
				index_msb++;
			initial_apic_id = ebx >> 24 & 0xff;
			phys_proc_id[cpu] = initial_apic_id >> index_msb;

			printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
			       phys_proc_id[cpu]);
		}

	}
#endif

	n = cpuid_eax(0x80000000);
	if (n >= 0x80000008) {
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}

}

static int __init init_amd(struct cpuinfo_x86 *c)
{
	int r;

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	r = get_model_name(c);
	if (!r) {
		switch (c->x86) {
		case 15:
			/* Should distinguish models here, but this is only
			   a fallback anyway. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}
	display_cacheinfo(c);
	return r;
}


void __init get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;

	if (!strcmp(v, "AuthenticAMD"))
		c->x86_vendor = X86_VENDOR_AMD;
	else if (!strcmp(v, "GenuineIntel"))
		c->x86_vendor = X86_VENDOR_INTEL;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}

struct cpu_model_info {
	int vendor;
	int family;
	char *model_names[16];
};

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
	int i;
	u32 xlvl, tfms;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, &c->cpuid_level,
	      (int *)&c->x86_vendor_id[0],
	      (int *)&c->x86_vendor_id[8],
	      (int *)&c->x86_vendor_id[4]);
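	/* Leaf 0 returns the vendor string in EBX, EDX, ECX order, hence
	   the 0/8/4 byte offsets above. */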

	get_cpu_vendor(c);
	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if ( c->cpuid_level >= 0x00000001 ) {
		__u32 misc;
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 15;
		c->x86_model = (tfms >> 4) & 15;
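		/* Family 0xf signals that the real family/model are in the
		   extended fields, EAX bits 27:20 and 19:16. */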
		if (c->x86 == 0xf) { /* extended */
			c->x86 += (tfms >> 20) & 0xff;
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		}
		c->x86_mask = tfms & 15;
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	if ( (xlvl & 0xffff0000) == 0x80000000 ) {
		if ( xlvl >= 0x80000001 )
			c->x86_capability[1] = cpuid_edx(0x80000001);
		if ( xlvl >= 0x80000004 )
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ( (xlvl & 0xffff0000) == 0x80860000 ) {
		if ( xlvl >= 0x80860001 )
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}


	/*
	 * Vendor-specific initialization. In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch ( c->x86_vendor ) {

	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;
	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	/*
	 * The vendor-specific functions might have changed features. Now
	 * we do "generic changes."
	 */

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs. The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if ( c != &boot_cpu_data ) {
		/* AND the already accumulated flags with these */
		for ( i = 0 ; i < NCAPINTS ; i++ )
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_MCE
	mcheck_init(c);
#endif
}

void __init print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}

/*
 * Get CPU information for use by the procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned. Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static char *x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, NULL, NULL, NULL, NULL, "lm", "3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (cpuid 1, register ecx) */
812 "pni", NULL, NULL, "monitor", "ds-cpl", NULL, NULL, "est",
813 "tm2", NULL, "cid", NULL, NULL, "cmpxchg16b", NULL, NULL,
814 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
815 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
816 };
817 static char *x86_power_flags[] = {
818 "ts", /* temperature sensor */
819 "fid", /* frequency id control */
820 "vid", /* voltage id control */
821 "ttp", /* thermal trip */
822 };
823
824 #ifdef CONFIG_SMP
825 if (!(cpu_online_map & (1<<(c-cpu_data))))
826 return 0;
827 #endif
828
829 seq_printf(m,"processor\t: %u\n"
830 "vendor_id\t: %s\n"
831 "cpu family\t: %d\n"
832 "model\t\t: %d\n"
833 "model name\t: %s\n",
834 (unsigned)(c-cpu_data),
835 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
836 c->x86,
837 (int)c->x86_model,
838 c->x86_model_id[0] ? c->x86_model_id : "unknown");
839
840 if (c->x86_mask || c->cpuid_level >= 0)
841 seq_printf(m, "stepping\t: %d\n", c->x86_mask);
842 else
843 seq_printf(m, "stepping\t: unknown\n");
844
845 if ( test_bit(X86_FEATURE_TSC, &c->x86_capability) ) {
846 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
847 cpu_khz / 1000, (cpu_khz % 1000));
848 }
849
850 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
851
852 #ifdef CONFIG_SMP
853 seq_printf(m, "physical id\t: %d\n",phys_proc_id[c - cpu_data]);
854 seq_printf(m, "siblings\t: %d\n",smp_num_siblings);
855 #endif
856
857 seq_printf(m,
858 "fpu\t\t: yes\n"
859 "fpu_exception\t: yes\n"
860 "cpuid level\t: %d\n"
861 "wp\t\t: yes\n"
862 "flags\t\t:",
863 c->cpuid_level);
864
865 {
866 int i;
867 for ( i = 0 ; i < 32*NCAPINTS ; i++ )
868 if ( test_bit(i, &c->x86_capability) &&
869 x86_cap_flags[i] != NULL )
870 seq_printf(m, " %s", x86_cap_flags[i]);
871 }
872
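	/* BogoMIPS = loops_per_jiffy * HZ / 500000: the calibrated delay
	   loop spins loops_per_jiffy times per timer tick. */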
	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);

	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);

	if (c->x86_phys_bits > 0)
		seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
			   c->x86_phys_bits, c->x86_virt_bits);

	seq_printf(m, "power management:");
	{
		int i;
		for (i = 0; i < 32; i++)
			if (c->x86_power & (1 << i)) {
				if (i < ARRAY_SIZE(x86_power_flags))
					seq_printf(m, " %s", x86_power_flags[i]);
				else
					seq_printf(m, " [%d]", i);
			}
	}

	seq_printf(m, "\n\n");
	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	start:	c_start,
	next:	c_next,
	stop:	c_stop,
	show:	show_cpuinfo,
};