1 /*
2 * linux/arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/kexec.h>
23 #include <linux/of_fdt.h>
24 #include <linux/root_dev.h>
25 #include <linux/cpu.h>
26 #include <linux/interrupt.h>
27 #include <linux/smp.h>
28 #include <linux/fs.h>
29 #include <linux/proc_fs.h>
30 #include <linux/memblock.h>
31 #include <linux/bug.h>
32 #include <linux/compiler.h>
33 #include <linux/sort.h>
34
35 #include <asm/unified.h>
36 #include <asm/cp15.h>
37 #include <asm/cpu.h>
38 #include <asm/cputype.h>
39 #include <asm/elf.h>
40 #include <asm/procinfo.h>
41 #include <asm/sections.h>
42 #include <asm/setup.h>
43 #include <asm/smp_plat.h>
44 #include <asm/mach-types.h>
45 #include <asm/cacheflush.h>
46 #include <asm/cachetype.h>
47 #include <asm/tlbflush.h>
48
49 #include <asm/prom.h>
50 #include <asm/mach/arch.h>
51 #include <asm/mach/irq.h>
52 #include <asm/mach/time.h>
53 #include <asm/system_info.h>
54 #include <asm/system_misc.h>
55 #include <asm/traps.h>
56 #include <asm/unwind.h>
57 #include <asm/memblock.h>
58
59 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
60 #include "compat.h"
61 #endif
62 #include "atags.h"
63 #include "tcm.h"
64
65 #ifndef MEM_SIZE
66 #define MEM_SIZE (16*1024*1024)
67 #endif
68
69 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
70 char fpe_type[8];
71
/*
 * Stash the "fpe=" boot argument for the NWFPE/FastFPE emulator.
 * Copies a fixed 8 bytes into fpe_type — assumes the argument (plus NUL)
 * fits in, and is readable for, 8 bytes; NOTE(review): a shorter string
 * copies trailing bytes from the command line buffer — confirm consumers
 * only read up to the NUL.
 */
static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}
77
78 __setup("fpe=", fpe_setup);
79 #endif
80
81 extern void paging_init(struct machine_desc *desc);
82 extern void sanity_check_meminfo(void);
83 extern void reboot_setup(char *str);
84
85 unsigned int processor_id;
86 EXPORT_SYMBOL(processor_id);
87 unsigned int __machine_arch_type __read_mostly;
88 EXPORT_SYMBOL(__machine_arch_type);
89 unsigned int cacheid __read_mostly;
90 EXPORT_SYMBOL(cacheid);
91
92 unsigned int __atags_pointer __initdata;
93
94 unsigned int system_rev;
95 EXPORT_SYMBOL(system_rev);
96
97 unsigned int system_serial_low;
98 EXPORT_SYMBOL(system_serial_low);
99
100 unsigned int system_serial_high;
101 EXPORT_SYMBOL(system_serial_high);
102
103 unsigned int elf_hwcap __read_mostly;
104 EXPORT_SYMBOL(elf_hwcap);
105
106
107 #ifdef MULTI_CPU
108 struct processor processor __read_mostly;
109 #endif
110 #ifdef MULTI_TLB
111 struct cpu_tlb_fns cpu_tlb __read_mostly;
112 #endif
113 #ifdef MULTI_USER
114 struct cpu_user_fns cpu_user __read_mostly;
115 #endif
116 #ifdef MULTI_CACHE
117 struct cpu_cache_fns cpu_cache __read_mostly;
118 #endif
119 #ifdef CONFIG_OUTER_CACHE
120 struct outer_cache_fns outer_cache __read_mostly;
121 EXPORT_SYMBOL(outer_cache);
122 #endif
123
124 /*
125 * Cached cpu_architecture() result for use by assembler code.
126 * C code should use the cpu_architecture() function instead of accessing this
127 * variable directly.
128 */
129 int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
130
/*
 * Small per-CPU stacks for the re-entrant exception modes (IRQ, abort,
 * undefined).  Only 3 words each: the entry code in cpu_init() points the
 * banked SP here just so the mode has a valid stack on entry.  Cacheline
 * aligned to keep each CPU's set on its own line.
 */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];
138
139 char elf_platform[ELF_PLATFORM_SIZE];
140 EXPORT_SYMBOL(elf_platform);
141
142 static const char *cpu_name;
143 static const char *machine_name;
144 static char __initdata cmd_line[COMMAND_LINE_SIZE];
145 struct machine_desc *machine_desc __initdata;
146
147 static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
148 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
149 #define ENDIANNESS ((char)endian_test.l)
150
151 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
152
153 /*
154 * Standard memory resources
155 */
/*
 * Placeholder iomem resources for video RAM and the kernel code/data
 * images.  start/end are zero here and filled in at boot by
 * request_standard_resources().
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Convenience aliases into mem_res[] used by request_standard_resources(). */
#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]
180
/*
 * Legacy PC parallel-port I/O ranges (LPT0/1/2).  Only claimed when the
 * machine descriptor sets the corresponding reserve_lp* flag.
 */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

/* Convenience aliases into io_res[] used by request_standard_resources(). */
#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
205
/*
 * Human-readable architecture names, indexed by the CPU_ARCH_* value
 * returned by cpu_architecture(); printed in boot messages and
 * /proc/cpuinfo.
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
225
/*
 * Decode the architecture version (CPU_ARCH_*) from the CPU ID register
 * (MIDR).  Handles all four historical ID formats: pre-ARM7, ARM7,
 * old-style post-ARM7, and the revised (ARMv6+) CPUID scheme.
 */
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		/* pre-ARM7 parts carry no architecture information */
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		/* ARM7 family: bit 23 set means a Thumb-capable (v4T) core */
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		/* old post-ARM7 format: architecture code in bits [18:16] */
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
258
/*
 * Return the cached CPU architecture (CPU_ARCH_*).  The cache is filled
 * once in setup_processor(); calling this earlier trips the BUG_ON.
 */
int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}
265
/*
 * Determine whether the instruction cache can alias, i.e. whether one
 * physical line may live under multiple virtual indexes.  @arch selects
 * how the cache ID registers are laid out.  Returns non-zero if aliasing
 * is possible.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* select the L1 I-cache in CSSELR, then read its geometry
		 * from CCSIDR: aliasing occurs when one way spans more than
		 * a page (line_size * num_sets > PAGE_SIZE) */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* v6 cache type register flags I-cache aliasing in bit 11 */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
298
/*
 * Classify the CPU's caches (VIVT / VIPT aliasing / VIPT non-aliasing /
 * PIPT / ASID-tagged) from the cache type register and record the result
 * in the global 'cacheid', then print a summary line.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		/* bits [31:29] == 0b100 means the v7 CTR layout */
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			/* L1Ip field (bits [15:14]) gives the I-cache policy */
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			/* v6 layout: bit 23 flags a D-cache that aliases */
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		/* pre-v6 cores are uniformly virtually indexed and tagged */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
340
341 /*
342 * These functions re-use the assembly code in head.S, which
343 * already provide the required functionality.
344 */
345 extern struct proc_info_list *lookup_processor_type(unsigned int);
346
/*
 * printf-style output usable before the console is up: formats into a
 * fixed 256-byte buffer (longer messages are truncated by vsnprintf),
 * sends it to the low-level debug UART when CONFIG_DEBUG_LL is set,
 * and always logs it via printk.
 */
void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
362
/*
 * Work around an ARM1136 erratum: clear HWCAP_TLS on affected revisions.
 * Applies only to ARM Ltd. (implementer 0x41) v7-format ID parts.
 */
static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	/* only ARM implementer, architecture field 7 parts are candidates */
	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}
377
378 /*
379 * cpu_init - initialise one CPU.
380 *
381 * cpu_init sets up the per-CPU stacks.
382 */
/*
 * Initialise this CPU: run the processor-specific init hook and point the
 * banked stack pointers of the IRQ, abort and undefined exception modes
 * at this CPU's entry in stacks[].  Returns in SVC mode.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * For each of IRQ/ABT/UND mode: switch into the mode with IRQ+FIQ
	 * masked, load that mode's banked SP with stk + offset, then finally
	 * return to SVC mode.  r14 is used as scratch (hence the clobber).
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
430
431 int __cpu_logical_map[NR_CPUS];
432
/*
 * Build the initial logical-to-physical CPU map.  The booting CPU (MPIDR
 * Aff0 on SMP, 0 otherwise) becomes logical CPU 0; the remaining logical
 * IDs map straight through, with the boot CPU's old slot taking ID 0 so
 * the map stays a permutation.
 */
void __init smp_setup_processor_id(void)
{
	int i;
	u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;

	cpu_logical_map(0) = cpu;
	for (i = 1; i < NR_CPUS; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
}
444
/*
 * Identify the CPU and hook up processor-specific function tables.
 * Looks up the proc_info entry by CPUID, caches the architecture,
 * installs the MULTI_* method vectors, fills in the utsname machine
 * string, the ELF platform and hwcaps, then runs the errata fixups and
 * cache/CPU initialisation.  Loops forever if the CPU is unsupported
 * (too early to panic usefully).
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* ENDIANNESS appends 'l' or 'b' to the arch/ELF name */
	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}
495
/*
 * Print every machine descriptor the kernel was built with, then halt.
 * Called when the boot-supplied machine ID matches nothing; does not
 * return.
 */
void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}
509
/*
 * Register a physical memory bank with the meminfo table.
 *
 * @start: physical base (rounded up to a page boundary)
 * @size:  length in bytes (rounded down to whole pages)
 *
 * Returns 0 on success, -EINVAL if the bank table is full or the bank
 * ends up empty after rounding.  Without LPAE the bank is clipped so
 * start + size stays representable in 32 bits.
 */
int __init arm_add_memory(phys_addr_t start, unsigned long size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);

#ifndef CONFIG_LPAE
	if (bank->start + size < bank->start) {
		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
			"32-bit physical address space\n", (long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - bank->start;
	}
#endif

	bank->size = size & PAGE_MASK;

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}
552
553 /*
554 * Pick out the memory size. We look for mem=size@start,
555 * where start and size are "size[KkMm]"
556 */
/*
 * Handler for the "mem=size[@start]" early parameter.  The first use
 * discards all firmware/ATAG-provided banks so the user's layout wins;
 * subsequent uses add further banks.  Defaults start to PHYS_OFFSET when
 * no "@start" is given.
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	unsigned long size;
	phys_addr_t start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
583 early_param("mem", early_mem);
584
/*
 * Propagate ramdisk parameters (from ATAG_RAMDISK) into the RAM block
 * device driver's module variables.  A zero @rd_sz keeps the driver's
 * default size.  No-op when CONFIG_BLK_DEV_RAM is disabled.
 */
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}
599
/*
 * Register the standard resource tree: one "System RAM" iomem resource
 * per memblock region (with the kernel code/data resources nested inside
 * the region that contains them), plus optional video RAM and the legacy
 * parallel-port I/O ranges requested by the machine descriptor.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		/* bootmem allocation: freed never, lives for the system's lifetime */
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
644
645 /*
646 * Tag parsing.
647 *
648 * This is the new way of passing data to the kernel at boot time. Rather
649 * than passing a fixed inflexible structure to the kernel, we pass a list
650 * of variable-sized tags to the kernel. The first tag must be a ATAG_CORE
651 * tag for the list to be recognised (to distinguish the tagged list from
652 * a param_struct). The list is terminated with a zero-length tag (this tag
653 * is not parsed in any way).
654 */
/*
 * ATAG_CORE handler: pick up the root-mount flags and root device when
 * the tag carries a payload (hdr.size > 2 words, i.e. more than just the
 * header).
 */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}
664
665 __tagtable(ATAG_CORE, parse_tag_core);
666
/* ATAG_MEM handler: register the described bank with meminfo. */
static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}
671
672 __tagtable(ATAG_MEM, parse_tag_mem32);
673
674 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/*
 * Default console screen parameters (80x30, VGA-compatible); may be
 * overridden by an ATAG_VIDEOTEXT tag below.
 */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
683
/* ATAG_VIDEOTEXT handler: copy the bootloader's text-console geometry
 * into the global screen_info. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}
697
698 __tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
699 #endif
700
/* ATAG_RAMDISK handler: flag bit 0 = load, bit 1 = prompt (both
 * active-low in the tag, hence the "== 0" inversions). */
static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}
708
709 __tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
710
/* ATAG_SERIAL handler: record the 64-bit board serial number halves
 * (exported via /proc/cpuinfo "Serial"). */
static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}
717
718 __tagtable(ATAG_SERIAL, parse_tag_serialnr);
719
/* ATAG_REVISION handler: record the board revision (exported via
 * /proc/cpuinfo "Revision"). */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}
725
726 __tagtable(ATAG_REVISION, parse_tag_revision);
727
/*
 * ATAG_CMDLINE handler.  Depending on config, the bootloader command
 * line is appended to the built-in one (CMDLINE_EXTEND), ignored
 * entirely (CMDLINE_FORCE), or replaces the default (the usual case).
 */
static int __init parse_tag_cmdline(const struct tag *tag)
{
#if defined(CONFIG_CMDLINE_EXTEND)
	strlcat(default_command_line, " ", COMMAND_LINE_SIZE);
	strlcat(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#else
	strlcpy(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#endif
	return 0;
}
742
743 __tagtable(ATAG_CMDLINE, parse_tag_cmdline);
744
745 /*
746 * Scan the tag table for this tag, and call its parse function.
747 * The tag table is built by the linker from all the __tagtable
748 * declarations.
749 */
/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.  Returns non-zero if a handler was found (t still points
 * inside the table after the break), zero for an unrecognised tag.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}
763
764 /*
765 * Parse all tags in the list, checking both the global and architecture
766 * specific tag tables.
767 */
/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.  The list ends at the first zero-size header.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}
776
777 /*
778 * This holds our defaults.
779 */
/*
 * This holds our defaults: a minimal ATAG list (core + one MEM_SIZE
 * memory bank + terminator) used when the bootloader supplied nothing
 * usable.  The bank's start address is filled in from PHYS_OFFSET by
 * setup_machine_tags().
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE },
	{ 0, ATAG_NONE }
};
793
/*
 * arch_initcall: give the machine descriptor a chance to register or
 * customise platform devices once core infrastructure is up.
 */
static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
801 arch_initcall(customize_machine);
802
803 #ifdef CONFIG_KEXEC
/* Total lowmem in bytes, derived from the pfn range (used to validate
 * the crashkernel= reservation). */
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}
811
812 /**
813 * reserve_crashkernel() - reserves memory are for crash kernel
814 *
815 * This function reserves memory area given in "crashkernel=" kernel command
816 * line parameter. The memory reserved is used by a dump capture kernel when
817 * primary kernel is crashing.
818 */
/**
 * reserve_crashkernel() - reserves memory are for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.  Silently returns if no (valid) crashkernel=
 * option was given or the reservation fails.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	/* exclusive: fail rather than overlap an existing reservation */
	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
848 #else
reserve_crashkernel(void)849 static inline void reserve_crashkernel(void) {}
850 #endif /* CONFIG_KEXEC */
851
/* Neutralise all ATAG_MEM tags in the list (turn them into ATAG_NONE),
 * used when the memory layout was already fixed up by other means. */
static void __init squash_mem_tags(struct tag *tag)
{
	for (; tag->hdr.size; tag = tag_next(tag))
		if (tag->hdr.tag == ATAG_MEM)
			tag->hdr.tag = ATAG_NONE;
}
858
/*
 * ATAG-based machine setup: locate the machine descriptor matching the
 * bootloader-supplied machine number @nr, find the tag list (boot
 * register, machine offset, or the built-in defaults), run the machine's
 * fixup hook, then save and parse the tags and seed boot_command_line.
 * Does not return if the machine number is unknown.
 */
static struct machine_desc * __init setup_machine_tags(unsigned int nr)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc = NULL, *p;
	char *from = default_command_line;

	/* complete the default tag list: the fallback bank starts at RAM base */
	init_tags.mem.start = PHYS_OFFSET;

	/*
	 * locate machine in the list of supported machines.
	 */
	for_each_machine_desc(p)
		if (nr == p->nr) {
			printk("Machine: %s\n", p->name);
			mdesc = p;
			break;
		}

	if (!mdesc) {
		early_print("\nError: unrecognized/unsupported machine ID"
			" (r1 = 0x%08x).\n\n", nr);
		dump_machine_table(); /* does not return */
	}

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->atag_offset)
		tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif

	if (tags->hdr.tag != ATAG_CORE) {
#if defined(CONFIG_OF)
		/*
		 * If CONFIG_OF is set, then assume this is a reasonably
		 * modern system that should pass boot parameters
		 */
		early_print("Warning: Neither atags nor dtb found\n");
#endif
		tags = (struct tag *)&init_tags;
	}

	if (mdesc->fixup)
		mdesc->fixup(tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* if fixup already populated meminfo, drop the MEM tags so
		 * they don't get added a second time by parse_tags() */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	return mdesc;
}
923
meminfo_cmp(const void * _a,const void * _b)924 static int __init meminfo_cmp(const void *_a, const void *_b)
925 {
926 const struct membank *a = _a, *b = _b;
927 long cmp = bank_pfn_start(a) - bank_pfn_start(b);
928 return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
929 }
930
/*
 * Main architecture-specific boot entry: identify the CPU, find the
 * machine (DT first, ATAGs as fallback), set up the command line,
 * memory layout, paging and resources, then wire up machine hooks.
 * The ordering below is load-bearing — each step depends on the ones
 * before it.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	/* prefer a flattened device tree; fall back to the ATAG path */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		extern unsigned long arm_dma_zone_size;
		arm_dma_zone_size = mdesc->dma_zone_size;
	}
#endif
	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	/* banks must be sorted before sanity checks and memblock setup */
	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	reserve_crashkernel();

	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}
997
998
/*
 * subsys_initcall: register every possible CPU with the sysfs CPU
 * subsystem, marking each as hotpluggable.
 */
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
1011 subsys_initcall(topology_init);
1012
1013 #ifdef CONFIG_HAVE_PROC_CPU
proc_cpu_init(void)1014 static int __init proc_cpu_init(void)
1015 {
1016 struct proc_dir_entry *res;
1017
1018 res = proc_mkdir("cpu", NULL);
1019 if (!res)
1020 return -ENOMEM;
1021 return 0;
1022 }
1023 fs_initcall(proc_cpu_init);
1024 #endif
1025
/*
 * Names of the HWCAP_* bits, in bit order (index i names bit 1 << i);
 * printed on the /proc/cpuinfo "Features" line.  NULL-terminated.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};
1048
/*
 * /proc/cpuinfo show callback: emit the processor name, per-CPU (SMP) or
 * global BogoMIPS, the hwcap feature list, the decoded CPUID fields, and
 * the board hardware/revision/serial lines.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	/* field layout depends on the CPUID format era (cf.
	 * __get_cpu_architecture) */
	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
1111
/*
 * seq_file start: /proc/cpuinfo is a single-record file, so yield one
 * dummy non-NULL token at position 0 and stop thereafter.
 */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos >= 1)
		return NULL;

	return (void *)1;
}
1116
/* seq_file next: advance the position; there is never a second record. */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return NULL;
}
1122
/* seq_file stop: nothing was allocated in c_start, so nothing to free. */
static void c_stop(struct seq_file *m, void *v)
{
}
1126
/* seq_file operations backing /proc/cpuinfo (single-shot iterator). */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
1133