1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Procedures for interfacing to Open Firmware.
4 *
5 * Paul Mackerras August 1996.
6 * Copyright (C) 1996-2005 Paul Mackerras.
7 *
8 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
9 * {engebret|bergner}@us.ibm.com
10 */
11
12 #undef DEBUG_PROM
13
14 /* we cannot use FORTIFY as it brings in new symbols */
15 #define __NO_FORTIFY
16
17 #include <linux/stdarg.h>
18 #include <linux/kernel.h>
19 #include <linux/string.h>
20 #include <linux/init.h>
21 #include <linux/threads.h>
22 #include <linux/spinlock.h>
23 #include <linux/types.h>
24 #include <linux/pci.h>
25 #include <linux/proc_fs.h>
26 #include <linux/delay.h>
27 #include <linux/initrd.h>
28 #include <linux/bitops.h>
29 #include <linux/pgtable.h>
30 #include <linux/printk.h>
31 #include <linux/of.h>
32 #include <linux/of_fdt.h>
33 #include <asm/prom.h>
34 #include <asm/rtas.h>
35 #include <asm/page.h>
36 #include <asm/processor.h>
37 #include <asm/interrupt.h>
38 #include <asm/irq.h>
39 #include <asm/io.h>
40 #include <asm/smp.h>
41 #include <asm/mmu.h>
42 #include <asm/iommu.h>
43 #include <asm/btext.h>
44 #include <asm/sections.h>
45 #include <asm/machdep.h>
46 #include <asm/asm-prototypes.h>
47 #include <asm/ultravisor-api.h>
48
49 #include <linux/linux_logo.h>
50
51 /* All of prom_init bss lives here */
52 #define __prombss __section(".bss.prominit")
53
54 /*
55 * Eventually bump that one up
56 */
57 #define DEVTREE_CHUNK_SIZE 0x100000
58
59 /*
60 * This is the size of the local memory reserve map that gets copied
61 * into the boot params passed to the kernel. That size is totally
62 * flexible as the kernel just reads the list until it encounters an
63 * entry with size 0, so it can be changed without breaking binary
64 * compatibility
65 */
66 #define MEM_RESERVE_MAP_SIZE 8
67
68 /*
69 * prom_init() is called very early on, before the kernel text
70 * and data have been mapped to KERNELBASE. At this point the code
71 * is running at whatever address it has been loaded at.
72 * On ppc32 we compile with -mrelocatable, which means that references
73 * to extern and static variables get relocated automatically.
74 * ppc64 objects are always relocatable, we just need to relocate the
75 * TOC.
76 *
77 * Because OF may have mapped I/O devices into the area starting at
78 * KERNELBASE, particularly on CHRP machines, we can't safely call
79 * OF once the kernel has been mapped to KERNELBASE. Therefore all
80 * OF calls must be done within prom_init().
81 *
82 * ADDR is used in calls to call_prom. The 4th and following
83 * arguments to call_prom should be 32-bit values.
84 * On ppc64, 64 bit values are truncated to 32 bits (and
85 * fortunately don't get interpreted as two arguments).
86 */
87 #define ADDR(x) (u32)(unsigned long)(x)
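/*
 * For example, prom_getprop() below boils down to
 *	call_prom("getprop", 4, 1, node, ADDR(pname),
 *		  (u32)(unsigned long)value, (u32)valuelen);
 * i.e. pointers and lengths are squeezed into 32-bit cells before being
 * handed to OF.
 */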
88
89 #ifdef CONFIG_PPC64
90 #define OF_WORKAROUNDS 0
91 #else
92 #define OF_WORKAROUNDS of_workarounds
93 static int of_workarounds __prombss;
94 #endif
95
96 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
97 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
98
99 #define PROM_BUG() do { \
100 prom_printf("kernel BUG at %s line 0x%x!\n", \
101 __FILE__, __LINE__); \
102 __builtin_trap(); \
103 } while (0)
104
105 #ifdef DEBUG_PROM
106 #define prom_debug(x...) prom_printf(x)
107 #else
108 #define prom_debug(x...) do { } while (0)
109 #endif
110
111
112 typedef u32 prom_arg_t;
113
114 struct prom_args {
115 __be32 service;
116 __be32 nargs;
117 __be32 nret;
118 __be32 args[10];
119 };
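/*
 * Layout as used by call_prom()/call_prom_ret() below: args[0..nargs-1] hold
 * the input cells, args[nargs..nargs+nret-1] receive the results, and all
 * cells are big-endian as seen by OF. Rough sketch of a 3-in/1-out call:
 *
 *	args = { service="claim", nargs=3, nret=1,
 *		 args[0]=addr, args[1]=size, args[2]=align, args[3]=<result> }
 */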
120
121 struct prom_t {
122 ihandle root;
123 phandle chosen;
124 int cpu;
125 ihandle stdout;
126 ihandle mmumap;
127 ihandle memory;
128 };
129
130 struct mem_map_entry {
131 __be64 base;
132 __be64 size;
133 };
134
135 typedef __be32 cell_t;
136
137 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
138 unsigned long r6, unsigned long r7, unsigned long r8,
139 unsigned long r9);
140
141 #ifdef CONFIG_PPC64
142 extern int enter_prom(struct prom_args *args, unsigned long entry);
143 #else
144 static inline int enter_prom(struct prom_args *args, unsigned long entry)
145 {
146 return ((int (*)(struct prom_args *))entry)(args);
147 }
148 #endif
149
150 extern void copy_and_flush(unsigned long dest, unsigned long src,
151 unsigned long size, unsigned long offset);
152
153 /* prom structure */
154 static struct prom_t __prombss prom;
155
156 static unsigned long __prombss prom_entry;
157
158 static char __prombss of_stdout_device[256];
159 static char __prombss prom_scratch[256];
160
161 static unsigned long __prombss dt_header_start;
162 static unsigned long __prombss dt_struct_start, dt_struct_end;
163 static unsigned long __prombss dt_string_start, dt_string_end;
164
165 static unsigned long __prombss prom_initrd_start, prom_initrd_end;
166
167 #ifdef CONFIG_PPC64
168 static int __prombss prom_iommu_force_on;
169 static int __prombss prom_iommu_off;
170 static unsigned long __prombss prom_tce_alloc_start;
171 static unsigned long __prombss prom_tce_alloc_end;
172 #endif
173
174 #ifdef CONFIG_PPC_PSERIES
175 static bool __prombss prom_radix_disable;
176 static bool __prombss prom_radix_gtse_disable;
177 static bool __prombss prom_xive_disable;
178 #endif
179
180 #ifdef CONFIG_PPC_SVM
181 static bool __prombss prom_svm_enable;
182 #endif
183
184 struct platform_support {
185 bool hash_mmu;
186 bool radix_mmu;
187 bool radix_gtse;
188 bool xive;
189 };
190
191 /* Platform codes are now obsolete in the kernel. They are only used within
192 * this file and will ultimately go away too. Feel free to change them if you
193 * need to; they are not shared with anything outside of this file anymore.
194 */
195 #define PLATFORM_PSERIES 0x0100
196 #define PLATFORM_PSERIES_LPAR 0x0101
197 #define PLATFORM_LPAR 0x0001
198 #define PLATFORM_POWERMAC 0x0400
199 #define PLATFORM_GENERIC 0x0500
200
201 static int __prombss of_platform;
202
203 static char __prombss prom_cmd_line[COMMAND_LINE_SIZE];
204
205 static unsigned long __prombss prom_memory_limit;
206
207 static unsigned long __prombss alloc_top;
208 static unsigned long __prombss alloc_top_high;
209 static unsigned long __prombss alloc_bottom;
210 static unsigned long __prombss rmo_top;
211 static unsigned long __prombss ram_top;
212
213 static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE];
214 static int __prombss mem_reserve_cnt;
215
216 static cell_t __prombss regbuf[1024];
217
218 static bool __prombss rtas_has_query_cpu_stopped;
219
220
221 /*
222 * Error results ... some OF calls will return "-1" on error, some
223 * will return 0, some will return either. To simplify, here are
224 * macros to use with any ihandle or phandle return value to check if
225 * it is valid
226 */
227
228 #define PROM_ERROR (-1u)
229 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
230 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
231
232 /* Copied from lib/string.c and lib/kstrtox.c */
233
234 static int __init prom_strcmp(const char *cs, const char *ct)
235 {
236 unsigned char c1, c2;
237
238 while (1) {
239 c1 = *cs++;
240 c2 = *ct++;
241 if (c1 != c2)
242 return c1 < c2 ? -1 : 1;
243 if (!c1)
244 break;
245 }
246 return 0;
247 }
248
249 static ssize_t __init prom_strscpy_pad(char *dest, const char *src, size_t n)
250 {
251 ssize_t rc;
252 size_t i;
253
254 if (n == 0 || n > INT_MAX)
255 return -E2BIG;
256
257 // Copy up to n bytes
258 for (i = 0; i < n && src[i] != '\0'; i++)
259 dest[i] = src[i];
260
261 rc = i;
262
263 // If we copied all n then we have run out of space for the nul
264 if (rc == n) {
265 // Rewind by one character to ensure nul termination
266 i--;
267 rc = -E2BIG;
268 }
269
270 for (; i < n; i++)
271 dest[i] = '\0';
272
273 return rc;
274 }
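/*
 * Rough behaviour sketch: for some buffer buf, prom_strscpy_pad(buf, "radix",
 * sizeof(buf)) copies the string, NUL-pads the remainder of buf and returns 5;
 * if src does not fit, the copy is truncated (still NUL-terminated) and
 * -E2BIG is returned.
 */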
275
276 static int __init prom_strncmp(const char *cs, const char *ct, size_t count)
277 {
278 unsigned char c1, c2;
279
280 while (count) {
281 c1 = *cs++;
282 c2 = *ct++;
283 if (c1 != c2)
284 return c1 < c2 ? -1 : 1;
285 if (!c1)
286 break;
287 count--;
288 }
289 return 0;
290 }
291
292 static size_t __init prom_strlen(const char *s)
293 {
294 const char *sc;
295
296 for (sc = s; *sc != '\0'; ++sc)
297 /* nothing */;
298 return sc - s;
299 }
300
301 static int __init prom_memcmp(const void *cs, const void *ct, size_t count)
302 {
303 const unsigned char *su1, *su2;
304 int res = 0;
305
306 for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
307 if ((res = *su1 - *su2) != 0)
308 break;
309 return res;
310 }
311
312 static char __init *prom_strstr(const char *s1, const char *s2)
313 {
314 size_t l1, l2;
315
316 l2 = prom_strlen(s2);
317 if (!l2)
318 return (char *)s1;
319 l1 = prom_strlen(s1);
320 while (l1 >= l2) {
321 l1--;
322 if (!prom_memcmp(s1, s2, l2))
323 return (char *)s1;
324 s1++;
325 }
326 return NULL;
327 }
328
329 static size_t __init prom_strlcat(char *dest, const char *src, size_t count)
330 {
331 size_t dsize = prom_strlen(dest);
332 size_t len = prom_strlen(src);
333 size_t res = dsize + len;
334
335 /* This would be a bug */
336 if (dsize >= count)
337 return count;
338
339 dest += dsize;
340 count -= dsize;
341 if (len >= count)
342 len = count-1;
343 memcpy(dest, src, len);
344 dest[len] = 0;
345 return res;
346
347 }
348
349 #ifdef CONFIG_PPC_PSERIES
350 static int __init prom_strtobool(const char *s, bool *res)
351 {
352 if (!s)
353 return -EINVAL;
354
355 switch (s[0]) {
356 case 'y':
357 case 'Y':
358 case '1':
359 *res = true;
360 return 0;
361 case 'n':
362 case 'N':
363 case '0':
364 *res = false;
365 return 0;
366 case 'o':
367 case 'O':
368 switch (s[1]) {
369 case 'n':
370 case 'N':
371 *res = true;
372 return 0;
373 case 'f':
374 case 'F':
375 *res = false;
376 return 0;
377 default:
378 break;
379 }
380 break;
381 default:
382 break;
383 }
384
385 return -EINVAL;
386 }
387 #endif
388
389 /* This is the one and *ONLY* place where we actually call open
390 * firmware.
391 */
392
393 static int __init call_prom(const char *service, int nargs, int nret, ...)
394 {
395 int i;
396 struct prom_args args;
397 va_list list;
398
399 args.service = cpu_to_be32(ADDR(service));
400 args.nargs = cpu_to_be32(nargs);
401 args.nret = cpu_to_be32(nret);
402
403 va_start(list, nret);
404 for (i = 0; i < nargs; i++)
405 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
406 va_end(list);
407
408 for (i = 0; i < nret; i++)
409 args.args[nargs+i] = 0;
410
411 if (enter_prom(&args, prom_entry) < 0)
412 return PROM_ERROR;
413
414 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
415 }
416
417 static int __init call_prom_ret(const char *service, int nargs, int nret,
418 prom_arg_t *rets, ...)
419 {
420 int i;
421 struct prom_args args;
422 va_list list;
423
424 args.service = cpu_to_be32(ADDR(service));
425 args.nargs = cpu_to_be32(nargs);
426 args.nret = cpu_to_be32(nret);
427
428 va_start(list, rets);
429 for (i = 0; i < nargs; i++)
430 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
431 va_end(list);
432
433 for (i = 0; i < nret; i++)
434 args.args[nargs+i] = 0;
435
436 if (enter_prom(&args, prom_entry) < 0)
437 return PROM_ERROR;
438
439 if (rets != NULL)
440 for (i = 1; i < nret; ++i)
441 rets[i-1] = be32_to_cpu(args.args[nargs+i]);
442
443 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
444 }
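/*
 * Note on the rets[] convention: the first output cell (args[nargs]) becomes
 * the function's return value, the remaining nret-1 cells are copied into
 * rets[]. prom_claim() below uses this roughly as:
 *
 *	ret = call_prom_ret("call-method", 5, 2, &result,
 *			    ADDR("claim"), prom.memory, align, size, virt);
 *	// ret = call-method status, result = address actually claimed
 */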
445
446
447 static void __init prom_print(const char *msg)
448 {
449 const char *p, *q;
450
451 if (prom.stdout == 0)
452 return;
453
454 for (p = msg; *p != 0; p = q) {
455 for (q = p; *q != 0 && *q != '\n'; ++q)
456 ;
457 if (q > p)
458 call_prom("write", 3, 1, prom.stdout, p, q - p);
459 if (*q == 0)
460 break;
461 ++q;
462 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
463 }
464 }
465
466
467 /*
468 * Both prom_print_hex & prom_print_dec take an unsigned long as input so that
469 * we do not need __udivdi3 or __umoddi3 on 32-bit builds.
470 */
471 static void __init prom_print_hex(unsigned long val)
472 {
473 int i, nibbles = sizeof(val)*2;
474 char buf[sizeof(val)*2+1];
475
476 for (i = nibbles-1; i >= 0; i--) {
477 buf[i] = (val & 0xf) + '0';
478 if (buf[i] > '9')
479 buf[i] += ('a'-'0'-10);
480 val >>= 4;
481 }
482 buf[nibbles] = '\0';
483 call_prom("write", 3, 1, prom.stdout, buf, nibbles);
484 }
485
486 /* max number of decimal digits in an unsigned long */
487 #define UL_DIGITS 21
488 static void __init prom_print_dec(unsigned long val)
489 {
490 int i, size;
491 char buf[UL_DIGITS+1];
492
493 for (i = UL_DIGITS-1; i >= 0; i--) {
494 buf[i] = (val % 10) + '0';
495 val = val/10;
496 if (val == 0)
497 break;
498 }
499 /* shift stuff down */
500 size = UL_DIGITS - i;
501 call_prom("write", 3, 1, prom.stdout, buf+i, size);
502 }
503
504 __printf(1, 2)
505 static void __init prom_printf(const char *format, ...)
506 {
507 const char *p, *q, *s;
508 va_list args;
509 unsigned long v;
510 long vs;
511 int n = 0;
512
513 va_start(args, format);
514 for (p = format; *p != 0; p = q) {
515 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
516 ;
517 if (q > p)
518 call_prom("write", 3, 1, prom.stdout, p, q - p);
519 if (*q == 0)
520 break;
521 if (*q == '\n') {
522 ++q;
523 call_prom("write", 3, 1, prom.stdout,
524 ADDR("\r\n"), 2);
525 continue;
526 }
527 ++q;
528 if (*q == 0)
529 break;
530 while (*q == 'l') {
531 ++q;
532 ++n;
533 }
534 switch (*q) {
535 case 's':
536 ++q;
537 s = va_arg(args, const char *);
538 prom_print(s);
539 break;
540 case 'x':
541 ++q;
542 switch (n) {
543 case 0:
544 v = va_arg(args, unsigned int);
545 break;
546 case 1:
547 v = va_arg(args, unsigned long);
548 break;
549 case 2:
550 default:
551 v = va_arg(args, unsigned long long);
552 break;
553 }
554 prom_print_hex(v);
555 break;
556 case 'u':
557 ++q;
558 switch (n) {
559 case 0:
560 v = va_arg(args, unsigned int);
561 break;
562 case 1:
563 v = va_arg(args, unsigned long);
564 break;
565 case 2:
566 default:
567 v = va_arg(args, unsigned long long);
568 break;
569 }
570 prom_print_dec(v);
571 break;
572 case 'd':
573 ++q;
574 switch (n) {
575 case 0:
576 vs = va_arg(args, int);
577 break;
578 case 1:
579 vs = va_arg(args, long);
580 break;
581 case 2:
582 default:
583 vs = va_arg(args, long long);
584 break;
585 }
586 if (vs < 0) {
587 prom_print("-");
588 vs = -vs;
589 }
590 prom_print_dec(vs);
591 break;
592 }
593 }
594 va_end(args);
595 }
596
597
598 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
599 unsigned long align)
600 {
601
602 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
603 /*
604 * Old OF requires we claim physical and virtual separately
605 * and then map explicitly (assuming virtual mode)
606 */
607 int ret;
608 prom_arg_t result;
609
610 ret = call_prom_ret("call-method", 5, 2, &result,
611 ADDR("claim"), prom.memory,
612 align, size, virt);
613 if (ret != 0 || result == -1)
614 return -1;
615 ret = call_prom_ret("call-method", 5, 2, &result,
616 ADDR("claim"), prom.mmumap,
617 align, size, virt);
618 if (ret != 0) {
619 call_prom("call-method", 4, 1, ADDR("release"),
620 prom.memory, size, virt);
621 return -1;
622 }
623 /* the 0x12 is M (coherence) + PP == read/write */
624 call_prom("call-method", 6, 1,
625 ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
626 return virt;
627 }
628 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
629 (prom_arg_t)align);
630 }
631
632 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
633 {
634 prom_print(reason);
635 /* Do not call exit because it clears the screen on pmac;
636 * it also causes some sort of double-fault on early pmacs */
637 if (of_platform == PLATFORM_POWERMAC)
638 asm("trap\n");
639
640 /* ToDo: should put up an SRC here on pSeries */
641 call_prom("exit", 0, 0);
642
643 for (;;) /* should never get here */
644 ;
645 }
646
647
648 static int __init prom_next_node(phandle *nodep)
649 {
650 phandle node;
651
652 if ((node = *nodep) != 0
653 && (*nodep = call_prom("child", 1, 1, node)) != 0)
654 return 1;
655 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
656 return 1;
657 for (;;) {
658 if ((node = call_prom("parent", 1, 1, node)) == 0)
659 return 0;
660 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
661 return 1;
662 }
663 }
664
665 static inline int __init prom_getprop(phandle node, const char *pname,
666 void *value, size_t valuelen)
667 {
668 return call_prom("getprop", 4, 1, node, ADDR(pname),
669 (u32)(unsigned long) value, (u32) valuelen);
670 }
671
672 static inline int __init prom_getproplen(phandle node, const char *pname)
673 {
674 return call_prom("getproplen", 2, 1, node, ADDR(pname));
675 }
676
677 static void __init add_string(char **str, const char *q)
678 {
679 char *p = *str;
680
681 while (*q)
682 *p++ = *q++;
683 *p++ = ' ';
684 *str = p;
685 }
686
687 static char *__init tohex(unsigned int x)
688 {
689 static const char digits[] __initconst = "0123456789abcdef";
690 static char result[9] __prombss;
691 int i;
692
693 result[8] = 0;
694 i = 8;
695 do {
696 --i;
697 result[i] = digits[x & 0xf];
698 x >>= 4;
699 } while (x != 0 && i > 0);
700 return &result[i];
701 }
702
703 static int __init prom_setprop(phandle node, const char *nodename,
704 const char *pname, void *value, size_t valuelen)
705 {
706 char cmd[256], *p;
707
708 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
709 return call_prom("setprop", 4, 1, node, ADDR(pname),
710 (u32)(unsigned long) value, (u32) valuelen);
711
712 /* gah... setprop doesn't work on longtrail, have to use interpret */
713 p = cmd;
714 add_string(&p, "dev");
715 add_string(&p, nodename);
716 add_string(&p, tohex((u32)(unsigned long) value));
717 add_string(&p, tohex(valuelen));
718 add_string(&p, tohex(ADDR(pname)));
719 add_string(&p, tohex(prom_strlen(pname)));
720 add_string(&p, "property");
721 *p = 0;
722 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
723 }
724
725 /* We can't use the standard versions because of relocation headaches. */
726 #define prom_isxdigit(c) \
727 (('0' <= (c) && (c) <= '9') || ('a' <= (c) && (c) <= 'f') || ('A' <= (c) && (c) <= 'F'))
728
729 #define prom_isdigit(c) ('0' <= (c) && (c) <= '9')
730 #define prom_islower(c) ('a' <= (c) && (c) <= 'z')
731 #define prom_toupper(c) (prom_islower(c) ? ((c) - 'a' + 'A') : (c))
732
733 static unsigned long __init prom_strtoul(const char *cp, const char **endp)
734 {
735 unsigned long result = 0, base = 10, value;
736
737 if (*cp == '0') {
738 base = 8;
739 cp++;
740 if (prom_toupper(*cp) == 'X') {
741 cp++;
742 base = 16;
743 }
744 }
745
746 while (prom_isxdigit(*cp) &&
747 (value = prom_isdigit(*cp) ? *cp - '0' : prom_toupper(*cp) - 'A' + 10) < base) {
748 result = result * base + value;
749 cp++;
750 }
751
752 if (endp)
753 *endp = cp;
754
755 return result;
756 }
757
758 static unsigned long __init prom_memparse(const char *ptr, const char **retptr)
759 {
760 unsigned long ret = prom_strtoul(ptr, retptr);
761 int shift = 0;
762
763 /*
764 * We can't use a switch here because GCC *may* generate a
765 * jump table which won't work, because we're not running at
766 * the address we're linked at.
767 */
768 if ('G' == **retptr || 'g' == **retptr)
769 shift = 30;
770
771 if ('M' == **retptr || 'm' == **retptr)
772 shift = 20;
773
774 if ('K' == **retptr || 'k' == **retptr)
775 shift = 10;
776
777 if (shift) {
778 ret <<= shift;
779 (*retptr)++;
780 }
781
782 return ret;
783 }
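/*
 * Worked example: for "mem=512M" the caller passes ptr pointing at "512M";
 * prom_strtoul() returns 512 leaving *retptr at 'M', the 'M' selects
 * shift = 20, so the result is 512 << 20 = 0x20000000 and *retptr is
 * advanced past the suffix.
 */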
784
785 /*
786 * Early parsing of the command line passed to the kernel, used for
787 * "mem=x" and the options that affect the iommu
788 */
789 static void __init early_cmdline_parse(void)
790 {
791 const char *opt;
792
793 char *p;
794 int l = 0;
795
796 prom_cmd_line[0] = 0;
797 p = prom_cmd_line;
798
799 if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && (long)prom.chosen > 0)
800 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
801
802 if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) || l <= 0 || p[0] == '\0')
803 prom_strlcat(prom_cmd_line, " " CONFIG_CMDLINE,
804 sizeof(prom_cmd_line));
805
806 prom_printf("command line: %s\n", prom_cmd_line);
807
808 #ifdef CONFIG_PPC64
809 opt = prom_strstr(prom_cmd_line, "iommu=");
810 if (opt) {
811 prom_printf("iommu opt is: %s\n", opt);
812 opt += 6;
813 while (*opt && *opt == ' ')
814 opt++;
815 if (!prom_strncmp(opt, "off", 3))
816 prom_iommu_off = 1;
817 else if (!prom_strncmp(opt, "force", 5))
818 prom_iommu_force_on = 1;
819 }
820 #endif
821 opt = prom_strstr(prom_cmd_line, "mem=");
822 if (opt) {
823 opt += 4;
824 prom_memory_limit = prom_memparse(opt, (const char **)&opt);
825 #ifdef CONFIG_PPC64
826 /* Align to 16 MB == size of ppc64 large page */
827 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
828 #endif
829 }
830
831 #ifdef CONFIG_PPC_PSERIES
832 prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
833 opt = prom_strstr(prom_cmd_line, "disable_radix");
834 if (opt) {
835 opt += 13;
836 if (*opt && *opt == '=') {
837 bool val;
838
839 if (prom_strtobool(++opt, &val))
840 prom_radix_disable = false;
841 else
842 prom_radix_disable = val;
843 } else
844 prom_radix_disable = true;
845 }
846 if (prom_radix_disable)
847 prom_debug("Radix disabled from cmdline\n");
848
849 opt = prom_strstr(prom_cmd_line, "radix_hcall_invalidate=on");
850 if (opt) {
851 prom_radix_gtse_disable = true;
852 prom_debug("Radix GTSE disabled from cmdline\n");
853 }
854
855 opt = prom_strstr(prom_cmd_line, "xive=off");
856 if (opt) {
857 prom_xive_disable = true;
858 prom_debug("XIVE disabled from cmdline\n");
859 }
860 #endif /* CONFIG_PPC_PSERIES */
861
862 #ifdef CONFIG_PPC_SVM
863 opt = prom_strstr(prom_cmd_line, "svm=");
864 if (opt) {
865 bool val;
866
867 opt += sizeof("svm=") - 1;
868 if (!prom_strtobool(opt, &val))
869 prom_svm_enable = val;
870 }
871 #endif /* CONFIG_PPC_SVM */
872 }
873
874 #ifdef CONFIG_PPC_PSERIES
875 /*
876 * The architecture vector has an array of PVR mask/value pairs,
877 * followed by # option vectors - 1, followed by the option vectors.
878 *
879 * See prom.h for the definition of the bits specified in the
880 * architecture vector.
881 */
882
883 /* Firmware expects the value to be n - 1, where n is the # of vectors */
884 #define NUM_VECTORS(n) ((n) - 1)
885
886 /*
887 * Firmware expects 1 + n - 2, where n is the length of the option vector in
888 * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
889 */
890 #define VECTOR_LENGTH(n) (1 + (n) - 2)
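/* e.g. for struct option_vector1 (3 bytes) this evaluates to 1 + 3 - 2 = 2 */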
891
892 struct option_vector1 {
893 u8 byte1;
894 u8 arch_versions;
895 u8 arch_versions3;
896 } __packed;
897
898 struct option_vector2 {
899 u8 byte1;
900 __be16 reserved;
901 __be32 real_base;
902 __be32 real_size;
903 __be32 virt_base;
904 __be32 virt_size;
905 __be32 load_base;
906 __be32 min_rma;
907 __be32 min_load;
908 u8 min_rma_percent;
909 u8 max_pft_size;
910 } __packed;
911
912 struct option_vector3 {
913 u8 byte1;
914 u8 byte2;
915 } __packed;
916
917 struct option_vector4 {
918 u8 byte1;
919 u8 min_vp_cap;
920 } __packed;
921
922 struct option_vector5 {
923 u8 byte1;
924 u8 byte2;
925 u8 byte3;
926 u8 cmo;
927 u8 associativity;
928 u8 bin_opts;
929 u8 micro_checkpoint;
930 u8 reserved0;
931 __be32 max_cpus;
932 __be16 papr_level;
933 __be16 reserved1;
934 u8 platform_facilities;
935 u8 reserved2;
936 __be16 reserved3;
937 u8 subprocessors;
938 u8 byte22;
939 u8 intarch;
940 u8 mmu;
941 u8 hash_ext;
942 u8 radix_ext;
943 } __packed;
944
945 struct option_vector6 {
946 u8 reserved;
947 u8 secondary_pteg;
948 u8 os_name;
949 } __packed;
950
951 struct option_vector7 {
952 u8 os_id[256];
953 } __packed;
954
955 struct ibm_arch_vec {
956 struct { u32 mask, val; } pvrs[14];
957
958 u8 num_vectors;
959
960 u8 vec1_len;
961 struct option_vector1 vec1;
962
963 u8 vec2_len;
964 struct option_vector2 vec2;
965
966 u8 vec3_len;
967 struct option_vector3 vec3;
968
969 u8 vec4_len;
970 struct option_vector4 vec4;
971
972 u8 vec5_len;
973 struct option_vector5 vec5;
974
975 u8 vec6_len;
976 struct option_vector6 vec6;
977
978 u8 vec7_len;
979 struct option_vector7 vec7;
980 } __packed;
981
982 static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
983 .pvrs = {
984 {
985 .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
986 .val = cpu_to_be32(0x003a0000),
987 },
988 {
989 .mask = cpu_to_be32(0xffff0000), /* POWER6 */
990 .val = cpu_to_be32(0x003e0000),
991 },
992 {
993 .mask = cpu_to_be32(0xffff0000), /* POWER7 */
994 .val = cpu_to_be32(0x003f0000),
995 },
996 {
997 .mask = cpu_to_be32(0xffff0000), /* POWER8E */
998 .val = cpu_to_be32(0x004b0000),
999 },
1000 {
1001 .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
1002 .val = cpu_to_be32(0x004c0000),
1003 },
1004 {
1005 .mask = cpu_to_be32(0xffff0000), /* POWER8 */
1006 .val = cpu_to_be32(0x004d0000),
1007 },
1008 {
1009 .mask = cpu_to_be32(0xffff0000), /* POWER9 */
1010 .val = cpu_to_be32(0x004e0000),
1011 },
1012 {
1013 .mask = cpu_to_be32(0xffff0000), /* POWER10 */
1014 .val = cpu_to_be32(0x00800000),
1015 },
1016 {
1017 .mask = cpu_to_be32(0xffffffff), /* all 3.1-compliant */
1018 .val = cpu_to_be32(0x0f000006),
1019 },
1020 {
1021 .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
1022 .val = cpu_to_be32(0x0f000005),
1023 },
1024 {
1025 .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
1026 .val = cpu_to_be32(0x0f000004),
1027 },
1028 {
1029 .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
1030 .val = cpu_to_be32(0x0f000003),
1031 },
1032 {
1033 .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
1034 .val = cpu_to_be32(0x0f000002),
1035 },
1036 {
1037 .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
1038 .val = cpu_to_be32(0x0f000001),
1039 },
1040 },
1041
1042 .num_vectors = NUM_VECTORS(6),
1043
1044 .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
1045 .vec1 = {
1046 .byte1 = 0,
1047 .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
1048 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
1049 .arch_versions3 = OV1_PPC_3_00 | OV1_PPC_3_1,
1050 },
1051
1052 .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
1053 /* option vector 2: Open Firmware options supported */
1054 .vec2 = {
1055 .byte1 = OV2_REAL_MODE,
1056 .reserved = 0,
1057 .real_base = cpu_to_be32(0xffffffff),
1058 .real_size = cpu_to_be32(0xffffffff),
1059 .virt_base = cpu_to_be32(0xffffffff),
1060 .virt_size = cpu_to_be32(0xffffffff),
1061 .load_base = cpu_to_be32(0xffffffff),
1062 .min_rma = cpu_to_be32(512), /* 512MB min RMA */
1063 .min_load = cpu_to_be32(0xffffffff), /* full client load */
1064 .min_rma_percent = 0, /* min RMA percentage of total RAM */
1065 .max_pft_size = 48, /* max log_2(hash table size) */
1066 },
1067
1068 .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
1069 /* option vector 3: processor options supported */
1070 .vec3 = {
1071 .byte1 = 0, /* don't ignore, don't halt */
1072 .byte2 = OV3_FP | OV3_VMX | OV3_DFP,
1073 },
1074
1075 .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
1076 /* option vector 4: IBM PAPR implementation */
1077 .vec4 = {
1078 .byte1 = 0, /* don't halt */
1079 .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
1080 },
1081
1082 .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
1083 /* option vector 5: PAPR/OF options */
1084 .vec5 = {
1085 .byte1 = 0, /* don't ignore, don't halt */
1086 .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
1087 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
1088 #ifdef CONFIG_PCI_MSI
1089 /* PCIe/MSI support. Without MSI full PCIe is not supported */
1090 OV5_FEAT(OV5_MSI),
1091 #else
1092 0,
1093 #endif
1094 .byte3 = 0,
1095 .cmo =
1096 #ifdef CONFIG_PPC_SMLPAR
1097 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
1098 #else
1099 0,
1100 #endif
1101 .associativity = OV5_FEAT(OV5_FORM1_AFFINITY) | OV5_FEAT(OV5_PRRN) |
1102 OV5_FEAT(OV5_FORM2_AFFINITY),
1103 .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
1104 .micro_checkpoint = 0,
1105 .reserved0 = 0,
1106 .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */
1107 .papr_level = 0,
1108 .reserved1 = 0,
1109 .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
1110 .reserved2 = 0,
1111 .reserved3 = 0,
1112 .subprocessors = 1,
1113 .byte22 = OV5_FEAT(OV5_DRMEM_V2) | OV5_FEAT(OV5_DRC_INFO),
1114 .intarch = 0,
1115 .mmu = 0,
1116 .hash_ext = 0,
1117 .radix_ext = 0,
1118 },
1119
1120 /* option vector 6: IBM PAPR hints */
1121 .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
1122 .vec6 = {
1123 .reserved = 0,
1124 .secondary_pteg = 0,
1125 .os_name = OV6_LINUX,
1126 },
1127
1128 /* option vector 7: OS Identification */
1129 .vec7_len = VECTOR_LENGTH(sizeof(struct option_vector7)),
1130 };
1131
1132 static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned;
1133
1134 /* Old method - ELF header with PT_NOTE sections only works on BE */
1135 #ifdef __BIG_ENDIAN__
1136 static const struct fake_elf {
1137 Elf32_Ehdr elfhdr;
1138 Elf32_Phdr phdr[2];
1139 struct chrpnote {
1140 u32 namesz;
1141 u32 descsz;
1142 u32 type;
1143 char name[8]; /* "PowerPC" */
1144 struct chrpdesc {
1145 u32 real_mode;
1146 u32 real_base;
1147 u32 real_size;
1148 u32 virt_base;
1149 u32 virt_size;
1150 u32 load_base;
1151 } chrpdesc;
1152 } chrpnote;
1153 struct rpanote {
1154 u32 namesz;
1155 u32 descsz;
1156 u32 type;
1157 char name[24]; /* "IBM,RPA-Client-Config" */
1158 struct rpadesc {
1159 u32 lpar_affinity;
1160 u32 min_rmo_size;
1161 u32 min_rmo_percent;
1162 u32 max_pft_size;
1163 u32 splpar;
1164 u32 min_load;
1165 u32 new_mem_def;
1166 u32 ignore_me;
1167 } rpadesc;
1168 } rpanote;
1169 } fake_elf __initconst = {
1170 .elfhdr = {
1171 .e_ident = { 0x7f, 'E', 'L', 'F',
1172 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
1173 .e_type = ET_EXEC, /* yeah right */
1174 .e_machine = EM_PPC,
1175 .e_version = EV_CURRENT,
1176 .e_phoff = offsetof(struct fake_elf, phdr),
1177 .e_phentsize = sizeof(Elf32_Phdr),
1178 .e_phnum = 2
1179 },
1180 .phdr = {
1181 [0] = {
1182 .p_type = PT_NOTE,
1183 .p_offset = offsetof(struct fake_elf, chrpnote),
1184 .p_filesz = sizeof(struct chrpnote)
1185 }, [1] = {
1186 .p_type = PT_NOTE,
1187 .p_offset = offsetof(struct fake_elf, rpanote),
1188 .p_filesz = sizeof(struct rpanote)
1189 }
1190 },
1191 .chrpnote = {
1192 .namesz = sizeof("PowerPC"),
1193 .descsz = sizeof(struct chrpdesc),
1194 .type = 0x1275,
1195 .name = "PowerPC",
1196 .chrpdesc = {
1197 .real_mode = ~0U, /* ~0 means "don't care" */
1198 .real_base = ~0U,
1199 .real_size = ~0U,
1200 .virt_base = ~0U,
1201 .virt_size = ~0U,
1202 .load_base = ~0U
1203 },
1204 },
1205 .rpanote = {
1206 .namesz = sizeof("IBM,RPA-Client-Config"),
1207 .descsz = sizeof(struct rpadesc),
1208 .type = 0x12759999,
1209 .name = "IBM,RPA-Client-Config",
1210 .rpadesc = {
1211 .lpar_affinity = 0,
1212 .min_rmo_size = 64, /* in megabytes */
1213 .min_rmo_percent = 0,
1214 .max_pft_size = 48, /* 2^48 bytes max PFT size */
1215 .splpar = 1,
1216 .min_load = ~0U,
1217 .new_mem_def = 0
1218 }
1219 }
1220 };
1221 #endif /* __BIG_ENDIAN__ */
1222
1223 static int __init prom_count_smt_threads(void)
1224 {
1225 phandle node;
1226 char type[64];
1227 unsigned int plen;
1228
1229 /* Pick up the first CPU node we can find */
1230 for (node = 0; prom_next_node(&node); ) {
1231 type[0] = 0;
1232 prom_getprop(node, "device_type", type, sizeof(type));
1233
1234 if (prom_strcmp(type, "cpu"))
1235 continue;
1236 /*
1237 * There is an entry for each smt thread, each entry being
1238 * 4 bytes long. All cpus should have the same number of
1239 * smt threads, so return after finding the first.
1240 */
1241 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
1242 if (plen == PROM_ERROR)
1243 break;
1244 plen >>= 2;
1245 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
1246
1247 /* Sanity check */
1248 if (plen < 1 || plen > 64) {
1249 prom_printf("Threads per core %lu out of bounds, assuming 1\n",
1250 (unsigned long)plen);
1251 return 1;
1252 }
1253 return plen;
1254 }
1255 prom_debug("No threads found, assuming 1 per core\n");
1256
1257 return 1;
1258
1259 }
1260
1261 static void __init prom_parse_mmu_model(u8 val,
1262 struct platform_support *support)
1263 {
1264 switch (val) {
1265 case OV5_FEAT(OV5_MMU_DYNAMIC):
1266 case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
1267 prom_debug("MMU - either supported\n");
1268 support->radix_mmu = !prom_radix_disable;
1269 support->hash_mmu = true;
1270 break;
1271 case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
1272 prom_debug("MMU - radix only\n");
1273 if (prom_radix_disable) {
1274 /*
1275 * If we __have__ to do radix, we're better off ignoring
1276 * the command line rather than not booting.
1277 */
1278 prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
1279 }
1280 support->radix_mmu = true;
1281 break;
1282 case OV5_FEAT(OV5_MMU_HASH):
1283 prom_debug("MMU - hash only\n");
1284 support->hash_mmu = true;
1285 break;
1286 default:
1287 prom_debug("Unknown mmu support option: 0x%x\n", val);
1288 break;
1289 }
1290 }
1291
1292 static void __init prom_parse_xive_model(u8 val,
1293 struct platform_support *support)
1294 {
1295 switch (val) {
1296 case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
1297 prom_debug("XIVE - either mode supported\n");
1298 support->xive = !prom_xive_disable;
1299 break;
1300 case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
1301 prom_debug("XIVE - exploitation mode supported\n");
1302 if (prom_xive_disable) {
1303 /*
1304 * If we __have__ to do XIVE, we're better off ignoring
1305 * the command line rather than not booting.
1306 */
1307 prom_printf("WARNING: Ignoring cmdline option xive=off\n");
1308 }
1309 support->xive = true;
1310 break;
1311 case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
1312 prom_debug("XIVE - legacy mode supported\n");
1313 break;
1314 default:
1315 prom_debug("Unknown xive support option: 0x%x\n", val);
1316 break;
1317 }
1318 }
1319
1320 static void __init prom_parse_platform_support(u8 index, u8 val,
1321 struct platform_support *support)
1322 {
1323 switch (index) {
1324 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
1325 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
1326 break;
1327 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
1328 if (val & OV5_FEAT(OV5_RADIX_GTSE))
1329 support->radix_gtse = !prom_radix_gtse_disable;
1330 break;
1331 case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
1332 prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
1333 support);
1334 break;
1335 }
1336 }
1337
1338 static void __init prom_check_platform_support(void)
1339 {
1340 struct platform_support supported = {
1341 .hash_mmu = false,
1342 .radix_mmu = false,
1343 .radix_gtse = false,
1344 .xive = false
1345 };
1346 int prop_len = prom_getproplen(prom.chosen,
1347 "ibm,arch-vec-5-platform-support");
1348
1349 /*
1350 * First copy the architecture vec template
1351 *
1352 * use memcpy() instead of *vec = *vec_template so that GCC replaces it
1353 * by __memcpy() when KASAN is active
1354 */
1355 memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template,
1356 sizeof(ibm_architecture_vec));
1357
1358 prom_strscpy_pad(ibm_architecture_vec.vec7.os_id, linux_banner, 256);
1359
1360 if (prop_len > 1) {
1361 int i;
1362 u8 vec[8];
1363 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
1364 prop_len);
1365 if (prop_len > sizeof(vec))
1366 prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
1367 prop_len);
1368 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec));
1369 for (i = 0; i < prop_len; i += 2) {
1370 prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
1371 prom_parse_platform_support(vec[i], vec[i + 1], &supported);
1372 }
1373 }
1374
1375 if (supported.radix_mmu && IS_ENABLED(CONFIG_PPC_RADIX_MMU)) {
1376 /* Radix preferred - Check if GTSE is also supported */
1377 prom_debug("Asking for radix\n");
1378 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
1379 if (supported.radix_gtse)
1380 ibm_architecture_vec.vec5.radix_ext =
1381 OV5_FEAT(OV5_RADIX_GTSE);
1382 else
1383 prom_debug("Radix GTSE isn't supported\n");
1384 } else if (supported.hash_mmu) {
1385 /* Default to hash mmu (if we can) */
1386 prom_debug("Asking for hash\n");
1387 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
1388 } else {
1389 /* We're probably on a legacy hypervisor */
1390 prom_debug("Assuming legacy hash support\n");
1391 }
1392
1393 if (supported.xive) {
1394 prom_debug("Asking for XIVE\n");
1395 ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT);
1396 }
1397 }
1398
1399 static void __init prom_send_capabilities(void)
1400 {
1401 ihandle root;
1402 prom_arg_t ret;
1403 u32 cores;
1404
1405 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
1406 prom_check_platform_support();
1407
1408 root = call_prom("open", 1, 1, ADDR("/"));
1409 if (root != 0) {
1410 /* We need to tell the FW about the number of cores we support.
1411 *
1412 * To do that, we count the number of threads on the first core
1413 * (we assume this is the same for all cores) and use it to
1414 * divide NR_CPUS.
1415 */
1416
1417 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
1418 prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
1419 cores, NR_CPUS);
1420
1421 ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
1422
1423 /* try calling the ibm,client-architecture-support method */
1424 prom_printf("Calling ibm,client-architecture-support...");
1425 if (call_prom_ret("call-method", 3, 2, &ret,
1426 ADDR("ibm,client-architecture-support"),
1427 root,
1428 ADDR(&ibm_architecture_vec)) == 0) {
1429 /* the call exists... */
1430 if (ret)
1431 prom_printf("\nWARNING: ibm,client-architecture"
1432 "-support call FAILED!\n");
1433 call_prom("close", 1, 0, root);
1434 prom_printf(" done\n");
1435 return;
1436 }
1437 call_prom("close", 1, 0, root);
1438 prom_printf(" not implemented\n");
1439 }
1440
1441 #ifdef __BIG_ENDIAN__
1442 {
1443 ihandle elfloader;
1444
1445 /* no ibm,client-architecture-support call, try the old way */
1446 elfloader = call_prom("open", 1, 1,
1447 ADDR("/packages/elf-loader"));
1448 if (elfloader == 0) {
1449 prom_printf("couldn't open /packages/elf-loader\n");
1450 return;
1451 }
1452 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
1453 elfloader, ADDR(&fake_elf));
1454 call_prom("close", 1, 0, elfloader);
1455 }
1456 #endif /* __BIG_ENDIAN__ */
1457 }
1458 #endif /* CONFIG_PPC_PSERIES */
1459
1460 /*
1461 * Memory allocation strategy... our layout is normally:
1462 *
1463 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
1464 * rare cases, initrd might end up being before the kernel though.
1465 * We assume this won't overwrite the final kernel at 0; we have no
1466 * provision to handle that in this version, but it should hopefully
1467 * never happen.
1468 *
1469 * alloc_top is set to the top of the RMO, eventually shrunk down if the
1470 * TCEs overlap
1471 *
1472 * alloc_bottom is set to the top of kernel/initrd
1473 *
1474 * from there, allocations are done this way: RTAS is allocated
1475 * topmost, and the device-tree is allocated from the bottom. We try
1476 * to grow the device-tree allocation as we progress. If we can't,
1477 * then we fail; we don't currently have a facility to restart
1478 * elsewhere, but that shouldn't be necessary.
1479 *
1480 * Note that calls to reserve_mem have to be done explicitly, memory
1481 * allocated with either alloc_up or alloc_down isn't automatically
1482 * reserved.
1483 */
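/*
 * Rough picture of the resulting layout (addresses grow to the right):
 *
 *	0 .. kernel .. initrd .. alloc_bottom --> device-tree ...
 *	                          ... RTAS <-- alloc_top .. rmo_top .. ram_top
 */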
1484
1485
1486 /*
1487 * Allocates memory in the RMO upward from the kernel/initrd
1488 *
1489 * When align is 0, this is a special case: it means to allocate in place
1490 * at the current location of alloc_bottom or fail (that is, basically
1491 * extending the previous allocation). Used for the device-tree flattening.
1492 */
1493 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
1494 {
1495 unsigned long base = alloc_bottom;
1496 unsigned long addr = 0;
1497
1498 if (align)
1499 base = ALIGN(base, align);
1500 prom_debug("%s(%lx, %lx)\n", __func__, size, align);
1501 if (ram_top == 0)
1502 prom_panic("alloc_up() called with mem not initialized\n");
1503
1504 if (align)
1505 base = ALIGN(alloc_bottom, align);
1506 else
1507 base = alloc_bottom;
1508
1509 for(; (base + size) <= alloc_top;
1510 base = ALIGN(base + 0x100000, align)) {
1511 prom_debug(" trying: 0x%lx\n\r", base);
1512 addr = (unsigned long)prom_claim(base, size, 0);
1513 if (addr != PROM_ERROR && addr != 0)
1514 break;
1515 addr = 0;
1516 if (align == 0)
1517 break;
1518 }
1519 if (addr == 0)
1520 return 0;
1521 alloc_bottom = addr + size;
1522
1523 prom_debug(" -> %lx\n", addr);
1524 prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
1525 prom_debug(" alloc_top : %lx\n", alloc_top);
1526 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
1527 prom_debug(" rmo_top : %lx\n", rmo_top);
1528 prom_debug(" ram_top : %lx\n", ram_top);
1529
1530 return addr;
1531 }
1532
1533 /*
1534 * Allocates memory downward, either from top of RMO, or if highmem
1535 * is set, from the top of RAM. Note that this one doesn't handle
1536 * failures. It does claim memory if highmem is not set.
1537 */
1538 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1539 int highmem)
1540 {
1541 unsigned long base, addr = 0;
1542
1543 prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
1544 highmem ? "(high)" : "(low)");
1545 if (ram_top == 0)
1546 prom_panic("alloc_down() called with mem not initialized\n");
1547
1548 if (highmem) {
1549 /* Carve out storage for the TCE table. */
1550 addr = ALIGN_DOWN(alloc_top_high - size, align);
1551 if (addr <= alloc_bottom)
1552 return 0;
1553 /* Will we bump into the RMO? If yes, check that we
1554 * didn't overlap existing allocations there; if we did,
1555 * we are dead, we must be the first in town!
1556 */
1557 if (addr < rmo_top) {
1558 /* Good, we are first */
1559 if (alloc_top == rmo_top)
1560 alloc_top = rmo_top = addr;
1561 else
1562 return 0;
1563 }
1564 alloc_top_high = addr;
1565 goto bail;
1566 }
1567
1568 base = ALIGN_DOWN(alloc_top - size, align);
1569 for (; base > alloc_bottom;
1570 base = ALIGN_DOWN(base - 0x100000, align)) {
1571 prom_debug(" trying: 0x%lx\n\r", base);
1572 addr = (unsigned long)prom_claim(base, size, 0);
1573 if (addr != PROM_ERROR && addr != 0)
1574 break;
1575 addr = 0;
1576 }
1577 if (addr == 0)
1578 return 0;
1579 alloc_top = addr;
1580
1581 bail:
1582 prom_debug(" -> %lx\n", addr);
1583 prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
1584 prom_debug(" alloc_top : %lx\n", alloc_top);
1585 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
1586 prom_debug(" rmo_top : %lx\n", rmo_top);
1587 prom_debug(" ram_top : %lx\n", ram_top);
1588
1589 return addr;
1590 }
1591
1592 /*
1593 * Parse a "reg" cell
1594 */
1595 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1596 {
1597 cell_t *p = *cellp;
1598 unsigned long r = 0;
1599
1600 /* Ignore more than 2 cells */
1601 while (s > sizeof(unsigned long) / 4) {
1602 p++;
1603 s--;
1604 }
1605 r = be32_to_cpu(*p++);
1606 #ifdef CONFIG_PPC64
1607 if (s > 1) {
1608 r <<= 32;
1609 r |= be32_to_cpu(*(p++));
1610 }
1611 #endif
1612 *cellp = p;
1613 return r;
1614 }
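/*
 * Example: with two address cells (rac = 2) on ppc64, the cells
 * { 0x00000001, 0x20000000 } decode to 0x120000000; on ppc32 the
 * most significant cells are skipped and only the last one is kept.
 */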
1615
1616 /*
1617 * Very dumb function for adding to the memory reserve list, but
1618 * we don't need anything smarter at this point
1619 *
1620 * XXX Eventually check for collisions. They should NEVER happen.
1621 * If problems seem to show up, it would be a good start to track
1622 * them down.
1623 */
1624 static void __init reserve_mem(u64 base, u64 size)
1625 {
1626 u64 top = base + size;
1627 unsigned long cnt = mem_reserve_cnt;
1628
1629 if (size == 0)
1630 return;
1631
1632 /* We need to always keep one empty entry so that we
1633 * have our terminator with "size" set to 0 since we are
1634 * dumb and just copy this entire array to the boot params
1635 */
1636 base = ALIGN_DOWN(base, PAGE_SIZE);
1637 top = ALIGN(top, PAGE_SIZE);
1638 size = top - base;
1639
1640 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1641 prom_panic("Memory reserve map exhausted !\n");
1642 mem_reserve_map[cnt].base = cpu_to_be64(base);
1643 mem_reserve_map[cnt].size = cpu_to_be64(size);
1644 mem_reserve_cnt = cnt + 1;
1645 }
1646
1647 /*
1648 * Initialize the memory allocation mechanism, parse "memory" nodes and
1649 * obtain from them the top of memory and the RMO, to set up our local allocator
1650 */
1651 static void __init prom_init_mem(void)
1652 {
1653 phandle node;
1654 char type[64];
1655 unsigned int plen;
1656 cell_t *p, *endp;
1657 __be32 val;
1658 u32 rac, rsc;
1659
1660 /*
1661 * We iterate the memory nodes to find
1662 * 1) top of RMO (first node)
1663 * 2) top of memory
1664 */
1665 val = cpu_to_be32(2);
1666 prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1667 rac = be32_to_cpu(val);
1668 val = cpu_to_be32(1);
1669 prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc));
1670 rsc = be32_to_cpu(val);
1671 prom_debug("root_addr_cells: %x\n", rac);
1672 prom_debug("root_size_cells: %x\n", rsc);
1673
1674 prom_debug("scanning memory:\n");
1675
1676 for (node = 0; prom_next_node(&node); ) {
1677 type[0] = 0;
1678 prom_getprop(node, "device_type", type, sizeof(type));
1679
1680 if (type[0] == 0) {
1681 /*
1682 * CHRP Longtrail machines have no device_type
1683 * on the memory node, so check the name instead...
1684 */
1685 prom_getprop(node, "name", type, sizeof(type));
1686 }
1687 if (prom_strcmp(type, "memory"))
1688 continue;
1689
1690 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1691 if (plen > sizeof(regbuf)) {
1692 prom_printf("memory node too large for buffer !\n");
1693 plen = sizeof(regbuf);
1694 }
1695 p = regbuf;
1696 endp = p + (plen / sizeof(cell_t));
1697
1698 #ifdef DEBUG_PROM
1699 memset(prom_scratch, 0, sizeof(prom_scratch));
1700 call_prom("package-to-path", 3, 1, node, prom_scratch,
1701 sizeof(prom_scratch) - 1);
1702 prom_debug(" node %s :\n", prom_scratch);
1703 #endif /* DEBUG_PROM */
1704
1705 while ((endp - p) >= (rac + rsc)) {
1706 unsigned long base, size;
1707
1708 base = prom_next_cell(rac, &p);
1709 size = prom_next_cell(rsc, &p);
1710
1711 if (size == 0)
1712 continue;
1713 prom_debug(" %lx %lx\n", base, size);
1714 if (base == 0 && (of_platform & PLATFORM_LPAR))
1715 rmo_top = size;
1716 if ((base + size) > ram_top)
1717 ram_top = base + size;
1718 }
1719 }
1720
1721 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1722
1723 /*
1724 * If prom_memory_limit is set we reduce the upper limits *except* for
1725 * alloc_top_high. This must be the real top of RAM so we can put
1726 * TCE's up there.
1727 */
1728
1729 alloc_top_high = ram_top;
1730
1731 if (prom_memory_limit) {
1732 if (prom_memory_limit <= alloc_bottom) {
1733 prom_printf("Ignoring mem=%lx <= alloc_bottom.\n",
1734 prom_memory_limit);
1735 prom_memory_limit = 0;
1736 } else if (prom_memory_limit >= ram_top) {
1737 prom_printf("Ignoring mem=%lx >= ram_top.\n",
1738 prom_memory_limit);
1739 prom_memory_limit = 0;
1740 } else {
1741 ram_top = prom_memory_limit;
1742 rmo_top = min(rmo_top, prom_memory_limit);
1743 }
1744 }
1745
1746 /*
1747 * Setup our top alloc point, that is top of RMO or top of
1748 * segment 0 when running non-LPAR.
1749 * Some RS64 machines have buggy firmware where claims up at
1750 * 1GB fail. Cap at 768MB as a workaround.
1751 * Since 768MB is plenty of room, and we need to cap to something
1752 * reasonable on 32-bit, cap at 768MB on all machines.
1753 */
1754 if (!rmo_top)
1755 rmo_top = ram_top;
1756 rmo_top = min(0x30000000ul, rmo_top);
1757 alloc_top = rmo_top;
1758 alloc_top_high = ram_top;
1759
1760 /*
1761 * Check if we have an initrd after the kernel but still inside
1762 * the RMO. If we do, move our bottom point to after it.
1763 */
1764 if (prom_initrd_start &&
1765 prom_initrd_start < rmo_top &&
1766 prom_initrd_end > alloc_bottom)
1767 alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1768
1769 prom_printf("memory layout at init:\n");
1770 prom_printf(" memory_limit : %lx (16 MB aligned)\n",
1771 prom_memory_limit);
1772 prom_printf(" alloc_bottom : %lx\n", alloc_bottom);
1773 prom_printf(" alloc_top : %lx\n", alloc_top);
1774 prom_printf(" alloc_top_hi : %lx\n", alloc_top_high);
1775 prom_printf(" rmo_top : %lx\n", rmo_top);
1776 prom_printf(" ram_top : %lx\n", ram_top);
1777 }
1778
1779 static void __init prom_close_stdin(void)
1780 {
1781 __be32 val;
1782 ihandle stdin;
1783
1784 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1785 stdin = be32_to_cpu(val);
1786 call_prom("close", 1, 0, stdin);
1787 }
1788 }
1789
1790 #ifdef CONFIG_PPC_SVM
1791 static int __init prom_rtas_hcall(uint64_t args)
1792 {
1793 register uint64_t arg1 asm("r3") = H_RTAS;
1794 register uint64_t arg2 asm("r4") = args;
1795
1796 asm volatile("sc 1\n" : "=r" (arg1) :
1797 "r" (arg1),
1798 "r" (arg2) :);
1799 srr_regs_clobbered();
1800
1801 return arg1;
1802 }
1803
1804 static struct rtas_args __prombss os_term_args;
1805
1806 static void __init prom_rtas_os_term(char *str)
1807 {
1808 phandle rtas_node;
1809 __be32 val;
1810 u32 token;
1811
1812 prom_debug("%s: start...\n", __func__);
1813 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1814 prom_debug("rtas_node: %x\n", rtas_node);
1815 if (!PHANDLE_VALID(rtas_node))
1816 return;
1817
1818 val = 0;
1819 prom_getprop(rtas_node, "ibm,os-term", &val, sizeof(val));
1820 token = be32_to_cpu(val);
1821 prom_debug("ibm,os-term: %x\n", token);
1822 if (token == 0)
1823 prom_panic("Could not get token for ibm,os-term\n");
1824 os_term_args.token = cpu_to_be32(token);
1825 os_term_args.nargs = cpu_to_be32(1);
1826 os_term_args.nret = cpu_to_be32(1);
1827 os_term_args.args[0] = cpu_to_be32(__pa(str));
1828 prom_rtas_hcall((uint64_t)&os_term_args);
1829 }
1830 #endif /* CONFIG_PPC_SVM */
1831
1832 /*
1833 * Allocate room for and instantiate RTAS
1834 */
1835 static void __init prom_instantiate_rtas(void)
1836 {
1837 phandle rtas_node;
1838 ihandle rtas_inst;
1839 u32 base, entry = 0;
1840 __be32 val;
1841 u32 size = 0;
1842
1843 prom_debug("prom_instantiate_rtas: start...\n");
1844
1845 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1846 prom_debug("rtas_node: %x\n", rtas_node);
1847 if (!PHANDLE_VALID(rtas_node))
1848 return;
1849
1850 val = 0;
1851 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
1852 size = be32_to_cpu(val);
1853 if (size == 0)
1854 return;
1855
1856 base = alloc_down(size, PAGE_SIZE, 0);
1857 if (base == 0)
1858 prom_panic("Could not allocate memory for RTAS\n");
1859
1860 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1861 if (!IHANDLE_VALID(rtas_inst)) {
1862 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1863 return;
1864 }
1865
1866 prom_printf("instantiating rtas at 0x%x...", base);
1867
1868 if (call_prom_ret("call-method", 3, 2, &entry,
1869 ADDR("instantiate-rtas"),
1870 rtas_inst, base) != 0
1871 || entry == 0) {
1872 prom_printf(" failed\n");
1873 return;
1874 }
1875 prom_printf(" done\n");
1876
1877 reserve_mem(base, size);
1878
1879 val = cpu_to_be32(base);
1880 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1881 &val, sizeof(val));
1882 val = cpu_to_be32(entry);
1883 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1884 &val, sizeof(val));
1885
1886 /* Check if it supports "query-cpu-stopped-state" */
1887 if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1888 &val, sizeof(val)) != PROM_ERROR)
1889 rtas_has_query_cpu_stopped = true;
1890
1891 prom_debug("rtas base = 0x%x\n", base);
1892 prom_debug("rtas entry = 0x%x\n", entry);
1893 prom_debug("rtas size = 0x%x\n", size);
1894
1895 prom_debug("prom_instantiate_rtas: end...\n");
1896 }
1897
1898 #ifdef CONFIG_PPC64
1899 /*
1900 * Allocate room for and instantiate Stored Measurement Log (SML)
1901 */
1902 static void __init prom_instantiate_sml(void)
1903 {
1904 phandle ibmvtpm_node;
1905 ihandle ibmvtpm_inst;
1906 u32 entry = 0, size = 0, succ = 0;
1907 u64 base;
1908 __be32 val;
1909
1910 prom_debug("prom_instantiate_sml: start...\n");
1911
1912 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
1913 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1914 if (!PHANDLE_VALID(ibmvtpm_node))
1915 return;
1916
1917 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
1918 if (!IHANDLE_VALID(ibmvtpm_inst)) {
1919 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1920 return;
1921 }
1922
1923 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
1924 &val, sizeof(val)) != PROM_ERROR) {
1925 if (call_prom_ret("call-method", 2, 2, &succ,
1926 ADDR("reformat-sml-to-efi-alignment"),
1927 ibmvtpm_inst) != 0 || succ == 0) {
1928 prom_printf("Reformat SML to EFI alignment failed\n");
1929 return;
1930 }
1931
1932 if (call_prom_ret("call-method", 2, 2, &size,
1933 ADDR("sml-get-allocated-size"),
1934 ibmvtpm_inst) != 0 || size == 0) {
1935 prom_printf("SML get allocated size failed\n");
1936 return;
1937 }
1938 } else {
1939 if (call_prom_ret("call-method", 2, 2, &size,
1940 ADDR("sml-get-handover-size"),
1941 ibmvtpm_inst) != 0 || size == 0) {
1942 prom_printf("SML get handover size failed\n");
1943 return;
1944 }
1945 }
1946
1947 base = alloc_down(size, PAGE_SIZE, 0);
1948 if (base == 0)
1949 prom_panic("Could not allocate memory for sml\n");
1950
1951 prom_printf("instantiating sml at 0x%llx...", base);
1952
1953 memset((void *)base, 0, size);
1954
1955 if (call_prom_ret("call-method", 4, 2, &entry,
1956 ADDR("sml-handover"),
1957 ibmvtpm_inst, size, base) != 0 || entry == 0) {
1958 prom_printf("SML handover failed\n");
1959 return;
1960 }
1961 prom_printf(" done\n");
1962
1963 reserve_mem(base, size);
1964
1965 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
1966 &base, sizeof(base));
1967 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
1968 &size, sizeof(size));
1969
1970 prom_debug("sml base = 0x%llx\n", base);
1971 prom_debug("sml size = 0x%x\n", size);
1972
1973 prom_debug("prom_instantiate_sml: end...\n");
1974 }
1975
1976 /*
1977 * Allocate room for and initialize TCE tables
1978 */
1979 #ifdef __BIG_ENDIAN__
1980 static void __init prom_initialize_tce_table(void)
1981 {
1982 phandle node;
1983 ihandle phb_node;
1984 char compatible[64], type[64], model[64];
1985 char *path = prom_scratch;
1986 u64 base, align;
1987 u32 minalign, minsize;
1988 u64 tce_entry, *tce_entryp;
1989 u64 local_alloc_top, local_alloc_bottom;
1990 u64 i;
1991
1992 if (prom_iommu_off)
1993 return;
1994
1995 prom_debug("starting prom_initialize_tce_table\n");
1996
1997 /* Cache current top of allocs so we reserve a single block */
1998 local_alloc_top = alloc_top_high;
1999 local_alloc_bottom = local_alloc_top;
2000
2001 /* Search all nodes looking for PHBs. */
2002 for (node = 0; prom_next_node(&node); ) {
2003 compatible[0] = 0;
2004 type[0] = 0;
2005 model[0] = 0;
2006 prom_getprop(node, "compatible",
2007 compatible, sizeof(compatible));
2008 prom_getprop(node, "device_type", type, sizeof(type));
2009 prom_getprop(node, "model", model, sizeof(model));
2010
2011 if ((type[0] == 0) || (prom_strstr(type, "pci") == NULL))
2012 continue;
2013
2014 /* Keep the old logic intact to avoid regression. */
2015 if (compatible[0] != 0) {
2016 if ((prom_strstr(compatible, "python") == NULL) &&
2017 (prom_strstr(compatible, "Speedwagon") == NULL) &&
2018 (prom_strstr(compatible, "Winnipeg") == NULL))
2019 continue;
2020 } else if (model[0] != 0) {
2021 if ((prom_strstr(model, "ython") == NULL) &&
2022 (prom_strstr(model, "peedwagon") == NULL) &&
2023 (prom_strstr(model, "innipeg") == NULL))
2024 continue;
2025 }
2026
2027 if (prom_getprop(node, "tce-table-minalign", &minalign,
2028 sizeof(minalign)) == PROM_ERROR)
2029 minalign = 0;
2030 if (prom_getprop(node, "tce-table-minsize", &minsize,
2031 sizeof(minsize)) == PROM_ERROR)
2032 minsize = 4UL << 20;
2033
2034 /*
2035 * Even though we read what OF wants, we just set the table
2036 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
2037 * By doing this, we avoid the pitfalls of trying to DMA to
2038 * MMIO space and the DMA alias hole.
2039 */
2040 minsize = 4UL << 20;
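/*
 * Worked out: 4 MB of table at 8 bytes per TCE is 512K entries;
 * assuming 4 KB I/O pages (the i << PAGE_SHIFT mapping below), that
 * covers 512K * 4 KB = 2 GB of DMA space, as the comment above says.
 */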
2041
2042 /* Align to the greater of the align or size */
2043 align = max(minalign, minsize);
2044 base = alloc_down(minsize, align, 1);
2045 if (base == 0)
2046 prom_panic("ERROR, cannot find space for TCE table.\n");
2047 if (base < local_alloc_bottom)
2048 local_alloc_bottom = base;
2049
2050 /* It seems OF doesn't null-terminate the path :-( */
2051 memset(path, 0, sizeof(prom_scratch));
2052 /* Call OF to setup the TCE hardware */
2053 if (call_prom("package-to-path", 3, 1, node,
2054 path, sizeof(prom_scratch) - 1) == PROM_ERROR) {
2055 prom_printf("package-to-path failed\n");
2056 }
2057
2058 /* Save away the TCE table attributes for later use. */
2059 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
2060 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
2061
2062 prom_debug("TCE table: %s\n", path);
2063 prom_debug("\tnode = 0x%x\n", node);
2064 prom_debug("\tbase = 0x%llx\n", base);
2065 prom_debug("\tsize = 0x%x\n", minsize);
2066
2067 /* Initialize the table to have a one-to-one mapping
2068 * over the allocated size.
2069 */
2070 tce_entryp = (u64 *)base;
2071 for (i = 0; i < (minsize >> 3); tce_entryp++, i++) {
2072 tce_entry = (i << PAGE_SHIFT);
2073 tce_entry |= 0x3;
2074 *tce_entryp = tce_entry;
2075 }
2076
2077 prom_printf("opening PHB %s", path);
2078 phb_node = call_prom("open", 1, 1, path);
2079 if (phb_node == 0)
2080 prom_printf("... failed\n");
2081 else
2082 prom_printf("... done\n");
2083
2084 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
2085 phb_node, -1, minsize,
2086 (u32) base, (u32) (base >> 32));
2087 call_prom("close", 1, 0, phb_node);
2088 }
2089
2090 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
2091
2092 /* These are only really needed if there is a memory limit in
2093 * effect, but we don't know whether one is, so export them always. */
2094 prom_tce_alloc_start = local_alloc_bottom;
2095 prom_tce_alloc_end = local_alloc_top;
2096
2097 /* Flag the first invalid entry */
2098 prom_debug("ending prom_initialize_tce_table\n");
2099 }
2100 #endif /* __BIG_ENDIAN__ */
2101 #endif /* CONFIG_PPC64 */
2102
2103 /*
2104 * With CHRP SMP we need to use the OF to start the other processors.
2105 * We can't wait until smp_boot_cpus (the OF is trashed by then)
2106 * so we have to put the processors into a holding pattern controlled
2107 * by the kernel (not OF) before we destroy the OF.
2108 *
2109 * This uses a chunk of low memory, puts some holding pattern
2110 * code there and sends the other processors off to there until
2111 * smp_boot_cpus tells them to do something. The holding pattern
2112 * checks that address until its cpu # is there; when it is, that
2113 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
2114 * of setting those values.
2115 *
2116 * We also use physical address 0x4 here to tell when a cpu
2117 * is in its holding pattern code.
2118 *
2119 * -- Cort
2120 */
2121 /*
2122 * We want to reference the copy of __secondary_hold_* in the
2123 * 0 - 0x100 address range
2124 */
2125 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
2126
2127 static void __init prom_hold_cpus(void)
2128 {
2129 unsigned long i;
2130 phandle node;
2131 char type[64];
2132 unsigned long *spinloop
2133 = (void *) LOW_ADDR(__secondary_hold_spinloop);
2134 unsigned long *acknowledge
2135 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
2136 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
2137
2138 /*
2139 * On pseries, if RTAS supports "query-cpu-stopped-state",
2140 * we skip this stage, the CPUs will be started by the
2141 * kernel using RTAS.
2142 */
2143 if ((of_platform == PLATFORM_PSERIES ||
2144 of_platform == PLATFORM_PSERIES_LPAR) &&
2145 rtas_has_query_cpu_stopped) {
2146 prom_printf("prom_hold_cpus: skipped\n");
2147 return;
2148 }
2149
2150 prom_debug("prom_hold_cpus: start...\n");
2151 prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop);
2152 prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop);
2153 prom_debug(" 1) acknowledge = 0x%lx\n",
2154 (unsigned long)acknowledge);
2155 prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge);
2156 prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold);
2157
2158 /* Set the common spinloop variable, so all of the secondary cpus
2159 * will block when they are awakened from their OF spinloop.
2160 * This must occur for both SMP and non SMP kernels, since OF will
2161 * be trashed when we move the kernel.
2162 */
2163 *spinloop = 0;
2164
2165 /* look for cpus */
2166 for (node = 0; prom_next_node(&node); ) {
2167 unsigned int cpu_no;
2168 __be32 reg;
2169
2170 type[0] = 0;
2171 prom_getprop(node, "device_type", type, sizeof(type));
2172 if (prom_strcmp(type, "cpu") != 0)
2173 continue;
2174
2175 /* Skip non-configured cpus. */
2176 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
2177 if (prom_strcmp(type, "okay") != 0)
2178 continue;
2179
2180 reg = cpu_to_be32(-1); /* make sparse happy */
2181 prom_getprop(node, "reg", &reg, sizeof(reg));
2182 cpu_no = be32_to_cpu(reg);
2183
2184 prom_debug("cpu hw idx = %u\n", cpu_no);
2185
2186 /* Init the acknowledge var which will be reset by
2187 * the secondary cpu when it awakens from its OF
2188 * spinloop.
2189 */
2190 *acknowledge = (unsigned long)-1;
2191
2192 if (cpu_no != prom.cpu) {
2193 /* Primary Thread of non-boot cpu or any thread */
2194 prom_printf("starting cpu hw idx %u... ", cpu_no);
2195 call_prom("start-cpu", 3, 0, node,
2196 secondary_hold, cpu_no);
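/*
 * "start-cpu" takes the cpu node, an entry point and one argument; the
 * secondary is expected to begin at the low-memory copy of
 * __secondary_hold with cpu_no passed in, and to store its hw index
 * into *acknowledge once it reaches the holding pattern.
 */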
2197
2198 for (i = 0; (i < 100000000) &&
2199 (*acknowledge == ((unsigned long)-1)); i++ )
2200 mb();
2201
2202 if (*acknowledge == cpu_no)
2203 prom_printf("done\n");
2204 else
2205 prom_printf("failed: %lx\n", *acknowledge);
2206 }
2207 #ifdef CONFIG_SMP
2208 else
2209 prom_printf("boot cpu hw idx %u\n", cpu_no);
2210 #endif /* CONFIG_SMP */
2211 }
2212
2213 prom_debug("prom_hold_cpus: end...\n");
2214 }
2215
2216
2217 static void __init prom_init_client_services(unsigned long pp)
2218 {
2219 /* Get a handle to the prom entry point before anything else */
2220 prom_entry = pp;
2221
2222 /* get a handle for the stdout device */
2223 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
2224 if (!PHANDLE_VALID(prom.chosen))
2225 prom_panic("cannot find chosen"); /* msg won't be printed :( */
2226
2227 /* get device tree root */
2228 prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
2229 if (!PHANDLE_VALID(prom.root))
2230 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
2231
2232 prom.mmumap = 0;
2233 }
2234
2235 #ifdef CONFIG_PPC32
2236 /*
2237 * For really old powermacs, we need to map things we claim.
2238 * For that, we need the ihandle of the mmu.
2239 * Also, on the longtrail, we need to work around other bugs.
2240 */
2241 static void __init prom_find_mmu(void)
2242 {
2243 phandle oprom;
2244 char version[64];
2245
2246 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
2247 if (!PHANDLE_VALID(oprom))
2248 return;
2249 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
2250 return;
2251 version[sizeof(version) - 1] = 0;
2252 /* XXX might need to add other versions here */
2253 if (prom_strcmp(version, "Open Firmware, 1.0.5") == 0)
2254 of_workarounds = OF_WA_CLAIM;
2255 else if (prom_strncmp(version, "FirmWorks,3.", 12) == 0) {
2256 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
2257 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
2258 } else
2259 return;
2260 prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
2261 prom_getprop(prom.chosen, "mmu", &prom.mmumap,
2262 sizeof(prom.mmumap));
2263 prom.mmumap = be32_to_cpu(prom.mmumap);
2264 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
2265 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
2266 }
2267 #else
2268 #define prom_find_mmu()
2269 #endif
2270
2271 static void __init prom_init_stdout(void)
2272 {
2273 char *path = of_stdout_device;
2274 char type[16];
2275 phandle stdout_node;
2276 __be32 val;
2277
2278 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
2279 prom_panic("cannot find stdout");
2280
2281 prom.stdout = be32_to_cpu(val);
2282
2283 /* Get the full OF pathname of the stdout device */
2284 memset(path, 0, 256);
2285 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
2286 prom_printf("OF stdout device is: %s\n", of_stdout_device);
2287 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
2288 path, prom_strlen(path) + 1);
2289
2290 /* instance-to-package fails on PA-Semi */
2291 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
2292 if (stdout_node != PROM_ERROR) {
2293 val = cpu_to_be32(stdout_node);
2294
2295 /* If it's a display, note it */
2296 memset(type, 0, sizeof(type));
2297 prom_getprop(stdout_node, "device_type", type, sizeof(type));
2298 if (prom_strcmp(type, "display") == 0)
2299 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
2300 }
2301 }
2302
2303 static int __init prom_find_machine_type(void)
2304 {
2305 static char compat[256] __prombss;
2306 int len, i = 0;
2307 #ifdef CONFIG_PPC64
2308 phandle rtas;
2309 int x;
2310 #endif
2311
2312 /* Look for a PowerMac or a Cell */
2313 len = prom_getprop(prom.root, "compatible",
2314 compat, sizeof(compat)-1);
2315 if (len > 0) {
2316 compat[len] = 0;
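/*
 * "compatible" is a list of nul-separated strings; walk it one entry
 * at a time.
 */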
2317 while (i < len) {
2318 char *p = &compat[i];
2319 int sl = prom_strlen(p);
2320 if (sl == 0)
2321 break;
2322 if (prom_strstr(p, "Power Macintosh") ||
2323 prom_strstr(p, "MacRISC"))
2324 return PLATFORM_POWERMAC;
2325 #ifdef CONFIG_PPC64
2326 /* We must make sure we don't detect the IBM Cell
2327 * blades as pSeries due to some firmware issues,
2328 * so we do it here.
2329 */
2330 if (prom_strstr(p, "IBM,CBEA") ||
2331 prom_strstr(p, "IBM,CPBW-1.0"))
2332 return PLATFORM_GENERIC;
2333 #endif /* CONFIG_PPC64 */
2334 i += sl + 1;
2335 }
2336 }
2337 #ifdef CONFIG_PPC64
2338 /* Try to figure out if it's an IBM pSeries or any other
2339 * PAPR compliant platform. We assume it is if :
2340 * - /device_type is "chrp" (please, do NOT use that for future
2341 * non-IBM designs!)
2342 * - it has /rtas
2343 */
2344 len = prom_getprop(prom.root, "device_type",
2345 compat, sizeof(compat)-1);
2346 if (len <= 0)
2347 return PLATFORM_GENERIC;
2348 if (prom_strcmp(compat, "chrp"))
2349 return PLATFORM_GENERIC;
2350
2351 /* Default to pSeries. We need to know if we are running LPAR */
2352 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2353 if (!PHANDLE_VALID(rtas))
2354 return PLATFORM_GENERIC;
2355 x = prom_getproplen(rtas, "ibm,hypertas-functions");
2356 if (x != PROM_ERROR) {
2357 prom_debug("Hypertas detected, assuming LPAR !\n");
2358 return PLATFORM_PSERIES_LPAR;
2359 }
2360 return PLATFORM_PSERIES;
2361 #else
2362 return PLATFORM_GENERIC;
2363 #endif
2364 }
2365
2366 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2367 {
2368 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2369 }
2370
2371 /*
2372 * If we have a display that we don't know how to drive,
2373 * we will want to try to execute OF's open method for it
2374 * later. However, OF will probably fall over if we do that after
2375 * we've taken over the MMU.
2376 * So we check whether we will need to open the display,
2377 * and if so, open it now.
2378 */
2379 static void __init prom_check_displays(void)
2380 {
2381 char type[16], *path;
2382 phandle node;
2383 ihandle ih;
2384 int i;
2385
2386 static const unsigned char default_colors[] __initconst = {
2387 0x00, 0x00, 0x00,
2388 0x00, 0x00, 0xaa,
2389 0x00, 0xaa, 0x00,
2390 0x00, 0xaa, 0xaa,
2391 0xaa, 0x00, 0x00,
2392 0xaa, 0x00, 0xaa,
2393 0xaa, 0xaa, 0x00,
2394 0xaa, 0xaa, 0xaa,
2395 0x55, 0x55, 0x55,
2396 0x55, 0x55, 0xff,
2397 0x55, 0xff, 0x55,
2398 0x55, 0xff, 0xff,
2399 0xff, 0x55, 0x55,
2400 0xff, 0x55, 0xff,
2401 0xff, 0xff, 0x55,
2402 0xff, 0xff, 0xff
2403 };
2404 const unsigned char *clut;
2405
2406 prom_debug("Looking for displays\n");
2407 for (node = 0; prom_next_node(&node); ) {
2408 memset(type, 0, sizeof(type));
2409 prom_getprop(node, "device_type", type, sizeof(type));
2410 if (prom_strcmp(type, "display") != 0)
2411 continue;
2412
2413 /* It seems OF doesn't null-terminate the path :-( */
2414 path = prom_scratch;
2415 memset(path, 0, sizeof(prom_scratch));
2416
2417 /*
2418 * leave some room at the end of the path for appending extra
2419 * arguments
2420 */
2421 if (call_prom("package-to-path", 3, 1, node, path,
2422 sizeof(prom_scratch) - 10) == PROM_ERROR)
2423 continue;
2424 prom_printf("found display : %s, opening... ", path);
2425
2426 ih = call_prom("open", 1, 1, path);
2427 if (ih == 0) {
2428 prom_printf("failed\n");
2429 continue;
2430 }
2431
2432 /* Success */
2433 prom_printf("done\n");
2434 prom_setprop(node, path, "linux,opened", NULL, 0);
2435
2436 /* Setup a usable color table when the appropriate
2437 * method is available. Should update this to set-colors */
2438 clut = default_colors;
2439 for (i = 0; i < 16; i++, clut += 3)
2440 if (prom_set_color(ih, i, clut[0], clut[1],
2441 clut[2]) != 0)
2442 break;
2443
2444 #ifdef CONFIG_LOGO_LINUX_CLUT224
2445 clut = PTRRELOC(logo_linux_clut224.clut);
2446 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2447 if (prom_set_color(ih, i + 32, clut[0], clut[1],
2448 clut[2]) != 0)
2449 break;
2450 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
2451
2452 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2453 if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2454 PROM_ERROR) {
2455 u32 width, height, pitch, addr;
2456
2457 prom_printf("Setting btext !\n");
2458
2459 if (prom_getprop(node, "width", &width, 4) == PROM_ERROR)
2460 return;
2461
2462 if (prom_getprop(node, "height", &height, 4) == PROM_ERROR)
2463 return;
2464
2465 if (prom_getprop(node, "linebytes", &pitch, 4) == PROM_ERROR)
2466 return;
2467
2468 if (prom_getprop(node, "address", &addr, 4) == PROM_ERROR)
2469 return;
2470
2471 prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2472 width, height, pitch, addr);
2473 btext_setup_display(width, height, 8, pitch, addr);
2474 btext_prepare_BAT();
2475 }
2476 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2477 }
2478 }
2479
2480
2481 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
2482 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2483 unsigned long needed, unsigned long align)
2484 {
2485 void *ret;
2486
2487 *mem_start = ALIGN(*mem_start, align);
2488 while ((*mem_start + needed) > *mem_end) {
2489 unsigned long room, chunk;
2490
2491 prom_debug("Chunk exhausted, claiming more at %lx...\n",
2492 alloc_bottom);
2493 room = alloc_top - alloc_bottom;
2494 if (room > DEVTREE_CHUNK_SIZE)
2495 room = DEVTREE_CHUNK_SIZE;
2496 if (room < PAGE_SIZE)
2497 prom_panic("No memory for flatten_device_tree "
2498 "(no room)\n");
2499 chunk = alloc_up(room, 0);
2500 if (chunk == 0)
2501 prom_panic("No memory for flatten_device_tree "
2502 "(claim failed)\n");
2503 *mem_end = chunk + room;
2504 }
2505
2506 ret = (void *)*mem_start;
2507 *mem_start += needed;
2508
2509 return ret;
2510 }
2511
2512 #define dt_push_token(token, mem_start, mem_end) do { \
2513 void *room = make_room(mem_start, mem_end, 4, 4); \
2514 *(__be32 *)room = cpu_to_be32(token); \
2515 } while(0)
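/*
 * Usage sketch: dt_push_token(OF_DT_BEGIN_NODE, &mem_start, &mem_end)
 * appends one 4-byte, big-endian token to the blob, growing the chunk
 * via make_room() as needed.
 */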
2516
2517 static unsigned long __init dt_find_string(char *str)
2518 {
2519 char *s, *os;
2520
2521 s = os = (char *)dt_string_start;
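/*
 * The first 4 bytes of the string block are a deliberate hole (see
 * flatten_device_tree()), so an offset of 0 can safely mean "not found".
 */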
2522 s += 4;
2523 while (s < (char *)dt_string_end) {
2524 if (prom_strcmp(s, str) == 0)
2525 return s - os;
2526 s += prom_strlen(s) + 1;
2527 }
2528 return 0;
2529 }
2530
2531 /*
2532 * The Open Firmware 1275 specification states properties must be 31 bytes or
2533 * less; however, not all firmwares obey this. Make it 64 bytes to be safe.
2534 */
2535 #define MAX_PROPERTY_NAME 64
2536
2537 static void __init scan_dt_build_strings(phandle node,
2538 unsigned long *mem_start,
2539 unsigned long *mem_end)
2540 {
2541 char *prev_name, *namep, *sstart;
2542 unsigned long soff;
2543 phandle child;
2544
2545 sstart = (char *)dt_string_start;
2546
2547 /* get and store all property names */
2548 prev_name = "";
2549 for (;;) {
2550 /* 64 is max len of name including nul. */
2551 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2552 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2553 /* No more properties: unwind alloc */
2554 *mem_start = (unsigned long)namep;
2555 break;
2556 }
2557
2558 /* skip "name" */
2559 if (prom_strcmp(namep, "name") == 0) {
2560 *mem_start = (unsigned long)namep;
2561 prev_name = "name";
2562 continue;
2563 }
2564 /* get/create string entry */
2565 soff = dt_find_string(namep);
2566 if (soff != 0) {
2567 *mem_start = (unsigned long)namep;
2568 namep = sstart + soff;
2569 } else {
2570 /* Trim off some if we can */
2571 *mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2572 dt_string_end = *mem_start;
2573 }
2574 prev_name = namep;
2575 }
2576
2577 /* do all our children */
2578 child = call_prom("child", 1, 1, node);
2579 while (child != 0) {
2580 scan_dt_build_strings(child, mem_start, mem_end);
2581 child = call_prom("peer", 1, 1, child);
2582 }
2583 }
2584
2585 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2586 unsigned long *mem_end)
2587 {
2588 phandle child;
2589 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2590 unsigned long soff;
2591 unsigned char *valp;
2592 static char pname[MAX_PROPERTY_NAME] __prombss;
2593 int l, room, has_phandle = 0;
2594
2595 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2596
2597 /* get the node's full name */
2598 namep = (char *)*mem_start;
2599 room = *mem_end - *mem_start;
2600 if (room > 255)
2601 room = 255;
2602 l = call_prom("package-to-path", 3, 1, node, namep, room);
2603 if (l >= 0) {
2604 /* Didn't fit? Get more room. */
2605 if (l >= room) {
2606 if (l >= *mem_end - *mem_start)
2607 namep = make_room(mem_start, mem_end, l+1, 1);
2608 call_prom("package-to-path", 3, 1, node, namep, l);
2609 }
2610 namep[l] = '\0';
2611
2612 /* Fixup an Apple bug where they have bogus \0 chars in the
2613 * middle of the path in some properties, and extract
2614 * the unit name (everything after the last '/').
2615 */
2616 for (lp = p = namep, ep = namep + l; p < ep; p++) {
2617 if (*p == '/')
2618 lp = namep;
2619 else if (*p != 0)
2620 *lp++ = *p;
2621 }
2622 *lp = 0;
2623 *mem_start = ALIGN((unsigned long)lp + 1, 4);
2624 }
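/*
 * namep now holds just the unit name: a full path such as
 * "/ht@0/isa@4" is reduced to "isa@4", with any bogus embedded NULs
 * dropped along the way.
 */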
2625
2626 /* get it again for debugging */
2627 path = prom_scratch;
2628 memset(path, 0, sizeof(prom_scratch));
2629 call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1);
2630
2631 /* get and store all properties */
2632 prev_name = "";
2633 sstart = (char *)dt_string_start;
2634 for (;;) {
2635 if (call_prom("nextprop", 3, 1, node, prev_name,
2636 pname) != 1)
2637 break;
2638
2639 /* skip "name" */
2640 if (prom_strcmp(pname, "name") == 0) {
2641 prev_name = "name";
2642 continue;
2643 }
2644
2645 /* find string offset */
2646 soff = dt_find_string(pname);
2647 if (soff == 0) {
2648 prom_printf("WARNING: Can't find string index for"
2649 " <%s>, node %s\n", pname, path);
2650 break;
2651 }
2652 prev_name = sstart + soff;
2653
2654 /* get length */
2655 l = call_prom("getproplen", 2, 1, node, pname);
2656
2657 /* sanity checks */
2658 if (l == PROM_ERROR)
2659 continue;
2660
2661 /* push property head */
2662 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2663 dt_push_token(l, mem_start, mem_end);
2664 dt_push_token(soff, mem_start, mem_end);
2665
2666 /* push property content */
2667 valp = make_room(mem_start, mem_end, l, 4);
2668 call_prom("getprop", 4, 1, node, pname, valp, l);
2669 *mem_start = ALIGN(*mem_start, 4);
2670
2671 if (!prom_strcmp(pname, "phandle"))
2672 has_phandle = 1;
2673 }
2674
2675 /* Add a "phandle" property if none already exist */
2676 if (!has_phandle) {
2677 soff = dt_find_string("phandle");
2678 if (soff == 0)
2679 prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path);
2680 else {
2681 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2682 dt_push_token(4, mem_start, mem_end);
2683 dt_push_token(soff, mem_start, mem_end);
2684 valp = make_room(mem_start, mem_end, 4, 4);
2685 *(__be32 *)valp = cpu_to_be32(node);
2686 }
2687 }
2688
2689 /* do all our children */
2690 child = call_prom("child", 1, 1, node);
2691 while (child != 0) {
2692 scan_dt_build_struct(child, mem_start, mem_end);
2693 child = call_prom("peer", 1, 1, child);
2694 }
2695
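/*
 * Close the node. Each node thus flattens to: OF_DT_BEGIN_NODE, the
 * unit name, one OF_DT_PROP record (length, string offset, value) per
 * property, the recursively flattened children, then OF_DT_END_NODE.
 */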
2696 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2697 }
2698
2699 static void __init flatten_device_tree(void)
2700 {
2701 phandle root;
2702 unsigned long mem_start, mem_end, room;
2703 struct boot_param_header *hdr;
2704 char *namep;
2705 u64 *rsvmap;
2706
2707 /*
2708 * Check how much room we have between alloc top & bottom (+/- a
2709 * few pages), crop to 1MB, as this is our "chunk" size
2710 */
2711 room = alloc_top - alloc_bottom - 0x4000;
2712 if (room > DEVTREE_CHUNK_SIZE)
2713 room = DEVTREE_CHUNK_SIZE;
2714 prom_debug("starting device tree allocs at %lx\n", alloc_bottom);
2715
2716 /* Now try to claim that */
2717 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2718 if (mem_start == 0)
2719 prom_panic("Can't allocate initial device-tree chunk\n");
2720 mem_end = mem_start + room;
2721
2722 /* Get root of tree */
2723 root = call_prom("peer", 1, 1, (phandle)0);
2724 if (root == (phandle)0)
2725 prom_panic ("couldn't get device tree root\n");
2726
2727 /* Build header and make room for mem rsv map */
2728 mem_start = ALIGN(mem_start, 4);
2729 hdr = make_room(&mem_start, &mem_end,
2730 sizeof(struct boot_param_header), 4);
2731 dt_header_start = (unsigned long)hdr;
2732 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2733
2734 /* Start of strings */
2735 mem_start = PAGE_ALIGN(mem_start);
2736 dt_string_start = mem_start;
2737 mem_start += 4; /* hole */
2738
2739 /* Add "phandle" in there, we'll need it */
2740 namep = make_room(&mem_start, &mem_end, 16, 1);
2741 prom_strscpy_pad(namep, "phandle", sizeof("phandle"));
2742 mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2743
2744 /* Build string array */
2745 prom_printf("Building dt strings...\n");
2746 scan_dt_build_strings(root, &mem_start, &mem_end);
2747 dt_string_end = mem_start;
2748
2749 /* Build structure */
2750 mem_start = PAGE_ALIGN(mem_start);
2751 dt_struct_start = mem_start;
2752 prom_printf("Building dt structure...\n");
2753 scan_dt_build_struct(root, &mem_start, &mem_end);
2754 dt_push_token(OF_DT_END, &mem_start, &mem_end);
2755 dt_struct_end = PAGE_ALIGN(mem_start);
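/*
 * Blob layout at this point, in the order built above:
 *   dt_header_start:  boot_param_header, then the memory reserve map
 *   dt_string_start:  strings block (4-byte hole, then nul-terminated names)
 *   dt_struct_start:  structure block, terminated by OF_DT_END
 * The header offsets filled in below are all relative to dt_header_start.
 */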
2756
2757 /* Finish header */
2758 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2759 hdr->magic = cpu_to_be32(OF_DT_HEADER);
2760 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2761 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2762 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2763 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2764 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2765 hdr->version = cpu_to_be32(OF_DT_VERSION);
2766 /* Version 16 is not backward compatible */
2767 hdr->last_comp_version = cpu_to_be32(0x10);
2768
2769 /* Copy the reserve map in */
2770 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2771
2772 #ifdef DEBUG_PROM
2773 {
2774 int i;
2775 prom_printf("reserved memory map:\n");
2776 for (i = 0; i < mem_reserve_cnt; i++)
2777 prom_printf(" %llx - %llx\n",
2778 be64_to_cpu(mem_reserve_map[i].base),
2779 be64_to_cpu(mem_reserve_map[i].size));
2780 }
2781 #endif
2782 /* Bump mem_reserve_cnt to cause further reservations to fail
2783 * since it's too late.
2784 */
2785 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2786
2787 prom_printf("Device tree strings 0x%lx -> 0x%lx\n",
2788 dt_string_start, dt_string_end);
2789 prom_printf("Device tree struct 0x%lx -> 0x%lx\n",
2790 dt_struct_start, dt_struct_end);
2791 }
2792
2793 #ifdef CONFIG_PPC_MAPLE
2794 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2795 * The values are bad, and it doesn't even have the right number of cells. */
2796 static void __init fixup_device_tree_maple(void)
2797 {
2798 phandle isa;
2799 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2800 u32 isa_ranges[6];
2801 char *name;
2802
2803 name = "/ht@0/isa@4";
2804 isa = call_prom("finddevice", 1, 1, ADDR(name));
2805 if (!PHANDLE_VALID(isa)) {
2806 name = "/ht@0/isa@6";
2807 isa = call_prom("finddevice", 1, 1, ADDR(name));
2808 rloc = 0x01003000; /* IO space; PCI device = 6 */
2809 }
2810 if (!PHANDLE_VALID(isa))
2811 return;
2812
2813 if (prom_getproplen(isa, "ranges") != 12)
2814 return;
2815 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2816 == PROM_ERROR)
2817 return;
2818
2819 if (isa_ranges[0] != 0x1 ||
2820 isa_ranges[1] != 0xf4000000 ||
2821 isa_ranges[2] != 0x00010000)
2822 return;
2823
2824 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2825
2826 isa_ranges[0] = 0x1;
2827 isa_ranges[1] = 0x0;
2828 isa_ranges[2] = rloc;
2829 isa_ranges[3] = 0x0;
2830 isa_ranges[4] = 0x0;
2831 isa_ranges[5] = 0x00010000;
2832 prom_setprop(isa, name, "ranges",
2833 isa_ranges, sizeof(isa_ranges));
2834 }
2835
2836 #define CPC925_MC_START 0xf8000000
2837 #define CPC925_MC_LENGTH 0x1000000
2838 /* The values for the memory controller don't have the right number of cells */
2839 static void __init fixup_device_tree_maple_memory_controller(void)
2840 {
2841 phandle mc;
2842 u32 mc_reg[4];
2843 char *name = "/hostbridge@f8000000";
2844 u32 ac, sc;
2845
2846 mc = call_prom("finddevice", 1, 1, ADDR(name));
2847 if (!PHANDLE_VALID(mc))
2848 return;
2849
2850 if (prom_getproplen(mc, "reg") != 8)
2851 return;
2852
2853 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2854 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2855 if ((ac != 2) || (sc != 2))
2856 return;
2857
2858 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2859 return;
2860
2861 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2862 return;
2863
2864 prom_printf("Fixing up bogus hostbridge on Maple...\n");
2865
2866 mc_reg[0] = 0x0;
2867 mc_reg[1] = CPC925_MC_START;
2868 mc_reg[2] = 0x0;
2869 mc_reg[3] = CPC925_MC_LENGTH;
2870 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2871 }
2872 #else
2873 #define fixup_device_tree_maple()
2874 #define fixup_device_tree_maple_memory_controller()
2875 #endif
2876
2877 #ifdef CONFIG_PPC_CHRP
2878 /*
2879 * Pegasos and BriQ lack the "ranges" property in the isa node
2880 * Pegasos needs decimal IRQ 14/15, not hexadecimal
2881 * Pegasos has the IDE configured in legacy mode, but advertised as native
2882 */
2883 static void __init fixup_device_tree_chrp(void)
2884 {
2885 phandle ph;
2886 u32 prop[6];
2887 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2888 char *name;
2889 int rc;
2890
2891 name = "/pci@80000000/isa@c";
2892 ph = call_prom("finddevice", 1, 1, ADDR(name));
2893 if (!PHANDLE_VALID(ph)) {
2894 name = "/pci@ff500000/isa@6";
2895 ph = call_prom("finddevice", 1, 1, ADDR(name));
2896 rloc = 0x01003000; /* IO space; PCI device = 6 */
2897 }
2898 if (PHANDLE_VALID(ph)) {
2899 rc = prom_getproplen(ph, "ranges");
2900 if (rc == 0 || rc == PROM_ERROR) {
2901 prom_printf("Fixing up missing ISA range on Pegasos...\n");
2902
2903 prop[0] = 0x1;
2904 prop[1] = 0x0;
2905 prop[2] = rloc;
2906 prop[3] = 0x0;
2907 prop[4] = 0x0;
2908 prop[5] = 0x00010000;
2909 prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2910 }
2911 }
2912
2913 name = "/pci@80000000/ide@C,1";
2914 ph = call_prom("finddevice", 1, 1, ADDR(name));
2915 if (PHANDLE_VALID(ph)) {
2916 prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2917 prop[0] = 14;
2918 prop[1] = 0x0;
2919 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2920 prom_printf("Fixing up IDE class-code on Pegasos...\n");
2921 rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2922 if (rc == sizeof(u32)) {
2923 prop[0] &= ~0x5;
2924 prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2925 }
2926 }
2927 }
2928 #else
2929 #define fixup_device_tree_chrp()
2930 #endif
2931
2932 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2933 static void __init fixup_device_tree_pmac(void)
2934 {
2935 phandle u3, i2c, mpic;
2936 u32 u3_rev;
2937 u32 interrupts[2];
2938 u32 parent;
2939
2940 /* Some G5s have a missing interrupt definition, fix it up here */
2941 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2942 if (!PHANDLE_VALID(u3))
2943 return;
2944 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2945 if (!PHANDLE_VALID(i2c))
2946 return;
2947 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2948 if (!PHANDLE_VALID(mpic))
2949 return;
2950
2951 /* check if proper rev of u3 */
2952 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2953 == PROM_ERROR)
2954 return;
2955 if (u3_rev < 0x35 || u3_rev > 0x39)
2956 return;
2957 /* does it need fixup ? */
2958 if (prom_getproplen(i2c, "interrupts") > 0)
2959 return;
2960
2961 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2962
2963 /* interrupt on this revision of u3 is number 0 and level */
2964 interrupts[0] = 0;
2965 interrupts[1] = 1;
2966 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2967 &interrupts, sizeof(interrupts));
2968 parent = (u32)mpic;
2969 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2970 &parent, sizeof(parent));
2971 }
2972 #else
2973 #define fixup_device_tree_pmac()
2974 #endif
2975
2976 #ifdef CONFIG_PPC_EFIKA
2977 /*
2978 * The MPC5200 FEC driver requires a phy-handle property to tell it how
2979 * to talk to the phy. If the phy-handle property is missing, then this
2980 * function is called to add the appropriate nodes and link it to the
2981 * ethernet node.
2982 */
2983 static void __init fixup_device_tree_efika_add_phy(void)
2984 {
2985 u32 node;
2986 char prop[64];
2987 int rv;
2988
2989 /* Check if /builtin/ethernet exists - bail if it doesn't */
2990 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2991 if (!PHANDLE_VALID(node))
2992 return;
2993
2994 /* Check if the phy-handle property exists - bail if it does */
2995 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2996 if (rv <= 0)
2997 return;
2998
2999 /*
3000 * At this point the ethernet device doesn't have a phy described.
3001 * Now we need to add the missing phy node and linkage
3002 */
3003
3004 /* Check for an MDIO bus node - if missing then create one */
3005 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
3006 if (!PHANDLE_VALID(node)) {
3007 prom_printf("Adding Ethernet MDIO node\n");
3008 call_prom("interpret", 1, 1,
3009 " s\" /builtin\" find-device"
3010 " new-device"
3011 " 1 encode-int s\" #address-cells\" property"
3012 " 0 encode-int s\" #size-cells\" property"
3013 " s\" mdio\" device-name"
3014 " s\" fsl,mpc5200b-mdio\" encode-string"
3015 " s\" compatible\" property"
3016 " 0xf0003000 0x400 reg"
3017 " 0x2 encode-int"
3018 " 0x5 encode-int encode+"
3019 " 0x3 encode-int encode+"
3020 " s\" interrupts\" property"
3021 " finish-device");
3022 }
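/*
 * The Forth snippet above creates /builtin/mdio with #address-cells=1,
 * #size-cells=0, compatible "fsl,mpc5200b-mdio", a reg of
 * 0xf0003000/0x400 and an interrupts property of <2 5 3>.
 */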
3023
3024 /* Check for a PHY device node - if missing then create one and
3025 * give its phandle to the ethernet node */
3026 node = call_prom("finddevice", 1, 1,
3027 ADDR("/builtin/mdio/ethernet-phy"));
3028 if (!PHANDLE_VALID(node)) {
3029 prom_printf("Adding Ethernet PHY node\n");
3030 call_prom("interpret", 1, 1,
3031 " s\" /builtin/mdio\" find-device"
3032 " new-device"
3033 " s\" ethernet-phy\" device-name"
3034 " 0x10 encode-int s\" reg\" property"
3035 " my-self"
3036 " ihandle>phandle"
3037 " finish-device"
3038 " s\" /builtin/ethernet\" find-device"
3039 " encode-int"
3040 " s\" phy-handle\" property"
3041 " device-end");
3042 }
3043 }
3044
3045 static void __init fixup_device_tree_efika(void)
3046 {
3047 int sound_irq[3] = { 2, 2, 0 };
3048 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
3049 3,4,0, 3,5,0, 3,6,0, 3,7,0,
3050 3,8,0, 3,9,0, 3,10,0, 3,11,0,
3051 3,12,0, 3,13,0, 3,14,0, 3,15,0 };
3052 u32 node;
3053 char prop[64];
3054 int rv, len;
3055
3056 /* Check if we're really running on an EFIKA */
3057 node = call_prom("finddevice", 1, 1, ADDR("/"));
3058 if (!PHANDLE_VALID(node))
3059 return;
3060
3061 rv = prom_getprop(node, "model", prop, sizeof(prop));
3062 if (rv == PROM_ERROR)
3063 return;
3064 if (prom_strcmp(prop, "EFIKA5K2"))
3065 return;
3066
3067 prom_printf("Applying EFIKA device tree fixups\n");
3068
3069 /* Claiming to be 'chrp' is death */
3070 node = call_prom("finddevice", 1, 1, ADDR("/"));
3071 rv = prom_getprop(node, "device_type", prop, sizeof(prop));
3072 if (rv != PROM_ERROR && (prom_strcmp(prop, "chrp") == 0))
3073 prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
3074
3075 /* CODEGEN,description is exposed in /proc/cpuinfo so
3076 fix that too */
3077 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
3078 if (rv != PROM_ERROR && (prom_strstr(prop, "CHRP")))
3079 prom_setprop(node, "/", "CODEGEN,description",
3080 "Efika 5200B PowerPC System",
3081 sizeof("Efika 5200B PowerPC System"));
3082
3083 /* Fixup bestcomm interrupts property */
3084 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
3085 if (PHANDLE_VALID(node)) {
3086 len = prom_getproplen(node, "interrupts");
3087 if (len == 12) {
3088 prom_printf("Fixing bestcomm interrupts property\n");
3089 prom_setprop(node, "/builtin/bestcom", "interrupts",
3090 bcomm_irq, sizeof(bcomm_irq));
3091 }
3092 }
3093
3094 /* Fixup sound interrupts property */
3095 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
3096 if (PHANDLE_VALID(node)) {
3097 rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
3098 if (rv == PROM_ERROR) {
3099 prom_printf("Adding sound interrupts property\n");
3100 prom_setprop(node, "/builtin/sound", "interrupts",
3101 sound_irq, sizeof(sound_irq));
3102 }
3103 }
3104
3105 /* Make sure ethernet phy-handle property exists */
3106 fixup_device_tree_efika_add_phy();
3107 }
3108 #else
3109 #define fixup_device_tree_efika()
3110 #endif
3111
3112 #ifdef CONFIG_PPC_PASEMI_NEMO
3113 /*
3114 * The CFE firmware supplied on Nemo is broken in several ways; the biggest
3115 * problem is that it reassigns ISA interrupts to unused mpic ints.
3116 * Add an interrupt-controller property for the io-bridge to use
3117 * and correct the ints so we can attach them to an irq_domain
3118 */
3119 static void __init fixup_device_tree_pasemi(void)
3120 {
3121 u32 interrupts[2], parent, rval, val = 0;
3122 char *name, *pci_name;
3123 phandle iob, node;
3124
3125 /* Find the root pci node */
3126 name = "/pxp@0,e0000000";
3127 iob = call_prom("finddevice", 1, 1, ADDR(name));
3128 if (!PHANDLE_VALID(iob))
3129 return;
3130
3131 /* check if interrupt-controller node set yet */
3132 if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
3133 return;
3134
3135 prom_printf("adding interrupt-controller property for SB600...\n");
3136
3137 prom_setprop(iob, name, "interrupt-controller", &val, 0);
3138
3139 pci_name = "/pxp@0,e0000000/pci@11";
3140 node = call_prom("finddevice", 1, 1, ADDR(pci_name));
3141 parent = ADDR(iob);
3142
3143 for ( ; prom_next_node(&node); ) {
3144 /* scan each node for one with an interrupt */
3145 if (!PHANDLE_VALID(node))
3146 continue;
3147
3148 rval = prom_getproplen(node, "interrupts");
3149 if (rval == 0 || rval == PROM_ERROR)
3150 continue;
3151
3152 prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
3153 if ((interrupts[0] < 212) || (interrupts[0] > 222))
3154 continue;
3155
3156 /* found a node, update both interrupts and interrupt-parent */
3157 if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
3158 interrupts[0] -= 203;
3159 if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
3160 interrupts[0] -= 213;
3161 if (interrupts[0] == 221)
3162 interrupts[0] = 14;
3163 if (interrupts[0] == 222)
3164 interrupts[0] = 8;
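/*
 * Net effect of the remapping above: MPIC sources 212-215 become ISA
 * IRQs 9-12, 216-220 become 3-7, 221 becomes 14 and 222 becomes 8.
 */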
3165
3166 prom_setprop(node, pci_name, "interrupts", interrupts,
3167 sizeof(interrupts));
3168 prom_setprop(node, pci_name, "interrupt-parent", &parent,
3169 sizeof(parent));
3170 }
3171
3172 /*
3173 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
3174 * so that generic isa-bridge code can add the SB600 and its on-board
3175 * peripherals.
3176 */
3177 name = "/pxp@0,e0000000/io-bridge@0";
3178 iob = call_prom("finddevice", 1, 1, ADDR(name));
3179 if (!PHANDLE_VALID(iob))
3180 return;
3181
3182 /* device_type is already set, just change it. */
3183
3184 prom_printf("Changing device_type of SB600 node...\n");
3185
3186 prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
3187 }
3188 #else /* !CONFIG_PPC_PASEMI_NEMO */
3189 static inline void fixup_device_tree_pasemi(void) { }
3190 #endif
3191
3192 static void __init fixup_device_tree(void)
3193 {
3194 fixup_device_tree_maple();
3195 fixup_device_tree_maple_memory_controller();
3196 fixup_device_tree_chrp();
3197 fixup_device_tree_pmac();
3198 fixup_device_tree_efika();
3199 fixup_device_tree_pasemi();
3200 }
3201
3202 static void __init prom_find_boot_cpu(void)
3203 {
3204 __be32 rval;
3205 ihandle prom_cpu;
3206 phandle cpu_pkg;
3207
3208 rval = 0;
3209 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
3210 return;
3211 prom_cpu = be32_to_cpu(rval);
3212
3213 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
3214
3215 if (!PHANDLE_VALID(cpu_pkg))
3216 return;
3217
3218 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
3219 prom.cpu = be32_to_cpu(rval);
3220
3221 prom_debug("Booting CPU hw index = %d\n", prom.cpu);
3222 }
3223
3224 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
3225 {
3226 #ifdef CONFIG_BLK_DEV_INITRD
3227 if (r3 && r4 && r4 != 0xdeadbeef) {
3228 __be64 val;
3229
3230 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
3231 prom_initrd_end = prom_initrd_start + r4;
3232
3233 val = cpu_to_be64(prom_initrd_start);
3234 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
3235 &val, sizeof(val));
3236 val = cpu_to_be64(prom_initrd_end);
3237 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
3238 &val, sizeof(val));
3239
3240 reserve_mem(prom_initrd_start,
3241 prom_initrd_end - prom_initrd_start);
3242
3243 prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
3244 prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
3245 }
3246 #endif /* CONFIG_BLK_DEV_INITRD */
3247 }
3248
3249 #ifdef CONFIG_PPC_SVM
3250 /*
3251 * Perform the Enter Secure Mode ultracall.
3252 */
3253 static int __init enter_secure_mode(unsigned long kbase, unsigned long fdt)
3254 {
3255 register unsigned long r3 asm("r3") = UV_ESM;
3256 register unsigned long r4 asm("r4") = kbase;
3257 register unsigned long r5 asm("r5") = fdt;
3258
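/*
 * "sc 2" is the LEV=2 form of the system-call instruction, which traps
 * to the ultravisor: r3 carries UV_ESM on entry and the return status
 * on exit, while r4/r5 pass the kernel base and fdt addresses.
 */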
3259 asm volatile("sc 2" : "+r"(r3) : "r"(r4), "r"(r5));
3260
3261 return r3;
3262 }
3263
3264 /*
3265 * Call the Ultravisor to transfer us to secure memory if we have an ESM blob.
3266 */
3267 static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3268 {
3269 int ret;
3270
3271 if (!prom_svm_enable)
3272 return;
3273
3274 /* Switch to secure mode. */
3275 prom_printf("Switching to secure mode.\n");
3276
3277 /*
3278 * The ultravisor will do an integrity check of the kernel image but we
3279 * relocated it so the check will fail. Restore the original image by
3280 * relocating it back to the kernel virtual base address.
3281 */
3282 relocate(KERNELBASE);
3283
3284 ret = enter_secure_mode(kbase, fdt);
3285
3286 /* Relocate the kernel again. */
3287 relocate(kbase);
3288
3289 if (ret != U_SUCCESS) {
3290 prom_printf("Returned %d from switching to secure mode.\n", ret);
3291 prom_rtas_os_term("Switch to secure mode failed.\n");
3292 }
3293 }
3294 #else
3295 static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3296 {
3297 }
3298 #endif /* CONFIG_PPC_SVM */
3299
3300 /*
3301 * We enter here early on, when the Open Firmware prom is still
3302 * handling exceptions and the MMU hash table for us.
3303 */
3304
3305 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3306 unsigned long pp,
3307 unsigned long r6, unsigned long r7,
3308 unsigned long kbase)
3309 {
3310 unsigned long hdr;
3311
3312 #ifdef CONFIG_PPC32
3313 unsigned long offset = reloc_offset();
3314 reloc_got2(offset);
3315 #endif
3316
3317 /*
3318 * First zero the BSS
3319 */
3320 memset(&__bss_start, 0, __bss_stop - __bss_start);
3321
3322 /*
3323 * Init interface to Open Firmware, get some node references,
3324 * like /chosen
3325 */
3326 prom_init_client_services(pp);
3327
3328 /*
3329 * See if this OF is old enough that we need to do explicit maps
3330 * and other workarounds
3331 */
3332 prom_find_mmu();
3333
3334 /*
3335 * Init prom stdout device
3336 */
3337 prom_init_stdout();
3338
3339 prom_printf("Preparing to boot %s", linux_banner);
3340
3341 /*
3342 * Get default machine type. At this point, we do not differentiate
3343 * between pSeries SMP and pSeries LPAR
3344 */
3345 of_platform = prom_find_machine_type();
3346 prom_printf("Detected machine type: %x\n", of_platform);
3347
3348 #ifndef CONFIG_NONSTATIC_KERNEL
3349 /* Bail if this is a kdump kernel. */
3350 if (PHYSICAL_START > 0)
3351 prom_panic("Error: You can't boot a kdump kernel from OF!\n");
3352 #endif
3353
3354 /*
3355 * Check for an initrd
3356 */
3357 prom_check_initrd(r3, r4);
3358
3359 /*
3360 * Do early parsing of command line
3361 */
3362 early_cmdline_parse();
3363
3364 #ifdef CONFIG_PPC_PSERIES
3365 /*
3366 * On pSeries, inform the firmware about our capabilities
3367 */
3368 if (of_platform == PLATFORM_PSERIES ||
3369 of_platform == PLATFORM_PSERIES_LPAR)
3370 prom_send_capabilities();
3371 #endif
3372
3373 /*
3374 * Copy the CPU hold code
3375 */
3376 if (of_platform != PLATFORM_POWERMAC)
3377 copy_and_flush(0, kbase, 0x100, 0);
3378
3379 /*
3380 * Initialize memory management within prom_init
3381 */
3382 prom_init_mem();
3383
3384 /*
3385 * Determine which cpu is actually running right _now_
3386 */
3387 prom_find_boot_cpu();
3388
3389 /*
3390 * Initialize display devices
3391 */
3392 prom_check_displays();
3393
3394 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
3395 /*
3396 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
3397 * that uses the allocator, as we need to make sure we get the top of memory
3398 * available for us here...
3399 */
3400 if (of_platform == PLATFORM_PSERIES)
3401 prom_initialize_tce_table();
3402 #endif
3403
3404 /*
3405 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3406 * have a usable RTAS implementation.
3407 */
3408 if (of_platform != PLATFORM_POWERMAC)
3409 prom_instantiate_rtas();
3410
3411 #ifdef CONFIG_PPC64
3412 /* instantiate sml */
3413 prom_instantiate_sml();
3414 #endif
3415
3416 /*
3417 * On non-powermacs, put all CPUs in spin-loops.
3418 *
3419 * PowerMacs use a different mechanism to spin CPUs
3420 *
3421 * (This must be done after instantiating RTAS)
3422 */
3423 if (of_platform != PLATFORM_POWERMAC)
3424 prom_hold_cpus();
3425
3426 /*
3427 * Fill in some infos for use by the kernel later on
3428 */
3429 if (prom_memory_limit) {
3430 __be64 val = cpu_to_be64(prom_memory_limit);
3431 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3432 &val, sizeof(val));
3433 }
3434 #ifdef CONFIG_PPC64
3435 if (prom_iommu_off)
3436 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3437 NULL, 0);
3438
3439 if (prom_iommu_force_on)
3440 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3441 NULL, 0);
3442
3443 if (prom_tce_alloc_start) {
3444 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3445 &prom_tce_alloc_start,
3446 sizeof(prom_tce_alloc_start));
3447 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3448 &prom_tce_alloc_end,
3449 sizeof(prom_tce_alloc_end));
3450 }
3451 #endif
3452
3453 /*
3454 * Fixup any known bugs in the device-tree
3455 */
3456 fixup_device_tree();
3457
3458 /*
3459 * Now finally create the flattened device-tree
3460 */
3461 prom_printf("copying OF device tree...\n");
3462 flatten_device_tree();
3463
3464 /*
3465 * in case stdin is USB and still active on IBM machines...
3466 * Unfortunately quiesce crashes on some powermacs if we have
3467 * closed stdin already (in particular the powerbook 101).
3468 */
3469 if (of_platform != PLATFORM_POWERMAC)
3470 prom_close_stdin();
3471
3472 /*
3473 * Call OF "quiesce" method to shut down pending DMA's from
3474 * devices etc...
3475 */
3476 prom_printf("Quiescing Open Firmware ...\n");
3477 call_prom("quiesce", 0, 0);
3478
3479 /*
3480 * And finally, call the kernel passing it the flattened device
3481 * tree and NULL as r5, thus triggering the new entry point which
3482 * is common to us and kexec
3483 */
3484 hdr = dt_header_start;
3485
3486 prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
3487 prom_debug("->dt_header_start=0x%lx\n", hdr);
3488
3489 #ifdef CONFIG_PPC32
3490 reloc_got2(-offset);
3491 #endif
3492
3493 /* Move to secure memory if we're supposed to be secure guests. */
3494 setup_secure_guest(kbase, hdr);
3495
3496 __start(hdr, kbase, 0, 0, 0, 0, 0);
3497
3498 return 0;
3499 }
3500