/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * This file contains the low-level support and setup for the
 * PowerPC platform, including trap and interrupt dispatch.
 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include "ppc_defs.h"

#ifdef CONFIG_APUS
#include <asm/amigappc.h>
#endif

#ifdef CONFIG_PPC64BRIDGE
#define LOAD_BAT(n, reg, RA, RB)	\
	ld	RA,(n*32)+0(reg);	\
	ld	RB,(n*32)+8(reg);	\
	mtspr	IBAT##n##U,RA;		\
	mtspr	IBAT##n##L,RB;		\
	ld	RA,(n*32)+16(reg);	\
	ld	RB,(n*32)+24(reg);	\
	mtspr	DBAT##n##U,RA;		\
	mtspr	DBAT##n##L,RB;		\

#else /* CONFIG_PPC64BRIDGE */

/* The 601 only has IBATs; cr0.eq is set on the 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB)	\
	/* see the comment for clear_bats() -- Cort */ \
	li	RA,0;			\
	mtspr	IBAT##n##U,RA;		\
	mtspr	DBAT##n##U,RA;		\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	IBAT##n##U,RA;		\
	mtspr	IBAT##n##L,RB;		\
	beq	1f;			\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	DBAT##n##U,RA;		\
	mtspr	DBAT##n##L,RB;		\
1:
#endif /* CONFIG_PPC64BRIDGE */

	.text
	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
	.stabs	"head.S",N_SO,0,0,0f
0:
	.globl	_stext
_stext:

/*
 * _start is defined this way because the XCOFF loader in the OpenFirmware
 * on the powermac expects the entry point to be a procedure descriptor.
 */
	.text
	.globl	_start
_start:
	/*
	 * These are here for legacy reasons; the kernel used to
	 * need to look like a COFF function entry for the pmac,
	 * but we're always started by some kind of bootloader now.
	 * -- Cort
	 */
	nop
	nop
	nop

/* PMAC
 * Enter here with the kernel text, data and bss loaded starting at
 * 0, running with virtual == physical mapping.
 * r5 points to the prom entry point (the client interface handler
 * address).  Address translation is turned on, with the prom
 * managing the hash table.  Interrupts are disabled.  The stack
 * pointer (r1) points to just below the end of the half-meg region
 * from 0x380000 - 0x400000, which is mapped in already.
 *
 * If we are booted from MacOS via BootX, we enter with the kernel
 * image loaded somewhere, and the following values in registers:
 *  r3: 'BooX' (0x426f6f58)
 *  r4: virtual address of boot_infos_t
 *  r5: 0
 *
 * APUS
 *   r3: 'APUS'
 *   r4: physical address of memory base
 *   Linux/m68k style BootInfo structure at &_end.
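 *
 *   For orientation only: the C code reached via early_init()/prom_init()
 *   is what actually sorts out which of these entry conventions was used.
 *   A minimal sketch of that dispatch, assuming only the magic values
 *   quoted above (the enum and helper names here are hypothetical):
 *
 *	enum boot_path { BOOT_BOOTX, BOOT_APUS, BOOT_OTHER };
 *
 *	static enum boot_path guess_boot_path(unsigned long r3)
 *	{
 *		if (r3 == 0x426f6f58)	// 'BooX': booted from MacOS via BootX
 *			return BOOT_BOOTX;
 *		if (r3 == 0x41505553)	// 'APUS': Amiga PowerUP board
 *			return BOOT_APUS;
 *		return BOOT_OTHER;	// OF client interface, PReP, ...
 *	}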
114 * 115 * PREP 116 * This is jumped to on prep systems right after the kernel is relocated 117 * to its proper place in memory by the boot loader. The expected layout 118 * of the regs is: 119 * r3: ptr to residual data 120 * r4: initrd_start or if no initrd then 0 121 * r5: initrd_end - unused if r4 is 0 122 * r6: Start of command line string 123 * r7: End of command line string 124 * 125 * This just gets a minimal mmu environment setup so we can call 126 * start_here() to do the real work. 127 * -- Cort 128 */ 129 130 .globl __start 131__start: 132/* 133 * We have to do any OF calls before we map ourselves to KERNELBASE, 134 * because OF may have I/O devices mapped into that area 135 * (particularly on CHRP). 136 */ 137 mr r31,r3 /* save parameters */ 138 mr r30,r4 139 mr r29,r5 140 mr r28,r6 141 mr r27,r7 142 li r24,0 /* cpu # */ 143 144#ifdef CONFIG_POWER4 145/* 146 * On the PPC970, we have to turn off real-mode cache inhibit 147 * early, before we first turn the MMU off. 148 */ 149 mfspr r0,SPRN_PVR 150 srwi r0,r0,16 151 cmpwi r0,0x39 152 beql ppc970_setup_hid 153#endif 154 155/* 156 * early_init() does the early machine identification and does 157 * the necessary low-level setup and clears the BSS 158 * -- Cort <cort@fsmlabs.com> 159 */ 160 bl early_init 161 162#ifdef CONFIG_APUS 163/* On APUS the __va/__pa constants need to be set to the correct 164 * values before continuing. 165 */ 166 mr r4,r30 167 bl fix_mem_constants 168#endif /* CONFIG_APUS */ 169 170/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains 171 * the physical address we are running at, returned by early_init() 172 */ 173 bl mmu_off 174__after_mmu_off: 175 176#ifndef CONFIG_POWER4 177 bl clear_bats 178 bl flush_tlbs 179 bl initial_bats 180#if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) 181 bl setup_disp_bat 182#endif 183#else /* CONFIG_POWER4 */ 184/* 185 * Load up the SDR1 and segment register values now 186 * since we don't have the BATs. 187 * Also make sure we are running in 32-bit mode. 188 */ 189 bl reloc_offset 190 addis r14,r3,_SDR1@ha /* get the value from _SDR1 */ 191 lwz r14,_SDR1@l(r14) /* assume hash table below 4GB */ 192 mtspr SDR1,r14 193 slbia 194 lis r4,0x2000 /* set pseudo-segment reg 12 */ 195 ori r5,r4,0x0ccc 196 mtsr 12,r5 197 ori r4,r4,0x0888 /* set pseudo-segment reg 8 */ 198 mtsr 8,r4 /* (for access to serial port) */ 199 mfmsr r0 200 clrldi r0,r0,1 201 sync 202 mtmsr r0 203 isync 204#endif /* CONFIG_POWER4 */ 205 206 /* 207 * Call setup_cpu for CPU 0 208 */ 209 bl reloc_offset 210 li r24,0 /* cpu# */ 211 bl call_setup_cpu /* Call setup_cpu for this CPU */ 212#ifdef CONFIG_6xx 213 bl reloc_offset 214 bl init_idle_6xx 215#endif /* CONFIG_6xx */ 216 217#ifndef CONFIG_APUS 218/* 219 * We need to run with _start at physical address 0. 220 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses 221 * the exception vectors at 0 (and therefore this copy 222 * overwrites OF's exception vectors with our own). 223 * The MMU is off at this point. 224 */ 225 bl reloc_offset 226 mr r26,r3 227 addis r4,r3,KERNELBASE@h /* current address of _start */ 228 cmpwi 0,r4,0 /* are we already running at 0? */ 229 bne relocate_kernel 230#endif /* CONFIG_APUS */ 231/* 232 * we now have the 1st 16M of ram mapped with the bats. 233 * prep needs the mmu to be turned on here, but pmac already has it on. 234 * this shouldn't bother the pmac since it just gets turned on again 235 * as we jump to our code at KERNELBASE. -- Cort 236 * Actually no, pmac doesn't have it on any more. 
BootX enters with MMU 237 * off, and in other cases, we now turn it off before changing BATs above. 238 */ 239turn_on_mmu: 240 mfmsr r0 241 ori r0,r0,MSR_DR|MSR_IR 242 mtspr SRR1,r0 243 lis r0,start_here@h 244 ori r0,r0,start_here@l 245 mtspr SRR0,r0 246 SYNC 247 RFI /* enables MMU */ 248 249/* 250 * We need __secondary_hold as a place to hold the other cpus on 251 * an SMP machine, even when we are running a UP kernel. 252 */ 253 . = 0xc0 /* for prep bootloader */ 254 li r3,1 /* MTX only has 1 cpu */ 255 .globl __secondary_hold 256__secondary_hold: 257 /* tell the master we're here */ 258 stw r3,4(0) 259#ifdef CONFIG_SMP 260100: lwz r4,0(0) 261 /* wait until we're told to start */ 262 cmpw 0,r4,r3 263 bne 100b 264 /* our cpu # was at addr 0 - go */ 265 mr r24,r3 /* cpu # */ 266 b __secondary_start 267#else 268 b . 269#endif /* CONFIG_SMP */ 270 271/* 272 * Exception entry code. This code runs with address translation 273 * turned off, i.e. using physical addresses. 274 * We assume sprg3 has the physical address of the current 275 * task's thread_struct. 276 */ 277#define EXCEPTION_PROLOG \ 278 mtspr SPRG0,r20; \ 279 mtspr SPRG1,r21; \ 280 mfcr r20; \ 281 mfspr r21,SPRG2; /* exception stack to use from */ \ 282 cmpwi 0,r21,0; /* user mode or RTAS */ \ 283 bne 1f; \ 284 tophys(r21,r1); /* use tophys(kernel sp) otherwise */ \ 285 subi r21,r21,INT_FRAME_SIZE; /* alloc exc. frame */\ 2861: CLR_TOP32(r21); \ 287 stw r20,_CCR(r21); /* save registers */ \ 288 stw r22,GPR22(r21); \ 289 stw r23,GPR23(r21); \ 290 mfspr r20,SPRG0; \ 291 stw r20,GPR20(r21); \ 292 mfspr r22,SPRG1; \ 293 stw r22,GPR21(r21); \ 294 mflr r20; \ 295 stw r20,_LINK(r21); \ 296 mfctr r22; \ 297 stw r22,_CTR(r21); \ 298 mfspr r20,XER; \ 299 stw r20,_XER(r21); \ 300 mfspr r22,SRR0; \ 301 mfspr r23,SRR1; \ 302 stw r0,GPR0(r21); \ 303 stw r1,GPR1(r21); \ 304 stw r2,GPR2(r21); \ 305 stw r1,0(r21); \ 306 tovirt(r1,r21); /* set new kernel sp */ \ 307 SAVE_4GPRS(3, r21); \ 308 SAVE_GPR(7, r21); 309/* 310 * Note: code which follows this uses cr0.eq (set if from kernel), 311 * r21, r22 (SRR0), and r23 (SRR1). 312 */ 313 314/* 315 * Exception vectors. 316 */ 317#define STD_EXCEPTION(n, label, hdlr) \ 318 . = n; \ 319label: \ 320 EXCEPTION_PROLOG; \ 321 addi r3,r1,STACK_FRAME_OVERHEAD; \ 322 li r20,MSR_KERNEL; \ 323 bl transfer_to_handler; \ 324i##n: \ 325 .long hdlr; \ 326 .long ret_from_except 327 328/* System reset */ 329#ifdef CONFIG_SMP /* MVME/MTX and gemini start the secondary here */ 330#ifdef CONFIG_GEMINI 331 . = 0x100 332 b __secondary_start_gemini 333#else /* CONFIG_GEMINI */ 334 STD_EXCEPTION(0x100, Reset, __secondary_start_psurge) 335#endif /* CONFIG_GEMINI */ 336#else 337 STD_EXCEPTION(0x100, Reset, UnknownException) 338#endif 339 340/* Machine check */ 341BEGIN_FTR_SECTION 342 DSSALL 343 sync 344END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 345 STD_EXCEPTION(0x200, MachineCheck, MachineCheckException) 346 347/* Data access exception. */ 348 . = 0x300 349#ifdef CONFIG_PPC64BRIDGE 350 b DataAccess 351DataAccessCont: 352#else 353DataAccess: 354 EXCEPTION_PROLOG 355#endif /* CONFIG_PPC64BRIDGE */ 356 mfspr r20,DSISR 357BEGIN_FTR_SECTION 358 andis. r0,r20,0xa470 /* weird error? 
*/ 359 bne 1f /* if not, try to put a PTE */ 360 mfspr r4,DAR /* into the hash table */ 361 rlwinm r3,r20,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */ 362 bl hash_page 363END_FTR_SECTION_IFSET(CPU_FTR_HPTE_TABLE) 3641: stw r20,_DSISR(r21) 365 mr r5,r20 366 mfspr r4,DAR 367 stw r4,_DAR(r21) 368 addi r3,r1,STACK_FRAME_OVERHEAD 369 li r20,MSR_KERNEL 370 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */ 371 bl transfer_to_handler 372i0x300: 373 .long do_page_fault 374 .long ret_from_except 375 376#ifdef CONFIG_PPC64BRIDGE 377/* SLB fault on data access. */ 378 . = 0x380 379 b DataSegment 380DataSegmentCont: 381 mfspr r4,DAR 382 stw r4,_DAR(r21) 383 addi r3,r1,STACK_FRAME_OVERHEAD 384 li r20,MSR_KERNEL 385 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */ 386 bl transfer_to_handler 387 .long UnknownException 388 .long ret_from_except 389#endif /* CONFIG_PPC64BRIDGE */ 390 391/* Instruction access exception. */ 392 . = 0x400 393#ifdef CONFIG_PPC64BRIDGE 394 b InstructionAccess 395InstructionAccessCont: 396#else 397InstructionAccess: 398 EXCEPTION_PROLOG 399#endif /* CONFIG_PPC64BRIDGE */ 400BEGIN_FTR_SECTION 401 andis. r0,r23,0x4000 /* no pte found? */ 402 beq 1f /* if so, try to put a PTE */ 403 li r3,0 /* into the hash table */ 404 mr r4,r22 /* SRR0 is fault address */ 405 bl hash_page 406END_FTR_SECTION_IFSET(CPU_FTR_HPTE_TABLE) 4071: addi r3,r1,STACK_FRAME_OVERHEAD 408 mr r4,r22 409 mr r5,r23 410 li r20,MSR_KERNEL 411 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */ 412 bl transfer_to_handler 413i0x400: 414 .long do_page_fault 415 .long ret_from_except 416 417#ifdef CONFIG_PPC64BRIDGE 418/* SLB fault on instruction access. */ 419 . = 0x480 420 b InstructionSegment 421InstructionSegmentCont: 422 addi r3,r1,STACK_FRAME_OVERHEAD 423 li r20,MSR_KERNEL 424 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */ 425 bl transfer_to_handler 426 .long UnknownException 427 .long ret_from_except 428#endif /* CONFIG_PPC64BRIDGE */ 429 430/* External interrupt */ 431 . = 0x500; 432HardwareInterrupt: 433 EXCEPTION_PROLOG; 434 addi r3,r1,STACK_FRAME_OVERHEAD 435 li r20,MSR_KERNEL 436 li r4,0 437 bl transfer_to_handler 438 .globl do_IRQ_intercept 439do_IRQ_intercept: 440 .long do_IRQ; 441 .long ret_from_intercept 442 443/* Alignment exception */ 444 . = 0x600 445Alignment: 446 EXCEPTION_PROLOG 447 mfspr r4,DAR 448 stw r4,_DAR(r21) 449 mfspr r5,DSISR 450 stw r5,_DSISR(r21) 451 addi r3,r1,STACK_FRAME_OVERHEAD 452 li r20,MSR_KERNEL 453 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */ 454 bl transfer_to_handler 455i0x600: 456 .long AlignmentException 457 .long ret_from_except 458 459/* Program check exception */ 460 . = 0x700 461ProgramCheck: 462 EXCEPTION_PROLOG 463 addi r3,r1,STACK_FRAME_OVERHEAD 464 li r20,MSR_KERNEL 465 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */ 466 bl transfer_to_handler 467i0x700: 468 .long ProgramCheckException 469 .long ret_from_except 470 471/* Floating-point unavailable */ 472 . = 0x800 473FPUnavailable: 474 EXCEPTION_PROLOG 475 bne load_up_fpu /* if from user, just load it up */ 476 li r20,MSR_KERNEL 477 bl transfer_to_handler /* if from kernel, take a trap */ 478i0x800: 479 .long KernelFP 480 .long ret_from_except 481 482 . 
= 0x900 483Decrementer: 484 EXCEPTION_PROLOG 485 addi r3,r1,STACK_FRAME_OVERHEAD 486 li r20,MSR_KERNEL 487 bl transfer_to_handler 488 .globl timer_interrupt_intercept 489timer_interrupt_intercept: 490 .long timer_interrupt 491 .long ret_from_intercept 492 493 STD_EXCEPTION(0xa00, Trap_0a, UnknownException) 494 STD_EXCEPTION(0xb00, Trap_0b, UnknownException) 495 496/* System call */ 497 . = 0xc00 498SystemCall: 499 EXCEPTION_PROLOG 500 stw r3,ORIG_GPR3(r21) 501 li r20,MSR_KERNEL 502 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */ 503 bl transfer_to_handler 504 .long DoSyscall 505 .long ret_from_except 506 507/* Single step - not used on 601 */ 508 STD_EXCEPTION(0xd00, SingleStep, SingleStepException) 509 STD_EXCEPTION(0xe00, Trap_0e, UnknownException) 510 511/* 512 * The Altivec unavailable trap is at 0x0f20. Foo. 513 * We effectively remap it to 0x3000. 514 */ 515 . = 0xf00 516 b Trap_0f 517trap_0f_cont: 518 addi r3,r1,STACK_FRAME_OVERHEAD 519 li r20,MSR_KERNEL 520 bl transfer_to_handler 521 .long UnknownException 522 .long ret_from_except 523 524 . = 0xf20 525#ifdef CONFIG_ALTIVEC 526 b AltiVecUnavailable 527#endif 528Trap_0f: 529 EXCEPTION_PROLOG 530 b trap_0f_cont 531 532/* 533 * Handle TLB miss for instruction on 603/603e. 534 * Note: we get an alternate set of r0 - r3 to use automatically. 535 */ 536 . = 0x1000 537InstructionTLBMiss: 538/* 539 * r0: stored ctr 540 * r1: linux style pte ( later becomes ppc hardware pte ) 541 * r2: ptr to linux-style pte 542 * r3: scratch 543 */ 544 mfctr r0 545 /* Get PTE (linux-style) and check access */ 546 mfspr r3,IMISS 547 lis r1,KERNELBASE@h /* check if kernel address */ 548 cmplw 0,r3,r1 549 mfspr r2,SPRG3 550 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ 551 lwz r2,PGDIR(r2) 552 blt+ 112f 553 lis r2,swapper_pg_dir@ha /* if kernel address, use */ 554 addi r2,r2,swapper_pg_dir@l /* kernel page table */ 555 mfspr r1,SRR1 /* and MSR_PR bit from SRR1 */ 556 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ 557112: tophys(r2,r2) 558 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ 559 lwz r2,0(r2) /* get pmd entry */ 560 rlwinm. r2,r2,0,0,19 /* extract address of pte page */ 561 beq- InstructionAddressInvalid /* return if no mapping */ 562 tophys(r2,r2) 563 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ 564 lwz r3,0(r2) /* get linux-style pte */ 565 andc. r1,r1,r3 /* check access & ~permission */ 566 bne- InstructionAddressInvalid /* return if access not permitted */ 567 ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */ 568 /* 569 * NOTE! We are assuming this is not an SMP system, otherwise 570 * we would need to update the pte atomically with lwarx/stwcx. 571 */ 572 stw r3,0(r2) /* update PTE (accessed bit) */ 573 /* Convert linux-style PTE to low word of PPC-style PTE */ 574 rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */ 575 rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */ 576 and r1,r1,r2 /* writable if _RW and _DIRTY */ 577 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ 578 rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ 579 ori r1,r1,0xe14 /* clear out reserved bits and M */ 580 andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ 581 mtspr RPA,r1 582 mfspr r3,IMISS 583 tlbli r3 584 mfspr r3,SRR1 /* Need to restore CR0 */ 585 mtcrf 0x80,r3 586 rfi 587InstructionAddressInvalid: 588 mfspr r3,SRR1 589 rlwinm r1,r3,9,6,6 /* Get load/store bit */ 590 591 addis r1,r1,0x2000 592 mtspr DSISR,r1 /* (shouldn't be needed) */ 593 mtctr r0 /* Restore CTR */ 594 andi. 
r2,r3,0xFFFF /* Clear upper bits of SRR1 */ 595 or r2,r2,r1 596 mtspr SRR1,r2 597 mfspr r1,IMISS /* Get failing address */ 598 rlwinm. r2,r2,0,31,31 /* Check for little endian access */ 599 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */ 600 xor r1,r1,r2 601 mtspr DAR,r1 /* Set fault address */ 602 mfmsr r0 /* Restore "normal" registers */ 603 xoris r0,r0,MSR_TGPR>>16 604 mtcrf 0x80,r3 /* Restore CR0 */ 605 mtmsr r0 606 b InstructionAccess 607 608/* 609 * Handle TLB miss for DATA Load operation on 603/603e 610 */ 611 . = 0x1100 612DataLoadTLBMiss: 613/* 614 * r0: stored ctr 615 * r1: linux style pte ( later becomes ppc hardware pte ) 616 * r2: ptr to linux-style pte 617 * r3: scratch 618 */ 619 mfctr r0 620 /* Get PTE (linux-style) and check access */ 621 mfspr r3,DMISS 622 lis r1,KERNELBASE@h /* check if kernel address */ 623 cmplw 0,r3,r1 624 mfspr r2,SPRG3 625 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ 626 lwz r2,PGDIR(r2) 627 blt+ 112f 628 lis r2,swapper_pg_dir@ha /* if kernel address, use */ 629 addi r2,r2,swapper_pg_dir@l /* kernel page table */ 630 mfspr r1,SRR1 /* and MSR_PR bit from SRR1 */ 631 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ 632112: tophys(r2,r2) 633 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ 634 lwz r2,0(r2) /* get pmd entry */ 635 rlwinm. r2,r2,0,0,19 /* extract address of pte page */ 636 beq- DataAddressInvalid /* return if no mapping */ 637 tophys(r2,r2) 638 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ 639 lwz r3,0(r2) /* get linux-style pte */ 640 andc. r1,r1,r3 /* check access & ~permission */ 641 bne- DataAddressInvalid /* return if access not permitted */ 642 ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */ 643 /* 644 * NOTE! We are assuming this is not an SMP system, otherwise 645 * we would need to update the pte atomically with lwarx/stwcx. 646 */ 647 stw r3,0(r2) /* update PTE (accessed bit) */ 648 /* Convert linux-style PTE to low word of PPC-style PTE */ 649 rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */ 650 rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */ 651 and r1,r1,r2 /* writable if _RW and _DIRTY */ 652 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ 653 rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ 654 ori r1,r1,0xe14 /* clear out reserved bits and M */ 655 andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ 656 mtspr RPA,r1 657 mfspr r3,DMISS 658 tlbld r3 659 mfspr r3,SRR1 /* Need to restore CR0 */ 660 mtcrf 0x80,r3 661 rfi 662DataAddressInvalid: 663 mfspr r3,SRR1 664 rlwinm r1,r3,9,6,6 /* Get load/store bit */ 665 addis r1,r1,0x2000 666 mtspr DSISR,r1 667 mtctr r0 /* Restore CTR */ 668 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */ 669 mtspr SRR1,r2 670 mfspr r1,DMISS /* Get failing address */ 671 rlwinm. r2,r2,0,31,31 /* Check for little endian access */ 672 beq 20f /* Jump if big endian */ 673 xori r1,r1,3 67420: mtspr DAR,r1 /* Set fault address */ 675 mfmsr r0 /* Restore "normal" registers */ 676 xoris r0,r0,MSR_TGPR>>16 677 mtcrf 0x80,r3 /* Restore CR0 */ 678 mtmsr r0 679 b DataAccess 680 681/* 682 * Handle TLB miss for DATA Store on 603/603e 683 */ 684 . 
= 0x1200 685DataStoreTLBMiss: 686/* 687 * r0: stored ctr 688 * r1: linux style pte ( later becomes ppc hardware pte ) 689 * r2: ptr to linux-style pte 690 * r3: scratch 691 */ 692 mfctr r0 693 /* Get PTE (linux-style) and check access */ 694 mfspr r3,DMISS 695 lis r1,KERNELBASE@h /* check if kernel address */ 696 cmplw 0,r3,r1 697 mfspr r2,SPRG3 698 li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */ 699 lwz r2,PGDIR(r2) 700 blt+ 112f 701 lis r2,swapper_pg_dir@ha /* if kernel address, use */ 702 addi r2,r2,swapper_pg_dir@l /* kernel page table */ 703 mfspr r1,SRR1 /* and MSR_PR bit from SRR1 */ 704 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ 705112: tophys(r2,r2) 706 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ 707 lwz r2,0(r2) /* get pmd entry */ 708 rlwinm. r2,r2,0,0,19 /* extract address of pte page */ 709 beq- DataAddressInvalid /* return if no mapping */ 710 tophys(r2,r2) 711 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ 712 lwz r3,0(r2) /* get linux-style pte */ 713 andc. r1,r1,r3 /* check access & ~permission */ 714 bne- DataAddressInvalid /* return if access not permitted */ 715 ori r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY 716 /* 717 * NOTE! We are assuming this is not an SMP system, otherwise 718 * we would need to update the pte atomically with lwarx/stwcx. 719 */ 720 stw r3,0(r2) /* update PTE (accessed/dirty bits) */ 721 /* Convert linux-style PTE to low word of PPC-style PTE */ 722 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ 723 li r1,0xe15 /* clear out reserved bits and M */ 724 andc r1,r3,r1 /* PP = user? 2: 0 */ 725 mtspr RPA,r1 726 mfspr r3,DMISS 727 tlbld r3 728 mfspr r3,SRR1 /* Need to restore CR0 */ 729 mtcrf 0x80,r3 730 rfi 731 732#ifndef CONFIG_ALTIVEC 733#define AltivecAssistException UnknownException 734#endif 735 736 STD_EXCEPTION(0x1300, Trap_13, InstructionBreakpoint) 737 STD_EXCEPTION(0x1400, SMI, SMIException) 738 STD_EXCEPTION(0x1500, Trap_15, UnknownException) 739#ifdef CONFIG_POWER4 740 STD_EXCEPTION(0x1600, Trap_16, UnknownException) 741 STD_EXCEPTION(0x1700, Trap_17, AltivecAssistException) 742 STD_EXCEPTION(0x1800, Trap_18, TAUException) 743#else /* !CONFIG_POWER4 */ 744 STD_EXCEPTION(0x1600, Trap_16, AltivecAssistException) 745 STD_EXCEPTION(0x1700, Trap_17, TAUException) 746 STD_EXCEPTION(0x1800, Trap_18, UnknownException) 747#endif 748 STD_EXCEPTION(0x1900, Trap_19, UnknownException) 749 STD_EXCEPTION(0x1a00, Trap_1a, UnknownException) 750 STD_EXCEPTION(0x1b00, Trap_1b, UnknownException) 751 STD_EXCEPTION(0x1c00, Trap_1c, UnknownException) 752 STD_EXCEPTION(0x1d00, Trap_1d, UnknownException) 753 STD_EXCEPTION(0x1e00, Trap_1e, UnknownException) 754 STD_EXCEPTION(0x1f00, Trap_1f, UnknownException) 755 STD_EXCEPTION(0x2000, RunMode, RunModeException) 756 STD_EXCEPTION(0x2100, Trap_21, UnknownException) 757 STD_EXCEPTION(0x2200, Trap_22, UnknownException) 758 STD_EXCEPTION(0x2300, Trap_23, UnknownException) 759 STD_EXCEPTION(0x2400, Trap_24, UnknownException) 760 STD_EXCEPTION(0x2500, Trap_25, UnknownException) 761 STD_EXCEPTION(0x2600, Trap_26, UnknownException) 762 STD_EXCEPTION(0x2700, Trap_27, UnknownException) 763 STD_EXCEPTION(0x2800, Trap_28, UnknownException) 764 STD_EXCEPTION(0x2900, Trap_29, UnknownException) 765 STD_EXCEPTION(0x2a00, Trap_2a, UnknownException) 766 STD_EXCEPTION(0x2b00, Trap_2b, UnknownException) 767 STD_EXCEPTION(0x2c00, Trap_2c, UnknownException) 768 STD_EXCEPTION(0x2d00, Trap_2d, UnknownException) 769 STD_EXCEPTION(0x2e00, Trap_2e, UnknownException) 770 STD_EXCEPTION(0x2f00, 
Trap_2f, UnknownException) 771 772 . = 0x3000 773 774#ifdef CONFIG_ALTIVEC 775AltiVecUnavailable: 776 EXCEPTION_PROLOG 777 bne load_up_altivec /* if from user, just load it up */ 778 li r20,MSR_KERNEL 779 bl transfer_to_handler /* if from kernel, take a trap */ 780 .long KernelAltiVec 781 .long ret_from_except 782#endif /* CONFIG_ALTIVEC */ 783 784#ifdef CONFIG_PPC64BRIDGE 785DataAccess: 786 EXCEPTION_PROLOG 787 b DataAccessCont 788InstructionAccess: 789 EXCEPTION_PROLOG 790 b InstructionAccessCont 791DataSegment: 792 EXCEPTION_PROLOG 793 b DataSegmentCont 794InstructionSegment: 795 EXCEPTION_PROLOG 796 b InstructionSegmentCont 797#endif /* CONFIG_PPC64BRIDGE */ 798 799/* 800 * This code finishes saving the registers to the exception frame 801 * and jumps to the appropriate handler for the exception, turning 802 * on address translation. 803 */ 804 .globl transfer_to_handler 805transfer_to_handler: 806 stw r22,_NIP(r21) 807 stw r23,_MSR(r21) 808 SAVE_4GPRS(8, r21) 809 SAVE_8GPRS(12, r21) 810 SAVE_8GPRS(24, r21) 811 andi. r23,r23,MSR_PR 812 mfspr r23,SPRG3 /* if from user, fix up THREAD.regs */ 813 addi r2,r23,-THREAD /* set r2 to current */ 814 beq 2f 815 addi r24,r1,STACK_FRAME_OVERHEAD 816 stw r24,PT_REGS(r23) 817#ifdef CONFIG_ALTIVEC 818BEGIN_FTR_SECTION 819 mfspr r22,SPRN_VRSAVE /* if G4, save vrsave register value */ 820 stw r22,THREAD_VRSAVE(r23) 821END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 822#endif /* CONFIG_ALTIVEC */ 823#ifndef CONFIG_6xx 8242: 825#endif 826 .globl transfer_to_handler_cont 827transfer_to_handler_cont: 828 tovirt(r2,r2) 829 mflr r23 830 andi. r24,r23,0x3f00 /* get vector offset */ 831 stw r24,TRAP(r21) 832 li r22,0 833 stw r22,RESULT(r21) 834 mtspr SPRG2,r22 /* r1 is now kernel sp */ 835 addi r24,r2,TASK_STRUCT_SIZE /* check for kernel stack overflow */ 836 cmplw 0,r1,r2 837 cmplw 1,r1,r24 838 crand 1,1,4 839 bgt- stack_ovf /* if r2 < r1 < r2+TASK_STRUCT_SIZE */ 840 lwz r24,0(r23) /* virtual address of handler */ 841 lwz r23,4(r23) /* where to go when done */ 842 FIX_SRR1(r20,r22) 843 mtspr SRR0,r24 844 mtspr SRR1,r20 845 mtlr r23 846 SYNC 847 RFI /* jump to handler, enable MMU */ 848 849#ifdef CONFIG_6xx 8502: 851 /* Out of line case when returning to kernel, 852 * check return from power_save_6xx 853 */ 854 mfspr r24,SPRN_HID0 855 mtcr r24 856BEGIN_FTR_SECTION 857 bt- 8,power_save_6xx_restore /* Check DOZE */ 858END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) 859BEGIN_FTR_SECTION 860 bt- 9,power_save_6xx_restore /* Check NAP */ 861END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) 862 b transfer_to_handler_cont 863 864#endif /* CONFIG_6xx */ 865 866/* 867 * On kernel stack overflow, load up an initial stack pointer 868 * and call StackOverflow(regs), which should not return. 869 */ 870stack_ovf: 871 addi r3,r1,STACK_FRAME_OVERHEAD 872 lis r1,init_task_union@ha 873 addi r1,r1,init_task_union@l 874 addi r1,r1,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD 875 lis r24,StackOverflow@ha 876 addi r24,r24,StackOverflow@l 877 li r20,MSR_KERNEL 878 FIX_SRR1(r20,r22) 879 mtspr SRR0,r24 880 mtspr SRR1,r20 881 SYNC 882 RFI 883 884/* 885 * This task wants to use the FPU now. 886 * On UP, disable FP for the task which had the FPU previously, 887 * and save its floating-point registers in its thread_struct. 888 * Load up this task's FP registers from its thread_struct, 889 * enable the FPU for the current task and return to the task. 
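 *
 * A rough C-level sketch of the lazy-FPU idea the code below implements
 * on UP (illustrative only; the helper functions are hypothetical
 * stand-ins for the SAVE_32FPRS/REST_32FPRS and saved-MSR manipulation
 * done in assembly):
 *
 *	struct task_struct;					// opaque here
 *	extern struct task_struct *last_task_used_math;	// also used below
 *	void save_fp_state(struct task_struct *t);	// FPRs + FPSCR -> thread_struct
 *	void load_fp_state(struct task_struct *t);	// thread_struct -> FPRs + FPSCR
 *	void clear_msr_fp(struct task_struct *t);	// its next FP use will trap again
 *	void set_msr_fp(struct task_struct *t);		// FP enabled when it resumes
 *
 *	void lazy_fpu_switch_in(struct task_struct *tsk)
 *	{
 *		if (last_task_used_math) {
 *			save_fp_state(last_task_used_math);
 *			clear_msr_fp(last_task_used_math);
 *		}
 *		load_fp_state(tsk);
 *		set_msr_fp(tsk);
 *		last_task_used_math = tsk;
 *	}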
890 */ 891load_up_fpu: 892 mfmsr r5 893 ori r5,r5,MSR_FP 894#ifdef CONFIG_PPC64BRIDGE 895 clrldi r5,r5,1 /* turn off 64-bit mode */ 896#endif /* CONFIG_PPC64BRIDGE */ 897 SYNC 898 MTMSRD(r5) /* enable use of fpu now */ 899 isync 900/* 901 * For SMP, we don't do lazy FPU switching because it just gets too 902 * horrendously complex, especially when a task switches from one CPU 903 * to another. Instead we call giveup_fpu in switch_to. 904 */ 905#ifndef CONFIG_SMP 906 tophys(r6,0) /* get __pa constant */ 907 addis r3,r6,last_task_used_math@ha 908 lwz r4,last_task_used_math@l(r3) 909 cmpi 0,r4,0 910 beq 1f 911 add r4,r4,r6 912 addi r4,r4,THREAD /* want last_task_used_math->thread */ 913 SAVE_32FPRS(0, r4) 914 mffs fr0 915 stfd fr0,THREAD_FPSCR-4(r4) 916 lwz r5,PT_REGS(r4) 917 add r5,r5,r6 918 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) 919 li r20,MSR_FP|MSR_FE0|MSR_FE1 920 andc r4,r4,r20 /* disable FP for previous task */ 921 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) 9221: 923#endif /* CONFIG_SMP */ 924 /* enable use of FP after return */ 925 mfspr r5,SPRG3 /* current task's THREAD (phys) */ 926 lwz r4,THREAD_FPEXC_MODE(r5) 927 ori r23,r23,MSR_FP /* enable FP for current */ 928 or r23,r23,r4 929 lfd fr0,THREAD_FPSCR-4(r5) 930 mtfsf 0xff,fr0 931 REST_32FPRS(0, r5) 932#ifndef CONFIG_SMP 933 subi r4,r5,THREAD 934 sub r4,r4,r6 935 stw r4,last_task_used_math@l(r3) 936#endif /* CONFIG_SMP */ 937 /* restore registers and return */ 938 lwz r3,_CCR(r21) 939 lwz r4,_LINK(r21) 940 mtcrf 0xff,r3 941 mtlr r4 942 REST_GPR(1, r21) 943 REST_4GPRS(3, r21) 944 /* we haven't used ctr or xer */ 945 mtspr SRR1,r23 946 mtspr SRR0,r22 947 REST_GPR(20, r21) 948 REST_2GPRS(22, r21) 949 lwz r21,GPR21(r21) 950 SYNC 951 RFI 952 953/* 954 * FP unavailable trap from kernel - print a message, but let 955 * the task use FP in the kernel until it returns to user mode. 956 */ 957KernelFP: 958 lwz r3,_MSR(r1) 959 ori r3,r3,MSR_FP 960 stw r3,_MSR(r1) /* enable use of FP after return */ 961 lis r3,86f@h 962 ori r3,r3,86f@l 963 mr r4,r2 /* current */ 964 lwz r5,_NIP(r1) 965 bl printk 966 b ret_from_except 96786: .string "floating point used in kernel (task=%p, pc=%x)\n" 968 .align 4 969 970#ifdef CONFIG_ALTIVEC 971/* Note that the AltiVec support is closely modeled after the FP 972 * support. Changes to one are likely to be applicable to the 973 * other! */ 974load_up_altivec: 975/* 976 * Disable AltiVec for the task which had AltiVec previously, 977 * and save its AltiVec registers in its thread_struct. 978 * Enables AltiVec for use in the kernel on return. 979 * On SMP we know the AltiVec units are free, since we give it up every 980 * switch. -- Kumar 981 */ 982 mfmsr r5 983 oris r5,r5,MSR_VEC@h 984 mtmsr r5 /* enable use of AltiVec now */ 985 isync 986/* 987 * For SMP, we don't do lazy AltiVec switching because it just gets too 988 * horrendously complex, especially when a task switches from one CPU 989 * to another. Instead we call giveup_altivec in switch_to. 
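 *
 * For reference, the SMP caller-side contract mentioned above (a rough
 * sketch of the switch_to-time calls, which live in the context-switch
 * code rather than in this file):
 *
 *	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
 *		giveup_fpu(prev);	// save FPRs into prev's thread_struct
 *	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
 *		giveup_altivec(prev);	// likewise for the vector registers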
990 */ 991#ifndef CONFIG_SMP 992#ifndef CONFIG_APUS 993 lis r6,-KERNELBASE@h 994#else 995 lis r6,CYBERBASEp@h 996 lwz r6,0(r6) 997#endif 998 addis r3,r6,last_task_used_altivec@ha 999 lwz r4,last_task_used_altivec@l(r3) 1000 cmpi 0,r4,0 1001 beq 1f 1002 add r4,r4,r6 1003 addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */ 1004 SAVE_32VR(0,r20,r4) 1005 MFVSCR(vr0) 1006 li r20,THREAD_VSCR 1007 STVX(vr0,r20,r4) 1008 lwz r5,PT_REGS(r4) 1009 add r5,r5,r6 1010 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1011 lis r20,MSR_VEC@h 1012 andc r4,r4,r20 /* disable altivec for previous task */ 1013 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) 10141: 1015#endif /* CONFIG_SMP */ 1016 /* enable use of AltiVec after return */ 1017 oris r23,r23,MSR_VEC@h 1018 mfspr r5,SPRG3 /* current task's THREAD (phys) */ 1019 li r4,1 1020 li r20,THREAD_VSCR 1021 stw r4,THREAD_USED_VR(r5) 1022 LVX(vr0,r20,r5) 1023 MTVSCR(vr0) 1024 REST_32VR(0,r20,r5) 1025#ifndef CONFIG_SMP 1026 subi r4,r5,THREAD 1027 sub r4,r4,r6 1028 stw r4,last_task_used_altivec@l(r3) 1029#endif /* CONFIG_SMP */ 1030 /* restore registers and return */ 1031 lwz r3,_CCR(r21) 1032 lwz r4,_LINK(r21) 1033 mtcrf 0xff,r3 1034 mtlr r4 1035 REST_GPR(1, r21) 1036 REST_4GPRS(3, r21) 1037 /* we haven't used ctr or xer */ 1038 mtspr SRR1,r23 1039 mtspr SRR0,r22 1040 REST_GPR(20, r21) 1041 REST_2GPRS(22, r21) 1042 lwz r21,GPR21(r21) 1043 SYNC 1044 RFI 1045 1046/* 1047 * AltiVec unavailable trap from kernel - print a message, but let 1048 * the task use AltiVec in the kernel until it returns to user mode. 1049 */ 1050KernelAltiVec: 1051 lwz r3,_MSR(r1) 1052 oris r3,r3,MSR_VEC@h 1053 stw r3,_MSR(r1) /* enable use of AltiVec after return */ 1054 lis r3,87f@h 1055 ori r3,r3,87f@l 1056 mr r4,r2 /* current */ 1057 lwz r5,_NIP(r1) 1058 bl printk 1059 b ret_from_except 106087: .string "AltiVec used in kernel (task=%p, pc=%x) \n" 1061 .align 4 1062 1063/* 1064 * giveup_altivec(tsk) 1065 * Disable AltiVec for the task given as the argument, 1066 * and save the AltiVec registers in its thread_struct. 1067 * Enables AltiVec for use in the kernel on return. 1068 */ 1069 1070 .globl giveup_altivec 1071giveup_altivec: 1072 mfmsr r5 1073 oris r5,r5,MSR_VEC@h 1074 SYNC 1075 mtmsr r5 /* enable use of AltiVec now */ 1076 isync 1077 cmpi 0,r3,0 1078 beqlr- /* if no previous owner, done */ 1079 addi r3,r3,THREAD /* want THREAD of task */ 1080 lwz r5,PT_REGS(r3) 1081 cmpi 0,r5,0 1082 SAVE_32VR(0, r4, r3) 1083 MFVSCR(vr0) 1084 li r4,THREAD_VSCR 1085 STVX(vr0, r4, r3) 1086 beq 1f 1087 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1088 lis r3,MSR_VEC@h 1089 andc r4,r4,r3 /* disable AltiVec for previous task */ 1090 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) 10911: 1092#ifndef CONFIG_SMP 1093 li r5,0 1094 lis r4,last_task_used_altivec@ha 1095 stw r5,last_task_used_altivec@l(r4) 1096#endif /* CONFIG_SMP */ 1097 blr 1098#endif /* CONFIG_ALTIVEC */ 1099 1100/* 1101 * giveup_fpu(tsk) 1102 * Disable FP for the task given as the argument, 1103 * and save the floating-point registers in its thread_struct. 1104 * Enables the FPU for use in the kernel on return. 
 */
	.globl	giveup_fpu
giveup_fpu:
	mfmsr	r5
	ori	r5,r5,MSR_FP
	SYNC_601
	ISYNC_601
	mtmsr	r5			/* enable use of fpu now */
	SYNC_601
	isync
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpi	0,r5,0
	SAVE_32FPRS(0, r3)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR-4(r3)
	beq	1f
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r3,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r3		/* disable FP for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_math@ha
	stw	r5,last_task_used_math@l(r4)
#endif /* CONFIG_SMP */
	blr

/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address 0.
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h
	li	r3,0			/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 */
copy_and_flush:
	addi	r5,r5,-4
	addi	r6,r6,-4
4:	li	r0,L1_CACHE_LINE_SIZE/4
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr

#ifdef CONFIG_APUS
/*
 * On APUS the physical base address of the kernel is not known at compile
 * time, so the __pa/__va constants it was built with are incorrect.  The
 * __init section records the virtual addresses of the instructions that
 * use these constants, so all that has to be done is to patch those
 * instructions before continuing the kernel boot.
 *
 * r4 = The physical address of the kernel base.
 */
fix_mem_constants:
	mr	r10,r4
	addis	r10,r10,-KERNELBASE@h	/* virt_to_phys constant */
	neg	r11,r10			/* phys_to_virt constant */

	lis	r12,__vtop_table_begin@h
	ori	r12,r12,__vtop_table_begin@l
	add	r12,r12,r10		/* table begin phys address */
	lis	r13,__vtop_table_end@h
	ori	r13,r13,__vtop_table_end@l
	add	r13,r13,r10		/* table end phys address */
	subi	r12,r12,4
	subi	r13,r13,4
1:	lwzu	r14,4(r12)		/* virt address of instruction */
	add	r14,r14,r10		/* phys address of instruction */
	lwz	r15,0(r14)		/* instruction, now insert top */
	rlwimi	r15,r10,16,16,31	/* half of vp const in low half */
	stw	r15,0(r14)		/* of instruction and restore.
*/ 1209 dcbst r0,r14 /* write it to memory */ 1210 sync 1211 icbi r0,r14 /* flush the icache line */ 1212 cmpw r12,r13 1213 bne 1b 1214 sync /* additional sync needed on g4 */ 1215 isync 1216 1217/* 1218 * Map the memory where the exception handlers will 1219 * be copied to when hash constants have been patched. 1220 */ 1221#ifdef CONFIG_APUS_FAST_EXCEPT 1222 lis r8,0xfff0 1223#else 1224 lis r8,0 1225#endif 1226 ori r8,r8,0x2 /* 128KB, supervisor */ 1227 mtspr DBAT3U,r8 1228 mtspr DBAT3L,r8 1229 1230 lis r12,__ptov_table_begin@h 1231 ori r12,r12,__ptov_table_begin@l 1232 add r12,r12,r10 /* table begin phys address */ 1233 lis r13,__ptov_table_end@h 1234 ori r13,r13,__ptov_table_end@l 1235 add r13,r13,r10 /* table end phys address */ 1236 subi r12,r12,4 1237 subi r13,r13,4 12381: lwzu r14,4(r12) /* virt address of instruction */ 1239 add r14,r14,r10 /* phys address of instruction */ 1240 lwz r15,0(r14) /* instruction, now insert top */ 1241 rlwimi r15,r11,16,16,31 /* half of pv const in low half*/ 1242 stw r15,0(r14) /* of instruction and restore. */ 1243 dcbst r0,r14 /* write it to memory */ 1244 sync 1245 icbi r0,r14 /* flush the icache line */ 1246 cmpw r12,r13 1247 bne 1b 1248 1249 sync /* additional sync needed on g4 */ 1250 isync /* No speculative loading until now */ 1251 blr 1252 1253/*********************************************************************** 1254 * Please note that on APUS the exception handlers are located at the 1255 * physical address 0xfff0000. For this reason, the exception handlers 1256 * cannot use relative branches to access the code below. 1257 ***********************************************************************/ 1258#endif /* CONFIG_APUS */ 1259 1260#ifdef CONFIG_SMP 1261#ifdef CONFIG_GEMINI 1262 .globl __secondary_start_gemini 1263__secondary_start_gemini: 1264 mfspr r4,HID0 1265 ori r4,r4,HID0_ICFI 1266 li r3,0 1267 ori r3,r3,HID0_ICE 1268 andc r4,r4,r3 1269 mtspr HID0,r4 1270 sync 1271 b __secondary_start 1272#endif /* CONFIG_GEMINI */ 1273 .globl __secondary_start_psurge 1274__secondary_start_psurge: 1275 li r24,1 /* cpu # */ 1276 b __secondary_start_psurge99 1277 .globl __secondary_start_psurge2 1278__secondary_start_psurge2: 1279 li r24,2 /* cpu # */ 1280 b __secondary_start_psurge99 1281 .globl __secondary_start_psurge3 1282__secondary_start_psurge3: 1283 li r24,3 /* cpu # */ 1284 b __secondary_start_psurge99 1285__secondary_start_psurge99: 1286 /* we come in here with IR=0 and DR=1, and DBAT 0 1287 set to map the 0xf0000000 - 0xffffffff region */ 1288 mfmsr r0 1289 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */ 1290 SYNC 1291 mtmsr r0 1292 isync 1293 1294 .globl __secondary_start 1295__secondary_start: 1296#ifdef CONFIG_PPC64BRIDGE 1297 mfmsr r0 1298 clrldi r0,r0,1 /* make sure it's in 32-bit mode */ 1299 SYNC 1300 MTMSRD(r0) 1301 isync 1302#endif 1303 /* Copy some CPU settings from CPU 0 */ 1304 bl __restore_cpu_setup 1305 1306 lis r3,-KERNELBASE@h 1307 mr r4,r24 1308 bl identify_cpu 1309 bl call_setup_cpu /* Call setup_cpu for this CPU */ 1310#ifdef CONFIG_6xx 1311 lis r3,-KERNELBASE@h 1312 bl init_idle_6xx 1313#endif /* CONFIG_6xx */ 1314 1315 /* get current */ 1316 lis r2,current_set@h 1317 ori r2,r2,current_set@l 1318 tophys(r2,r2) 1319 slwi r24,r24,2 /* get current_set[cpu#] */ 1320 lwzx r2,r2,r24 1321 1322 /* stack */ 1323 addi r1,r2,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD 1324 li r0,0 1325 tophys(r3,r1) 1326 stw r0,0(r3) 1327 1328 /* load up the MMU */ 1329 bl load_up_mmu 1330 1331 /* ptr to phys current thread */ 1332 tophys(r4,r2) 1333 addi 
r4,r4,THREAD /* phys address of our thread_struct */ 1334 CLR_TOP32(r4) 1335 mtspr SPRG3,r4 1336 li r3,0 1337 mtspr SPRG2,r3 /* 0 => r1 has kernel sp */ 1338 stw r3,PT_REGS(r4) /* set thread.regs to 0 for kernel thread */ 1339 1340 /* enable MMU and jump to start_secondary */ 1341 li r4,MSR_KERNEL 1342 FIX_SRR1(r4,r5) 1343 lis r3,start_secondary@h 1344 ori r3,r3,start_secondary@l 1345 mtspr SRR0,r3 1346 mtspr SRR1,r4 1347 SYNC 1348 RFI 1349#endif /* CONFIG_SMP */ 1350 1351/* 1352 * Those generic dummy functions are kept for CPUs not 1353 * included in CONFIG_6xx 1354 */ 1355_GLOBAL(__setup_cpu_power3) 1356 blr 1357_GLOBAL(__setup_cpu_power4) 1358 blr 1359_GLOBAL(__setup_cpu_ppc970) 1360 blr 1361_GLOBAL(__setup_cpu_generic) 1362 blr 1363 1364#ifndef CONFIG_6xx 1365_GLOBAL(__save_cpu_setup) 1366 blr 1367_GLOBAL(__restore_cpu_setup) 1368#ifdef CONFIG_POWER4 1369 /* turn off real-mode cache inhibit on the PPC970 */ 1370 mfspr r0,SPRN_PVR 1371 srwi r0,r0,16 1372 cmpwi r0,0x39 1373 beq ppc970_setup_hid 1374 blr 1375#endif 1376#endif /* CONFIG_6xx */ 1377 1378/* 1379 * Load stuff into the MMU. Intended to be called with 1380 * IR=0 and DR=0. 1381 */ 1382load_up_mmu: 1383 sync /* Force all PTE updates to finish */ 1384 isync 1385 tlbia /* Clear all TLB entries */ 1386 sync /* wait for tlbia/tlbie to finish */ 1387 TLBSYNC /* ... on all CPUs */ 1388 /* Load the SDR1 register (hash table base & size) */ 1389 lis r6,_SDR1@ha 1390 tophys(r6,r6) 1391 lwz r6,_SDR1@l(r6) 1392 mtspr SDR1,r6 1393#ifdef CONFIG_PPC64BRIDGE 1394 /* clear the ASR so we only use the pseudo-segment registers. */ 1395 li r6,0 1396 mtasr r6 1397#endif /* CONFIG_PPC64BRIDGE */ 1398 li r0,16 /* load up segment register values */ 1399 mtctr r0 /* for context 0 */ 1400 lis r3,0x2000 /* Ku = 1, VSID = 0 */ 1401 li r4,0 14023: mtsrin r3,r4 1403 addi r3,r3,0x111 /* increment VSID */ 1404 addis r4,r4,0x1000 /* address of next segment */ 1405 bdnz 3b 1406#ifndef CONFIG_POWER4 1407/* Load the BAT registers with the values set up by MMU_init. 1408 MMU_init takes care of whether we're on a 601 or not. */ 1409 mfpvr r3 1410 srwi r3,r3,16 1411 cmpwi r3,1 1412 lis r3,BATS@ha 1413 addi r3,r3,BATS@l 1414 tophys(r3,r3) 1415 LOAD_BAT(0,r3,r4,r5) 1416 LOAD_BAT(1,r3,r4,r5) 1417 LOAD_BAT(2,r3,r4,r5) 1418 LOAD_BAT(3,r3,r4,r5) 1419#endif /* CONFIG_POWER4 */ 1420 blr 1421 1422/* 1423 * This is where the main kernel code starts. 1424 */ 1425start_here: 1426 /* ptr to current */ 1427 lis r2,init_task_union@h 1428 ori r2,r2,init_task_union@l 1429 /* Set up for using our exception vectors */ 1430 /* ptr to phys current thread */ 1431 tophys(r4,r2) 1432 addi r4,r4,THREAD /* init task's THREAD */ 1433 CLR_TOP32(r4) 1434 mtspr SPRG3,r4 1435 li r3,0 1436 mtspr SPRG2,r3 /* 0 => r1 has kernel sp */ 1437 1438 /* stack */ 1439 addi r1,r2,TASK_UNION_SIZE 1440 li r0,0 1441 stwu r0,-STACK_FRAME_OVERHEAD(r1) 1442/* 1443 * Do early bootinfo parsing, platform-specific initialization, 1444 * and set up the MMU. 1445 */ 1446 mr r3,r31 1447 mr r4,r30 1448 mr r5,r29 1449 mr r6,r28 1450 mr r7,r27 1451 bl machine_init 1452 bl MMU_init 1453 1454#ifdef CONFIG_APUS 1455 /* Copy exception code to exception vector base on APUS. 
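 * As a reminder of what the copy_and_flush() call below does (documented
 * with the routine itself earlier in this file), a rough C restatement --
 * sketch only, with line_size standing in for L1_CACHE_LINE_SIZE:
 *
 *	void copy_vectors(char *dst, char *src, unsigned long nbytes,
 *			  unsigned long line_size)
 *	{
 *		unsigned long off, i;
 *
 *		for (off = 0; off < nbytes; off += line_size) {
 *			// copy one cache line, a word at a time
 *			for (i = 0; i < line_size; i += 4)
 *				*(unsigned long *)(dst + off + i) =
 *					*(unsigned long *)(src + off + i);
 *			// the asm also does dcbst/sync/icbi on the line so
 *			// the copied vectors are coherent in the I-cache
 *		}
 *		// followed by a trailing sync/isync
 *	}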
 */
	lis	r4,KERNELBASE@h
#ifdef CONFIG_APUS_FAST_EXCEPT
	lis	r3,0xfff0		/* Copy to 0xfff00000 */
#else
	lis	r3,0			/* Copy to 0x00000000 */
#endif
	li	r5,0x4000		/* # bytes of memory to copy */
	li	r6,0
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
#endif /* CONFIG_APUS */

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	FIX_SRR1(r3,r5)
	mtspr	SRR0,r4
	mtspr	SRR1,r3
	SYNC
	RFI
/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(r0)		/* This must match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	FIX_SRR1(r4,r5)
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SRR0,r3
	mtspr	SRR1,r4
	SYNC
	RFI

/*
 * Set up the segment registers for a new context.
 * (The VSID arithmetic used here is restated as a C sketch at the
 * end of the clear_bats comment below.)
 */
_GLOBAL(set_context)
	mulli	r3,r3,897		/* multiply context by skew factor */
	rlwinm	r3,r3,4,8,27		/* VSID = (context & 0xfffff) << 4 */
	addis	r3,r3,0x6000		/* Set Ks, Ku bits */
	li	r0,NUM_USER_SEGMENTS
	mtctr	r0

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	lis	r5, KERNELBASE@h
	lwz	r5, 0xf0(r5)
	stw	r4, 0x4(r5)
#endif

	li	r4,0
BEGIN_FTR_SECTION
	DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3:	isync
#ifdef CONFIG_PPC64BRIDGE
	slbie	r4
#endif /* CONFIG_PPC64BRIDGE */
	mtsrin	r3,r4
	addi	r3,r3,0x111		/* next VSID */
	rlwinm	r3,r3,0,8,3		/* clear out any overflow from VSID field */
	addis	r4,r4,0x1000		/* address of next segment */
	bdnz	3b
	sync
	isync
	blr

/*
 * An undocumented "feature" of the 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear BAT3 and BAT4, so
 * this makes sure it's done.
 *  -- Cort
 */
clear_bats:
	li	r20,0
	mfspr	r9,PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	r9, 1
	beq	1f

	mtspr	DBAT0U,r20
	mtspr	DBAT0L,r20
	mtspr	DBAT1U,r20
	mtspr	DBAT1L,r20
	mtspr	DBAT2U,r20
	mtspr	DBAT2L,r20
	mtspr	DBAT3U,r20
	mtspr	DBAT3L,r20
1:
	mtspr	IBAT0U,r20
	mtspr	IBAT0L,r20
	mtspr	IBAT1U,r20
	mtspr	IBAT1L,r20
	mtspr	IBAT2U,r20
	mtspr	IBAT2L,r20
	mtspr	IBAT3U,r20
	mtspr	IBAT3L,r20
BEGIN_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup has
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors.  However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
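	 *
	 * (As promised next to set_context above, a C restatement of the
	 * first segment-register value it computes for a context -- sketch
	 * only, using the raw constants from the assembly rather than
	 * symbolic names:)
	 *
	 *	unsigned long first_sr_for_context(unsigned long context)
	 *	{
	 *		unsigned long vsid;
	 *
	 *		vsid = (context * 897) << 4;	// skew successive contexts
	 *		vsid &= 0x00fffff0;		// keep the VSID field
	 *		return vsid | 0x60000000;	// set the Ks and Ku bits
	 *	}
	 *	// Each successive user segment register then gets VSID + 0x111,
	 *	// which is what the mtsrin loop above produces.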
1583 */ 1584 mtspr SPRN_DBAT4U,r20 1585 mtspr SPRN_DBAT4L,r20 1586 mtspr SPRN_DBAT5U,r20 1587 mtspr SPRN_DBAT5L,r20 1588 mtspr SPRN_DBAT6U,r20 1589 mtspr SPRN_DBAT6L,r20 1590 mtspr SPRN_DBAT7U,r20 1591 mtspr SPRN_DBAT7L,r20 1592 mtspr SPRN_IBAT4U,r20 1593 mtspr SPRN_IBAT4L,r20 1594 mtspr SPRN_IBAT5U,r20 1595 mtspr SPRN_IBAT5L,r20 1596 mtspr SPRN_IBAT6U,r20 1597 mtspr SPRN_IBAT6L,r20 1598 mtspr SPRN_IBAT7U,r20 1599 mtspr SPRN_IBAT7L,r20 1600END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) 1601 blr 1602 1603flush_tlbs: 1604 lis r20, 0x40 16051: addic. r20, r20, -0x1000 1606 tlbie r20 1607 blt 1b 1608 sync 1609 blr 1610 1611mmu_off: 1612 addi r4, r3, __after_mmu_off - _start 1613 mfmsr r3 1614 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */ 1615 beqlr 1616 andc r3,r3,r0 1617 mtspr SRR0,r4 1618 mtspr SRR1,r3 1619 sync 1620 RFI 1621 1622#ifndef CONFIG_POWER4 1623/* 1624 * Use the first pair of BAT registers to map the 1st 16MB 1625 * of RAM to KERNELBASE. From this point on we can't safely 1626 * call OF any more. 1627 */ 1628initial_bats: 1629 lis r11,KERNELBASE@h 1630#ifndef CONFIG_PPC64BRIDGE 1631 mfspr r9,PVR 1632 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ 1633 cmpi 0,r9,1 1634 bne 4f 1635 ori r11,r11,4 /* set up BAT registers for 601 */ 1636 li r8,0x7f /* valid, block length = 8MB */ 1637 oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */ 1638 oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */ 1639 mtspr IBAT0U,r11 /* N.B. 601 has valid bit in */ 1640 mtspr IBAT0L,r8 /* lower BAT register */ 1641 mtspr IBAT1U,r9 1642 mtspr IBAT1L,r10 1643 isync 1644 blr 1645#endif /* CONFIG_PPC64BRIDGE */ 1646 16474: tophys(r8,r11) 1648#ifdef CONFIG_SMP 1649 ori r8,r8,0x12 /* R/W access, M=1 */ 1650#else 1651 ori r8,r8,2 /* R/W access */ 1652#endif /* CONFIG_SMP */ 1653#ifdef CONFIG_APUS 1654 ori r11,r11,BL_8M<<2|0x2 /* set up 8MB BAT registers for 604 */ 1655#else 1656 ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */ 1657#endif /* CONFIG_APUS */ 1658 1659#ifdef CONFIG_PPC64BRIDGE 1660 /* clear out the high 32 bits in the BAT */ 1661 clrldi r11,r11,32 1662 clrldi r8,r8,32 1663#endif /* CONFIG_PPC64BRIDGE */ 1664 mtspr DBAT0L,r8 /* N.B. 
6xx (not 601) have valid */ 1665 mtspr DBAT0U,r11 /* bit in upper BAT register */ 1666 mtspr IBAT0L,r8 1667 mtspr IBAT0U,r11 1668 isync 1669 blr 1670 1671#if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) 1672setup_disp_bat: 1673 /* 1674 * setup the display bat prepared for us in prom.c 1675 */ 1676 mflr r8 1677 bl reloc_offset 1678 mtlr r8 1679 addis r8,r3,disp_BAT@ha 1680 addi r8,r8,disp_BAT@l 1681 lwz r11,0(r8) 1682 lwz r8,4(r8) 1683 mfspr r9,PVR 1684 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ 1685 cmpi 0,r9,1 1686 beq 1f 1687 mtspr DBAT3L,r8 1688 mtspr DBAT3U,r11 1689 blr 16901: mtspr IBAT3L,r8 1691 mtspr IBAT3U,r11 1692 blr 1693 1694#endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */ 1695 1696#else /* CONFIG_POWER4 */ 1697ppc970_setup_hid: 1698 li r0,0 1699 sync 1700 mtspr 0x3f4,r0 1701 isync 1702 sync 1703 mtspr 0x3f6,r0 1704 isync 1705 mfspr r0,SPRN_HID0 1706 li r11,5 /* clear DOZE and SLEEP */ 1707 rldimi r0,r11,52,8 /* and set NAP and DPM */ 1708 li r11,0 1709 rldimi r0,r11,32,31 /* clear EN_ATTN */ 1710 mtspr SPRN_HID0,r0 1711 mfspr r0,SPRN_HID0 1712 mfspr r0,SPRN_HID0 1713 mfspr r0,SPRN_HID0 1714 mfspr r0,SPRN_HID0 1715 mfspr r0,SPRN_HID0 1716 mfspr r0,SPRN_HID0 1717 sync 1718 isync 1719 mfspr r0,SPRN_HID1 1720 li r11,0x1200 /* enable i-fetch cacheability */ 1721 sldi r11,r11,44 /* and prefetch */ 1722 or r0,r0,r11 1723 mtspr SPRN_HID1,r0 1724 mtspr SPRN_HID1,r0 1725 isync 1726 li r0,0 1727 sync 1728 mtspr 0x137,0 1729 isync 1730 blr 1731#endif /* CONFIG_POWER4 */ 1732 1733#ifdef CONFIG_8260 1734/* Jump into the system reset for the rom. 1735 * We first disable the MMU, and then jump to the ROM reset address. 1736 * 1737 * r3 is the board info structure, r4 is the location for starting. 1738 * I use this for building a small kernel that can load other kernels, 1739 * rather than trying to write or rely on a rom monitor that can tftp load. 1740 */ 1741 .globl m8260_gorom 1742m8260_gorom: 1743 mfmsr r0 1744 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */ 1745 sync 1746 mtmsr r0 1747 sync 1748 mfspr r11, HID0 1749 lis r10, 0 1750 ori r10,r10,HID0_ICE|HID0_DCE 1751 andc r11, r11, r10 1752 mtspr HID0, r11 1753 isync 1754 li r5, MSR_ 1755 lis r6,2f@h 1756 addis r6,r6,-KERNELBASE@h 1757 ori r6,r6,2f@l 1758 mtspr SRR0,r6 1759 mtspr SRR1,r5 1760 isync 1761 sync 1762 rfi 17632: 1764 mtlr r4 1765 blr 1766#endif 1767 1768 1769/* 1770 * We put a few things here that have to be page-aligned. 1771 * This stuff goes at the beginning of the data segment, 1772 * which is page-aligned. 1773 */ 1774 .data 1775 .globl sdata 1776sdata: 1777 .globl empty_zero_page 1778empty_zero_page: 1779 .space 4096 1780 1781 .globl swapper_pg_dir 1782swapper_pg_dir: 1783 .space 4096 1784 1785/* 1786 * This space gets a copy of optional info passed to us by the bootstrap 1787 * Used to pass parameters into the kernel like root=/dev/sda1, etc. 1788 */ 1789 .globl cmd_line 1790cmd_line: 1791 .space 512 1792 1793 .globl intercept_table 1794intercept_table: 1795 .long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700 1796 .long i0x800, 0, 0, 0, 0, i0xd00, 0, 0 1797 .long 0, 0, 0, i0x1300, 0, 0, 0, 0 1798 .long 0, 0, 0, 0, 0, 0, 0, 0 1799 .long 0, 0, 0, 0, 0, 0, 0, 0 1800 .long 0, 0, 0, 0, 0, 0, 0, 0 1801 1802#ifdef CONFIG_BDI_SWITCH 1803/* Room for two PTE pointers, usually the kernel and current user pointers 1804 * to their respective root page table. 1805 */ 1806abatron_pteptrs: 1807 .space 8 1808#endif 1809
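
/*
 * For reference (sketch only): the two words reserved in abatron_pteptrs
 * above are written by code earlier in this file -- start_here stores the
 * kernel page-table pointer (swapper_pg_dir) in the first word, and
 * set_context stores the current user PGDIR in the second.  Seen from C
 * it amounts to something like the following (a hypothetical struct for
 * illustration, not a declaration that exists in the tree):
 *
 *	struct abatron_pteptrs_layout {
 *		unsigned long kernel_pgdir;	// filled in once, in start_here
 *		unsigned long user_pgdir;	// updated on every set_context()
 *	};
 */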