/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>

/*
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	.globl system_reset_pSeries;
system_reset_pSeries:
	HMT_MEDIUM;
	SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap. We only handle no state loss and
	 * supervisor state loss. We do -not- handle hypervisor
	 * state loss at this time.
	 */
	mfspr	r13,SPRN_SRR1
	rlwinm.	r13,r13,47-31,30,31
	beq	9f

	/* waking up from powersave (nap) state */
	cmpwi	cr1,r13,2
	/* Total loss of HV state is fatal, we could try to use the
	 * PIR to locate a PACA, then use an emergency stack etc...
	 * but for now, let's just stay stuck here
	 */
	bgt	cr1,.
	GET_PACA(r13)

#ifdef CONFIG_KVM_BOOK3S_64_HV
	lbz	r0,PACAPROCSTART(r13)
	cmpwi	r0,0x80
	bne	1f
	li	r0,1
	stb	r0,PACAPROCSTART(r13)
	b	kvm_start_guest
1:
#endif

	beq	cr1,2f
	b	.power7_wakeup_noloss
2:	b	.power7_wakeup_loss
9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

	. = 0x200
machine_check_pSeries_1:
	/* This is moved out of line as it can be patched by FW, but
	 * some code path might still want to branch into the original
	 * vector
	 */
	b	machine_check_pSeries

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
#ifndef CONFIG_POWER4_ONLY
BEGIN_FTR_SECTION
	b	data_access_check_stab
data_access_not_stab:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
#endif
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
				 KVMTEST, 0x300)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	/* We open code these as we can't have a ". = x" (even with
	 * x = "." within a feature section
	 */
	. = 0x500;
	.globl hardware_interrupt_pSeries;
	.globl hardware_interrupt_hv;
hardware_interrupt_pSeries:
hardware_interrupt_hv:
	BEGIN_FTR_SECTION
		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
					    EXC_HV, SOFTEN_TEST_HV)
		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
	FTR_SECTION_ELSE
		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
					    EXC_STD, SOFTEN_TEST_HV_201)
		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)

	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)

	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)

	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)

	MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
	MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)

	STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)

	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9,PACA_EXGEN+EX_R9(r13)
	std	r10,PACA_EXGEN+EX_R10(r13)
	mfcr	r9
	KVMTEST(0xc00)
	GET_SCRATCH0(r13)
#endif
BEGIN_FTR_SECTION
	cmpdi	r0,0x1ebe
	beq-	1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
	mr	r9,r13
	GET_PACA(r13)
	mfspr	r11,SPRN_SRR0
	mfspr	r12,SPRN_SRR1
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, system_call_entry)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)

/* Fast LE/BE switch system call */
1:	mfspr	r12,SPRN_SRR1
	xori	r12,r12,MSR_LE
	mtspr	SPRN_SRR1,r12
	rfid		/* return to userspace */
	b	.

	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)

	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
	 * out of line to handle them
	 */
	. = 0xe00
	b	h_data_storage_hv
	. = 0xe20
	b	h_instr_storage_hv
	. = 0xe40
	b	emulation_assist_hv
	. = 0xe50
	b	hmi_exception_hv
	. = 0xe60
	b	hmi_exception_hv

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
performance_monitor_pSeries_1:
	. = 0xf00
	b	performance_monitor_pSeries

altivec_unavailable_pSeries_1:
	. = 0xf20
	b	altivec_unavailable_pSeries

vsx_unavailable_pSeries_1:
	. = 0xf40
	b	vsx_unavailable_pSeries

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#endif /* CONFIG_CBE_RAS */

	. = 0x3000

/*** Out of line interrupts support ***/

	/* moved from 0x200 */
machine_check_pSeries:
	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
				 EXC_STD, KVMTEST, 0x200)
	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)

#ifndef CONFIG_POWER4_ONLY
	/* moved from 0x300 */
data_access_check_stab:
	GET_PACA(r13)
	std	r9,PACA_EXSLB+EX_R9(r13)
	std	r10,PACA_EXSLB+EX_R10(r13)
	mfspr	r10,SPRN_DAR
	mfspr	r9,SPRN_DSISR
	srdi	r10,r10,60
	rlwimi	r10,r9,16,0x20
#ifdef CONFIG_KVM_BOOK3S_PR
	lbz	r9,HSTATE_IN_GUEST(r13)
	rlwimi	r10,r9,8,0x300
#endif
	mfcr	r9
	cmpwi	r10,0x2c
	beq	do_stab_bolted_pSeries
	mtcrf	0x80,r9
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	b	data_access_not_stab
do_stab_bolted_pSeries:
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	GET_SCRATCH0(r10)
	std	r10,PACA_EXSLB+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
#endif /* CONFIG_POWER4_ONLY */

	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)

	.align	7
	/* moved from 0xe00 */
	STD_EXCEPTION_HV(., 0xe02, h_data_storage)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
	STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
	STD_EXCEPTION_HV(., 0xe42, emulation_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
	STD_EXCEPTION_HV(., 0xe62, hmi_exception)	/* need to flush cache ? */
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
	STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
	STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)

/*
 * An interrupt came in while soft-disabled. We set paca->irq_happened,
 * then, if it was a decrementer interrupt, we bump the dec to max
 * and return, else we hard disable and return. This is called with
 * r10 containing the value to OR to the paca field.
 */
#define MASKED_INTERRUPT(_H)				\
masked_##_H##interrupt:					\
	std	r11,PACA_EXGEN+EX_R11(r13);		\
	lbz	r11,PACAIRQHAPPENED(r13);		\
	or	r11,r11,r10;				\
	stb	r11,PACAIRQHAPPENED(r13);		\
	andi.	r10,r10,PACA_IRQ_DEC;			\
	beq	1f;					\
	lis	r10,0x7fff;				\
	ori	r10,r10,0xffff;				\
	mtspr	SPRN_DEC,r10;				\
	b	2f;					\
1:	mfspr	r10,SPRN_##_H##SRR1;			\
	rldicl	r10,r10,48,1; /* clear MSR_EE */	\
	rotldi	r10,r10,16;				\
	mtspr	SPRN_##_H##SRR1,r10;			\
2:	mtcrf	0x80,r9;				\
	ld	r9,PACA_EXGEN+EX_R9(r13);		\
	ld	r10,PACA_EXGEN+EX_R10(r13);		\
	ld	r11,PACA_EXGEN+EX_R11(r13);		\
	GET_SCRATCH0(r13);				\
	##_H##rfid;					\
	b	.

	MASKED_INTERRUPT()
	MASKED_INTERRUPT(H)
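
	/*
	 * For reference only: a rough sketch of what MASKED_INTERRUPT(H)
	 * above expands to. The _H parameter merely selects the HSRR
	 * register pair and hrfid (the empty instantiation uses SRR1 and
	 * plain rfid). This is illustrative commentary, not assembled code:
	 *
	 * masked_Hinterrupt:
	 *	std	r11,PACA_EXGEN+EX_R11(r13)
	 *	lbz	r11,PACAIRQHAPPENED(r13)
	 *	or	r11,r11,r10
	 *	stb	r11,PACAIRQHAPPENED(r13)
	 *	andi.	r10,r10,PACA_IRQ_DEC
	 *	beq	1f
	 *	lis	r10,0x7fff
	 *	ori	r10,r10,0xffff
	 *	mtspr	SPRN_DEC,r10
	 *	b	2f
	 * 1:	mfspr	r10,SPRN_HSRR1
	 *	rldicl	r10,r10,48,1		(clear MSR_EE)
	 *	rotldi	r10,r10,16
	 *	mtspr	SPRN_HSRR1,r10
	 * 2:	mtcrf	0x80,r9
	 *	ld	r9,PACA_EXGEN+EX_R9(r13)
	 *	ld	r10,PACA_EXGEN+EX_R10(r13)
	 *	ld	r11,PACA_EXGEN+EX_R11(r13)
	 *	GET_SCRATCH0(r13)
	 *	hrfid
	 *	b	.
	 */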

/*
 * Called from arch_local_irq_enable when an interrupt needs
 * to be resent. r3 contains 0x500 or 0x900 to indicate which
 * kind of interrupt. MSR:EE is already off. We generate a
 * stack frame as if a real interrupt had happened.
 *
 * Note: While MSR:EE is off, we need to make sure that _MSR
 * in the generated frame has EE set to 1 or the exception
 * handler will not properly re-enable interrupts.
 */
_GLOBAL(__replay_interrupt)
	/* We are going to jump to the exception common code which
	 * will retrieve various register values from the PACA which
	 * we don't give a damn about, so we don't bother storing them.
	 */
	mfmsr	r12
	mflr	r11
	mfcr	r9
	ori	r12,r12,MSR_EE
	andi.	r3,r3,0x0800
	bne	decrementer_common
	b	hardware_interrupt_common

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
	.align 7
system_reset_fwnmi:
	HMT_MEDIUM
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

#endif /* CONFIG_PPC_PSERIES */

#ifdef __DISABLED__
/*
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't happen for now anymore but will once we re-implement
 * dynamic VSIDs for shared page tables
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	GET_SCRATCH0(r10)
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32
	mfmsr	r10
	mfspr	r11,SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1			/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.				/* prevent spec. execution */
#endif /* __DISABLED__ */

	.align	7
	.globl	__end_interrupts
__end_interrupts:

/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an addi instruction, these handlers must be in
 * the first 32k of the kernel image.
 */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

	.align	7
system_call_entry:
	b	system_call_common

ppc64_runlatch_on_trampoline:
	b	.__ppc64_runlatch_on

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	ld	r10,EX_R3(r3)
	std	r10,GPR3(r1)
	SAVE_GPR(4,r1)
	SAVE_4GPRS(5,r1)
	ld	r9,EX_R9(r3)
	ld	r10,EX_R10(r3)
	SAVE_2GPRS(9,r1)
	ld	r9,EX_R11(r3)
	ld	r10,EX_R12(r3)
	ld	r11,EX_R13(r3)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)
BEGIN_FTR_SECTION
	ld	r10,EX_CFAR(r3)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	SAVE_8GPRS(14,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
	ld	r11,exception_marker@toc(r2)
	std	r12,RESULT(r1)
	std	r11,STACK_FRAME_OVERHEAD-16(r1)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	DISABLE_INTS
	ld	r12,_MSR(r1)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl	h_data_storage_common
h_data_storage_common:
	mfspr	r10,SPRN_HDAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_HDSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unknown_exception
	b	.ret_from_except

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	DISABLE_INTS
	ld	r12,_MSR(r1)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)

/*
 * Here is the common SLB miss user handler that is used when going to
 * virtual mode for SLB misses; it is currently not used.
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */


/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	mtctr	r11
#endif

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

2:	mfspr	r11,SPRN_SRR0
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b


#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE
1:	bl	.load_up_fpu
	b	fast_exception_return

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
	bl	.load_up_altivec
	b	fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.altivec_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f
	b	.load_up_vsx
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.vsx_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl	__end_handlers
__end_handlers:

/*
 * Hash table stuff
 */
	.align	7
_STATIC(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa410		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	handle_dabr_fault

BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	do_ste_alloc		/* If so handle it */
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)

	clrrdi	r11,r1,THREAD_SHIFT
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success, 1 for page fault, negative for error
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

	/* Success */
	beq	fast_exc_return_irq	/* Return from exception on success */

	/* Error */
	blt-	13f

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	12f
	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	.save_nvgprs
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_dabr
12:	b	.ret_from_except_lite


/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
13:	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled.  We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	.save_nvgprs
	mr	r4,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	.bad_page_fault
	b	.ret_from_except

	/* here we have a segment miss */
do_ste_alloc:
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	bne-	handle_page_fault
	b	fast_exception_return

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,SPRN_DAR
	srdi	r11,r11,28
	rldimi	r10,r11,7,52		/* r10 = first ste of the group */

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9, 256M)
	rldic	r9,r11,12,16		/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)		/* Test valid bit of the current ste */
	andi.	r11,r11,0x80
	beq	2f
	addi	r10,r10,16
	andi.	r11,r10,0x70
	bne	1b

	/* Stick for only searching the primary group for now. */
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number ; OR in 1 to avoid entry 0 */
	mftb	r11
	rldic	r11,r11,4,57		/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry */
	subi	r10,r10,128
	or	r10,r10,r11		/* r10 is the entry to invalidate */

	isync				/* mark the entry invalid */
	ld	r11,0(r10)
	rldicl	r11,r11,56,1		/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28		/* Get the esid part of the ste */
	slbie	r11

2:	std	r9,8(r10)		/* Store the vsid part of the ste */
	eieio

	mfspr	r11,SPRN_DAR		/* Get the new esid */
	clrrdi	r11,r11,28		/* Permits a full 32b of ESID */
	ori	r11,r11,0x90		/* Turn on valid and kp */
	std	r11,0(r10)		/* Put new entry back into the stab */

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2
	mtmsrd	r10,1

	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

	/* pseries and powernv need to keep the whole page from
	 * 0x7000 to 0x8000 free for use by the firmware
	 */
	. = 0x8000
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */

/* Space for CPU0's segment table */
	.balign	4096
	.globl initial_stab
initial_stab:
	.space	4096

#ifdef CONFIG_PPC_POWERNV
_GLOBAL(opal_mc_secondary_handler)
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	clrldi	r3,r3,2
	tovirt(r3,r3)
	std	r3,PACA_OPAL_MC_EVT(r13)
	ld	r13,OPAL_MC_SRR0(r3)
	mtspr	SPRN_SRR0,r13
	ld	r13,OPAL_MC_SRR1(r3)
	mtspr	SPRN_SRR1,r13
	ld	r3,OPAL_MC_GPR3(r3)
	GET_SCRATCH0(r13)
	b	machine_check_pSeries
#endif /* CONFIG_PPC_POWERNV */