/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
#include <asm/cpuidle.h>
#include <asm/head-64.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/*
 * Following are fixed section helper macros.
 *
 * EXC_REAL_BEGIN/END - real, unrelocated exception vectors
 * EXC_VIRT_BEGIN/END - virt (AIL), unrelocated exception vectors
 * TRAMP_REAL_BEGIN - real, unrelocated helpers (virt may call these)
 * TRAMP_VIRT_BEGIN - virt, unreloc helpers (in practice, real can use)
 * EXC_COMMON - After switching to virtual, relocated mode.
 */

#define EXC_REAL_BEGIN(name, start, size)			\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_REAL_END(name, start, size)				\
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_VIRT_BEGIN(name, start, size)			\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_VIRT_END(name, start, size)				\
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_COMMON_BEGIN(name)					\
	USE_TEXT_SECTION();					\
	.balign IFETCH_ALIGN_BYTES;				\
	.global name;						\
	_ASM_NOKPROBE_SYMBOL(name);				\
	DEFINE_FIXED_SYMBOL(name, text);			\
name:

#define TRAMP_REAL_BEGIN(name)					\
	FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name)

#define TRAMP_VIRT_BEGIN(name)					\
	FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name)

#define EXC_REAL_NONE(start, size)				\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size)

#define EXC_VIRT_NONE(start, size)				\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size)

/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
 * Instead we get the base of the kernel from paca->kernelbase and or in the
 * low part of label. This requires that the label be within 64KB of
 * kernelbase, and that kernelbase be 64K aligned.
 */
#define LOAD_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label)

#define __LOAD_HANDLER(reg, label, section)				\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label, section))@l

/*
 * Branches from unrelocated code (e.g., interrupts) to labels outside
 * head-y require >64K offsets.
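 *
 * As an illustration only (a sketch with a hypothetical label "foo",
 * not actual generated code), __LOAD_FAR_HANDLER builds the full
 * offset in two steps:
 *
 *	ld	reg,PACAKBASE(r13)	# kernel base from the paca
 *	ori	reg,reg,(foo_off)@l	# low 16 bits of the offset
 *	addis	reg,reg,(foo_off)@h	# high 16 bits, so >64K reaches
 *
 * whereas LOAD_HANDLER has only the single ori, hence its 64K limit.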
 */
#define __LOAD_FAR_HANDLER(reg, label, section)				\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label, section))@l;			\
	addis	reg,reg,(ABS_ADDR(label, section))@h

/*
 * Interrupt code generation macros
 */
#define IVEC		.L_IVEC_\name\()	/* Interrupt vector address */
#define IHSRR		.L_IHSRR_\name\()	/* Sets SRR or HSRR registers */
#define IHSRR_IF_HVMODE	.L_IHSRR_IF_HVMODE_\name\() /* HSRR if HV else SRR */
#define IAREA		.L_IAREA_\name\()	/* PACA save area */
#define IVIRT		.L_IVIRT_\name\()	/* Has virt mode entry point */
#define IISIDE		.L_IISIDE_\name\()	/* Uses SRR0/1 not DAR/DSISR */
#define ICFAR		.L_ICFAR_\name\()	/* Uses CFAR */
#define ICFAR_IF_HVMODE	.L_ICFAR_IF_HVMODE_\name\() /* Uses CFAR if HV */
#define IDAR		.L_IDAR_\name\()	/* Uses DAR (or SRR0) */
#define IDSISR		.L_IDSISR_\name\()	/* Uses DSISR (or SRR1) */
#define IBRANCH_TO_COMMON	.L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
#define IREALMODE_COMMON	.L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
#define IMASK		.L_IMASK_\name\()	/* IRQ soft-mask bit */
#define IKVM_REAL	.L_IKVM_REAL_\name\()	/* Real entry tests KVM */
#define __IKVM_REAL(name)	.L_IKVM_REAL_ ## name
#define IKVM_VIRT	.L_IKVM_VIRT_\name\()	/* Virt entry tests KVM */
#define ISTACK		.L_ISTACK_\name\()	/* Set regular kernel stack */
#define __ISTACK(name)	.L_ISTACK_ ## name
#define IKUAP		.L_IKUAP_\name\()	/* Do KUAP lock */

#define INT_DEFINE_BEGIN(n)						\
.macro int_define_ ## n name

#define INT_DEFINE_END(n)						\
.endm ;									\
int_define_ ## n n ;							\
do_define_int n

.macro do_define_int name
	.ifndef IVEC
		.error "IVEC not defined"
	.endif
	.ifndef IHSRR
		IHSRR=0
	.endif
	.ifndef IHSRR_IF_HVMODE
		IHSRR_IF_HVMODE=0
	.endif
	.ifndef IAREA
		IAREA=PACA_EXGEN
	.endif
	.ifndef IVIRT
		IVIRT=1
	.endif
	.ifndef IISIDE
		IISIDE=0
	.endif
	.ifndef ICFAR
		ICFAR=1
	.endif
	.ifndef ICFAR_IF_HVMODE
		ICFAR_IF_HVMODE=0
	.endif
	.ifndef IDAR
		IDAR=0
	.endif
	.ifndef IDSISR
		IDSISR=0
	.endif
	.ifndef IBRANCH_TO_COMMON
		IBRANCH_TO_COMMON=1
	.endif
	.ifndef IREALMODE_COMMON
		IREALMODE_COMMON=0
	.else
		.if ! IBRANCH_TO_COMMON
			.error "IREALMODE_COMMON=1 but IBRANCH_TO_COMMON=0"
		.endif
	.endif
	.ifndef IMASK
		IMASK=0
	.endif
	.ifndef IKVM_REAL
		IKVM_REAL=0
	.endif
	.ifndef IKVM_VIRT
		IKVM_VIRT=0
	.endif
	.ifndef ISTACK
		ISTACK=1
	.endif
	.ifndef IKUAP
		IKUAP=1
	.endif
.endm

/*
 * All interrupts which set HSRR registers, as well as SRESET and MCE and
 * syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken,
 * so they all generally need to test whether they were taken in guest context.
 *
 * Note: SRESET and MCE may also be sent to the guest by the hypervisor, and be
 * taken with MSR[HV]=0.
 *
 * Interrupts which set SRR registers (with the above exceptions) do not
 * elevate to MSR[HV]=1 mode, though most can be taken when running with
 * MSR[HV]=1 (e.g., bare metal kernel and userspace). So these interrupts do
 * not need to test whether a guest is running because they get delivered to
 * the guest directly, including nested HV KVM guests.
 *
 * The exception is PR KVM, where the guest runs with MSR[PR]=1 and the host
 * runs with MSR[HV]=0, so the host takes all interrupts on behalf of the
 * guest.
 * PR KVM runs with LPCR[AIL]=0 which causes interrupts to always be
 * delivered to the real-mode entry point, therefore such interrupts only test
 * KVM in their real mode handlers, and only when PR KVM is possible.
 *
 * Interrupts that are taken in MSR[HV]=0 and escalate to MSR[HV]=1 are always
 * delivered in real-mode when the MMU is in hash mode because the MMU
 * registers are not set appropriately to translate host addresses. In nested
 * radix mode these can be delivered in virt-mode as the host translations are
 * used implicitly (see: effective LPID, effective PID).
 */

/*
 * If an interrupt is taken while a guest is running, it is immediately routed
 * to KVM to handle.
 */

.macro KVMTEST name handler
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	lbz	r10,HSTATE_IN_GUEST(r13)
	cmpwi	r10,0
	/* HSRR variants have the 0x2 bit added to their trap number */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	li	r10,(IVEC + 0x2)
	FTR_SECTION_ELSE
	li	r10,(IVEC)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	li	r10,(IVEC + 0x2)
	.else
	li	r10,(IVEC)
	.endif
	bne	\handler
#endif
.endm

/*
 * This is the BOOK3S interrupt entry code macro.
 *
 * This can result in one of several things happening:
 * - Branch to the _common handler, relocated, in virtual mode.
 *   These are normal interrupts (synchronous and asynchronous) handled by
 *   the kernel.
 * - Branch to KVM, relocated but real mode interrupts remain in real mode.
 *   These occur when HSTATE_IN_GUEST is set. The interrupt may be caused by
 *   / intended for host or guest kernel, but KVM must always be involved
 *   because the machine state is set for guest execution.
 * - Branch to the masked handler, unrelocated.
 *   These occur when maskable asynchronous interrupts are taken with the
 *   irq_soft_mask set.
 * - Branch to an "early" handler in real mode but relocated.
 *   This is done if early=1. MCE and HMI use these to handle errors in real
 *   mode.
 * - Fall through and continue executing in real, unrelocated mode.
 *   This is done if early=2.
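 *
 * As a concrete illustration (see the handlers later in this file), a
 * synchronous interrupt such as 0x300 data_access takes the first path:
 * its GEN_INT_ENTRY ends in GEN_BRANCH_TO_COMMON, which on a
 * non-relocatable kernel is a direct "b data_access_common_virt" for the
 * virt entry, and otherwise loads the handler address with LOAD_HANDLER
 * and reaches it via bctr.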
 */

.macro GEN_BRANCH_TO_COMMON name, virt
	.if IREALMODE_COMMON
	LOAD_HANDLER(r10, \name\()_common)
	mtctr	r10
	bctr
	.else
	.if \virt
#ifndef CONFIG_RELOCATABLE
	b	\name\()_common_virt
#else
	LOAD_HANDLER(r10, \name\()_common_virt)
	mtctr	r10
	bctr
#endif
	.else
	LOAD_HANDLER(r10, \name\()_common_real)
	mtctr	r10
	bctr
	.endif
	.endif
.endm

.macro GEN_INT_ENTRY name, virt, ool=0
	SET_SCRATCH0(r13)			/* save r13 */
	GET_PACA(r13)
	std	r9,IAREA+EX_R9(r13)		/* save r9 */
BEGIN_FTR_SECTION
	mfspr	r9,SPRN_PPR
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	HMT_MEDIUM
	std	r10,IAREA+EX_R10(r13)		/* save r10 */
	.if ICFAR
BEGIN_FTR_SECTION
	mfspr	r10,SPRN_CFAR
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	.elseif ICFAR_IF_HVMODE
BEGIN_FTR_SECTION
	BEGIN_FTR_SECTION_NESTED(69)
	mfspr	r10,SPRN_CFAR
	END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69)
FTR_SECTION_ELSE
	BEGIN_FTR_SECTION_NESTED(69)
	li	r10,0
	END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.endif
	.if \ool
	.if !\virt
	b	tramp_real_\name
	.pushsection .text
	TRAMP_REAL_BEGIN(tramp_real_\name)
	.else
	b	tramp_virt_\name
	.pushsection .text
	TRAMP_VIRT_BEGIN(tramp_virt_\name)
	.endif
	.endif

BEGIN_FTR_SECTION
	std	r9,IAREA+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	.if ICFAR || ICFAR_IF_HVMODE
BEGIN_FTR_SECTION
	std	r10,IAREA+EX_CFAR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	.endif
	INTERRUPT_TO_KERNEL
	mfctr	r10
	std	r10,IAREA+EX_CTR(r13)
	mfcr	r9
	std	r11,IAREA+EX_R11(r13)		/* save r11 - r12 */
	std	r12,IAREA+EX_R12(r13)

	/*
	 * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI],
	 * because a d-side MCE will clobber those registers so is
	 * not recoverable if they are live.
	 */
	GET_SCRATCH0(r10)
	std	r10,IAREA+EX_R13(r13)
	.if IDAR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDAR
	.else
	mfspr	r10,SPRN_DAR
	.endif
	std	r10,IAREA+EX_DAR(r13)
	.endif
	.if IDSISR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDSISR
	.else
	mfspr	r10,SPRN_DSISR
	.endif
	stw	r10,IAREA+EX_DSISR(r13)
	.endif

	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	FTR_SECTION_ELSE
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	.else
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	.endif

	.if IBRANCH_TO_COMMON
	GEN_BRANCH_TO_COMMON \name \virt
	.endif

	.if \ool
	.popsection
	.endif
.endm

/*
 * __GEN_COMMON_ENTRY is required to receive the branch from interrupt
 * entry, except in the case of the real-mode handlers which require
 * __GEN_REALMODE_COMMON_ENTRY.
 *
 * This switches to virtual mode and sets MSR[RI].
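 *
 * Illustrative shape of what the macro below generates for a
 * hypothetical interrupt "foo" (a sketch, not additional code):
 *
 *	foo_common_real:
 *		KVMTEST (if IKVM_REAL)
 *		mtmsrd of PACAKMSR value  # MMU on; RI left clear if SRRs live
 *	foo_common_virt:
 *		KVMTEST (if IKVM_VIRT)
 *		... falls through to the common body ...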
 */
.macro __GEN_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real, text)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name kvm_interrupt
	.endif

	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	/* MSR[RI] is clear iff using SRR regs */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	xori	r10,r10,MSR_RI
	END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
	.elseif ! IHSRR
	xori	r10,r10,MSR_RI
	.endif
	mtmsrd	r10

	.if IVIRT
	.if IKVM_VIRT
	b	1f /* skip the virt test coming from real */
	.endif

	.balign IFETCH_ALIGN_BYTES
DEFINE_FIXED_SYMBOL(\name\()_common_virt, text)
\name\()_common_virt:
	.if IKVM_VIRT
		KVMTEST \name kvm_interrupt
1:
	.endif
	.endif /* IVIRT */
.endm

/*
 * Don't switch to virt mode. Used for early MCE and HMI handlers that
 * want to run in real mode.
 */
.macro __GEN_REALMODE_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real, text)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name kvm_interrupt
	.endif
.endm

.macro __GEN_COMMON_BODY name
	.if IMASK
		.if ! ISTACK
		.error "No support for masked interrupt to use custom stack"
		.endif

		/* If coming from user, skip soft-mask tests. */
		andi.	r10,r12,MSR_PR
		bne	3f

		/*
		 * Kernel code running below __end_soft_masked may be
		 * implicitly soft-masked if it is within the regions
		 * in the soft mask table.
		 */
		LOAD_HANDLER(r10, __end_soft_masked)
		cmpld	r11,r10
		bge+	1f

		/* SEARCH_SOFT_MASK_TABLE clobbers r9,r10,r12 */
		mtctr	r12
		stw	r9,PACA_EXGEN+EX_CCR(r13)
		SEARCH_SOFT_MASK_TABLE
		cmpdi	r12,0
		mfctr	r12		/* Restore r12 to SRR1 */
		lwz	r9,PACA_EXGEN+EX_CCR(r13)
		beq	1f		/* Not in soft-mask table */
		li	r10,IMASK
		b	2f		/* In soft-mask table, always mask */

		/* Test the soft mask state against our interrupt's bit */
1:		lbz	r10,PACAIRQSOFTMASK(r13)
2:		andi.	r10,r10,IMASK
		/* Associate vector numbers with bits in paca->irq_happened */
		.if IVEC == 0x500 || IVEC == 0xea0
		li	r10,PACA_IRQ_EE
		.elseif IVEC == 0x900
		li	r10,PACA_IRQ_DEC
		.elseif IVEC == 0xa00 || IVEC == 0xe80
		li	r10,PACA_IRQ_DBELL
		.elseif IVEC == 0xe60
		li	r10,PACA_IRQ_HMI
		.elseif IVEC == 0xf00
		li	r10,PACA_IRQ_PMI
		.else
		.abort "Bad maskable vector"
		.endif

		.if IHSRR_IF_HVMODE
		BEGIN_FTR_SECTION
		bne	masked_Hinterrupt
		FTR_SECTION_ELSE
		bne	masked_interrupt
		ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
		.elseif IHSRR
		bne	masked_Hinterrupt
		.else
		bne	masked_interrupt
		.endif
	.endif

	.if ISTACK
	andi.	r10,r12,MSR_PR		/* See if coming from user	*/
3:	mr	r10,r1			/* Save r1			*/
	subi	r1,r1,INT_FRAME_SIZE	/* alloc frame on kernel stack	*/
	beq-	100f
	ld	r1,PACAKSAVE(r13)	/* kernel stack to use		*/
100:	tdgei	r1,-INT_FRAME_SIZE	/* trap if r1 is in userspace	*/
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0
	.endif

	std	r9,_CCR(r1)		/* save CR in stackframe	*/
	std	r11,_NIP(r1)		/* save SRR0 in stackframe	*/
	std	r12,_MSR(r1)		/* save SRR1 in stackframe	*/
	std	r10,0(r1)		/* make stack chain pointer	*/
	std	r0,GPR0(r1)		/* save r0 in stackframe	*/
	std	r10,GPR1(r1)		/* save r1 in stackframe	*/

	/* Mark our [H]SRRs valid for return */
	li	r10,1
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	stb	r10,PACAHSRR_VALID(r13)
	FTR_SECTION_ELSE
	stb	r10,PACASRR_VALID(r13)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	stb	r10,PACAHSRR_VALID(r13)
	.else
	stb	r10,PACASRR_VALID(r13)
	.endif

	.if ISTACK
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1, cr0
	.endif
	beq	101f			/* if from kernel mode		*/
BEGIN_FTR_SECTION
	ld	r9,IAREA+EX_PPR(r13)	/* Read PPR from paca		*/
	std	r9,_PPR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
101:
	.else
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1
	.endif
	.endif

	/* Save original regs values from save area to stack frame. */
	ld	r9,IAREA+EX_R9(r13)	/* move r9, r10 to stackframe	*/
	ld	r10,IAREA+EX_R10(r13)
	std	r9,GPR9(r1)
	std	r10,GPR10(r1)
	ld	r9,IAREA+EX_R11(r13)	/* move r11 - r13 to stackframe	*/
	ld	r10,IAREA+EX_R12(r13)
	ld	r11,IAREA+EX_R13(r13)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)

	SAVE_NVGPRS(r1)

	.if IDAR
	.if IISIDE
	ld	r10,_NIP(r1)
	.else
	ld	r10,IAREA+EX_DAR(r13)
	.endif
	std	r10,_DAR(r1)
	.endif

	.if IDSISR
	.if IISIDE
	ld	r10,_MSR(r1)
	lis	r11,DSISR_SRR1_MATCH_64S@h
	and	r10,r10,r11
	.else
	lwz	r10,IAREA+EX_DSISR(r13)
	.endif
	std	r10,_DSISR(r1)
	.endif

BEGIN_FTR_SECTION
	.if ICFAR || ICFAR_IF_HVMODE
	ld	r10,IAREA+EX_CFAR(r13)
	.else
	li	r10,0
	.endif
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,IAREA+EX_CTR(r13)
	std	r10,_CTR(r1)
	std	r2,GPR2(r1)		/* save r2 in stackframe	*/
	SAVE_GPRS(3, 8, r1)		/* save r3 - r8 in stackframe	*/
	mflr	r9			/* Get LR, later save to stack	*/
	LOAD_PACA_TOC()			/* get kernel TOC into r2	*/
	std	r9,_LINK(r1)
	lbz	r10,PACAIRQSOFTMASK(r13)
	mfspr	r11,SPRN_XER		/* save XER in stackframe	*/
	std	r10,SOFTE(r1)
	std	r11,_XER(r1)
	li	r9,IVEC
	std	r9,_TRAP(r1)		/* set trap number		*/
	li	r10,0
	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
	std	r10,RESULT(r1)		/* clear regs->result		*/
	std	r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame	*/
.endm

/*
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 *
 * If stack=0, then the stack is already set in r1, and r1 is saved in r10.
 * PPR save and CPU accounting is not done for the !stack case (XXX why not?)
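 *
 * Typical use, matching the handlers below (this pairing appears
 * verbatim at e.g. data_access_common):
 *
 *	EXC_COMMON_BEGIN(data_access_common)
 *		GEN_COMMON data_access
 *		addi	r3,r1,STACK_FRAME_OVERHEAD
 *		bl	<C handler>
 *		b	interrupt_return_srr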
 */
.macro GEN_COMMON name
	__GEN_COMMON_ENTRY \name
	__GEN_COMMON_BODY \name
.endm

.macro SEARCH_RESTART_TABLE
#ifdef CONFIG_RELOCATABLE
	mr	r12,r2
	LOAD_PACA_TOC()
	LOAD_REG_ADDR(r9, __start___restart_table)
	LOAD_REG_ADDR(r10, __stop___restart_table)
	mr	r2,r12
#else
	LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___restart_table)
	LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___restart_table)
#endif
300:
	cmpd	r9,r10
	beq	302f
	ld	r12,0(r9)
	cmpld	r11,r12
	blt	301f
	ld	r12,8(r9)
	cmpld	r11,r12
	bge	301f
	ld	r12,16(r9)
	b	303f
301:
	addi	r9,r9,24
	b	300b
302:
	li	r12,0
303:
.endm

.macro SEARCH_SOFT_MASK_TABLE
#ifdef CONFIG_RELOCATABLE
	mr	r12,r2
	LOAD_PACA_TOC()
	LOAD_REG_ADDR(r9, __start___soft_mask_table)
	LOAD_REG_ADDR(r10, __stop___soft_mask_table)
	mr	r2,r12
#else
	LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___soft_mask_table)
	LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___soft_mask_table)
#endif
300:
	cmpd	r9,r10
	beq	302f
	ld	r12,0(r9)
	cmpld	r11,r12
	blt	301f
	ld	r12,8(r9)
	cmpld	r11,r12
	bge	301f
	li	r12,1
	b	303f
301:
	addi	r9,r9,16
	b	300b
302:
	li	r12,0
303:
.endm

/*
 * Restore all registers including H/SRR0/1 saved in a stack frame of a
 * standard exception.
 */
.macro EXCEPTION_RESTORE_REGS hsrr=0
	/* Move original SRR0 and SRR1 into the respective regs */
	ld	r9,_MSR(r1)
	li	r10,0
	.if \hsrr
	mtspr	SPRN_HSRR1,r9
	stb	r10,PACAHSRR_VALID(r13)
	.else
	mtspr	SPRN_SRR1,r9
	stb	r10,PACASRR_VALID(r13)
	.endif
	ld	r9,_NIP(r1)
	.if \hsrr
	mtspr	SPRN_HSRR0,r9
	.else
	mtspr	SPRN_SRR0,r9
	.endif
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	ld	r9,_CCR(r1)
	mtcr	r9
	REST_GPRS(2, 13, r1)
	REST_GPR(0, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)
.endm

/*
 * EARLY_BOOT_FIXUP - Fix real-mode interrupt with wrong endian in early boot.
 *
 * There's a short window during boot where although the kernel is running
 * little endian, any exceptions will cause the CPU to switch back to big
 * endian. For example a WARN() boils down to a trap instruction, which will
 * cause a program check, and we end up here but with the CPU in big endian
 * mode. The first instruction of the program check handler (in GEN_INT_ENTRY
 * below) is an mtsprg, which when executed in the wrong endian is an lhzu with
 * a ~3GB displacement from r3. The content of r3 is random, so that is a load
 * from some random location, and depending on the system can easily lead to a
 * checkstop, or an infinitely recursive page fault.
 *
 * So to handle that case we have a trampoline here that can detect we are in
 * the wrong endian and flip us back to the correct endian. We can't flip
 * MSR[LE] using mtmsr, so we have to use rfid. That requires backing up SRR0/1
 * as well as a GPR. To do that we use SPRG0/2/3, as SPRG1 is already used for
 * the paca. SPRG3 is user readable, but this trampoline is only active very
 * early in boot, and SPRG3 will be reinitialised in vdso_getcpu_init() before
 * userspace starts.
 */
.macro EARLY_BOOT_FIXUP
BEGIN_FTR_SECTION
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	tdi   0,0,0x48	  // Trap never, or in reverse endian: b . + 8
	b     2f	  // Skip trampoline if endian is correct
	.long 0xa643707d  // mtsprg  0, r11	 Backup r11
	.long 0xa6027a7d  // mfsrr0  r11
	.long 0xa643727d  // mtsprg  2, r11	 Backup SRR0 in SPRG2
	.long 0xa6027b7d  // mfsrr1  r11
	.long 0xa643737d  // mtsprg  3, r11	 Backup SRR1 in SPRG3
	.long 0xa600607d  // mfmsr   r11
	.long 0x01006b69  // xori    r11, r11, 1 Invert MSR[LE]
	.long 0xa6037b7d  // mtsrr1  r11
	/*
	 * This is 'li r11,1f' where 1f is the absolute address of that
	 * label, byteswapped into the SI field of the instruction.
	 */
	.long 0x00006039 | \
		((ABS_ADDR(1f, real_vectors) & 0x00ff) << 24) | \
		((ABS_ADDR(1f, real_vectors) & 0xff00) << 8)
	.long 0xa6037a7d  // mtsrr0  r11
	.long 0x2400004c  // rfid
1:
	mfsprg r11, 3
	mtsrr1 r11	  // Restore SRR1
	mfsprg r11, 2
	mtsrr0 r11	  // Restore SRR0
	mfsprg r11, 0	  // Restore r11
2:
#endif
	/*
	 * program check could hit at any time, and pseries can not block
	 * MSR[ME] in early boot. So check if there is anything useful in r13
	 * yet, and spin forever if not.
	 */
	mtsprg	0, r11
	mfcr	r11
	cmpdi	r13, 0
	beq	.
	mtcr	r11
	mfsprg	r11, 0
END_FTR_SECTION(0, 1) // nop out after boot
.endm

/*
 * There are a few constraints to be concerned with.
 * - Real mode exceptions code/data must be located at their physical location.
 * - Virtual mode exceptions must be mapped at their 0xc000... location.
 * - Fixed location code must not call directly beyond the __end_interrupts
 *   area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
 *   must be used.
 * - LOAD_HANDLER targets must be within first 64K of physical 0 /
 *   virtual 0xc00...
 * - Conditional branch targets must be within +/-32K of caller.
 *
 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
 * therefore don't have to run in physically located code or rfid to
 * virtual mode kernel code. However on relocatable kernels they do have
 * to branch to KERNELBASE offset because the rest of the kernel (outside
 * the exception vectors) may be located elsewhere.
 *
 * Virtual exceptions correspond with physical, except their entry points
 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
 * offset applied. Virtual exceptions are enabled with the Alternate
 * Interrupt Location (AIL) bit set in the LPCR. However this does not
 * guarantee they will be delivered virtually. Some conditions (see the ISA)
 * cause exceptions to be delivered in real mode.
 *
 * The scv instructions are a special case. They get a 0x3000 offset applied.
 * scv exceptions have unique reentrancy properties, see below.
 *
 * It's impossible to receive interrupts below 0x300 via AIL.
 *
 * KVM: None of the virtual exceptions are from the guest. Anything that
 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
 *
 *
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
 * 0x1900 - 0x2fff : Real mode trampolines
 * 0x3000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
 * 0x5900 - 0x6fff : Relon mode trampolines
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 - ....   : Common interrupt handlers, remaining early
 *                   setup code, rest of kernel.
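 *
 * For example, the 0x300 data access vector below lives at real address
 * 0x300 within real_vectors, and its AIL counterpart at 0x4300 in
 * virt_vectors, i.e., at effective address 0xc000000000004300.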
 *
 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
 * vectors there.
 */
OPEN_FIXED_SECTION(real_vectors,	0x0100, 0x1900)
OPEN_FIXED_SECTION(real_trampolines,	0x1900, 0x3000)
OPEN_FIXED_SECTION(virt_vectors,	0x3000, 0x5900)
OPEN_FIXED_SECTION(virt_trampolines,	0x5900, 0x7000)

#ifdef CONFIG_PPC_POWERNV
	.globl start_real_trampolines
	.globl end_real_trampolines
	.globl start_virt_trampolines
	.globl end_virt_trampolines
#endif

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 * pseries and powernv need to keep the whole page from
 * 0x7000 to 0x8000 free for use by the firmware
 */
ZERO_FIXED_SECTION(fwnmi_page,		0x7000, 0x8000)
OPEN_TEXT_SECTION(0x8000)
#else
OPEN_TEXT_SECTION(0x7000)
#endif

USE_FIXED_SECTION(real_vectors)

/*
 * This is the start of the interrupt handlers for pSeries.
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	.globl __start_interrupts
__start_interrupts:

/**
 * Interrupt 0x3000 - System Call Vectored Interrupt (syscall).
 * This is a synchronous interrupt invoked with the "scv" instruction. The
 * system call does not alter the HV bit, so it is directed to the OS.
 *
 * Handling:
 * scv instructions enter the kernel without changing EE, RI, ME, or HV.
 * In particular, this means we can take a maskable interrupt at any point
 * in the scv handler, which is unlike any other interrupt. This is solved
 * by treating the instruction addresses in the handler as being soft-masked,
 * by adding a SOFT_MASK_TABLE entry for them.
 *
 * AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
 * ensure scv is never executed with relocation off, which means AIL-0
 * should never happen.
 *
 * Before leaving the following inside-__end_soft_masked text, at least one of
 * the following must be true:
 * - MSR[PR]=1 (i.e., return to userspace)
 * - MSR_EE|MSR_RI is clear (no reentrant exceptions)
 * - Standard kernel environment is set up (stack, paca, etc)
 *
 * KVM:
 * These interrupts do not elevate HV 0->1, so HV is not involved. PR KVM
 * ensures that FSCR[SCV] is disabled whenever it has to force AIL off.
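 *
 * Illustration: the SOFT_MASK_TABLE(0xc000000000003000,
 * 0xc000000000004000) entry emitted below covers the entire scv vector
 * range, so a maskable interrupt hitting inside it is treated as
 * soft-masked and replayed later instead of interrupting the partially
 * completed scv entry.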
 *
 * Call convention:
 *
 * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
 */
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
	/* SCV 0 */
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_tramp
#else
	b	system_call_vectored_common
#endif
	nop

	/* SCV 1 - 127 */
	.rept	127
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	li	r0,-1 /* cause failure */
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_sigill_tramp
#else
	b	system_call_vectored_sigill
#endif
	.endr
EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)

// Treat scv vectors as soft-masked, see comment above.
// Use absolute values rather than labels here, so they don't get relocated,
// because this code runs unrelocated.
SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000)

#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_common, virt_trampolines)
	mtctr	r10
	bctr

TRAMP_VIRT_BEGIN(system_call_vectored_sigill_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_sigill, virt_trampolines)
	mtctr	r10
	bctr
#endif


/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x100)


/**
 * Interrupt 0x100 - System Reset Interrupt (SRESET aka NMI).
 * This is a non-maskable, asynchronous interrupt always taken in real-mode.
 * It is caused by:
 * - Wake from power-saving state, on powernv.
 * - An NMI from another CPU, triggered by firmware or hypercall.
 * - A crash/debug signal injected from BMC, firmware or hypervisor.
 *
 * Handling:
 * Power-save wakeup is the only performance critical path, so this is
 * determined as quickly as possible first. In this case volatile registers
 * can be discarded and SPRs like CFAR don't need to be read.
 *
 * If not a powersave wakeup, then it's run as a regular interrupt, however
 * it uses its own stack and PACA save area to preserve the regular kernel
 * environment for debugging.
 *
 * This interrupt is not maskable, so triggering it when MSR[RI] is clear,
 * or SCRATCH0 is in use, etc. may cause a crash. It's also not entirely
 * correct to switch to virtual mode to run the regular interrupt handler
 * because it might be interrupted when the MMU is in a bad state (e.g., SLB
 * is clear).
 *
 * FWNMI:
 * PAPR specifies a "fwnmi" facility which sends the sreset to a different
 * entry point with a different register setup. Some hypervisors will
 * send the sreset to 0x100 in the guest if it is not fwnmi capable.
 *
 * KVM:
 * Unlike most SRR interrupts, this may be taken by the host while executing
 * in a guest, so a KVM test is required. KVM will pull the CPU out of guest
 * mode and then raise the sreset.
 */
INT_DEFINE_BEGIN(system_reset)
	IVEC=0x100
	IAREA=PACA_EXNMI
	IVIRT=0 /* no virt entry point */
	ISTACK=0
	IKVM_REAL=1
INT_DEFINE_END(system_reset)

EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
#ifdef CONFIG_PPC_P7_NAP
	/*
	 * If running native on arch 2.06 or later, check if we are waking up
	 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
	 * bits 46:47.
	 * A non-0 value indicates that we are coming from a power
	 * saving state. The idle wakeup handler initially runs in real mode,
	 * but we branch to the 0xc000... address so we can turn on relocation
	 * with mtmsrd later, after SPRs are restored.
	 *
	 * Careful to minimise cost for the fast path (idle wakeup) while
	 * also avoiding clobbering CFAR for the debug path (non-idle).
	 *
	 * For the idle wake case volatile registers can be clobbered, which
	 * is why we use those initially. If it turns out to not be an idle
	 * wake, carefully put everything back the way it was, so we can use
	 * common exception macros to handle it.
	 */
BEGIN_FTR_SECTION
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r3,PACA_EXNMI+0*8(r13)
	std	r4,PACA_EXNMI+1*8(r13)
	std	r5,PACA_EXNMI+2*8(r13)
	mfspr	r3,SPRN_SRR1
	mfocrf	r4,0x80
	rlwinm.	r5,r3,47-31,30,31
	bne+	system_reset_idle_wake
	/* Not powersave wakeup. Restore regs for regular interrupt handler. */
	mtocrf	0x80,r4
	ld	r3,PACA_EXNMI+0*8(r13)
	ld	r4,PACA_EXNMI+1*8(r13)
	ld	r5,PACA_EXNMI+2*8(r13)
	GET_SCRATCH0(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

	GEN_INT_ENTRY system_reset, virt=0
	/*
	 * In theory, we should not enable relocation here if it was disabled
	 * in SRR1, because the MMU may not be configured to support it (e.g.,
	 * SLB may have been cleared). In practice, there should only be a few
	 * small windows where that's the case, and sreset is considered to
	 * be dangerous anyway.
	 */
EXC_REAL_END(system_reset, 0x100, 0x100)
EXC_VIRT_NONE(0x4100, 0x100)

#ifdef CONFIG_PPC_P7_NAP
TRAMP_REAL_BEGIN(system_reset_idle_wake)
	/* We are waking up from idle, so may clobber any volatile register */
	cmpwi	cr1,r5,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	__LOAD_FAR_HANDLER(r12, DOTSYM(idle_return_gpr_loss), real_trampolines)
	mtctr	r12
	bctr
#endif

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option. Share common code.
 */
TRAMP_REAL_BEGIN(system_reset_fwnmi)
	GEN_INT_ENTRY system_reset, virt=0

#endif /* CONFIG_PPC_PSERIES */

EXC_COMMON_BEGIN(system_reset_common)
	__GEN_COMMON_ENTRY system_reset
	/*
	 * Increment paca->in_nmi. When the interrupt entry wrapper later
	 * enables MSR_RI, then SLB or MCE will be able to recover, but a
	 * nested NMI will notice in_nmi and not recover because of the use
	 * of the NMI stack. in_nmi reentrancy is tested in
	 * system_reset_exception.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	addi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	mr	r10,r1
	ld	r1,PACA_NMI_EMERG_SP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	__GEN_COMMON_BODY system_reset

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	system_reset_exception

	/* Clear MSR_RI before setting SRR0 and SRR1. */
	li	r9,0
	mtmsrd	r9,1

	/*
	 * MSR_RI is clear, now we can decrement paca->in_nmi.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	subi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	kuap_kernel_restore r9, r10
	EXCEPTION_RESTORE_REGS
	RFI_TO_USER_OR_KERNEL


/**
 * Interrupt 0x200 - Machine Check Interrupt (MCE).
 * This is a non-maskable interrupt always taken in real-mode. It can be
 * synchronous or asynchronous, caused by hardware or software, and it may be
 * taken in a power-saving state.
 *
 * Handling:
 * Similarly to system reset, this uses its own stack and PACA save area,
 * the difference is re-entrancy is allowed on the machine check stack.
 *
 * machine_check_early is run in real mode, and carefully decodes the
 * machine check and tries to handle it (e.g., flush the SLB if there was an
 * error detected there), determines if it was recoverable and logs the
 * event.
 *
 * This early code does not "reconcile" irq soft-mask state like SRESET or
 * regular interrupts do, so irqs_disabled() among other things may not work
 * properly (irq disable/enable already doesn't work because irq tracing can
 * not work in real mode).
 *
 * Then, depending on the execution context when the interrupt is taken, there
 * are 3 main actions:
 * - Executing in kernel mode. The event is queued with irq_work, which means
 *   it is handled when it is next safe to do so (i.e., the kernel has enabled
 *   interrupts), which could be immediately when the interrupt returns. This
 *   avoids nasty issues like switching to virtual mode when the MMU is in a
 *   bad state, or when executing OPAL code. (SRESET is exposed to such issues,
 *   but it has different priorities). Check to see if the CPU was in power
 *   save, and return via the wake up code if it was.
 *
 * - Executing in user mode. machine_check_exception is run like a normal
 *   interrupt handler, which processes the data generated by the early
 *   handler.
 *
 * - Executing in guest mode. The interrupt is run with its KVM test, and
 *   branches to KVM to deal with it. KVM may queue the event for the host
 *   to report later.
 *
 * This interrupt is not maskable, so if it triggers when MSR[RI] is clear,
 * or SCRATCH0 is in use, it may cause a crash.
 *
 * KVM:
 * See SRESET.
 */
INT_DEFINE_BEGIN(machine_check_early)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	IREALMODE_COMMON=1
	ISTACK=0
	IDAR=1
	IDSISR=1
	IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
INT_DEFINE_END(machine_check_early)

INT_DEFINE_BEGIN(machine_check)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	IDAR=1
	IDSISR=1
	IKVM_REAL=1
INT_DEFINE_END(machine_check)

EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
	EARLY_BOOT_FIXUP
	GEN_INT_ENTRY machine_check_early, virt=0
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)

#ifdef CONFIG_PPC_PSERIES
TRAMP_REAL_BEGIN(machine_check_fwnmi)
	/* See comment at machine_check exception, don't turn on RI */
	GEN_INT_ENTRY machine_check_early, virt=0
#endif

#define MACHINE_CHECK_HANDLER_WINDUP			\
	/* Clear MSR_RI before setting SRR0 and SRR1. */\
	li	r9,0;					\
	mtmsrd	r9,1;		/* Clear MSR_RI */	\
	/* Decrement paca->in_mce now RI is clear. */	\
	lhz	r12,PACA_IN_MCE(r13);			\
	subi	r12,r12,1;				\
	sth	r12,PACA_IN_MCE(r13);			\
	EXCEPTION_RESTORE_REGS

EXC_COMMON_BEGIN(machine_check_early_common)
	__GEN_REALMODE_COMMON_ENTRY machine_check_early

	/*
	 * Switch to mc_emergency stack and handle re-entrancy (we limit
	 * the nested MCE up to level 4 to avoid stack overflow).
	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
	 *
	 * We use paca->in_mce to check whether this is the first entry or
	 * nested machine check.
	 * We increment paca->in_mce to track nested machine checks.
	 *
	 * If this is the first entry then set stack pointer to
	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
	 * stack frame on mc_emergency stack.
	 *
	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
	 * checkstop if we get another machine check exception before we do
	 * rfid with MSR_ME=1.
	 *
	 * This interrupt can wake directly from idle. If that is the case,
	 * the machine check is handled then the idle wakeup code is called
	 * to restore state.
	 */
	lhz	r10,PACA_IN_MCE(r13)
	cmpwi	r10,0			/* Are we in nested machine check */
	cmpwi	cr1,r10,MAX_MCE_DEPTH	/* Are we at maximum nesting */
	addi	r10,r10,1		/* increment paca->in_mce */
	sth	r10,PACA_IN_MCE(r13)

	mr	r10,r1			/* Save r1 */
	bne	1f
	/* First machine check entry */
	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
1:	/* Limit nested MCE to level 4 to avoid stack overflow */
	bgt	cr1,unrecoverable_mce	/* Check if we hit limit of 4 */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */

	__GEN_COMMON_BODY machine_check_early

BEGIN_FTR_SECTION
	bl	enable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	addi	r3,r1,STACK_FRAME_OVERHEAD
BEGIN_FTR_SECTION
	bl	machine_check_early_boot
END_FTR_SECTION(0, 1) // nop out after boot
	bl	machine_check_early
	std	r3,RESULT(r1)	/* Save result */
	ld	r12,_MSR(r1)

#ifdef CONFIG_PPC_P7_NAP
	/*
	 * Check if thread was in power saving mode. We come here when any
	 * of the following is true:
	 * a. thread wasn't in power saving mode
	 * b. thread was in power saving mode with no state loss,
	 *    supervisor state loss or hypervisor state loss.
	 *
	 * Go back to nap/sleep/winkle mode again if (b) is true.
	 */
BEGIN_FTR_SECTION
	rlwinm.	r11,r12,47-31,30,31
	bne	machine_check_idle_common
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * Check if we are coming from guest. If yes, then run the normal
	 * exception handler which will take the
	 * machine_check_kvm->kvm_interrupt branch to deliver the MC event
	 * to guest.
	 */
	lbz	r11,HSTATE_IN_GUEST(r13)
	cmpwi	r11,0			/* Check if coming from guest */
	bne	mce_deliver		/* continue if we are. */
#endif

	/*
	 * Check if we are coming from userspace. If yes, then run the normal
	 * exception handler which will deliver the MC event to this kernel.
	 */
	andi.	r11,r12,MSR_PR		/* See if coming from user. */
	bne	mce_deliver		/* continue in V mode if we are. */

	/*
	 * At this point we are coming from kernel context.
	 * Queue up the MCE event and return from the interrupt.
	 * But before that, check if this is an un-recoverable exception.
	 * If yes, then stay on emergency stack and panic.
	 */
	andi.	r11,r12,MSR_RI
	beq	unrecoverable_mce

	/*
	 * Check if we have successfully handled/recovered from error, if not
	 * then stay on emergency stack and panic.
	 */
	ld	r3,RESULT(r1)	/* Load result */
	cmpdi	r3,0		/* see if we handled MCE successfully */
	beq	unrecoverable_mce /* if !handled then panic */

	/*
	 * Return from MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
	 * returning from kernel or opal call.
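	 *
	 * (The queued event is consumed via irq_work once the kernel next
	 * enables interrupts, per the irq_work note in the 0x200 comment
	 * above.)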
	 */
	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	RFI_TO_KERNEL

mce_deliver:
	/*
	 * This is a host user or guest MCE. Restore all registers, then
	 * run the "late" handler. For host user, this will run the
	 * machine_check_exception handler in virtual mode like a normal
	 * interrupt handler. For guest, this will trigger the KVM test
	 * and branch to the KVM interrupt similarly to other interrupts.
	 */
BEGIN_FTR_SECTION
	ld	r10,ORIG_GPR3(r1)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	MACHINE_CHECK_HANDLER_WINDUP
	GEN_INT_ENTRY machine_check, virt=0

EXC_COMMON_BEGIN(machine_check_common)
	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	GEN_COMMON machine_check
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception_async
	b	interrupt_return_srr


#ifdef CONFIG_PPC_P7_NAP
/*
 * This is an idle wakeup. Low level machine check has already been
 * done. Queue the event then call the idle code to do the wake up.
 */
EXC_COMMON_BEGIN(machine_check_idle_common)
	bl	machine_check_queue_event

	/*
	 * GPR-loss wakeups are relatively straightforward, because the
	 * idle sleep code has saved all non-volatile registers on its
	 * own stack, and r1 in PACAR1.
	 *
	 * For no-loss wakeups the r1 and lr registers used by the
	 * early machine check handler have to be restored first. r2 is
	 * the kernel TOC, so no need to restore it.
	 *
	 * Then decrement MCE nesting after finishing with the stack.
	 */
	ld	r3,_MSR(r1)
	ld	r4,_LINK(r1)
	ld	r1,GPR1(r1)

	lhz	r11,PACA_IN_MCE(r13)
	subi	r11,r11,1
	sth	r11,PACA_IN_MCE(r13)

	mtlr	r4
	rlwinm	r10,r3,47-31,30,31
	cmpwi	cr1,r10,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	b	idle_return_gpr_loss
#endif

EXC_COMMON_BEGIN(unrecoverable_mce)
	/*
	 * We are going down. But there are chances that we might get hit by
	 * another MCE during panic path and we may run into unstable state
	 * with no way out. Hence, turn ME bit off while going down, so that
	 * when another MCE is hit during panic path, system will checkstop
	 * and hypervisor will get restarted cleanly by SP.
	 */
BEGIN_FTR_SECTION
	li	r10,0 /* clear MSR_RI */
	mtmsrd	r10,1
	bl	disable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r10,PACAKMSR(r13)
	li	r3,MSR_ME
	andc	r10,r10,r3
	mtmsrd	r10

	lhz	r12,PACA_IN_MCE(r13)
	subi	r12,r12,1
	sth	r12,PACA_IN_MCE(r13)

	/*
	 * Invoke machine_check_exception to print MCE event and panic.
	 * This is the NMI version of the handler because we are called from
	 * the early handler which is a true NMI.
	 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception

	/*
	 * We will not reach here. Even if we did, there is no way out.
	 * Call unrecoverable_exception and die.
	 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.


/**
 * Interrupt 0x300 - Data Storage Interrupt (DSI).
 * This is a synchronous interrupt generated due to a data access exception,
 * e.g., a load or store which does not have a valid page table entry with
 * permissions.
 * DAWR matches also fault here, as do RC updates, and minor misc
 * errors e.g., copy/paste, AMO, certain invalid CI accesses, etc.
 *
 * Handling:
 * - Hash MMU
 *   Go to do_hash_fault, which attempts to fill the HPT from an entry in the
 *   Linux page table. Hash faults can hit in kernel mode in a fairly
 *   arbitrary state (e.g., interrupts disabled, locks held) when accessing
 *   "non-bolted" regions, e.g., vmalloc space. However these should always be
 *   backed by Linux page table entries.
 *
 *   If no entry is found the Linux page fault handler is invoked (by
 *   do_hash_fault). Linux page faults can happen in kernel mode due to user
 *   copy operations of course.
 *
 *   KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
 *   MMU context, which may cause a DSI in the host, which must go to the
 *   KVM handler. MSR[IR] is not enabled, so the real-mode handler will
 *   always be used regardless of AIL setting.
 *
 * - Radix MMU
 *   The hardware loads from the Linux page table directly, so a fault goes
 *   immediately to Linux page fault.
 *
 * Conditions like DAWR match are handled on the way in to Linux page fault.
 */
INT_DEFINE_BEGIN(data_access)
	IVEC=0x300
	IDAR=1
	IDSISR=1
	IKVM_REAL=1
INT_DEFINE_END(data_access)

EXC_REAL_BEGIN(data_access, 0x300, 0x80)
	GEN_INT_ENTRY data_access, virt=0
EXC_REAL_END(data_access, 0x300, 0x80)
EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
	GEN_INT_ENTRY data_access, virt=1
EXC_VIRT_END(data_access, 0x4300, 0x80)
EXC_COMMON_BEGIN(data_access_common)
	GEN_COMMON data_access
	ld	r4,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	1f
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	bl	do_hash_fault
MMU_FTR_SECTION_ELSE
	bl	do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	bl	do_page_fault
#endif
	b	interrupt_return_srr

1:	bl	do_break
	/*
	 * do_break() may have changed the NV GPRS while handling a breakpoint.
	 * If so, we need to restore them with their updated values.
	 */
	REST_NVGPRS(r1)
	b	interrupt_return_srr


/**
 * Interrupt 0x380 - Data Segment Interrupt (DSLB).
 * This is a synchronous interrupt in response to an MMU fault missing SLB
 * entry for HPT, or an address outside RPT translation range.
 *
 * Handling:
 * - HPT:
 *   This refills the SLB, or reports an access fault similarly to a bad page
 *   fault. When coming from user-mode, the SLB handler may access any kernel
 *   data, though it may itself take a DSLB. When coming from kernel mode,
 *   recursive faults must be avoided so access is restricted to the kernel
 *   image text/data, kernel stack, and any data allocated below
 *   ppc64_bolted_size (first segment). The kernel handler must avoid stomping
 *   on user-handler data structures.
 *
 *   KVM: Same as 0x300, DSLB must test for KVM guest.
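 *
 *   For example (illustrative): the fault handler itself must only
 *   touch data in the bolted first segment when handling a kernel-mode
 *   fault; if it dereferenced, say, a vmalloc address there, it could
 *   take a recursive DSLB.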
 */
INT_DEFINE_BEGIN(data_access_slb)
	IVEC=0x380
	IDAR=1
	IKVM_REAL=1
INT_DEFINE_END(data_access_slb)

EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=0
EXC_REAL_END(data_access_slb, 0x380, 0x80)
EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=1
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
EXC_COMMON_BEGIN(data_access_slb_common)
	GEN_COMMON data_access_slb
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return_srr
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	li	r3,-EFAULT
#endif
	std	r3,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_segment_interrupt
	b	interrupt_return_srr


/**
 * Interrupt 0x400 - Instruction Storage Interrupt (ISI).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSI, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR), and status in SRR1 (rather than DSISR).
 */
INT_DEFINE_BEGIN(instruction_access)
	IVEC=0x400
	IISIDE=1
	IDAR=1
	IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access)

EXC_REAL_BEGIN(instruction_access, 0x400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=0
EXC_REAL_END(instruction_access, 0x400, 0x80)
EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=1
EXC_VIRT_END(instruction_access, 0x4400, 0x80)
EXC_COMMON_BEGIN(instruction_access_common)
	GEN_COMMON instruction_access
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	bl	do_hash_fault
MMU_FTR_SECTION_ELSE
	bl	do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	bl	do_page_fault
#endif
	b	interrupt_return_srr


/**
 * Interrupt 0x480 - Instruction Segment Interrupt (ISLB).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSLB, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR).
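 *
 * Illustration: with IISIDE=1 and IDAR=1 set below, __GEN_COMMON_BODY
 * fills regs->dar from _NIP(r1) (i.e., the saved SRR0) instead of
 * reading the DAR SPR.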
 */
INT_DEFINE_BEGIN(instruction_access_slb)
	IVEC=0x480
	IISIDE=1
	IDAR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access_slb)

EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=0
EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=1
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
EXC_COMMON_BEGIN(instruction_access_slb_common)
	GEN_COMMON instruction_access_slb
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return_srr
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	li	r3,-EFAULT
#endif
	std	r3,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_segment_interrupt
	b	interrupt_return_srr


/**
 * Interrupt 0x500 - External Interrupt.
 * This is an asynchronous maskable interrupt in response to an "external
 * exception" from the interrupt controller or hypervisor (e.g., device
 * interrupt). It is maskable in hardware by clearing MSR[EE], and
 * soft-maskable with IRQS_DISABLED mask (i.e., local_irq_disable()).
 *
 * When running in HV mode, Linux sets up the LPCR[LPES] bit such that
 * interrupts are delivered with HSRR registers, guests use SRRs, which
 * requires IHSRR_IF_HVMODE.
 *
 * On bare metal POWER9 and later, Linux sets the LPCR[HVICE] bit such that
 * external interrupts are delivered as Hypervisor Virtualization Interrupts
 * rather than External Interrupts.
 *
 * Handling:
 * This calls into Linux IRQ handler. NVGPRs are not saved to reduce overhead,
 * because registers at the time of the interrupt are not so important as it is
 * asynchronous.
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, and clear MSR[EE] in the interrupted context.
 *
 * CFAR is not required because this is an asynchronous interrupt that in
 * general won't have much bearing on the state of the CPU, with the possible
 * exception of crash/debug IPIs, but those are generally moving to use SRESET
 * IPIs. Unless this is an HV interrupt and KVM HV is possible, in which case
 * it may be exiting the guest and need CFAR to be saved.
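 *
 * Illustration: ICFAR=0 below skips the CFAR read, while
 * ICFAR_IF_HVMODE=1 (when KVM HV is possible) makes GEN_INT_ENTRY save
 * CFAR only on CPU_FTR_HVMODE parts, where the interrupt may be a guest
 * exit, and store 0 in its place otherwise.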
 */
INT_DEFINE_BEGIN(hardware_interrupt)
	IVEC=0x500
	IHSRR_IF_HVMODE=1
	IMASK=IRQS_DISABLED
	IKVM_REAL=1
	IKVM_VIRT=1
	ICFAR=0
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	ICFAR_IF_HVMODE=1
#endif
INT_DEFINE_END(hardware_interrupt)

EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
	GEN_INT_ENTRY hardware_interrupt, virt=0
EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
	GEN_INT_ENTRY hardware_interrupt, virt=1
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
EXC_COMMON_BEGIN(hardware_interrupt_common)
	GEN_COMMON hardware_interrupt
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_IRQ
	BEGIN_FTR_SECTION
	b	interrupt_return_hsrr
	FTR_SECTION_ELSE
	b	interrupt_return_srr
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)


/**
 * Interrupt 0x600 - Alignment Interrupt
 * This is a synchronous interrupt in response to data alignment fault.
 */
INT_DEFINE_BEGIN(alignment)
	IVEC=0x600
	IDAR=1
	IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(alignment)

EXC_REAL_BEGIN(alignment, 0x600, 0x100)
	GEN_INT_ENTRY alignment, virt=0
EXC_REAL_END(alignment, 0x600, 0x100)
EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
	GEN_INT_ENTRY alignment, virt=1
EXC_VIRT_END(alignment, 0x4600, 0x100)
EXC_COMMON_BEGIN(alignment_common)
	GEN_COMMON alignment
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	alignment_exception
	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
	b	interrupt_return_srr


/**
 * Interrupt 0x700 - Program Interrupt (program check).
 * This is a synchronous interrupt in response to various instruction faults:
 * traps, privilege errors, TM errors, floating point exceptions.
 *
 * Handling:
 * This interrupt may use the "emergency stack" in some cases when being taken
 * from kernel context, which complicates handling.
 */
INT_DEFINE_BEGIN(program_check)
	IVEC=0x700
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(program_check)

EXC_REAL_BEGIN(program_check, 0x700, 0x100)
	EARLY_BOOT_FIXUP
	GEN_INT_ENTRY program_check, virt=0
EXC_REAL_END(program_check, 0x700, 0x100)
EXC_VIRT_BEGIN(program_check, 0x4700, 0x100)
	GEN_INT_ENTRY program_check, virt=1
EXC_VIRT_END(program_check, 0x4700, 0x100)
EXC_COMMON_BEGIN(program_check_common)
	__GEN_COMMON_ENTRY program_check

	/*
	 * It's possible to receive a TM Bad Thing type program check with
	 * userspace register values (in particular r1), but with SRR1 reporting
	 * that we came from the kernel. Normally that would confuse the bad
	 * stack logic, and we would report a bad kernel stack pointer. Instead
	 * we switch to the emergency stack if we're taking a TM Bad Thing from
	 * the kernel.
	 */

	andi.	r10,r12,MSR_PR
	bne	.Lnormal_stack		/* If userspace, go normal path */

	andis.	r10,r12,(SRR1_PROGTM)@h
	bne	.Lemergency_stack	/* If TM, emergency */

	cmpdi	r1,-INT_FRAME_SIZE	/* check if r1 is in userspace */
	blt	.Lnormal_stack		/* normal path if not */

	/* Use the emergency stack */
.Lemergency_stack:
        mr      r10,r1                  /* Save r1 */
        ld      r1,PACAEMERGSP(r13)     /* Use emergency stack */
        subi    r1,r1,INT_FRAME_SIZE    /* alloc stack frame */
        __ISTACK(program_check)=0
        __GEN_COMMON_BODY program_check
        b       .Ldo_program_check

.Lnormal_stack:
        __ISTACK(program_check)=1
        __GEN_COMMON_BODY program_check

.Ldo_program_check:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      program_check_exception
        REST_NVGPRS(r1) /* instruction emulation may change GPRs */
        b       interrupt_return_srr


/**
 * Interrupt 0x800 - Floating-Point Unavailable Interrupt.
 * This is a synchronous interrupt in response to executing an fp instruction
 * with MSR[FP]=0.
 *
 * Handling:
 * This will load FP registers and enable the FP bit if coming from userspace,
 * otherwise report a bad kernel use of FP.
 */
INT_DEFINE_BEGIN(fp_unavailable)
        IVEC=0x800
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
INT_DEFINE_END(fp_unavailable)

EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100)
        GEN_INT_ENTRY fp_unavailable, virt=0
EXC_REAL_END(fp_unavailable, 0x800, 0x100)
EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100)
        GEN_INT_ENTRY fp_unavailable, virt=1
EXC_VIRT_END(fp_unavailable, 0x4800, 0x100)
EXC_COMMON_BEGIN(fp_unavailable_common)
        GEN_COMMON fp_unavailable
        bne     1f                      /* if from user, just load it up */
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      kernel_fp_unavailable_exception
0:      trap
        EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
        /*
         * Test if the two TM state bits are zero. If non-zero (i.e., userspace
         * was in a transaction), go do TM stuff.
         */
        rldicl. r0,r12,(64-MSR_TS_LG),(64-2)
        bne-    2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
        bl      load_up_fpu
        b       fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:      /* User process was in a transaction */
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      fp_unavailable_tm
        b       interrupt_return_srr
#endif


/**
 * Interrupt 0x900 - Decrementer Interrupt.
 * This is an asynchronous interrupt in response to a decrementer exception
 * (e.g., DEC has wrapped below zero). It is maskable in hardware by clearing
 * MSR[EE], and soft-maskable with IRQS_DISABLED mask (i.e.,
 * local_irq_disable()).
 *
 * Handling:
 * This calls into the Linux timer handler. NVGPRs are not saved (see 0x500).
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, and bump the decrementer to a high value, leaving MSR[EE] enabled
 * in the interrupted context.
 * If PPC_WATCHDOG is configured, the soft masked handler will actually set
 * things back up to run soft_nmi_interrupt as a regular interrupt handler
 * on the emergency stack.
 *
 * CFAR is not required because this is asynchronous (see hardware_interrupt).
 * A watchdog interrupt may like to have CFAR, but usually the interesting
 * branch is long gone by that point (e.g., infinite loop).
 */
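/*
 * For reference, the soft-masked path for a pending decrementer (see the
 * MASKED_INTERRUPT macro near the end of this file) simply rearms DEC with
 * the largest positive 32-bit value so it does not immediately refire:
 *
 *      LOAD_REG_IMMEDIATE(r9, 0x7fffffff)
 *      mtspr   SPRN_DEC,r9
 *
 * The real interrupt is then replayed when the kernel re-enables soft
 * interrupts.
 */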
INT_DEFINE_BEGIN(decrementer)
        IVEC=0x900
        IMASK=IRQS_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
        ICFAR=0
INT_DEFINE_END(decrementer)

EXC_REAL_BEGIN(decrementer, 0x900, 0x80)
        GEN_INT_ENTRY decrementer, virt=0
EXC_REAL_END(decrementer, 0x900, 0x80)
EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80)
        GEN_INT_ENTRY decrementer, virt=1
EXC_VIRT_END(decrementer, 0x4900, 0x80)
EXC_COMMON_BEGIN(decrementer_common)
        GEN_COMMON decrementer
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      timer_interrupt
        b       interrupt_return_srr


/**
 * Interrupt 0x980 - Hypervisor Decrementer Interrupt.
 * This is an asynchronous interrupt, similar to 0x900 but for the HDEC
 * register.
 *
 * Handling:
 * Linux does not use this outside KVM, where it is used to keep a host timer
 * while the guest is given control of DEC. It should normally be caught by
 * the KVM test and routed there.
 */
INT_DEFINE_BEGIN(hdecrementer)
        IVEC=0x980
        IHSRR=1
        ISTACK=0
        IKVM_REAL=1
        IKVM_VIRT=1
INT_DEFINE_END(hdecrementer)

EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80)
        GEN_INT_ENTRY hdecrementer, virt=0
EXC_REAL_END(hdecrementer, 0x980, 0x80)
EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80)
        GEN_INT_ENTRY hdecrementer, virt=1
EXC_VIRT_END(hdecrementer, 0x4980, 0x80)
EXC_COMMON_BEGIN(hdecrementer_common)
        __GEN_COMMON_ENTRY hdecrementer
        /*
         * Hypervisor decrementer interrupts not caught by the KVM test
         * shouldn't occur but are sometimes left pending on exit from a KVM
         * guest. We don't need to do anything to clear them, as they are
         * edge-triggered.
         *
         * Be careful to avoid touching the kernel stack.
         */
        li      r10,0
        stb     r10,PACAHSRR_VALID(r13)
        ld      r10,PACA_EXGEN+EX_CTR(r13)
        mtctr   r10
        mtcrf   0x80,r9
        ld      r9,PACA_EXGEN+EX_R9(r13)
        ld      r10,PACA_EXGEN+EX_R10(r13)
        ld      r11,PACA_EXGEN+EX_R11(r13)
        ld      r12,PACA_EXGEN+EX_R12(r13)
        ld      r13,PACA_EXGEN+EX_R13(r13)
        HRFI_TO_KERNEL


/**
 * Interrupt 0xa00 - Directed Privileged Doorbell Interrupt.
 * This is an asynchronous interrupt in response to a msgsndp doorbell.
 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
 * IRQS_DISABLED mask (i.e., local_irq_disable()).
 *
 * Handling:
 * Guests may use this for IPIs between threads in a core if the
 * hypervisor supports it. NVGPRs are not saved (see 0x500).
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, leaving MSR[EE] enabled in the interrupted context, because the
 * doorbells are edge triggered.
 *
 * CFAR is not required, similarly to hardware_interrupt.
 */
INT_DEFINE_BEGIN(doorbell_super)
        IVEC=0xa00
        IMASK=IRQS_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
        ICFAR=0
INT_DEFINE_END(doorbell_super)

EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100)
        GEN_INT_ENTRY doorbell_super, virt=0
EXC_REAL_END(doorbell_super, 0xa00, 0x100)
EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100)
        GEN_INT_ENTRY doorbell_super, virt=1
EXC_VIRT_END(doorbell_super, 0x4a00, 0x100)
EXC_COMMON_BEGIN(doorbell_super_common)
        GEN_COMMON doorbell_super
        addi    r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
        bl      doorbell_exception
#else
        bl      unknown_async_exception
#endif
        b       interrupt_return_srr


EXC_REAL_NONE(0xb00, 0x100)
EXC_VIRT_NONE(0x4b00, 0x100)

/**
 * Interrupt 0xc00 - System Call Interrupt (syscall, hcall).
 * This is a synchronous interrupt invoked with the "sc" instruction. The
 * system call is invoked with "sc 0" and does not alter the HV bit, so it
 * is directed to the currently running OS. The hypercall is invoked with
 * "sc 1" and it sets HV=1, so it elevates to hypervisor.
 *
 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
 * 0x4c00 virtual mode.
 *
 * Handling:
 * If the KVM test fires then it was due to a hypercall and is accordingly
 * routed to KVM. Otherwise this executes a normal Linux system call.
 *
 * Call convention:
 *
 * syscall and hypercall register conventions are documented in
 * Documentation/powerpc/syscall64-abi.rst and
 * Documentation/powerpc/papr_hcalls.rst respectively.
 *
 * The intersection of volatile registers that don't contain possible
 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
 * without saving, though xer is best avoided, as hardware may interpret
 * some of its bits and changing them can be costly.
 */
INT_DEFINE_BEGIN(system_call)
        IVEC=0xc00
        IKVM_REAL=1
        IKVM_VIRT=1
        ICFAR=0
INT_DEFINE_END(system_call)

.macro SYSTEM_CALL virt
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
        /*
         * There is a little bit of juggling to get syscall and hcall
         * working well. Save r13 in ctr to avoid using SPRG scratch
         * register.
         *
         * Userspace syscalls have already saved the PPR, hcalls must save
         * it before setting HMT_MEDIUM.
         */
        mtctr   r13
        GET_PACA(r13)
        std     r10,PACA_EXGEN+EX_R10(r13)
        INTERRUPT_TO_KERNEL
        KVMTEST system_call kvm_hcall /* uses r10, branch to kvm_hcall */
        mfctr   r9
#else
        mr      r9,r13
        GET_PACA(r13)
        INTERRUPT_TO_KERNEL
#endif

#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
BEGIN_FTR_SECTION
        cmpdi   r0,0x1ebe
        beq-    1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
#endif

        /* We reach here with PACA in r13, r13 in r9. */
        mfspr   r11,SPRN_SRR0
        mfspr   r12,SPRN_SRR1

        HMT_MEDIUM

        .if ! \virt
        __LOAD_HANDLER(r10, system_call_common_real, real_vectors)
        mtctr   r10
        bctr
        .else
#ifdef CONFIG_RELOCATABLE
        __LOAD_HANDLER(r10, system_call_common, virt_vectors)
        mtctr   r10
        bctr
#else
        b       system_call_common
#endif
        .endif

#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
        /* Fast LE/BE switch system call */
1:      mfspr   r12,SPRN_SRR1
        xori    r12,r12,MSR_LE
        mtspr   SPRN_SRR1,r12
        mr      r13,r9
        RFI_TO_USER     /* return to userspace */
        b       .       /* prevent speculative execution */
#endif
.endm
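/*
 * Caller-side sketch of the two conventions referenced above (illustrative
 * only; syscall64-abi.rst and papr_hcalls.rst are authoritative). A system
 * call takes the number in r0 and arguments in r3-r8, with cr0.SO set on
 * failure; a hypercall takes the token in r3 and arguments from r4 on,
 * returning status in r3.
 */
#if 0
        /* syscall: getpid(void) */
        li      r0,__NR_getpid          /* number from asm/unistd.h */
        sc      0
        bns+    1f                      /* cr0.SO clear: r3 = result */
        /* cr0.SO set: r3 holds the errno value */
1:
        /* hypercall (guest context): cede this vCPU to the hypervisor */
        li      r3,H_CEDE               /* token from asm/hvcall.h */
        sc      1                       /* elevates to MSR[HV]=1 */
        cmpdi   r3,0                    /* H_SUCCESS == 0 */
#endif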

EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
        SYSTEM_CALL 0
EXC_REAL_END(system_call, 0xc00, 0x100)
EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
        SYSTEM_CALL 1
EXC_VIRT_END(system_call, 0x4c00, 0x100)

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
TRAMP_REAL_BEGIN(kvm_hcall)
        std     r9,PACA_EXGEN+EX_R9(r13)
        std     r11,PACA_EXGEN+EX_R11(r13)
        std     r12,PACA_EXGEN+EX_R12(r13)
        mfcr    r9
        mfctr   r10
        std     r10,PACA_EXGEN+EX_R13(r13)
        li      r10,0
        std     r10,PACA_EXGEN+EX_CFAR(r13)
        std     r10,PACA_EXGEN+EX_CTR(r13)
        /*
         * Save the PPR (on systems that support it) before changing to
         * HMT_MEDIUM. That allows the KVM code to save that value into the
         * guest state (it is the guest's PPR value).
         */
BEGIN_FTR_SECTION
        mfspr   r10,SPRN_PPR
        std     r10,PACA_EXGEN+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

        HMT_MEDIUM

#ifdef CONFIG_RELOCATABLE
        /*
         * Requires __LOAD_FAR_HANDLER because kvmppc_hcall lives
         * outside the head section.
         */
        __LOAD_FAR_HANDLER(r10, kvmppc_hcall, real_trampolines)
        mtctr   r10
        bctr
#else
        b       kvmppc_hcall
#endif
#endif

/**
 * Interrupt 0xd00 - Trace Interrupt.
 * This is a synchronous interrupt in response to instruction step or
 * breakpoint faults.
 */
INT_DEFINE_BEGIN(single_step)
        IVEC=0xd00
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
INT_DEFINE_END(single_step)

EXC_REAL_BEGIN(single_step, 0xd00, 0x100)
        GEN_INT_ENTRY single_step, virt=0
EXC_REAL_END(single_step, 0xd00, 0x100)
EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100)
        GEN_INT_ENTRY single_step, virt=1
EXC_VIRT_END(single_step, 0x4d00, 0x100)
EXC_COMMON_BEGIN(single_step_common)
        GEN_COMMON single_step
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      single_step_exception
        b       interrupt_return_srr


/**
 * Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI).
 * This is a synchronous interrupt in response to an MMU fault caused by a
 * guest data access.
 *
 * Handling:
 * This should always get routed to KVM. In radix MMU mode, this is caused
 * by a guest nested radix access that can't be performed due to the
 * partition scope page table. In hash mode, this can be caused by guests
 * running with translation disabled (virtual real mode) or with VPM enabled.
 * KVM will update the page table structures or disallow the access.
 */
INT_DEFINE_BEGIN(h_data_storage)
        IVEC=0xe00
        IHSRR=1
        IDAR=1
        IDSISR=1
        IKVM_REAL=1
        IKVM_VIRT=1
INT_DEFINE_END(h_data_storage)

EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20)
        GEN_INT_ENTRY h_data_storage, virt=0, ool=1
EXC_REAL_END(h_data_storage, 0xe00, 0x20)
EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20)
        GEN_INT_ENTRY h_data_storage, virt=1, ool=1
EXC_VIRT_END(h_data_storage, 0x4e00, 0x20)
EXC_COMMON_BEGIN(h_data_storage_common)
        GEN_COMMON h_data_storage
        addi    r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
        bl      do_bad_page_fault_segv
MMU_FTR_SECTION_ELSE
        bl      unknown_exception
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
        b       interrupt_return_hsrr

/**
 * Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI).
 * This is a synchronous interrupt in response to an MMU fault caused by a
 * guest instruction fetch, similar to HDSI.
 */
INT_DEFINE_BEGIN(h_instr_storage)
        IVEC=0xe20
        IHSRR=1
        IKVM_REAL=1
        IKVM_VIRT=1
INT_DEFINE_END(h_instr_storage)

EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20)
        GEN_INT_ENTRY h_instr_storage, virt=0, ool=1
EXC_REAL_END(h_instr_storage, 0xe20, 0x20)
EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20)
        GEN_INT_ENTRY h_instr_storage, virt=1, ool=1
EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20)
EXC_COMMON_BEGIN(h_instr_storage_common)
        GEN_COMMON h_instr_storage
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      unknown_exception
        b       interrupt_return_hsrr


/**
 * Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt.
 */
INT_DEFINE_BEGIN(emulation_assist)
        IVEC=0xe40
        IHSRR=1
        IKVM_REAL=1
        IKVM_VIRT=1
INT_DEFINE_END(emulation_assist)

EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20)
        GEN_INT_ENTRY emulation_assist, virt=0, ool=1
EXC_REAL_END(emulation_assist, 0xe40, 0x20)
EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20)
        GEN_INT_ENTRY emulation_assist, virt=1, ool=1
EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
EXC_COMMON_BEGIN(emulation_assist_common)
        GEN_COMMON emulation_assist
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      emulation_assist_interrupt
        REST_NVGPRS(r1) /* instruction emulation may change GPRs */
        b       interrupt_return_hsrr


/**
 * Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI).
 * This is an asynchronous interrupt caused by a Hypervisor Maintenance
 * Exception. It is always taken in real mode but uses HSRR registers,
 * unlike SRESET and MCE.
 *
 * It is maskable in hardware by clearing MSR[EE], and partially soft-maskable
 * with IRQS_DISABLED mask (i.e., local_irq_disable()).
 *
 * Handling:
 * This is a special case: it is handled similarly to machine checks, with an
 * initial real mode handler that is not soft-masked, which attempts to fix
 * the problem, followed by a regular handler which is soft-maskable and
 * reports the problem.
 *
 * The emergency stack is used for the early real mode handler.
 *
 * XXX: unclear why MCE and HMI schemes could not be made common, e.g.,
 * either use soft-masking for the MCE, or use irq_work for the HMI.
 *
 * KVM:
 * Unlike MCE, this calls into KVM without calling the real mode handler
 * first.
 */
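/*
 * Informal flow summary of the two-phase scheme implemented below (this
 * mirrors the code that follows; it adds nothing new):
 *
 *   HMI taken (real mode)
 *     -> hmi_exception_early_common, on the emergency stack
 *          bl hmi_exception_realmode
 *          r3 == 0: nothing further to do; restore registers and HRFI
 *                   straight back to the interrupted context
 *          r3 != 0: restore registers, then re-enter via GEN_INT_ENTRY
 *                   hmi_exception as an ordinary soft-maskable interrupt,
 *                   which goes to virtual mode and reports the event
 */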
INT_DEFINE_BEGIN(hmi_exception_early)
        IVEC=0xe60
        IHSRR=1
        IREALMODE_COMMON=1
        ISTACK=0
        IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
        IKVM_REAL=1
INT_DEFINE_END(hmi_exception_early)

INT_DEFINE_BEGIN(hmi_exception)
        IVEC=0xe60
        IHSRR=1
        IMASK=IRQS_DISABLED
        IKVM_REAL=1
INT_DEFINE_END(hmi_exception)

EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20)
        GEN_INT_ENTRY hmi_exception_early, virt=0, ool=1
EXC_REAL_END(hmi_exception, 0xe60, 0x20)
EXC_VIRT_NONE(0x4e60, 0x20)

EXC_COMMON_BEGIN(hmi_exception_early_common)
        __GEN_REALMODE_COMMON_ENTRY hmi_exception_early

        mr      r10,r1                  /* Save r1 */
        ld      r1,PACAEMERGSP(r13)     /* Use emergency stack for realmode */
        subi    r1,r1,INT_FRAME_SIZE    /* alloc stack frame */

        __GEN_COMMON_BODY hmi_exception_early

        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      hmi_exception_realmode
        cmpdi   cr0,r3,0
        bne     1f

        EXCEPTION_RESTORE_REGS hsrr=1
        HRFI_TO_USER_OR_KERNEL

1:
        /*
         * Go to virtual mode and pull the HMI event information from
         * firmware.
         */
        EXCEPTION_RESTORE_REGS hsrr=1
        GEN_INT_ENTRY hmi_exception, virt=0

EXC_COMMON_BEGIN(hmi_exception_common)
        GEN_COMMON hmi_exception
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      handle_hmi_exception
        b       interrupt_return_hsrr


/**
 * Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt.
 * This is an asynchronous interrupt in response to a msgsnd doorbell.
 * Similar to the 0xa00 doorbell but for host rather than guest.
 *
 * CFAR is not required (similar to the 0xa00 doorbell), unless KVM HV
 * is enabled, in which case it may be a guest exit. Most PowerNV kernels
 * include KVM support, so it would be nice if this could be dynamically
 * patched out when KVM is not currently running any guests.
 */
INT_DEFINE_BEGIN(h_doorbell)
        IVEC=0xe80
        IHSRR=1
        IMASK=IRQS_DISABLED
        IKVM_REAL=1
        IKVM_VIRT=1
#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        ICFAR=0
#endif
INT_DEFINE_END(h_doorbell)

EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20)
        GEN_INT_ENTRY h_doorbell, virt=0, ool=1
EXC_REAL_END(h_doorbell, 0xe80, 0x20)
EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
        GEN_INT_ENTRY h_doorbell, virt=1, ool=1
EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
EXC_COMMON_BEGIN(h_doorbell_common)
        GEN_COMMON h_doorbell
        addi    r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
        bl      doorbell_exception
#else
        bl      unknown_async_exception
#endif
        b       interrupt_return_hsrr


/**
 * Interrupt 0xea0 - Hypervisor Virtualization Interrupt.
 * This is an asynchronous interrupt in response to an "external exception".
 * Similar to 0x500 but for host only.
 *
 * Like h_doorbell, CFAR is only required for KVM HV because this can be
 * a guest exit.
 */
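/*
 * Background on the ICFAR decisions above and below: CFAR (the Come-From
 * Address Register) records the source of the most recent taken branch, so
 * it is clobbered by the first branch an entry point takes. A handler that
 * wants it must sample it in the prolog before branching, e.g.:
 *
 *      mfspr   r9,SPRN_CFAR            (must precede any taken branch)
 *
 * That read costs cycles on every interrupt, which is why the asynchronous
 * handlers here set ICFAR=0 except where a KVM HV guest exit may need the
 * value.
 */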
INT_DEFINE_BEGIN(h_virt_irq)
        IVEC=0xea0
        IHSRR=1
        IMASK=IRQS_DISABLED
        IKVM_REAL=1
        IKVM_VIRT=1
#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        ICFAR=0
#endif
INT_DEFINE_END(h_virt_irq)

EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20)
        GEN_INT_ENTRY h_virt_irq, virt=0, ool=1
EXC_REAL_END(h_virt_irq, 0xea0, 0x20)
EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
        GEN_INT_ENTRY h_virt_irq, virt=1, ool=1
EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
EXC_COMMON_BEGIN(h_virt_irq_common)
        GEN_COMMON h_virt_irq
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_IRQ
        b       interrupt_return_hsrr


EXC_REAL_NONE(0xec0, 0x20)
EXC_VIRT_NONE(0x4ec0, 0x20)
EXC_REAL_NONE(0xee0, 0x20)
EXC_VIRT_NONE(0x4ee0, 0x20)


/**
 * Interrupt 0xf00 - Performance Monitor Interrupt (PMI, PMU).
 * This is an asynchronous interrupt in response to a PMU exception.
 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
 * IRQS_PMI_DISABLED mask (NOTE: NOT local_irq_disable()).
 *
 * Handling:
 * This calls into the perf subsystem.
 *
 * Like the watchdog soft-nmi, it appears to Linux as an NMI interrupt, in
 * that it runs under local_irq_disable. However it may be soft-masked in
 * powerpc-specific code.
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, and clear MSR[EE] in the interrupted context.
 *
 * CFAR is not used by perf interrupts, so it is not required.
 */
INT_DEFINE_BEGIN(performance_monitor)
        IVEC=0xf00
        IMASK=IRQS_PMI_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
        ICFAR=0
INT_DEFINE_END(performance_monitor)

EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20)
        GEN_INT_ENTRY performance_monitor, virt=0, ool=1
EXC_REAL_END(performance_monitor, 0xf00, 0x20)
EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
        GEN_INT_ENTRY performance_monitor, virt=1, ool=1
EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
EXC_COMMON_BEGIN(performance_monitor_common)
        GEN_COMMON performance_monitor
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lbz     r4,PACAIRQSOFTMASK(r13)
        cmpdi   r4,IRQS_ENABLED
        bne     1f
        bl      performance_monitor_exception_async
        b       interrupt_return_srr
1:
        bl      performance_monitor_exception_nmi
        /* Clear MSR_RI before setting SRR0 and SRR1. */
        li      r9,0
        mtmsrd  r9,1

        kuap_kernel_restore r9, r10

        EXCEPTION_RESTORE_REGS hsrr=0
        RFI_TO_KERNEL

/**
 * Interrupt 0xf20 - Vector Unavailable Interrupt.
 * This is a synchronous interrupt in response to
 * executing a vector (or altivec) instruction with MSR[VEC]=0.
 * Similar to FP unavailable.
 */
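/*
 * Worked note on the MSR[TS] test used by this and the other "unavailable"
 * handlers: MSR[TS] is a two-bit transaction-state field whose low bit sits
 * at position MSR_TS_LG. The sequence
 *
 *      rldicl. r0,r12,(64-MSR_TS_LG),(64-2)
 *
 * rotates r12 (the saved SRR1/MSR) left by 64-MSR_TS_LG, bringing the field
 * down to bits 1:0, and the mask-begin of 62 clears everything above them,
 * so CR0 is non-zero exactly when the thread was in a transaction.
 */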
INT_DEFINE_BEGIN(altivec_unavailable)
        IVEC=0xf20
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
INT_DEFINE_END(altivec_unavailable)

EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20)
        GEN_INT_ENTRY altivec_unavailable, virt=0, ool=1
EXC_REAL_END(altivec_unavailable, 0xf20, 0x20)
EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20)
        GEN_INT_ENTRY altivec_unavailable, virt=1, ool=1
EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20)
EXC_COMMON_BEGIN(altivec_unavailable_common)
        GEN_COMMON altivec_unavailable
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        beq     1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
        /*
         * Test if the two TM state bits are zero. If non-zero (i.e., userspace
         * was in a transaction), go do TM stuff.
         */
        rldicl. r0,r12,(64-MSR_TS_LG),(64-2)
        bne-    2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
        bl      load_up_altivec
        b       fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:      /* User process was in a transaction */
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      altivec_unavailable_tm
        b       interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      altivec_unavailable_exception
        b       interrupt_return_srr


/**
 * Interrupt 0xf40 - VSX Unavailable Interrupt.
 * This is a synchronous interrupt in response to
 * executing a VSX instruction with MSR[VSX]=0.
 * Similar to FP unavailable.
 */
INT_DEFINE_BEGIN(vsx_unavailable)
        IVEC=0xf40
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
INT_DEFINE_END(vsx_unavailable)

EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20)
        GEN_INT_ENTRY vsx_unavailable, virt=0, ool=1
EXC_REAL_END(vsx_unavailable, 0xf40, 0x20)
EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20)
        GEN_INT_ENTRY vsx_unavailable, virt=1, ool=1
EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20)
EXC_COMMON_BEGIN(vsx_unavailable_common)
        GEN_COMMON vsx_unavailable
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
        beq     1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
        /*
         * Test if the two TM state bits are zero. If non-zero (i.e., userspace
         * was in a transaction), go do TM stuff.
         */
        rldicl. r0,r12,(64-MSR_TS_LG),(64-2)
        bne-    2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
        b       load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:      /* User process was in a transaction */
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      vsx_unavailable_tm
        b       interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      vsx_unavailable_exception
        b       interrupt_return_srr


/**
 * Interrupt 0xf60 - Facility Unavailable Interrupt.
 * This is a synchronous interrupt in response to
 * executing an instruction without access to a facility that can be
 * resolved by the OS (e.g., FSCR, MSR).
 * Similar to FP unavailable.
 */
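/*
 * Sketch of how the missing facility is identified (the real decode is done
 * in C by facility_unavailable_exception). The assumption here, based on the
 * FSCR layout as current kernels decode it, is that the interruption cause
 * (IC) field occupies the top byte of the register:
 */
#if 0
        mfspr   r3,SPRN_FSCR
        srdi    r3,r3,56                /* IC field: which facility trapped */
#endif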
INT_DEFINE_BEGIN(facility_unavailable)
        IVEC=0xf60
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
INT_DEFINE_END(facility_unavailable)

EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20)
        GEN_INT_ENTRY facility_unavailable, virt=0, ool=1
EXC_REAL_END(facility_unavailable, 0xf60, 0x20)
EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20)
        GEN_INT_ENTRY facility_unavailable, virt=1, ool=1
EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
EXC_COMMON_BEGIN(facility_unavailable_common)
        GEN_COMMON facility_unavailable
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      facility_unavailable_exception
        REST_NVGPRS(r1) /* instruction emulation may change GPRs */
        b       interrupt_return_srr


/**
 * Interrupt 0xf80 - Hypervisor Facility Unavailable Interrupt.
 * This is a synchronous interrupt in response to
 * executing an instruction without access to a facility that can only
 * be resolved in HV mode (e.g., HFSCR).
 * Similar to FP unavailable.
 */
INT_DEFINE_BEGIN(h_facility_unavailable)
        IVEC=0xf80
        IHSRR=1
        IKVM_REAL=1
        IKVM_VIRT=1
INT_DEFINE_END(h_facility_unavailable)

EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20)
        GEN_INT_ENTRY h_facility_unavailable, virt=0, ool=1
EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20)
EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20)
        GEN_INT_ENTRY h_facility_unavailable, virt=1, ool=1
EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
EXC_COMMON_BEGIN(h_facility_unavailable_common)
        GEN_COMMON h_facility_unavailable
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      facility_unavailable_exception
        REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
        b       interrupt_return_hsrr


EXC_REAL_NONE(0xfa0, 0x20)
EXC_VIRT_NONE(0x4fa0, 0x20)
EXC_REAL_NONE(0xfc0, 0x20)
EXC_VIRT_NONE(0x4fc0, 0x20)
EXC_REAL_NONE(0xfe0, 0x20)
EXC_VIRT_NONE(0x4fe0, 0x20)

EXC_REAL_NONE(0x1000, 0x100)
EXC_VIRT_NONE(0x5000, 0x100)
EXC_REAL_NONE(0x1100, 0x100)
EXC_VIRT_NONE(0x5100, 0x100)

#ifdef CONFIG_CBE_RAS
INT_DEFINE_BEGIN(cbe_system_error)
        IVEC=0x1200
        IHSRR=1
INT_DEFINE_END(cbe_system_error)

EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)
        GEN_INT_ENTRY cbe_system_error, virt=0
EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
EXC_COMMON_BEGIN(cbe_system_error_common)
        GEN_COMMON cbe_system_error
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      cbe_system_error_exception
        b       interrupt_return_hsrr

#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
#endif

/**
 * Interrupt 0x1300 - Instruction Address Breakpoint Interrupt.
 * This was removed from the ISA before 2.01, which is the earliest
 * 64-bit BookS ISA supported. However, the G5 / 970 implements this
 * interrupt with a non-architected feature available through the support
 * processor interface.
 */
INT_DEFINE_BEGIN(instruction_breakpoint)
        IVEC=0x1300
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_breakpoint)

EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100)
        GEN_INT_ENTRY instruction_breakpoint, virt=0
EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100)
EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100)
        GEN_INT_ENTRY instruction_breakpoint, virt=1
EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100)
EXC_COMMON_BEGIN(instruction_breakpoint_common)
        GEN_COMMON instruction_breakpoint
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      instruction_breakpoint_exception
        b       interrupt_return_srr


EXC_REAL_NONE(0x1400, 0x100)
EXC_VIRT_NONE(0x5400, 0x100)

/**
 * Interrupt 0x1500 - Soft Patch Interrupt
 *
 * Handling:
 * This is an implementation-specific interrupt which can be used for a
 * range of exceptions.
 *
 * This interrupt handler is unique in that it runs the denormal assist
 * code even for guests (and even in guest context) without going to KVM,
 * for speed. POWER9 does not raise denorm exceptions, so this special case
 * could be phased out in future to reduce special cases.
 */
INT_DEFINE_BEGIN(denorm_exception)
        IVEC=0x1500
        IHSRR=1
        IBRANCH_TO_COMMON=0
        IKVM_REAL=1
INT_DEFINE_END(denorm_exception)

EXC_REAL_BEGIN(denorm_exception, 0x1500, 0x100)
        GEN_INT_ENTRY denorm_exception, virt=0
#ifdef CONFIG_PPC_DENORMALISATION
        andis.  r10,r12,(HSRR1_DENORM)@h /* denorm? */
        bne+    denorm_assist
#endif
        GEN_BRANCH_TO_COMMON denorm_exception, virt=0
EXC_REAL_END(denorm_exception, 0x1500, 0x100)
#ifdef CONFIG_PPC_DENORMALISATION
EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
        GEN_INT_ENTRY denorm_exception, virt=1
        andis.  r10,r12,(HSRR1_DENORM)@h /* denorm? */
        bne+    denorm_assist
        GEN_BRANCH_TO_COMMON denorm_exception, virt=1
EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
#else
EXC_VIRT_NONE(0x5500, 0x100)
#endif

#ifdef CONFIG_PPC_DENORMALISATION
TRAMP_REAL_BEGIN(denorm_assist)
BEGIN_FTR_SECTION
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER6 do that here for all FP regs.
 */
        mfmsr   r10
        ori     r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
        xori    r10,r10,(MSR_FE0|MSR_FE1)
        mtmsrd  r10
        sync

        .Lreg=0
        .rept 32
        fmr     .Lreg,.Lreg
        .Lreg=.Lreg+1
        .endr

FTR_SECTION_ELSE
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER7 do that here for the first 32 VSX registers only.
 */
        mfmsr   r10
        oris    r10,r10,MSR_VSX@h
        mtmsrd  r10
        sync

        .Lreg=0
        .rept 32
        XVCPSGNDP(.Lreg,.Lreg,.Lreg)
        .Lreg=.Lreg+1
        .endr

ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
        b       denorm_done
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER8 we need to do that for all 64 VSX registers.
 */
        .Lreg=32
        .rept 32
        XVCPSGNDP(.Lreg,.Lreg,.Lreg)
        .Lreg=.Lreg+1
        .endr

denorm_done:
        mfspr   r11,SPRN_HSRR0
        subi    r11,r11,4
        mtspr   SPRN_HSRR0,r11
        mtcrf   0x80,r9
        ld      r9,PACA_EXGEN+EX_R9(r13)
BEGIN_FTR_SECTION
        ld      r10,PACA_EXGEN+EX_PPR(r13)
        mtspr   SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
        ld      r10,PACA_EXGEN+EX_CFAR(r13)
        mtspr   SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
        li      r10,0
        stb     r10,PACAHSRR_VALID(r13)
        ld      r10,PACA_EXGEN+EX_R10(r13)
        ld      r11,PACA_EXGEN+EX_R11(r13)
        ld      r12,PACA_EXGEN+EX_R12(r13)
        ld      r13,PACA_EXGEN+EX_R13(r13)
        HRFI_TO_UNKNOWN
        b       .
#endif
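/*
 * Note on the denorm_assist exit path above: after the registers have been
 * "moved to themselves" to flush the denormal values, denorm_done winds
 * HSRR0 back by one instruction (subi r11,r11,4) before the hrfid, so the
 * instruction that originally trapped is simply re-executed, this time
 * without needing assistance.
 */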
The "soft NMI" must not use the process 2793 * stack because we want irq disabled sections to avoid touching the stack 2794 * at all (other than PMU interrupts), so use the emergency stack for this, 2795 * and run it entirely with interrupts hard disabled. 2796 */ 2797EXC_COMMON_BEGIN(soft_nmi_common) 2798 mr r10,r1 2799 ld r1,PACAEMERGSP(r13) 2800 subi r1,r1,INT_FRAME_SIZE 2801 __GEN_COMMON_BODY soft_nmi 2802 2803 addi r3,r1,STACK_FRAME_OVERHEAD 2804 bl soft_nmi_interrupt 2805 2806 /* Clear MSR_RI before setting SRR0 and SRR1. */ 2807 li r9,0 2808 mtmsrd r9,1 2809 2810 kuap_kernel_restore r9, r10 2811 2812 EXCEPTION_RESTORE_REGS hsrr=0 2813 RFI_TO_KERNEL 2814 2815#endif /* CONFIG_PPC_WATCHDOG */ 2816 2817/* 2818 * An interrupt came in while soft-disabled. We set paca->irq_happened, then: 2819 * - If it was a decrementer interrupt, we bump the dec to max and return. 2820 * - If it was a doorbell we return immediately since doorbells are edge 2821 * triggered and won't automatically refire. 2822 * - If it was a HMI we return immediately since we handled it in realmode 2823 * and it won't refire. 2824 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return. 2825 * This is called with r10 containing the value to OR to the paca field. 2826 */ 2827.macro MASKED_INTERRUPT hsrr=0 2828 .if \hsrr 2829masked_Hinterrupt: 2830 .else 2831masked_interrupt: 2832 .endif 2833 stw r9,PACA_EXGEN+EX_CCR(r13) 2834#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG 2835 /* 2836 * Ensure there was no previous MUST_HARD_MASK interrupt or 2837 * HARD_DIS setting. If this does fire, the interrupt is still 2838 * masked and MSR[EE] will be cleared on return, so no need to 2839 * panic, but somebody probably enabled MSR[EE] under 2840 * PACA_IRQ_HARD_DIS, mtmsr(mfmsr() | MSR_x) being a common 2841 * cause. 2842 */ 2843 lbz r9,PACAIRQHAPPENED(r13) 2844 andi. r9,r9,(PACA_IRQ_MUST_HARD_MASK|PACA_IRQ_HARD_DIS) 28450: tdnei r9,0 2846 EMIT_WARN_ENTRY 0b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE) 2847#endif 2848 lbz r9,PACAIRQHAPPENED(r13) 2849 or r9,r9,r10 2850 stb r9,PACAIRQHAPPENED(r13) 2851 2852 .if ! \hsrr 2853 cmpwi r10,PACA_IRQ_DEC 2854 bne 1f 2855 LOAD_REG_IMMEDIATE(r9, 0x7fffffff) 2856 mtspr SPRN_DEC,r9 2857#ifdef CONFIG_PPC_WATCHDOG 2858 lwz r9,PACA_EXGEN+EX_CCR(r13) 2859 b soft_nmi_common 2860#else 2861 b 2f 2862#endif 2863 .endif 2864 28651: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK 2866 beq 2f 2867 xori r12,r12,MSR_EE /* clear MSR_EE */ 2868 .if \hsrr 2869 mtspr SPRN_HSRR1,r12 2870 .else 2871 mtspr SPRN_SRR1,r12 2872 .endif 2873 ori r9,r9,PACA_IRQ_HARD_DIS 2874 stb r9,PACAIRQHAPPENED(r13) 28752: /* done */ 2876 li r9,0 2877 .if \hsrr 2878 stb r9,PACAHSRR_VALID(r13) 2879 .else 2880 stb r9,PACASRR_VALID(r13) 2881 .endif 2882 2883 SEARCH_RESTART_TABLE 2884 cmpdi r12,0 2885 beq 3f 2886 .if \hsrr 2887 mtspr SPRN_HSRR0,r12 2888 .else 2889 mtspr SPRN_SRR0,r12 2890 .endif 28913: 2892 2893 ld r9,PACA_EXGEN+EX_CTR(r13) 2894 mtctr r9 2895 lwz r9,PACA_EXGEN+EX_CCR(r13) 2896 mtcrf 0x80,r9 2897 std r1,PACAR1(r13) 2898 ld r9,PACA_EXGEN+EX_R9(r13) 2899 ld r10,PACA_EXGEN+EX_R10(r13) 2900 ld r11,PACA_EXGEN+EX_R11(r13) 2901 ld r12,PACA_EXGEN+EX_R12(r13) 2902 ld r13,PACA_EXGEN+EX_R13(r13) 2903 /* May return to masked low address where r13 is not set up */ 2904 .if \hsrr 2905 HRFI_TO_KERNEL 2906 .else 2907 RFI_TO_KERNEL 2908 .endif 2909 b . 
.endm

TRAMP_REAL_BEGIN(stf_barrier_fallback)
        std     r9,PACA_EXRFI+EX_R9(r13)
        std     r10,PACA_EXRFI+EX_R10(r13)
        sync
        ld      r9,PACA_EXRFI+EX_R9(r13)
        ld      r10,PACA_EXRFI+EX_R10(r13)
        ori     31,31,0
        .rept 14
        b       1f
1:
        .endr
        blr

/* Clobbers r10, r11, ctr */
.macro L1D_DISPLACEMENT_FLUSH
        ld      r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
        ld      r11,PACA_L1D_FLUSH_SIZE(r13)
        srdi    r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
        mtctr   r11
        DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */

        /* order ld/st prior to dcbt stop all streams with flushing */
        sync

        /*
         * The load addresses are at staggered offsets within cachelines,
         * which suits some pipelines better (on others it should not
         * hurt).
         */
1:
        ld      r11,(0x80 + 8)*0(r10)
        ld      r11,(0x80 + 8)*1(r10)
        ld      r11,(0x80 + 8)*2(r10)
        ld      r11,(0x80 + 8)*3(r10)
        ld      r11,(0x80 + 8)*4(r10)
        ld      r11,(0x80 + 8)*5(r10)
        ld      r11,(0x80 + 8)*6(r10)
        ld      r11,(0x80 + 8)*7(r10)
        addi    r10,r10,0x80*8
        bdnz    1b
.endm

TRAMP_REAL_BEGIN(entry_flush_fallback)
        std     r9,PACA_EXRFI+EX_R9(r13)
        std     r10,PACA_EXRFI+EX_R10(r13)
        std     r11,PACA_EXRFI+EX_R11(r13)
        mfctr   r9
        L1D_DISPLACEMENT_FLUSH
        mtctr   r9
        ld      r9,PACA_EXRFI+EX_R9(r13)
        ld      r10,PACA_EXRFI+EX_R10(r13)
        ld      r11,PACA_EXRFI+EX_R11(r13)
        blr

/*
 * The SCV entry flush happens with interrupts enabled, so it must disable
 * them to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common). r10
 * (containing LR) does not need to be preserved here because scv entry
 * puts 0 in the pt_regs; CTR can be clobbered for the same reason.
 */
TRAMP_REAL_BEGIN(scv_entry_flush_fallback)
        li      r10,0
        mtmsrd  r10,1
        lbz     r10,PACAIRQHAPPENED(r13)
        ori     r10,r10,PACA_IRQ_HARD_DIS
        stb     r10,PACAIRQHAPPENED(r13)
        std     r11,PACA_EXRFI+EX_R11(r13)
        L1D_DISPLACEMENT_FLUSH
        ld      r11,PACA_EXRFI+EX_R11(r13)
        li      r10,MSR_RI
        mtmsrd  r10,1
        blr

TRAMP_REAL_BEGIN(rfi_flush_fallback)
        SET_SCRATCH0(r13);
        GET_PACA(r13);
        std     r1,PACA_EXRFI+EX_R12(r13)
        ld      r1,PACAKSAVE(r13)
        std     r9,PACA_EXRFI+EX_R9(r13)
        std     r10,PACA_EXRFI+EX_R10(r13)
        std     r11,PACA_EXRFI+EX_R11(r13)
        mfctr   r9
        L1D_DISPLACEMENT_FLUSH
        mtctr   r9
        ld      r9,PACA_EXRFI+EX_R9(r13)
        ld      r10,PACA_EXRFI+EX_R10(r13)
        ld      r11,PACA_EXRFI+EX_R11(r13)
        ld      r1,PACA_EXRFI+EX_R12(r13)
        GET_SCRATCH0(r13);
        rfid

TRAMP_REAL_BEGIN(hrfi_flush_fallback)
        SET_SCRATCH0(r13);
        GET_PACA(r13);
        std     r1,PACA_EXRFI+EX_R12(r13)
        ld      r1,PACAKSAVE(r13)
        std     r9,PACA_EXRFI+EX_R9(r13)
        std     r10,PACA_EXRFI+EX_R10(r13)
        std     r11,PACA_EXRFI+EX_R11(r13)
        mfctr   r9
        L1D_DISPLACEMENT_FLUSH
        mtctr   r9
        ld      r9,PACA_EXRFI+EX_R9(r13)
        ld      r10,PACA_EXRFI+EX_R10(r13)
        ld      r11,PACA_EXRFI+EX_R11(r13)
        ld      r1,PACA_EXRFI+EX_R12(r13)
        GET_SCRATCH0(r13);
        hrfid
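/*
 * Worked example of the loop-count arithmetic in L1D_DISPLACEMENT_FLUSH:
 * each iteration touches 8 cache lines of 128 bytes (1 KiB), so the
 * iteration count is flush_size >> (7 + 3). Assuming a 32 KiB L1D
 * displacement area:
 *
 *      32768 >> 10 = 32 iterations x 1 KiB = 32 KiB walked,
 *
 * which displaces the entire L1D with the fallback area's contents.
 */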
TRAMP_REAL_BEGIN(rfscv_flush_fallback)
        /* system call volatile */
        mr      r7,r13
        GET_PACA(r13);
        mr      r8,r1
        ld      r1,PACAKSAVE(r13)
        mfctr   r9
        ld      r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
        ld      r11,PACA_L1D_FLUSH_SIZE(r13)
        srdi    r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
        mtctr   r11
        DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */

        /* order ld/st prior to dcbt stop all streams with flushing */
        sync

        /*
         * The load addresses are at staggered offsets within cachelines,
         * which suits some pipelines better (on others it should not
         * hurt).
         */
1:
        ld      r11,(0x80 + 8)*0(r10)
        ld      r11,(0x80 + 8)*1(r10)
        ld      r11,(0x80 + 8)*2(r10)
        ld      r11,(0x80 + 8)*3(r10)
        ld      r11,(0x80 + 8)*4(r10)
        ld      r11,(0x80 + 8)*5(r10)
        ld      r11,(0x80 + 8)*6(r10)
        ld      r11,(0x80 + 8)*7(r10)
        addi    r10,r10,0x80*8
        bdnz    1b

        mtctr   r9
        li      r9,0
        li      r10,0
        li      r11,0
        mr      r1,r8
        mr      r13,r7
        RFSCV

USE_TEXT_SECTION()

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
kvm_interrupt:
        /*
         * The conditional branch in KVMTEST can't reach all the way,
         * make a stub.
         */
        b       kvmppc_interrupt
#endif

_GLOBAL(do_uaccess_flush)
        UACCESS_FLUSH_FIXUP_SECTION
        nop
        nop
        nop
        blr
        L1D_DISPLACEMENT_FLUSH
        blr
_ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
EXPORT_SYMBOL(do_uaccess_flush)


MASKED_INTERRUPT
MASKED_INTERRUPT hsrr=1

USE_FIXED_SECTION(virt_trampolines)
        /*
         * All code below __end_soft_masked is treated as soft-masked. If
         * any code runs here with MSR[EE]=1, it must then cope with pending
         * soft interrupt being raised (i.e., by ensuring it is replayed).
         *
         * The __end_interrupts marker must be past the out-of-line (OOL)
         * handlers, so that they are copied to real address 0x100 when running
         * a relocatable kernel. This ensures they can be reached from the short
         * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
         * directly, without using LOAD_HANDLER().
         */
        .align  7
        .globl  __end_interrupts
__end_interrupts:
DEFINE_FIXED_SYMBOL(__end_interrupts, virt_trampolines)

CLOSE_FIXED_SECTION(real_vectors);
CLOSE_FIXED_SECTION(real_trampolines);
CLOSE_FIXED_SECTION(virt_vectors);
CLOSE_FIXED_SECTION(virt_trampolines);

USE_TEXT_SECTION()

/* MSR[RI] should be clear because this uses SRR[01] */
_GLOBAL(enable_machine_check)
        mflr    r0
        bcl     20,31,$+4
0:      mflr    r3
        addi    r3,r3,(1f - 0b)
        mtspr   SPRN_SRR0,r3
        mfmsr   r3
        ori     r3,r3,MSR_ME
        mtspr   SPRN_SRR1,r3
        RFI_TO_KERNEL
1:      mtlr    r0
        blr

/* MSR[RI] should be clear because this uses SRR[01] */
disable_machine_check:
        mflr    r0
        bcl     20,31,$+4
0:      mflr    r3
        addi    r3,r3,(1f - 0b)
        mtspr   SPRN_SRR0,r3
        mfmsr   r3
        li      r4,MSR_ME
        andc    r3,r3,r4
        mtspr   SPRN_SRR1,r3
        RFI_TO_KERNEL
1:      mtlr    r0
        blr
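/*
 * Note on the "bcl 20,31,$+4" idiom used by the two routines above: it is
 * an always-taken branch-and-link to the very next instruction, so its only
 * effect is to load LR with the current position (on most implementations
 * this exact form is special-cased as a PC read rather than a call, so it
 * does not disturb the return-address predictor). The mflr/addi pair then
 * turns that into the absolute address of the local 1: label:
 *
 *      bcl     20,31,$+4       <- LR = address of the following instruction
 *      0:      mflr    r3
 *      addi    r3,r3,(1f - 0b) <- r3 = absolute address of label 1:
 *
 * which is what gets placed in SRR0 for the RFI.
 */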