/* ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
 */

#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>
#include <asm/hypervisor.h>
#include <asm/cpudata.h>

	/* Basically, most of the Spitfire vs. Cheetah madness
	 * has to do with the fact that Cheetah does not support
	 * IMMU flushes out of the secondary context.  Someone needs
	 * to throw a south lake birthday party for the folks
	 * in Microelectronics who refused to fix this shit.
	 */

	/* This file is meant to be read efficiently by the CPU, not humans.
	 * Try not to screw this up for anybody...
	 * [translated from Polish]
	 */
	.text
	.align		32
	.globl		__flush_tlb_mm

	/* Flush all TLB entries for one address space (Spitfire version).
	 *
	 * This is the generic entry point; at boot it is overwritten in
	 * place by cheetah_patch_cachetlbops() or
	 * hypervisor_patch_cachetlbops() (see below), which copy the
	 * CPU-specific variant over it with tlb_patch_one().  The trailing
	 * nops pad the slot so the larger replacements fit.
	 */
__flush_tlb_mm:		/* 18 insns */
	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
	ldxa		[%o1] ASI_DMMU, %g2
	cmp		%g2, %o0		! ctx already in SECONDARY_CONTEXT?
	bne,pn		%icc, __spitfire_flush_tlb_mm_slow
	 mov		0x50, %g3		! demap-context op, secondary ctx
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	sethi		%hi(KERNBASE), %g3
	flush		%g3			! synchronize the demap stores
	retl
	 nop
	nop					! padding for boot-time patching
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.align		32
	.globl		__flush_tlb_page

	/* Flush one TLB entry for (context, vaddr) with interrupts disabled.
	 * Bit 0 of %o1 requests an I-TLB demap in addition to the D-TLB one.
	 */
__flush_tlb_page:	/* 22 insns */
	/* %o0 = context, %o1 = vaddr */
	rdpr		%pstate, %g7
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, %pstate		! disable interrupts while we
						! borrow the secondary context
	mov		SECONDARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2	! save current secondary ctx
	stxa		%o0, [%o4] ASI_DMMU
	andcc		%o1, 1, %g0		! bit 0: also flush I-TLB?
	andn		%o1, 1, %o3
	be,pn		%icc, 1f
	 or		%o3, 0x10, %o3		! select secondary ctx for demap
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
1:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	stxa		%g2, [%o4] ASI_DMMU	! restore secondary ctx
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	retl
	 wrpr		%g7, 0x0, %pstate	! re-enable interrupts
	nop					! padding for boot-time patching
	nop
	nop
	nop

	.align		32
	.globl		__flush_tlb_pending

	/* Flush a batch of pending (vaddr | immu-bit) entries for one
	 * context.  Walks vaddrs[] backwards from index nr-1 to 0.
	 */
__flush_tlb_pending:	/* 26 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	rdpr		%pstate, %g7
	sllx		%o1, 3, %o1		! nr entries -> byte offset
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, %pstate
	mov		SECONDARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2	! save current secondary ctx
	stxa		%o0, [%o4] ASI_DMMU
1:	sub		%o1, (1 << 3), %o1
	ldx		[%o2 + %o1], %o3
	andcc		%o3, 1, %g0		! bit 0: also flush I-TLB?
	andn		%o3, 1, %o3
	be,pn		%icc, 2f
	 or		%o3, 0x10, %o3		! select secondary ctx for demap
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	brnz,pt		%o1, 1b
	 nop
	stxa		%g2, [%o4] ASI_DMMU	! restore secondary ctx
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	retl
	 wrpr		%g7, 0x0, %pstate
	nop					! padding for boot-time patching
	nop
	nop
	nop

	.align		32
	.globl		__flush_tlb_kernel_range

	/* Demap every page in [start, end) from the nucleus context,
	 * both D-TLB and I-TLB.  Loop runs from (end - start - PAGE_SIZE)
	 * down to 0, added to the start address.
	 */
__flush_tlb_kernel_range:	/* 16 insns */
	/* %o0=start, %o1=end */
	cmp		%o0, %o1
	be,pn		%xcc, 2f		! empty range, nothing to do
	 sethi		%hi(PAGE_SIZE), %o4
	sub		%o1, %o0, %o3
	sub		%o3, %o4, %o3		! %o3 = last page offset
	or		%o0, 0x20, %o0		! Nucleus
1:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%o3, 1b
	 sub		%o3, %o4, %o3
2:	sethi		%hi(KERNBASE), %o3
	flush		%o3
	retl
	 nop
	nop

	/* Slow path of __flush_tlb_mm: the wanted context is not the
	 * current secondary context, so install it (interrupts off),
	 * demap, then restore the old value saved in %g2 by the caller.
	 * %o0/%o1/%g2/%g3 are live from __flush_tlb_mm above.
	 */
__spitfire_flush_tlb_mm_slow:
	rdpr		%pstate, %g1
	wrpr		%g1, PSTATE_IE, %pstate	! toggle IE off
	stxa		%o0, [%o1] ASI_DMMU	! install wanted context
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	flush		%g6
	stxa		%g2, [%o1] ASI_DMMU	! restore previous context
	sethi		%hi(KERNBASE), %o1
	flush		%o1
	retl
	 wrpr		%g1, 0, %pstate

/*
 * The following code flushes one page_size worth.
 */
	.section .kprobes.text, "ax"
	.align		32
	.globl		__flush_icache_page

	/* Flush the I-cache for one physical page by issuing a flush
	 * every 32 bytes across the page, via the physical page's
	 * linear kernel mapping (PAGE_OFFSET + paddr).
	 */
__flush_icache_page:	/* %o0 = phys_page */
	srlx		%o0, PAGE_SHIFT, %o0
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%o0, PAGE_SHIFT, %o0	! page-align the physical addr
	sethi		%hi(PAGE_SIZE), %g2
	sllx		%g1, 32, %g1		! %g1 = PAGE_OFFSET
	add		%o0, %g1, %o0		! linear-mapped virtual address
1:	subcc		%g2, 32, %g2
	bne,pt		%icc, 1b
	 flush		%o0 + %g2
	retl
	 nop

#ifdef DCACHE_ALIASING_POSSIBLE

#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3

	/* This routine is Spitfire specific so the hardcoded
	 * D-cache size and line-size are OK.
	 */
	.align		64
	.globl		__flush_dcache_page

	/* Walk every line of the 16K direct-mapped D-cache and invalidate
	 * those whose tag matches this page; optionally chain to
	 * __flush_icache_page when %o1 is non-zero.
	 */
__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%g1, 32, %g1
	sub		%o0, %g1, %o0			! physical address
	srlx		%o0, 11, %o0			! make D-cache TAG
	sethi		%hi(1 << 14), %o2		! D-cache size
	sub		%o2, (1 << 5), %o2		! D-cache line size
1:	ldxa		[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
	andcc		%o3, DTAG_MASK, %g0		! Valid?
	be,pn		%xcc, 2f			! Nope, branch
	 andn		%o3, DTAG_MASK, %o3		! Clear valid bits
	cmp		%o3, %o0			! TAG match?
	bne,pt		%xcc, 2f			! Nope, branch
	 nop
	stxa		%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
	membar		#Sync
2:	brnz,pt		%o2, 1b
	 sub		%o2, (1 << 5), %o2		! D-cache line size

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt		%o1, __flush_icache_page
	 sllx		%o0, 11, %o0			! tag back to phys addr
	retl
	 nop

#endif /* DCACHE_ALIASING_POSSIBLE */

	.previous

	/* Cheetah specific versions, patched at boot time.
 */

	/* Cheetah cannot do IMMU demaps from the secondary context, so
	 * these variants borrow PRIMARY_CONTEXT instead and raise %tl to 1
	 * so no TLB-miss traps recurse while the primary context register
	 * holds the borrowed value.  Copied over the generic routines by
	 * cheetah_patch_cachetlbops() below.
	 */
__cheetah_flush_tlb_mm: /* 19 insns */
	rdpr		%pstate, %g7
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate	! disable interrupts
	wrpr		%g0, 1, %tl		! run at trap level 1
	mov		PRIMARY_CONTEXT, %o2
	mov		0x40, %g3		! demap-context op, primary ctx
	ldxa		[%o2] ASI_DMMU, %g2	! save current primary ctx
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o1
	sllx		%o1, CTX_PGSZ1_NUC_SHIFT, %o1
	or		%o0, %o1, %o0	/* Preserve nucleus page size fields */
	stxa		%o0, [%o2] ASI_DMMU
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	stxa		%g2, [%o2] ASI_DMMU	! restore primary ctx
	sethi		%hi(KERNBASE), %o2
	flush		%o2
	wrpr		%g0, 0, %tl		! back to trap level 0
	retl
	 wrpr		%g7, 0x0, %pstate	! re-enable interrupts

	/* Cheetah variant of __flush_tlb_page; same vaddr bit-0
	 * "also flush I-TLB" convention, but via the primary context.
	 */
__cheetah_flush_tlb_page:	/* 22 insns */
	/* %o0 = context, %o1 = vaddr */
	rdpr		%pstate, %g7
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate
	wrpr		%g0, 1, %tl
	mov		PRIMARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2	! save current primary ctx
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
	stxa		%o0, [%o4] ASI_DMMU
	andcc		%o1, 1, %g0		! bit 0: also flush I-TLB?
	be,pn		%icc, 1f
	 andn		%o1, 1, %o3
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
1:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	stxa		%g2, [%o4] ASI_DMMU	! restore primary ctx
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	wrpr		%g0, 0, %tl
	retl
	 wrpr		%g7, 0x0, %pstate

	/* Cheetah variant of __flush_tlb_pending: batch demap via the
	 * primary context at trap level 1.
	 */
__cheetah_flush_tlb_pending:	/* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	rdpr		%pstate, %g7
	sllx		%o1, 3, %o1		! nr entries -> byte offset
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate
	wrpr		%g0, 1, %tl
	mov		PRIMARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2	! save current primary ctx
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
	stxa		%o0, [%o4] ASI_DMMU
1:	sub		%o1, (1 << 3), %o1
	ldx		[%o2 + %o1], %o3
	andcc		%o3, 1, %g0		! bit 0: also flush I-TLB?
	be,pn		%icc, 2f
	 andn		%o3, 1, %o3
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	brnz,pt		%o1, 1b
	 nop
	stxa		%g2, [%o4] ASI_DMMU	! restore primary ctx
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	wrpr		%g0, 0, %tl
	retl
	 wrpr		%g7, 0x0, %pstate

#ifdef DCACHE_ALIASING_POSSIBLE
	/* Invalidate the D-cache for one page via the displacement-flush
	 * ASI, one 32-byte line at a time.
	 */
__cheetah_flush_dcache_page: /* 11 insns */
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%g1, 32, %g1
	sub		%o0, %g1, %o0		! kaddr -> physical address
	sethi		%hi(PAGE_SIZE), %o4
1:	subcc		%o4, (1 << 5), %o4
	stxa		%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	 nop
	retl		/* I-cache flush never needed on Cheetah, see callers. */
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	/* Hypervisor specific versions, patched at boot time. */

	/* Common TL=0 error path for the sun4v routines below: report the
	 * failed hypervisor TLB operation (%o0=error, %o1=op) and return.
	 */
__hypervisor_tlb_tl0_error:
	save		%sp, -192, %sp
	mov		%i0, %o0
	call		hypervisor_tlbop_error
	 mov		%i1, %o1
	ret
	 restore

	/* sun4v: demap an entire mmu context via the fast-trap API. */
__hypervisor_flush_tlb_mm: /* 10 insns */
	mov		%o0, %o2	/* ARG2: mmu context */
	mov		0, %o0		/* ARG0: CPU lists unimplemented */
	mov		0, %o1		/* ARG1: CPU lists unimplemented */
	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
	mov		HV_FAST_MMU_DEMAP_CTX, %o5
	ta		HV_FAST_TRAP
	brnz,pn		%o0, __hypervisor_tlb_tl0_error
	 mov		HV_FAST_MMU_DEMAP_CTX, %o1	! op code for error report
	retl
	 nop

	/* sun4v: unmap one (context, vaddr) pair.  The low bits of the
	 * incoming vaddr (the I/D encoding) are masked off before the trap.
	 */
__hypervisor_flush_tlb_page: /* 11 insns */
	/* %o0 = context, %o1 = vaddr */
	mov		%o0, %g2
	mov		%o1, %o0	/* ARG0: vaddr + IMMU-bit */
	mov		%g2, %o1	/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0
	sllx		%o0, PAGE_SHIFT, %o0	! page-align (strip flag bits)
	ta		HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn		%o0, __hypervisor_tlb_tl0_error
	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1	! op code for error report
	retl
	 nop

	/* sun4v: unmap a batch of pending vaddrs for one context, walking
	 * vaddrs[] backwards; one unmap-addr trap per entry.
	 */
__hypervisor_flush_tlb_pending: /* 16 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	sllx		%o1, 3, %g1		! nr entries -> byte offset
	mov		%o2, %g2
	mov		%o0, %g3
1:	sub		%g1, (1 << 3), %g1
	ldx		[%g2 + %g1], %o0	/* ARG0: vaddr + IMMU-bit */
	mov		%g3, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2		/* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0
	sllx		%o0, PAGE_SHIFT, %o0	! page-align (strip flag bits)
	ta		HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn		%o0, __hypervisor_tlb_tl0_error
	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1	! op code for error report
	brnz,pt		%g1, 1b
	 nop
	retl
	 nop

	/* sun4v: unmap every page in [start, end) from context 0 via
	 * one unmap-addr trap per page.
	 */
__hypervisor_flush_tlb_kernel_range: /* 16 insns */
	/* %o0=start, %o1=end */
	cmp		%o0, %o1
	be,pn		%xcc, 2f		! empty range, nothing to do
	 sethi		%hi(PAGE_SIZE), %g3
	mov		%o0, %g1
	sub		%o1, %g1, %g2
	sub		%g2, %g3, %g2		! %g2 = last page offset
1:	add		%g1, %g2, %o0	/* ARG0: virtual address */
	mov		0, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	ta		HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn		%o0, __hypervisor_tlb_tl0_error
	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1	! op code for error report
	brnz,pt		%g2, 1b
	 sub		%g2, %g3, %g2
2:	retl
	 nop

#ifdef DCACHE_ALIASING_POSSIBLE
	/* XXX Niagara and friends have an 8K cache, so no aliasing is
	 * XXX possible, but nothing explicit in the Hypervisor API
	 * XXX guarantees this.
	 */
__hypervisor_flush_dcache_page:	/* 2 insns */
	retl				! no-op: D-cache flush unnecessary here
	 nop
#endif

	/* Copy %o2 instructions (32-bit words) from %o1 to %o0, flushing
	 * the I-cache line for each patched word.  Used below to replace
	 * the generic flush routines with CPU-specific ones at boot.
	 */
tlb_patch_one:
1:	lduw		[%o1], %g1
	stw		%g1, [%o0]
	flush		%o0			! keep I-cache coherent
	subcc		%o2, 1, %o2
	add		%o1, 4, %o1
	bne,pt		%icc, 1b
	 add		%o0, 4, %o0
	retl
	 nop

	.globl		cheetah_patch_cachetlbops

	/* Overwrite the generic Spitfire flush routines with the Cheetah
	 * versions.  The instruction counts passed to tlb_patch_one must
	 * match the "/* N insns *" annotations on the source routines.
	 */
cheetah_patch_cachetlbops:
	save		%sp, -128, %sp

	sethi		%hi(__flush_tlb_mm), %o0
	or		%o0, %lo(__flush_tlb_mm), %o0
	sethi		%hi(__cheetah_flush_tlb_mm), %o1
	or		%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		19, %o2

	sethi		%hi(__flush_tlb_page), %o0
	or		%o0, %lo(__flush_tlb_page), %o0
	sethi		%hi(__cheetah_flush_tlb_page), %o1
	or		%o1, %lo(__cheetah_flush_tlb_page), %o1
	call		tlb_patch_one
	 mov		22, %o2

	sethi		%hi(__flush_tlb_pending), %o0
	or		%o0, %lo(__flush_tlb_pending), %o0
	sethi		%hi(__cheetah_flush_tlb_pending), %o1
	or		%o1, %lo(__cheetah_flush_tlb_pending), %o1
	call		tlb_patch_one
	 mov		27, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi		%hi(__flush_dcache_page), %o0
	or		%o0, %lo(__flush_dcache_page), %o0
	sethi		%hi(__cheetah_flush_dcache_page), %o1
	or		%o1, %lo(__cheetah_flush_dcache_page), %o1
	call		tlb_patch_one
	 mov		11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

	ret
	 restore

#ifdef CONFIG_SMP
	/* These are all called by the slaves of a cross call, at
	 * trap level 1, with interrupts fully disabled.
	 *
	 * Register usage:
	 * %g5	mm->context	(all tlb flushes)
	 * %g1	address arg 1	(tlb page and range flushes)
	 * %g7	address arg 2	(tlb range flush only)
	 *
	 * %g6	scratch 1
	 * %g2	scratch 2
	 * %g3	scratch 3
	 * %g4	scratch 4
	 */
	.align		32
	.globl		xcall_flush_tlb_mm

	/* Cross-call slave: demap all TLB entries for the context in %g5,
	 * preserving the nucleus page-size fields of the primary context
	 * register.  Ends with retry to resume the interrupted instruction.
	 */
xcall_flush_tlb_mm:	/* 21 insns */
	mov		PRIMARY_CONTEXT, %g2
	ldxa		[%g2] ASI_DMMU, %g3	! save current primary ctx
	srlx		%g3, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or		%g5, %g4, %g5	/* Preserve nucleus page size fields */
	stxa		%g5, [%g2] ASI_DMMU
	mov		0x40, %g4		! demap-context op, primary ctx
	stxa		%g0, [%g4] ASI_DMMU_DEMAP
	stxa		%g0, [%g4] ASI_IMMU_DEMAP
	stxa		%g3, [%g2] ASI_DMMU	! restore primary ctx
	retry
	nop					! padding for boot-time patching
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.globl		xcall_flush_tlb_page

	/* Cross-call slave: demap one page; bit 0 of %g1 requests an
	 * I-TLB demap as well.
	 */
xcall_flush_tlb_page:	/* 17 insns */
	/* %g5=context, %g1=vaddr */
	mov		PRIMARY_CONTEXT, %g4
	ldxa		[%g4] ASI_DMMU, %g2	! save current primary ctx
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or		%g5, %g4, %g5		! keep nucleus page size fields
	mov		PRIMARY_CONTEXT, %g4
	stxa		%g5, [%g4] ASI_DMMU
	andcc		%g1, 0x1, %g0		! bit 0: also flush I-TLB?
	be,pn		%icc, 2f
	 andn		%g1, 0x1, %g5
	stxa		%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa		%g0, [%g5] ASI_DMMU_DEMAP
	membar		#Sync
	stxa		%g2, [%g4] ASI_DMMU	! restore primary ctx
	retry
	nop					! padding for boot-time patching
	nop

	.globl		xcall_flush_tlb_kernel_range

	/* Cross-call slave: demap [start, end) from the nucleus context;
	 * start/end arrive unaligned and are rounded to page boundaries.
	 */
xcall_flush_tlb_kernel_range:	/* 25 insns */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1		! page-align start
	andn		%g7, %g2, %g7		! page-align end
	sub		%g7, %g1, %g3
	add		%g2, 1, %g2		! %g2 = PAGE_SIZE
	sub		%g3, %g2, %g3		! %g3 = last page offset
	or		%g1, 0x20, %g1		! Nucleus
1:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%g3, 1b
	 sub		%g3, %g2, %g3
	retry
	nop					! padding for boot-time patching
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	/* This runs in a very controlled environment, so we do
	 * not need to worry about BH races etc.
	 */
	.globl		xcall_sync_tick
xcall_sync_tick:

	/* On sun4v the alternate-globals write below is patched to nops
	 * via the .sun4v_2insn_patch table entry.
	 */
661:	rdpr		%pstate, %g2
	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	rdpr		%pil, %g2
	wrpr		%g0, PIL_NORMAL_MAX, %pil
	sethi		%hi(109f), %g7		! build return PC for etrap_irq
	b,pt		%xcc, etrap_irq
109:	 or		%g7, %lo(109b), %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	call		smp_synchronize_tick_client
	 nop
	b		rtrap_xcall
	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

	.globl		xcall_fetch_glob_regs

	/* Cross-call slave: snapshot this CPU's trap state and registers
	 * into global_reg_snapshot[cpu] (64 bytes per CPU, hence the
	 * "sllx %g2, 6" index scaling).
	 */
xcall_fetch_glob_regs:
	sethi		%hi(global_reg_snapshot), %g1
	or		%g1, %lo(global_reg_snapshot), %g1
	__GET_CPUID(%g2)
	sllx		%g2, 6, %g3		! cpu * sizeof(snapshot entry)
	add		%g1, %g3, %g1
	rdpr		%tstate, %g7
	stx		%g7, [%g1 + GR_SNAP_TSTATE]
	rdpr		%tpc, %g7
	stx		%g7, [%g1 + GR_SNAP_TPC]
	rdpr		%tnpc, %g7
	stx		%g7, [%g1 + GR_SNAP_TNPC]
	stx		%o7, [%g1 + GR_SNAP_O7]
	stx		%i7, [%g1 + GR_SNAP_I7]
	/* Don't try this at home kids...
 */
	/* Rotate to the previous register window just long enough to read
	 * its %i7 (the caller's return address), then rotate back.
	 */
	rdpr		%cwp, %g3
	sub		%g3, 1, %g7
	wrpr		%g7, %cwp
	mov		%i7, %g7
	wrpr		%g3, %cwp
	stx		%g7, [%g1 + GR_SNAP_RPC]
	sethi		%hi(trap_block), %g7
	or		%g7, %lo(trap_block), %g7
	sllx		%g2, TRAP_BLOCK_SZ_SHIFT, %g2
	add		%g7, %g2, %g7		! this CPU's trap_block entry
	ldx		[%g7 + TRAP_PER_CPU_THREAD], %g3
	stx		%g3, [%g1 + GR_SNAP_THREAD]
	retry

#ifdef DCACHE_ALIASING_POSSIBLE
	.align		32
	.globl		xcall_flush_dcache_page_cheetah

	/* Cross-call slave: invalidate the D-cache for one physical page,
	 * 32 bytes at a time, via the displacement-flush ASI.
	 */
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	sethi		%hi(PAGE_SIZE), %g3
1:	subcc		%g3, (1 << 5), %g3
	stxa		%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	 nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	.globl		xcall_flush_dcache_page_spitfire

	/* Cross-call slave: tag-walk the 16K D-cache invalidating lines
	 * for this page, then (when %g5 says the page has a mapping)
	 * flush the I-cache over the page's kernel virtual address.
	 */
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL)  */
#ifdef DCACHE_ALIASING_POSSIBLE
	srlx		%g1, (13 - 2), %g1	! Form tag comparitor
	sethi		%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
	sub		%g3, (1 << 5), %g3	! D$ linesize == 32
1:	ldxa		[%g3] ASI_DCACHE_TAG, %g2
	andcc		%g2, 0x3, %g0		! line valid?
	be,pn		%xcc, 2f
	 andn		%g2, 0x3, %g2		! strip valid bits for compare
	cmp		%g2, %g1

	bne,pt		%xcc, 2f		! tag mismatch, skip line
	 nop
	stxa		%g0, [%g3] ASI_DCACHE_TAG
	membar		#Sync
2:	cmp		%g3, 0
	bne,pt		%xcc, 1b
	 sub		%g3, (1 << 5), %g3

	brz,pn		%g5, 2f			! no mapping: skip I-cache flush
#endif /* DCACHE_ALIASING_POSSIBLE */
	sethi		%hi(PAGE_SIZE), %g3

1:	flush		%g7
	subcc		%g3, (1 << 5), %g3
	bne,pt		%icc, 1b
	 add		%g7, (1 << 5), %g7

2:	retry
	nop
	nop

	/* %g5:	error
	 * %g6:	tlb op
	 */
	/* Error path for the hypervisor xcall routines below: enter the
	 * kernel via etrap and report (error, op) from locals %l4/%l5
	 * (the etrap-saved copies of %g4/%g5).
	 */
__hypervisor_tlb_xcall_error:
	mov	%g5, %g4
	mov	%g6, %g5
	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	mov	%l4, %o0
	call	hypervisor_tlbop_error_xcall
	 mov	%l5, %o1
	ba,a,pt	%xcc, rtrap

	.globl		__hypervisor_xcall_flush_tlb_mm

	/* sun4v cross-call slave: demap a whole context.  The %o
	 * registers belong to the interrupted code, so they are stashed
	 * in globals around the hypervisor trap and restored afterwards.
	 */
__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
	mov		%o0, %g2		! save caller's %o registers
	mov		%o1, %g3
	mov		%o2, %g4
	mov		%o3, %g1
	mov		%o5, %g7
	clr		%o0		/* ARG0: CPU lists unimplemented */
	clr		%o1		/* ARG1: CPU lists unimplemented */
	mov		%g5, %o2	/* ARG2: mmu context */
	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
	mov		HV_FAST_MMU_DEMAP_CTX, %o5
	ta		HV_FAST_TRAP
	mov		HV_FAST_MMU_DEMAP_CTX, %g6	! op code for error path
	brnz,pn		%o0, __hypervisor_tlb_xcall_error
	 mov		%o0, %g5		! error code for error path
	mov		%g2, %o0		! restore caller's %o registers
	mov		%g3, %o1
	mov		%g4, %o2
	mov		%g1, %o3
	mov		%g7, %o5
	membar		#Sync
	retry

	.globl		__hypervisor_xcall_flush_tlb_page

	/* sun4v cross-call slave: unmap one page of one context. */
__hypervisor_xcall_flush_tlb_page: /* 17 insns */
	/* %g5=ctx, %g1=vaddr */
	mov		%o0, %g2		! save caller's %o registers
	mov		%o1, %g3
	mov		%o2, %g4
	mov		%g1, %o0	/* ARG0: virtual address */
	mov		%g5, %o1	/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0
	sllx		%o0, PAGE_SHIFT, %o0	! page-align (strip flag bits)
	ta		HV_MMU_UNMAP_ADDR_TRAP
	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6	! op code for error path
	brnz,a,pn	%o0, __hypervisor_tlb_xcall_error
	 mov		%o0, %g5	! annulled unless branch taken
	mov		%g2, %o0		! restore caller's %o registers
	mov		%g3, %o1
	mov		%g4, %o2
	membar		#Sync
	retry

	.globl		__hypervisor_xcall_flush_tlb_kernel_range

	/* sun4v cross-call slave: unmap [start, end) from context 0,
	 * one unmap-addr trap per page, preserving the interrupted
	 * code's %o registers around the loop.
	 */
__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1		! page-align start
	andn		%g7, %g2, %g7		! page-align end
	sub		%g7, %g1, %g3
	add		%g2, 1, %g2		! %g2 = PAGE_SIZE
	sub		%g3, %g2, %g3		! %g3 = last page offset
	mov		%o0, %g2		! save caller's %o registers
	mov		%o1, %g4
	mov		%o2, %g7
1:	add		%g1, %g3, %o0	/* ARG0: virtual address */
	mov		0, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	ta		HV_MMU_UNMAP_ADDR_TRAP
	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6	! op code for error path
	brnz,pn		%o0, __hypervisor_tlb_xcall_error
	 mov		%o0, %g5	! error code for error path
	sethi		%hi(PAGE_SIZE), %o2
	brnz,pt		%g3, 1b
	 sub		%g3, %o2, %g3
	mov		%g2, %o0		! restore caller's %o registers
	mov		%g4, %o1
	mov		%g7, %o2
	membar		#Sync
	retry

	/* These just get rescheduled to PIL vectors. */
	.globl		xcall_call_function
xcall_call_function:
	wr		%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl		xcall_call_function_single
xcall_call_function_single:
	wr		%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
	retry

	.globl		xcall_receive_signal
xcall_receive_signal:
	wr		%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl		xcall_capture
xcall_capture:
	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry

	.globl		xcall_new_mmu_context_version
xcall_new_mmu_context_version:
	wr		%g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
	retry

#ifdef CONFIG_KGDB
	.globl		xcall_kgdb_capture
xcall_kgdb_capture:
	wr		%g0, (1 << PIL_KGDB_CAPTURE), %set_softint
	retry
#endif

#endif /* CONFIG_SMP */


	.globl		hypervisor_patch_cachetlbops

	/* Overwrite the generic flush routines (and, on SMP, the xcall
	 * slaves) with the sun4v hypervisor versions.  Instruction counts
	 * must match the "/* N insns *" annotations on the sources.
	 */
hypervisor_patch_cachetlbops:
	save		%sp, -128, %sp

	sethi		%hi(__flush_tlb_mm), %o0
	or		%o0, %lo(__flush_tlb_mm), %o0
	sethi		%hi(__hypervisor_flush_tlb_mm), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		10, %o2

	sethi		%hi(__flush_tlb_page), %o0
	or		%o0, %lo(__flush_tlb_page), %o0
	sethi		%hi(__hypervisor_flush_tlb_page), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_page), %o1
	call		tlb_patch_one
	 mov		11, %o2

	sethi		%hi(__flush_tlb_pending), %o0
	or		%o0, %lo(__flush_tlb_pending), %o0
	sethi		%hi(__hypervisor_flush_tlb_pending), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_pending), %o1
	call		tlb_patch_one
	 mov		16, %o2

	sethi		%hi(__flush_tlb_kernel_range), %o0
	or		%o0, %lo(__flush_tlb_kernel_range), %o0
	sethi		%hi(__hypervisor_flush_tlb_kernel_range), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
	call		tlb_patch_one
	 mov		16, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi		%hi(__flush_dcache_page), %o0
	or		%o0, %lo(__flush_dcache_page), %o0
	sethi		%hi(__hypervisor_flush_dcache_page), %o1
	or		%o1, %lo(__hypervisor_flush_dcache_page), %o1
	call		tlb_patch_one
	 mov		2, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
	sethi		%hi(xcall_flush_tlb_mm), %o0
	or		%o0, %lo(xcall_flush_tlb_mm), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_mm), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		21, %o2

	sethi		%hi(xcall_flush_tlb_page), %o0
	or		%o0, %lo(xcall_flush_tlb_page), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_page), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
	call		tlb_patch_one
	 mov		17, %o2

	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	call		tlb_patch_one
	 mov		25, %o2
#endif /* CONFIG_SMP */

	ret
	 restore