/* $Id: ultra.S,v 1.70.2.1 2002/03/03 10:31:56 davem Exp $
 * ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
 */

#include <linux/config.h>
#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/pil.h>
#include <asm/head.h>

	/* Basically, most of the Spitfire vs. Cheetah madness
	 * has to do with the fact that Cheetah does not support
	 * IMMU flushes out of the secondary context.  Someone needs
	 * to throw a south lake birthday party for the folks
	 * in Microelectronics who refused to fix this shit.
	 */

	/* This file is meant to be read efficiently by the CPU, not humans.
	 * (Try not to screw this up for anybody...)
	 */
	.text
	.align		32
	.globl		__flush_tlb_page, __flush_tlb_mm, __flush_tlb_range

	/* Flush one page's D+I TLB entries for a given context.
	 * Fast path: if SECONDARY_CONTEXT already holds %o0, issue the
	 * demap directly (the 0x10 OR'd into the address selects the
	 * secondary-context demap operation); otherwise fall to the slow
	 * path which temporarily installs the context with interrupts off.
	 * The trailing nops pad this routine so the 14-insn Cheetah
	 * version can be patched over it at boot (cheetah_patch_cachetlbops).
	 */
__flush_tlb_page: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=page&PAGE_MASK, %o2=SECONDARY_CONTEXT */
	ldxa		[%o2] ASI_DMMU, %g2
	cmp		%g2, %o0
	bne,pn		%icc, __spitfire_flush_tlb_page_slow
	 or		%o1, 0x10, %g3
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	retl
	 flush		%g6
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	/* Flush all D+I TLB entries for a context.  Same fast/slow split
	 * as __flush_tlb_page; 0x50 is the "demap context, secondary"
	 * demap address.  Nops pad for the 15-insn Cheetah replacement.
	 */
__flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
	ldxa		[%o1] ASI_DMMU, %g2
	cmp		%g2, %o0
	bne,pn		%icc, __spitfire_flush_tlb_mm_slow
	 mov		0x50, %g3
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	retl
	 flush		%g6
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	/* Flush a virtual address range for a context.  One page degrades
	 * to __flush_tlb_page; at or above TLB_MAGIC pages it is cheaper
	 * to walk the TLB entries themselves (constant-time variant);
	 * otherwise demap page by page.
	 */
__flush_tlb_range: /* %o0=(ctx&TAG_CONTEXT_BITS), %o1=start&PAGE_MASK, %o2=SECONDARY_CONTEXT,
		    * %o3=end&PAGE_MASK, %o4=PAGE_SIZE, %o5=(end - start)
		    */
#define TLB_MAGIC	207	/* Students, do you know how I calculated this?  -DaveM */
	cmp		%o5, %o4
	bleu,pt		%xcc, __flush_tlb_page
	 srlx		%o5, PAGE_SHIFT, %g5
	cmp		%g5, TLB_MAGIC
	bgeu,pn		%icc, __spitfire_flush_tlb_range_constant_time
	 or		%o1, 0x10, %g5
	ldxa		[%o2] ASI_DMMU, %g2
	cmp		%g2, %o0
__spitfire_flush_tlb_range_page_by_page:
	bne,pn		%icc, __spitfire_flush_tlb_range_pbp_slow
	 sub		%o5, %o4, %o5
1:	stxa		%g0, [%g5 + %o5] ASI_DMMU_DEMAP	! %o5 counts down to 0
	stxa		%g0, [%g5 + %o5] ASI_IMMU_DEMAP
	brnz,pt		%o5, 1b
	 sub		%o5, %o4, %o5
	retl
	 flush		%g6

	/* Walk every locked-down-able TLB entry (I then D), compare its
	 * tag against [%o1,%o3) in context %o0, and zap matches via the
	 * data-access ASIs.  Runs with interrupts disabled (PSTATE_IE
	 * cleared) since TLB_TAG_ACCESS is shared state.
	 */
__spitfire_flush_tlb_range_constant_time: /* %o0=ctx, %o1=start, %o3=end */
	rdpr		%pstate, %g1
	wrpr		%g1, PSTATE_IE, %pstate
	mov		TLB_TAG_ACCESS, %g3
	mov		((SPITFIRE_HIGHEST_LOCKED_TLBENT-1) << 3), %g2

	/* Spitfire Errata #32 workaround. */
	mov		0x8, %o4
	stxa		%g0, [%o4] ASI_DMMU
	flush		%g6

1:	ldxa		[%g2] ASI_ITLB_TAG_READ, %o4
	and		%o4, TAG_CONTEXT_BITS, %o5
	cmp		%o5, %o0		! context match?
	bne,pt		%icc, 2f
	 andn		%o4, TAG_CONTEXT_BITS, %o4
	cmp		%o4, %o1		! vaddr in [start,end)?
	blu,pt		%xcc, 2f
	 cmp		%o4, %o3
	blu,pn		%xcc, 4f		! hit: zap ITLB entry
2:	 ldxa		[%g2] ASI_DTLB_TAG_READ, %o4
	and		%o4, TAG_CONTEXT_BITS, %o5
	cmp		%o5, %o0
	andn		%o4, TAG_CONTEXT_BITS, %o4
	bne,pt		%icc, 3f
	 cmp		%o4, %o1
	blu,pt		%xcc, 3f
	 cmp		%o4, %o3
	blu,pn		%xcc, 5f		! hit: zap DTLB entry
	 nop
3:	brnz,pt		%g2, 1b
	 sub		%g2, (1 << 3), %g2	! next TLB entry (8-byte stride)
	retl
	 wrpr		%g1, 0x0, %pstate
4:	stxa		%g0, [%g3] ASI_IMMU	! clear tag, then entry data
	stxa		%g0, [%g2] ASI_ITLB_DATA_ACCESS
	flush		%g6

	/* Spitfire Errata #32 workaround. */
	mov		0x8, %o4
	stxa		%g0, [%o4] ASI_DMMU
	flush		%g6

	ba,pt		%xcc, 2b
	 nop

5:	stxa		%g0, [%g3] ASI_DMMU
	stxa		%g0, [%g2] ASI_DTLB_DATA_ACCESS
	flush		%g6

	/* Spitfire Errata #32 workaround. */
	mov		0x8, %o4
	stxa		%g0, [%o4] ASI_DMMU
	flush		%g6

	ba,pt		%xcc, 3b
	 nop

	/* Slow paths: with interrupts off, temporarily install ctx %o0
	 * into the context register, demap, then restore the saved
	 * context %g2.  %g3 holds the demap address set up by the caller.
	 */
__spitfire_flush_tlb_mm_slow:
	rdpr		%pstate, %g1
	wrpr		%g1, PSTATE_IE, %pstate
	stxa		%o0, [%o1] ASI_DMMU
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	flush		%g6
	stxa		%g2, [%o1] ASI_DMMU	! restore previous context
	flush		%g6
	retl
	 wrpr		%g1, 0, %pstate

__spitfire_flush_tlb_page_slow:
	rdpr		%pstate, %g1
	wrpr		%g1, PSTATE_IE, %pstate
	stxa		%o0, [%o2] ASI_DMMU
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	flush		%g6
	stxa		%g2, [%o2] ASI_DMMU	! restore previous context
	flush		%g6
	retl
	 wrpr		%g1, 0, %pstate

__spitfire_flush_tlb_range_pbp_slow:
	rdpr		%pstate, %g1
	wrpr		%g1, PSTATE_IE, %pstate
	stxa		%o0, [%o2] ASI_DMMU

2:	stxa		%g0, [%g5 + %o5] ASI_DMMU_DEMAP
	stxa		%g0, [%g5 + %o5] ASI_IMMU_DEMAP
	brnz,pt		%o5, 2b
	 sub		%o5, %o4, %o5
	flush		%g6
	stxa		%g2, [%o2] ASI_DMMU	! restore previous context
	flush		%g6
	retl
	 wrpr		%g1, 0x0, %pstate

/*
 * The following code flushes one page_size worth.
 */
#if (PAGE_SHIFT == 13)
#define ITAG_MASK 0xfe
#elif (PAGE_SHIFT == 16)
#define ITAG_MASK 0x7fe
#else
#error unsupported PAGE_SIZE
#endif

	/* Walk both I-cache sets comparing each tag against the
	 * VALID|phys-addr comparitor built from %o0; matching lines are
	 * invalidated at iflush1/iflush2.  Restores %g4 on exit
	 * (presumably the kernel keeps PAGE_OFFSET cached there — the
	 * dcache-flush routines below subtract %g4 from a kaddr; confirm).
	 */
	.align		32
	.globl		__flush_icache_page
__flush_icache_page:	/* %o0 = phys_page */
	sethi		%hi(1 << 13), %o2	! IC_set bit
	mov		1, %g1
	srlx		%o0, 5, %o0
	clr		%o1			! IC_addr
	sllx		%g1, 36, %g1
	ldda		[%o1] ASI_IC_TAG, %o4
	sub		%g1, 1, %g2
	or		%o0, %g1, %o0		! VALID+phys-addr comparitor

	sllx		%g2, 1, %g2
	andn		%g2, ITAG_MASK, %g2	! IC_tag mask
	nop
	nop
	nop
	nop
	nop
	nop

	/* NOTE(review): the addx-with-%g0 insns below are no-ops;
	 * presumably dispatch-group padding for the pipeline — confirm
	 * against the Spitfire grouping rules before touching them.
	 */
1:	addx		%g0, %g0, %g0
	ldda		[%o1 + %o2] ASI_IC_TAG, %g4
	addx		%g0, %g0, %g0
	and		%o5, %g2, %g3
	cmp		%g3, %o0
	add		%o1, 0x20, %o1		! next line (32-byte stride)
	ldda		[%o1] ASI_IC_TAG, %o4
	be,pn		%xcc, iflush1

2:	 nop
	and		%g5, %g2, %g5
	cmp		%g5, %o0
	be,pn		%xcc, iflush2
3:	 cmp		%o1, %o2
	bne,pt		%xcc, 1b
	 addx		%g0, %g0, %g0
	nop

	sethi		%uhi(PAGE_OFFSET), %g4	! restore %g4 = PAGE_OFFSET
	retl
	 sllx		%g4, 32, %g4

iflush1:sub		%o1, 0x20, %g3
	stxa		%g0, [%g3] ASI_IC_TAG
	flush		%g6
	ba,a,pt		%xcc, 2b
iflush2:sub		%o1, 0x20, %g3
	stxa		%g0, [%o1 + %o2] ASI_IC_TAG
	flush		%g6
	ba,a,pt		%xcc, 3b

#if (PAGE_SHIFT == 13)
#define DTAG_MASK 0x3
#elif (PAGE_SHIFT == 16)
#define DTAG_MASK 0x1f
#elif (PAGE_SHIFT == 19)
#define DTAG_MASK 0xff
#elif (PAGE_SHIFT == 22)
#define DTAG_MASK 0x3ff
#endif

	/* Scan the D-cache tags four lines at a time, invalidating any
	 * line whose tag matches the page at %o0; chains to
	 * __flush_icache_page when %o1 is non-zero.  The trailing
	 * !-comments annotate the intended pipeline unit/grouping.
	 * Patched over with flush_dcpage_cheetah (9 insns) on Cheetah.
	 */
	.align		64
	.globl		__flush_dcache_page
__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	sub		%o0, %g4, %o0		! kaddr -> paddr (%g4 = PAGE_OFFSET, see above)
	clr		%o4
	srlx		%o0, 11, %o0		! form tag comparitor
	sethi		%hi(1 << 14), %o2	! D-cache size
1:	ldxa		[%o4] ASI_DCACHE_TAG, %o3	! LSU	Group
	add		%o4, (1 << 5), %o4		! IEU0
	ldxa		[%o4] ASI_DCACHE_TAG, %g1	! LSU	Group
	add		%o4, (1 << 5), %o4		! IEU0
	ldxa		[%o4] ASI_DCACHE_TAG, %g2	! LSU	Group	o3 available
	add		%o4, (1 << 5), %o4		! IEU0
	andn		%o3, DTAG_MASK, %o3		! IEU1
	ldxa		[%o4] ASI_DCACHE_TAG, %g3	! LSU	Group
	add		%o4, (1 << 5), %o4		! IEU0
	andn		%g1, DTAG_MASK, %g1		! IEU1
	cmp		%o0, %o3			! IEU1	Group
	be,a,pn		%xcc, dflush1			! CTI
	 sub		%o4, (4 << 5), %o4		! IEU0	(Group)
	cmp		%o0, %g1			! IEU1	Group
	andn		%g2, DTAG_MASK, %g2		! IEU0
	be,a,pn		%xcc, dflush2			! CTI
	 sub		%o4, (3 << 5), %o4		! IEU0	(Group)
	cmp		%o0, %g2			! IEU1	Group
	andn		%g3, DTAG_MASK, %g3		! IEU0
	be,a,pn		%xcc, dflush3			! CTI
	 sub		%o4, (2 << 5), %o4		! IEU0	(Group)
	cmp		%o0, %g3			! IEU1	Group
	be,a,pn		%xcc, dflush4			! CTI
	 sub		%o4, (1 << 5), %o4		! IEU0
2:	cmp		%o4, %o2			! IEU1	Group
	bne,pt		%xcc, 1b			! CTI
	 nop						! IEU0

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt		%o1, __flush_icache_page
	 sllx		%o0, 11, %o0			! rebuild phys page addr
	retl
	 nop

dflush1:stxa		%g0, [%o4] ASI_DCACHE_TAG
	add		%o4, (1 << 5), %o4
dflush2:stxa		%g0, [%o4] ASI_DCACHE_TAG
	add		%o4, (1 << 5), %o4
dflush3:stxa		%g0, [%o4] ASI_DCACHE_TAG
	add		%o4, (1 << 5), %o4
dflush4:stxa		%g0, [%o4] ASI_DCACHE_TAG
	add		%o4, (1 << 5), %o4
	membar		#Sync
	ba,pt		%xcc, 2b
	 nop

	/* Pre-load a TLB entry: write the tag via TLB_TAG_ACCESS, then
	 * push the data in through the DATA_IN ASI.  Interrupts are
	 * disabled around the two-store sequence.
	 * %o0 = tag (vaddr|ctx), %o1 = pte data.
	 */
	.align		32
__prefill_dtlb:
	rdpr		%pstate, %g7
	wrpr		%g7, PSTATE_IE, %pstate
	mov		TLB_TAG_ACCESS, %g1
	stxa		%o0, [%g1] ASI_DMMU
	stxa		%o1, [%g0] ASI_DTLB_DATA_IN
	flush		%g6
	retl
	 wrpr		%g7, %pstate
__prefill_itlb:
	rdpr		%pstate, %g7
	wrpr		%g7, PSTATE_IE, %pstate
	mov		TLB_TAG_ACCESS, %g1
	stxa		%o0, [%g1] ASI_IMMU
	stxa		%o1, [%g0] ASI_ITLB_DATA_IN
	flush		%g6
	retl
	 wrpr		%g7, %pstate

	/* After a fault, pre-load the relevant TLB with the new pte:
	 * reads the saved fault code from the current thread (%g6),
	 * does nothing if it is zero, otherwise builds the tag from
	 * page-aligned address | (mm ctx & TAG_CONTEXT_BITS) and tail
	 * calls __prefill_dtlb or __prefill_itlb per FAULT_CODE_DTLB.
	 */
	.globl		__update_mmu_cache
__update_mmu_cache:	/* %o0=vma, %o1=address, %o2=pte */
	ldub		[%g6 + AOFF_task_thread + AOFF_thread_fault_code], %o3
	srlx		%o1, PAGE_SHIFT, %o1
	ldx		[%o0 + 0x0], %o4	/* XXX vma->vm_mm */
	brz,pn		%o3, 1f
	 sllx		%o1, PAGE_SHIFT, %o0	! page-align the address
	ldx		[%o4 + AOFF_mm_context], %o5
	andcc		%o3, FAULT_CODE_DTLB, %g0
	mov		%o2, %o1
	and		%o5, TAG_CONTEXT_BITS, %o5
	bne,pt		%xcc, __prefill_dtlb
	 or		%o0, %o5, %o0		! tag = vaddr | ctx
	ba,a,pt		%xcc, __prefill_itlb
1:	retl
	 nop

	/* Cheetah specific versions, patched at boot time.
	 * Insn counts in the comments below are load-bearing: these
	 * bodies are copied word-for-word over the Spitfire routines by
	 * cheetah_patch_cachetlbops, which passes exactly these counts.
	 */

	/* Cheetah TLB page flush: disable interrupts, raise to TL=1,
	 * swap ctx %o0 into PRIMARY_CONTEXT (Cheetah cannot do IMMU
	 * demaps from the secondary context — see file header), demap
	 * page %o1 in both MMUs, restore the old context and state.
	 */
__cheetah_flush_tlb_page: /* 14 insns */
	rdpr		%pstate, %g5
	andn		%g5, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate
	wrpr		%g0, 1, %tl
	mov		PRIMARY_CONTEXT, %o2
	ldxa		[%o2] ASI_DMMU, %g2	! save old primary ctx
	stxa		%o0, [%o2] ASI_DMMU
	stxa		%g0, [%o1] ASI_DMMU_DEMAP
	stxa		%g0, [%o1] ASI_IMMU_DEMAP
	stxa		%g2, [%o2] ASI_DMMU	! restore it
	flush		%g6
	wrpr		%g0, 0, %tl
	retl
	 wrpr		%g5, 0x0, %pstate

	/* Cheetah context flush: as above, but 0x40 is the
	 * "demap context, primary" demap address.
	 */
__cheetah_flush_tlb_mm: /* 15 insns */
	rdpr		%pstate, %g5
	andn		%g5, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate
	wrpr		%g0, 1, %tl
	mov		PRIMARY_CONTEXT, %o2
	mov		0x40, %g3
	ldxa		[%o2] ASI_DMMU, %g2
	stxa		%o0, [%o2] ASI_DMMU
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	stxa		%g2, [%o2] ASI_DMMU
	flush		%g6
	wrpr		%g0, 0, %tl
	retl
	 wrpr		%g5, 0x0, %pstate

	/* Cheetah range flush: page-by-page demap from (end - PAGE_SIZE)
	 * down to start; a sub-page range (%o5 < %o4) is a no-op.
	 */
__cheetah_flush_tlb_range: /* 20 insns */
	cmp		%o5, %o4
	blu,pt		%xcc, 9f
	 rdpr		%pstate, %g5
	andn		%g5, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate
	wrpr		%g0, 1, %tl
	mov		PRIMARY_CONTEXT, %o2
	sub		%o5, %o4, %o5
	ldxa		[%o2] ASI_DMMU, %g2
	stxa		%o0, [%o2] ASI_DMMU
1:	stxa		%g0, [%o1 + %o5] ASI_DMMU_DEMAP
	stxa		%g0, [%o1 + %o5] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%o5, 1b
	 sub		%o5, %o4, %o5
	stxa		%g2, [%o2] ASI_DMMU
	flush		%g6
	wrpr		%g0, 0, %tl
9:	retl
	 wrpr		%g5, 0x0, %pstate

	/* Cheetah D-cache page flush: invalidate every 32-byte line of
	 * the page via ASI_DCACHE_INVALIDATE.  %o0 is a kernel vaddr;
	 * subtracting %g4 converts it (presumably %g4 = PAGE_OFFSET,
	 * matching __flush_dcache_page above — confirm).
	 */
flush_dcpage_cheetah: /* 9 insns */
	sub		%o0, %g4, %o0
	sethi		%hi(PAGE_SIZE), %o4
1:	subcc		%o4, (1 << 5), %o4
	stxa		%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	 nop
	retl		/* I-cache flush never needed on Cheetah, see callers. */
	 nop

	/* Copy %o2 instruction words from %o1 to %o0, flushing each
	 * destination word from the I-cache so the patched code is
	 * fetched coherently.
	 */
cheetah_patch_one:
1:	lduw		[%o1], %g1
	stw		%g1, [%o0]
	flush		%o0
	subcc		%o2, 1, %o2
	add		%o1, 4, %o1
	bne,pt		%icc, 1b
	 add		%o0, 4, %o0
	retl
	 nop

	/* Boot-time patching: overwrite the Spitfire cache/TLB flush
	 * routines with their Cheetah equivalents, using the insn counts
	 * declared next to each __cheetah_* body above.
	 */
	.globl		cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
	save		%sp, -128, %sp

	sethi		%hi(__flush_tlb_page), %o0
	or		%o0, %lo(__flush_tlb_page), %o0
	sethi		%hi(__cheetah_flush_tlb_page), %o1
	or		%o1, %lo(__cheetah_flush_tlb_page), %o1
	call		cheetah_patch_one
	 mov		14, %o2

	sethi		%hi(__flush_tlb_mm), %o0
	or		%o0, %lo(__flush_tlb_mm), %o0
	sethi		%hi(__cheetah_flush_tlb_mm), %o1
	or		%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call		cheetah_patch_one
	 mov		15, %o2

	sethi		%hi(__flush_tlb_range), %o0
	or		%o0, %lo(__flush_tlb_range), %o0
	sethi		%hi(__cheetah_flush_tlb_range), %o1
	or		%o1, %lo(__cheetah_flush_tlb_range), %o1
	call		cheetah_patch_one
	 mov		20, %o2

	sethi		%hi(__flush_dcache_page), %o0
	or		%o0, %lo(__flush_dcache_page), %o0
	sethi		%hi(flush_dcpage_cheetah), %o1
	or		%o1, %lo(flush_dcpage_cheetah), %o1
	call		cheetah_patch_one
	 mov		9, %o2

	ret
	 restore

#ifdef CONFIG_SMP
	/* These are all called by the slaves of a cross call, at
	 * trap level 1, with interrupts fully disabled.
	 *
	 * Register usage:
	 * %g5	mm->context	(all tlb flushes)
	 * %g1	address arg 1	(tlb page and range flushes)
	 * %g7	address arg 2	(tlb range flush only)
	 *
	 * %g6	ivector table, don't touch
	 * %g2	scratch 1
	 * %g3	scratch 2
	 * %g4	scratch 3
	 *
	 * TODO: Make xcall TLB range flushes use the tricks above...
-DaveM 475 */ 476 .align 32 477 .globl xcall_flush_tlb_page, xcall_flush_tlb_mm, xcall_flush_tlb_range 478xcall_flush_tlb_page: 479 mov PRIMARY_CONTEXT, %g2 480 ldxa [%g2] ASI_DMMU, %g3 481 stxa %g5, [%g2] ASI_DMMU 482 stxa %g0, [%g1] ASI_DMMU_DEMAP 483 stxa %g0, [%g1] ASI_IMMU_DEMAP 484 stxa %g3, [%g2] ASI_DMMU 485 retry 486 nop 487 488xcall_flush_tlb_mm: 489 mov PRIMARY_CONTEXT, %g2 490 mov 0x40, %g4 491 ldxa [%g2] ASI_DMMU, %g3 492 stxa %g5, [%g2] ASI_DMMU 493 stxa %g0, [%g4] ASI_DMMU_DEMAP 494 stxa %g0, [%g4] ASI_IMMU_DEMAP 495 stxa %g3, [%g2] ASI_DMMU 496 retry 497 498xcall_flush_tlb_range: 499 sethi %hi(PAGE_SIZE - 1), %g2 500 or %g2, %lo(PAGE_SIZE - 1), %g2 501 andn %g1, %g2, %g1 502 andn %g7, %g2, %g7 503 sub %g7, %g1, %g3 504 add %g2, 1, %g2 505 srlx %g3, PAGE_SHIFT, %g4 506 cmp %g4, 96 507 508 bgu,pn %icc, xcall_flush_tlb_mm 509 mov PRIMARY_CONTEXT, %g4 510 ldxa [%g4] ASI_DMMU, %g7 511 sub %g3, %g2, %g3 512 stxa %g5, [%g4] ASI_DMMU 513 nop 514 nop 515 nop 516 5171: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP 518 stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP 519 membar #Sync 520 brnz,pt %g3, 1b 521 sub %g3, %g2, %g3 522 stxa %g7, [%g4] ASI_DMMU 523 retry 524 nop 525 nop 526 527 /* This runs in a very controlled environment, so we do 528 * not need to worry about BH races etc. 529 */ 530 .globl xcall_sync_tick 531xcall_sync_tick: 532 rdpr %pstate, %g2 533 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate 534 rdpr %pil, %g2 535 wrpr %g0, 15, %pil 536 sethi %hi(109f), %g7 537 b,pt %xcc, etrap_irq 538109: or %g7, %lo(109b), %g7 539 call smp_synchronize_tick_client 540 nop 541 clr %l6 542 b rtrap_xcall 543 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 544 545 /* NOTE: This is SPECIAL!! We do etrap/rtrap however 546 * we choose to deal with the "BH's run with 547 * %pil==15" problem (described in asm/pil.h) 548 * by just invoking rtrap directly past where 549 * BH's are checked for. 550 * 551 * We do it like this because we do not want %pil==15 552 * lockups to prevent regs being reported. 
553 */ 554 .globl xcall_report_regs 555xcall_report_regs: 556 rdpr %pstate, %g2 557 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate 558 rdpr %pil, %g2 559 wrpr %g0, 15, %pil 560 sethi %hi(109f), %g7 561 b,pt %xcc, etrap_irq 562109: or %g7, %lo(109b), %g7 563 call __show_regs 564 add %sp, PTREGS_OFF, %o0 565 clr %l6 566 /* Has to be a non-v9 branch due to the large distance. */ 567 b rtrap_xcall 568 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 569 570 .align 32 571 .globl xcall_flush_dcache_page_cheetah 572xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */ 573 sethi %hi(PAGE_SIZE), %g3 5741: subcc %g3, (1 << 5), %g3 575 stxa %g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE 576 membar #Sync 577 bne,pt %icc, 1b 578 nop 579 retry 580 nop 581 582 .globl xcall_flush_dcache_page_spitfire 583xcall_flush_dcache_page_spitfire: /* %g1 == physical page address 584 %g7 == kernel page virtual address 585 %g5 == (page->mapping != NULL) */ 586#if (L1DCACHE_SIZE > PAGE_SIZE) 587 srlx %g1, (13 - 2), %g1 ! Form tag comparitor 588 sethi %hi(L1DCACHE_SIZE), %g3 ! D$ size == 16K 589 sub %g3, (1 << 5), %g3 ! D$ linesize == 32 5901: ldxa [%g3] ASI_DCACHE_TAG, %g2 591 andcc %g2, 0x3, %g0 592 be,pn %xcc, 2f 593 andn %g2, 0x3, %g2 594 cmp %g2, %g1 595 596 bne,pt %xcc, 2f 597 nop 598 stxa %g0, [%g3] ASI_DCACHE_TAG 599 membar #Sync 6002: cmp %g3, 0 601 bne,pt %xcc, 1b 602 sub %g3, (1 << 5), %g3 603 604 brz,pn %g5, 2f 605#endif /* L1DCACHE_SIZE > PAGE_SIZE */ 606 sethi %hi(PAGE_SIZE), %g3 607 6081: flush %g7 609 subcc %g3, (1 << 5), %g3 610 bne,pt %icc, 1b 611 add %g7, (1 << 5), %g7 612 6132: retry 614 nop 615 nop 616 617 .globl xcall_promstop 618xcall_promstop: 619 rdpr %pstate, %g2 620 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate 621 rdpr %pil, %g2 622 wrpr %g0, 15, %pil 623 sethi %hi(109f), %g7 624 b,pt %xcc, etrap_irq 625109: or %g7, %lo(109b), %g7 626 flushw 627 call prom_stopself 628 nop 629 /* We should not return, just spin if we do... 
*/ 6301: b,a,pt %xcc, 1b 631 nop 632 633 .data 634 635errata32_hwbug: 636 .xword 0 637 638 .text 639 640 /* These two are not performance critical... */ 641 .globl xcall_flush_tlb_all_spitfire 642xcall_flush_tlb_all_spitfire: 643 /* Spitfire Errata #32 workaround. */ 644 sethi %hi(errata32_hwbug), %g4 645 stx %g0, [%g4 + %lo(errata32_hwbug)] 646 647 clr %g2 648 clr %g3 6491: ldxa [%g3] ASI_DTLB_DATA_ACCESS, %g4 650 and %g4, _PAGE_L, %g5 651 brnz,pn %g5, 2f 652 mov TLB_TAG_ACCESS, %g7 653 654 stxa %g0, [%g7] ASI_DMMU 655 membar #Sync 656 stxa %g0, [%g3] ASI_DTLB_DATA_ACCESS 657 membar #Sync 658 659 /* Spitfire Errata #32 workaround. */ 660 sethi %hi(errata32_hwbug), %g4 661 stx %g0, [%g4 + %lo(errata32_hwbug)] 662 6632: ldxa [%g3] ASI_ITLB_DATA_ACCESS, %g4 664 and %g4, _PAGE_L, %g5 665 brnz,pn %g5, 2f 666 mov TLB_TAG_ACCESS, %g7 667 668 stxa %g0, [%g7] ASI_IMMU 669 membar #Sync 670 stxa %g0, [%g3] ASI_ITLB_DATA_ACCESS 671 membar #Sync 672 673 /* Spitfire Errata #32 workaround. */ 674 sethi %hi(errata32_hwbug), %g4 675 stx %g0, [%g4 + %lo(errata32_hwbug)] 676 6772: add %g2, 1, %g2 678 cmp %g2, SPITFIRE_HIGHEST_LOCKED_TLBENT 679 ble,pt %icc, 1b 680 sll %g2, 3, %g3 681 flush %g6 682 retry 683 684 .globl xcall_flush_tlb_all_cheetah 685xcall_flush_tlb_all_cheetah: 686 mov 0x80, %g2 687 stxa %g0, [%g2] ASI_DMMU_DEMAP 688 stxa %g0, [%g2] ASI_IMMU_DEMAP 689 retry 690 691 .globl xcall_flush_cache_all_spitfire 692xcall_flush_cache_all_spitfire: 693 sethi %hi(16383), %g2 694 or %g2, %lo(16383), %g2 695 clr %g3 6961: stxa %g0, [%g3] ASI_IC_TAG 697 membar #Sync 698 add %g3, 32, %g3 699 cmp %g3, %g2 700 bleu,pt %xcc, 1b 701 nop 702 flush %g6 703 retry 704 705 /* These just get rescheduled to PIL vectors. 
	 * Each handler only raises the matching software interrupt via
	 * %set_softint and retries; the real work runs later in the
	 * corresponding PIL interrupt handler.
	 */
	.globl		xcall_call_function
xcall_call_function:
	wr		%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl		xcall_receive_signal
xcall_receive_signal:
	wr		%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl		xcall_capture
xcall_capture:
	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry

#endif /* CONFIG_SMP */