/linux-2.6.39/arch/sh/kernel/ |
D | head_64.S |
    189  movi MMUIR_FIRST, r21
    192  putcfg r21, 0, ZERO    /* Clear MMUIR[n].PTEH.V */
    193  addi r21, MMUIR_STEP, r21
    194  bne r21, r22, tr1
    198  movi MMUDR_FIRST, r21
    201  putcfg r21, 0, ZERO    /* Clear MMUDR[n].PTEH.V */
    202  addi r21, MMUDR_STEP, r21
    203  bne r21, r22, tr1
    206  movi MMUIR_FIRST, r21
    209  putcfg r21, 1, r22     /* Set MMUIR[0].PTEL */
    [all …]
|
/linux-2.6.39/arch/sh/boot/compressed/ |
D | head_64.S |
    65   movi ITLB_FIXED, r21
    67   1: putcfg r21, 0, r63  /* Clear MMUIR[n].PTEH.V */
    68   addi r21, TLB_STEP, r21
    69   bne r21, r22, tr1
    73   movi DTLB_FIXED, r21
    75   1: putcfg r21, 0, r63  /* Clear MMUDR[n].PTEH.V */
    76   addi r21, TLB_STEP, r21
    77   bne r21, r22, tr1
    80   movi ITLB_FIXED, r21
    82   putcfg r21, 1, r22     /* Set MMUIR[0].PTEL */
    [all …]
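
Both head_64.S entries above (the kernel proper and the compressed-boot copy) show the same SH-5 idiom: step through the fixed TLB configuration registers from the first entry in fixed-size increments, writing zero into each entry's PTEH word so its valid bit is cleared. A minimal C sketch of that walk-and-invalidate loop, with write_cfg() as a hypothetical stand-in for the putcfg instruction and the range parameters as illustrative values, not the kernel's constants:

    #include <stdint.h>

    /* Hypothetical stand-in for SH-5 "putcfg": write one 64-bit word
     * into the configuration-register space at cfg_addr + offset. */
    static void write_cfg(uintptr_t cfg_addr, unsigned int offset, uint64_t value)
    {
        volatile uint64_t *cfg = (volatile uint64_t *)cfg_addr;
        cfg[offset] = value;
    }

    /* Clear the valid bit of every entry from tlb_first up to tlb_end,
     * stepping by tlb_step, as the movi/putcfg/addi/bne loop does for
     * MMUIR (ITLB) and MMUDR (DTLB). */
    static void invalidate_tlb_range(uintptr_t tlb_first, uintptr_t tlb_end,
                                     uintptr_t tlb_step)
    {
        for (uintptr_t entry = tlb_first; entry != tlb_end; entry += tlb_step)
            write_cfg(entry, 0, 0);   /* offset 0 is PTEH; writing 0 clears PTEH.V */
    }

The assembly runs this loop twice, once over the instruction TLB and once over the data TLB, which is why the same three-instruction pattern appears back to back in both files.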
|
/linux-2.6.39/arch/sh/lib64/ |
D | sdivsi3.S |
    18   shari r25, 58, r21     /* extract 5(6) bit index (s2.4 with hole -1..1) */
    20   ldx.ub r20, r21, r19   /* u0.8 */
    22   shlli r21, 1, r21
    24   ldx.w r20, r21, r21    /* s2.14 */
    27   sub r21, r19, r19      /* some 11 bit inverse in s1.14 */
    28   muls.l r19, r19, r21   /* u0.28 */
    31   muls.l r25, r21, r18   /* s2.58 */
    45   xor r21, r0, r21       /* You could also use the constant 1 << 27. */
    46   add r21, r25, r21
    47   sub r21, r19, r21
    [all …]
|
D | udivdi3.S |
    9    movi 0xffffffffffffbaf1,r21  /* .l shift count 17. */
    10   sub r21,r5,r1
    35   mshalds.l r1,r21,r1
    42   shlri r2,22,r21
    43   mulu.l r21,r1,r21
    46   shlrd r21,r0,r21
    47   mulu.l r21,r3,r5
    48   add r8,r21,r8
    49   mcmpgt.l r21,r63,r21   // See Note 1
    51   mshfhi.l r63,r21,r21
    [all …]
|
D | udivsi3.S |
    16   sub r20,r25,r21
    17   mmulfx.w r21,r21,r19
    18   mshflo.w r21,r63,r21
    23   msub.w r21,r19,r19
    30   addi r19,-2,r21
    31   mulu.l r4,r21,r18
    33   shlli r21,15,r21
    36   mmacnfx.wl r25,r19,r21
    40   mulu.l r25,r21,r19
    49   mulu.l r25,r21,r19
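
The three divide helpers above (sdivsi3.S, udivdi3.S, udivsi3.S) follow the approach spelled out in the sdivsi3.S comments: a table lookup produces a short fixed-point reciprocal estimate ("some 11 bit inverse in s1.14") and a handful of multiplies sharpen it before the final product with the dividend. The fixed-point formats are SH-5 specific; a floating-point sketch of the Newton-Raphson refinement these routines rely on, with illustrative constants rather than the kernel's tables:

    #include <stdio.h>

    /* One Newton-Raphson step for 1/d: x' = x * (2 - d*x).  Each step
     * roughly doubles the number of correct bits, so a coarse table
     * estimate plus one or two steps is enough for a 32-bit divide. */
    static double refine_reciprocal(double d, double x)
    {
        return x * (2.0 - d * x);
    }

    int main(void)
    {
        double d = 7.0;
        double x = 0.14;              /* crude "table lookup" guess for 1/7 */

        x = refine_reciprocal(d, x);  /* first refinement */
        x = refine_reciprocal(d, x);  /* second refinement */
        printf("1/%g ~= %.9f, so 100/%g ~= %.6f\n", d, x, d, 100.0 * x);
        return 0;
    }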
|
D | strcpy.S |
    35   sub r3, r2, r21
    36   addi r21, 8, r20
    37   ldx.q r0, r21, r5
    87   ldx.q r0, r21, r5
|
/linux-2.6.39/arch/ia64/lib/ |
D | flush.S |
    29   mov r21=1
    36   shl r21=r21,r20        // r21: stride size of the i-cache(s)
    52   add r24=r21,r24        // we flush "stride size" bytes per iteration
    81   mov r21=1
    89   shl r21=r21,r20        // r21: stride size of the i-cache(s)
    107  add r24=r21,r24        // we flush "stride size" bytes per iteration
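
In flush.S the stride is computed as 1 << r20 from the cache information, and the flush loop advances the address by that many bytes per iteration. A C sketch of the same loop shape, with flush_icache_line() as a hypothetical stand-in for the ia64 fc.i instruction:

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical stand-in for "fc.i addr"; a real implementation would
     * follow the loop with sync.i and srlz.i. */
    static void flush_icache_line(uintptr_t addr)
    {
        (void)addr;
    }

    /* Flush [start, start + len) one cache line at a time.  stride_shift
     * plays the role of the value shifted into r21: the line ("stride")
     * size is 1 << stride_shift bytes. */
    static void flush_icache_range(uintptr_t start, size_t len,
                                   unsigned int stride_shift)
    {
        uintptr_t stride = (uintptr_t)1 << stride_shift;   /* shl r21=r21,r20 */
        uintptr_t end = start + len;

        for (uintptr_t addr = start & ~(stride - 1); addr < end; addr += stride)
            flush_icache_line(addr);                       /* add r24=r21,r24 */
    }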
|
D | ip_fast_csum.S |
    43   (p7) ld4 r21=[r15],8
    50   add r20=r20,r21
    98   ld4 r21=[in1],4
    108  add r16=r20,r21
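
ip_fast_csum.S computes the standard IPv4 header checksum: it loads the header as 32-bit words (ld4), accumulates them, folds the carries back into 16 bits and complements the result. A portable C sketch of that computation, with the header length ihl given in 32-bit words as in the assembly:

    #include <stdint.h>
    #include <string.h>

    static uint16_t ip_header_checksum(const void *hdr, unsigned int ihl)
    {
        const uint8_t *p = hdr;
        uint64_t sum = 0;

        /* Sum the header as ihl 32-bit words (cf. the ld4/add pairs). */
        for (unsigned int i = 0; i < ihl; i++, p += 4) {
            uint32_t word;
            memcpy(&word, p, sizeof(word));   /* alignment-safe load */
            sum += word;
        }

        /* Fold the wide sum down to 16 bits, re-adding each carry. */
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);

        return (uint16_t)~sum;
    }

Run over a header whose checksum field is already filled in, the same loop yields 0, which is how received headers are verified.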
|
D | memcpy_mck.S |
    41   #define src_pre_l2 r21
    172  and r21=-8,tmp
    178  add src0=src0,r21      // setting up src pointer
    179  add dst0=dst0,r21      // setting up dest pointer
    294  shr.u r21=in2,7        // this much cache line
    299  cmp.lt p7,p8=1,r21
    300  add cnt=-1,r21
    362  (p6) or r21=r28,r27
    392  EX(.ex_handler, (p6) st8 [dst1]=r21,8)  // more than 8 byte to copy
    512  shrp r21=r22,r38,shift;  /* speculative work */ \
    [all …]
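
memcpy_mck.S sizes its main loop in cache lines: "shr.u r21=in2,7" turns the byte count into a count of 128-byte lines, which is then decremented once per pass. Leaving aside the prefetching and unrolling that make the real routine fast, a sketch of the chunking alone (the 128-byte line size is inferred from the shift by 7):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define LINE_BYTES 128u                    /* in2 >> 7 counts these */

    static void *copy_by_cache_line(void *dst, const void *src, size_t len)
    {
        uint8_t *d = dst;
        const uint8_t *s = src;
        size_t lines = len >> 7;               /* shr.u r21=in2,7 */

        while (lines--) {                      /* add cnt=-1,r21 per pass */
            memcpy(d, s, LINE_BYTES);
            d += LINE_BYTES;
            s += LINE_BYTES;
        }
        memcpy(d, s, len & (LINE_BYTES - 1));  /* tail shorter than a line */
        return dst;
    }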
|
/linux-2.6.39/arch/parisc/hpux/ |
D | gate.S |
    32   ldw -52(%r30), %r21    ;! 5th argument
    66   STREG %r21, TASK_PT_GR21(%r1)  /* 5th argument */
    86   stw %r21, -52(%r30)    ;! 5th argument
    91   ldil L%hpux_call_table, %r21
    92   ldo R%hpux_call_table(%r21), %r21
    95   LDREGX %r22(%r21), %r21
    97   be 0(%sr7,%r21)
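
gate.S resolves an HP-UX system call by materialising the address of hpux_call_table (ldil/ldo), indexing it with the scaled call number (LDREGX) and branching to the entry (be). Stripped of the PA-RISC calling-convention details, the control flow reduces to an indexed indirect call; the table type and signature below are illustrative, not the kernel's declarations:

    #include <errno.h>

    typedef long (*hpux_syscall_fn)(long, long, long, long, long);

    static long dispatch_hpux_syscall(const hpux_syscall_fn *table,
                                      unsigned long nr_entries, unsigned long nr,
                                      long a1, long a2, long a3, long a4, long a5)
    {
        if (nr >= nr_entries)
            return -ENOSYS;               /* out-of-range call number */
        /* LDREGX %r22(%r21), %r21 + be 0(%sr7,%r21): fetch entry, jump. */
        return table[nr](a1, a2, a3, a4, a5);
    }

The first ldw in the snippet also shows why the fifth argument is special: only the first four arguments travel in registers on PA-RISC, so the fifth is fetched from the caller's stack frame at -52(%r30).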
|
/linux-2.6.39/arch/tile/kernel/ |
D | intvec_32.S |
    441  push_reg r21, r52
    467  moveli r21, lo16(__per_cpu_offset)
    470  auli r21, r21, ha16(__per_cpu_offset)
    473  s2a r20, r20, r21
    489  PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
    501  sw r21, r32
    561  IRQ_DISABLE(r20, r21)
    844  IRQ_DISABLE(r20, r21)
    901  IRQ_DISABLE(r20,r21)
    910  IRQ_ENABLE(r20, r21)
    [all …]
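
The moveli/auli pair above assembles the address of __per_cpu_offset in r21 and s2a adds the CPU number scaled by the element size, giving a pointer into the per-CPU relocation table. In C the same address arithmetic is an array index plus an offset; the names below are stand-ins, not tile's actual accessors:

    #include <stdint.h>

    #define NR_CPUS 64

    /* Per-CPU relocation table, analogous to __per_cpu_offset[]. */
    static uintptr_t per_cpu_offset[NR_CPUS];

    /* Relocate a per-CPU variable's link-time address to the copy that
     * belongs to the given CPU: moveli/auli build &per_cpu_offset,
     * s2a indexes it by the CPU number, the add applies the offset. */
    static void *per_cpu_ptr_sketch(void *var, unsigned int cpu)
    {
        return (uint8_t *)var + per_cpu_offset[cpu];
    }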
|
/linux-2.6.39/arch/parisc/kernel/ |
D | pacache.S |
    81   LDREG ITLB_SID_STRIDE(%r1), %r21
    94   add %r21, %r20, %r20   /* increment space */
    117  add %r21, %r20, %r20   /* increment space */
    124  LDREG DTLB_SID_STRIDE(%r1), %r21
    137  add %r21, %r20, %r20   /* increment space */
    160  add %r21, %r20, %r20   /* increment space */
    301  ldd 16(%r25), %r21
    308  std %r21, 16(%r26)
    311  ldd 48(%r25), %r21
    318  std %r21, 48(%r26)
    [all …]
|
D | syscall.S |
    109  depdi 0, 31, 32, %r21
    151  STREG %r21, TASK_PT_GR21(%r1)
    178  stw %r21, -56(%r30)    /* 6th argument */
    313  LDREG TASK_PT_GR21(%r1), %r21
    452  LDREGX %r20(%sr2,r28), %r21  /* Scratch use of r21 */
    455  be,n 0(%sr2,%r21)
    458  ldo -ENOSYS(%r0),%r21  /* set errno */
    551  mfctl %cr27, %r21      /* Get current thread register */
    552  cmpb,<>,n %r21, %r28, cas_lock  /* Called recursive? */
    554  ldo -EDEADLOCK(%r0), %r21
    [all …]
|
/linux-2.6.39/arch/ia64/kvm/ |
D | optvfault.S |
    28   add r16=VMM_VPD_BASE_OFFSET,r21; \
    51   adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21
    124  add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
    125  add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
    151  add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
    154  add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
    187  add r27=VMM_VCPU_VRR0_OFFSET,r21
    218  add r27=VMM_VCPU_VRR0_OFFSET,r21
    264  adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21
    265  (p6) adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
    [all …]
|
D | vmm_ivt.S |
    371  .mem.offset 8,0; st8.spill [r3]=r21,16
    430  (p6) add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21
    606  adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
    609  adds r17 = VMM_VCPU_GP_OFFSET, r21
    628  adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
    633  adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
    634  adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
    642  adds r18=VMM_VPD_BASE_OFFSET,r21
    885  adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
    886  adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
    [all …]
|
D | kvm_minstate.h |
    56   add r25 = VMM_VPD_BASE_OFFSET, r21; \
    69   #define KVM_MINSTATE_GET_CURRENT(reg) mov reg=r21
    164  mov r13 = r21;         /* establish `current' */ \
    215  .mem.offset 8,0; st8.spill [r3] = r21,16; \
|
/linux-2.6.39/arch/ia64/kernel/ |
D | ivt.S |
    121  shl r21=r16,3          // shift bit 60 into sign bit
    124  shr.u r22=r21,3
    144  (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
    145  (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
    149  cmp.eq p7,p6=0,r21     // unused address bits all zeroes?
    175  dep r21=r19,r20,3,(PAGE_SHIFT-3)  // r21=pte_offset(pmd,addr)
    177  (p7) ld8 r18=[r21]     // read *pte
    224  ld8 r25=[r21]          // read *pte again
    342  MOV_FROM_IPSR(p0, r21)
    357  extr.u r23=r21,IA64_PSR_CPL0_BIT,2  // extract psr.cpl
    [all …]
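
The ivt.S excerpt is the TLB-miss handler's page-table walk: the top address bits select the region and pgd slot, successive shifts by multiples of (PAGE_SHIFT - 3) peel off one level's index at a time, and dep forms the final pte address ("r21=pte_offset(pmd,addr)") before the ld8 that reads the pte. A simplified three-level walk in C, ignoring the region-register handling and present-bit checks, and assuming a 16 KB PAGE_SHIFT as one possible configuration:

    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SHIFT  14                          /* 16 KB pages (one config) */
    #define LVL_BITS    (PAGE_SHIFT - 3)            /* 8-byte entries per level */
    #define PTRS_PER_PT (1UL << LVL_BITS)

    /* pgd -> pmd -> pte, each level indexed by LVL_BITS bits of the
     * virtual address, mirroring the shr.u/dep arithmetic above.  Table
     * entries are treated as plain pointers here; the real handler works
     * on physical addresses and also tests the unused high address bits
     * ("all zeroes?"). */
    static uint64_t *lookup_pte(uint64_t **pgd, uint64_t vaddr)
    {
        unsigned long mask = PTRS_PER_PT - 1;
        uint64_t **pmd;
        uint64_t *pte;

        pmd = (uint64_t **)pgd[(vaddr >> (PAGE_SHIFT + 2 * LVL_BITS)) & mask];
        if (!pmd)
            return NULL;
        pte = pmd[(vaddr >> (PAGE_SHIFT + LVL_BITS)) & mask];
        if (!pte)
            return NULL;
        return &pte[(vaddr >> PAGE_SHIFT) & mask];  /* pte_offset(pmd, addr) */
    }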
|
D | entry.S |
    185  adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
    199  ld8 sp=[r21]           // load kernel stack pointer of new task
    294  mov r21=b0
    308  st8 [r14]=r21,SW(B1)-SW(B0)  // save b0
    315  mov r21=ar.lc          // I-unit
    325  st8 [r15]=r21          // save ar.lc
    353  mov r21=pr
    356  st8 [r3]=r21           // save predicate registers
    385  ld8 r21=[r2],16        // restore b0
    420  mov b0=r21
    [all …]
|
D | fsys.S |
    249  add r21 = IA64_CLKSRC_MMIO_OFFSET,r20
    266  ld8 r30 = [r21]        // clocksource->mmio_ptr
    329  mov r21 = r8
    348  (p14) shr.u r21 = r2, 4
    351  EX(.fail_efault, st8 [r23] = r21)
    614  mov r21=ar.fpsr
    733  ld8 r21=[r17]          // cumulated utime
    740  add r21=r21,r18        // sum utime
    743  st8 [r17]=r21          // update utime
|
D | mca_asm.S |
    69   ld4 r21=[r17],4        // r21=ptce_stride[0]
    89   add r18=r21,r18
    876  movl r21=PAGE_KERNEL   // page properties
    880  or r21=r20,r21         // construct PA | page properties
    887  itr.d dtr[r20]=r21
    1050 movl r21=PAGE_KERNEL   // page properties
    1053 or r21=r20,r21         // construct PA | page properties
    1063 itr.d dtr[r20]=r21
|
/linux-2.6.39/arch/tile/lib/ |
D | atomic_asm_32.S |
    137  tns r21, ATOMIC_LOCK_REG_NAME
    141  bzt r21, 1b            /* branch if lock acquired */
    155  tns r21, ATOMIC_LOCK_REG_NAME
    159  bzt r21, 1b            /* branch if lock acquired */
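
The atomic_asm_32.S lines are the lock-acquisition loop behind tile's atomic operations: tns atomically reads the lock word and stores a nonzero value into it, so a zero result means the lock was free and is now held ("branch if lock acquired"). The same shape expressed with C11 atomics:

    #include <stdatomic.h>

    typedef struct {
        atomic_int word;                 /* 0 = free, nonzero = held */
    } tns_lock_t;

    static void tns_lock(tns_lock_t *lock)
    {
        /* Retry until the previous value was 0, i.e. until our write is
         * the one that took the lock.  A production lock would typically
         * also spin read-only or back off between attempts. */
        while (atomic_exchange_explicit(&lock->word, 1, memory_order_acquire))
            ;
    }

    static void tns_unlock(tns_lock_t *lock)
    {
        atomic_store_explicit(&lock->word, 0, memory_order_release);
    }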
|
/linux-2.6.39/arch/powerpc/kvm/ |
D | booke_interrupts.S |
    144  stw r21, VCPU_GPR(r21)(r4)
    222  lwz r21, VCPU_GPR(r21)(r4)
    252  stw r21, VCPU_GPR(r21)(r4)
    272  lwz r21, HOST_NV_GPR(r21)(r1)
    313  stw r21, HOST_NV_GPR(r21)(r1)
    333  lwz r21, VCPU_GPR(r21)(r4)
|
D | book3s_interrupts.S |
    66   PPC_LL r21, VCPU_GPR(r21)(vcpu); \
    202  PPC_STL r21, VCPU_GPR(r21)(r7)
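
Both PowerPC KVM entries above do the same bookkeeping around guest entry and exit: each guest general-purpose register is stored into its VCPU_GPR(rN) slot in the vcpu structure on exit and reloaded on entry, while the host's non-volatile registers go to HOST_NV_GPR slots on the stack. A sketch of that data movement, using an illustrative structure rather than the kernel's real layout (booke uses 32-bit stw/lwz, book3s uses the width-selecting PPC_STL/PPC_LL):

    #include <stdint.h>

    #define NR_GPRS 32

    struct vcpu_gprs {
        uint64_t gpr[NR_GPRS];           /* one slot per guest GPR */
    };

    /* Guest exit: spill the live register values into the vcpu. */
    static void save_guest_gprs(struct vcpu_gprs *vcpu,
                                const uint64_t live[NR_GPRS])
    {
        for (int i = 0; i < NR_GPRS; i++)
            vcpu->gpr[i] = live[i];      /* stw/PPC_STL rN, VCPU_GPR(rN) */
    }

    /* Guest entry: reload the guest's registers before returning to it. */
    static void load_guest_gprs(const struct vcpu_gprs *vcpu,
                                uint64_t live[NR_GPRS])
    {
        for (int i = 0; i < NR_GPRS; i++)
            live[i] = vcpu->gpr[i];      /* lwz/PPC_LL rN, VCPU_GPR(rN) */
    }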
|
/linux-2.6.39/arch/parisc/include/asm/ |
D | asmregs.h |
    39   arg5: .reg r21
    67   r21: .reg %r21
|
/linux-2.6.39/arch/powerpc/lib/ |
D | copyuser_64.S |
    449  std r21,-112(1)
    457  21: ld r21,512(4)
    471  33: std r21,520(3)
    489  51: ld r21,528(4)
    508  69: std r21,520(3)
    537  ld r21,-112(1)
    556  ld r21,-112(1)
|