/linux-3.4.99/arch/hexagon/mm/
D | copy_user_template.S
     32  p0 = cmp.gtu(bytes,#0)    define
     33  if (!p0.new) jump:nt .Ldone
     39  p0 = bitsclr(r3,#7)    define
     40  if (!p0.new) jump:nt .Loop_not_aligned_8
     65  p0 = bitsclr(r4,#7)    define
     66  if (p0.new) jump:nt .Lalign
     69  p0 = bitsclr(r3,#3)    define
     70  if (!p0.new) jump:nt .Loop_not_aligned_4
     95  p0 = bitsclr(r3,#1)    define
     96  if (!p0.new) jump:nt .Loop_not_aligned
     [all …]

/linux-3.4.99/arch/hexagon/lib/
D | memset.S
     42  p0 = cmp.eq(r2, #0)    define
     49  if p0 jumpr r31 /* count == 0, so return */
     54  p0 = tstbit(r9, #0)    define
     71  p0 = tstbit(r9, #1)    define
     73  if !p0 jump 3f /* skip initial byte store */
     84  p0 = tstbit(r9, #2)    define
     86  if !p0 jump 4f /* skip initial half store */
     97  p0 = cmp.gtu(r2, #7)    define
     99  if !p0 jump 5f /* skip initial word store */
    104  p0 = cmp.gtu(r2, #11)    define
    [all …]

/linux-3.4.99/arch/blackfin/mach-bf561/
D | secondary.S
     64  trace_buffer_init(p0,r0);
     87  GET_PDA(p0, r0);
     88  r0 = [p0 + PDA_DF_RETX];
     89  r1 = [p0 + PDA_DF_DCPLB];
     90  r2 = [p0 + PDA_DF_ICPLB];
     91  r3 = [p0 + PDA_DF_SEQSTAT];
    120  p0.l = .LWAIT_HERE;
    121  p0.h = .LWAIT_HERE;
    122  reti = p0;
    152  p0.h = hi(COREB_L1_CODE_START);
    [all …]

D | atomic.S
     44  coreslot_loadaddr p0;
     46  safe_testset p0, r2;
     51  p0 = r1;    define
     54  flushinv[p0];
     68  coreslot_loadaddr p0;
     70  safe_testset p0, r2;
     94  coreslot_loadaddr p0;
     96  [p0] = r1;
    109  [--sp] = p0;
    135  p0 = [sp++];    define
    [all …]

/linux-3.4.99/drivers/staging/omapdrm/
D | tcm.h
     54  struct tcm_pt p0;    member
    224  slice->p0.y != slice->p1.y &&    in tcm_slice()
    225  (slice->p0.x || (slice->p1.x != slice->tcm->width - 1))) {    in tcm_slice()
    228  slice->p1.y = (slice->p0.x) ? slice->p0.y : slice->p1.y - 1;    in tcm_slice()
    230  parent->p0.x = 0;    in tcm_slice()
    231  parent->p0.y = slice->p1.y + 1;    in tcm_slice()
    245  area->p0.y <= area->p1.y &&    in tcm_area_is_valid()
    248  area->p0.x < area->tcm->width &&    in tcm_area_is_valid()
    249  area->p0.x + area->p0.y * area->tcm->width <=    in tcm_area_is_valid()
    253  area->p0.x <= area->p1.x));    in tcm_area_is_valid()
    [all …]

D | tcm-sita.c
    243  WARN_ON(pvt->map[area->p0.x][area->p0.y] != area ||    in sita_free()
    282  start_x = field->p0.x;    in scan_r2l_t2b()
    284  start_y = field->p0.y;    in scan_r2l_t2b()
    288  if (field->p0.x < field->p1.x ||    in scan_r2l_t2b()
    289  field->p1.y < field->p0.y)    in scan_r2l_t2b()
    320  x = ALIGN(map[x][y]->p0.x - w + 1, align);    in scan_r2l_t2b()
    332  assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);    in scan_r2l_t2b()
    356  start_x = field->p0.x;    in scan_l2r_t2b()
    358  start_y = field->p0.y;    in scan_l2r_t2b()
    362  if (field->p1.x < field->p0.x ||    in scan_l2r_t2b()
    [all …]

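For readers scanning these hits: in the omapdrm tile container manager, p0 and p1 are the two corner points of a tcm_area. The minimal C sketch below is reconstructed only from the lines above; the x, y, and width fields appear in those hits, while every other type and field here is an assumption, not the actual omapdrm definition.

#include <stdbool.h>
#include <stdint.h>

struct tcm_pt { uint16_t x, y; };           /* one corner of a 2D region */
struct tcm    { uint16_t width, height; };  /* hypothetical container; width appears above */

struct tcm_area {
        struct tcm *tcm;
        struct tcm_pt p0;                   /* first corner (tcm.h line 54) */
        struct tcm_pt p1;                   /* opposite corner */
};

/* Rough analogue of the checks visible in tcm_area_is_valid() above. */
static bool area_looks_valid(const struct tcm_area *a)
{
        return a->tcm &&
               a->p0.y <= a->p1.y &&        /* rows in order */
               a->p0.x < a->tcm->width &&   /* start column inside the map */
               a->p0.x <= a->p1.x;          /* columns in order */
}
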
/linux-3.4.99/arch/blackfin/kernel/
D | ftrace-entry.S
     99  p0.l = _ftrace_trace_function;
    100  p0.h = _ftrace_trace_function;
    101  r3 = [p0];
    113  p0.l = _ftrace_graph_return;
    114  p0.h = _ftrace_graph_return;
    115  r3 = [p0];
    122  p0.l = _ftrace_graph_entry;
    123  p0.h = _ftrace_graph_entry;
    126  r3 = [p0];
    142  p0 = r3;    define
    [all …]

D | entry.S
     29  p0.l = ___ipipe_root_status;
     30  p0.h = ___ipipe_root_status;
     31  r4 = [p0];
     33  [p0] = r4;
     41  p0.l = _bfin_irq_flags;
     42  p0.h = _bfin_irq_flags;
     43  r4 = [p0];
     64  jump (p0);

/linux-3.4.99/arch/blackfin/include/asm/
D | entry.h
     71  p0.l = lo(IPEND); \
     72  p0.h = hi(IPEND); \
     73  r1 = [p0]; \
     87  p0.l = lo(IPEND); \
     88  p0.h = hi(IPEND); \
     89  r1 = [p0]; \
    114  ANOMALY_283_315_WORKAROUND(p0, r0) \
    124  p0.l = lo(IPEND); \
    125  p0.h = hi(IPEND); \
    126  r1 = [p0]; \
    [all …]

/linux-3.4.99/arch/blackfin/mach-common/
D | head.S
     73  trace_buffer_init(p0,r0);
     96  GET_PDA(p0, r0);
     97  r0 = [p0 + PDA_DF_RETX];
     98  r1 = [p0 + PDA_DF_DCPLB];
     99  r2 = [p0 + PDA_DF_ICPLB];
    100  r3 = [p0 + PDA_DF_SEQSTAT];
    189  p0.l = _early_trap;
    190  p0.h = _early_trap;
    192  p0.l = .LWAIT_HERE;
    193  p0.h = .LWAIT_HERE;
    [all …]

D | cache.S
     83  p0.L = LO(DSPID);
     84  p0.H = HI(DSPID);
     85  r3 = [p0];
     88  p0.L = _blackfin_iflush_l1_entry;
     89  p0.H = _blackfin_iflush_l1_entry;
     90  p0 = p0 + (p2 << 2);    define
     91  p1 = [p0];

D | entry.S
    542  p0 = sp;    define
    546  [p0++] = r4;
    551  p0 = sp;    define
    553  [sp + 12] = p0;
    570  p0 = fp;    define
    571  r4 = [p0--];
    574  r4 = [p0--];
    618  pseudo_long_call ___ipipe_syscall_root, p0;
    626  p0 = [sp + PT_ORIG_P0];    define
    644  cc = p4 <= p0;
    [all …]

/linux-3.4.99/arch/unicore32/mm/
D | tlb-ucv2.S
     34  movc p0.c6, r0, #3
     40  movc p0.c6, r0, #5
     47  movc p0.c6, r0, #2
     53  movc p0.c6, r0, #4
     72  movc p0.c6, r0, #3
     75  movc p0.c6, r0, #5
     82  movc p0.c6, r0, #2
     85  movc p0.c6, r0, #4

D | proc-ucv2.S
     40  movc p0.c5, ip, #28 @ Cache invalidate all
     43  movc p0.c6, ip, #6 @ TLB invalidate all
     46  movc ip, p0.c1, #0 @ ctrl register
     49  movc p0.c1, ip, #0 @ disable caches and mmu
     78  3: movc p0.c5, r10, #11 @ clean D entry
     87  movc p0.c5, ip, #10 @ Dcache clean all
    104  movc p0.c2, r0, #0 @ update page table ptr
    107  movc p0.c6, ip, #6 @ TLB invalidate all
    125  movc p0.c5, r2, #11 @ Dcache clean line
    129  movc p0.c5, ip, #10 @ Dcache clean all

D | cache-ucv2.S
     34  movc p0.c5, r0, #14 @ Dcache flush all
     38  movc p0.c5, r0, #20 @ Icache invalidate all
     73  movc p0.c5, ip, #14 @ Dcache flush all
     77  movc p0.c5, ip, #20 @ Icache invalidate all
    113  103: movc p0.c5, r10, #11 @ Dcache clean line of R10
    123  movc p0.c5, ip, #10 @ Dcache clean all
    127  movc p0.c5, ip, #20 @ Icache invalidate all
    140  movc p0.c5, ip, #14 @ Dcache flush all
    167  1: movc p0.c5, r10, #11 @ Dcache clean line of R10
    176  movc p0.c5, ip, #10 @ Dcache clean all
    [all …]

/linux-3.4.99/scripts/coccinelle/misc/
D | doubleinit.cocci
     18  position p0,p;
     22  struct I s =@p0 { ..., .fld@p = E, ...};
     26  position r.p0,p;
     30  struct I s =@p0 { ..., .fld@p = E, ...};
     33  p0 << r.p0;
     40  cocci.print_main(fld,p0)
     45  p0 << r.p0;
     53  coccilib.report.print_report(p0[0],msg)

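In this script, p0 is a position variable bound to the structure initializer and p to each .fld assignment; the rule reports initializers that set the same field more than once. A hypothetical C input it would flag looks like the sketch below (the struct and function names are illustrative, not from the kernel tree):

struct ops {
        int (*open)(void);
        int (*close)(void);
};

static int do_open(void)  { return 0; }
static int do_close(void) { return 0; }

/* .open is initialized twice; the duplicate is reported at the
 * position p0 recorded for this initializer. */
static struct ops example_ops = {
        .open  = do_open,
        .close = do_close,
        .open  = do_open,
};
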
/linux-3.4.99/arch/ia64/kernel/
D | fsys.S
     88  cmp.ne p8,p0=0,r9
    110  cmp.ne p8,p0=0,r9
    126  cmp.ne p6,p0=r18,r19 // did real_parent change?
    164  cmp.ne p8,p0=0,r9
    192  tnat.nz p6,p0 = r33 // guard against NaT argument
    241  tnat.nz p6,p0 = r31 // guard against Nat argument
    256  cmp.ne p6, p0 = 0, r2 // Fallback if work is scheduled
    276  (p8) cmp.ne p13,p0 = r2,r0 // need itc_jitter compensation, set p13
    279  (p9) cmp.eq p13,p0 = 0,r30 // if mmio_ptr, clear p13 jitter control
    298  (p7) cmp.ne p7,p0 = r25,r3 // if cmpxchg not successful
    [all …]

D | ivt.S
    127  cmp.ne p8,p0=r18,r26
    267  tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
    270  ITC_I(p0, r18, r19)
    282  cmp.ne p7,p0=r18,r19
    311  tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
    314  ITC_D(p0, r18, r19)
    326  cmp.ne p7,p0=r18,r19
    341  MOV_FROM_IPSR(p0, r21)
    348  cmp.gt p8,p0=6,r22 // user mode
    361  cmp.ne p8,p0=r0,r23 // psr.cpl != 0?
    [all …]

/linux-3.4.99/drivers/scsi/qla4xxx/
D | ql4_dbg.c
    107  offsetof(struct isp_reg, u2.isp4022.p0.ext_hw_conf),    in qla4xxx_dump_registers()
    108  readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));    in qla4xxx_dump_registers()
    110  offsetof(struct isp_reg, u2.isp4022.p0.port_ctrl),    in qla4xxx_dump_registers()
    111  readw(&ha->reg->u2.isp4022.p0.port_ctrl));    in qla4xxx_dump_registers()
    113  offsetof(struct isp_reg, u2.isp4022.p0.port_status),    in qla4xxx_dump_registers()
    114  readw(&ha->reg->u2.isp4022.p0.port_status));    in qla4xxx_dump_registers()
    116  (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_out),    in qla4xxx_dump_registers()
    117  readw(&ha->reg->u2.isp4022.p0.gp_out));    in qla4xxx_dump_registers()
    119  (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),    in qla4xxx_dump_registers()
    120  readw(&ha->reg->u2.isp4022.p0.gp_in));    in qla4xxx_dump_registers()
    [all …]

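These hits all follow one dump idiom in qla4xxx_dump_registers(): print a register's offset inside the ISP4022 port-0 register block with offsetof(), and its live value with readw() on the ioremapped register. The sketch below shows that idiom with a simplified stand-in struct; it is not the real isp_reg layout, and the format strings are assumptions.

#include <linux/io.h>
#include <linux/printk.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct port_regs {                          /* stand-in for u2.isp4022.p0 */
        __le16 ext_hw_conf;
        __le16 port_ctrl;
        __le16 port_status;
};

static void dump_port0(struct port_regs __iomem *p0)
{
        /* offset within the block, then the current 16-bit register value */
        pr_info("0x%02zx ext_hw_conf = 0x%04x\n",
                offsetof(struct port_regs, ext_hw_conf), readw(&p0->ext_hw_conf));
        pr_info("0x%02zx port_ctrl   = 0x%04x\n",
                offsetof(struct port_regs, port_ctrl), readw(&p0->port_ctrl));
        pr_info("0x%02zx port_status = 0x%04x\n",
                offsetof(struct port_regs, port_status), readw(&p0->port_status));
}
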
/linux-3.4.99/arch/ia64/lib/
D | memset.S
     67  cmp.eq p_scr, p0 = cnt, r0
     79  cmp.ne p_unalgn, p0 = tmp, r0 //
     82  cmp.gt p_scr, p0 = 16, cnt // is it a minimalistic task?
    116  cmp.gt p_scr, p0 = tmp, cnt // is it a minimalistic task?
    135  cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
    184  cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching?
    192  cmp.le p_scr, p0 = 8, cnt // just a few bytes left ?
    205  cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
    238  cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching?
    246  cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ?
    [all …]

D | clear_user.S
     58  cmp.eq p6,p0=r0,len // check for zero length
     67  cmp.lt p6,p0=16,len // if len > 16 then long memset
    102  tbit.nz p6,p0=buf,0 // odd alignment (for long_do_clear)
    106  tbit.nz p6,p0=buf,1
    110  tbit.nz p6,p0=buf,2
    114  tbit.nz p6,p0=buf,3
    120  cmp.eq p6,p0=r0,cnt
    165  tbit.nz p6,p0=len,3

D | strlen.S
    102  cmp.eq p6,p0=r0,r0 // sets p6 to true for cmp.and
    117  cmp.eq.and p6,p0=8,val1 // p6 = p6 and val1==8
    118  cmp.eq.and p6,p0=8,val2 // p6 = p6 and mask==8
    135  cmp.eq.and p7,p0=8,val1// val1==8?
    136  tnat.nz.and p7,p0=val2 // test NaT if val2
    172  cmp.eq p0,p6=r0,r0 // nullify first ld8 in loop
    182  cmp.eq p6,p0=8,val1 // val1==8 ?

D | strlen_user.S
    104  cmp.eq p6,p0=r0,r0 // sets p6 (required because of // cmp.and)
    120  cmp.eq.and p6,p0=8,val1 // p6 = p6 and val1==8
    121  cmp.eq.and p6,p0=8,val2 // p6 = p6 and mask==8
    138  cmp.eq.and p7,p0=8,val1// val1==8?
    139  tnat.nz.and p7,p0=val2 // test NaT if val2
    169  cmp.eq p0,p6=r0,r0 // nullify first ld8 in loop
    179  cmp.eq p6,p0=8,val1 // val1==8 ?

D | do_csum.S
    133  cmp.lt p0,p6=r0,len // check for zero length or negative (32bit len)
    141  tbit.nz p15,p0=buf,0 // is buf an odd address?
    186  cmp.ltu p6,p0=result1[0],word1[0] // check the carry
    187  cmp.eq.or.andcm p8,p0=0,count // exit if zero 8-byte
    200  cmp.ltu p6,p0=result1[0],word1[1]
    226  (ELD_1) cmp.ltu pC1[0],p0=result1[LOAD_LATENCY],word1[LOAD_LATENCY+1]
    228  (ELD_1) cmp.ltu pC2[0],p0=result2[LOAD_LATENCY],word2[LOAD_LATENCY+1]
    244  cmp.ltu p6,p0=result1[LOAD_LATENCY+1],carry1
    245  cmp.ltu p7,p0=result2[LOAD_LATENCY+1],carry2
    252  cmp.ltu p6,p0=result1[0],result2[LOAD_LATENCY+1]

/linux-3.4.99/arch/unicore32/kernel/
D | sleep.S
     24  movc r3, p0.c7, #0 @ PID
     25  movc r4, p0.c2, #0 @ translation table base addr
     26  movc r5, p0.c1, #0 @ control reg
     70  movc p0.c5, r1, #14
    171  movc p0.c6, r1, #6 @ invalidate I & D TLBs
    172  movc p0.c5, r1, #28 @ invalidate I & D caches, BTB
    174  movc p0.c7, r3, #0 @ PID
    175  movc p0.c2, r4, #0 @ translation table base addr
    176  movc p0.c1, r5, #0 @ control reg, turn on mmu