Searched refs:p_scr (Results 1 – 2 of 2) sorted by relevance
/glibc-2.36/sysdeps/ia64/

memset.S
     54  #define p_scr p6                 // default register for same-cycle branches  [macro]
     92  cmp.eq p_scr, p0 = cnt, r0
    101  (p_scr) br.ret.dpnt.many rp      // return immediately if count = 0
    107  cmp.gt p_scr, p0 = 16, cnt       // is it a minimalistic task?
    108  (p_scr) br.cond.dptk.many .move_bytes_unaligned  // go move just a few (M_B_U)
    141  cmp.gt p_scr, p0 = tmp, cnt      // is it a minimalistic task?
    144  (p_scr) br.cond.dpnt.many .fraction_of_line      // go move just a few
    162  cmp.gt p_scr, p0 = PREF_AHEAD, linecnt           // check against actual value
    165  (p_scr) add loopcnt = -1, linecnt                // start of stores
    211  cmp.lt p_scr, p0 = ptr9, ptr1    // do we need more prefetching?
    [all …]
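The memset.S matches illustrate the IA-64 predication idiom behind p_scr: a cmp instruction writes a predicate register (and its complement into a second one; p0 is hardwired to 1, so routing the complement there discards it), and a following instruction prefixed with (p_scr) takes effect only when that predicate is set. A minimal sketch of the early-exit at lines 92/101, assuming cnt is the register alias for the byte count defined elsewhere in memset.S:

    #define p_scr p6                      // scratch predicate for same-cycle branches
            cmp.eq   p_scr, p0 = cnt, r0  // p_scr <- (cnt == 0); complement discarded via p0
    (p_scr) br.ret.dpnt.many rp           // predicated return: taken only when cnt == 0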
memcpy.S
     72  #define p_scr p6  [macro]
    177  cmp.eq p_scr, p0 = in2, r0       // if (len == 0)
    184  (p_scr) br.cond.dpnt.few .restore_and_exit       // Branch no. 1: return dest
    194  cmp.eq p_scr, p0 = tmp4, r0      // is destination aligned?
    196  (p_scr) br.cond.dptk.many .dest_aligned
    203  cmp.ne p_scr, p0 = 0, loopcnt    // avoid loading beyond end-point
    209  (p_scr) ld1 tmp2 = [src], 1      //
    211  cmp.lt p_scr, p0 = 1, loopcnt    // avoid load beyond end-point
    222  cmp.ne p_scr, p0 = tmp4, r0      // is source also aligned
    227  (p_scr) br.cond.dptk.many .src_not_aligned
    [all …]
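The memcpy.S hits use the same compare-then-predicate idiom for dispatch: alignment tests steer execution between the aligned fast path and the byte-wise path. A hedged sketch of the destination-alignment check around lines 194/196, assuming tmp4 holds the low bits of the destination address (the dest alias and the 8-byte alignment mask are assumptions for illustration, not taken verbatim from glibc):

            and      tmp4 = 7, dest            // assumed: extract low 3 bits of destination
            cmp.eq   p_scr, p0 = tmp4, r0      // p_scr <- (dest is 8-byte aligned)
    (p_scr) br.cond.dptk.many .dest_aligned    // jump to the aligned copy loop when set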