/* U1memcpy.S: UltraSPARC-I/II/IIi/IIe optimized memcpy.
 *
 * Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */

#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
#else
#define ASI_BLK_P 0xf0
#define FPRS_FEF  0x04
#ifdef MEMCPY_DEBUG
#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
		 clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#else
#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#endif
#endif

#ifndef EX_LD
#define EX_LD(x)	x
#endif

#ifndef EX_ST
#define EX_ST(x)	x
#endif

#ifndef EX_RETVAL
#define EX_RETVAL(x)	x
#endif

#ifndef LOAD
#define LOAD(type,addr,dest)	type [addr], dest
#endif

#ifndef LOAD_BLK
#define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_P, dest
#endif

#ifndef STORE
#define STORE(type,src,addr)	type src, [addr]
#endif

#ifndef STORE_BLK
#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P
#endif

#ifndef FUNC_NAME
#define FUNC_NAME	memcpy
#endif

#ifndef PREAMBLE
#define PREAMBLE
#endif

#ifndef XCC
#define XCC xcc
#endif

#define FREG_FROB(f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
	faligndata	%f1, %f2, %f48;			\
	faligndata	%f2, %f3, %f50;			\
	faligndata	%f3, %f4, %f52;			\
	faligndata	%f4, %f5, %f54;			\
	faligndata	%f5, %f6, %f56;			\
	faligndata	%f6, %f7, %f58;			\
	faligndata	%f7, %f8, %f60;			\
	faligndata	%f8, %f9, %f62;

#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt)	\
	EX_LD(LOAD_BLK(%src, %fdest));				\
	EX_ST(STORE_BLK(%fsrc, %dest));				\
	add		%src, 0x40, %src;			\
	subcc		%len, 0x40, %len;			\
	be,pn		%xcc, jmptgt;				\
	 add		%dest, 0x40, %dest;			\

#define LOOP_CHUNK1(src, dest, len, branch_dest)		\
	MAIN_LOOP_CHUNK(src, dest, f0,  f48, len, branch_dest)
#define LOOP_CHUNK2(src, dest, len, branch_dest)		\
	MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest)
#define LOOP_CHUNK3(src, dest, len, branch_dest)		\
	MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest)

#define STORE_SYNC(dest, fsrc)				\
	EX_ST(STORE_BLK(%fsrc, %dest));			\
	add		%dest, 0x40, %dest;

#define STORE_JUMP(dest, fsrc, target)			\
	EX_ST(STORE_BLK(%fsrc, %dest));			\
	add		%dest, 0x40, %dest;		\
	ba,pt		%xcc, target;

#define FINISH_VISCHUNK(dest, f0, f1, left)	\
	subcc		%left, 8, %left;	\
	bl,pn		%xcc, 95f;		\
	 faligndata	%f0, %f1, %f48;		\
	EX_ST(STORE(std, %f48, %dest));		\
	add		%dest, 8, %dest;

#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left)	\
	subcc		%left, 8, %left;		\
	bl,pn		%xcc, 95f;			\
	 fsrc1		%f0, %f1;

#define UNEVEN_VISCHUNK(dest, f0, f1, left)		\
	UNEVEN_VISCHUNK_LAST(dest, f0, f1, left)	\
	ba,a,pt		%xcc, 93f;

	.register	%g2,#scratch
	.register	%g3,#scratch

	.text
	.align		64

	.globl		FUNC_NAME
FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
	PREAMBLE
	mov		%o0, %g5
	cmp		%o2, 0
	be,pn		%XCC, 85f
	 or		%o0, %o1, %o3
	cmp		%o2, 16
	blu,a,pn	%XCC, 80f
	 or		%o3, %o2, %o3

	cmp		%o2, (5 * 64)
	blu,pt		%XCC, 70f
	 andcc		%o3, 0x7, %g0

	/* Clobbers o5/g1/g2/g3/g7/icc/xcc. */
	VISEntry

	/* Is 'dst' already aligned on a 64-byte boundary? */
	andcc		%o0, 0x3f, %g2
	be,pt		%XCC, 2f

	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
	 * subtract this from 'len'.
	 */
	 sub		%o0, %o1, %o4
	sub		%g2, 0x40, %g2
	sub		%g0, %g2, %g2
	sub		%o2, %g2, %o2
	andcc		%g2, 0x7, %g1
	be,pt		%icc, 2f
	 and		%g2, 0x38, %g2

1:	subcc		%g1, 0x1, %g1
	EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
	EX_ST(STORE(stb, %o3, %o1 + %o4))
	bgu,pt		%XCC, 1b
	 add		%o1, 0x1, %o1

	add		%o1, %o4, %o0

2:	cmp		%g2, 0x0
	and		%o1, 0x7, %g1
	be,pt		%icc, 3f
	 alignaddr	%o1, %g0, %o1

	EX_LD(LOAD(ldd, %o1, %f4))
1:	EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f4, %f6, %f0
	EX_ST(STORE(std, %f0, %o0))
	be,pn		%icc, 3f
	 add		%o0, 0x8, %o0

	EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f6, %f4, %f0
	EX_ST(STORE(std, %f0, %o0))
	bne,pt		%icc, 1b
	 add		%o0, 0x8, %o0

	/* Destination is 64-byte aligned. */
3:
	membar		#LoadStore | #StoreStore | #StoreLoad

	subcc		%o2, 0x40, %o4
	add		%o1, %g1, %g1
	andncc		%o4, (0x40 - 1), %o4
	srl		%g1, 3, %g2
	sub		%o2, %o4, %g3
	andn		%o1, (0x40 - 1), %o1
	and		%g2, 7, %g2
	andncc		%g3, 0x7, %g3
	fmovd		%f0, %f2
	sub		%g3, 0x8, %g3
	sub		%o2, %o4, %o2

	add		%g1, %o4, %g1
	subcc		%o2, %g3, %o2

	EX_LD(LOAD_BLK(%o1, %f0))
	add		%o1, 0x40, %o1
	add		%g1, %g3, %g1
	EX_LD(LOAD_BLK(%o1, %f16))
	add		%o1, 0x40, %o1
	sub		%o4, 0x80, %o4
	EX_LD(LOAD_BLK(%o1, %f32))
	add		%o1, 0x40, %o1

	/* There are 8 instances of the unrolled loop,
	 * one for each possible alignment of the
	 * source buffer.  Each loop instance is 452
	 * bytes.  The shifts and adds below scale the
	 * alignment index in %g2 by that size:
	 * ((%g2 * 8 - %g2) * 16 + %g2) * 4 == %g2 * 452.
	 */
	sll		%g2, 3, %o3
	sub		%o3, %g2, %o3
	sllx		%o3, 4, %o3
	add		%o3, %g2, %o3
	sllx		%o3, 2, %g2
1:	rd		%pc, %o3
	add		%o3, %lo(1f - 1b), %o3
	jmpl		%o3 + %g2, %g0
	 nop

	.align		64
1:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
	LOOP_CHUNK1(o1, o0, o4, 1f)
	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
	LOOP_CHUNK2(o1, o0, o4, 2f)
	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
	LOOP_CHUNK3(o1, o0, o4, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f0, %f2, %f48
1:	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
	STORE_JUMP(o0, f48, 40f) membar #Sync
2:	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
	STORE_JUMP(o0, f48, 48f) membar #Sync
3:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
	STORE_JUMP(o0, f48, 56f) membar #Sync

1:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
	LOOP_CHUNK1(o1, o0, o4, 1f)
	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
	LOOP_CHUNK2(o1, o0, o4, 2f)
	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
	LOOP_CHUNK3(o1, o0, o4, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f2, %f4, %f48
1:	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
	STORE_JUMP(o0, f48, 41f) membar #Sync
2:	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
	STORE_JUMP(o0, f48, 49f) membar #Sync
3:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
	STORE_JUMP(o0, f48, 57f) membar #Sync

1:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
	LOOP_CHUNK1(o1, o0, o4, 1f)
	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
	LOOP_CHUNK2(o1, o0, o4, 2f)
	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
	LOOP_CHUNK3(o1, o0, o4, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f4, %f6, %f48
1:	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
	STORE_JUMP(o0, f48, 42f) membar #Sync
2:	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
	STORE_JUMP(o0, f48, 50f) membar #Sync
3:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
	STORE_JUMP(o0, f48, 58f) membar #Sync

1:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
	LOOP_CHUNK1(o1, o0, o4, 1f)
	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
	LOOP_CHUNK2(o1, o0, o4, 2f)
	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
	LOOP_CHUNK3(o1, o0, o4, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f6, %f8, %f48
1:	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
	STORE_JUMP(o0, f48, 43f) membar #Sync
2:	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
	STORE_JUMP(o0, f48, 51f) membar #Sync
3:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
	STORE_JUMP(o0, f48, 59f) membar #Sync

1:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
	LOOP_CHUNK1(o1, o0, o4, 1f)
	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
	LOOP_CHUNK2(o1, o0, o4, 2f)
	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
	LOOP_CHUNK3(o1, o0, o4, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f8, %f10, %f48
1:	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
	STORE_JUMP(o0, f48, 44f) membar #Sync
2:	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
	STORE_JUMP(o0, f48, 52f) membar #Sync
3:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
	STORE_JUMP(o0, f48, 60f) membar #Sync

1:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
	LOOP_CHUNK1(o1, o0, o4, 1f)
	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
	LOOP_CHUNK2(o1, o0, o4, 2f)
	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
	LOOP_CHUNK3(o1, o0, o4, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f10, %f12, %f48
1:	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
	STORE_JUMP(o0, f48, 45f) membar #Sync
2:	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
	STORE_JUMP(o0, f48, 53f) membar #Sync
3:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
	STORE_JUMP(o0, f48, 61f) membar #Sync

1:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
	LOOP_CHUNK1(o1, o0, o4, 1f)
	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
	LOOP_CHUNK2(o1, o0, o4, 2f)
	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
	LOOP_CHUNK3(o1, o0, o4, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f12, %f14, %f48
1:	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
	STORE_JUMP(o0, f48, 46f) membar #Sync
2:	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
	STORE_JUMP(o0, f48, 54f) membar #Sync
3:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
	STORE_JUMP(o0, f48, 62f) membar #Sync

1:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
	LOOP_CHUNK1(o1, o0, o4, 1f)
	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
	LOOP_CHUNK2(o1, o0, o4, 2f)
	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
	LOOP_CHUNK3(o1, o0, o4, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f14, %f16, %f48
1:	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
	STORE_JUMP(o0, f48, 47f) membar #Sync
2:	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
	STORE_JUMP(o0, f48, 55f) membar #Sync
3:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
	STORE_JUMP(o0, f48, 63f) membar #Sync

40:	FINISH_VISCHUNK(o0, f0,  f2,  g3)
41:	FINISH_VISCHUNK(o0, f2,  f4,  g3)
42:	FINISH_VISCHUNK(o0, f4,  f6,  g3)
43:	FINISH_VISCHUNK(o0, f6,  f8,  g3)
44:	FINISH_VISCHUNK(o0, f8,  f10, g3)
45:	FINISH_VISCHUNK(o0, f10, f12, g3)
46:	FINISH_VISCHUNK(o0, f12, f14, g3)
47:	UNEVEN_VISCHUNK(o0, f14, f0,  g3)
48:	FINISH_VISCHUNK(o0, f16, f18, g3)
49:	FINISH_VISCHUNK(o0, f18, f20, g3)
50:	FINISH_VISCHUNK(o0, f20, f22, g3)
51:	FINISH_VISCHUNK(o0, f22, f24, g3)
52:	FINISH_VISCHUNK(o0, f24, f26, g3)
53:	FINISH_VISCHUNK(o0, f26, f28, g3)
54:	FINISH_VISCHUNK(o0, f28, f30, g3)
55:	UNEVEN_VISCHUNK(o0, f30, f0,  g3)
56:	FINISH_VISCHUNK(o0, f32, f34, g3)
57:	FINISH_VISCHUNK(o0, f34, f36, g3)
58:	FINISH_VISCHUNK(o0, f36, f38, g3)
59:	FINISH_VISCHUNK(o0, f38, f40, g3)
60:	FINISH_VISCHUNK(o0, f40, f42, g3)
61:	FINISH_VISCHUNK(o0, f42, f44, g3)
62:	FINISH_VISCHUNK(o0, f44, f46, g3)
63:	UNEVEN_VISCHUNK_LAST(o0, f46, f0,  g3)

93:	EX_LD(LOAD(ldd, %o1, %f2))
	add		%o1, 8, %o1
	subcc		%g3, 8, %g3
	faligndata	%f0, %f2, %f8
	EX_ST(STORE(std, %f8, %o0))
	bl,pn		%xcc, 95f
	 add		%o0, 8, %o0
	EX_LD(LOAD(ldd, %o1, %f0))
	add		%o1, 8, %o1
	subcc		%g3, 8, %g3
	faligndata	%f2, %f0, %f8
	EX_ST(STORE(std, %f8, %o0))
	bge,pt		%xcc, 93b
	 add		%o0, 8, %o0

95:	brz,pt		%o2, 2f
	 mov		%g1, %o1

1:	EX_LD(LOAD(ldub, %o1, %o3))
	add		%o1, 1, %o1
	subcc		%o2, 1, %o2
	EX_ST(STORE(stb, %o3, %o0))
	bne,pt		%xcc, 1b
	 add		%o0, 1, %o0

2:	membar		#StoreLoad | #StoreStore
	VISExit
	retl
	 mov		EX_RETVAL(%g5), %o0

	.align		64
70: /* 16 < len <= (5 * 64) */
	bne,pn		%XCC, 75f
	 sub		%o0, %o1, %o3

72:	andn		%o2, 0xf, %o4
	and		%o2, 0xf, %o2
1:	EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
	EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
	subcc		%o4, 0x10, %o4
	EX_ST(STORE(stx, %o5, %o1 + %o3))
	add		%o1, 0x8, %o1
	EX_ST(STORE(stx, %g1, %o1 + %o3))
	bgu,pt		%XCC, 1b
	 add		%o1, 0x8, %o1
73:	andcc		%o2, 0x8, %g0
	be,pt		%XCC, 1f
	 nop
	EX_LD(LOAD(ldx, %o1, %o5))
	sub		%o2, 0x8, %o2
	EX_ST(STORE(stx, %o5, %o1 + %o3))
	add		%o1, 0x8, %o1
1:	andcc		%o2, 0x4, %g0
	be,pt		%XCC, 1f
	 nop
	EX_LD(LOAD(lduw, %o1, %o5))
	sub		%o2, 0x4, %o2
	EX_ST(STORE(stw, %o5, %o1 + %o3))
	add		%o1, 0x4, %o1
1:	cmp		%o2, 0
	be,pt		%XCC, 85f
	 nop
	ba,pt		%xcc, 90f
	 nop

75:	andcc		%o0, 0x7, %g1
	sub		%g1, 0x8, %g1
	be,pn		%icc, 2f
	 sub		%g0, %g1, %g1
	sub		%o2, %g1, %o2

1:	EX_LD(LOAD(ldub, %o1, %o5))
	subcc		%g1, 1, %g1
	EX_ST(STORE(stb, %o5, %o1 + %o3))
	bgu,pt		%icc, 1b
	 add		%o1, 1, %o1

2:	add		%o1, %o3, %o0
	andcc		%o1, 0x7, %g1
	bne,pt		%icc, 8f
	 sll		%g1, 3, %g1

	cmp		%o2, 16
	bgeu,pt		%icc, 72b
	 nop
	ba,a,pt		%xcc, 73b

8:	mov		64, %o3
	andn		%o1, 0x7, %o1
	EX_LD(LOAD(ldx, %o1, %g2))
	sub		%o3, %g1, %o3
	andn		%o2, 0x7, %o4
	sllx		%g2, %g1, %g2
1:	EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
	subcc		%o4, 0x8, %o4
	add		%o1, 0x8, %o1
	srlx		%g3, %o3, %o5
	or		%o5, %g2, %o5
	EX_ST(STORE(stx, %o5, %o0))
	add		%o0, 0x8, %o0
	bgu,pt		%icc, 1b
	 sllx		%g3, %g1, %g2

	srl		%g1, 3, %g1
	andcc		%o2, 0x7, %o2
	be,pn		%icc, 85f
	 add		%o1, %g1, %o1
	ba,pt		%xcc, 90f
	 sub		%o0, %o1, %o3

	.align		64
80: /* 0 < len <= 16 */
	andcc		%o3, 0x3, %g0
	bne,pn		%XCC, 90f
	 sub		%o0, %o1, %o3

1:	EX_LD(LOAD(lduw, %o1, %g1))
	subcc		%o2, 4, %o2
	EX_ST(STORE(stw, %g1, %o1 + %o3))
	bgu,pt		%XCC, 1b
	 add		%o1, 4, %o1

85:	retl
	 mov		EX_RETVAL(%g5), %o0

	.align		32
90:	EX_LD(LOAD(ldub, %o1, %g1))
	subcc		%o2, 1, %o2
	EX_ST(STORE(stb, %g1, %o1 + %o3))
	bgu,pt		%XCC, 90b
	 add		%o1, 1, %o1
	retl
	 mov		EX_RETVAL(%g5), %o0