/*
 * Itanium 2-optimized version of memcpy and copy_user function
 *
 * Inputs:
 * 	in0:	destination address
 *	in1:	source address
 *	in2:	number of bytes to copy
 * Output:
 * 	for bcopy: return nothing
 *	for memcpy: return dest
 * 	for copy_user: 0 if success,
 *		   or number of bytes NOT copied if error occurred.
 *
 * Copyright (C) 2002 Intel Corp.
 * Copyright (C) 2002 Ken Chen <kenneth.w.chen@intel.com>
 */
#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/page.h>

#if __GNUC__ >= 3
# define EK(y...) EX(y)
#else
# define EK(y,x...) x
#endif

/* McKinley specific optimization */

#define retval		r8
#define saved_pfs	r31
#define saved_lc	r10
#define saved_pr	r11
#define saved_in0	r14
#define saved_in1	r15
#define saved_in2	r16

#define src0		r2
#define src1		r3
#define dst0		r17
#define dst1		r18
#define cnt		r9

/* r19-r30 are temp for each code section */
#define PREFETCH_DIST	8
#define src_pre_mem	r19
#define dst_pre_mem	r20
#define src_pre_l2	r21
#define dst_pre_l2	r22
#define t1		r23
#define t2		r24
#define t3		r25
#define t4		r26
#define t5		t1	// alias!
#define t6		t2	// alias!
#define t7		t3	// alias!
#define n8		r27
#define t9		t5	// alias!
#define t10		t4	// alias!
#define t11		t7	// alias!
#define t12		t6	// alias!
#define t14		t10	// alias!
#define t13		r28
#define t15		r29
#define tmp		r30

/* defines for long_copy block */
#define	A	0
#define B	(PREFETCH_DIST)
#define C	(B + PREFETCH_DIST)
#define D	(C + 1)
#define N	(D + 1)
#define Nrot	((N + 7) & ~7)

/* alias */
#define in0		r32
#define in1		r33
#define in2		r34

GLOBAL_ENTRY(bcopy)
	.regstk 3,0,0,0
	mov	r8=in0		// swap the src and dest arguments
	mov	in0=in1
	;;
	mov	in1=r8
	;;
END(bcopy)	// fall through to memcpy
GLOBAL_ENTRY(memcpy)
	and	r28=0x7,in0
	and	r29=0x7,in1
	mov	f6=f0
	mov	retval=in0
	br.cond.sptk .common_code
	;;
END(memcpy)
GLOBAL_ENTRY(__copy_user)
	.prologue
// check dest alignment
	and	r28=0x7,in0
	and	r29=0x7,in1
	mov	f6=f1
	mov	saved_in0=in0	// save dest pointer
	mov	saved_in1=in1	// save src pointer
	mov	retval=r0	// initialize return value
	;;
.common_code:
	cmp.gt	p15,p0=8,in2	// check for small size
	cmp.ne	p13,p0=0,r28	// check dest alignment
	cmp.ne	p14,p0=0,r29	// check src alignment
	add	src0=0,in1
	sub	r30=8,r28	// for .align_dest
	mov	saved_in2=in2	// save len
	;;
	add	dst0=0,in0
	add	dst1=1,in0	// dest odd index
	cmp.le	p6,p0 = 1,r30	// for .align_dest
(p15)	br.cond.dpnt .memcpy_short
(p13)	br.cond.dpnt .align_dest
(p14)	br.cond.dpnt .unaligned_src
	;;

// both dest and src are aligned on 8-byte boundary
.aligned_src:
	.save ar.pfs, saved_pfs
	alloc	saved_pfs=ar.pfs,3,Nrot-3,0,Nrot
	.save pr, saved_pr
	mov	saved_pr=pr

	shr.u	cnt=in2,7	// number of 128-byte cache lines
	;;
	cmp.lt	p6,p0=2*PREFETCH_DIST,cnt
	cmp.lt	p7,p8=1,cnt
	.save ar.lc, saved_lc
	mov	saved_lc=ar.lc
	.body
	add	cnt=-1,cnt
	add	src_pre_mem=0,in1	// prefetch src pointer
	add	dst_pre_mem=0,in0	// prefetch dest pointer
	;;
(p7)	mov	ar.lc=cnt	// prefetch count
(p8)	mov	ar.lc=r0
(p6)	br.cond.dpnt .long_copy
	;;

.prefetch:
	lfetch	[src_pre_mem], 128
	lfetch.excl [dst_pre_mem], 128
	br.cloop.dptk.few .prefetch
	;;
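// A rough C model of the software-pipelined loop in .medium_copy below
// (illustrative sketch only, not part of the build; all names are made
// up).  Two 8-byte streams (src0/dst0 and src1/dst1) are interleaved,
// and the p16/p17 stage predicates with ar.ec=2 give the loads a
// one-iteration head start over the stores; the real loop unrolls this
// to 32 bytes per br.ctop trip:
//
//	#include <stdint.h>
//
//	static void medium_copy_model(uint64_t *d, const uint64_t *s, long n)
//	{
//		uint64_t cur0 = 0, cur1 = 0, prev0 = 0, prev1 = 0;
//
//		for (long i = 0; i <= n; i++) {	// n+1 trips: ar.ec=2 drains
//			if (i < n) {		// p16 stage: load pair i
//				cur0 = s[2*i];
//				cur1 = s[2*i + 1];
//			}
//			if (i > 0) {		// p17 stage: store pair i-1
//				d[2*(i-1)]     = prev0;
//				d[2*(i-1) + 1] = prev1;
//			}
//			prev0 = cur0;		// models register rotation
//			prev1 = cur1;
//		}
//	}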
.medium_copy:
	and	tmp=31,in2	// copy length after 32-byte iterations
	shr.u	r29=in2,5	// number of 32-byte iterations
	add	dst1=8,dst0	// 2nd dest pointer
	;;
	add	cnt=-1,r29	// ctop iteration adjustment
	cmp.eq	p10,p0=r29,r0	// do we really need to loop?
	add	src1=8,src0	// 2nd src pointer
	cmp.le	p6,p0=8,tmp
	;;
	cmp.le	p7,p0=16,tmp
	mov	ar.lc=cnt	// loop setup
	cmp.eq	p16,p17 = r0,r0
	mov	ar.ec=2
(p10)	br.dpnt.few .aligned_src_tail
	;;
// .align 32
1:
EX(.ex_handler, (p16)	ld8	r34=[src0],16)
EK(.ex_handler, (p16)	ld8	r38=[src1],16)
EX(.ex_handler, (p17)	st8	[dst0]=r33,16)
EK(.ex_handler, (p17)	st8	[dst1]=r37,16)
	;;
EX(.ex_handler, (p16)	ld8	r32=[src0],16)
EK(.ex_handler, (p16)	ld8	r36=[src1],16)
EX(.ex_handler, (p16)	st8	[dst0]=r34,16)
EK(.ex_handler, (p16)	st8	[dst1]=r38,16)
	br.ctop.dptk.few 1b
	;;

.aligned_src_tail:
EX(.ex_handler, (p6)	ld8	t1=[src0])
	mov	ar.lc=saved_lc
	mov	ar.pfs=saved_pfs
EX(.ex_hndlr_s, (p7)	ld8	t2=[src1],8)
	cmp.le	p8,p0=24,tmp
	and	r21=-8,tmp
	;;
EX(.ex_hndlr_s, (p8)	ld8	t3=[src1])
EX(.ex_handler, (p6)	st8	[dst0]=t1)	// store word 1
	and	in2=7,tmp	// remaining length
EX(.ex_hndlr_d, (p7)	st8	[dst1]=t2,8)	// store word 2
	add	src0=src0,r21	// setting up src pointer
	add	dst0=dst0,r21	// setting up dest pointer
	;;
EX(.ex_handler, (p8)	st8	[dst1]=t3)	// store word 3
	mov	pr=saved_pr,-1
	br.dptk.many .memcpy_short
	;;

/* code taken from copy_page_mck */
.long_copy:
	.rotr v[2*PREFETCH_DIST]
	.rotp p[N]

	mov	src_pre_mem = src0
	mov	pr.rot = 0x10000
	mov	ar.ec = 1		// special unrolled loop

	mov	dst_pre_mem = dst0

	add	src_pre_l2 = 8*8, src0
	add	dst_pre_l2 = 8*8, dst0
	;;
	add	src0 = 8, src_pre_mem	// first t1 src
	mov	ar.lc = 2*PREFETCH_DIST - 1
	shr.u	cnt=in2,7		// number of lines
	add	src1 = 3*8, src_pre_mem	// first t3 src
	add	dst0 = 8, dst_pre_mem	// first t1 dst
	add	dst1 = 3*8, dst_pre_mem	// first t3 dst
	;;
	and	tmp=127,in2		// remaining bytes after this block
	add	cnt = -(2*PREFETCH_DIST) - 1, cnt
// same as .line_copy loop, but with all predicated-off instructions removed:
.prefetch_loop:
EX(.ex_hndlr_lcpy_1, (p[A])	ld8 v[A] = [src_pre_mem], 128)	// M0
EK(.ex_hndlr_lcpy_1, (p[B])	st8 [dst_pre_mem] = v[B], 128)	// M2
	br.ctop.sptk .prefetch_loop
	;;
	cmp.eq	p16, p0 = r0, r0	// reset p16 to 1
	mov	ar.lc = cnt
	mov	ar.ec = N		// # of stages in pipeline
	;;
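// How .line_copy moves one 128-byte line (16 words) per br.ctop trip:
// two of the 16 ld8/st8 pairs run well ahead of the rest and double as
// prefetches, since a real access is a hint the CPU cannot drop.
// Relative to the copy stage D (a reading of the staging, following
// copy_page_mck):
//	p[A] (D-A = 17 lines ahead):	ld8 v[A] copies word 0, pulling
//					the src line in from memory
//	p[B] (D-B = 9 lines ahead):	st8 v[B] stores word 0, write-
//					allocating the dst line
//	p[C] (1 line ahead):		ld8 n8 copies word 8 (offset 64),
//					moving the src line closer in (L2)
//	p[D] (current line):		st8 n8 lands word 8, and the
//					t1..t15 pairs copy the other
//					14 words on two pointer streams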
.line_copy:
EX(.ex_handler,	(p[D])	ld8	t2 = [src0], 3*8)		// M0
EK(.ex_handler,	(p[D])	ld8	t4 = [src1], 3*8)		// M1
EX(.ex_handler_lcpy,	(p[B])	st8 [dst_pre_mem] = v[B], 128)	// M2 prefetch dst from memory
EK(.ex_handler_lcpy,	(p[D])	st8 [dst_pre_l2] = n8, 128)	// M3 prefetch dst from L2
	;;
EX(.ex_handler_lcpy,	(p[A])	ld8 v[A] = [src_pre_mem], 128)	// M0 prefetch src from memory
EK(.ex_handler_lcpy,	(p[C])	ld8 n8 = [src_pre_l2], 128)	// M1 prefetch src from L2
EX(.ex_handler,	(p[D])	st8 [dst0] = t1, 8)			// M2
EK(.ex_handler,	(p[D])	st8 [dst1] = t3, 8)			// M3
	;;
EX(.ex_handler,	(p[D])	ld8	t5 = [src0], 8)
EK(.ex_handler,	(p[D])	ld8	t7 = [src1], 3*8)
EX(.ex_handler,	(p[D])	st8 [dst0] = t2, 3*8)
EK(.ex_handler,	(p[D])	st8 [dst1] = t4, 3*8)
	;;
EX(.ex_handler,	(p[D])	ld8	t6 = [src0], 3*8)
EK(.ex_handler,	(p[D])	ld8	t10 = [src1], 8)
EX(.ex_handler,	(p[D])	st8 [dst0] = t5, 8)
EK(.ex_handler,	(p[D])	st8 [dst1] = t7, 3*8)
	;;
EX(.ex_handler,	(p[D])	ld8	t9 = [src0], 3*8)
EK(.ex_handler,	(p[D])	ld8	t11 = [src1], 3*8)
EX(.ex_handler,	(p[D])	st8 [dst0] = t6, 3*8)
EK(.ex_handler,	(p[D])	st8 [dst1] = t10, 8)
	;;
EX(.ex_handler,	(p[D])	ld8	t12 = [src0], 8)
EK(.ex_handler,	(p[D])	ld8	t14 = [src1], 8)
EX(.ex_handler,	(p[D])	st8 [dst0] = t9, 3*8)
EK(.ex_handler,	(p[D])	st8 [dst1] = t11, 3*8)
	;;
EX(.ex_handler,	(p[D])	ld8	t13 = [src0], 4*8)
EK(.ex_handler,	(p[D])	ld8	t15 = [src1], 4*8)
EX(.ex_handler,	(p[D])	st8 [dst0] = t12, 8)
EK(.ex_handler,	(p[D])	st8 [dst1] = t14, 8)
	;;
EX(.ex_handler,	(p[C])	ld8	t1 = [src0], 8)
EK(.ex_handler,	(p[C])	ld8	t3 = [src1], 8)
EX(.ex_handler,	(p[D])	st8 [dst0] = t13, 4*8)
EK(.ex_handler,	(p[D])	st8 [dst1] = t15, 4*8)
	br.ctop.sptk .line_copy
	;;

	add	dst0=-8,dst0
	add	src0=-8,src0
	mov	in2=tmp
	.restore sp
	br.sptk.many .medium_copy
	;;

#define BLOCK_SIZE	128*32
#define blocksize	r23
#define curlen		r24

// dest is on 8-byte boundary, src is not. We need to do
// ld8-ld8, shrp, then st8. Max 8-byte copy per cycle.
.unaligned_src:
	.prologue
	.save ar.pfs, saved_pfs
	alloc	saved_pfs=ar.pfs,3,5,0,8
	.save ar.lc, saved_lc
	mov	saved_lc=ar.lc
	.save pr, saved_pr
	mov	saved_pr=pr
	.body
.4k_block:
	mov	saved_in0=dst0	// need to save all input arguments
	mov	saved_in2=in2
	mov	blocksize=BLOCK_SIZE
	;;
	cmp.lt	p6,p7=blocksize,in2
	mov	saved_in1=src0
	;;
(p6)	mov	in2=blocksize
	;;
	shr.u	r21=in2,7	// number of 128-byte cache lines
	shr.u	r22=in2,4	// number of 16-byte iterations
	and	curlen=15,in2	// copy length after iterations
	and	r30=7,src0	// source alignment
	;;
	cmp.lt	p7,p8=1,r21
	add	cnt=-1,r21
	;;

	add	src_pre_mem=0,src0	// prefetch src pointer
	add	dst_pre_mem=0,dst0	// prefetch dest pointer
	and	src0=-8,src0		// 1st src pointer
(p7)	mov	ar.lc = r21
(p8)	mov	ar.lc = r0
	;;
// .align 32
1:	lfetch	[src_pre_mem], 128
	lfetch.excl [dst_pre_mem], 128
	br.cloop.dptk.few 1b
	;;

	shladd	dst1=r22,3,dst0	// 2nd dest pointer
	shladd	src1=r22,3,src0	// 2nd src pointer
	cmp.eq	p8,p9=r22,r0	// do we really need to loop?
	cmp.le	p6,p7=8,curlen	// have at least 8 bytes remaining?
	add	cnt=-1,r22	// ctop iteration adjustment
	;;
EX(.ex_handler, (p9)	ld8	r33=[src0],8)	// loop primer
EK(.ex_handler, (p9)	ld8	r37=[src1],8)
(p8)	br.dpnt.few .noloop
	;;

// The jump address is calculated based on src alignment. The COPYU
// macro below needs to confine its size to a power of two, so an entry
// can be calculated using shl instead of an expensive multiply. The
// size is then hard-coded by the following #define to match the
// actual size. This makes it somewhat tedious: when the COPYU macro
// gets changed, this needs to be adjusted to match.
#define LOOP_SIZE 6
1:
	mov	r29=ip		// jmp_table thread
	mov	ar.lc=cnt
	;;
	add	r29=.jump_table - 1b - (.jmp1-.jump_table), r29
	shl	r28=r30, LOOP_SIZE	// jmp_table thread
	mov	ar.ec=2		// loop setup
	;;
	add	r29=r29,r28	// jmp_table thread
	cmp.eq	p16,p17=r0,r0
	;;
	mov	b6=r29		// jmp_table thread
	;;
	br.cond.sptk.few b6
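// How the dispatch above lands on the right COPYU loop (illustrative C
// sketch only; `jump_table` and `align` are stand-ins for .jump_table
// and r30).  Every COPYU expansion is exactly 1 << LOOP_SIZE = 64
// bytes, and COPYU(8) sits one slot before .jmp1, hence the bias:
//
//	entry = jump_table - (1UL << LOOP_SIZE)	     // == .jmp1 - .jump_table
//	      + ((unsigned long)align << LOOP_SIZE); // align = src & 7, 1..7
//
// align == 1 selects COPYU(8), align == 2 selects COPYU(16), and so
// on; the shift passed to COPYU is always 8 * align bits.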
// for the 8-15 byte case
// We will skip the loop, but need to replicate the side effect
// that the loop produces.
.noloop:
EX(.ex_handler, (p6)	ld8	r37=[src1],8)
	add	src0=8,src0
(p6)	shl	r25=r30,3
	;;
EX(.ex_handler, (p6)	ld8	r27=[src1])
(p6)	shr.u	r28=r37,r25
(p6)	sub	r26=64,r25
	;;
(p6)	shl	r27=r27,r26
	;;
(p6)	or	r21=r28,r27

.unaligned_src_tail:
/* check if we have more than blocksize to copy, if so go back */
	cmp.gt	p8,p0=saved_in2,blocksize
	;;
(p8)	add	dst0=saved_in0,blocksize
(p8)	add	src0=saved_in1,blocksize
(p8)	sub	in2=saved_in2,blocksize
(p8)	br.dpnt .4k_block
	;;

/* we have up to 15 bytes to copy in the tail.
 * part of the work is already done in the jump table code
 * we are in the following state.
 * src side:
 *
 *   xxxxxx xx                   <----- r21 has xxxxxxxx already
 * -------- -------- --------
 * 0        8        16
 *          ^
 *          |
 *          src1
 *
 * dst
 * -------- -------- --------
 * ^
 * |
 * dst1
 */
EX(.ex_handler, (p6)	st8	[dst1]=r21,8)	// more than 8 bytes to copy
(p6)	add	curlen=-8,curlen	// update length
	mov	ar.pfs=saved_pfs
	;;
	mov	ar.lc=saved_lc
	mov	pr=saved_pr,-1
	mov	in2=curlen	// remaining length
	mov	dst0=dst1	// dest pointer
	add	src0=src1,r30	// forward by src alignment
	;;

// 7 bytes or fewer.
.memcpy_short:
	cmp.le	p8,p9   = 1,in2
	cmp.le	p10,p11 = 2,in2
	cmp.le	p12,p13 = 3,in2
	cmp.le	p14,p15 = 4,in2
	add	src1=1,src0	// second src pointer
	add	dst1=1,dst0	// second dest pointer
	;;

EX(.ex_handler_short, (p8)	ld1	t1=[src0],2)
EK(.ex_handler_short, (p10)	ld1	t2=[src1],2)
(p9)	br.ret.dpnt rp		// 0 byte copy
	;;

EX(.ex_handler_short, (p8)	st1	[dst0]=t1,2)
EK(.ex_handler_short, (p10)	st1	[dst1]=t2,2)
(p11)	br.ret.dpnt rp		// 1 byte copy

EX(.ex_handler_short, (p12)	ld1	t3=[src0],2)
EK(.ex_handler_short, (p14)	ld1	t4=[src1],2)
(p13)	br.ret.dpnt rp		// 2 byte copy
	;;

	cmp.le	p6,p7   = 5,in2
	cmp.le	p8,p9   = 6,in2
	cmp.le	p10,p11 = 7,in2

EX(.ex_handler_short, (p12)	st1	[dst0]=t3,2)
EK(.ex_handler_short, (p14)	st1	[dst1]=t4,2)
(p15)	br.ret.dpnt rp		// 3 byte copy
	;;

EX(.ex_handler_short, (p6)	ld1	t5=[src0],2)
EK(.ex_handler_short, (p8)	ld1	t6=[src1],2)
(p7)	br.ret.dpnt rp		// 4 byte copy
	;;

EX(.ex_handler_short, (p6)	st1	[dst0]=t5,2)
EK(.ex_handler_short, (p8)	st1	[dst1]=t6,2)
(p9)	br.ret.dptk rp		// 5 byte copy

EX(.ex_handler_short, (p10)	ld1	t7=[src0],2)
(p11)	br.ret.dptk rp		// 6 byte copy
	;;

EX(.ex_handler_short, (p10)	st1	[dst0]=t7,2)
	br.ret.dptk rp		// done all cases
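// A C model of .memcpy_short above (illustrative sketch only, not part
// of the build).  Two byte pointers stride by 2, so the even- and
// odd-indexed byte accesses can issue in the same cycle and an n-byte
// copy (n <= 7) retires in about n/2 cycles:
//
//	static void memcpy_short_model(unsigned char *d,
//				       const unsigned char *s, long n)
//	{
//		for (long i = 0; i < n; i += 2)	// src0/dst0 stream
//			d[i] = s[i];
//		for (long i = 1; i < n; i += 2)	// src1/dst1 stream
//			d[i] = s[i];
//	}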
/* Align dest to the nearest 8-byte boundary. We know we have at
 * least 7 bytes to copy, enough to crawl to an 8-byte boundary.
 * The actual number of bytes to crawl depends on the dest alignment.
 * 7 bytes or fewer are taken care of at .memcpy_short

 * src0 - source even index
 * src1 - source  odd index
 * dst0 - dest even index
 * dst1 - dest  odd index
 * r30  - distance to 8-byte boundary
 */

.align_dest:
	add	src1=1,in1	// source odd index
	cmp.le	p7,p0 = 2,r30	// for .align_dest
	cmp.le	p8,p0 = 3,r30	// for .align_dest
EX(.ex_handler_short, (p6)	ld1	t1=[src0],2)
	cmp.le	p9,p0 = 4,r30	// for .align_dest
	cmp.le	p10,p0 = 5,r30
	;;
EX(.ex_handler_short, (p7)	ld1	t2=[src1],2)
EK(.ex_handler_short, (p8)	ld1	t3=[src0],2)
	cmp.le	p11,p0 = 6,r30
EX(.ex_handler_short, (p6)	st1	[dst0] = t1,2)
	cmp.le	p12,p0 = 7,r30
	;;
EX(.ex_handler_short, (p9)	ld1	t4=[src1],2)
EK(.ex_handler_short, (p10)	ld1	t5=[src0],2)
EX(.ex_handler_short, (p7)	st1	[dst1] = t2,2)
EK(.ex_handler_short, (p8)	st1	[dst0] = t3,2)
	;;
EX(.ex_handler_short, (p11)	ld1	t6=[src1],2)
EK(.ex_handler_short, (p12)	ld1	t7=[src0],2)
	cmp.eq	p6,p7=r28,r29
EX(.ex_handler_short, (p9)	st1	[dst1] = t4,2)
EK(.ex_handler_short, (p10)	st1	[dst0] = t5,2)
	sub	in2=in2,r30
	;;
EX(.ex_handler_short, (p11)	st1	[dst1] = t6,2)
EK(.ex_handler_short, (p12)	st1	[dst0] = t7)
	add	dst0=in0,r30	// setup arguments
	add	src0=in1,r30
(p6)	br.cond.dptk .aligned_src
(p7)	br.cond.dpnt .unaligned_src
	;;

/* main loop body in jump table format */
#define COPYU(shift)							\
1:									\
EX(.ex_handler,  (p16)	ld8	r32=[src0],8);		/* 1 */		\
EK(.ex_handler,  (p16)	ld8	r36=[src1],8);				\
		 (p17)	shrp	r35=r33,r34,shift;;	/* 1 */		\
EX(.ex_handler,  (p6)	ld8	r22=[src1]);	/* common, prime for tail section */ \
		 nop.m	0;						\
		 (p16)	shrp	r38=r36,r37,shift;			\
EX(.ex_handler,  (p17)	st8	[dst0]=r35,8);		/* 1 */		\
EK(.ex_handler,  (p17)	st8	[dst1]=r39,8);				\
		 br.ctop.dptk.few 1b;;					\
		 (p7)	add	src1=-8,src1;	/* back out for <8 byte case */ \
		 shrp	r21=r22,r38,shift;	/* speculative work */	\
		 br.sptk.few .unaligned_src_tail /* branch out of jump table */ \
		 ;;
// .align 32
.jump_table:
	COPYU(8)	// unaligned cases
.jmp1:
	COPYU(16)
	COPYU(24)
	COPYU(32)
	COPYU(40)
	COPYU(48)
	COPYU(56)
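// What each COPYU loop computes (illustrative C sketch only, not part
// of the build; `asrc` is a stand-in for the 8-byte-aligned source,
// i.e. src & ~7).  shrp r35=r33,r34,shift is a funnel shift: it takes
// the 64 bits starting at bit `shift` of the 128-bit pair r33:r34.
// Software-pipelined over aligned ld8s, this turns an unaligned-source
// copy into one ld8, one shrp and one st8 per 8 bytes:
//
//	#include <stdint.h>
//
//	static void copyu_model(uint64_t *dst, const uint64_t *asrc,
//				unsigned shift,	// 8 * (src & 7), 8..56
//				long n)		// 8-byte words to copy
//	{
//		uint64_t hi = asrc[0];		// loop primer
//
//		for (long i = 0; i < n; i++) {
//			uint64_t lo = hi;
//			hi = asrc[i + 1];	// the aligned ld8
//			// the shrp: low bits from lo, high bits from hi
//			dst[i] = (lo >> shift) | (hi << (64 - shift));
//		}
//	}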
#undef A
#undef B
#undef C
#undef D
END(memcpy)

/*
 * Due to lack of local tag support in the gcc 2.x assembler, it is not
 * clear which instruction in a bundle failed. The exception-recovery
 * algorithm is: first figure out the faulting address, then detect
 * whether any progress was made on the copy; if so, redo the copy from
 * the last known copied location up to the faulting address (exclusive).
 * In the copy_from_user case, the remaining bytes in the kernel buffer
 * will be zeroed.
 *
 * Take copy_from_user as an example: there are multiple loads in a
 * bundle, and those loads could span two pages, so the faulting address
 * is calculated as page_round_down(max(src0, src1)). This is based on
 * the knowledge that if we can access one byte in a page, we can access
 * any byte in that page.
 *
 * predicates used in the exception handler:
 * p6-p7:	direction
 * p10-p11:	src faulting addr calculation
 * p12-p13:	dst faulting addr calculation
 */

#define A	r19
#define B	r20
#define C	r21
#define D	r22
#define F	r28

#define memset_arg0	r32
#define memset_arg2	r33

#define saved_retval	loc0
#define saved_rtlink	loc1
#define saved_pfs_stack	loc2

.ex_hndlr_s:
	add	src0=8,src0
	br.sptk .ex_handler
	;;
.ex_hndlr_d:
	add	dst0=8,dst0
	br.sptk .ex_handler
	;;
.ex_hndlr_lcpy_1:
	mov	src1=src_pre_mem
	mov	dst1=dst_pre_mem
	cmp.gtu	p10,p11=src_pre_mem,saved_in1
	cmp.gtu	p12,p13=dst_pre_mem,saved_in0
	;;
(p10)	add	src0=8,saved_in1
(p11)	mov	src0=saved_in1
(p12)	add	dst0=8,saved_in0
(p13)	mov	dst0=saved_in0
	br.sptk .ex_handler
.ex_handler_lcpy:
	// in the line_copy block, the preload addresses should always be
	// ahead of the other two src/dst pointers.  Furthermore, src1/dst1
	// should always be ahead of src0/dst0.
	mov	src1=src_pre_mem
	mov	dst1=dst_pre_mem
.ex_handler:
	mov	pr=saved_pr,-1		// first restore pr, lc, and pfs
	mov	ar.lc=saved_lc
	mov	ar.pfs=saved_pfs
	;;
.ex_handler_short: // faults in these sections didn't change pr, lc, pfs
	cmp.ltu	p6,p7=saved_in0, saved_in1	// get the copy direction
	cmp.ltu	p10,p11=src0,src1
	cmp.ltu	p12,p13=dst0,dst1
	fcmp.eq	p8,p0=f6,f0		// is it memcpy?
	mov	tmp = dst0
	;;
(p11)	mov	src1 = src0		// pick the larger of the two
(p13)	mov	dst0 = dst1		// make dst0 the smaller one
(p13)	mov	dst1 = tmp		// and dst1 the larger one
	;;
(p6)	dep	F = r0,dst1,0,PAGE_SHIFT // usr dst rounded down to page boundary
(p7)	dep	F = r0,src1,0,PAGE_SHIFT // usr src rounded down to page boundary
	;;
(p6)	cmp.le	p14,p0=dst0,saved_in0	// no progress has been made on store
(p7)	cmp.le	p14,p0=src0,saved_in1	// no progress has been made on load
	mov	retval=saved_in2
(p8)	ld1	tmp=[src1]		// force an oops for memcpy call
(p8)	st1	[dst1]=r0		// force an oops for memcpy call
(p14)	br.ret.sptk.many rp

/*
 * The number of remaining bytes to copy is calculated as:
 *
 * A =	(faulting_addr - orig_src)	-> len to faulting ld address
 *	or
 *	(faulting_addr - orig_dst)	-> len to faulting st address
 * B =	(cur_dst - orig_dst)		-> len copied so far
 * C =	A - B				-> len that needs to be copied
 * D =	orig_len - A			-> len that needs to be zeroed
 */
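// The same recovery arithmetic as a C sketch (illustrative only, not
// part of the build; page_round_down() stands in for the dep
// instructions above, and from_user distinguishes copy_from_user (p7)
// from copy_to_user (p6)):
//
//	unsigned long A = F - (from_user ? orig_src : orig_dst);
//	unsigned long B = cur_dst - orig_dst;	// progress made so far
//	unsigned long C = A - B;		// can still be copied
//	unsigned long D = orig_len - A;		// can never be copied
//	unsigned long uncopied = 0;
//
//	if (C > 0)				// redo the copyable part
//		uncopied = __copy_user(orig_dst + B, orig_src + B, C);
//	if (from_user && D > 0)			// zero the kernel tail
//		memset((void *)(orig_dst + A), 0, D);
//	return D + uncopied;			// bytes NOT copied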
(p6)	sub	A = F, saved_in0
(p7)	sub	A = F, saved_in1
	clrrrb
	;;
	alloc	saved_pfs_stack=ar.pfs,3,3,3,0
	sub	B = dst0, saved_in0	// how many bytes copied so far
	;;
	sub	C = A, B
	sub	D = saved_in2, A
	;;
	cmp.gt	p8,p0=C,r0		// anything left to copy?
	add	memset_arg0=saved_in0, A
(p6)	mov	memset_arg2=0		// copy_to_user should not call memset
(p7)	mov	memset_arg2=D		// copy_from_user needs to have kbuf zeroed
	mov	r8=0
	mov	saved_retval = D
	mov	saved_rtlink = b0

	add	out0=saved_in0, B
	add	out1=saved_in1, B
	mov	out2=C
(p8)	br.call.sptk.few b0=__copy_user	// recursive call
	;;

	add	saved_retval=saved_retval,r8	// above might return a non-zero value
	cmp.gt	p8,p0=memset_arg2,r0	// anything to zero?
	mov	out0=memset_arg0	// *s
	mov	out1=r0			// c
	mov	out2=memset_arg2	// n
(p8)	br.call.sptk.few b0=memset
	;;

	mov	retval=saved_retval
	mov	ar.pfs=saved_pfs_stack
	mov	b0=saved_rtlink
	br.ret.sptk.many rp

/* end of McKinley specific optimization */
END(__copy_user)