/* checksum.S: Sparc optimized checksum code.
 *
 *  Copyright(C) 1995 Linus Torvalds
 *  Copyright(C) 1995 Miguel de Icaza
 *  Copyright(C) 1996 David S. Miller
 *  Copyright(C) 1997 Jakub Jelinek
 *
 * derived from:
 *	Linux/Alpha checksum c-code
 *	Linux/ix86 inline checksum assembly
 *	RFC1071 Computing the Internet Checksum (esp. Jacobson's m68k code)
 *	David Mosberger-Tang for optimized reference c-code
 *	BSD4.4 portable checksum routine
 */

#include <asm/cprefix.h>
#include <asm/errno.h>

#define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5)	\
	ldd	[buf + offset + 0x00], t0;			\
	ldd	[buf + offset + 0x08], t2;			\
	addxcc	t0, sum, sum;					\
	addxcc	t1, sum, sum;					\
	ldd	[buf + offset + 0x10], t4;			\
	addxcc	t2, sum, sum;					\
	addxcc	t3, sum, sum;					\
	ldd	[buf + offset + 0x18], t0;			\
	addxcc	t4, sum, sum;					\
	addxcc	t5, sum, sum;					\
	addxcc	t0, sum, sum;					\
	addxcc	t1, sum, sum;

#define CSUM_LASTCHUNK(buf, offset, sum, t0, t1, t2, t3)	\
	ldd	[buf - offset - 0x08], t0;			\
	ldd	[buf - offset - 0x00], t2;			\
	addxcc	t0, sum, sum;					\
	addxcc	t1, sum, sum;					\
	addxcc	t2, sum, sum;					\
	addxcc	t3, sum, sum;

	/* Do end cruft out of band to get better cache patterns. */
csum_partial_end_cruft:
	be	1f				! caller asks %o1 & 0x8
	 andcc	%o1, 4, %g0			! nope, check for word remaining
	ldd	[%o0], %g2			! load two
	addcc	%g2, %o2, %o2			! add first word to sum
	addxcc	%g3, %o2, %o2			! add second word as well
	add	%o0, 8, %o0			! advance buf ptr
	addx	%g0, %o2, %o2			! add in final carry
	andcc	%o1, 4, %g0			! check again for word remaining
1:	be	1f				! nope, skip this code
	 andcc	%o1, 3, %o1			! check for trailing bytes
	ld	[%o0], %g2			! load it
	addcc	%g2, %o2, %o2			! add to sum
	add	%o0, 4, %o0			! advance buf ptr
	addx	%g0, %o2, %o2			! add in final carry
	andcc	%o1, 3, %g0			! check again for trailing bytes
1:	be	1f				! no trailing bytes, return
	 addcc	%o1, -1, %g0			! only one byte remains?
	bne	2f				! at least two bytes more
	 subcc	%o1, 2, %o1			! only two bytes more?
	b	4f				! only one byte remains
	 or	%g0, %g0, %o4			! clear fake hword value
2:	lduh	[%o0], %o4			! get hword
	be	6f				! jmp if only hword remains
	 add	%o0, 2, %o0			! advance buf ptr either way
	sll	%o4, 16, %o4			! create upper hword
4:	ldub	[%o0], %o5			! get final byte
	sll	%o5, 8, %o5			! put into place
	or	%o5, %o4, %o4			! coalesce with hword (if any)
6:	addcc	%o4, %o2, %o2			! add to sum
1:	retl					! get outta here
	 addx	%g0, %o2, %o0			! add final carry into retval

	/* Also do alignment out of band to get better cache patterns. */
csum_partial_fix_alignment:
	cmp	%o1, 6
	bl	cpte - 0x4
	 andcc	%o0, 0x2, %g0
	be	1f
	 andcc	%o0, 0x4, %g0
	lduh	[%o0 + 0x00], %g2
	sub	%o1, 2, %o1
	add	%o0, 2, %o0
	sll	%g2, 16, %g2
	addcc	%g2, %o2, %o2
	srl	%o2, 16, %g3
	addx	%g0, %g3, %g2
	sll	%o2, 16, %o2
	sll	%g2, 16, %g3
	srl	%o2, 16, %o2
	andcc	%o0, 0x4, %g0
	or	%g3, %o2, %o2
1:	be	cpa
	 andcc	%o1, 0xffffff80, %o3
	ld	[%o0 + 0x00], %g2
	sub	%o1, 4, %o1
	addcc	%g2, %o2, %o2
	add	%o0, 4, %o0
	addx	%g0, %o2, %o2
	b	cpa
	 andcc	%o1, 0xffffff80, %o3

	/* The common case is to get called with a nicely aligned
	 * buffer of size 0x20.  Follow the code path for that case.
	 */
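	/* For reference, a rough C sketch of what the routine below
	 * computes.  This is a sketch only: it mirrors the generic
	 * RFC1071-style word-at-a-time C code, not this file's exact
	 * instruction schedule, and it elides the alignment and
	 * trailing-byte handling done out of band above.
	 *
	 *	unsigned int csum_partial(const unsigned char *buf,
	 *				  int len, unsigned int sum)
	 *	{
	 *		while (len >= 4) {
	 *			unsigned int w = *(unsigned int *)buf;
	 *			sum += w;
	 *			if (sum < w)
	 *				sum += 1;
	 *			buf += 4;
	 *			len -= 4;
	 *		}
	 *		return sum;
	 *	}
	 */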
	.globl	C_LABEL(csum_partial)
C_LABEL(csum_partial):			/* %o0=buf, %o1=len, %o2=sum */
	andcc	%o0, 0x7, %g0			! alignment problems?
	bne	csum_partial_fix_alignment	! yep, handle it
	 sethi	%hi(cpte - 8), %g7		! prepare table jmp ptr
	andcc	%o1, 0xffffff80, %o3		! num loop iterations
cpa:	be	3f				! none to do
	 andcc	%o1, 0x70, %g1			! clears carry flag too
5:	CSUM_BIGCHUNK(%o0, 0x00, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
	CSUM_BIGCHUNK(%o0, 0x20, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
	CSUM_BIGCHUNK(%o0, 0x40, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
	CSUM_BIGCHUNK(%o0, 0x60, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
	addx	%g0, %o2, %o2			! sink in final carry
	subcc	%o3, 128, %o3			! detract from loop iters
	bne	5b				! more to do
	 add	%o0, 128, %o0			! advance buf ptr
	andcc	%o1, 0x70, %g1			! clears carry flag too
3:	be	cpte				! nope
	 andcc	%o1, 0xf, %g0			! anything left at all?
	srl	%g1, 1, %o4			! compute offset
	sub	%g7, %g1, %g7			! adjust jmp ptr
	sub	%g7, %o4, %g7			! final jmp ptr adjust
	jmp	%g7 + %lo(cpte - 8)		! enter the table
	 add	%o0, %g1, %o0			! advance buf ptr
cptbl:	CSUM_LASTCHUNK(%o0, 0x68, %o2, %g2, %g3, %g4, %g5)
	CSUM_LASTCHUNK(%o0, 0x58, %o2, %g2, %g3, %g4, %g5)
	CSUM_LASTCHUNK(%o0, 0x48, %o2, %g2, %g3, %g4, %g5)
	CSUM_LASTCHUNK(%o0, 0x38, %o2, %g2, %g3, %g4, %g5)
	CSUM_LASTCHUNK(%o0, 0x28, %o2, %g2, %g3, %g4, %g5)
	CSUM_LASTCHUNK(%o0, 0x18, %o2, %g2, %g3, %g4, %g5)
	CSUM_LASTCHUNK(%o0, 0x08, %o2, %g2, %g3, %g4, %g5)
	addx	%g0, %o2, %o2			! fetch final carry
	andcc	%o1, 0xf, %g0			! anything left at all?
cpte:	bne	csum_partial_end_cruft		! yep, handle it
	 andcc	%o1, 8, %g0			! check how much
cpout:	retl					! get outta here
	 mov	%o2, %o0			! return computed csum

	.globl	C_LABEL(__csum_partial_copy_start), C_LABEL(__csum_partial_copy_end)
C_LABEL(__csum_partial_copy_start):

/* Work around cpp -rob */
#define ALLOC #alloc
#define EXECINSTR #execinstr
#define EX(x,y,a,b)				\
98:	x,y;					\
	.section .fixup,ALLOC,EXECINSTR;	\
	.align	4;				\
99:	ba 30f;					\
	 a, b, %o3;				\
	.section __ex_table,ALLOC;		\
	.align	4;				\
	.word	98b, 99b;			\
	.text;					\
	.align	4

#define EX2(x,y)				\
98:	x,y;					\
	.section __ex_table,ALLOC;		\
	.align	4;				\
	.word	98b, 30f;			\
	.text;					\
	.align	4

#define EX3(x,y)				\
98:	x,y;					\
	.section __ex_table,ALLOC;		\
	.align	4;				\
	.word	98b, 96f;			\
	.text;					\
	.align	4

#define EXT(start,end,handler)			\
	.section __ex_table,ALLOC;		\
	.align	4;				\
	.word	start, 0, end, handler;		\
	.text;					\
	.align	4

	/* This aligned version executes typically in 8.5 superscalar
	 * cycles; this is the best I can do.  I say 8.5 because the
	 * final add will pair with the next ldd in the main unrolled
	 * loop.  Thus the pipe is always full.  If you change these
	 * macros (including order of instructions), please check the
	 * fixup code below as well.
	 */
#define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7)	\
	ldd	[src + off + 0x00], t0;							\
	ldd	[src + off + 0x08], t2;							\
	addxcc	t0, sum, sum;								\
	ldd	[src + off + 0x10], t4;							\
	addxcc	t1, sum, sum;								\
	ldd	[src + off + 0x18], t6;							\
	addxcc	t2, sum, sum;								\
	std	t0, [dst + off + 0x00];							\
	addxcc	t3, sum, sum;								\
	std	t2, [dst + off + 0x08];							\
	addxcc	t4, sum, sum;								\
	std	t4, [dst + off + 0x10];							\
	addxcc	t5, sum, sum;								\
	std	t6, [dst + off + 0x18];							\
	addxcc	t6, sum, sum;								\
	addxcc	t7, sum, sum;
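	/* The addxcc chains above ride the SPARC carry flag from one
	 * add into the next; the loop tail's "addx %g0, sum, sum" sinks
	 * the last carry.  A hedged C sketch of summing one 32-byte
	 * chunk this way (csum_chunk32 is an illustrative name, not a
	 * routine in this file):
	 *
	 *	unsigned int csum_chunk32(unsigned int *p, unsigned int sum)
	 *	{
	 *		unsigned int carry = 0;
	 *		int i;
	 *		for (i = 0; i < 8; i++) {
	 *			unsigned int w = p[i];
	 *			sum = sum + w + carry;
	 *			carry = (sum < w) || (carry && sum == w);
	 *		}
	 *		return sum + carry;
	 *	}
	 */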
	/* 12 superscalar cycles seems to be the limit for this case,
	 * because of this we do all the ldd's together to get
	 * Viking MXCC into streaming mode.  Ho hum...
	 */
#define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7)	\
	ldd	[src + off + 0x00], t0;						\
	ldd	[src + off + 0x08], t2;						\
	ldd	[src + off + 0x10], t4;						\
	ldd	[src + off + 0x18], t6;						\
	st	t0, [dst + off + 0x00];						\
	addxcc	t0, sum, sum;							\
	st	t1, [dst + off + 0x04];						\
	addxcc	t1, sum, sum;							\
	st	t2, [dst + off + 0x08];						\
	addxcc	t2, sum, sum;							\
	st	t3, [dst + off + 0x0c];						\
	addxcc	t3, sum, sum;							\
	st	t4, [dst + off + 0x10];						\
	addxcc	t4, sum, sum;							\
	st	t5, [dst + off + 0x14];						\
	addxcc	t5, sum, sum;							\
	st	t6, [dst + off + 0x18];						\
	addxcc	t6, sum, sum;							\
	st	t7, [dst + off + 0x1c];						\
	addxcc	t7, sum, sum;

	/* Yuck, 6 superscalar cycles... */
#define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3)	\
	ldd	[src - off - 0x08], t0;				\
	ldd	[src - off - 0x00], t2;				\
	addxcc	t0, sum, sum;					\
	st	t0, [dst - off - 0x08];				\
	addxcc	t1, sum, sum;					\
	st	t1, [dst - off - 0x04];				\
	addxcc	t2, sum, sum;					\
	st	t2, [dst - off - 0x00];				\
	addxcc	t3, sum, sum;					\
	st	t3, [dst - off + 0x04];

	/* Handle the end cruft code out of band for better cache patterns. */
cc_end_cruft:
	be	1f
	 andcc	%o3, 4, %g0
	EX(ldd	[%o0 + 0x00], %g2, and %o3, 0xf)
	add	%o1, 8, %o1
	addcc	%g2, %g7, %g7
	add	%o0, 8, %o0
	addxcc	%g3, %g7, %g7
	EX2(st	%g2, [%o1 - 0x08])
	addx	%g0, %g7, %g7
	andcc	%o3, 4, %g0
	EX2(st	%g3, [%o1 - 0x04])
1:	be	1f
	 andcc	%o3, 3, %o3
	EX(ld	[%o0 + 0x00], %g2, add %o3, 4)
	add	%o1, 4, %o1
	addcc	%g2, %g7, %g7
	EX2(st	%g2, [%o1 - 0x04])
	addx	%g0, %g7, %g7
	andcc	%o3, 3, %g0
	add	%o0, 4, %o0
1:	be	1f
	 addcc	%o3, -1, %g0
	bne	2f
	 subcc	%o3, 2, %o3
	b	4f
	 or	%g0, %g0, %o4
2:	EX(lduh	[%o0 + 0x00], %o4, add %o3, 2)
	add	%o0, 2, %o0
	EX2(sth	%o4, [%o1 + 0x00])
	be	6f
	 add	%o1, 2, %o1
	sll	%o4, 16, %o4
4:	EX(ldub	[%o0 + 0x00], %o5, add %g0, 1)
	EX2(stb	%o5, [%o1 + 0x00])
	sll	%o5, 8, %o5
	or	%o5, %o4, %o4
6:	addcc	%o4, %g7, %g7
1:	retl
	 addx	%g0, %g7, %o0

	/* Also, handle the alignment code out of band. */
cc_dword_align:
	cmp	%g1, 6
	bl,a	ccte
	 andcc	%g1, 0xf, %o3
	andcc	%o0, 0x1, %g0
	bne	ccslow
	 andcc	%o0, 0x2, %g0
	be	1f
	 andcc	%o0, 0x4, %g0
	EX(lduh	[%o0 + 0x00], %g4, add %g1, 0)
	sub	%g1, 2, %g1
	EX2(sth	%g4, [%o1 + 0x00])
	add	%o0, 2, %o0
	sll	%g4, 16, %g4
	addcc	%g4, %g7, %g7
	add	%o1, 2, %o1
	srl	%g7, 16, %g3
	addx	%g0, %g3, %g4
	sll	%g7, 16, %g7
	sll	%g4, 16, %g3
	srl	%g7, 16, %g7
	andcc	%o0, 0x4, %g0
	or	%g3, %g7, %g7
1:	be	3f
	 andcc	%g1, 0xffffff80, %g0
	EX(ld	[%o0 + 0x00], %g4, add %g1, 0)
	sub	%g1, 4, %g1
	EX2(st	%g4, [%o1 + 0x00])
	add	%o0, 4, %o0
	addcc	%g4, %g7, %g7
	add	%o1, 4, %o1
	addx	%g0, %g7, %g7
	b	3f
	 andcc	%g1, 0xffffff80, %g0
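	/* How the Duff-style table jumps are computed, both in
	 * csum_partial above and in the copy routine below: %g1 (resp.
	 * %o2) holds len & 0x70, i.e. 16 data bytes per table entry.
	 * A CSUM_LASTCHUNK entry is 6 instructions (24 bytes of code),
	 * so csum_partial backs up from the table end by g1 + g1/2
	 * bytes (16 * 1.5 = 24).  A CSUMCOPY_LASTCHUNK entry is 10
	 * instructions (40 bytes), so the copy version backs up by
	 * o2/2 + o2*2 bytes (16 * 2.5 = 40).  In pseudo-C (a sketch;
	 * entry_size is 24 or 40 as above):
	 *
	 *	target = table_end - ((len & 0x70) / 16) * entry_size;
	 */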
	/* Sun, you just can't beat me, you just can't.  Stop trying,
	 * give up.  I'm serious, I am going to kick the living shit
	 * out of you, game over, lights out.
	 */
	.align	8
	.globl	C_LABEL(__csum_partial_copy_sparc_generic)
C_LABEL(__csum_partial_copy_sparc_generic):
					/* %o0=src, %o1=dest, %g1=len, %g7=sum */
	xor	%o0, %o1, %o4		! get changing bits
	andcc	%o4, 3, %g0		! check for mismatched alignment
	bne	ccslow			! better this than unaligned/fixups
	 andcc	%o0, 7, %g0		! need to align things?
	bne	cc_dword_align		! yes, we check for short lengths there
	 andcc	%g1, 0xffffff80, %g0	! can we use unrolled loop?
3:	be	3f			! nope, less than one loop remains
	 andcc	%o1, 4, %g0		! dest aligned on 4 or 8 byte boundary?
	be	ccdbl + 4		! 8 byte aligned, kick ass
5:	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
10:	EXT(5b, 10b, 20f)		! note for exception handling
	sub	%g1, 128, %g1		! detract from length
	addx	%g0, %g7, %g7		! add in last carry bit
	andcc	%g1, 0xffffff80, %g0	! more to csum?
	add	%o0, 128, %o0		! advance src ptr
	bne	5b			! we did not go negative, continue looping
	 add	%o1, 128, %o1		! advance dest ptr
3:	andcc	%g1, 0x70, %o2		! can use table?
ccmerge:be	ccte			! nope, go and check for end cruft
	 andcc	%g1, 0xf, %o3		! get low bits of length (clears carry btw)
	srl	%o2, 1, %o4		! begin negative offset computation
	sethi	%hi(12f), %o5		! set up table ptr end
	add	%o0, %o2, %o0		! advance src ptr
	sub	%o5, %o4, %o5		! continue table calculation
	sll	%o2, 1, %g2		! constant multiplies are fun...
	sub	%o5, %g2, %o5		! some more adjustments
	jmp	%o5 + %lo(12f)		! jump into it, Duff style, wheee...
	 add	%o1, %o2, %o1		! advance dest ptr (carry is clear btw)
cctbl:	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x58,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x48,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x38,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
12:	EXT(cctbl, 12b, 22f)		! note for exception table handling
	addx	%g0, %g7, %g7
	andcc	%o3, 0xf, %g0		! check for low bits set
ccte:	bne	cc_end_cruft		! something left, handle it out of band
	 andcc	%o3, 8, %g0		! begin checks for that code
	retl				! return
	 mov	%g7, %o0		! give 'em the computed checksum
ccdbl:	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
11:	EXT(ccdbl, 11b, 21f)		! note for exception table handling
	sub	%g1, 128, %g1		! detract from length
	addx	%g0, %g7, %g7		! add in last carry bit
	andcc	%g1, 0xffffff80, %g0	! more to csum?
	add	%o0, 128, %o0		! advance src ptr
	bne	ccdbl			! we did not go negative, continue looping
	 add	%o1, 128, %o1		! advance dest ptr
	b	ccmerge			! finish it off, above
	 andcc	%g1, 0x70, %o2		! can use table? (clears carry btw)
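	/* ccslow below is the byte-at-a-time fallback used when src and
	 * dst have mismatched low address bits.  Ones'-complement sums
	 * are byte-order sensitive: if the source started on an odd
	 * address, every byte was accumulated in the wrong half of its
	 * 16-bit word, so the folded sum gets byte-swapped at the end
	 * (the orcc %o5 test).  A C sketch of that final fixup, where
	 * sum16 is the sum already folded to 16 bits and src_was_odd is
	 * an illustrative name for the saved %o5 flag:
	 *
	 *	if (src_was_odd)
	 *		sum16 = ((sum16 & 0xff) << 8) | ((sum16 >> 8) & 0xff);
	 */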
ccslow:	cmp	%g1, 0
	mov	0, %g5
	bleu	4f
	 andcc	%o0, 1, %o5
	be,a	1f
	 srl	%g1, 1, %g4
	sub	%g1, 1, %g1
	EX(ldub	[%o0], %g5, add %g1, 1)
	add	%o0, 1, %o0
	EX2(stb	%g5, [%o1])
	srl	%g1, 1, %g4
	add	%o1, 1, %o1
1:	cmp	%g4, 0
	be,a	3f
	 andcc	%g1, 1, %g0
	andcc	%o0, 2, %g0
	be,a	1f
	 srl	%g4, 1, %g4
	EX(lduh	[%o0], %o4, add %g1, 0)
	sub	%g1, 2, %g1
	srl	%o4, 8, %g2
	sub	%g4, 1, %g4
	EX2(stb	%g2, [%o1])
	add	%o4, %g5, %g5
	EX2(stb	%o4, [%o1 + 1])
	add	%o0, 2, %o0
	srl	%g4, 1, %g4
	add	%o1, 2, %o1
1:	cmp	%g4, 0
	be,a	2f
	 andcc	%g1, 2, %g0
	EX3(ld	[%o0], %o4)
5:	srl	%o4, 24, %g2
	srl	%o4, 16, %g3
	EX2(stb	%g2, [%o1])
	srl	%o4, 8, %g2
	EX2(stb	%g3, [%o1 + 1])
	add	%o0, 4, %o0
	EX2(stb	%g2, [%o1 + 2])
	addcc	%o4, %g5, %g5
	EX2(stb	%o4, [%o1 + 3])
	addx	%g5, %g0, %g5	! I am now too lazy to optimize this (question
	add	%o1, 4, %o1	! is whether it is worth it).  Maybe some day -
	subcc	%g4, 1, %g4	! with the sll/srl tricks
	bne,a	5b
	 EX3(ld	[%o0], %o4)
	sll	%g5, 16, %g2
	srl	%g5, 16, %g5
	srl	%g2, 16, %g2
	andcc	%g1, 2, %g0
	add	%g2, %g5, %g5
2:	be,a	3f
	 andcc	%g1, 1, %g0
	EX(lduh	[%o0], %o4, and %g1, 3)
	andcc	%g1, 1, %g0
	srl	%o4, 8, %g2
	add	%o0, 2, %o0
	EX2(stb	%g2, [%o1])
	add	%g5, %o4, %g5
	EX2(stb	%o4, [%o1 + 1])
	add	%o1, 2, %o1
3:	be,a	1f
	 sll	%g5, 16, %o4
	EX(ldub	[%o0], %g2, add %g0, 1)
	sll	%g2, 8, %o4
	EX2(stb	%g2, [%o1])
	add	%g5, %o4, %g5
	sll	%g5, 16, %o4
1:	addcc	%o4, %g5, %g5
	srl	%g5, 16, %o4
	addx	%g0, %o4, %g5
	orcc	%o5, %g0, %g0
	be	4f
	 srl	%g5, 8, %o4
	and	%g5, 0xff, %g2
	and	%o4, 0xff, %o4
	sll	%g2, 8, %g2
	or	%g2, %o4, %g5
4:	addcc	%g7, %g5, %g7
	retl
	 addx	%g0, %g7, %o0
C_LABEL(__csum_partial_copy_end):
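/* Fault recovery scheme for the unrolled loops above: from the faulting
 * PC, each numbered handler below reconstructs how many source bytes can
 * still be copied to the destination (%o2) and how many destination
 * bytes must be zeroed out (%o3); the common code at 30f/31f then
 * finishes up.  In rough C (a sketch of the control flow only, under
 * the assumption that a lookup_fault result of 2 means the fault is
 * recoverable; lookup_fault, __memcpy and __bzero are the real helpers
 * called below, err_ptr is an illustrative name for the pointer fetched
 * from [%sp + 168]):
 *
 *	if (lookup_fault(pc, npc, fault_addr) == 2) {
 *		if (to_copy)
 *			__memcpy(dst, src, to_copy);
 *		__bzero(dst + to_copy, to_zero);
 *	}
 *	*err_ptr = -EFAULT;
 */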
/* We do these strange calculations for the csum_*_from_user case only, i.e.
 * we only bother with faults on loads... */

/* o2 = ((g2%20)&3)*8
 * o3 = g1 - (g2/20)*32 - o2 */
20:
	cmp	%g2, 20
	blu,a	1f
	 and	%g2, 3, %o2
	sub	%g1, 32, %g1
	b	20b
	 sub	%g2, 20, %g2
1:
	sll	%o2, 3, %o2
	b	31f
	 sub	%g1, %o2, %o3

/* o2 = (!(g2 & 15) ? 0 : (((g2 & 15) + 1) & ~1)*8)
 * o3 = g1 - (g2/16)*32 - o2 */
21:
	andcc	%g2, 15, %o3
	srl	%g2, 4, %g2
	be,a	1f
	 clr	%o2
	add	%o3, 1, %o3
	and	%o3, 14, %o3
	sll	%o3, 3, %o2
1:
	sll	%g2, 5, %g2
	sub	%g1, %g2, %o3
	b	31f
	 sub	%o3, %o2, %o3

/* o0 += (g2/10)*16 - 0x70
 * o1 += (g2/10)*16 - 0x70
 * o2 = (g2 % 10) ? 8 : 0
 * o3 += 0x70 - (g2/10)*16 - o2 */
22:
	cmp	%g2, 10
	blu,a	1f
	 sub	%o0, 0x70, %o0
	add	%o0, 16, %o0
	add	%o1, 16, %o1
	sub	%o3, 16, %o3
	b	22b
	 sub	%g2, 10, %g2
1:
	sub	%o1, 0x70, %o1
	add	%o3, 0x70, %o3
	clr	%o2
	tst	%g2
	bne,a	1f
	 mov	8, %o2
1:
	b	31f
	 sub	%o3, %o2, %o3
96:
	and	%g1, 3, %g1
	sll	%g4, 2, %g4
	add	%g1, %g4, %o3
30:
/* %o1 is dst
 * %o3 is # bytes to zero out
 * %o4 is faulting address
 * %o5 is %pc where fault occurred */
	clr	%o2
31:
/* %o0 is src
 * %o1 is dst
 * %o2 is # of bytes to copy from src to dst
 * %o3 is # bytes to zero out
 * %o4 is faulting address
 * %o5 is %pc where fault occurred */
	save	%sp, -104, %sp
	mov	%i5, %o0
	mov	%i7, %o1
	mov	%i4, %o2
	call	C_LABEL(lookup_fault)
	 mov	%g7, %i4
	cmp	%o0, 2
	bne	1f
	 add	%g0, -EFAULT, %i5
	tst	%i2
	be	2f
	 mov	%i0, %o1
	mov	%i1, %o0
5:
	call	C_LABEL(__memcpy)
	 mov	%i2, %o2
	tst	%o0
	bne,a	2f
	 add	%i3, %i2, %i3
	add	%i1, %i2, %i1
2:
	mov	%i1, %o0
6:
	call	C_LABEL(__bzero)
	 mov	%i3, %o1
1:
	ld	[%sp + 168], %o2		! struct_ptr of parent
	st	%i5, [%o2]
	ret
	 restore

	.section __ex_table,#alloc
	.align	4
	.word	5b,2
	.word	6b,2