Lines Matching refs:l
35 bra.l _060LSP__idivs64_
37 bra.l _060LSP__idivu64_
40 bra.l _060LSP__imuls64_
42 bra.l _060LSP__imulu64_
45 bra.l _060LSP__cmp2_Ab_
47 bra.l _060LSP__cmp2_Aw_
49 bra.l _060LSP__cmp2_Al_
51 bra.l _060LSP__cmp2_Db_
53 bra.l _060LSP__cmp2_Dw_
55 bra.l _060LSP__cmp2_Dl_
67 # and therefore does not work exactly like the 680X0 div{s,u}.l #
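
The routines in this section emulate the 64/32-bit form of div{s,u}.l. As a point of reference, here is a rough C model of what that instruction family computes; it is a sketch of the semantics being emulated, not of the library's calling convention, which the comment above says differs. All of the C sketches below assume <stdint.h>; function and parameter names are illustrative, not from the package.

    #include <stdint.h>

    /* 64/32 unsigned divide as divu.l <ea>,Dr:Dq performs it: 32-bit
       quotient and remainder, with overflow when the quotient does not
       fit in 32 bits. */
    static int divu64_model(uint64_t dividend, uint32_t divisor,
                            uint32_t *quot, uint32_t *rem)
    {
        if (divisor == 0)
            return -1;                    /* the real instruction traps */
        uint64_t q = dividend / divisor;
        if (q > 0xffffffffULL)
            return 1;                     /* overflow: V set, registers unchanged */
        *quot = (uint32_t)q;
        *rem  = (uint32_t)(dividend % divisor);
        return 0;
    }
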
105 # divs.l #
111 movm.l &0x3f00,-(%sp) # save d2-d7
112 # fmovm.l &0x0,-(%sp) # save no fpregs
120 # divu.l #
126 movm.l &0x3f00,-(%sp) # save d2-d7
127 # fmovm.l &0x0,-(%sp) # save no fpregs
134 mov.l 0x8(%a6),%d7 # fetch divisor
138 mov.l 0xc(%a6), %d5 # get dividend hi
139 mov.l 0x10(%a6), %d6 # get dividend lo
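
The operand fetches above suggest the divide's stack-frame layout. A hedged reconstruction, assuming the usual link %a6 prologue (the link instruction itself is not among the matched lines) and with names of my own choosing:

    /* inferred argument layout relative to %a6 for the divide entry points */
    struct div64_args {
        /* 0x0(%a6): saved caller %a6, 0x4(%a6): return address (assumed) */
        uint32_t divisor;       /* 0x8(%a6)  */
        uint32_t dividend_hi;   /* 0xc(%a6)  */
        uint32_t dividend_lo;   /* 0x10(%a6) */
    };
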
155 tst.l %d5 # chk sign of hi(dividend)
160 negx.l %d6 # complement signed dividend
161 negx.l %d5
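
The negx.l pair complements the 64-bit dividend held in %d5:%d6. What that amounts to on two 32-bit halves, assuming the X bit is clear when the first negx.l executes (so it behaves like a plain neg.l):

    /* two's-complement negation of a 64-bit value split into hi:lo halves */
    static void neg64(uint32_t *hi, uint32_t *lo)
    {
        uint32_t borrow = (*lo != 0);   /* X after negating the low half */
        *lo = 0u - *lo;
        *hi = 0u - *hi - borrow;
    }
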
167 tst.l %d5 # is (hi(dividend) == 0)
170 tst.l %d6 # is (lo(dividend) == 0), too
173 cmp.l %d7,%d6 # is (divisor <= lo(dividend))
180 tdivu.l %d7, %d5:%d6 # it's only a 32/32 bit div!
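
When the upper longword of the dividend is zero, the whole operation collapses to one 32-by-32 divide (or to a trivial result), which is what the tdivu.l above handles. A sketch of that shortcut:

    /* fast path for hi(dividend) == 0; names are illustrative */
    static void divu_fastpath(uint32_t lo, uint32_t divisor,
                              uint32_t *quot, uint32_t *rem)
    {
        if (lo == 0) {                  /* dividend is zero */
            *quot = 0;
            *rem  = 0;
        } else if (divisor <= lo) {     /* worth doing the 32/32 divide */
            *quot = lo / divisor;
            *rem  = lo % divisor;
        } else {                        /* divisor > dividend */
            *quot = 0;
            *rem  = lo;
        }
    }
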
191 bsr.l ldclassical # do int divide
198 # it was a divs.l, so ccode setting is a little more complicated...
201 neg.l %d5 # sgn(rem) = sgn(dividend)
209 cmpi.l %d6, &0x80000000 # will (-quot) fit in 32 bits?
212 neg.l %d6 # make (-quot) 2's comp
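
For divs.l the divide itself is done on magnitudes and the signs are patched afterwards: the remainder takes the sign of the dividend, and a negative quotient may have magnitude up to 0x80000000 (hence the cmpi.l against 0x80000000) while a positive one is limited to 0x7fffffff. A simplified sketch of that fix-up, with condition-code handling reduced to a return value:

    /* sign fix-up after an unsigned divide of the magnitudes; sketch only */
    static int divs64_fixup(uint32_t mag_quot, uint32_t mag_rem,
                            int dividend_neg, int divisor_neg,
                            int32_t *quot, int32_t *rem)
    {
        *rem = dividend_neg ? -(int32_t)mag_rem : (int32_t)mag_rem;

        if (dividend_neg != divisor_neg) {      /* quotient is negative */
            if (mag_quot > 0x80000000u)
                return 1;                       /* overflow: would set V */
            *quot = (int32_t)-(int64_t)mag_quot;
        } else {
            if (mag_quot > 0x7fffffffu)
                return 1;                       /* overflow: would set V */
            *quot = (int32_t)mag_quot;
        }
        return 0;
    }
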
286 cmpi.l %d7, &0xffff
298 clr.l %d1
315 mov.l %d1, %d6 # and quotient
327 clr.l DDNORMAL(%a6) # count of shifts for normalization
329 clr.l %d1 # %d1 will hold trial quotient
333 addq.l &0x1, DDNORMAL(%a6) # count normalization shifts
334 lsl.l &0x1, %d7 # shift the divisor
335 lsl.l &0x1, %d6 # shift u4,u3 with overflow to u2
336 roxl.l &0x1, %d5 # shift u1,u2
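
The lines above are the normalization pass of the classical (schoolbook) divide: divisor and dividend are shifted left together until the divisor's top bit is set, and the shift count is kept in DDNORMAL so the remainder can be shifted back at the end. Equivalent logic as a sketch:

    /* normalization for the classical divide; the divisor is nonzero and
       greater than hi(dividend) at this point, so no dividend bits are lost */
    static unsigned normalize(uint32_t *divisor, uint32_t *u_hi, uint32_t *u_lo)
    {
        unsigned shifts = 0;
        while ((*divisor & 0x80000000u) == 0) {
            shifts++;
            *divisor <<= 1;
            *u_hi = (*u_hi << 1) | (*u_lo >> 31);   /* the lsl.l/roxl.l pair */
            *u_lo <<= 1;
        }
        return shifts;
    }
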
342 mov.l %d7, %d3 # divisor
343 mov.l %d5, %d2 # dividend mslw
351 mov.l %d5, %d1
355 andi.l &0x0000ffff, %d1 # zero any remainder
361 mov.l %d6, -(%sp)
364 lddadj1: mov.l %d7, %d3
365 mov.l %d1, %d2
369 mov.l %d5, %d4 # U1U2
370 sub.l %d3, %d4 # U1U2 - V1q
380 # add.l %d6, %d4 # (U1U2 - V1q) + U3
382 cmp.l %d2, %d4
384 subq.l &0x1, %d1 # yes, decrement and recheck
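
Each digit step estimates a base-2^16 trial quotient from the top 32 bits of the remaining dividend divided by the divisor's top 16 bits, then decrements it while it is provably too large; that is the "decrement and recheck" loop at lddadj1, the standard Knuth-style trial-quotient adjustment. A sketch of one such step (it does not mirror the exact register usage):

    /* trial quotient digit qhat = U1U2/V1, adjusted while
       V2*qhat > (U1U2 - V1*qhat)*2^16 + U3 */
    static uint32_t trial_digit(uint32_t u1u2, uint16_t u3,
                                uint16_t v1, uint16_t v2)
    {
        /* the running remainder is below the divisor, so U1 <= V1 */
        uint32_t qhat = ((u1u2 >> 16) == v1) ? 0xffffu : u1u2 / v1;
        uint32_t rhat = u1u2 - qhat * v1;

        while (rhat <= 0xffffu &&
               (uint32_t)v2 * qhat > ((rhat << 16) | u3)) {
            qhat--;                     /* too big: decrement and recheck */
            rhat += v1;
        }
        return qhat;
    }
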
389 mov.l %d5, -(%sp) # save %d5 (%d6 already saved)
390 mov.l %d1, %d6
392 mov.l %d7, %d5
393 bsr.l ldmm2
394 mov.l %d5, %d2 # now %d2,%d3 are trial*divisor
395 mov.l %d6, %d3
396 mov.l (%sp)+, %d5 # restore dividend
397 mov.l (%sp)+, %d6
398 sub.l %d3, %d6
399 subx.l %d2, %d5 # subtract double precision
401 subq.l &0x1, %d1 # q is one too large
405 clr.l %d2
406 mov.l %d7, %d3
409 add.l %d3, %d6 # aligned with 3rd word of dividend
410 addx.l %d2, %d5
411 mov.l %d7, %d3
414 add.l %d3, %d5
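
After the trial digit is settled, its product with the divisor is subtracted from the current dividend window (the bsr.l ldmm2 / sub.l / subx.l sequence); in the rare case the subtraction goes negative, the digit was still one too large, so it is decremented and the divisor added back in (the add.l/addx.l lines above). Condensed into 64-bit arithmetic rather than the register-pair juggling in the source:

    /* multiply-and-subtract with the "add back" correction; sketch only */
    static uint32_t mul_sub_step(uint32_t *win_hi, uint32_t *win_lo,
                                 uint32_t qhat, uint32_t divisor)
    {
        uint64_t window = ((uint64_t)*win_hi << 32) | *win_lo;
        uint64_t prod   = (uint64_t)qhat * divisor;

        if (prod > window) {            /* subtraction would borrow */
            qhat--;                     /* q is one too large */
            prod -= divisor;            /* equivalent to adding the divisor back */
        }
        window -= prod;
        *win_hi = (uint32_t)(window >> 32);
        *win_lo = (uint32_t)window;
        return qhat;
    }
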
421 clr.l %d1
435 mov.l DDNORMAL(%a6), %d7 # get norm shift count
437 subq.l &0x1, %d7 # set for loop count
439 lsr.l &0x1, %d5 # shift into %d6
440 roxr.l &0x1, %d6
443 mov.l %d6, %d5 # remainder
444 mov.l DDQUOTIENT(%a6), %d6 # quotient
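
The remainder produced by the digit loop is still scaled by the normalization shift, so it is shifted right by the DDNORMAL count (the lsr.l/roxr.l pair); the quotient needs no such correction. In sketch form:

    /* undo the normalization shift on the remainder */
    static uint32_t denormalize_rem(uint32_t rem_hi, uint32_t rem_lo,
                                    unsigned shifts)
    {
        uint64_t rem = ((uint64_t)rem_hi << 32) | rem_lo;
        return (uint32_t)(rem >> shifts);   /* final remainder fits in 32 bits */
    }
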
453 mov.l %d6, %d2
454 mov.l %d6, %d3
455 mov.l %d5, %d4
463 clr.l %d4
465 add.w %d5, %d6 # add msw of l*l to lsw of m*l product
467 add.w %d2, %d6 # add in lsw of other m*l product
474 add.l %d2, %d5
475 add.l %d3, %d5 # %d5 now ms 32 bits of final product
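
These lines appear to belong to the divide's multiply helper (the ldmm2 routine called earlier), which builds a 32x32->64 product out of four 16x16->32 mulu.w partial products and then folds the two middle products into the high and low longwords with carry propagation (the add.w/add.l lines). The same technique reappears in the mul{s,u}.l emulation below. A sketch of the arithmetic; the grouping of the adds differs from the source:

    /* 32x32 -> 64 multiply from 16-bit partial products */
    static void mul32x32(uint32_t a, uint32_t b, uint32_t *hi, uint32_t *lo)
    {
        uint32_t a_lo = a & 0xffffu, a_hi = a >> 16;
        uint32_t b_lo = b & 0xffffu, b_hi = b >> 16;

        uint32_t ll = a_lo * b_lo;      /* low  x low            */
        uint32_t lh = a_lo * b_hi;      /* low  x high (middle)  */
        uint32_t hl = a_hi * b_lo;      /* high x low  (middle)  */
        uint32_t hh = a_hi * b_hi;      /* high x high           */

        uint32_t mid       = lh + hl;
        uint32_t mid_carry = (mid < lh);            /* carry out of lh+hl */
        uint32_t low       = ll + (mid << 16);
        uint32_t low_carry = (low < ll);            /* carry into the high word */

        *lo = low;
        *hi = hh + (mid >> 16) + (mid_carry << 16) + low_carry;
    }
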
484 # and therefore does not work exactly like the 680X0 mul{s,u}.l #
513 movm.l &0x3800,-(%sp) # save d2-d4
514 # fmovm.l &0x0,-(%sp) # save no fpregs
519 mov.l 0x8(%a6),%d0 # store multiplier in d0
522 mov.l 0xc(%a6),%d1 # get multiplicand in d1
546 mov.l %d0,%d2 # mr in d2
547 mov.l %d0,%d3 # mr in d3
548 mov.l %d1,%d4 # md in d4
561 clr.l %d4 # load d4 w/ zero value
564 addx.l %d4,%d3 # [4] + carry
566 addx.l %d4,%d3 # [4] + carry
575 add.l %d2,%d1 # [4] + hi([2])
576 add.l %d3,%d1 # [4] + hi([3])
582 tst.l %d1 # may set 'N' bit
593 movm.l &0x0003,([0x10,%a6]) # save result
596 # fmovm.l (%sp)+,&0x0 # restore no fpregs
597 movm.l (%sp)+,&0x001c # restore d2-d4
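
The multiply entry points read their two 32-bit operands from 0x8(%a6) and 0xc(%a6) and write the 64-bit product through a pointer: ([0x10,%a6]) is memory-indirect, so the movm.l with mask &0x0003 stores %d0 and then %d1 at the address held in 0x10(%a6). A hedged C-level view of that interface; the struct and names are mine, and which of the two longwords holds the high half is not established by these matched lines alone:

    /* apparent calling interface of the 64-bit multiply helpers */
    struct mul64_result {
        uint32_t word0;     /* written from %d0 */
        uint32_t word1;     /* written from %d1 */
    };

    /* conceptual prototype: result returned through the third argument */
    void imulu64_model(uint32_t multiplier, uint32_t multiplicand,
                       struct mul64_result *result);
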
606 clr.l %d0
607 clr.l %d1
617 # muls.l #
624 movm.l &0x3c00,-(%sp) # save d2-d5
625 # fmovm.l &0x0,-(%sp) # save no fpregs
630 mov.l 0x8(%a6),%d0 # store multiplier in d0
633 mov.l 0xc(%a6),%d1 # get multiplicand in d1
637 tst.l %d0 # is multiplier negative?
639 neg.l %d0 # make multiplier positive
645 tst.l %d1 # is multiplicand negative?
647 neg.l %d1 # make multiplicand positive
672 mov.l %d0,%d2 # mr in d2
673 mov.l %d0,%d3 # mr in d3
674 mov.l %d1,%d4 # md in d4
687 clr.l %d4 # load d4 w/ zero value
690 addx.l %d4,%d3 # [4] + carry
692 addx.l %d4,%d3 # [4] + carry
701 add.l %d2,%d1 # [4] + hi([2])
702 add.l %d3,%d1 # [4] + hi([3])
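
The muls.l path first strips the signs with the tst.l/neg.l pairs above, runs the same unsigned partial-product multiply, and then presumably negates the 64-bit product when exactly one operand was negative (the negation itself is not among the matched lines). Reusing the earlier sketches:

    /* signed 32x32 -> 64 multiply via magnitudes; reuses mul32x32() and
       neg64() from the sketches above */
    static void muls32x32(int32_t a, int32_t b, uint32_t *hi, uint32_t *lo)
    {
        int negate  = (a < 0) != (b < 0);
        uint32_t ua = (a < 0) ? 0u - (uint32_t)a : (uint32_t)a;
        uint32_t ub = (b < 0) ? 0u - (uint32_t)b : (uint32_t)b;

        mul32x32(ua, ub, hi, lo);       /* unsigned core */
        if (negate)
            neg64(hi, lo);              /* negate the 64-bit product */
    }
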