Home | Sort by: relevance | last modified time | path
    Searched refs:mulx (Results 1 - 25 of 115) sorted by relevance

1 2 3 4 5

  /src/common/lib/libc/arch/sparc64/gen/
mul.S 61 mulx %o0, %o1, %o0
64 mulx %o1, %o0, %o0
umul.S 59 mulx %o0, %o1, %o0
62 mulx %o1, %o0, %o0
  /src/external/lgpl3/gmp/dist/mpn/x86_64/zen/
sbpi1_bdiv_r.asm 94 L(o4): mulx( -32,(dp), %r9, %r14)
95 mulx( -24,(dp), %r11, %r10)
96 mulx( -16,(dp), %r13, %r12)
97 mulx( -8,(dp), %rbx, %rax)
106 mulx( %r11, %rdx, %r12) C next quotient
121 L(o8): mulx( -64,(dp), %r9, %r14)
122 mulx( -56,(dp), %rcx, %r10)
123 mulx( -48,(dp), %r13, %r12)
124 mulx( -40,(dp), %rbx, %rax)
130 mulx( -32,(dp), %r9, %r14)
    [all...]
mullo_basecase.asm 76 mulx( %r9, %rax, %rdx)
90 mulx( %r9, %rax, %r10) C u0 x v0
93 mulx( %r9, %rax, %rdx) C u1 x v0
99 mulx( %r8, %rax, %rdx) C u0 x v1
137 L(mb1): mulx( %r9, %rbx, %rax)
139 .byte 0xc4,0x22,0xb3,0xf6,0x44,0xf6,0xf0 C mulx -0x10(%rsi,%r14,8),%r9,%r8
140 .byte 0xc4,0x22,0xa3,0xf6,0x54,0xf6,0xf8 C mulx -0x8(%rsi,%r14,8),%r11,%r10
143 L(mb3): mulx( %r9, %r11, %r10)
144 .byte 0xc4,0x22,0x93,0xf6,0x64,0xf6,0xf0 C mulx -0x10(%rsi,%r14,8),%r13,%r12
145 .byte 0xc4,0xa2,0xe3,0xf6,0x44,0xf6,0xf8 C mulx -0x8(%rsi,%r14,8),%rbx,%ra
    [all...]
mul_basecase.asm 39 C * Improve the overlapped software pipelining. The mulx in the osp block now
71 mulx( (up), %rax, %r9) C 0 1
80 mulx( 8,(up), %r8, %r10) C 1 2
95 mulx( (up), %r8, %r11) C 1 2
96 mulx( 8,(up), %rax, %rdx) C 2 3
133 L(mb1): mulx( %r9, %rbx, %rax)
135 .byte 0xc4,0x22,0xb3,0xf6,0x44,0xf6,0x08 C mulx 8(up,un,8), %r9, %r8
136 .byte 0xc4,0x22,0xa3,0xf6,0x54,0xf6,0x10 C mulx 16(up,un,8), %r11, %r10
139 L(mb3): mulx( %r9, %r11, %r10)
140 .byte 0xc4,0x22,0x93,0xf6,0x64,0xf6,0x08 C mulx 8(up,un,8), %r13, %r1
    [all...]
aorsmul_1.asm 1 dnl AMD64 mpn_addmul_1 and mpn_submul_1 for CPUs with mulx.
103 L(b1): mulx( %r8, %rbx, %rax)
106 .byte 0xc4,0x62,0xb3,0xf6,0x04,0xce C mulx (up,n,8), %r9, %r8
107 .byte 0xc4,0x62,0xa3,0xf6,0x54,0xce,0x08 C mulx 8(up,n,8), %r11, %r10
111 L(b0): mulx( %r8, %r9, %r8)
112 .byte 0xc4,0x62,0xa3,0xf6,0x54,0xce,0x08 C mulx 8(up,n,8), %r11, %r10
113 .byte 0xc4,0x62,0x93,0xf6,0x64,0xce,0x10 C mulx 16(up,n,8), %r13, %r12
117 L(b3): mulx( %r8, %r11, %r10)
118 .byte 0xc4,0x62,0x93,0xf6,0x64,0xce,0x08 C mulx 8(up,n,8), %r13, %r12
119 .byte 0xc4,0xe2,0xe3,0xf6,0x44,0xce,0x10 C mulx 16(up,n,8), %rbx, %ra
    [all...]
mul_1.asm 1 dnl AMD64 mpn_mul_1 for CPUs with mulx.
98 mulx( %r9, %rbx, %rax)
101 .byte 0xc4,0x62,0xb3,0xf6,0x04,0xce C mulx (up,n,8), %r9, %r8
102 .byte 0xc4,0x62,0xa3,0xf6,0x54,0xce,0x08 C mulx 8(up,n,8), %r11, %r10
106 L(b3): mulx( %r9, %r11, %r10)
107 .byte 0xc4,0x62,0x93,0xf6,0x64,0xce,0x08 C mulx 8(up,n,8), %r13, %r12
108 .byte 0xc4,0xe2,0xe3,0xf6,0x44,0xce,0x10 C mulx 16(up,n,8), %rbx, %rax
115 mulx( %r9, %r13, %r12)
116 .byte 0xc4,0xe2,0xe3,0xf6,0x44,0xce,0x08 C mulx 8(up,n,8), %rbx, %rax
119 .byte 0xc4,0x62,0xb3,0xf6,0x04,0xce C mulx (up,n,8), %r9, %r
    [all...]
  /src/external/lgpl3/gmp/dist/mpn/x86_64/coreibwl/
mullo_basecase.asm 77 mulx( %r9, %rax, %rdx)
91 mulx( %r9, %rax, %r10) C u0 x v0
94 mulx( %r9, %rax, %rdx) C u1 x v0
100 mulx( %r8, %rax, %rdx) C u0 x v1
137 L(mf0): mulx( (up), %r10, %r8)
143 L(mf3): mulx( (up), %r9, %rax)
151 L(mc): mulx( -8,(up), %r10, %r8)
154 mulx( (up), %r9, %rax)
160 L(mf4): mulx( (up), %r10, %r8)
167 L(mf5): mulx( (up), %r9, %rax
    [all...]
sqr_basecase.asm 68 C * Try swapping adox and adcx insn, making mulx have more time to run.
96 mulx( %rdx, %rax, %rdx)
106 mulx( %rcx, %r9, %r10) C v0 * v1 W 1 2
107 mulx( %rdx, %rax, %r8) C v0 * v0 W 0 1
109 mulx( %rdx, %r11, %rdx) C v1 * v1 W 2 3
128 mulx( 8,(up), w2, w3)
129 mulx( 16,(up), w0, w1)
132 mulx( 16,(up), %rax, w3)
137 mulx( %rdx, %rbx, %rcx)
140 mulx( %rdx, %rax, %rbx
    [all...]
mul_basecase.asm 94 mulx( (up), %rax, %r9) C 0 1
103 mulx( 8,(up), %r8, %r10) C 1 2
118 mulx( (up), %r8, %r11) C 1 2
119 mulx( 8,(up), %rax, %rdx) C 2 3
160 L(mf0): mulx( (up), w2, w3)
165 L(mf3): mulx( (up), w0, w1)
171 L(mf4): mulx( (up), w2, w3)
177 L(mf5): mulx( (up), w0, w1)
183 L(mf6): mulx( (up), w2, w3)
189 L(mf7): mulx( (up), w0, w1
    [all...]
mul_1.asm 58 C * Put an initial mulx before switching, targeting some free registers.
111 L(f0): mulx( (up), %r10, %r8)
116 L(f3): mulx( (up), %r9, %rax)
122 L(f4): mulx( (up), %r10, %r8)
128 L(f5): mulx( (up), %r9, %rax)
134 L(f6): mulx( (up), %r10, %r8)
140 L(f7): mulx( (up), %r9, %rax)
146 L(f1): mulx( (up), %r9, %rax)
152 L(f2): mulx( (up), %r10, %r8)
155 mulx( (up), %r9, %rax
    [all...]
  /src/external/lgpl3/gmp/dist/mpn/x86_64/coreihwl/
mul_basecase.asm 111 mulx( (up), w5, w2)
112 mulx( 8,(up), w1, w3)
117 mulx( (up), w3, w4)
118 mulx( 8,(up), w1, w5)
122 mulx( 16,(up), w0, w2)
130 mulx( (up), w4, w5)
134 mulx( 8,(up), w0, w2)
139 mulx( (up), w2, w3)
140 mulx( 8,(up), w0, w4)
141 mulx( 16,(up), w1, w5
    [all...]
mullo_basecase.asm 112 L(m2x0):mulx( v0, w0, w3)
125 L(m2x1):mulx( v0, w2, w1)
137 L(m2tp):mulx( v1, %rax, w0)
140 mulx( v0, %rax, w1)
147 mulx( v1, %rax, w2)
151 mulx( v0, %rax, w3)
157 mulx( v1, %rax, w0)
160 mulx( v0, %rax, w1)
167 mulx( v1, %rax, w2)
171 mulx( v0, %rax, w3
    [all...]
mul_1.asm 1 dnl AMD64 mpn_mul_1 using mulx optimised for Intel Haswell.
89 L(b00): mulx( (up), %r9, %r8)
90 mulx( 8,(up), %r11, %r10)
91 mulx( 16,(up), %rcx, %r12)
95 L(b10): mulx( (up), %rcx, %r12)
96 mulx( 8,(up), %rbx, %rax)
100 mulx( 16,(up), %r9, %r8)
108 L(b01): mulx( (up), %rbx, %rax)
112 mulx( 8,(up), %r9, %r8)
116 L(b11): mulx( (up), %r11, %r10
    [all...]
  /src/external/lgpl3/gmp/dist/mpn/x86_64/mulx/adx/
addmul_1.asm 1 dnl AMD64 mpn_addmul_1 for CPUs with mulx and adx.
71 define(`mulx', ``mulx' $1, $2, $3')
97 L(b3): mulx( (up,n,8), %r11, %r10)
98 mulx( 8(up,n,8), %r13, %r12)
99 mulx( 16(up,n,8), %rbx, %rax)
103 L(b0): mulx( (up,n,8), %r9, %r8)
104 mulx( 8(up,n,8), %r11, %r10)
105 mulx( 16(up,n,8), %r13, %r12)
108 L(b2): mulx( (up,n,8), %r13, %r12
    [all...]
  /src/crypto/external/apache2/openssl/dist/crypto/bn/asm/
sparcv8plus.S 196 mulx %o3,%g2,%g2
205 mulx %o3,%g3,%g3
214 mulx %o3,%g2,%g2
222 mulx %o3,%g3,%g3
240 mulx %o3,%g2,%g2
250 mulx %o3,%g2,%g2
260 mulx %o3,%g2,%g2
297 mulx %o3,%g2,%g2
304 mulx %o3,%g3,%g3
311 mulx %o3,%g2,%g
    [all...]
sparcv9-mont.pl 123 mulx $car0,$mul0,$car0 ! ap[0]*bp[0]
124 mulx $apj,$mul0,$tmp0 !prologue! ap[1]*bp[0]
129 mulx $n0,$acc0,$mul1 ! "t[0]"*n0
132 mulx $car1,$mul1,$car1 ! np[0]*"t[0]"*n0
133 mulx $npj,$mul1,$acc1 !prologue! np[1]*"t[0]"*n0
141 mulx $apj,$mul0,$tmp0
142 mulx $npj,$mul1,$tmp1
160 mulx $apj,$mul0,$tmp0 !epilogue!
161 mulx $npj,$mul1,$tmp1
195 mulx $car0,$mul0,$car
    [all...]
  /src/crypto/external/bsd/openssl/dist/crypto/bn/asm/
sparcv8plus.S 196 mulx %o3,%g2,%g2
205 mulx %o3,%g3,%g3
214 mulx %o3,%g2,%g2
222 mulx %o3,%g3,%g3
240 mulx %o3,%g2,%g2
250 mulx %o3,%g2,%g2
260 mulx %o3,%g2,%g2
297 mulx %o3,%g2,%g2
304 mulx %o3,%g3,%g3
311 mulx %o3,%g2,%g
    [all...]
sparcv9-mont.pl 123 mulx $car0,$mul0,$car0 ! ap[0]*bp[0]
124 mulx $apj,$mul0,$tmp0 !prologue! ap[1]*bp[0]
129 mulx $n0,$acc0,$mul1 ! "t[0]"*n0
132 mulx $car1,$mul1,$car1 ! np[0]*"t[0]"*n0
133 mulx $npj,$mul1,$acc1 !prologue! np[1]*"t[0]"*n0
141 mulx $apj,$mul0,$tmp0
142 mulx $npj,$mul1,$tmp1
160 mulx $apj,$mul0,$tmp0 !epilogue!
161 mulx $npj,$mul1,$tmp1
195 mulx $car0,$mul0,$car
    [all...]
  /src/crypto/external/bsd/openssl.old/dist/crypto/bn/asm/
sparcv8plus.S 196 mulx %o3,%g2,%g2
205 mulx %o3,%g3,%g3
214 mulx %o3,%g2,%g2
222 mulx %o3,%g3,%g3
240 mulx %o3,%g2,%g2
250 mulx %o3,%g2,%g2
260 mulx %o3,%g2,%g2
297 mulx %o3,%g2,%g2
304 mulx %o3,%g3,%g3
311 mulx %o3,%g2,%g
    [all...]
sparcv9-mont.pl 121 mulx $car0,$mul0,$car0 ! ap[0]*bp[0]
122 mulx $apj,$mul0,$tmp0 !prologue! ap[1]*bp[0]
127 mulx $n0,$acc0,$mul1 ! "t[0]"*n0
130 mulx $car1,$mul1,$car1 ! np[0]*"t[0]"*n0
131 mulx $npj,$mul1,$acc1 !prologue! np[1]*"t[0]"*n0
139 mulx $apj,$mul0,$tmp0
140 mulx $npj,$mul1,$tmp1
158 mulx $apj,$mul0,$tmp0 !epilogue!
159 mulx $npj,$mul1,$tmp1
193 mulx $car0,$mul0,$car
    [all...]
  /src/external/lgpl3/gmp/dist/mpn/sparc64/ultrasparct3/
mul_1.asm 64 mulx %l0, v0, %o0
68 mulx %l1, v0, %o2
73 mulx %l1, v0, %o2
84 mulx %l1, v0, %o2
86 mulx %l2, v0, %o4
91 mulx %l1, v0, %o2
94 mulx %l2, v0, %o4
104 mulx %l2, v0, %o4
111 mulx %l2, v0, %o4
114 mulx %l3, v0, %i
    [all...]
  /src/crypto/external/apache2/openssl/lib/libcrypto/arch/sparc/
sparcv9-mont.S 38 mulx %o0,%l2,%o0 ! ap[0]*bp[0]
39 mulx %l5,%l2,%g4 !prologue! ap[1]*bp[0]
44 mulx %i4,%o3,%l3 ! "t[0]"*n0
47 mulx %o1,%l3,%o1 ! np[0]*"t[0]"*n0
48 mulx %l6,%l3,%o4 !prologue! np[1]*"t[0]"*n0
56 mulx %l5,%l2,%g4
57 mulx %l6,%l3,%g5
75 mulx %l5,%l2,%g4 !epilogue!
76 mulx %l6,%l3,%g5
110 mulx %o0,%l2,%o
    [all...]
  /src/crypto/external/apache2/openssl/lib/libcrypto/arch/sparc64/
sparcv9-mont.S 38 mulx %o0,%l2,%o0 ! ap[0]*bp[0]
39 mulx %l5,%l2,%g4 !prologue! ap[1]*bp[0]
44 mulx %i4,%o3,%l3 ! "t[0]"*n0
47 mulx %o1,%l3,%o1 ! np[0]*"t[0]"*n0
48 mulx %l6,%l3,%o4 !prologue! np[1]*"t[0]"*n0
56 mulx %l5,%l2,%g4
57 mulx %l6,%l3,%g5
75 mulx %l5,%l2,%g4 !epilogue!
76 mulx %l6,%l3,%g5
110 mulx %o0,%l2,%o
    [all...]
  /src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/
sparcv9-mont.S 38 mulx %o0,%l2,%o0 ! ap[0]*bp[0]
39 mulx %l5,%l2,%g4 !prologue! ap[1]*bp[0]
44 mulx %i4,%o3,%l3 ! "t[0]"*n0
47 mulx %o1,%l3,%o1 ! np[0]*"t[0]"*n0
48 mulx %l6,%l3,%o4 !prologue! np[1]*"t[0]"*n0
56 mulx %l5,%l2,%g4
57 mulx %l6,%l3,%g5
75 mulx %l5,%l2,%g4 !epilogue!
76 mulx %l6,%l3,%g5
110 mulx %o0,%l2,%o
    [all...]

Completed in 23 milliseconds

1 2 3 4 5