HomeSort by: relevance | last modified time | path
    Searched refs:q0 (Results 1 - 25 of 28) sorted by relevance

1 2

  /src/sys/arch/arm/cortex/
cpu_in_cksum_asm_neon.S 72 vmovl.u32 q0, d0 /* 2 U32 -> 2 U64 */
73 vshl.u64 q1, q1, q0 /* apply shifts to masks */
99 vmovl.u16 q0, d6 /* 4 U16 -> 4 U32 */
100 vadd.u32 q2, q2, q0 /* add 4 U32 to accumulator */
101 vmovl.u16 q0, d7 /* 4 U16 -> 4 U32 */
102 vadd.u32 q2, q2, q0 /* add 4 U32 to accumulator */
104 vmovl.u16 q0, d8 /* 4 U16 -> 4 U32 */
105 vadd.u32 q2, q2, q0 /* add 4 U32 to accumulator */
106 vmovl.u16 q0, d9 /* 4 U16 -> 4 U32 */
107 vadd.u32 q2, q2, q0 /* add 4 U32 to accumulator */
    [all...]
  /src/common/lib/libc/arch/arm/string/
strlen_neon.S 45 veor q0, q0, q0 /* clear q0 */
55 vmovl.u32 q0, d0 /* 2 U32 -> 2 U64 */
56 vshl.u64 q2, q2, q0 /* shift */
63 vrev64.8 q0, q0 /* convert to BE for clz */
66 vorr q0, q0, q2 /* or "in" leading byte mask */
    [all...]
memset_neon.S 52 vdup.8 q0, r3 /* move fill to SIMD */
85 vmov q1, q0 /* put fill in q1 (d2-d3) */
86 vmov q2, q0 /* put fill in q2 (d4-d5) */
87 vmov q3, q0 /* put fill in q3 (d6-d7) */
173 vmov q1, q0 /* restore d2 & d3 */
memset_arm.S 92 vdup.8 q0, r3 /* move fill to SIMD */
93 vmov q1, q0 /* put fill in q1 (d2-d3) */
  /src/common/lib/libc/arch/arm/gen/
neon_mask.S 39 * OUT q0 = mask
54 vmvn.u64 q0, #0 /* create a mask */
55 vshl.u64 q0, q0, q1 /* shift out unneeded bytes */
  /src/sys/crypto/aes/
aes_ct_dec.c 56 uint32_t q0, q1, q2, q3, q4, q5, q6, q7; local in function:br_aes_ct_bitslice_invSbox
58 q0 = ~q[0];
67 q[6] = q0 ^ q3 ^ q5;
70 q[3] = q5 ^ q0 ^ q2;
72 q[1] = q3 ^ q6 ^ q0;
77 q0 = ~q[0];
86 q[6] = q0 ^ q3 ^ q5;
89 q[3] = q5 ^ q0 ^ q2;
91 q[1] = q3 ^ q6 ^ q0;
130 uint32_t q0, q1, q2, q3, q4, q5, q6, q7 local in function:inv_mix_columns
    [all...]
aes_ct_enc.c 72 uint32_t q0, q1, q2, q3, q4, q5, q6, q7; local in function:mix_columns
75 q0 = q[0];
83 r0 = (q0 >> 8) | (q0 << 24);
92 q[0] = q7 ^ r7 ^ r0 ^ rotr16(q0 ^ r0);
93 q[1] = q0 ^ r0 ^ q7 ^ r7 ^ r1 ^ rotr16(q1 ^ r1);
  /src/lib/libm/src/
k_rem_pio2f.c 53 int32_t jz,jx,jv,jp,jk,carry,n,iq[20],i,j,k,m,q0,ih; local in function:__kernel_rem_pio2f
65 /* determine jx,jv,q0, note that 3>q0 */
68 q0 = e0-8*(jv+1);
91 z = scalbnf(z,q0); /* actual value of z */
96 if(q0>0) { /* need iq[jz-1] to determine n */
97 i = (iq[jz-1]>>(8-q0)); n += i;
98 iq[jz-1] -= i<<(8-q0);
99 ih = iq[jz-1]>>(7-q0);
101 else if(q0==0) ih = iq[jz-1]>>8;
    [all...]
e_jnf.c 113 float q0,q1,h,tmp; int32_t k,m; local in function:__ieee754_jnf
115 q0 = w; z = w+h; q1 = w*z - (float)1.0; k=1;
118 tmp = z*q1 - q0;
119 q0 = q1;
k_rem_pio2.c 111 * q0 the corresponding exponent of q[0]. Note that the
112 * exponent for q[i] would be q0-24*i.
302 int32_t jz,jx,jv,jp,jk,carry,n,iq[20],i,j,k,m,q0,ih; local in function:__kernel_rem_pio2
314 /* determine jx,jv,q0, note that 3>q0 */
317 q0 = e0-24*(jv+1);
340 z = scalbn(z,q0); /* actual value of z */
345 if(q0>0) { /* need iq[jz-1] to determine n */
346 i = (iq[jz-1]>>(24-q0)); n += i;
347 iq[jz-1] -= i<<(24-q0);
    [all...]
e_jn.c 159 double q0,q1,h,tmp; int32_t k,m; local in function:__ieee754_jn
161 q0 = w; z = w+h; q1 = w*z - 1.0; k=1;
164 tmp = z*q1 - q0;
165 q0 = q1;
  /src/sys/crypto/aes/arch/x86/
aes_sse2_dec.c 40 __m128i q0, q1, q2, q3, q4, q5, q6, q7; local in function:aes_sse2_bitslice_invSbox
42 q0 = ~q[0];
51 q[3] = _mm_unpacklo_epi64(q5 ^ q0 ^ q2, q1 ^ q4 ^ q6);
52 q[2] = _mm_unpacklo_epi64(q4 ^ q7 ^ q1, q0 ^ q3 ^ q5);
53 q[1] = _mm_unpacklo_epi64(q3 ^ q6 ^ q0, q7 ^ q2 ^ q4);
58 q0 = ~q[0];
67 q[3] = _mm_unpacklo_epi64(q5 ^ q0 ^ q2, q1 ^ q4 ^ q6);
68 q[2] = _mm_unpacklo_epi64(q4 ^ q7 ^ q1, q0 ^ q3 ^ q5);
69 q[1] = _mm_unpacklo_epi64(q3 ^ q6 ^ q0, q7 ^ q2 ^ q4);
123 __m128i q0, q1, q2, q3, q4, q5, q6, q7 local in function:inv_mix_columns
    [all...]
aes_sse2_enc.c 82 __m128i q0, q1, q2, q3, q4, q5, q6, q7; local in function:mix_columns
86 q0 = q[0];
90 r0 = _mm_srli_epi64(q0, 16) | _mm_slli_epi64(q0, 48);
98 q4 = _mm_shuffle_epi32(q0, 0x0e);
105 s0 = q7 ^ r7 ^ r0 ^ rotr32(q0 ^ r0);
106 s1 = q0 ^ r0 ^ q7 ^ r7 ^ r1 ^ rotr32(q1 ^ r1);
aes_sse2.c 239 __m128i q0 = q[0]; local in function:aes_sse2_ortho
247 SWAP8(q0, q4);
251 q[0] = _mm_unpacklo_epi64(q0, q4);
356 __m128i q[4], q0, q1, q2, q3, q4, q5, q6, q7; local in function:aes_sse2_keysched
362 q0 = q[0] & _mm_set1_epi64x(0x1111111111111111);
366 q4 = _mm_shuffle_epi32(q0, 0x0e);
370 _mm_storeu_si64(&comp_skey[j + 0], q0 | q1 | q2 | q3);
  /src/sbin/mount_portal/
conf.c 128 pinsert(path *p0, qelem *q0)
135 for (q = q0->q_forw; q != q0; q = q->q_forw) {
141 ins_que(&p0->p_q, q0->q_back);
238 * Discard all currently held path structures on q0.
242 preplace(qelem *q0, qelem *xq)
249 while (q0->q_forw != q0) {
250 qelem *q = q0->q_forw;
257 ins_que(q, q0);
    [all...]
  /src/sys/crypto/aes/arch/arm/
aes_neon_32.S 178 * uint8x16_t@q0
179 * aes_neon_enc1(const struct aesenc *enc@r0, uint8x16_t x@q0,
205 * q0={d0-d1}: x/ak/A
250 vshr.u8 q3, q0, #4
251 vand q2, q0, q1 /* q2 := x & 0x0f0f... */
260 /* q0 := rk[0] + iptlo(lo) + ipthi(hi) */
261 veor q0, q14, q2
262 veor q0, q0, q3
269 /* q0 := A = rk[i] + sb1_0(io) + sb1_1(jo) */
    [all...]
  /src/sys/crypto/chacha/arch/arm/
chacha_neon_32.S 189 vdup.32 q0, d24[0] /* q0-q3 := constant */
210 1: ROUNDLD q0,q1,q2,q3, q5,q6,q7,q4, q10,q11,q8,q9, q15,q12,q13,q14
212 ROUND q0,q1,q2,q3, q4,q5,q6,q7, q8,q9,q10,q11, q12,q13,q14,q15, \
214 ROUNDLD q0,q1,q2,q3, q4,q5,q6,q7, q8,q9,q10,q11, q12,q13,q14,q15
215 ROUND q0,q1,q2,q3, q5,q6,q7,q4, q10,q11,q8,q9, q15,q12,q13,q14, \
222 * q0 = (x0[0], x1[0]; x2[0], x3[0])
232 * q0 = (x0[0], x0[1]; x1[0], x1[1])
250 * q0 = (x0[0], x0[1]; x0[2], x0[3])
262 vzip.32 q0, q
    [all...]
  /src/sys/external/isc/libsodium/dist/src/libsodium/include/sodium/private/
sse2_64_32.h 18 # define _mm_set_epi64x(Q0, Q1) sodium__mm_set_epi64x((Q0), (Q1))
20 sodium__mm_set_epi64x(int64_t q1, int64_t q0)
23 x0.as64 = q0; x1.as64 = q1;
  /src/sys/arch/arm/vfp/
pmap_vfp.S 58 veor q0, q0, q0
  /src/lib/libm/noieee_src/
n_jn.c 218 double q0,q1,h,tmp; int k,m; local in function:jn
220 q0 = w; z = w+h; q1 = w*z - 1.0; k=1;
223 tmp = z*q1 - q0;
224 q0 = q1;
n_lgamma.c 120 #define q0 1.000000000000000444089209850062e+00 macro
249 q = q0+y*(q1+y*(q2+y*(q3+y*(q4+y*(q5+y*q6)))));
  /src/libexec/ld.elf_so/arch/aarch64/
rtld_start.S 117 /* save q0-q7 for arguments */
118 stp q0, q1, [sp, #-32]!
131 /* restore q0-q7 for arguments */
135 ldp q0, q1, [sp], #32
  /src/sys/arch/aarch64/aarch64/
cpuswitch.S 417 ldp q0, q1, [x0, #FPREG_Q0]
441 stp q0, q1, [x0, #FPREG_Q0]
  /src/sys/dev/raidframe/
rf_pq.c 802 unsigned char *q0 = &(rf_qinv[col][0]); local in function:rf_PQ_recover
810 a = q0[a0 << 5 | a1];
814 a = a | INSERT(q0[a0<<5 | a1],i)
  /src/lib/libcrypt/
crypt.c 811 #define CRUNCH(p0, p1, q0, q1) \
812 k = (q0 ^ q1) & SALT; \
813 B.b32.i0 = k ^ q0 ^ kp->b32.i0; \

Completed in 25 milliseconds

1 2