/* Copyright (C) 2002-2022 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* Get _mm_malloc () and _mm_free ().  */
#include <mm_malloc.h>

/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  /* _MM_HINT_ET is _MM_HINT_T with the third bit set.  */
  _MM_HINT_ET0 = 7,
  _MM_HINT_ET1 = 6,
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};

/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#ifdef __OPTIMIZE__
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_prefetch (const void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, (__I & 0x4) >> 2, __I & 0x3);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), ((I & 0x4) >> 2), (I & 0x3))
#endif
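
/* A hypothetical usage sketch (not part of this header): prefetch the
   next chunk of an array into all cache levels while processing the
   current one.  _MM_HINT_T0 encodes a read prefetch (bit 2 clear) with
   maximal temporal locality (low bits 3); the identifiers below are
   illustrative only.

     for (__i = 0; __i + 16 < __n; __i += 16)
       {
         _mm_prefetch ((const void *) &__a[__i + 16], _MM_HINT_T0);
         __process (&__a[__i]);
       }
*/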

#ifndef __SSE__
#pragma GCC push_options
#pragma GCC target("sse")
#define __DISABLE_SSE__
#endif /* __SSE__ */

/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));

/* Unaligned version of the same type.  */
typedef float __m128_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1)));

/* Internal data types for implementing the intrinsics.  */
typedef float __v4sf __attribute__ ((__vector_size__ (16)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
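
/* For example, _MM_SHUFFLE (3, 2, 1, 0) == 0xE4, the identity selector:
   result element 0 comes from source element 0, and so on, while
   _MM_SHUFFLE (0, 1, 2, 3) reverses the element order (this is how
   _mm_loadr_ps below is implemented).  */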

/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000

/* Create an undefined vector.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_undefined_ps (void)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winit-self"
  __m128 __Y = __Y;
#pragma GCC diagnostic pop
  return __Y;
}

/* Create a vector of zeros.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setzero_ps (void)
{
  return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
}

/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}
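
/* For instance, with __A = {1, 2, 3, 4} and __B = {10, 20, 30, 40}
   (element 0 listed first), _mm_add_ss returns {11, 2, 3, 4}: only
   element 0 is computed, elements 1-3 are copied from __A.  */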

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}
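
/* Note that _mm_rcp_ss and _mm_rsqrt_ss (and the packed forms below)
   are fast approximations: the hardware guarantees only a maximum
   relative error of 1.5 * 2^-12, so a Newton-Raphson refinement step
   is needed when near-full single precision is required.  */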

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform the respective operation on the four SPFP values in A and B.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A + (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A - (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A * (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A / (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}

/* Perform logical bit-wise operations on 128-bit values.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}

/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}
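
/* E.g. _mm_cmplt_ss applied to {1, 2, 3, 4} and {5, 0, 0, 0} yields
   {all-ones mask, 2, 3, 4}.  The greater-than variants below have no
   dedicated builtin; they swap the operands of the lt/le builtins and
   then use MOVSS to restore the upper elements of __A.  */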

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpltss ((__v4sf) __B,
                                                                (__v4sf)
                                                                __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpless ((__v4sf) __B,
                                                                (__v4sf)
                                                                __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpnltss ((__v4sf) __B,
                                                                 (__v4sf)
                                                                 __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpnless ((__v4sf) __B,
                                                                 (__v4sf)
                                                                 __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}

/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */
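
/* The _mm_comi* forms use the COMISS instruction, which raises the
   invalid-operation exception if either operand is any kind of NaN;
   the _mm_ucomi* forms below use UCOMISS, which raises it only for a
   signaling NaN.  */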

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}

/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}
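
/* Under the default round-to-nearest-even mode, 2.5f converts to 2
   and 3.5f to 4; use _mm_cvttss_si32 below for truncation toward
   zero regardless of the rounding mode.  */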

#ifdef __x86_64__
/* Convert the lower SPFP value to a 32-bit integer according to the
   current rounding mode.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64 (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 32-bit integer.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64 (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif

/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}

/* Convert B to a SPFP value and insert it as element zero in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */

/* Intel intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}

/* Microsoft intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}

/* Convert the four signed 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu16_ps (__m64 __A)
{
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* Convert the four words to doublewords.  */
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL);
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps(__A);
}

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu8_ps(__m64 __A)
{
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
  return _mm_cvtpu16_ps(__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) _mm_setzero_ps ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__sfa, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi16(__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi8(__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL);
}

/* Selects four specific SPFP values from A and B based on MASK.  */
#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK) \
  ((__m128) __builtin_ia32_shufps ((__v4sf)(__m128)(A), \
                                   (__v4sf)(__m128)(B), (int)(MASK)))
#endif
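
/* A usage sketch (the variable names are illustrative): broadcast
   element 2 of a vector into all four lanes.

     __m128 __b = _mm_shuffle_ps (__a, __a, _MM_SHUFFLE (2, 2, 2, 2));

   The two low result elements are selected from the first operand and
   the two high result elements from the second.  */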

/* Selects and interleaves the upper two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64-bits of data loaded from P;
   the lower two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (const __v2sf *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2sf *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}
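
/* In element terms (element 0 first): _mm_movehl_ps (__A, __B) yields
   {__B[2], __B[3], __A[2], __A[3]}, and _mm_movelh_ps (__A, __B)
   yields {__A[0], __A[1], __B[0], __B[1]}.  */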

/* Sets the lower two SPFP values with 64-bits of data loaded from P;
   the upper two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (const __v2sf *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2sf *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}
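
/* E.g. _mm_movemask_ps (_mm_cmplt_ps (__a, __b)) returns a value in
   the range 0..15 whose bit I is set exactly when element I of __a
   is less than element I of __b.  */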

/* Return the contents of the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr() & _MM_EXCEPT_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr() & _MM_MASK_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr() & _MM_ROUND_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_STATE(unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}
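
/* A usage sketch: temporarily switch to round-toward-zero, then
   restore the previous rounding mode.

     unsigned int __saved = _MM_GET_ROUNDING_MODE ();
     _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
     ...
     _MM_SET_ROUNDING_MODE (__saved);
*/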

/* Create a vector with element 0 as F and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ss (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
}

/* Create a vector with all four elements equal to F.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_ps (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create a vector with element 0 as *P and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ss (float const *__P)
{
  return _mm_set_ss (*__P);
}

/* Create a vector with all four elements equal to *P.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load1_ps (float const *__P)
{
  return _mm_set1_ps (*__P);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps (float const *__P)
{
  return *(__m128 const *)__P;
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadu_ps (float const *__P)
{
  return *(__m128_u const *)__P;
}

/* Load four SPFP values in reverse order.  The address must be aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = *(__v4sf const *)__P;
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}

/* Create the vector [Z Y X W].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
  return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
}

/* Create the vector [W X Y Z].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
}
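
/* Element order: _mm_set_ps takes its arguments from element 3 down
   to element 0, while _mm_setr_ps takes them from element 0 up, so
   _mm_set_ps (3.0f, 2.0f, 1.0f, 0.0f) and
   _mm_setr_ps (0.0f, 1.0f, 2.0f, 3.0f) build the same vector, whose
   element 0 (the value _mm_store_ss below would write) is 0.0f.  */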

/* Stores the lower SPFP value.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ss (float *__P, __m128 __A)
{
  *__P = ((__v4sf)__A)[0];
}

extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_f32 (__m128 __A)
{
  return ((__v4sf)__A)[0];
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps (float *__P, __m128 __A)
{
  *(__m128 *)__P = __A;
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeu_ps (float *__P, __m128 __A)
{
  *(__m128_u *)__P = __A;
}

/* Store the lower SPFP value across four words.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  _mm_storeu_ps (__P, __tmp);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  _mm_store_ps (__P, __tmp);
}

/* Sets the low SPFP value of A from the low value of B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_shuffle ((__v4sf)__A, (__v4sf)__B,
                                     __extension__
                                     (__attribute__((__vector_size__ (16))) int)
                                     {4,1,2,3});
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_pi16 (__m64 const __A, int const __N)
{
  return (unsigned short) __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pextrw (__m64 const __A, int const __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N) \
  ((int) (unsigned short) __builtin_ia32_vec_ext_v4hi ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pextrw(A, N) _mm_extract_pi16(A, N)
#endif

/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
{
  return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pinsrw (__m64 const __A, int const __D, int const __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(__m64)(A), \
                                        (int)(D), (int)(N)))

#define _m_pinsrw(A, D, N) _mm_insert_pi16(A, D, N)
#endif
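
/* E.g. _mm_extract_pi16 (__a, 2) zero-extends word 2 of __a to an
   int, and _mm_insert_pi16 (__a, 0x1234, 0) replaces word 0.  The
   selector must be a compile-time constant, hence the macro forms
   when __OPTIMIZE__ is not defined.  */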

/* Compute the element-wise maximum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_pi16 (__m64 __A, int const __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pshufw (__m64 __A, int const __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pshufw(A, N) _mm_shuffle_pi16 (A, N)
#endif

/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
#ifdef __MMX_WITH_SSE__
  /* Emulate MMX maskmovq with SSE2 maskmovdqu and handle unmapped bits
     64:127 at address __P.  */
  typedef long long __v2di __attribute__ ((__vector_size__ (16)));
  typedef char __v16qi __attribute__ ((__vector_size__ (16)));
  /* Zero-extend __A and __N to 128 bits.  */
  __v2di __A128 = __extension__ (__v2di) { ((__v1di) __A)[0], 0 };
  __v2di __N128 = __extension__ (__v2di) { ((__v1di) __N)[0], 0 };

  /* Check the alignment of __P.  */
  __SIZE_TYPE__ offset = ((__SIZE_TYPE__) __P) & 0xf;
  if (offset)
    {
      /* If the misalignment of __P exceeds 8, decrease __P by 8 bytes.
	 Otherwise, decrease __P by the misalignment.  */
      if (offset > 8)
	offset = 8;
      __P = (char *) (((__SIZE_TYPE__) __P) - offset);

      /* Shift __A128 and __N128 to the left by the adjustment.  */
      switch (offset)
	{
	case 1:
	  __A128 = __builtin_ia32_pslldqi128 (__A128, 8);
	  __N128 = __builtin_ia32_pslldqi128 (__N128, 8);
	  break;
	case 2:
	  __A128 = __builtin_ia32_pslldqi128 (__A128, 2 * 8);
	  __N128 = __builtin_ia32_pslldqi128 (__N128, 2 * 8);
	  break;
	case 3:
	  __A128 = __builtin_ia32_pslldqi128 (__A128, 3 * 8);
	  __N128 = __builtin_ia32_pslldqi128 (__N128, 3 * 8);
	  break;
	case 4:
	  __A128 = __builtin_ia32_pslldqi128 (__A128, 4 * 8);
	  __N128 = __builtin_ia32_pslldqi128 (__N128, 4 * 8);
	  break;
	case 5:
	  __A128 = __builtin_ia32_pslldqi128 (__A128, 5 * 8);
	  __N128 = __builtin_ia32_pslldqi128 (__N128, 5 * 8);
	  break;
	case 6:
	  __A128 = __builtin_ia32_pslldqi128 (__A128, 6 * 8);
	  __N128 = __builtin_ia32_pslldqi128 (__N128, 6 * 8);
	  break;
	case 7:
	  __A128 = __builtin_ia32_pslldqi128 (__A128, 7 * 8);
	  __N128 = __builtin_ia32_pslldqi128 (__N128, 7 * 8);
	  break;
	case 8:
	  __A128 = __builtin_ia32_pslldqi128 (__A128, 8 * 8);
	  __N128 = __builtin_ia32_pslldqi128 (__N128, 8 * 8);
	  break;
	default:
	  break;
	}
    }
  __builtin_ia32_maskmovdqu ((__v16qi)__A128, (__v16qi)__N128, __P);
#else
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
#endif
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}
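
/* A usage sketch (identifiers illustrative): store only the bytes of
   __d whose corresponding mask byte has its high bit set; the other
   bytes at *__p are left untouched.

     __m64 __mask = _mm_cmpgt_pi8 (__d, _mm_setzero_si64 ());
     _mm_maskmove_si64 (__d, __mask, __p);

   _mm_cmpgt_pi8 and _mm_setzero_si64 come from <mmintrin.h>.  */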

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}

/* Stores the data in A to the address P without polluting the caches.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}
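
/* _mm_sfence is typically issued after a run of the non-temporal
   stores above (_mm_stream_pi, _mm_stream_ps) to make them globally
   visible before any later store.  */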

/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \
  __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1); \
  __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3); \
  __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1); \
  __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3); \
  (row0) = __builtin_ia32_movlhps (__t0, __t1); \
  (row1) = __builtin_ia32_movhlps (__t1, __t0); \
  (row2) = __builtin_ia32_movlhps (__t2, __t3); \
  (row3) = __builtin_ia32_movhlps (__t3, __t2); \
} while (0)
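
/* A usage sketch, assuming __r0..__r3 already hold the four rows:

     _MM_TRANSPOSE4_PS (__r0, __r1, __r2, __r3);

   Afterwards __r0 holds the old column 0, __r1 column 1, and so on.
   The arguments must be assignable lvalues, since the macro writes
   the transposed rows back into them.  */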

/* For backward source compatibility.  */
# include <emmintrin.h>

#ifdef __DISABLE_SSE__
#undef __DISABLE_SSE__
#pragma GCC pop_options
#endif /* __DISABLE_SSE__ */

/* The execution of the next instruction is delayed by an implementation
   specific amount of time.  The instruction does not modify the
   architectural state.  This is after the pop_options pragma because
   it does not require SSE support in the processor--the encoding is a
   nop on processors that do not support it.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_pause (void)
{
  __builtin_ia32_pause ();
}
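
/* A usage sketch (identifiers illustrative): _mm_pause in a spin-wait
   loop reduces power consumption and helps the processor leave the
   loop quickly once the flag changes.

     while (!__atomic_load_n (&__flag, __ATOMIC_ACQUIRE))
       _mm_pause ();
*/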

#endif /* _XMMINTRIN_H_INCLUDED */