/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

#ifndef __SSE__
# error "SSE instruction set not enabled"
#else

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* Get _mm_malloc () and _mm_free ().  */
#include <mm_malloc.h>

/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));

/* Internal data types for implementing the intrinsics.  */
typedef float __v4sf __attribute__ ((__vector_size__ (16)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
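
/* For example, _MM_SHUFFLE (3, 2, 1, 0) is (3 << 6) | (2 << 4) | (1 << 2) | 0
   == 0xE4, the identity selector; each two-bit field names the source
   element for one result slot (see _mm_shuffle_ps below).  */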

/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};

/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000

/* Create a vector of zeros.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setzero_ps (void)
{
  return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
}

/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */
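/* Illustrative (not part of the original header): with
   __A = {1, 2, 3, 4} and __B = {10, 20, 30, 40},
   _mm_add_ss (__A, __B) yields {11, 2, 3, 4}.  */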

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform the respective operation on the four SPFP values in A and B.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}

/* Perform logical bit-wise operations on 128-bit values.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}

/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */
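/* Illustrative note: the "mask" is the bit pattern 0xFFFFFFFF, which reads
   as a NaN if interpreted as a float; it is meant to be consumed by the
   bit-wise operations above rather than used as a number.  */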

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpltss ((__v4sf) __B,
                                                                (__v4sf)
                                                                __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpless ((__v4sf) __B,
                                                                (__v4sf)
                                                                __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpnltss ((__v4sf) __B,
                                                                 (__v4sf)
                                                                 __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpnless ((__v4sf) __B,
                                                                 (__v4sf)
                                                                 __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */
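/* A common use of these masks is a branchless select (illustrative
   sketch; the helper name __sel_ps is hypothetical):

     static __inline __m128
     __sel_ps (__m128 __mask, __m128 __a, __m128 __b)
     {
       return _mm_or_ps (_mm_and_ps (__mask, __a),
                         _mm_andnot_ps (__mask, __b));
     }

   which picks elements of __a where the comparison was true and elements
   of __b elsewhere.  */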

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}

/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */
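/* Note: these map to COMISS, which raises the invalid-operation exception
   for quiet as well as signaling NaN operands; the _mm_ucomi*_ss variants
   below map to UCOMISS, which signals only on SNaNs.  */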

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}

/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}
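
/* Illustrative: under the default round-to-nearest-even mode,
   _mm_cvtss_si32 (_mm_set_ss (2.5f)) yields 2 and
   _mm_cvtss_si32 (_mm_set_ss (3.5f)) yields 4; the _mm_cvttss_si32
   family below rounds toward zero instead.  */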

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 64-bit integer according to the
   current rounding mode.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64 (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 64-bit integer.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64 (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif

/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}

/* Convert B to a SPFP value and insert it as element zero in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */

/* Intel intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}

/* Microsoft intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}

/* Convert the four signed 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}
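
/* Worked example of the sign trick above (illustrative): for the input
   word 0xFFFB (-5), pcmpgtw against zero yields the mask word 0xFFFF, and
   unpacking the two gives the doubleword 0xFFFFFFFB, which is -5 again,
   i.e. a manual sign extension.  */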

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu16_ps (__m64 __A)
{
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* Convert the four words to doublewords.  */
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL);
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps (__A);
}

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu8_ps (__m64 __A)
{
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
  return _mm_cvtpu16_ps (__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32x2_ps (__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) _mm_setzero_ps ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__sfa, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi16 (__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi8 (__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL);
}

/* Selects four specific SPFP values from A and B based on MASK.  */
#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK) \
  ((__m128) __builtin_ia32_shufps ((__v4sf)(__m128)(A), \
                                   (__v4sf)(__m128)(B), (int)(MASK)))
#endif
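
/* Illustrative: with __A = {a0, a1, a2, a3} and __B = {b0, b1, b2, b3},
   _mm_shuffle_ps (__A, __B, _MM_SHUFFLE (3, 2, 1, 0)) yields
   {a0, a1, b2, b3}; the two low fields of the mask select from __A into
   the low half of the result, the two high fields from __B into the
   high half.  */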

/* Selects and interleaves the upper two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64 bits of data loaded from P;
   the lower two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (const __v2sf *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2sf *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the lower two SPFP values with 64 bits of data loaded from P;
   the upper two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (const __v2sf *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2sf *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}

/* Return the contents of the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr () & _MM_EXCEPT_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr () & _MM_MASK_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr () & _MM_ROUND_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr () & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_STATE (unsigned int __mask)
{
  _mm_setcsr ((_mm_getcsr () & ~_MM_EXCEPT_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr ((_mm_getcsr () & ~_MM_MASK_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr ((_mm_getcsr () & ~_MM_ROUND_MASK) | __mode);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr ((_mm_getcsr () & ~_MM_FLUSH_ZERO_MASK) | __mode);
}
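
/* Typical usage sketch (illustrative, not part of the original header):
   temporarily switch to truncation, then restore the previous mode:

     unsigned int __saved = _MM_GET_ROUNDING_MODE ();
     _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
     (conversions that should truncate go here)
     _MM_SET_ROUNDING_MODE (__saved);  */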

/* Create a vector with element 0 as F and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ss (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
}

/* Create a vector with all four elements equal to F.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_ps (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create a vector with element 0 as *P and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ss (float const *__P)
{
  return _mm_set_ss (*__P);
}

/* Create a vector with all four elements equal to *P.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load1_ps (float const *__P)
{
  return _mm_set1_ps (*__P);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps (float const *__P)
{
  return (__m128) *(__v4sf *)__P;
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadu_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadups (__P);
}

/* Load four SPFP values in reverse order.  The address must be aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = *(__v4sf *)__P;
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}

/* Create the vector [Z Y X W].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
  return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
}

/* Create the vector [W X Y Z].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
}
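
/* Note the argument order: _mm_set_ps (__Z, __Y, __X, __W) places __W in
   element 0 (the lowest address when stored), whereas _mm_setr_ps lists
   elements from lowest to highest, so _mm_setr_ps (1, 2, 3, 4) equals
   _mm_set_ps (4, 3, 2, 1).  */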

/* Stores the lower SPFP value.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ss (float *__P, __m128 __A)
{
  *__P = __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
}

extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_f32 (__m128 __A)
{
  return __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps (float *__P, __m128 __A)
{
  *(__v4sf *)__P = (__v4sf)__A;
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeu_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeups (__P, (__v4sf)__A);
}

/* Store the lower SPFP value across four words.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  _mm_storeu_ps (__P, __tmp);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  _mm_store_ps (__P, __tmp);
}

/* Sets the low SPFP value of A from the low value of B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_pi16 (__m64 const __A, int const __N)
{
  return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pextrw (__m64 const __A, int const __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N) \
  ((int) __builtin_ia32_vec_ext_v4hi ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pextrw(A, N) _mm_extract_pi16(A, N)
#endif

/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
{
  return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pinsrw (__m64 const __A, int const __D, int const __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(__m64)(A), \
                                        (int)(D), (int)(N)))

#define _m_pinsrw(A, D, N) _mm_insert_pi16(A, D, N)
#endif

/* Compute the element-wise maximum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_pi16 (__m64 __A, int const __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pshufw (__m64 __A, int const __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pshufw(A, N) _mm_shuffle_pi16 (A, N)
#endif

/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}
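
/* Illustrative: with __N = 0x0000000000FFFF00 (high bit set only in bytes
   1 and 2), _mm_maskmove_si64 (__A, __N, __P) stores bytes 1 and 2 of __A
   to __P[1] and __P[2] and leaves the other six destination bytes
   untouched.  */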

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}

/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#ifdef __OPTIMIZE__
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_prefetch (const void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, 0, __I);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), 0, (I))
#endif
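
/* Illustrative use, prefetching ahead of a streaming loop (__p is a
   hypothetical pointer):

     _mm_prefetch ((const char *)__p + 256, _MM_HINT_T0);

   GCC lowers this to __builtin_prefetch with the hint as the locality
   argument, as seen above.  */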

/* Stores the data in A to the address P without polluting the caches.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}
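
/* Sketch of the intended pairing (illustrative; __dst, __src and __n are
   hypothetical, both pointers 16-byte aligned): a non-temporal copy loop
   streams the stores and fences once at the end so the weakly-ordered
   stores become globally visible:

     for (__i = 0; __i < __n; __i += 4)
       _mm_stream_ps (__dst + __i, _mm_load_ps (__src + __i));
     _mm_sfence ();  */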

/* The execution of the next instruction is delayed by an
   implementation-specific amount of time.  The instruction does not
   modify the architectural state.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_pause (void)
{
  __asm__ __volatile__ ("rep; nop" : : );
}

/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \
  __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1); \
  __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3); \
  __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1); \
  __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3); \
  (row0) = __builtin_ia32_movlhps (__t0, __t1); \
  (row1) = __builtin_ia32_movhlps (__t1, __t0); \
  (row2) = __builtin_ia32_movlhps (__t2, __t3); \
  (row3) = __builtin_ia32_movhlps (__t3, __t2); \
} while (0)
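
/* Usage sketch (illustrative; __m is a hypothetical 16-byte-aligned array
   of 16 floats):

     __m128 __row0 = _mm_load_ps (__m + 0);
     __m128 __row1 = _mm_load_ps (__m + 4);
     __m128 __row2 = _mm_load_ps (__m + 8);
     __m128 __row3 = _mm_load_ps (__m + 12);
     _MM_TRANSPOSE4_PS (__row0, __row1, __row2, __row3);

   after which __row0 holds the first column of the original matrix.  */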

/* For backward source compatibility.  */
#ifdef __SSE2__
# include <emmintrin.h>
#endif

#endif /* __SSE__ */
#endif /* _XMMINTRIN_H_INCLUDED */