/* Copyright (C) 2002-2013 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

#ifndef __SSE__
# error "SSE instruction set not enabled"
#else

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* Get _mm_malloc () and _mm_free ().  */
#include <mm_malloc.h>

/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));

/* Internal data types for implementing the intrinsics.  */
typedef float __v4sf __attribute__ ((__vector_size__ (16)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
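
/* Illustrative note: each two-bit field of the selector picks one source
   element, so the values follow directly from the shifts above, e.g.
   _MM_SHUFFLE (3,2,1,0) == 0xE4 is the identity selector and
   _MM_SHUFFLE (0,1,2,3) == 0x1B reverses the element order.  */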

/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};

/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000

/* Create a vector of zeros.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setzero_ps (void)
{
  return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
}

/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}
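
/* Usage sketch (illustrative only): the _ss forms touch only element 0,
   so a scalar computation can be staged in a vector register without
   disturbing the other lanes, e.g.

     __m128 __v = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f);
     __v = _mm_sqrt_ss (__v);

   leaves element 0 equal to 1.0f and elements 1..3 unchanged.  */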

/* Perform the respective operation on the four SPFP values in A and B.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}

/* Perform logical bit-wise operations on 128-bit values.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}
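
/* Usage sketch (illustrative, not part of this header's API): the
   bit-wise operations are commonly used for sign manipulation.  A
   packed absolute value can be written as

     static __inline __m128
     my_abs_ps (__m128 __x)       -- hypothetical helper name
     {
       return _mm_andnot_ps (_mm_set1_ps (-0.0f), __x);
     }

   since -0.0f carries only the sign bit, which _mm_andnot_ps clears
   in every element.  */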

/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpltss ((__v4sf) __B,
                                                                (__v4sf)
                                                                __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpless ((__v4sf) __B,
                                                                (__v4sf)
                                                                __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpnltss ((__v4sf) __B,
                                                                 (__v4sf)
                                                                 __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpnless ((__v4sf) __B,
                                                                 (__v4sf)
                                                                 __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}
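
/* Usage sketch (illustrative only): the all-ones/all-zeros masks produced
   by the packed comparisons combine with the bit-wise operations above
   into a branchless per-element select, e.g. a hand-written maximum:

     __m128 __mask = _mm_cmpgt_ps (__a, __b);
     __m128 __max  = _mm_or_ps (_mm_and_ps (__mask, __a),
                                _mm_andnot_ps (__mask, __b));

   (_mm_max_ps does this in one instruction; the pattern matters for
   selects that have no dedicated instruction.)  */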

/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}

/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 64-bit integer according to the
   current rounding mode.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64 (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 64-bit integer.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64 (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif

/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}
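
/* Illustrative note: _mm_cvtss_si32 honours the MXCSR rounding mode
   (round-to-nearest-even by default), while _mm_cvttss_si32 always
   truncates toward zero.  With the default mode,

     _mm_cvtss_si32  (_mm_set_ss (2.5f))   -- 2 (nearest even)
     _mm_cvttss_si32 (_mm_set_ss (2.5f))   -- 2 (truncation)
     _mm_cvtss_si32  (_mm_set_ss (3.5f))   -- 4
     _mm_cvttss_si32 (_mm_set_ss (3.5f))   -- 3.  */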

/* Convert B to a SPFP value and insert it as element zero in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */

/* Intel intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}

/* Microsoft intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}

/* Convert the four signed 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu16_ps (__m64 __A)
{
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* Convert the four words to doublewords.  */
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL);
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps (__A);
}

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu8_ps (__m64 __A)
{
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
  return _mm_cvtpu16_ps (__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32x2_ps (__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) _mm_setzero_ps ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__sfa, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi16 (__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi8 (__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL);
}

/* Selects four specific SPFP values from A and B based on MASK.  */
#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK)                                      \
  ((__m128) __builtin_ia32_shufps ((__v4sf)(__m128)(A),                 \
                                   (__v4sf)(__m128)(B), (int)(MASK)))
#endif
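
/* Usage sketch (illustrative only): with A == [a0 a1 a2 a3] and
   B == [b0 b1 b2 b3] (element 0 first), the low two selector fields
   pick from A and the high two from B, so

     _mm_shuffle_ps (__a, __a, _MM_SHUFFLE (0,0,0,0))   -- broadcast a0
     _mm_shuffle_ps (__a, __b, _MM_SHUFFLE (3,2,1,0))   -- [a0 a1 b2 b3]  */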

/* Selects and interleaves the upper two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64-bits of data loaded from P;
   the lower two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (const __v2sf *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2sf *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the lower two SPFP values with 64-bits of data loaded from P;
   the upper two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (const __v2sf *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2sf *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}
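
/* Usage sketch (illustrative only): bit N of the result is the sign bit
   of element N, so combined with a packed comparison it answers "does
   any element satisfy the predicate?":

     if (_mm_movemask_ps (_mm_cmplt_ps (__v, _mm_setzero_ps ())) != 0)
       -- at least one element of __v is negative.  */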

/* Return the contents of the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr () & _MM_EXCEPT_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr () & _MM_MASK_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr () & _MM_ROUND_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr () & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_STATE (unsigned int __mask)
{
  _mm_setcsr ((_mm_getcsr () & ~_MM_EXCEPT_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr ((_mm_getcsr () & ~_MM_MASK_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr ((_mm_getcsr () & ~_MM_ROUND_MASK) | __mode);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr ((_mm_getcsr () & ~_MM_FLUSH_ZERO_MASK) | __mode);
}
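
/* Usage sketch (illustrative only): the helpers above read or rewrite one
   field of the MXCSR at a time, e.g.

     _MM_SET_FLUSH_ZERO_MODE (_MM_FLUSH_ZERO_ON);
     _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);

   Changing the MXCSR affects every subsequent SSE operation in the
   thread, so callers typically save _mm_getcsr () first and restore it
   with _mm_setcsr () afterwards.  */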

/* Create a vector with element 0 as F and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ss (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
}

/* Create a vector with all four elements equal to F.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_ps (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create a vector with element 0 as *P and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ss (float const *__P)
{
  return _mm_set_ss (*__P);
}

/* Create a vector with all four elements equal to *P.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load1_ps (float const *__P)
{
  return _mm_set1_ps (*__P);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps (float const *__P)
{
  return (__m128) *(__v4sf *)__P;
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadu_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadups (__P);
}

/* Load four SPFP values in reverse order.  The address must be aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = *(__v4sf *)__P;
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}

/* Create the vector [Z Y X W].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
  return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
}

/* Create the vector [W X Y Z].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
}
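
/* Illustrative note: _mm_set_ps lists its arguments from the highest
   element down, while _mm_setr_ps lists them in memory order, so

     _mm_set_ps  (4.0f, 3.0f, 2.0f, 1.0f)
     _mm_setr_ps (1.0f, 2.0f, 3.0f, 4.0f)

   both build the vector whose element 0 is 1.0f.  */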

/* Stores the lower SPFP value.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ss (float *__P, __m128 __A)
{
  *__P = __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
}

extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_f32 (__m128 __A)
{
  return __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps (float *__P, __m128 __A)
{
  *(__v4sf *)__P = (__v4sf)__A;
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeu_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeups (__P, (__v4sf)__A);
}

/* Store the lower SPFP value across four words.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  _mm_storeu_ps (__P, __tmp);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  _mm_store_ps (__P, __tmp);
}

/* Sets the low SPFP value of A from the low value of B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_pi16 (__m64 const __A, int const __N)
{
  return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pextrw (__m64 const __A, int const __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N) \
  ((int) __builtin_ia32_vec_ext_v4hi ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pextrw(A, N) _mm_extract_pi16(A, N)
#endif

/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
{
  return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pinsrw (__m64 const __A, int const __D, int const __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N)                                \
  ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(__m64)(A),     \
                                        (int)(D), (int)(N)))

#define _m_pinsrw(A, D, N) _mm_insert_pi16(A, D, N)
#endif

/* Compute the element-wise maximum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_pi16 (__m64 __A, int const __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pshufw (__m64 __A, int const __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pshufw(A, N) _mm_shuffle_pi16 (A, N)
#endif

/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}

/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#ifdef __OPTIMIZE__
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_prefetch (const void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, 0, __I);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), 0, (I))
#endif
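
/* Usage sketch (illustrative only; src, dst4, i, n and the prefetch
   distance of 16 floats are hypothetical choices that depend on the
   workload): prefetching ahead of a streaming loop:

     for (i = 0; i + 4 <= n; i += 4)
       {
         _mm_prefetch ((const void *) &src[i + 16], _MM_HINT_T0);
         dst4 = _mm_add_ps (dst4, _mm_loadu_ps (&src[i]));
       }  */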

/* Stores the data in A to the address P without polluting the caches.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}
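
/* Usage sketch (illustrative only): non-temporal stores bypass the cache
   and are weakly ordered, so a producer typically fences once after the
   whole block has been written:

     for (i = 0; i + 4 <= n; i += 4)
       _mm_stream_ps (&dst[i], _mm_load_ps (&src[i]));
     _mm_sfence ();   -- make the streamed data globally visible.  */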

/* The execution of the next instruction is delayed by an implementation
   specific amount of time.  The instruction does not modify the
   architectural state.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_pause (void)
{
  __builtin_ia32_pause ();
}

/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)                       \
do {                                                                    \
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3);    \
  __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1);                   \
  __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3);                   \
  __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1);                   \
  __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3);                   \
  (row0) = __builtin_ia32_movlhps (__t0, __t1);                         \
  (row1) = __builtin_ia32_movhlps (__t1, __t0);                         \
  (row2) = __builtin_ia32_movlhps (__t2, __t3);                         \
  (row3) = __builtin_ia32_movhlps (__t3, __t2);                         \
} while (0)
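
/* Usage sketch (illustrative only; m is a hypothetical array of 16
   floats holding a row-major 4x4 matrix):

     __m128 __r0 = _mm_loadu_ps (&m[0]);
     __m128 __r1 = _mm_loadu_ps (&m[4]);
     __m128 __r2 = _mm_loadu_ps (&m[8]);
     __m128 __r3 = _mm_loadu_ps (&m[12]);
     _MM_TRANSPOSE4_PS (__r0, __r1, __r2, __r3);

   __r0..__r3 now hold the columns of the original matrix.  */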

/* For backward source compatibility.  */
#ifdef __SSE2__
# include <emmintrin.h>
#endif

#endif /* __SSE__ */
#endif /* _XMMINTRIN_H_INCLUDED */