/* Copyright (C) 2002-2022 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* Get _mm_malloc () and _mm_free ().  */
#include <mm_malloc.h>

/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  /* _MM_HINT_ET is _MM_HINT_T with the 3rd bit set.  */
  _MM_HINT_ET0 = 7,
  _MM_HINT_ET1 = 6,
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};

/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#ifdef __OPTIMIZE__
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_prefetch (const void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, (__I & 0x4) >> 2, __I & 0x3);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), ((I & 0x4) >> 2), (I & 0x3))
#endif
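/* Example: a hypothetical helper (not part of the Intel API) that walks an
   array one cache line at a time, pulling each line toward the caches ahead
   of use.  It assumes 64-byte cache lines (16 floats), which is common but
   not architecturally guaranteed.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
__example_prefetch_floats (const float *__P, int __N)
{
  int __i;
  for (__i = 0; __i < __N; __i += 16)
    _mm_prefetch (__P + __i, _MM_HINT_T0);
}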
#ifndef __SSE__
#pragma GCC push_options
#pragma GCC target("sse")
#define __DISABLE_SSE__
#endif /* __SSE__ */

/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));

/* Unaligned version of the same type.  */
typedef float __m128_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1)));

/* Internal data types for implementing the intrinsics.  */
typedef float __v4sf __attribute__ ((__vector_size__ (16)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
  (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
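/* Worked example: _MM_SHUFFLE (3, 2, 1, 0)
   == (3 << 6) | (2 << 4) | (1 << 2) | 0 == 0xE4, the identity selector
   (result element I comes from source element I), while
   _MM_SHUFFLE (0, 0, 0, 0) == 0x00 selects element 0 four times.  */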
/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000

/* Create an undefined vector.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_undefined_ps (void)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winit-self"
  __m128 __Y = __Y;
#pragma GCC diagnostic pop
  return __Y;
}

/* Create a vector of zeros.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setzero_ps (void)
{
  return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
}

/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}
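/* Example: _mm_rcp_ss returns only an ~12-bit approximation; one
   Newton-Raphson step, X1 = X0 * (2 - A * X0), refines it to nearly full
   single precision.  A hypothetical helper, shown as a sketch:  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
__example_rcp_ss_refined (__m128 __A)
{
  __m128 __Two = __extension__ (__m128){ 2.0f, 0.0f, 0.0f, 0.0f };
  __m128 __X0 = _mm_rcp_ss (__A);
  return _mm_mul_ss (__X0, _mm_sub_ss (__Two, _mm_mul_ss (__A, __X0)));
}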
/* Perform the respective operation on the four SPFP values in A and B.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A + (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A - (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A * (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A / (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}

/* Perform logical bit-wise operations on 128-bit values.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}
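/* Example: the four bitwise operations compose into a branchless select;
   for each bit, the result takes __A where __Mask is set and __B where it
   is clear.  A hypothetical helper, typically fed a compare-result mask
   such as those produced by the comparisons below:  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
__example_select_ps (__m128 __Mask, __m128 __A, __m128 __B)
{
  return _mm_or_ps (_mm_and_ps (__Mask, __A), _mm_andnot_ps (__Mask, __B));
}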
/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpltss ((__v4sf) __B,
								(__v4sf) __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpless ((__v4sf) __B,
								(__v4sf) __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpnltss ((__v4sf) __B,
								 (__v4sf) __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpnless ((__v4sf) __B,
								 (__v4sf) __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}
/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}
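/* Example: a compare mask combined with the bitwise operations vectorizes
   a branch.  This hypothetical helper keeps the lanes of __A that are
   greater than zero and zeroes the rest:  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
__example_clamp_negative_ps (__m128 __A)
{
  return _mm_and_ps (__A, _mm_cmpgt_ps (__A, _mm_setzero_ps ()));
}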
/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}
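/* Note: the _mm_comi* tests map to COMISS, which raises the invalid-
   operation exception if either operand is any NaN; the _mm_ucomi* tests
   map to UCOMISS, which raises it only for signaling NaNs.  The ucomi
   forms are therefore the quieter choice when NaNs may appear in data.  */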
/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 32-bit integer according to the
   current rounding mode.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64 (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 32-bit integer.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64 (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif
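/* Worked example of rounding versus truncation: under the default
   round-to-nearest-even mode, _mm_cvtss_si32 converts 1.5f to 2, while
   _mm_cvttss_si32 truncates it to 1.  A hypothetical sketch:  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
__example_round_minus_trunc (void)
{
  __m128 __X = __extension__ (__m128){ 1.5f, 0.0f, 0.0f, 0.0f };
  return _mm_cvtss_si32 (__X) - _mm_cvttss_si32 (__X);	/* 2 - 1 == 1.  */
}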
/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}

/* Convert B to a SPFP value and insert it as element zero in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */

/* Intel intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}

/* Microsoft intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}

/* Convert the four signed 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}
/* Convert the four unsigned 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu16_ps (__m64 __A)
{
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* Convert the four words to doublewords.  */
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL);
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps(__A);
}

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu8_ps(__m64 __A)
{
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
  return _mm_cvtpu16_ps(__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) _mm_setzero_ps ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__sfa, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi16(__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}
/* Convert the four SPFP values in A to four signed 8-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi8(__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL);
}

/* Selects four specific SPFP values from A and B based on MASK.  */
#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK) \
  ((__m128) __builtin_ia32_shufps ((__v4sf)(__m128)(A), \
				   (__v4sf)(__m128)(B), (int)(MASK)))
#endif
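/* Example: _MM_SHUFFLE supplies the MASK argument.  This hypothetical
   helper broadcasts element 0 of __A across all four lanes:  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
__example_broadcast0_ps (__m128 __A)
{
  return _mm_shuffle_ps (__A, __A, _MM_SHUFFLE (0, 0, 0, 0));
}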
/* Selects and interleaves the upper two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64-bits of data loaded from P;
   the lower two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (const __v2sf *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2sf *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the lower two SPFP values with 64-bits of data loaded from P;
   the upper two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (const __v2sf *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2sf *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}

/* Return the contents of the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr() & _MM_EXCEPT_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr() & _MM_MASK_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr() & _MM_ROUND_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}
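/* Example: MXCSR updates are read-modify-write through _mm_getcsr and
   _mm_setcsr; the _MM_SET_* helpers below wrap exactly this pattern.  A
   hypothetical sketch that switches the rounding mode and returns the old
   one so a caller can restore it:  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
__example_swap_rounding_mode (unsigned int __Mode)
{
  unsigned int __Old = _mm_getcsr ();
  _mm_setcsr ((__Old & ~_MM_ROUND_MASK) | __Mode);
  return __Old & _MM_ROUND_MASK;
}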
/* Set exception bits in the control register.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_STATE(unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}

/* Create a vector with element 0 as F and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ss (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
}

/* Create a vector with all four elements equal to F.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_ps (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create a vector with element 0 as *P and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ss (float const *__P)
{
  return _mm_set_ss (*__P);
}

/* Create a vector with all four elements equal to *P.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load1_ps (float const *__P)
{
  return _mm_set1_ps (*__P);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps (float const *__P)
{
  return *(__m128 const *)__P;
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadu_ps (float const *__P)
{
  return *(__m128_u const *)__P;
}
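/* Example: _mm_load_ps requires a 16-byte-aligned address (such as memory
   from _mm_malloc (size, 16)), while _mm_loadu_ps accepts any address.  A
   hypothetical helper that takes the aligned path when it can:  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
__example_load_any_ps (float const *__P)
{
  if (((__SIZE_TYPE__) __P) & 0xf)
    return _mm_loadu_ps (__P);
  return _mm_load_ps (__P);
}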
/* Load four SPFP values in reverse order.  The address must be aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = *(__v4sf const *)__P;
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}

/* Create the vector [Z Y X W].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
  return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
}

/* Create the vector [W X Y Z].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
}

/* Stores the lower SPFP value.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ss (float *__P, __m128 __A)
{
  *__P = ((__v4sf)__A)[0];
}

extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_f32 (__m128 __A)
{
  return ((__v4sf)__A)[0];
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps (float *__P, __m128 __A)
{
  *(__m128 *)__P = __A;
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeu_ps (float *__P, __m128 __A)
{
  *(__m128_u *)__P = __A;
}

/* Store the lower SPFP value across four words.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  _mm_storeu_ps (__P, __tmp);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  _mm_store_ps (__P, __tmp);
}
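/* Worked example of the two argument orders: _mm_set_ps lists elements
   from high to low and _mm_setr_ps from low to high, so
   _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f) and _mm_setr_ps (1.0f, 2.0f, 3.0f, 4.0f)
   both build the vector whose element I holds I + 1.  */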
/* Sets the low SPFP value of A from the low value of B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_shuffle ((__v4sf)__A, (__v4sf)__B,
				     __extension__
				     (__attribute__((__vector_size__ (16))) int)
				     {4,1,2,3});
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_pi16 (__m64 const __A, int const __N)
{
  return (unsigned short) __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pextrw (__m64 const __A, int const __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N) \
  ((int) (unsigned short) __builtin_ia32_vec_ext_v4hi ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pextrw(A, N) _mm_extract_pi16(A, N)
#endif

/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
{
  return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pinsrw (__m64 const __A, int const __D, int const __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(__m64)(A), \
					(int)(D), (int)(N)))

#define _m_pinsrw(A, D, N) _mm_insert_pi16(A, D, N)
#endif

/* Compute the element-wise maximum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}
/* Compute the element-wise minimum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_pi16 (__m64 __A, int const __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pshufw (__m64 __A, int const __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pshufw(A, N) _mm_shuffle_pi16 (A, N)
#endif
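/* Example: _MM_SHUFFLE builds the selector here as well.  This hypothetical
   helper reverses the four 16-bit words of __A:  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
__example_reverse_pi16 (__m64 __A)
{
  return _mm_shuffle_pi16 (__A, _MM_SHUFFLE (0, 1, 2, 3));
}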
/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
#ifdef __MMX_WITH_SSE__
  /* Emulate MMX maskmovq with SSE2 maskmovdqu and handle unmapped bits
     64:127 at address __P.  */
  typedef long long __v2di __attribute__ ((__vector_size__ (16)));
  typedef char __v16qi __attribute__ ((__vector_size__ (16)));
  /* Zero-extend __A and __N to 128 bits.  */
  __v2di __A128 = __extension__ (__v2di) { ((__v1di) __A)[0], 0 };
  __v2di __N128 = __extension__ (__v2di) { ((__v1di) __N)[0], 0 };

  /* Check the alignment of __P.  */
  __SIZE_TYPE__ offset = ((__SIZE_TYPE__) __P) & 0xf;
  if (offset)
    {
      /* If the misalignment of __P > 8, subtract __P by 8 bytes.
	 Otherwise, subtract __P by the misalignment.  */
      if (offset > 8)
	offset = 8;
      __P = (char *) (((__SIZE_TYPE__) __P) - offset);

      /* Shift __A128 and __N128 to the left by the adjustment.  */
      switch (offset)
	{
	case 1:
	  __A128 = __builtin_ia32_pslldqi128 (__A128, 8);
	  __N128 = __builtin_ia32_pslldqi128 (__N128, 8);
	  break;
	case 2:
	  __A128 = __builtin_ia32_pslldqi128 (__A128, 2 * 8);
	  __N128 = __builtin_ia32_pslldqi128 (__N128, 2 * 8);
	  break;
	case 3:
	  __A128 = __builtin_ia32_pslldqi128 (__A128, 3 * 8);
	  __N128 = __builtin_ia32_pslldqi128 (__N128, 3 * 8);
	  break;
	case 4:
	  __A128 = __builtin_ia32_pslldqi128 (__A128, 4 * 8);
	  __N128 = __builtin_ia32_pslldqi128 (__N128, 4 * 8);
	  break;
	case 5:
	  __A128 = __builtin_ia32_pslldqi128 (__A128, 5 * 8);
	  __N128 = __builtin_ia32_pslldqi128 (__N128, 5 * 8);
	  break;
	case 6:
	  __A128 = __builtin_ia32_pslldqi128 (__A128, 6 * 8);
	  __N128 = __builtin_ia32_pslldqi128 (__N128, 6 * 8);
	  break;
	case 7:
	  __A128 = __builtin_ia32_pslldqi128 (__A128, 7 * 8);
	  __N128 = __builtin_ia32_pslldqi128 (__N128, 7 * 8);
	  break;
	case 8:
	  __A128 = __builtin_ia32_pslldqi128 (__A128, 8 * 8);
	  __N128 = __builtin_ia32_pslldqi128 (__N128, 8 * 8);
	  break;
	default:
	  break;
	}
    }
  __builtin_ia32_maskmovdqu ((__v16qi)__A128, (__v16qi)__N128, __P);
#else
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
#endif
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}
/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}

/* Stores the data in A to the address P without polluting the caches.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}
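/* Example: non-temporal stores bypass the cache, so the data must be
   published with _mm_sfence before another processor may safely read it.
   A hypothetical helper that fills __N floats, assuming __P is 16-byte
   aligned and __N is a multiple of 4:  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
__example_stream_fill_ps (float *__P, __m128 __V, int __N)
{
  int __i;
  for (__i = 0; __i < __N; __i += 4)
    _mm_stream_ps (__P + __i, __V);
  _mm_sfence ();
}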
/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \
  __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1); \
  __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3); \
  __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1); \
  __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3); \
  (row0) = __builtin_ia32_movlhps (__t0, __t1); \
  (row1) = __builtin_ia32_movhlps (__t1, __t0); \
  (row2) = __builtin_ia32_movlhps (__t2, __t3); \
  (row3) = __builtin_ia32_movhlps (__t3, __t2); \
} while (0)

/* For backward source compatibility.  */
# include <emmintrin.h>

#ifdef __DISABLE_SSE__
#undef __DISABLE_SSE__
#pragma GCC pop_options
#endif /* __DISABLE_SSE__ */

/* The execution of the next instruction is delayed by an implementation-
   specific amount of time.  The instruction does not modify the
   architectural state.  This is after the pop_options pragma because
   it does not require SSE support in the processor--the encoding is a
   nop on processors that do not support it.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_pause (void)
{
  __builtin_ia32_pause ();
}

#endif /* _XMMINTRIN_H_INCLUDED */