/* Copyright (C) 2007-2022 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.
*/ 23 1.1 mrg 24 1.1 mrg /* Implemented from the specification included in the AMD Programmers 25 1.1 mrg Manual Update, version 2.x */ 26 1.1 mrg 27 1.1 mrg #ifndef _AMMINTRIN_H_INCLUDED 28 1.1 mrg #define _AMMINTRIN_H_INCLUDED 29 1.1 mrg 30 1.1 mrg /* We need definitions from the SSE3, SSE2 and SSE header files*/ 31 1.1 mrg #include <pmmintrin.h> 32 1.1 mrg 33 1.5 mrg #ifndef __SSE4A__ 34 1.5 mrg #pragma GCC push_options 35 1.5 mrg #pragma GCC target("sse4a") 36 1.5 mrg #define __DISABLE_SSE4A__ 37 1.5 mrg #endif /* __SSE4A__ */ 38 1.5 mrg 39 1.1 mrg extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) 40 1.1 mrg _mm_stream_sd (double * __P, __m128d __Y) 41 1.1 mrg { 42 1.1 mrg __builtin_ia32_movntsd (__P, (__v2df) __Y); 43 1.1 mrg } 44 1.1 mrg 45 1.1 mrg extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) 46 1.1 mrg _mm_stream_ss (float * __P, __m128 __Y) 47 1.1 mrg { 48 1.1 mrg __builtin_ia32_movntss (__P, (__v4sf) __Y); 49 1.1 mrg } 50 1.1 mrg 51 1.1 mrg extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) 52 1.1 mrg _mm_extract_si64 (__m128i __X, __m128i __Y) 53 1.1 mrg { 54 1.1 mrg return (__m128i) __builtin_ia32_extrq ((__v2di) __X, (__v16qi) __Y); 55 1.1 mrg } 56 1.1 mrg 57 1.1 mrg #ifdef __OPTIMIZE__ 58 1.1 mrg extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) 59 1.1 mrg _mm_extracti_si64 (__m128i __X, unsigned const int __I, unsigned const int __L) 60 1.1 mrg { 61 1.1 mrg return (__m128i) __builtin_ia32_extrqi ((__v2di) __X, __I, __L); 62 1.1 mrg } 63 1.1 mrg #else 64 1.1 mrg #define _mm_extracti_si64(X, I, L) \ 65 1.1 mrg ((__m128i) __builtin_ia32_extrqi ((__v2di)(__m128i)(X), \ 66 1.1 mrg (unsigned int)(I), (unsigned int)(L))) 67 1.1 mrg #endif 68 1.1 mrg 69 1.1 mrg extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) 70 1.1 mrg _mm_insert_si64 (__m128i __X,__m128i 
__Y) 71 1.1 mrg { 72 1.1 mrg return (__m128i) __builtin_ia32_insertq ((__v2di)__X, (__v2di)__Y); 73 1.1 mrg } 74 1.1 mrg 75 1.1 mrg #ifdef __OPTIMIZE__ 76 1.1 mrg extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) 77 1.1 mrg _mm_inserti_si64(__m128i __X, __m128i __Y, unsigned const int __I, unsigned const int __L) 78 1.1 mrg { 79 1.1 mrg return (__m128i) __builtin_ia32_insertqi ((__v2di)__X, (__v2di)__Y, __I, __L); 80 1.1 mrg } 81 1.1 mrg #else 82 1.1 mrg #define _mm_inserti_si64(X, Y, I, L) \ 83 1.1 mrg ((__m128i) __builtin_ia32_insertqi ((__v2di)(__m128i)(X), \ 84 1.1 mrg (__v2di)(__m128i)(Y), \ 85 1.1 mrg (unsigned int)(I), (unsigned int)(L))) 86 1.1 mrg #endif 87 1.1 mrg 88 1.5 mrg #ifdef __DISABLE_SSE4A__ 89 1.5 mrg #undef __DISABLE_SSE4A__ 90 1.5 mrg #pragma GCC pop_options 91 1.5 mrg #endif /* __DISABLE_SSE4A__ */ 92 1.1 mrg 93 1.1 mrg #endif /* _AMMINTRIN_H_INCLUDED */ 94