/* Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Implemented from the specification included in the AMD Programmers
   Manual Update, version 2.x */
#ifndef _AMMINTRIN_H_INCLUDED
#define _AMMINTRIN_H_INCLUDED

#ifndef __SSE4A__
# error "SSE4A instruction set not enabled"
#else

/* We need definitions from the SSE3, SSE2 and SSE header files.  */
#include <pmmintrin.h>
/* Store the low double-precision element of __Y to *__P with a
   non-temporal (streaming) hint that bypasses the cache (MOVNTSD).  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_sd (double * __P, __m128d __Y)
{
  __builtin_ia32_movntsd (__P, (__v2df) __Y);
}

/* Store the low single-precision element of __Y to *__P with a
   non-temporal (streaming) hint that bypasses the cache (MOVNTSS).  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_ss (float * __P, __m128 __Y)
{
  __builtin_ia32_movntss (__P, (__v4sf) __Y);
}

49 1.1 mrg extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
50 1.1 mrg _mm_extract_si64 (__m128i __X, __m128i __Y)
51 1.1 mrg {
52 1.1 mrg return (__m128i) __builtin_ia32_extrq ((__v2di) __X, (__v16qi) __Y);
53 1.1 mrg }
54 1.1 mrg
#ifdef __OPTIMIZE__
/* Extract an __L-bit field starting at bit __I from the low quadword of
   __X (EXTRQ, immediate form).  __I and __L must be compile-time
   constants; the macro form below is used when not optimizing so the
   builtin still sees literal immediates.  */
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extracti_si64 (__m128i __X, unsigned const int __I, unsigned const int __L)
{
  return (__m128i) __builtin_ia32_extrqi ((__v2di) __X, __I, __L);
}
#else
#define _mm_extracti_si64(X, I, L)					\
  ((__m128i) __builtin_ia32_extrqi ((__v2di)(__m128i)(X),		\
				    (unsigned int)(I), (unsigned int)(L)))
#endif

67 1.1 mrg extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
68 1.1 mrg _mm_insert_si64 (__m128i __X,__m128i __Y)
69 1.1 mrg {
70 1.1 mrg return (__m128i) __builtin_ia32_insertq ((__v2di)__X, (__v2di)__Y);
71 1.1 mrg }
72 1.1 mrg
#ifdef __OPTIMIZE__
/* Insert the low __L bits of __Y into the low quadword of __X starting
   at bit __I (INSERTQ, immediate form).  __I and __L must be
   compile-time constants; the macro form below is used when not
   optimizing so the builtin still sees literal immediates.  */
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_inserti_si64 (__m128i __X, __m128i __Y, unsigned const int __I, unsigned const int __L)
{
  return (__m128i) __builtin_ia32_insertqi ((__v2di) __X, (__v2di) __Y, __I, __L);
}
#else
#define _mm_inserti_si64(X, Y, I, L)					\
  ((__m128i) __builtin_ia32_insertqi ((__v2di)(__m128i)(X),		\
				      (__v2di)(__m128i)(Y),		\
				      (unsigned int)(I), (unsigned int)(L)))
#endif

#endif /* __SSE4A__ */

#endif /* _AMMINTRIN_H_INCLUDED */