spu2vmx.h revision 1.1 1 1.1 mrg /* Cell SPU 2 VMX intrinsics header
2 1.1 mrg Copyright (C) 2007, 2009 Free Software Foundation, Inc.
3 1.1 mrg
4 1.1 mrg This file is free software; you can redistribute it and/or modify it under
5 1.1 mrg the terms of the GNU General Public License as published by the Free
6 1.1 mrg Software Foundation; either version 3 of the License, or (at your option)
7 1.1 mrg any later version.
8 1.1 mrg
9 1.1 mrg This file is distributed in the hope that it will be useful, but WITHOUT
10 1.1 mrg ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 1.1 mrg FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 1.1 mrg for more details.
13 1.1 mrg
14 1.1 mrg Under Section 7 of GPL version 3, you are granted additional
15 1.1 mrg permissions described in the GCC Runtime Library Exception, version
16 1.1 mrg 3.1, as published by the Free Software Foundation.
17 1.1 mrg
18 1.1 mrg You should have received a copy of the GNU General Public License and
19 1.1 mrg a copy of the GCC Runtime Library Exception along with this program;
20 1.1 mrg see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
21 1.1 mrg <http://www.gnu.org/licenses/>. */
22 1.1 mrg
23 1.1 mrg #ifndef _SPU2VMX_H_
24 1.1 mrg #define _SPU2VMX_H_ 1
25 1.1 mrg
26 1.1 mrg #ifdef __cplusplus
27 1.1 mrg
28 1.1 mrg #ifndef __SPU__
29 1.1 mrg
30 1.1 mrg #include <si2vmx.h>
31 1.1 mrg
32 1.1 mrg /* spu_absd (absolute difference)
33 1.1 mrg * ========
34 1.1 mrg */
35 1.1 mrg static __inline vec_uchar16 spu_absd(vec_uchar16 a, vec_uchar16 b)
36 1.1 mrg {
37 1.1 mrg return ((vec_uchar16)(si_absdb((qword)(a), (qword)(b))));
38 1.1 mrg
39 1.1 mrg }
40 1.1 mrg
41 1.1 mrg
/* spu_add
 * =======
 * Element-wise addition.  The intrinsic is selected by element width:
 * si_a/si_ai for 32-bit words, si_ah/si_ahi for 16-bit halfwords,
 * si_fa for single-precision floats, si_dfa for doubles.  The scalar
 * overloads add an immediate value to every element.
 */
static __inline vec_uint4 spu_add(vec_uint4 a, vec_uint4 b)
{
  return ((vec_uint4)(si_a((qword)(a), (qword)(b))));
}

static __inline vec_int4 spu_add(vec_int4 a, vec_int4 b)
{
  return ((vec_int4)(si_a((qword)(a), (qword)(b))));
}

static __inline vec_ushort8 spu_add(vec_ushort8 a, vec_ushort8 b)
{
  return ((vec_ushort8)(si_ah((qword)(a), (qword)(b))));
}

static __inline vec_short8 spu_add(vec_short8 a, vec_short8 b)
{
  return ((vec_short8)(si_ah((qword)(a), (qword)(b))));
}

/* Scalar immediate forms: b is cast to the signed type the si_*i
   intrinsic expects; the bit pattern of the addend is what matters. */
static __inline vec_uint4 spu_add(vec_uint4 a, unsigned int b)
{
  return ((vec_uint4)(si_ai((qword)(a), (int)(b))));
}

static __inline vec_int4 spu_add(vec_int4 a, int b)
{
  return ((vec_int4)(si_ai((qword)(a), b)));
}

static __inline vec_ushort8 spu_add(vec_ushort8 a, unsigned short b)
{
  return ((vec_ushort8)(si_ahi((qword)(a), (short)(b))));
}

static __inline vec_short8 spu_add(vec_short8 a, short b)
{
  return ((vec_short8)(si_ahi((qword)(a), b)));
}

static __inline vec_float4 spu_add(vec_float4 a, vec_float4 b)
{
  return ((vec_float4)(si_fa((qword)(a), (qword)(b))));
}

static __inline vec_double2 spu_add(vec_double2 a, vec_double2 b)
{
  return ((vec_double2)(si_dfa((qword)(a), (qword)(b))));
}
94 1.1 mrg
95 1.1 mrg
96 1.1 mrg /* spu_addx
97 1.1 mrg * ========
98 1.1 mrg */
99 1.1 mrg static __inline vec_uint4 spu_addx(vec_uint4 a, vec_uint4 b, vec_uint4 c)
100 1.1 mrg {
101 1.1 mrg return ((vec_uint4)(si_addx((qword)(a), (qword)(b), (qword)(c))));
102 1.1 mrg }
103 1.1 mrg
104 1.1 mrg static __inline vec_int4 spu_addx(vec_int4 a, vec_int4 b, vec_int4 c)
105 1.1 mrg {
106 1.1 mrg return ((vec_int4)(si_addx((qword)(a), (qword)(b), (qword)(c))));
107 1.1 mrg }
108 1.1 mrg
109 1.1 mrg
/* spu_and
 * =======
 * Bitwise AND.  All vector-vector forms use the same 128-bit si_and;
 * the cast to the argument type merely reinterprets the result.  The
 * scalar forms AND every element with an immediate, using the
 * width-specific immediate intrinsics (si_andbi/si_andhi/si_andi).
 */
static __inline vec_uchar16 spu_and(vec_uchar16 a, vec_uchar16 b)
{
  return ((vec_uchar16)(si_and((qword)(a), (qword)(b))));
}

static __inline vec_char16 spu_and(vec_char16 a, vec_char16 b)
{
  return ((vec_char16)(si_and((qword)(a), (qword)(b))));
}

static __inline vec_ushort8 spu_and(vec_ushort8 a, vec_ushort8 b)
{
  return ((vec_ushort8)(si_and((qword)(a), (qword)(b))));
}

static __inline vec_short8 spu_and(vec_short8 a, vec_short8 b)
{
  return ((vec_short8)(si_and((qword)(a), (qword)(b))));
}

static __inline vec_uint4 spu_and(vec_uint4 a, vec_uint4 b)
{
  return ((vec_uint4)(si_and((qword)(a), (qword)(b))));
}

static __inline vec_int4 spu_and(vec_int4 a, vec_int4 b)
{
  return ((vec_int4)(si_and((qword)(a), (qword)(b))));
}

static __inline vec_float4 spu_and(vec_float4 a, vec_float4 b)
{
  return ((vec_float4)(si_and((qword)(a), (qword)(b))));
}

static __inline vec_ullong2 spu_and(vec_ullong2 a, vec_ullong2 b)
{
  return ((vec_ullong2)(si_and((qword)(a), (qword)(b))));
}

static __inline vec_llong2 spu_and(vec_llong2 a, vec_llong2 b)
{
  return ((vec_llong2)(si_and((qword)(a), (qword)(b))));
}

static __inline vec_double2 spu_and(vec_double2 a, vec_double2 b)
{
  return ((vec_double2)(si_and((qword)(a), (qword)(b))));
}

/* Scalar immediate forms: the unsigned scalar is cast to the signed
   type the immediate intrinsic takes; the bit pattern is preserved. */
static __inline vec_uchar16 spu_and(vec_uchar16 a, unsigned char b)
{
  return ((vec_uchar16)(si_andbi((qword)(a), (signed char)(b))));
}


static __inline vec_char16 spu_and(vec_char16 a, signed char b)
{
  return ((vec_char16)(si_andbi((qword)(a), b)));
}

static __inline vec_ushort8 spu_and(vec_ushort8 a, unsigned short b)
{
  return ((vec_ushort8)(si_andhi((qword)(a), (signed short)(b))));
}

static __inline vec_short8 spu_and(vec_short8 a, signed short b)
{
  return ((vec_short8)(si_andhi((qword)(a), b)));
}

static __inline vec_uint4 spu_and(vec_uint4 a, unsigned int b)
{
  return ((vec_uint4)(si_andi((qword)(a), (signed int)(b))));
}

static __inline vec_int4 spu_and(vec_int4 a, signed int b)
{
  return ((vec_int4)(si_andi((qword)(a), b)));
}
193 1.1 mrg
194 1.1 mrg
/* spu_andc (AND with complement)
 * ========
 * Maps directly onto the VMX vec_andc operation.
 */
#define spu_andc(_a, _b) vec_andc(_a, _b)


/* spu_avg (average of two vectors)
 * =======
 * Maps directly onto the VMX vec_avg operation.
 */
#define spu_avg(_a, _b) vec_avg(_a, _b)


/* spu_bisled
 * spu_bisled_d
 * spu_bisled_e
 * ============
 * Branch-indirect-and-set-link-if-external-data has no VMX
 * equivalent; these expand to nothing (calls are discarded).
 */
#define spu_bisled(_func) /* not mappable */
#define spu_bisled_d(_func) /* not mappable */
#define spu_bisled_e(_func) /* not mappable */
215 1.1 mrg
/* spu_cmpabseq
 * ============
 * Compare absolute values for equality; returns an all-ones/all-zeros
 * mask per element (si_fcmeq for floats, si_dfcmeq for doubles).
 */
static __inline vec_uint4 spu_cmpabseq(vec_float4 a, vec_float4 b)
{
  return ((vec_uint4)(si_fcmeq((qword)(a), (qword)(b))));

}

static __inline vec_ullong2 spu_cmpabseq(vec_double2 a, vec_double2 b)
{
  return ((vec_ullong2)(si_dfcmeq((qword)(a), (qword)(b))));
}


/* spu_cmpabsgt
 * ============
 * Compare absolute values for greater-than; mask result per element
 * (si_fcmgt for floats, si_dfcmgt for doubles).
 */
static __inline vec_uint4 spu_cmpabsgt(vec_float4 a, vec_float4 b)
{
  return ((vec_uint4)(si_fcmgt((qword)(a), (qword)(b))));
}

static __inline vec_ullong2 spu_cmpabsgt(vec_double2 a, vec_double2 b)
{
  return ((vec_ullong2)(si_dfcmgt((qword)(a), (qword)(b))));
}
243 1.1 mrg
244 1.1 mrg
/* spu_cmpeq
 * ========
 * Element-wise equality compare; each element of the result is an
 * all-ones or all-zeros mask of the element width.  Intrinsic choice
 * depends only on width: si_ceqb (bytes), si_ceqh (halfwords),
 * si_ceq (words), si_fceq (floats), si_dfceq (doubles).  Scalar
 * overloads compare every element against an immediate.
 */
static __inline vec_uchar16 spu_cmpeq(vec_uchar16 a, vec_uchar16 b)
{
  return ((vec_uchar16)(si_ceqb((qword)(a), (qword)(b))));
}

static __inline vec_uchar16 spu_cmpeq(vec_char16 a, vec_char16 b)
{
  return ((vec_uchar16)(si_ceqb((qword)(a), (qword)(b))));
}

static __inline vec_ushort8 spu_cmpeq(vec_ushort8 a, vec_ushort8 b)
{
  return ((vec_ushort8)(si_ceqh((qword)(a), (qword)(b))));
}

static __inline vec_ushort8 spu_cmpeq(vec_short8 a, vec_short8 b)
{
  return ((vec_ushort8)(si_ceqh((qword)(a), (qword)(b))));
}

static __inline vec_uint4 spu_cmpeq(vec_uint4 a, vec_uint4 b)
{
  return ((vec_uint4)(si_ceq((qword)(a), (qword)(b))));
}

static __inline vec_uint4 spu_cmpeq(vec_int4 a, vec_int4 b)
{
  return ((vec_uint4)(si_ceq((qword)(a), (qword)(b))));
}

static __inline vec_uint4 spu_cmpeq(vec_float4 a, vec_float4 b)
{
  return ((vec_uint4)(si_fceq((qword)(a), (qword)(b))));
}

/* Scalar immediate forms. */
static __inline vec_uchar16 spu_cmpeq(vec_uchar16 a, unsigned char b)
{
  return ((vec_uchar16)(si_ceqbi((qword)(a), (signed char)(b))));
}

static __inline vec_uchar16 spu_cmpeq(vec_char16 a, signed char b)
{
  return ((vec_uchar16)(si_ceqbi((qword)(a), b)));
}

static __inline vec_ushort8 spu_cmpeq(vec_ushort8 a, unsigned short b)
{
  return ((vec_ushort8)(si_ceqhi((qword)(a), (signed short)(b))));
}

static __inline vec_ushort8 spu_cmpeq(vec_short8 a, signed short b)
{
  return ((vec_ushort8)(si_ceqhi((qword)(a), b)));
}

static __inline vec_uint4 spu_cmpeq(vec_uint4 a, unsigned int b)
{
  return ((vec_uint4)(si_ceqi((qword)(a), (signed int)(b))));
}

static __inline vec_uint4 spu_cmpeq(vec_int4 a, signed int b)
{
  return ((vec_uint4)(si_ceqi((qword)(a), b)));
}

static __inline vec_ullong2 spu_cmpeq(vec_double2 a, vec_double2 b)
{
  return ((vec_ullong2)(si_dfceq((qword)(a), (qword)(b))));
}
317 1.1 mrg
318 1.1 mrg
/* spu_cmpgt
 * ========
 * Element-wise greater-than compare, mask result.  Unlike spu_cmpeq,
 * signedness matters here: unsigned operands use the logical-compare
 * intrinsics (si_clgtb/si_clgth/si_clgt), signed operands the
 * arithmetic ones (si_cgtb/si_cgth/si_cgt).  Floats use si_fcgt,
 * doubles si_dfcgt.  Scalar overloads compare against an immediate.
 */
static __inline vec_uchar16 spu_cmpgt(vec_uchar16 a, vec_uchar16 b)
{
  return ((vec_uchar16)(si_clgtb((qword)(a), (qword)(b))));
}

static __inline vec_uchar16 spu_cmpgt(vec_char16 a, vec_char16 b)
{
  return ((vec_uchar16)(si_cgtb((qword)(a), (qword)(b))));
}

static __inline vec_ushort8 spu_cmpgt(vec_ushort8 a, vec_ushort8 b)
{
  return ((vec_ushort8)(si_clgth((qword)(a), (qword)(b))));
}

static __inline vec_ushort8 spu_cmpgt(vec_short8 a, vec_short8 b)
{
  return ((vec_ushort8)(si_cgth((qword)(a), (qword)(b))));
}

static __inline vec_uint4 spu_cmpgt(vec_uint4 a, vec_uint4 b)
{
  return ((vec_uint4)(si_clgt((qword)(a), (qword)(b))));
}

static __inline vec_uint4 spu_cmpgt(vec_int4 a, vec_int4 b)
{
  return ((vec_uint4)(si_cgt((qword)(a), (qword)(b))));
}

static __inline vec_uint4 spu_cmpgt(vec_float4 a, vec_float4 b)
{
  return ((vec_uint4)(si_fcgt((qword)(a), (qword)(b))));
}

/* Scalar immediate forms. */
static __inline vec_uchar16 spu_cmpgt(vec_uchar16 a, unsigned char b)
{
  return ((vec_uchar16)(si_clgtbi((qword)(a), b)));
}

static __inline vec_uchar16 spu_cmpgt(vec_char16 a, signed char b)
{
  return ((vec_uchar16)(si_cgtbi((qword)(a), b)));
}

static __inline vec_ushort8 spu_cmpgt(vec_ushort8 a, unsigned short b)
{
  return ((vec_ushort8)(si_clgthi((qword)(a), b)));
}

static __inline vec_ushort8 spu_cmpgt(vec_short8 a, signed short b)
{
  return ((vec_ushort8)(si_cgthi((qword)(a), b)));
}

static __inline vec_uint4 spu_cmpgt(vec_uint4 a, unsigned int b)
{
  return ((vec_uint4)(si_clgti((qword)(a), b)));
}

static __inline vec_uint4 spu_cmpgt(vec_int4 a, signed int b)
{
  return ((vec_uint4)(si_cgti((qword)(a), b)));
}

static __inline vec_ullong2 spu_cmpgt(vec_double2 a, vec_double2 b)
{
  return ((vec_ullong2)(si_dfcgt((qword)(a), (qword)(b))));
}
391 1.1 mrg
392 1.1 mrg
393 1.1 mrg /* spu_cntb
394 1.1 mrg * ========
395 1.1 mrg */
396 1.1 mrg static __inline vec_uchar16 spu_cntb(vec_uchar16 a)
397 1.1 mrg {
398 1.1 mrg return ((vec_uchar16)(si_cntb((qword)(a))));
399 1.1 mrg }
400 1.1 mrg
401 1.1 mrg
402 1.1 mrg static __inline vec_uchar16 spu_cntb(vec_char16 a)
403 1.1 mrg {
404 1.1 mrg return ((vec_uchar16)(si_cntb((qword)(a))));
405 1.1 mrg }
406 1.1 mrg
407 1.1 mrg /* spu_cntlz
408 1.1 mrg * =========
409 1.1 mrg */
410 1.1 mrg static __inline vec_uint4 spu_cntlz(vec_uint4 a)
411 1.1 mrg {
412 1.1 mrg return ((vec_uint4)(si_clz((qword)(a))));
413 1.1 mrg }
414 1.1 mrg
415 1.1 mrg static __inline vec_uint4 spu_cntlz(vec_int4 a)
416 1.1 mrg {
417 1.1 mrg return ((vec_uint4)(si_clz((qword)(a))));
418 1.1 mrg }
419 1.1 mrg
420 1.1 mrg static __inline vec_uint4 spu_cntlz(vec_float4 a)
421 1.1 mrg {
422 1.1 mrg return ((vec_uint4)(si_clz((qword)(a))));
423 1.1 mrg }
424 1.1 mrg
/* spu_testsv (test special values)
 * ==========
 * Wraps si_dftsv; b selects which double-precision special-value
 * classes to test for (bit-mask immediate).
 */
static __inline vec_ullong2 spu_testsv(vec_double2 a, char b)
{
  return ((vec_ullong2)(si_dftsv((qword)(a), b)));
}

/* spu_convtf (convert to float)
 * ==========
 * Maps onto VMX vec_ctf; _b is the scale factor exponent.
 */
#define spu_convtf(_a, _b) (vec_ctf(_a, _b))

/* spu_convts (convert to signed int)
 * ==========
 * Maps onto VMX vec_cts.
 */
#define spu_convts(_a, _b) (vec_cts(_a, _b))

/* spu_convtu (convert to unsigned int)
 * ==========
 * Maps onto VMX vec_ctu.
 */
#define spu_convtu(_a, _b) (vec_ctu(_a, _b))


/* spu_dsync
 * ========
 * DMA synchronization is not needed on VMX; expands to nothing.
 */
#define spu_dsync()
453 1.1 mrg
/* spu_eqv (bitwise equivalence, i.e. NOT-XOR)
 * =======
 * All overloads reinterpret their operands and call the single
 * 128-bit si_eqv intrinsic; only the result cast differs.
 */
static __inline vec_uchar16 spu_eqv(vec_uchar16 a, vec_uchar16 b)
{
  return ((vec_uchar16)(si_eqv((qword)(a), (qword)(b))));
}

static __inline vec_char16 spu_eqv(vec_char16 a, vec_char16 b)
{
  return ((vec_char16)(si_eqv((qword)(a), (qword)(b))));
}

static __inline vec_ushort8 spu_eqv(vec_ushort8 a, vec_ushort8 b)
{
  return ((vec_ushort8)(si_eqv((qword)(a), (qword)(b))));
}

static __inline vec_short8 spu_eqv(vec_short8 a, vec_short8 b)
{
  return ((vec_short8)(si_eqv((qword)(a), (qword)(b))));
}

static __inline vec_uint4 spu_eqv(vec_uint4 a, vec_uint4 b)
{
  return ((vec_uint4)(si_eqv((qword)(a), (qword)(b))));
}

static __inline vec_int4 spu_eqv(vec_int4 a, vec_int4 b)
{
  return ((vec_int4)(si_eqv((qword)(a), (qword)(b))));
}

static __inline vec_float4 spu_eqv(vec_float4 a, vec_float4 b)
{
  return ((vec_float4)(si_eqv((qword)(a), (qword)(b))));
}

static __inline vec_ullong2 spu_eqv(vec_ullong2 a, vec_ullong2 b)
{
  return ((vec_ullong2)(si_eqv((qword)(a), (qword)(b))));
}

static __inline vec_llong2 spu_eqv(vec_llong2 a, vec_llong2 b)
{
  return ((vec_llong2)(si_eqv((qword)(a), (qword)(b))));
}

static __inline vec_double2 spu_eqv(vec_double2 a, vec_double2 b)
{
  return ((vec_double2)(si_eqv((qword)(a), (qword)(b))));
}
506 1.1 mrg
507 1.1 mrg /* spu_extend
508 1.1 mrg * ========
509 1.1 mrg */
510 1.1 mrg static __inline vec_short8 spu_extend(vec_char16 a)
511 1.1 mrg {
512 1.1 mrg return ((vec_short8)(si_xsbh((qword)(a))));
513 1.1 mrg }
514 1.1 mrg
515 1.1 mrg
516 1.1 mrg static __inline vec_int4 spu_extend(vec_short8 a)
517 1.1 mrg {
518 1.1 mrg return ((vec_int4)(si_xshw((qword)(a))));
519 1.1 mrg }
520 1.1 mrg
521 1.1 mrg static __inline vec_llong2 spu_extend(vec_int4 a)
522 1.1 mrg {
523 1.1 mrg return ((vec_llong2)(si_xswd((qword)(a))));
524 1.1 mrg }
525 1.1 mrg
526 1.1 mrg
527 1.1 mrg static __inline vec_double2 spu_extend(vec_float4 a)
528 1.1 mrg {
529 1.1 mrg return ((vec_double2)(si_fesd((qword)(a))));
530 1.1 mrg }
531 1.1 mrg
532 1.1 mrg
/* spu_extract
 * ========
 * Return element 'element' of the vector as a scalar.  Implemented by
 * type-punning through a union (safe for this purpose in GCC).  The
 * index is masked to the valid range (e.g. & 15 for 16 bytes) rather
 * than bounds-checked, matching SPU semantics of ignoring high bits.
 */
static __inline unsigned char spu_extract(vec_uchar16 a, int element)
{
  union {
    vec_uchar16 v;
    unsigned char c[16];
  } in;

  in.v = a;
  return (in.c[element & 15]);
}

static __inline signed char spu_extract(vec_char16 a, int element)
{
  union {
    vec_char16 v;
    signed char c[16];
  } in;

  in.v = a;
  return (in.c[element & 15]);
}

static __inline unsigned short spu_extract(vec_ushort8 a, int element)
{
  union {
    vec_ushort8 v;
    unsigned short s[8];
  } in;

  in.v = a;
  return (in.s[element & 7]);
}

static __inline signed short spu_extract(vec_short8 a, int element)
{
  union {
    vec_short8 v;
    signed short s[8];
  } in;

  in.v = a;
  return (in.s[element & 7]);
}

static __inline unsigned int spu_extract(vec_uint4 a, int element)
{
  union {
    vec_uint4 v;
    unsigned int i[4];
  } in;

  in.v = a;
  return (in.i[element & 3]);
}

static __inline signed int spu_extract(vec_int4 a, int element)
{
  union {
    vec_int4 v;
    signed int i[4];
  } in;

  in.v = a;
  return (in.i[element & 3]);
}

static __inline float spu_extract(vec_float4 a, int element)
{
  union {
    vec_float4 v;
    float f[4];
  } in;

  in.v = a;
  return (in.f[element & 3]);
}

static __inline unsigned long long spu_extract(vec_ullong2 a, int element)
{
  union {
    vec_ullong2 v;
    unsigned long long l[2];
  } in;

  in.v = a;
  return (in.l[element & 1]);
}

static __inline signed long long spu_extract(vec_llong2 a, int element)
{
  union {
    vec_llong2 v;
    signed long long l[2];
  } in;

  in.v = a;
  return (in.l[element & 1]);
}

static __inline double spu_extract(vec_double2 a, int element)
{
  union {
    vec_double2 v;
    double d[2];
  } in;

  in.v = a;
  return (in.d[element & 1]);
}
645 1.1 mrg
/* spu_gather
 * ========
 * Gather the least-significant bit of each element into a single
 * mask value, selected by element width: si_gbb (bytes), si_gbh
 * (halfwords), si_gb (words).  Result is always a vec_uint4.
 */
static __inline vec_uint4 spu_gather(vec_uchar16 a)
{
  return ((vec_uint4)(si_gbb((qword)(a))));
}


static __inline vec_uint4 spu_gather(vec_char16 a)
{
  return ((vec_uint4)(si_gbb((qword)(a))));
}

static __inline vec_uint4 spu_gather(vec_ushort8 a)
{
  return ((vec_uint4)(si_gbh((qword)(a))));
}

static __inline vec_uint4 spu_gather(vec_short8 a)
{
  return ((vec_uint4)(si_gbh((qword)(a))));
}


static __inline vec_uint4 spu_gather(vec_uint4 a)
{
  return ((vec_uint4)(si_gb((qword)(a))));
}

static __inline vec_uint4 spu_gather(vec_int4 a)
{
  return ((vec_uint4)(si_gb((qword)(a))));
}

static __inline vec_uint4 spu_gather(vec_float4 a)
{
  return ((vec_uint4)(si_gb((qword)(a))));
}
685 1.1 mrg
/* spu_genb (generate borrow)
 * ========
 * NOTE: the operand order is deliberately swapped — si_bg/si_bgx take
 * (b, a), the reverse of the spu_genb(a, b) argument order.
 */
static __inline vec_uint4 spu_genb(vec_uint4 a, vec_uint4 b)
{
  return ((vec_uint4)(si_bg((qword)(b), (qword)(a))));
}

static __inline vec_int4 spu_genb(vec_int4 a, vec_int4 b)
{
  return ((vec_int4)(si_bg((qword)(b), (qword)(a))));
}

/* spu_genbx (generate borrow, extended)
 * =========
 * As spu_genb but taking borrow-in bits in c; operands a and b are
 * swapped for si_bgx, same as in spu_genb above.
 */
static __inline vec_uint4 spu_genbx(vec_uint4 a, vec_uint4 b, vec_uint4 c)
{
  return ((vec_uint4)(si_bgx((qword)(b), (qword)(a), (qword)(c))));
}

static __inline vec_int4 spu_genbx(vec_int4 a, vec_int4 b, vec_int4 c)
{
  return ((vec_int4)(si_bgx((qword)(b), (qword)(a), (qword)(c))));
}


/* spu_genc (generate carry)
 * ========
 * Carry generation is symmetric, so operands pass through in order.
 */
static __inline vec_uint4 spu_genc(vec_uint4 a, vec_uint4 b)
{
  return ((vec_uint4)(si_cg((qword)(a), (qword)(b))));
}

static __inline vec_int4 spu_genc(vec_int4 a, vec_int4 b)
{
  return ((vec_int4)(si_cg((qword)(a), (qword)(b))));
}

/* spu_gencx (generate carry, extended)
 * =========
 * As spu_genc but taking carry-in bits in c.
 */
static __inline vec_uint4 spu_gencx(vec_uint4 a, vec_uint4 b, vec_uint4 c)
{
  return ((vec_uint4)(si_cgx((qword)(a), (qword)(b), (qword)(c))));
}

static __inline vec_int4 spu_gencx(vec_int4 a, vec_int4 b, vec_int4 c)
{
  return ((vec_int4)(si_cgx((qword)(a), (qword)(b), (qword)(c))));
}
738 1.1 mrg
739 1.1 mrg
/* spu_hcmpeq (halt if equal)
 * ========
 * Executes SPU_HALT_ACTION (supplied by the user of this header) when
 * the scalars compare equal.  Wrapped in do { } while (0) so the macro
 * behaves as a single statement — the previous bare-if form, with its
 * trailing semicolon, broke when used as the body of an if/else and
 * left the arguments unparenthesized against operator precedence.
 */
#define spu_hcmpeq(_a, _b) do { if ((_a) == (_b)) { SPU_HALT_ACTION; } } while (0)


/* spu_hcmpgt (halt if greater than)
 * ========
 * Same statement-safety treatment as spu_hcmpeq.
 */
#define spu_hcmpgt(_a, _b) do { if ((_a) > (_b)) { SPU_HALT_ACTION; } } while (0)


/* spu_idisable (disable interrupts)
 * ============
 * Not mappable to VMX; expands to the user-supplied unsupported hook.
 */
#define spu_idisable() SPU_UNSUPPORTED_ACTION


/* spu_ienable (enable interrupts)
 * ===========
 * Not mappable to VMX; expands to the user-supplied unsupported hook.
 */
#define spu_ienable() SPU_UNSUPPORTED_ACTION
762 1.1 mrg
763 1.1 mrg
/* spu_insert
 * ========
 * Return a copy of vector b with element 'element' replaced by scalar
 * a.  Implemented by type-punning through a union; the index is masked
 * to the valid range, as in spu_extract.  Signed variants forward to
 * the unsigned implementation of the same width.
 */
static __inline vec_uchar16 spu_insert(unsigned char a, vec_uchar16 b, int element)
{
  union {
    vec_uchar16 v;
    unsigned char c[16];
  } in;

  in.v = b;
  in.c[element & 15] = a;
  return (in.v);
}

static __inline vec_char16 spu_insert(signed char a, vec_char16 b, int element)
{
  return ((vec_char16)spu_insert((unsigned char)(a), (vec_uchar16)(b), element));
}

static __inline vec_ushort8 spu_insert(unsigned short a, vec_ushort8 b, int element)
{
  union {
    vec_ushort8 v;
    unsigned short s[8];
  } in;

  in.v = b;
  in.s[element & 7] = a;
  return (in.v);
}

static __inline vec_short8 spu_insert(signed short a, vec_short8 b, int element)
{
  return ((vec_short8)spu_insert((unsigned short)(a), (vec_ushort8)(b), element));
}

static __inline vec_uint4 spu_insert(unsigned int a, vec_uint4 b, int element)
{
  union {
    vec_uint4 v;
    unsigned int i[4];
  } in;

  in.v = b;
  in.i[element & 3] = a;
  return (in.v);
}

static __inline vec_int4 spu_insert(signed int a, vec_int4 b, int element)
{
  return ((vec_int4)spu_insert((unsigned int)(a), (vec_uint4)(b), element));
}

static __inline vec_float4 spu_insert(float a, vec_float4 b, int element)
{
  union {
    vec_float4 v;
    float f[4];
  } in;

  in.v = b;
  in.f[element & 3] = a;
  return (in.v);
}

static __inline vec_ullong2 spu_insert(unsigned long long a, vec_ullong2 b, int element)
{
  union {
    vec_ullong2 v;
    unsigned long long l[2];
  } in;

  in.v = b;
  in.l[element & 1] = a;
  return (in.v);
}

static __inline vec_llong2 spu_insert(signed long long a, vec_llong2 b, int element)
{
  return ((vec_llong2)spu_insert((unsigned long long)(a), (vec_ullong2)(b), element));
}

static __inline vec_double2 spu_insert(double a, vec_double2 b, int element)
{
  union {
    vec_double2 v;
    double d[2];
  } in;

  in.v = b;
  in.d[element & 1] = a;
  return (in.v);
}
858 1.1 mrg
859 1.1 mrg
/* spu_madd (multiply and add)
 * ========
 * a * b + c, per element: si_mpya for shorts producing ints,
 * si_fma for floats, si_dfma for doubles.
 */
static __inline vec_int4 spu_madd(vec_short8 a, vec_short8 b, vec_int4 c)
{
  return ((vec_int4)(si_mpya((qword)(a), (qword)(b), (qword)(c))));
}

static __inline vec_float4 spu_madd(vec_float4 a, vec_float4 b, vec_float4 c)
{
  return ((vec_float4)(si_fma((qword)(a), (qword)(b), (qword)(c))));
}

static __inline vec_double2 spu_madd(vec_double2 a, vec_double2 b, vec_double2 c)
{
  return ((vec_double2)(si_dfma((qword)(a), (qword)(b), (qword)(c))));
}
877 1.1 mrg
878 1.1 mrg
/* spu_maskb (form byte select mask from bits of the scalar)
 * ========
 * Routes the scalar through si_from_int and expands with si_fsmb.
 */
#define spu_maskb(_a) (vec_uchar16)(si_fsmb(si_from_int((int)(_a))))

/* spu_maskh (form halfword select mask)
 * ========
 */
#define spu_maskh(_a) (vec_ushort8)(si_fsmh(si_from_int((int)(_a))))


/* spu_maskw (form word select mask)
 * ========
 */
#define spu_maskw(_a) (vec_uint4)(si_fsm(si_from_int((int)(_a))))


/* spu_mfcdma32 (32-bit effective-address MFC DMA)
 * ========
 * No MFC exists on VMX; expands to nothing (the DMA is dropped).
 */
#define spu_mfcdma32(_ls, _ea, _size, _tagid, _cmd)


/* spu_mfcdma64 (64-bit effective-address MFC DMA)
 * ========
 * No MFC on VMX; expands to nothing.
 */
#define spu_mfcdma64(_ls, _eahi, _ealow, _size, _tagid, _cmd)

/* spu_mfcstat (MFC tag status)
 * ========
 * With no MFC, every tag group always reads as complete.
 */
#define spu_mfcstat(_type) 0xFFFFFFFF



/* spu_mffpscr (read floating-point status and control register)
 * ===========
 */
#define spu_mffpscr() (vec_uint4)(si_fscrrd())


/* spu_mfspr (read special-purpose register)
 * ========
 */

#define spu_mfspr(_reg) si_to_uint(si_mfspr(_reg))
927 1.1 mrg
/* spu_mhhadd (multiply high halfwords and add)
 * ==========
 * si_mpyhha for the signed variant, si_mpyhhau for the unsigned one.
 */
static __inline vec_int4 spu_mhhadd(vec_short8 a, vec_short8 b, vec_int4 c)
{
  return ((vec_int4)(si_mpyhha((qword)(a), (qword)(b), (qword)(c))));
}


static __inline vec_uint4 spu_mhhadd(vec_ushort8 a, vec_ushort8 b, vec_uint4 c)
{
  return ((vec_uint4)(si_mpyhhau((qword)(a), (qword)(b), (qword)(c))));
}


/* spu_msub (multiply and subtract: a * b - c)
 * ========
 * si_fms for floats, si_dfms for doubles.
 */
static __inline vec_float4 spu_msub(vec_float4 a, vec_float4 b, vec_float4 c)
{
  return ((vec_float4)(si_fms((qword)(a), (qword)(b), (qword)(c))));
}

static __inline vec_double2 spu_msub(vec_double2 a, vec_double2 b, vec_double2 c)
{
  return ((vec_double2)(si_dfms((qword)(a), (qword)(b), (qword)(c))));
}
955 1.1 mrg
956 1.1 mrg
/* spu_mtfpscr (write floating-point status and control register)
 * ===========
 * Not implemented on VMX; the write is silently discarded.
 */
#define spu_mtfpscr(_a)


/* spu_mtspr (write special-purpose register)
 * ========
 * Not implemented on VMX; the write is silently discarded.
 */
#define spu_mtspr(_reg, _a)
967 1.1 mrg
968 1.1 mrg
969 1.1 mrg /* spu_mul
970 1.1 mrg * ========
971 1.1 mrg */
972 1.1 mrg static __inline vec_float4 spu_mul(vec_float4 a, vec_float4 b)
973 1.1 mrg {
974 1.1 mrg return ((vec_float4)(si_fm((qword)(a), (qword)(b))));
975 1.1 mrg }
976 1.1 mrg
977 1.1 mrg static __inline vec_double2 spu_mul(vec_double2 a, vec_double2 b)
978 1.1 mrg {
979 1.1 mrg return ((vec_double2)(si_dfm((qword)(a), (qword)(b))));
980 1.1 mrg }
981 1.1 mrg
982 1.1 mrg
983 1.1 mrg /* spu_mulh
984 1.1 mrg * ========
985 1.1 mrg */
986 1.1 mrg static __inline vec_int4 spu_mulh(vec_short8 a, vec_short8 b)
987 1.1 mrg {
988 1.1 mrg return ((vec_int4)(si_mpyh((qword)(a), (qword)(b))));
989 1.1 mrg }
990 1.1 mrg
/* spu_mule (multiply even elements)
 * =========
 * Maps directly onto VMX vec_mule.
 */
#define spu_mule(_a, _b) vec_mule(_a, _b)



/* spu_mulo (multiply odd halfwords)
 * ========
 * si_mpy/si_mpyu for vector operands, si_mpyi/si_mpyui for a scalar
 * immediate second operand.
 */
static __inline vec_int4 spu_mulo(vec_short8 a, vec_short8 b)
{
  return ((vec_int4)(si_mpy((qword)(a), (qword)(b))));
}


static __inline vec_uint4 spu_mulo(vec_ushort8 a, vec_ushort8 b)
{
  return ((vec_uint4)(si_mpyu((qword)(a), (qword)(b))));
}


static __inline vec_int4 spu_mulo(vec_short8 a, short b)
{
  return ((vec_int4)(si_mpyi((qword)(a), b)));
}

static __inline vec_uint4 spu_mulo(vec_ushort8 a, unsigned short b)
{
  return ((vec_uint4)(si_mpyui((qword)(a), b)));
}


/* spu_mulsr (multiply and shift right)
 * =========
 * Wraps the si_mpys intrinsic.
 */
static __inline vec_int4 spu_mulsr(vec_short8 a, vec_short8 b)
{
  return ((vec_int4)(si_mpys((qword)(a), (qword)(b))));
}
1031 1.1 mrg
1032 1.1 mrg
1033 1.1 mrg /* spu_nand
1034 1.1 mrg * ========
1035 1.1 mrg */
1036 1.1 mrg static __inline vec_uchar16 spu_nand(vec_uchar16 a, vec_uchar16 b)
1037 1.1 mrg {
1038 1.1 mrg return ((vec_uchar16)(si_nand((qword)(a), (qword)(b))));
1039 1.1 mrg }
1040 1.1 mrg
1041 1.1 mrg static __inline vec_char16 spu_nand(vec_char16 a, vec_char16 b)
1042 1.1 mrg {
1043 1.1 mrg return ((vec_char16)(si_nand((qword)(a), (qword)(b))));
1044 1.1 mrg }
1045 1.1 mrg
1046 1.1 mrg static __inline vec_ushort8 spu_nand(vec_ushort8 a, vec_ushort8 b)
1047 1.1 mrg {
1048 1.1 mrg return ((vec_ushort8)(si_nand((qword)(a), (qword)(b))));
1049 1.1 mrg }
1050 1.1 mrg
1051 1.1 mrg static __inline vec_short8 spu_nand(vec_short8 a, vec_short8 b)
1052 1.1 mrg {
1053 1.1 mrg return ((vec_short8)(si_nand((qword)(a), (qword)(b))));
1054 1.1 mrg }
1055 1.1 mrg
1056 1.1 mrg static __inline vec_uint4 spu_nand(vec_uint4 a, vec_uint4 b)
1057 1.1 mrg {
1058 1.1 mrg return ((vec_uint4)(si_nand((qword)(a), (qword)(b))));
1059 1.1 mrg }
1060 1.1 mrg
1061 1.1 mrg static __inline vec_int4 spu_nand(vec_int4 a, vec_int4 b)
1062 1.1 mrg {
1063 1.1 mrg return ((vec_int4)(si_nand((qword)(a), (qword)(b))));
1064 1.1 mrg }
1065 1.1 mrg
1066 1.1 mrg static __inline vec_float4 spu_nand(vec_float4 a, vec_float4 b)
1067 1.1 mrg {
1068 1.1 mrg return ((vec_float4)(si_nand((qword)(a), (qword)(b))));
1069 1.1 mrg }
1070 1.1 mrg
1071 1.1 mrg static __inline vec_ullong2 spu_nand(vec_ullong2 a, vec_ullong2 b)
1072 1.1 mrg {
1073 1.1 mrg return ((vec_ullong2)(si_nand((qword)(a), (qword)(b))));
1074 1.1 mrg }
1075 1.1 mrg
1076 1.1 mrg static __inline vec_llong2 spu_nand(vec_llong2 a, vec_llong2 b)
1077 1.1 mrg {
1078 1.1 mrg return ((vec_llong2)(si_nand((qword)(a), (qword)(b))));
1079 1.1 mrg }
1080 1.1 mrg
1081 1.1 mrg static __inline vec_double2 spu_nand(vec_double2 a, vec_double2 b)
1082 1.1 mrg {
1083 1.1 mrg return ((vec_double2)(si_nand((qword)(a), (qword)(b))));
1084 1.1 mrg }
1085 1.1 mrg
1086 1.1 mrg
1087 1.1 mrg /* spu_nmadd
1088 1.1 mrg * =========
1089 1.1 mrg */
1090 1.1 mrg static __inline vec_double2 spu_nmadd(vec_double2 a, vec_double2 b, vec_double2 c)
1091 1.1 mrg {
1092 1.1 mrg return ((vec_double2)(si_dfnma((qword)(a), (qword)(b), (qword)(c))));
1093 1.1 mrg }
1094 1.1 mrg
1095 1.1 mrg
1096 1.1 mrg /* spu_nmsub
1097 1.1 mrg * =========
1098 1.1 mrg */
1099 1.1 mrg static __inline vec_float4 spu_nmsub(vec_float4 a, vec_float4 b, vec_float4 c)
1100 1.1 mrg {
1101 1.1 mrg return ((vec_float4)(si_fnms((qword)(a), (qword)(b), (qword)(c))));
1102 1.1 mrg }
1103 1.1 mrg
1104 1.1 mrg static __inline vec_double2 spu_nmsub(vec_double2 a, vec_double2 b, vec_double2 c)
1105 1.1 mrg {
1106 1.1 mrg return ((vec_double2)(si_dfnms((qword)(a), (qword)(b), (qword)(c))));
1107 1.1 mrg }
1108 1.1 mrg
1109 1.1 mrg
1110 1.1 mrg /* spu_nor
1111 1.1 mrg * =======
1112 1.1 mrg */
1113 1.1 mrg #define spu_nor(_a, _b) vec_nor(_a, _b)
1114 1.1 mrg
1115 1.1 mrg
1116 1.1 mrg /* spu_or
1117 1.1 mrg * ======
1118 1.1 mrg */
1119 1.1 mrg static __inline vec_uchar16 spu_or(vec_uchar16 a, vec_uchar16 b)
1120 1.1 mrg {
1121 1.1 mrg return ((vec_uchar16)(si_or((qword)(a), (qword)(b))));
1122 1.1 mrg }
1123 1.1 mrg
1124 1.1 mrg static __inline vec_char16 spu_or(vec_char16 a, vec_char16 b)
1125 1.1 mrg {
1126 1.1 mrg return ((vec_char16)(si_or((qword)(a), (qword)(b))));
1127 1.1 mrg }
1128 1.1 mrg
1129 1.1 mrg static __inline vec_ushort8 spu_or(vec_ushort8 a, vec_ushort8 b)
1130 1.1 mrg {
1131 1.1 mrg return ((vec_ushort8)(si_or((qword)(a), (qword)(b))));
1132 1.1 mrg }
1133 1.1 mrg
1134 1.1 mrg static __inline vec_short8 spu_or(vec_short8 a, vec_short8 b)
1135 1.1 mrg {
1136 1.1 mrg return ((vec_short8)(si_or((qword)(a), (qword)(b))));
1137 1.1 mrg }
1138 1.1 mrg
1139 1.1 mrg static __inline vec_uint4 spu_or(vec_uint4 a, vec_uint4 b)
1140 1.1 mrg {
1141 1.1 mrg return ((vec_uint4)(si_or((qword)(a), (qword)(b))));
1142 1.1 mrg }
1143 1.1 mrg
1144 1.1 mrg static __inline vec_int4 spu_or(vec_int4 a, vec_int4 b)
1145 1.1 mrg {
1146 1.1 mrg return ((vec_int4)(si_or((qword)(a), (qword)(b))));
1147 1.1 mrg }
1148 1.1 mrg
1149 1.1 mrg static __inline vec_float4 spu_or(vec_float4 a, vec_float4 b)
1150 1.1 mrg {
1151 1.1 mrg return ((vec_float4)(si_or((qword)(a), (qword)(b))));
1152 1.1 mrg }
1153 1.1 mrg
1154 1.1 mrg static __inline vec_ullong2 spu_or(vec_ullong2 a, vec_ullong2 b)
1155 1.1 mrg {
1156 1.1 mrg return ((vec_ullong2)(si_or((qword)(a), (qword)(b))));
1157 1.1 mrg }
1158 1.1 mrg
1159 1.1 mrg static __inline vec_llong2 spu_or(vec_llong2 a, vec_llong2 b)
1160 1.1 mrg {
1161 1.1 mrg return ((vec_llong2)(si_or((qword)(a), (qword)(b))));
1162 1.1 mrg }
1163 1.1 mrg
1164 1.1 mrg static __inline vec_double2 spu_or(vec_double2 a, vec_double2 b)
1165 1.1 mrg {
1166 1.1 mrg return ((vec_double2)(si_or((qword)(a), (qword)(b))));
1167 1.1 mrg }
1168 1.1 mrg
1169 1.1 mrg
1170 1.1 mrg static __inline vec_uchar16 spu_or(vec_uchar16 a, unsigned char b)
1171 1.1 mrg {
1172 1.1 mrg return ((vec_uchar16)(si_orbi((qword)(a), b)));
1173 1.1 mrg }
1174 1.1 mrg
1175 1.1 mrg static __inline vec_char16 spu_or(vec_char16 a, signed char b)
1176 1.1 mrg {
1177 1.1 mrg return ((vec_char16)(si_orbi((qword)(a), (unsigned char)(b))));
1178 1.1 mrg }
1179 1.1 mrg
1180 1.1 mrg static __inline vec_ushort8 spu_or(vec_ushort8 a, unsigned short b)
1181 1.1 mrg {
1182 1.1 mrg return ((vec_ushort8)(si_orhi((qword)(a), b)));
1183 1.1 mrg }
1184 1.1 mrg
1185 1.1 mrg static __inline vec_short8 spu_or(vec_short8 a, signed short b)
1186 1.1 mrg {
1187 1.1 mrg return ((vec_short8)(si_orhi((qword)(a), (unsigned short)(b))));
1188 1.1 mrg }
1189 1.1 mrg
1190 1.1 mrg static __inline vec_uint4 spu_or(vec_uint4 a, unsigned int b)
1191 1.1 mrg {
1192 1.1 mrg return ((vec_uint4)(si_ori((qword)(a), b)));
1193 1.1 mrg }
1194 1.1 mrg
1195 1.1 mrg static __inline vec_int4 spu_or(vec_int4 a, signed int b)
1196 1.1 mrg {
1197 1.1 mrg return ((vec_int4)(si_ori((qword)(a), (unsigned int)(b))));
1198 1.1 mrg }
1199 1.1 mrg
1200 1.1 mrg
1201 1.1 mrg /* spu_orc
1202 1.1 mrg * =======
1203 1.1 mrg */
1204 1.1 mrg #define spu_orc(_a, _b) vec_or(_a, vec_nor(_b, _b))
1205 1.1 mrg
1206 1.1 mrg
1207 1.1 mrg /* spu_orx
1208 1.1 mrg * =======
1209 1.1 mrg */
1210 1.1 mrg static __inline vec_uint4 spu_orx(vec_uint4 a)
1211 1.1 mrg {
1212 1.1 mrg return ((vec_uint4)(si_orx((qword)(a))));
1213 1.1 mrg }
1214 1.1 mrg
1215 1.1 mrg static __inline vec_int4 spu_orx(vec_int4 a)
1216 1.1 mrg {
1217 1.1 mrg return ((vec_int4)(si_orx((qword)(a))));
1218 1.1 mrg }
1219 1.1 mrg
1220 1.1 mrg
1221 1.1 mrg /* spu_promote
1222 1.1 mrg * ===========
1223 1.1 mrg */
1224 1.1 mrg static __inline vec_uchar16 spu_promote(unsigned char a, int element)
1225 1.1 mrg {
1226 1.1 mrg union {
1227 1.1 mrg vec_uchar16 v;
1228 1.1 mrg unsigned char c[16];
1229 1.1 mrg } in;
1230 1.1 mrg
1231 1.1 mrg in.c[element & 15] = a;
1232 1.1 mrg return (in.v);
1233 1.1 mrg }
1234 1.1 mrg
1235 1.1 mrg static __inline vec_char16 spu_promote(signed char a, int element)
1236 1.1 mrg {
1237 1.1 mrg union {
1238 1.1 mrg vec_char16 v;
1239 1.1 mrg signed char c[16];
1240 1.1 mrg } in;
1241 1.1 mrg
1242 1.1 mrg in.c[element & 15] = a;
1243 1.1 mrg return (in.v);
1244 1.1 mrg }
1245 1.1 mrg
1246 1.1 mrg static __inline vec_ushort8 spu_promote(unsigned short a, int element)
1247 1.1 mrg {
1248 1.1 mrg union {
1249 1.1 mrg vec_ushort8 v;
1250 1.1 mrg unsigned short s[8];
1251 1.1 mrg } in;
1252 1.1 mrg
1253 1.1 mrg in.s[element & 7] = a;
1254 1.1 mrg return (in.v);
1255 1.1 mrg }
1256 1.1 mrg
1257 1.1 mrg static __inline vec_short8 spu_promote(signed short a, int element)
1258 1.1 mrg {
1259 1.1 mrg union {
1260 1.1 mrg vec_short8 v;
1261 1.1 mrg signed short s[8];
1262 1.1 mrg } in;
1263 1.1 mrg
1264 1.1 mrg in.s[element & 7] = a;
1265 1.1 mrg return (in.v);
1266 1.1 mrg }
1267 1.1 mrg
1268 1.1 mrg static __inline vec_uint4 spu_promote(unsigned int a, int element)
1269 1.1 mrg {
1270 1.1 mrg union {
1271 1.1 mrg vec_uint4 v;
1272 1.1 mrg unsigned int i[4];
1273 1.1 mrg } in;
1274 1.1 mrg
1275 1.1 mrg in.i[element & 3] = a;
1276 1.1 mrg return (in.v);
1277 1.1 mrg }
1278 1.1 mrg
1279 1.1 mrg static __inline vec_int4 spu_promote(signed int a, int element)
1280 1.1 mrg {
1281 1.1 mrg union {
1282 1.1 mrg vec_int4 v;
1283 1.1 mrg signed int i[4];
1284 1.1 mrg } in;
1285 1.1 mrg
1286 1.1 mrg in.i[element & 3] = a;
1287 1.1 mrg return (in.v);
1288 1.1 mrg }
1289 1.1 mrg
1290 1.1 mrg static __inline vec_float4 spu_promote(float a, int element)
1291 1.1 mrg {
1292 1.1 mrg union {
1293 1.1 mrg vec_float4 v;
1294 1.1 mrg float f[4];
1295 1.1 mrg } in;
1296 1.1 mrg
1297 1.1 mrg in.f[element & 3] = a;
1298 1.1 mrg return (in.v);
1299 1.1 mrg }
1300 1.1 mrg
1301 1.1 mrg static __inline vec_ullong2 spu_promote(unsigned long long a, int element)
1302 1.1 mrg {
1303 1.1 mrg union {
1304 1.1 mrg vec_ullong2 v;
1305 1.1 mrg unsigned long long l[2];
1306 1.1 mrg } in;
1307 1.1 mrg
1308 1.1 mrg in.l[element & 1] = a;
1309 1.1 mrg return (in.v);
1310 1.1 mrg }
1311 1.1 mrg
1312 1.1 mrg static __inline vec_llong2 spu_promote(signed long long a, int element)
1313 1.1 mrg {
1314 1.1 mrg union {
1315 1.1 mrg vec_llong2 v;
1316 1.1 mrg signed long long l[2];
1317 1.1 mrg } in;
1318 1.1 mrg
1319 1.1 mrg in.l[element & 1] = a;
1320 1.1 mrg return (in.v);
1321 1.1 mrg }
1322 1.1 mrg
1323 1.1 mrg static __inline vec_double2 spu_promote(double a, int element)
1324 1.1 mrg {
1325 1.1 mrg union {
1326 1.1 mrg vec_double2 v;
1327 1.1 mrg double d[2];
1328 1.1 mrg } in;
1329 1.1 mrg
1330 1.1 mrg in.d[element & 1] = a;
1331 1.1 mrg return (in.v);
1332 1.1 mrg }
1333 1.1 mrg
1334 1.1 mrg /* spu_re
1335 1.1 mrg * ======
1336 1.1 mrg */
1337 1.1 mrg #define spu_re(_a) vec_re(_a)
1338 1.1 mrg
1339 1.1 mrg
1340 1.1 mrg /* spu_readch
1341 1.1 mrg * ==========
1342 1.1 mrg */
1343 1.1 mrg #define spu_readch(_channel) 0 /* not mappable */
1344 1.1 mrg
1345 1.1 mrg
1346 1.1 mrg /* spu_readchcnt
1347 1.1 mrg * =============
1348 1.1 mrg */
1349 1.1 mrg #define spu_readchcnt(_channel) 0 /* not mappable */
1350 1.1 mrg
1351 1.1 mrg
1352 1.1 mrg /* spu_readchqw
1353 1.1 mrg * ============
1354 1.1 mrg */
1355 1.1 mrg #define spu_readchqw(_channel) __extension__ ({ vec_uint4 result = { 0, 0, 0, 0 }; result; })
1356 1.1 mrg
1357 1.1 mrg /* spu_rl
1358 1.1 mrg * ======
1359 1.1 mrg */
1360 1.1 mrg static __inline vec_ushort8 spu_rl(vec_ushort8 a, vec_short8 b)
1361 1.1 mrg {
1362 1.1 mrg return ((vec_ushort8)(si_roth((qword)(a), (qword)(b))));
1363 1.1 mrg }
1364 1.1 mrg
1365 1.1 mrg static __inline vec_short8 spu_rl(vec_short8 a, vec_short8 b)
1366 1.1 mrg {
1367 1.1 mrg return ((vec_short8)(si_roth((qword)(a), (qword)(b))));
1368 1.1 mrg }
1369 1.1 mrg
1370 1.1 mrg static __inline vec_uint4 spu_rl(vec_uint4 a, vec_int4 b)
1371 1.1 mrg {
1372 1.1 mrg return ((vec_uint4)(si_rot((qword)(a), (qword)(b))));
1373 1.1 mrg }
1374 1.1 mrg
1375 1.1 mrg static __inline vec_int4 spu_rl(vec_int4 a, vec_int4 b)
1376 1.1 mrg {
1377 1.1 mrg return ((vec_int4)(si_rot((qword)(a), (qword)(b))));
1378 1.1 mrg }
1379 1.1 mrg
1380 1.1 mrg static __inline vec_ushort8 spu_rl(vec_ushort8 a, int b)
1381 1.1 mrg {
1382 1.1 mrg return ((vec_ushort8)(si_rothi((qword)(a), b)));
1383 1.1 mrg }
1384 1.1 mrg
1385 1.1 mrg static __inline vec_short8 spu_rl(vec_short8 a, int b)
1386 1.1 mrg {
1387 1.1 mrg return ((vec_short8)(si_rothi((qword)(a), b)));
1388 1.1 mrg }
1389 1.1 mrg
1390 1.1 mrg static __inline vec_uint4 spu_rl(vec_uint4 a, int b)
1391 1.1 mrg {
1392 1.1 mrg return ((vec_uint4)(si_roti((qword)(a), b)));
1393 1.1 mrg }
1394 1.1 mrg
1395 1.1 mrg static __inline vec_int4 spu_rl(vec_int4 a, int b)
1396 1.1 mrg {
1397 1.1 mrg return ((vec_int4)(si_roti((qword)(a), b)));
1398 1.1 mrg }
1399 1.1 mrg
1400 1.1 mrg
1401 1.1 mrg /* spu_rlmask
1402 1.1 mrg * ==========
1403 1.1 mrg */
1404 1.1 mrg static __inline vec_ushort8 spu_rlmask(vec_ushort8 a, vec_short8 b)
1405 1.1 mrg {
1406 1.1 mrg return ((vec_ushort8)(si_rothm((qword)(a), (qword)(b))));
1407 1.1 mrg }
1408 1.1 mrg
1409 1.1 mrg static __inline vec_short8 spu_rlmask(vec_short8 a, vec_short8 b)
1410 1.1 mrg {
1411 1.1 mrg return ((vec_short8)(si_rothm((qword)(a), (qword)(b))));
1412 1.1 mrg }
1413 1.1 mrg
1414 1.1 mrg static __inline vec_uint4 spu_rlmask(vec_uint4 a, vec_int4 b)
1415 1.1 mrg {
1416 1.1 mrg return ((vec_uint4)(si_rotm((qword)(a), (qword)(b))));
1417 1.1 mrg }
1418 1.1 mrg
1419 1.1 mrg static __inline vec_int4 spu_rlmask(vec_int4 a, vec_int4 b)
1420 1.1 mrg {
1421 1.1 mrg return ((vec_int4)(si_rotm((qword)(a), (qword)(b))));
1422 1.1 mrg }
1423 1.1 mrg
1424 1.1 mrg static __inline vec_ushort8 spu_rlmask(vec_ushort8 a, int b)
1425 1.1 mrg {
1426 1.1 mrg return ((vec_ushort8)(si_rothmi((qword)(a), b)));
1427 1.1 mrg }
1428 1.1 mrg
1429 1.1 mrg static __inline vec_short8 spu_rlmask(vec_short8 a, int b)
1430 1.1 mrg {
1431 1.1 mrg return ((vec_short8)(si_rothmi((qword)(a), b)));
1432 1.1 mrg }
1433 1.1 mrg
1434 1.1 mrg
1435 1.1 mrg static __inline vec_uint4 spu_rlmask(vec_uint4 a, int b)
1436 1.1 mrg {
1437 1.1 mrg return ((vec_uint4)(si_rotmi((qword)(a), b)));
1438 1.1 mrg }
1439 1.1 mrg
1440 1.1 mrg static __inline vec_int4 spu_rlmask(vec_int4 a, int b)
1441 1.1 mrg {
1442 1.1 mrg return ((vec_int4)(si_rotmi((qword)(a), b)));
1443 1.1 mrg }
1444 1.1 mrg
1445 1.1 mrg /* spu_rlmaska
1446 1.1 mrg * ===========
1447 1.1 mrg */
1448 1.1 mrg static __inline vec_short8 spu_rlmaska(vec_short8 a, vec_short8 b)
1449 1.1 mrg {
1450 1.1 mrg return ((vec_short8)(si_rotmah((qword)(a), (qword)(b))));
1451 1.1 mrg }
1452 1.1 mrg
1453 1.1 mrg static __inline vec_ushort8 spu_rlmaska(vec_ushort8 a, vec_short8 b)
1454 1.1 mrg {
1455 1.1 mrg return ((vec_ushort8)(si_rotmah((qword)(a), (qword)(b))));
1456 1.1 mrg }
1457 1.1 mrg
1458 1.1 mrg
1459 1.1 mrg static __inline vec_int4 spu_rlmaska(vec_int4 a, vec_int4 b)
1460 1.1 mrg {
1461 1.1 mrg return ((vec_int4)(si_rotma((qword)(a), (qword)(b))));
1462 1.1 mrg }
1463 1.1 mrg
1464 1.1 mrg static __inline vec_uint4 spu_rlmaska(vec_uint4 a, vec_int4 b)
1465 1.1 mrg {
1466 1.1 mrg return ((vec_uint4)(si_rotma((qword)(a), (qword)(b))));
1467 1.1 mrg }
1468 1.1 mrg
1469 1.1 mrg static __inline vec_ushort8 spu_rlmaska(vec_ushort8 a, int b)
1470 1.1 mrg {
1471 1.1 mrg return ((vec_ushort8)(si_rotmahi((qword)(a), b)));
1472 1.1 mrg }
1473 1.1 mrg
1474 1.1 mrg static __inline vec_short8 spu_rlmaska(vec_short8 a, int b)
1475 1.1 mrg {
1476 1.1 mrg return ((vec_short8)(si_rotmahi((qword)(a), b)));
1477 1.1 mrg }
1478 1.1 mrg
1479 1.1 mrg static __inline vec_uint4 spu_rlmaska(vec_uint4 a, int b)
1480 1.1 mrg {
1481 1.1 mrg return ((vec_uint4)(si_rotmai((qword)(a), b)));
1482 1.1 mrg }
1483 1.1 mrg
1484 1.1 mrg static __inline vec_int4 spu_rlmaska(vec_int4 a, int b)
1485 1.1 mrg {
1486 1.1 mrg return ((vec_int4)(si_rotmai((qword)(a), b)));
1487 1.1 mrg }
1488 1.1 mrg
1489 1.1 mrg
1490 1.1 mrg /* spu_rlmaskqw
1491 1.1 mrg * ============
1492 1.1 mrg */
1493 1.1 mrg static __inline vec_uchar16 spu_rlmaskqw(vec_uchar16 a, int count)
1494 1.1 mrg {
1495 1.1 mrg return ((vec_uchar16)(si_rotqmbi((qword)(a), si_from_int(count))));
1496 1.1 mrg }
1497 1.1 mrg
1498 1.1 mrg static __inline vec_char16 spu_rlmaskqw(vec_char16 a, int count)
1499 1.1 mrg {
1500 1.1 mrg return ((vec_char16)(si_rotqmbi((qword)(a), si_from_int(count))));
1501 1.1 mrg }
1502 1.1 mrg
1503 1.1 mrg static __inline vec_ushort8 spu_rlmaskqw(vec_ushort8 a, int count)
1504 1.1 mrg {
1505 1.1 mrg return ((vec_ushort8)(si_rotqmbi((qword)(a), si_from_int(count))));
1506 1.1 mrg }
1507 1.1 mrg
1508 1.1 mrg static __inline vec_short8 spu_rlmaskqw(vec_short8 a, int count)
1509 1.1 mrg {
1510 1.1 mrg return ((vec_short8)(si_rotqmbi((qword)(a), si_from_int(count))));
1511 1.1 mrg }
1512 1.1 mrg
1513 1.1 mrg static __inline vec_uint4 spu_rlmaskqw(vec_uint4 a, int count)
1514 1.1 mrg {
1515 1.1 mrg return ((vec_uint4)(si_rotqmbi((qword)(a), si_from_int(count))));
1516 1.1 mrg }
1517 1.1 mrg
1518 1.1 mrg static __inline vec_int4 spu_rlmaskqw(vec_int4 a, int count)
1519 1.1 mrg {
1520 1.1 mrg return ((vec_int4)(si_rotqmbi((qword)(a), si_from_int(count))));
1521 1.1 mrg }
1522 1.1 mrg
1523 1.1 mrg static __inline vec_float4 spu_rlmaskqw(vec_float4 a, int count)
1524 1.1 mrg {
1525 1.1 mrg return ((vec_float4)(si_rotqmbi((qword)(a), si_from_int(count))));
1526 1.1 mrg }
1527 1.1 mrg
1528 1.1 mrg static __inline vec_ullong2 spu_rlmaskqw(vec_ullong2 a, int count)
1529 1.1 mrg {
1530 1.1 mrg return ((vec_ullong2)(si_rotqmbi((qword)(a), si_from_int(count))));
1531 1.1 mrg }
1532 1.1 mrg
1533 1.1 mrg static __inline vec_llong2 spu_rlmaskqw(vec_llong2 a, int count)
1534 1.1 mrg {
1535 1.1 mrg return ((vec_llong2)(si_rotqmbi((qword)(a), si_from_int(count))));
1536 1.1 mrg }
1537 1.1 mrg
1538 1.1 mrg static __inline vec_double2 spu_rlmaskqw(vec_double2 a, int count)
1539 1.1 mrg {
1540 1.1 mrg return ((vec_double2)(si_rotqmbi((qword)(a), si_from_int(count))));
1541 1.1 mrg }
1542 1.1 mrg
1543 1.1 mrg /* spu_rlmaskqwbyte
1544 1.1 mrg * ================
1545 1.1 mrg */
1546 1.1 mrg static __inline vec_uchar16 spu_rlmaskqwbyte(vec_uchar16 a, int count)
1547 1.1 mrg {
1548 1.1 mrg return ((vec_uchar16)(si_rotqmby((qword)(a), si_from_int(count))));
1549 1.1 mrg }
1550 1.1 mrg
1551 1.1 mrg static __inline vec_char16 spu_rlmaskqwbyte(vec_char16 a, int count)
1552 1.1 mrg {
1553 1.1 mrg return ((vec_char16)(si_rotqmby((qword)(a), si_from_int(count))));
1554 1.1 mrg }
1555 1.1 mrg
1556 1.1 mrg static __inline vec_ushort8 spu_rlmaskqwbyte(vec_ushort8 a, int count)
1557 1.1 mrg {
1558 1.1 mrg return ((vec_ushort8)(si_rotqmby((qword)(a), si_from_int(count))));
1559 1.1 mrg }
1560 1.1 mrg
1561 1.1 mrg static __inline vec_short8 spu_rlmaskqwbyte(vec_short8 a, int count)
1562 1.1 mrg {
1563 1.1 mrg return ((vec_short8)(si_rotqmby((qword)(a), si_from_int(count))));
1564 1.1 mrg }
1565 1.1 mrg
1566 1.1 mrg static __inline vec_uint4 spu_rlmaskqwbyte(vec_uint4 a, int count)
1567 1.1 mrg {
1568 1.1 mrg return ((vec_uint4)(si_rotqmby((qword)(a), si_from_int(count))));
1569 1.1 mrg }
1570 1.1 mrg
1571 1.1 mrg static __inline vec_int4 spu_rlmaskqwbyte(vec_int4 a, int count)
1572 1.1 mrg {
1573 1.1 mrg return ((vec_int4)(si_rotqmby((qword)(a), si_from_int(count))));
1574 1.1 mrg }
1575 1.1 mrg
1576 1.1 mrg static __inline vec_float4 spu_rlmaskqwbyte(vec_float4 a, int count)
1577 1.1 mrg {
1578 1.1 mrg return ((vec_float4)(si_rotqmby((qword)(a), si_from_int(count))));
1579 1.1 mrg }
1580 1.1 mrg
1581 1.1 mrg static __inline vec_ullong2 spu_rlmaskqwbyte(vec_ullong2 a, int count)
1582 1.1 mrg {
1583 1.1 mrg return ((vec_ullong2)(si_rotqmby((qword)(a), si_from_int(count))));
1584 1.1 mrg }
1585 1.1 mrg
1586 1.1 mrg static __inline vec_llong2 spu_rlmaskqwbyte(vec_llong2 a, int count)
1587 1.1 mrg {
1588 1.1 mrg return ((vec_llong2)(si_rotqmby((qword)(a), si_from_int(count))));
1589 1.1 mrg }
1590 1.1 mrg
1591 1.1 mrg static __inline vec_double2 spu_rlmaskqwbyte(vec_double2 a, int count)
1592 1.1 mrg {
1593 1.1 mrg return ((vec_double2)(si_rotqmby((qword)(a), si_from_int(count))));
1594 1.1 mrg }
1595 1.1 mrg
1596 1.1 mrg /* spu_rlmaskqwbytebc
1597 1.1 mrg * ==================
1598 1.1 mrg */
1599 1.1 mrg static __inline vec_uchar16 spu_rlmaskqwbytebc(vec_uchar16 a, int count)
1600 1.1 mrg {
1601 1.1 mrg return ((vec_uchar16)(si_rotqmbybi((qword)(a), si_from_int(count))));
1602 1.1 mrg }
1603 1.1 mrg
1604 1.1 mrg static __inline vec_char16 spu_rlmaskqwbytebc(vec_char16 a, int count)
1605 1.1 mrg {
1606 1.1 mrg return ((vec_char16)(si_rotqmbybi((qword)(a), si_from_int(count))));
1607 1.1 mrg }
1608 1.1 mrg
1609 1.1 mrg static __inline vec_ushort8 spu_rlmaskqwbytebc(vec_ushort8 a, int count)
1610 1.1 mrg {
1611 1.1 mrg return ((vec_ushort8)(si_rotqmbybi((qword)(a), si_from_int(count))));
1612 1.1 mrg }
1613 1.1 mrg
1614 1.1 mrg static __inline vec_short8 spu_rlmaskqwbytebc(vec_short8 a, int count)
1615 1.1 mrg {
1616 1.1 mrg return ((vec_short8)(si_rotqmbybi((qword)(a), si_from_int(count))));
1617 1.1 mrg }
1618 1.1 mrg
1619 1.1 mrg static __inline vec_uint4 spu_rlmaskqwbytebc(vec_uint4 a, int count)
1620 1.1 mrg {
1621 1.1 mrg return ((vec_uint4)(si_rotqmbybi((qword)(a), si_from_int(count))));
1622 1.1 mrg }
1623 1.1 mrg
1624 1.1 mrg static __inline vec_int4 spu_rlmaskqwbytebc(vec_int4 a, int count)
1625 1.1 mrg {
1626 1.1 mrg return ((vec_int4)(si_rotqmbybi((qword)(a), si_from_int(count))));
1627 1.1 mrg }
1628 1.1 mrg
1629 1.1 mrg static __inline vec_float4 spu_rlmaskqwbytebc(vec_float4 a, int count)
1630 1.1 mrg {
1631 1.1 mrg return ((vec_float4)(si_rotqmbybi((qword)(a), si_from_int(count))));
1632 1.1 mrg }
1633 1.1 mrg
1634 1.1 mrg static __inline vec_ullong2 spu_rlmaskqwbytebc(vec_ullong2 a, int count)
1635 1.1 mrg {
1636 1.1 mrg return ((vec_ullong2)(si_rotqmbybi((qword)(a), si_from_int(count))));
1637 1.1 mrg }
1638 1.1 mrg
1639 1.1 mrg static __inline vec_llong2 spu_rlmaskqwbytebc(vec_llong2 a, int count)
1640 1.1 mrg {
1641 1.1 mrg return ((vec_llong2)(si_rotqmbybi((qword)(a), si_from_int(count))));
1642 1.1 mrg }
1643 1.1 mrg
1644 1.1 mrg static __inline vec_double2 spu_rlmaskqwbytebc(vec_double2 a, int count)
1645 1.1 mrg {
1646 1.1 mrg return ((vec_double2)(si_rotqmbybi((qword)(a), si_from_int(count))));
1647 1.1 mrg }
1648 1.1 mrg
1649 1.1 mrg
1650 1.1 mrg /* spu_rlqwbyte
1651 1.1 mrg * ============
1652 1.1 mrg */
1653 1.1 mrg static __inline vec_uchar16 spu_rlqwbyte(vec_uchar16 a, int count)
1654 1.1 mrg {
1655 1.1 mrg return ((vec_uchar16)(si_rotqby((qword)(a), si_from_int(count))));
1656 1.1 mrg }
1657 1.1 mrg
1658 1.1 mrg static __inline vec_char16 spu_rlqwbyte(vec_char16 a, int count)
1659 1.1 mrg {
1660 1.1 mrg return ((vec_char16)(si_rotqby((qword)(a), si_from_int(count))));
1661 1.1 mrg }
1662 1.1 mrg
1663 1.1 mrg static __inline vec_ushort8 spu_rlqwbyte(vec_ushort8 a, int count)
1664 1.1 mrg {
1665 1.1 mrg return ((vec_ushort8)(si_rotqby((qword)(a), si_from_int(count))));
1666 1.1 mrg }
1667 1.1 mrg
1668 1.1 mrg static __inline vec_short8 spu_rlqwbyte(vec_short8 a, int count)
1669 1.1 mrg {
1670 1.1 mrg return ((vec_short8)(si_rotqby((qword)(a), si_from_int(count))));
1671 1.1 mrg }
1672 1.1 mrg
1673 1.1 mrg static __inline vec_uint4 spu_rlqwbyte(vec_uint4 a, int count)
1674 1.1 mrg {
1675 1.1 mrg return ((vec_uint4)(si_rotqby((qword)(a), si_from_int(count))));
1676 1.1 mrg }
1677 1.1 mrg
1678 1.1 mrg static __inline vec_int4 spu_rlqwbyte(vec_int4 a, int count)
1679 1.1 mrg {
1680 1.1 mrg return ((vec_int4)(si_rotqby((qword)(a), si_from_int(count))));
1681 1.1 mrg }
1682 1.1 mrg
1683 1.1 mrg static __inline vec_float4 spu_rlqwbyte(vec_float4 a, int count)
1684 1.1 mrg {
1685 1.1 mrg return ((vec_float4)(si_rotqby((qword)(a), si_from_int(count))));
1686 1.1 mrg }
1687 1.1 mrg
1688 1.1 mrg static __inline vec_ullong2 spu_rlqwbyte(vec_ullong2 a, int count)
1689 1.1 mrg {
1690 1.1 mrg return ((vec_ullong2)(si_rotqby((qword)(a), si_from_int(count))));
1691 1.1 mrg }
1692 1.1 mrg
1693 1.1 mrg static __inline vec_llong2 spu_rlqwbyte(vec_llong2 a, int count)
1694 1.1 mrg {
1695 1.1 mrg return ((vec_llong2)(si_rotqby((qword)(a), si_from_int(count))));
1696 1.1 mrg }
1697 1.1 mrg
1698 1.1 mrg static __inline vec_double2 spu_rlqwbyte(vec_double2 a, int count)
1699 1.1 mrg {
1700 1.1 mrg return ((vec_double2)(si_rotqby((qword)(a), si_from_int(count))));
1701 1.1 mrg }
1702 1.1 mrg
1703 1.1 mrg
1704 1.1 mrg /* spu_rlqwbytebc
1705 1.1 mrg * ==============
1706 1.1 mrg */
1707 1.1 mrg static __inline vec_uchar16 spu_rlqwbytebc(vec_uchar16 a, int count)
1708 1.1 mrg {
1709 1.1 mrg return ((vec_uchar16)(si_rotqbybi((qword)(a), si_from_int(count))));
1710 1.1 mrg }
1711 1.1 mrg
1712 1.1 mrg static __inline vec_char16 spu_rlqwbytebc(vec_char16 a, int count)
1713 1.1 mrg {
1714 1.1 mrg return ((vec_char16)(si_rotqbybi((qword)(a), si_from_int(count))));
1715 1.1 mrg }
1716 1.1 mrg
1717 1.1 mrg static __inline vec_ushort8 spu_rlqwbytebc(vec_ushort8 a, int count)
1718 1.1 mrg {
1719 1.1 mrg return ((vec_ushort8)(si_rotqbybi((qword)(a), si_from_int(count))));
1720 1.1 mrg }
1721 1.1 mrg
1722 1.1 mrg static __inline vec_short8 spu_rlqwbytebc(vec_short8 a, int count)
1723 1.1 mrg {
1724 1.1 mrg return ((vec_short8)(si_rotqbybi((qword)(a), si_from_int(count))));
1725 1.1 mrg }
1726 1.1 mrg
1727 1.1 mrg static __inline vec_uint4 spu_rlqwbytebc(vec_uint4 a, int count)
1728 1.1 mrg {
1729 1.1 mrg return ((vec_uint4)(si_rotqbybi((qword)(a), si_from_int(count))));
1730 1.1 mrg }
1731 1.1 mrg
1732 1.1 mrg static __inline vec_int4 spu_rlqwbytebc(vec_int4 a, int count)
1733 1.1 mrg {
1734 1.1 mrg return ((vec_int4)(si_rotqbybi((qword)(a), si_from_int(count))));
1735 1.1 mrg }
1736 1.1 mrg
1737 1.1 mrg static __inline vec_float4 spu_rlqwbytebc(vec_float4 a, int count)
1738 1.1 mrg {
1739 1.1 mrg return ((vec_float4)(si_rotqbybi((qword)(a), si_from_int(count))));
1740 1.1 mrg }
1741 1.1 mrg
1742 1.1 mrg static __inline vec_ullong2 spu_rlqwbytebc(vec_ullong2 a, int count)
1743 1.1 mrg {
1744 1.1 mrg return ((vec_ullong2)(si_rotqbybi((qword)(a), si_from_int(count))));
1745 1.1 mrg }
1746 1.1 mrg
1747 1.1 mrg static __inline vec_llong2 spu_rlqwbytebc(vec_llong2 a, int count)
1748 1.1 mrg {
1749 1.1 mrg return ((vec_llong2)(si_rotqbybi((qword)(a), si_from_int(count))));
1750 1.1 mrg }
1751 1.1 mrg
1752 1.1 mrg static __inline vec_double2 spu_rlqwbytebc(vec_double2 a, int count)
1753 1.1 mrg {
1754 1.1 mrg return ((vec_double2)(si_rotqbybi((qword)(a), si_from_int(count))));
1755 1.1 mrg }
1756 1.1 mrg
1757 1.1 mrg /* spu_rlqw
1758 1.1 mrg * ========
1759 1.1 mrg */
1760 1.1 mrg static __inline vec_uchar16 spu_rlqw(vec_uchar16 a, int count)
1761 1.1 mrg {
1762 1.1 mrg return ((vec_uchar16)(si_rotqbi((qword)(a), si_from_int(count))));
1763 1.1 mrg }
1764 1.1 mrg
1765 1.1 mrg static __inline vec_char16 spu_rlqw(vec_char16 a, int count)
1766 1.1 mrg {
1767 1.1 mrg return ((vec_char16)(si_rotqbi((qword)(a), si_from_int(count))));
1768 1.1 mrg }
1769 1.1 mrg
1770 1.1 mrg static __inline vec_ushort8 spu_rlqw(vec_ushort8 a, int count)
1771 1.1 mrg {
1772 1.1 mrg return ((vec_ushort8)(si_rotqbi((qword)(a), si_from_int(count))));
1773 1.1 mrg }
1774 1.1 mrg
1775 1.1 mrg static __inline vec_short8 spu_rlqw(vec_short8 a, int count)
1776 1.1 mrg {
1777 1.1 mrg return ((vec_short8)(si_rotqbi((qword)(a), si_from_int(count))));
1778 1.1 mrg }
1779 1.1 mrg
1780 1.1 mrg static __inline vec_uint4 spu_rlqw(vec_uint4 a, int count)
1781 1.1 mrg {
1782 1.1 mrg return ((vec_uint4)(si_rotqbi((qword)(a), si_from_int(count))));
1783 1.1 mrg }
1784 1.1 mrg
1785 1.1 mrg static __inline vec_int4 spu_rlqw(vec_int4 a, int count)
1786 1.1 mrg {
1787 1.1 mrg return ((vec_int4)(si_rotqbi((qword)(a), si_from_int(count))));
1788 1.1 mrg }
1789 1.1 mrg
1790 1.1 mrg static __inline vec_float4 spu_rlqw(vec_float4 a, int count)
1791 1.1 mrg {
1792 1.1 mrg return ((vec_float4)(si_rotqbi((qword)(a), si_from_int(count))));
1793 1.1 mrg }
1794 1.1 mrg
1795 1.1 mrg static __inline vec_ullong2 spu_rlqw(vec_ullong2 a, int count)
1796 1.1 mrg {
1797 1.1 mrg return ((vec_ullong2)(si_rotqbi((qword)(a), si_from_int(count))));
1798 1.1 mrg }
1799 1.1 mrg
1800 1.1 mrg static __inline vec_llong2 spu_rlqw(vec_llong2 a, int count)
1801 1.1 mrg {
1802 1.1 mrg return ((vec_llong2)(si_rotqbi((qword)(a), si_from_int(count))));
1803 1.1 mrg }
1804 1.1 mrg
1805 1.1 mrg static __inline vec_double2 spu_rlqw(vec_double2 a, int count)
1806 1.1 mrg {
1807 1.1 mrg return ((vec_double2)(si_rotqbi((qword)(a), si_from_int(count))));
1808 1.1 mrg }
1809 1.1 mrg
1810 1.1 mrg /* spu_roundtf
1811 1.1 mrg * ===========
1812 1.1 mrg */
1813 1.1 mrg static __inline vec_float4 spu_roundtf(vec_double2 a)
1814 1.1 mrg {
1815 1.1 mrg return ((vec_float4)(si_frds((qword)(a))));
1816 1.1 mrg }
1817 1.1 mrg
1818 1.1 mrg
1819 1.1 mrg /* spu_rsqrte
1820 1.1 mrg * ==========
1821 1.1 mrg */
1822 1.1 mrg #define spu_rsqrte(_a) vec_rsqrte(_a)
1823 1.1 mrg
1824 1.1 mrg
1825 1.1 mrg /* spu_sel
1826 1.1 mrg * =======
1827 1.1 mrg */
1828 1.1 mrg static __inline vec_uchar16 spu_sel(vec_uchar16 a, vec_uchar16 b, vec_uchar16 pattern)
1829 1.1 mrg {
1830 1.1 mrg return ((vec_uchar16)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1831 1.1 mrg }
1832 1.1 mrg
1833 1.1 mrg static __inline vec_char16 spu_sel(vec_char16 a, vec_char16 b, vec_uchar16 pattern)
1834 1.1 mrg {
1835 1.1 mrg return ((vec_char16)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1836 1.1 mrg }
1837 1.1 mrg
1838 1.1 mrg static __inline vec_ushort8 spu_sel(vec_ushort8 a, vec_ushort8 b, vec_ushort8 pattern)
1839 1.1 mrg {
1840 1.1 mrg return ((vec_ushort8)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1841 1.1 mrg }
1842 1.1 mrg
1843 1.1 mrg static __inline vec_short8 spu_sel(vec_short8 a, vec_short8 b, vec_ushort8 pattern)
1844 1.1 mrg {
1845 1.1 mrg return ((vec_short8)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1846 1.1 mrg }
1847 1.1 mrg
1848 1.1 mrg static __inline vec_uint4 spu_sel(vec_uint4 a, vec_uint4 b, vec_uint4 pattern)
1849 1.1 mrg {
1850 1.1 mrg return ((vec_uint4)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1851 1.1 mrg }
1852 1.1 mrg
1853 1.1 mrg static __inline vec_int4 spu_sel(vec_int4 a, vec_int4 b, vec_uint4 pattern)
1854 1.1 mrg {
1855 1.1 mrg return ((vec_int4)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1856 1.1 mrg }
1857 1.1 mrg
1858 1.1 mrg static __inline vec_float4 spu_sel(vec_float4 a, vec_float4 b, vec_uint4 pattern)
1859 1.1 mrg {
1860 1.1 mrg return ((vec_float4)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1861 1.1 mrg }
1862 1.1 mrg
1863 1.1 mrg static __inline vec_ullong2 spu_sel(vec_ullong2 a, vec_ullong2 b, vec_ullong2 pattern)
1864 1.1 mrg {
1865 1.1 mrg return ((vec_ullong2)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1866 1.1 mrg }
1867 1.1 mrg
1868 1.1 mrg static __inline vec_llong2 spu_sel(vec_llong2 a, vec_llong2 b, vec_ullong2 pattern)
1869 1.1 mrg {
1870 1.1 mrg return ((vec_llong2)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1871 1.1 mrg }
1872 1.1 mrg
1873 1.1 mrg static __inline vec_double2 spu_sel(vec_double2 a, vec_double2 b, vec_ullong2 pattern)
1874 1.1 mrg {
1875 1.1 mrg return ((vec_double2)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1876 1.1 mrg }
1877 1.1 mrg
1878 1.1 mrg
1879 1.1 mrg
1880 1.1 mrg /* spu_shuffle
1881 1.1 mrg * ===========
1882 1.1 mrg */
1883 1.1 mrg static __inline vec_uchar16 spu_shuffle(vec_uchar16 a, vec_uchar16 b, vec_uchar16 pattern)
1884 1.1 mrg {
1885 1.1 mrg return ((vec_uchar16)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1886 1.1 mrg }
1887 1.1 mrg
1888 1.1 mrg static __inline vec_char16 spu_shuffle(vec_char16 a, vec_char16 b, vec_uchar16 pattern)
1889 1.1 mrg {
1890 1.1 mrg return ((vec_char16)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1891 1.1 mrg }
1892 1.1 mrg
1893 1.1 mrg static __inline vec_ushort8 spu_shuffle(vec_ushort8 a, vec_ushort8 b, vec_uchar16 pattern)
1894 1.1 mrg {
1895 1.1 mrg return ((vec_ushort8)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1896 1.1 mrg }
1897 1.1 mrg
1898 1.1 mrg static __inline vec_short8 spu_shuffle(vec_short8 a, vec_short8 b, vec_uchar16 pattern)
1899 1.1 mrg {
1900 1.1 mrg return ((vec_short8)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1901 1.1 mrg }
1902 1.1 mrg
1903 1.1 mrg static __inline vec_uint4 spu_shuffle(vec_uint4 a, vec_uint4 b, vec_uchar16 pattern)
1904 1.1 mrg {
1905 1.1 mrg return ((vec_uint4)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1906 1.1 mrg }
1907 1.1 mrg
1908 1.1 mrg static __inline vec_int4 spu_shuffle(vec_int4 a, vec_int4 b, vec_uchar16 pattern)
1909 1.1 mrg {
1910 1.1 mrg return ((vec_int4)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1911 1.1 mrg }
1912 1.1 mrg
1913 1.1 mrg static __inline vec_float4 spu_shuffle(vec_float4 a, vec_float4 b, vec_uchar16 pattern)
1914 1.1 mrg {
1915 1.1 mrg return ((vec_float4)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1916 1.1 mrg }
1917 1.1 mrg
1918 1.1 mrg static __inline vec_ullong2 spu_shuffle(vec_ullong2 a, vec_ullong2 b, vec_uchar16 pattern)
1919 1.1 mrg {
1920 1.1 mrg return ((vec_ullong2)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1921 1.1 mrg }
1922 1.1 mrg
1923 1.1 mrg static __inline vec_llong2 spu_shuffle(vec_llong2 a, vec_llong2 b, vec_uchar16 pattern)
1924 1.1 mrg {
1925 1.1 mrg return ((vec_llong2)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1926 1.1 mrg }
1927 1.1 mrg
1928 1.1 mrg static __inline vec_double2 spu_shuffle(vec_double2 a, vec_double2 b, vec_uchar16 pattern)
1929 1.1 mrg {
1930 1.1 mrg return ((vec_double2)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1931 1.1 mrg }
1932 1.1 mrg
1933 1.1 mrg
1934 1.1 mrg /* spu_sl
1935 1.1 mrg * ======
1936 1.1 mrg */
1937 1.1 mrg static __inline vec_ushort8 spu_sl(vec_ushort8 a, vec_ushort8 b)
1938 1.1 mrg {
1939 1.1 mrg return ((vec_ushort8)(si_shlh((qword)(a), (qword)(b))));
1940 1.1 mrg }
1941 1.1 mrg
1942 1.1 mrg static __inline vec_short8 spu_sl(vec_short8 a, vec_ushort8 b)
1943 1.1 mrg {
1944 1.1 mrg return ((vec_short8)(si_shlh((qword)(a), (qword)(b))));
1945 1.1 mrg }
1946 1.1 mrg
1947 1.1 mrg static __inline vec_uint4 spu_sl(vec_uint4 a, vec_uint4 b)
1948 1.1 mrg {
1949 1.1 mrg return ((vec_uint4)(si_shl((qword)(a), (qword)(b))));
1950 1.1 mrg }
1951 1.1 mrg
1952 1.1 mrg static __inline vec_int4 spu_sl(vec_int4 a, vec_uint4 b)
1953 1.1 mrg {
1954 1.1 mrg return ((vec_int4)(si_shl((qword)(a), (qword)(b))));
1955 1.1 mrg }
1956 1.1 mrg
1957 1.1 mrg static __inline vec_ushort8 spu_sl(vec_ushort8 a, unsigned int b)
1958 1.1 mrg {
1959 1.1 mrg return ((vec_ushort8)(si_shlhi((qword)(a), b)));
1960 1.1 mrg }
1961 1.1 mrg
1962 1.1 mrg static __inline vec_short8 spu_sl(vec_short8 a, unsigned int b)
1963 1.1 mrg {
1964 1.1 mrg return ((vec_short8)(si_shlhi((qword)(a), b)));
1965 1.1 mrg }
1966 1.1 mrg
1967 1.1 mrg static __inline vec_uint4 spu_sl(vec_uint4 a, unsigned int b)
1968 1.1 mrg {
1969 1.1 mrg return ((vec_uint4)(si_shli((qword)(a), b)));
1970 1.1 mrg }
1971 1.1 mrg
1972 1.1 mrg static __inline vec_int4 spu_sl(vec_int4 a, unsigned int b)
1973 1.1 mrg {
1974 1.1 mrg return ((vec_int4)(si_shli((qword)(a), b)));
1975 1.1 mrg }
1976 1.1 mrg
1977 1.1 mrg
1978 1.1 mrg /* spu_slqw
1979 1.1 mrg * ========
1980 1.1 mrg */
1981 1.1 mrg static __inline vec_uchar16 spu_slqw(vec_uchar16 a, unsigned int count)
1982 1.1 mrg {
1983 1.1 mrg return ((vec_uchar16)(si_shlqbi((qword)(a), si_from_uint(count))));
1984 1.1 mrg }
1985 1.1 mrg
1986 1.1 mrg static __inline vec_char16 spu_slqw(vec_char16 a, unsigned int count)
1987 1.1 mrg {
1988 1.1 mrg return ((vec_char16)(si_shlqbi((qword)(a), si_from_uint(count))));
1989 1.1 mrg }
1990 1.1 mrg
1991 1.1 mrg static __inline vec_ushort8 spu_slqw(vec_ushort8 a, unsigned int count)
1992 1.1 mrg {
1993 1.1 mrg return ((vec_ushort8)(si_shlqbi((qword)(a), si_from_uint(count))));
1994 1.1 mrg }
1995 1.1 mrg
1996 1.1 mrg static __inline vec_short8 spu_slqw(vec_short8 a, unsigned int count)
1997 1.1 mrg {
1998 1.1 mrg return ((vec_short8)(si_shlqbi((qword)(a), si_from_uint(count))));
1999 1.1 mrg }
2000 1.1 mrg
2001 1.1 mrg static __inline vec_uint4 spu_slqw(vec_uint4 a, unsigned int count)
2002 1.1 mrg {
2003 1.1 mrg return ((vec_uint4)(si_shlqbi((qword)(a), si_from_uint(count))));
2004 1.1 mrg }
2005 1.1 mrg
2006 1.1 mrg static __inline vec_int4 spu_slqw(vec_int4 a, unsigned int count)
2007 1.1 mrg {
2008 1.1 mrg return ((vec_int4)(si_shlqbi((qword)(a), si_from_uint(count))));
2009 1.1 mrg }
2010 1.1 mrg
2011 1.1 mrg static __inline vec_float4 spu_slqw(vec_float4 a, unsigned int count)
2012 1.1 mrg {
2013 1.1 mrg return ((vec_float4)(si_shlqbi((qword)(a), si_from_uint(count))));
2014 1.1 mrg }
2015 1.1 mrg
2016 1.1 mrg static __inline vec_ullong2 spu_slqw(vec_ullong2 a, unsigned int count)
2017 1.1 mrg {
2018 1.1 mrg return ((vec_ullong2)(si_shlqbi((qword)(a), si_from_uint(count))));
2019 1.1 mrg }
2020 1.1 mrg
2021 1.1 mrg static __inline vec_llong2 spu_slqw(vec_llong2 a, unsigned int count)
2022 1.1 mrg {
2023 1.1 mrg return ((vec_llong2)(si_shlqbi((qword)(a), si_from_uint(count))));
2024 1.1 mrg }
2025 1.1 mrg
2026 1.1 mrg static __inline vec_double2 spu_slqw(vec_double2 a, unsigned int count)
2027 1.1 mrg {
2028 1.1 mrg return ((vec_double2)(si_shlqbi((qword)(a), si_from_uint(count))));
2029 1.1 mrg }
2030 1.1 mrg
2031 1.1 mrg /* spu_slqwbyte
2032 1.1 mrg * ============
2033 1.1 mrg */
2034 1.1 mrg static __inline vec_uchar16 spu_slqwbyte(vec_uchar16 a, unsigned int count)
2035 1.1 mrg {
2036 1.1 mrg return ((vec_uchar16)(si_shlqby((qword)(a), si_from_uint(count))));
2037 1.1 mrg }
2038 1.1 mrg
2039 1.1 mrg static __inline vec_char16 spu_slqwbyte(vec_char16 a, unsigned int count)
2040 1.1 mrg {
2041 1.1 mrg return ((vec_char16)(si_shlqby((qword)(a), si_from_uint(count))));
2042 1.1 mrg }
2043 1.1 mrg
2044 1.1 mrg static __inline vec_ushort8 spu_slqwbyte(vec_ushort8 a, unsigned int count)
2045 1.1 mrg {
2046 1.1 mrg return ((vec_ushort8)(si_shlqby((qword)(a), si_from_uint(count))));
2047 1.1 mrg }
2048 1.1 mrg
2049 1.1 mrg static __inline vec_short8 spu_slqwbyte(vec_short8 a, unsigned int count)
2050 1.1 mrg {
2051 1.1 mrg return ((vec_short8)(si_shlqby((qword)(a), si_from_uint(count))));
2052 1.1 mrg }
2053 1.1 mrg
2054 1.1 mrg static __inline vec_uint4 spu_slqwbyte(vec_uint4 a, unsigned int count)
2055 1.1 mrg {
2056 1.1 mrg return ((vec_uint4)(si_shlqby((qword)(a), si_from_uint(count))));
2057 1.1 mrg }
2058 1.1 mrg
2059 1.1 mrg static __inline vec_int4 spu_slqwbyte(vec_int4 a, unsigned int count)
2060 1.1 mrg {
2061 1.1 mrg return ((vec_int4)(si_shlqby((qword)(a), si_from_uint(count))));
2062 1.1 mrg }
2063 1.1 mrg
2064 1.1 mrg static __inline vec_float4 spu_slqwbyte(vec_float4 a, unsigned int count)
2065 1.1 mrg {
2066 1.1 mrg return ((vec_float4)(si_shlqby((qword)(a), si_from_uint(count))));
2067 1.1 mrg }
2068 1.1 mrg
2069 1.1 mrg static __inline vec_ullong2 spu_slqwbyte(vec_ullong2 a, unsigned int count)
2070 1.1 mrg {
2071 1.1 mrg return ((vec_ullong2)(si_shlqby((qword)(a), si_from_uint(count))));
2072 1.1 mrg }
2073 1.1 mrg
2074 1.1 mrg static __inline vec_llong2 spu_slqwbyte(vec_llong2 a, unsigned int count)
2075 1.1 mrg {
2076 1.1 mrg return ((vec_llong2)(si_shlqby((qword)(a), si_from_uint(count))));
2077 1.1 mrg }
2078 1.1 mrg
2079 1.1 mrg static __inline vec_double2 spu_slqwbyte(vec_double2 a, unsigned int count)
2080 1.1 mrg {
2081 1.1 mrg return ((vec_double2)(si_shlqby((qword)(a), si_from_uint(count))));
2082 1.1 mrg }
2083 1.1 mrg
2084 1.1 mrg /* spu_slqwbytebc
2085 1.1 mrg * ==============
2086 1.1 mrg */
2087 1.1 mrg static __inline vec_uchar16 spu_slqwbytebc(vec_uchar16 a, unsigned int count)
2088 1.1 mrg {
2089 1.1 mrg return ((vec_uchar16)(si_shlqbybi((qword)(a), si_from_uint(count))));
2090 1.1 mrg }
2091 1.1 mrg
2092 1.1 mrg static __inline vec_char16 spu_slqwbytebc(vec_char16 a, unsigned int count)
2093 1.1 mrg {
2094 1.1 mrg return ((vec_char16)(si_shlqbybi((qword)(a), si_from_uint(count))));
2095 1.1 mrg }
2096 1.1 mrg
2097 1.1 mrg static __inline vec_ushort8 spu_slqwbytebc(vec_ushort8 a, unsigned int count)
2098 1.1 mrg {
2099 1.1 mrg return ((vec_ushort8)(si_shlqbybi((qword)(a), si_from_uint(count))));
2100 1.1 mrg }
2101 1.1 mrg
2102 1.1 mrg static __inline vec_short8 spu_slqwbytebc(vec_short8 a, unsigned int count)
2103 1.1 mrg {
2104 1.1 mrg return ((vec_short8)(si_shlqbybi((qword)(a), si_from_uint(count))));
2105 1.1 mrg }
2106 1.1 mrg
2107 1.1 mrg static __inline vec_uint4 spu_slqwbytebc(vec_uint4 a, unsigned int count)
2108 1.1 mrg {
2109 1.1 mrg return ((vec_uint4)(si_shlqbybi((qword)(a), si_from_uint(count))));
2110 1.1 mrg }
2111 1.1 mrg
2112 1.1 mrg static __inline vec_int4 spu_slqwbytebc(vec_int4 a, unsigned int count)
2113 1.1 mrg {
2114 1.1 mrg return ((vec_int4)(si_shlqbybi((qword)(a), si_from_uint(count))));
2115 1.1 mrg }
2116 1.1 mrg
2117 1.1 mrg static __inline vec_float4 spu_slqwbytebc(vec_float4 a, unsigned int count)
2118 1.1 mrg {
2119 1.1 mrg return ((vec_float4)(si_shlqbybi((qword)(a), si_from_uint(count))));
2120 1.1 mrg }
2121 1.1 mrg
2122 1.1 mrg static __inline vec_ullong2 spu_slqwbytebc(vec_ullong2 a, unsigned int count)
2123 1.1 mrg {
2124 1.1 mrg return ((vec_ullong2)(si_shlqbybi((qword)(a), si_from_uint(count))));
2125 1.1 mrg }
2126 1.1 mrg
2127 1.1 mrg static __inline vec_llong2 spu_slqwbytebc(vec_llong2 a, unsigned int count)
2128 1.1 mrg {
2129 1.1 mrg return ((vec_llong2)(si_shlqbybi((qword)(a), si_from_uint(count))));
2130 1.1 mrg }
2131 1.1 mrg
2132 1.1 mrg static __inline vec_double2 spu_slqwbytebc(vec_double2 a, unsigned int count)
2133 1.1 mrg {
2134 1.1 mrg return ((vec_double2)(si_shlqbybi((qword)(a), si_from_uint(count))));
2135 1.1 mrg }
2136 1.1 mrg
/* spu_splats
 * ==========
 * Replicate a scalar value across every element of the returned vector.
 * The 8/16/32-bit variants write the scalar into element 0 through a
 * union and broadcast it with the VMX vec_splat permute; the 64-bit and
 * double variants store both elements explicitly (no VMX 64-bit splat
 * is used here).  Signed variants reuse the unsigned implementation of
 * the same width and recast, since the bit pattern is identical.
 */
static __inline vec_uchar16 spu_splats(unsigned char a)
{
  /* Union gives byte-level access to the vector without pointer casts.  */
  union {
    vec_uchar16 v;
    unsigned char c[16];
  } in;

  in.c[0] = a;
  /* Broadcast element 0 to all 16 byte slots.  */
  return (vec_splat(in.v, 0));
}

static __inline vec_char16 spu_splats(signed char a)
{
  /* Same bits as the unsigned splat; only the element type differs.  */
  return ((vec_char16)spu_splats((unsigned char)(a)));
}

static __inline vec_ushort8 spu_splats(unsigned short a)
{
  union {
    vec_ushort8 v;
    unsigned short s[8];
  } in;

  in.s[0] = a;
  return (vec_splat(in.v, 0));
}

static __inline vec_short8 spu_splats(signed short a)
{
  return ((vec_short8)spu_splats((unsigned short)(a)));
}

static __inline vec_uint4 spu_splats(unsigned int a)
{
  union {
    vec_uint4 v;
    unsigned int i[4];
  } in;

  in.i[0] = a;
  return (vec_splat(in.v, 0));
}

static __inline vec_int4 spu_splats(signed int a)
{
  return ((vec_int4)spu_splats((unsigned int)(a)));
}

static __inline vec_float4 spu_splats(float a)
{
  union {
    vec_float4 v;
    float f[4];
  } in;

  in.f[0] = a;
  return (vec_splat(in.v, 0));
}

static __inline vec_ullong2 spu_splats(unsigned long long a)
{
  union {
    vec_ullong2 v;
    unsigned long long l[2];
  } in;

  /* Fill both 64-bit elements directly instead of using vec_splat.  */
  in.l[0] = a;
  in.l[1] = a;
  return (in.v);
}

static __inline vec_llong2 spu_splats(signed long long a)
{
  return ((vec_llong2)spu_splats((unsigned long long)(a)));
}

static __inline vec_double2 spu_splats(double a)
{
  union {
    vec_double2 v;
    double d[2];
  } in;

  in.d[0] = a;
  in.d[1] = a;
  return (in.v);
}
2227 1.1 mrg
2228 1.1 mrg
/* spu_stop
 * ========
 * Forwards its argument unchanged to the si_stop intrinsic.
 */
#define spu_stop(_type) si_stop(_type)
2233 1.1 mrg
2234 1.1 mrg
2235 1.1 mrg /* spu_sub
2236 1.1 mrg * =======
2237 1.1 mrg */
2238 1.1 mrg static __inline vec_ushort8 spu_sub(vec_ushort8 a, vec_ushort8 b)
2239 1.1 mrg {
2240 1.1 mrg return ((vec_ushort8)(si_sfh((qword)(b), (qword)(a))));
2241 1.1 mrg }
2242 1.1 mrg
2243 1.1 mrg static __inline vec_short8 spu_sub(vec_short8 a, vec_short8 b)
2244 1.1 mrg {
2245 1.1 mrg return ((vec_short8)(si_sfh((qword)(b), (qword)(a))));
2246 1.1 mrg }
2247 1.1 mrg
2248 1.1 mrg static __inline vec_uint4 spu_sub(vec_uint4 a, vec_uint4 b)
2249 1.1 mrg {
2250 1.1 mrg return ((vec_uint4)(si_sf((qword)(b), (qword)(a))));
2251 1.1 mrg }
2252 1.1 mrg
2253 1.1 mrg static __inline vec_int4 spu_sub(vec_int4 a, vec_int4 b)
2254 1.1 mrg {
2255 1.1 mrg return ((vec_int4)(si_sf((qword)(b), (qword)(a))));
2256 1.1 mrg }
2257 1.1 mrg
2258 1.1 mrg static __inline vec_float4 spu_sub(vec_float4 a, vec_float4 b)
2259 1.1 mrg {
2260 1.1 mrg return ((vec_float4)(si_fs((qword)(a), (qword)(b))));
2261 1.1 mrg }
2262 1.1 mrg
2263 1.1 mrg static __inline vec_double2 spu_sub(vec_double2 a, vec_double2 b)
2264 1.1 mrg {
2265 1.1 mrg return ((vec_double2)(si_dfs((qword)(a), (qword)(b))));
2266 1.1 mrg }
2267 1.1 mrg
2268 1.1 mrg static __inline vec_uint4 spu_sub(unsigned int a, vec_uint4 b)
2269 1.1 mrg {
2270 1.1 mrg return ((vec_uint4)(si_sfi((qword)b, (int)a)));
2271 1.1 mrg }
2272 1.1 mrg
2273 1.1 mrg static __inline vec_int4 spu_sub(signed int a, vec_int4 b)
2274 1.1 mrg {
2275 1.1 mrg return ((vec_int4)(si_sfi((qword)b, (int)a)));
2276 1.1 mrg }
2277 1.1 mrg
2278 1.1 mrg static __inline vec_ushort8 spu_sub(unsigned short a, vec_ushort8 b)
2279 1.1 mrg {
2280 1.1 mrg return ((vec_ushort8)(si_sfhi((qword)b, (short)a)));
2281 1.1 mrg }
2282 1.1 mrg
2283 1.1 mrg static __inline vec_short8 spu_sub(signed short a, vec_short8 b)
2284 1.1 mrg {
2285 1.1 mrg return ((vec_short8)(si_sfhi((qword)b, (short)a)));
2286 1.1 mrg }
2287 1.1 mrg
2288 1.1 mrg /* spu_subx
2289 1.1 mrg * ========
2290 1.1 mrg */
2291 1.1 mrg static __inline vec_uint4 spu_subx(vec_uint4 a, vec_uint4 b, vec_uint4 c)
2292 1.1 mrg {
2293 1.1 mrg return ((vec_uint4)(si_sfx((qword)(b), (qword)(a), (qword)(c))));
2294 1.1 mrg }
2295 1.1 mrg
2296 1.1 mrg static __inline vec_int4 spu_subx(vec_int4 a, vec_int4 b, vec_int4 c)
2297 1.1 mrg {
2298 1.1 mrg return ((vec_int4)(si_sfx((qword)(b), (qword)(a), (qword)(c))));
2299 1.1 mrg }
2300 1.1 mrg
2301 1.1 mrg /* spu_sumb
2302 1.1 mrg * ========
2303 1.1 mrg */
2304 1.1 mrg static __inline vec_ushort8 spu_sumb(vec_uchar16 a, vec_uchar16 b)
2305 1.1 mrg {
2306 1.1 mrg return ((vec_ushort8)(si_sumb((qword)(a), (qword)(b))));
2307 1.1 mrg }
2308 1.1 mrg
2309 1.1 mrg
/* spu_sync
 * spu_sync_c
 * ========
 * Both macros intentionally expand to nothing in this mapping, and
 * discard no arguments (they take none).
 */
#define spu_sync() /* do nothing */

#define spu_sync_c() /* do nothing */
2317 1.1 mrg
2318 1.1 mrg
/* spu_writech
 * ===========
 * SPU channel writes have no VMX mapping: the macro expands to nothing,
 * silently discarding both arguments.
 */
#define spu_writech(_channel, _a) /* not mappable */

/* spu_writechqw
 * =============
 * Quadword channel write; likewise not mappable, expands to nothing.
 */
#define spu_writechqw(_channel, _a) /* not mappable */
2328 1.1 mrg
2329 1.1 mrg
2330 1.1 mrg /* spu_xor
2331 1.1 mrg * =======
2332 1.1 mrg */
2333 1.1 mrg static __inline vec_uchar16 spu_xor(vec_uchar16 a, vec_uchar16 b)
2334 1.1 mrg {
2335 1.1 mrg return ((vec_uchar16)(si_xor((qword)(a), (qword)(b))));
2336 1.1 mrg }
2337 1.1 mrg
2338 1.1 mrg static __inline vec_char16 spu_xor(vec_char16 a, vec_char16 b)
2339 1.1 mrg {
2340 1.1 mrg return ((vec_char16)(si_xor((qword)(a), (qword)(b))));
2341 1.1 mrg }
2342 1.1 mrg
2343 1.1 mrg static __inline vec_ushort8 spu_xor(vec_ushort8 a, vec_ushort8 b)
2344 1.1 mrg {
2345 1.1 mrg return ((vec_ushort8)(si_xor((qword)(a), (qword)(b))));
2346 1.1 mrg }
2347 1.1 mrg
2348 1.1 mrg static __inline vec_short8 spu_xor(vec_short8 a, vec_short8 b)
2349 1.1 mrg {
2350 1.1 mrg return ((vec_short8)(si_xor((qword)(a), (qword)(b))));
2351 1.1 mrg }
2352 1.1 mrg
2353 1.1 mrg static __inline vec_uint4 spu_xor(vec_uint4 a, vec_uint4 b)
2354 1.1 mrg {
2355 1.1 mrg return ((vec_uint4)(si_xor((qword)(a), (qword)(b))));
2356 1.1 mrg }
2357 1.1 mrg
2358 1.1 mrg static __inline vec_int4 spu_xor(vec_int4 a, vec_int4 b)
2359 1.1 mrg {
2360 1.1 mrg return ((vec_int4)(si_xor((qword)(a), (qword)(b))));
2361 1.1 mrg }
2362 1.1 mrg
2363 1.1 mrg static __inline vec_float4 spu_xor(vec_float4 a, vec_float4 b)
2364 1.1 mrg {
2365 1.1 mrg return ((vec_float4)(si_xor((qword)(a), (qword)(b))));
2366 1.1 mrg }
2367 1.1 mrg
2368 1.1 mrg static __inline vec_ullong2 spu_xor(vec_ullong2 a, vec_ullong2 b)
2369 1.1 mrg {
2370 1.1 mrg return ((vec_ullong2)(si_xor((qword)(a), (qword)(b))));
2371 1.1 mrg }
2372 1.1 mrg
2373 1.1 mrg static __inline vec_llong2 spu_xor(vec_llong2 a, vec_llong2 b)
2374 1.1 mrg {
2375 1.1 mrg return ((vec_llong2)(si_xor((qword)(a), (qword)(b))));
2376 1.1 mrg }
2377 1.1 mrg
2378 1.1 mrg static __inline vec_double2 spu_xor(vec_double2 a, vec_double2 b)
2379 1.1 mrg {
2380 1.1 mrg return ((vec_double2)(si_xor((qword)(a), (qword)(b))));
2381 1.1 mrg }
2382 1.1 mrg
2383 1.1 mrg static __inline vec_uchar16 spu_xor(vec_uchar16 a, unsigned char b)
2384 1.1 mrg {
2385 1.1 mrg return ((vec_uchar16)(si_xorbi((qword)(a), b)));
2386 1.1 mrg }
2387 1.1 mrg
2388 1.1 mrg static __inline vec_char16 spu_xor(vec_char16 a, signed char b)
2389 1.1 mrg {
2390 1.1 mrg return ((vec_char16)(si_xorbi((qword)(a), (unsigned char)(b))));
2391 1.1 mrg }
2392 1.1 mrg
2393 1.1 mrg static __inline vec_ushort8 spu_xor(vec_ushort8 a, unsigned short b)
2394 1.1 mrg {
2395 1.1 mrg return ((vec_ushort8)(si_xorhi((qword)(a), b)));
2396 1.1 mrg }
2397 1.1 mrg
2398 1.1 mrg static __inline vec_short8 spu_xor(vec_short8 a, signed short b)
2399 1.1 mrg {
2400 1.1 mrg return ((vec_short8)(si_xorhi((qword)(a), (unsigned short)(b))));
2401 1.1 mrg }
2402 1.1 mrg
2403 1.1 mrg static __inline vec_uint4 spu_xor(vec_uint4 a, unsigned int b)
2404 1.1 mrg {
2405 1.1 mrg return ((vec_uint4)(si_xori((qword)(a), b)));
2406 1.1 mrg }
2407 1.1 mrg
2408 1.1 mrg static __inline vec_int4 spu_xor(vec_int4 a, signed int b)
2409 1.1 mrg {
2410 1.1 mrg return ((vec_int4)(si_xori((qword)(a), (unsigned int)(b))));
2411 1.1 mrg }
2412 1.1 mrg
2413 1.1 mrg #endif /* !__SPU__ */
2414 1.1 mrg #endif /* __cplusplus */
2415 1.1 mrg #endif /* !_SPU2VMX_H_ */
2416