Lines Matching defs:__v

64 #define __neon_lane_index(__v, __i) (__arraycount(__v) - 1 - (__i))
65 #define __neon_laneq_index(__v, __i) (__arraycount(__v) - 1 - (__i))
67 #define __neon_lane_index(__v, __i) ((__i) ^ (__arraycount(__v) - 1))
68 #define __neon_laneq_index(__v, __i) ((__i) ^ (__arraycount(__v)/2 - 1))
70 #define __neon_lane_index(__v, __i) (__i)
71 #define __neon_laneq_index(__v, __i) (__i)
96 #define __neon_lane_index(__v, __i) (__i)
97 #define __neon_laneq_index(__v, __i) (__i)
99 #define __neon_lane_index(__v, __i) (__arraycount(__v) - 1 - (__i))
100 #define __neon_laneq_index(__v, __i) (__arraycount(__v) - 1 - (__i))
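
Taken together, the definitions at lines 64-100 map an architectural NEON lane number to the element index the compiler builtin expects: the identity on little-endian, a reversal (whole vector, or per 64-bit half via the XOR form) on big-endian. A minimal standalone sketch of the two basic mappings; arraycount, lane_index_le and lane_index_be are illustrative names, not from the header:

    #include <stdio.h>

    #define arraycount(a)        (sizeof(a) / sizeof((a)[0]))

    /* little-endian style: builtin element index == architectural lane */
    #define lane_index_le(v, i)  (i)
    /* big-endian style: architectural lane 0 lives in the last element */
    #define lane_index_be(v, i)  (arraycount(v) - 1 - (i))

    int main(void)
    {
        unsigned v[4];
        for (unsigned i = 0; i < arraycount(v); i++)
            printf("lane %u -> le %u, be %u\n", i,
                (unsigned)lane_index_le(v, i), (unsigned)lane_index_be(v, i));
        return 0;
    }
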
232 vgetq_lane_u32(uint32x4_t __v, uint8_t __i)
235 return __v[__neon_laneq_index(__v, __i)];
237 return (uint32_t)__builtin_neon_vget_laneuv4si((int32x4_t)__v, __i);
241 #define vgetq_lane_u32(__v, __i) \
242 (uint32_t)__builtin_neon_vgetq_lane_i32((int32x4_t)(__v), \
243 __neon_laneq_index(__v, __i))
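
Both forms of vgetq_lane_u32 (the inline function at line 232 and the macro at line 241) extract one 32-bit lane by architectural number. A usage sketch against the standard <arm_neon.h> intrinsics; it builds only for an ARM/AArch64 target, and the lane argument must be a compile-time constant:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void)
    {
        /* put 0xdeadbeef into lane 2 of a zeroed vector, then read it back */
        uint32x4_t v = vsetq_lane_u32(0xdeadbeefU, vdupq_n_u32(0), 2);
        printf("%#x\n", vgetq_lane_u32(v, 2));   /* prints 0xdeadbeef */
        return 0;
    }
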
262 uint32x4_t __v = (uint32x4_t)__builtin_neon_vld1q_v(__p32, 50);
264 __v = __builtin_shufflevector(__v, __v, 3,2,1,0);
266 return __v;
286 uint8x16_t __v = (uint8x16_t)__builtin_neon_vld1q_v(__p8, 48);
288 __v = __builtin_shufflevector(__v, __v,
291 return __v;
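
Lines 262-291 appear to be the clang big-endian path of vld1q_u32/vld1q_u8: after the raw __builtin_neon_vld1q_v load, the lanes are reshuffled so the first element in memory ends up in architectural lane 0 regardless of byte order. A usage sketch of the resulting intrinsic behaviour (standard <arm_neon.h>, ARM/AArch64 only):

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t buf[4] = { 1, 2, 3, 4 };
        uint32x4_t v = vld1q_u32(buf);          /* buf[0] -> lane 0, ... */
        printf("lane0=%u lane3=%u\n",
            vgetq_lane_u32(v, 0), vgetq_lane_u32(v, 3));   /* 1 and 4 */
        return 0;
    }
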
367 vreinterpretq_s32_u8(uint8x16_t __v)
369 return (int32x4_t)__v;
374 vreinterpretq_u16_u32(uint32x4_t __v)
376 return (uint16x8_t)__v;
381 vreinterpretq_u32_u16(uint16x8_t __v)
383 return (uint32x4_t)__v;
388 vreinterpretq_u32_u64(uint64x2_t __v)
390 return (uint32x4_t)__v;
395 vreinterpretq_u32_u8(uint8x16_t __v)
397 return (uint32x4_t)__v;
402 vreinterpretq_u64_u32(uint32x4_t __v)
404 return (uint64x2_t)__v;
409 vreinterpretq_u64_u8(uint8x16_t __v)
411 return (uint64x2_t)__v;
416 vreinterpretq_u8_s32(int32x4_t __v)
418 return (uint8x16_t)__v;
423 vreinterpretq_u8_u32(uint32x4_t __v)
425 return (uint8x16_t)__v;
430 vreinterpretq_u8_u64(uint64x2_t __v)
432 return (uint8x16_t)__v;
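
The vreinterpretq_* functions at lines 367-432 are pure type puns: a C vector cast that relabels the same 128 bits with a different lane width, moving nothing. A small usage sketch (standard <arm_neon.h>, ARM/AArch64 only):

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void)
    {
        uint32x4_t w = vdupq_n_u32(0x01020304);
        uint8x16_t b = vreinterpretq_u8_u32(w);  /* same bits, 16 byte lanes */
        /* byte lane 0 is the least significant byte of 32-bit lane 0 */
        printf("%#x\n", vgetq_lane_u8(b, 0));    /* prints 0x4 */
        return 0;
    }
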
437 vrev32q_u16(uint16x8_t __v)
440 return __builtin_shuffle(__v, (uint16x8_t) { 1,0, 3,2, 5,4, 7,6 });
442 return __builtin_shufflevector(__v, __v, 1,0, 3,2, 5,4, 7,6);
448 vrev32q_u8(uint8x16_t __v)
451 return __builtin_shuffle(__v,
454 return __builtin_shufflevector(__v, __v,
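
vrev32q_u16 and vrev32q_u8 (lines 437-454) reverse the 16-bit or 8-bit elements within every 32-bit group, which is what the 1,0, 3,2, ... shuffle masks spell out; the u8 form is a per-word byte swap. A usage sketch (standard <arm_neon.h>, ARM/AArch64 only):

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void)
    {
        uint32x4_t w = vdupq_n_u32(0x11223344);
        /* byte-swap every 32-bit word: 0x11223344 -> 0x44332211 */
        uint32x4_t s =
            vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(w)));
        printf("%#x\n", vgetq_lane_u32(s, 0));
        return 0;
    }
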
462 vsetq_lane_u32(uint32_t __x, uint32x4_t __v, uint8_t __i)
464 __v[__neon_laneq_index(__v, __i)] = __x;
465 return __v;
468 #define vsetq_lane_u32(__x, __v, __i) \
469 (uint32x4_t)__builtin_neon_vsetq_lane_i32((__x), (int32x4_t)(__v), \
470 __neon_laneq_index(__v, __i))
476 vsetq_lane_u64(uint64_t __x, uint64x2_t __v, uint8_t __i)
478 __v[__neon_laneq_index(__v, __i)] = __x;
479 return __v;
482 #define vsetq_lane_u64(__x, __v, __i) \
483 (uint64x2_t)__builtin_neon_vsetq_lane_i64((__x), (int64x2_t)(__v), \
484 __neon_laneq_index(__v, __i))
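
vsetq_lane_u32 and vsetq_lane_u64 (lines 462-484) return a copy of the vector with one lane replaced; the input vector itself is not modified. A usage sketch (standard <arm_neon.h>, ARM/AArch64 only):

    #include <arm_neon.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64x2_t v = vdupq_n_u64(0);
        v = vsetq_lane_u64(0x0123456789abcdefULL, v, 1);
        printf("lane1=%" PRIx64 "\n", vgetq_lane_u64(v, 1));
        return 0;
    }
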
490 vshlq_n_s32(int32x4_t __v, uint8_t __bits)
493 return (int32x4_t)__builtin_aarch64_ashlv4si(__v, __bits);
495 return (int32x4_t)__builtin_neon_vshl_nv4si(__v, __bits);
499 #define vshlq_n_s32(__v, __bits) \
500 (int32x4_t)__builtin_neon_vshlq_n_v((int32x4_t)(__v), (__bits), 34)
506 vshlq_n_u32(uint32x4_t __v, uint8_t __bits)
509 return (uint32x4_t)__builtin_aarch64_ashlv4si((int32x4_t)__v, __bits);
511 return (uint32x4_t)__builtin_neon_vshl_nv4si((int32x4_t)__v, __bits);
515 #define vshlq_n_u32(__v, __bits) \
516 (uint32x4_t)__builtin_neon_vshlq_n_v((int32x4_t)(__v), (__bits), 50)
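
vshlq_n_s32 and vshlq_n_u32 (lines 490-516) shift every 32-bit lane left by an immediate; in the clang macro form the count has to be a compile-time constant. A usage sketch (standard <arm_neon.h>, ARM/AArch64 only):

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void)
    {
        uint32x4_t v = vdupq_n_u32(1);
        uint32x4_t r = vshlq_n_u32(v, 7);       /* each lane: 1 << 7 */
        printf("%u\n", vgetq_lane_u32(r, 0));   /* prints 128 */
        return 0;
    }
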
522 vshrq_n_u32(uint32x4_t __v, uint8_t __bits)
526 return __builtin_aarch64_lshrv4si_uus(__v, __bits);
528 return (uint32x4_t)__builtin_aarch64_lshrv4si((int32x4_t)__v, __bits);
531 return (uint32x4_t)__builtin_neon_vshru_nv4si((int32x4_t)__v, __bits);
535 #define vshrq_n_u32(__v, __bits) \
536 (uint32x4_t)__builtin_neon_vshrq_n_v((int32x4_t)(__v), (__bits), 50)
542 vshrq_n_u8(uint8x16_t __v, uint8_t __bits)
546 return __builtin_aarch64_lshrv16qi_uus(__v, __bits);
548 return (uint8x16_t)__builtin_aarch64_lshrv16qi((int8x16_t)__v, __bits);
551 return (uint8x16_t)__builtin_neon_vshru_nv16qi((int8x16_t)__v, __bits);
555 #define vshrq_n_u8(__v, __bits) \
556 (uint8x16_t)__builtin_neon_vshrq_n_v((int8x16_t)(__v), (__bits), 48)
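
vshrq_n_u32 and vshrq_n_u8 (lines 522-556) are the matching logical right shifts by an immediate. Paired with vshlq_n_u32 they give the per-lane rotate that callers of this header typically build; ROL32X4 below is an illustrative macro, not something the header defines (standard <arm_neon.h>, ARM/AArch64 only):

    #include <arm_neon.h>
    #include <stdio.h>

    /* rotate each 32-bit lane left by a constant 1..31 bits */
    #define ROL32X4(v, n) \
        vorrq_u32(vshlq_n_u32((v), (n)), vshrq_n_u32((v), 32 - (n)))

    int main(void)
    {
        uint32x4_t v = vdupq_n_u32(0x80000001U);
        printf("%#x\n", vgetq_lane_u32(ROL32X4(v, 7), 0));  /* prints 0xc0 */
        return 0;
    }
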
628 vst1q_u32(uint32_t *__p32, uint32x4_t __v)
634 __builtin_aarch64_st1v4si(__p, (int32x4_t)__v);
638 __builtin_neon_vst1v4si(__p, (int32x4_t)__v);
642 __v = __builtin_shufflevector(__v, __v, 3,2,1,0);
644 __builtin_neon_vst1q_v(__p32, __v, 50);
650 vst1q_u8(uint8_t *__p8, uint8x16_t __v)
656 __builtin_aarch64_st1v16qi(__p, (int8x16_t)__v);
660 __builtin_neon_vst1v16qi(__p, (int8x16_t)__v);
664 __v = __builtin_shufflevector(__v, __v,
667 __builtin_neon_vst1q_v(__p8, __v, 48);
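
vst1q_u32 and vst1q_u8 (lines 628-667) are the stores that mirror the loads at lines 262-291, with the same clang big-endian pre-store shuffle so that lane 0 always lands at the lowest address. A round-trip usage sketch (standard <arm_neon.h>, ARM/AArch64 only):

    #include <arm_neon.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint8_t in[16], out[16];
        for (int i = 0; i < 16; i++)
            in[i] = (uint8_t)i;
        vst1q_u8(out, vld1q_u8(in));            /* load, then store back */
        printf("round-trip %s\n", memcmp(in, out, 16) == 0 ? "ok" : "FAIL");
        return 0;
    }
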