/* Copyright (C) 2012-2024 Free Software Foundation, Inc.
   Contributed by Altera and Mentor Graphics, Inc.

   This file is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   This file is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include "lib2-gcn.h"

/* 16-bit V64HI divide and modulo as used in gcn.
   This is a simple conversion from lib2-divmod.c.
*/ 27 28 #define MASKMODE v64hi 29 #include "amdgcn_veclib.h" 30 31 static v64udi 32 __udivmodv64hi4_aux (v64uhi num, v64uhi den, v64hi __mask) 33 { 34 v64uhi bit = VECTOR_INIT ((unsigned short)1U); 35 v64uhi res = VECTOR_INIT ((unsigned short)0U); 36 37 VECTOR_WHILE ((den < num) & (bit != 0) & ((den & (1L<<15)) == 0), 38 cond, NO_COND) 39 VECTOR_COND_MOVE (den, den << 1, cond); 40 VECTOR_COND_MOVE (bit, bit << 1, cond); 41 VECTOR_ENDWHILE 42 VECTOR_WHILE (bit != 0, loopcond, NO_COND) 43 VECTOR_IF2 (num >= den, ifcond, loopcond) 44 VECTOR_COND_MOVE (num, num - den, ifcond); 45 VECTOR_COND_MOVE (res, res | bit, ifcond); 46 VECTOR_ENDIF 47 VECTOR_COND_MOVE (bit, bit >> 1, loopcond); 48 VECTOR_COND_MOVE (den, den >> 1, loopcond); 49 VECTOR_ENDWHILE 50 51 return PACK_SI_PAIR (res, num); 52 } 53 54 static v64udi 55 __divmodv64hi4_aux (v64hi a, v64hi b, v64hi __mask) 56 { 57 v64hi nega = VECTOR_INIT ((short)0); 58 v64hi negb = VECTOR_INIT ((short)0); 59 60 VECTOR_IF (a < 0, cond) 61 VECTOR_COND_MOVE (a, -a, cond); 62 nega = cond; 63 VECTOR_ENDIF 64 65 VECTOR_IF (b < 0, cond) 66 VECTOR_COND_MOVE (b, -b, cond); 67 negb = cond; 68 VECTOR_ENDIF 69 70 v64uhi ua = __builtin_convertvector (a, v64uhi); 71 v64uhi ub = __builtin_convertvector (b, v64uhi); 72 v64udi pair = __udivmodv64hi4_aux (ua, ub, __mask); 73 74 v64hi quot = UNPACK_SI_LOW (v64hi, pair); 75 v64hi rem = UNPACK_SI_HIGH (v64hi, pair); 76 VECTOR_COND_MOVE (quot, -quot, nega ^ negb); 77 VECTOR_COND_MOVE (rem, -rem, nega); 78 pair = PACK_SI_PAIR (quot, rem); 79 80 return pair; 81 } 82 83 84 static inline v64hi 85 __divv64hi3_aux (v64hi a, v64hi b, v64hi __mask) 86 { 87 v64udi pair = __divmodv64hi4_aux (a, b, __mask); 88 return UNPACK_SI_LOW (v64hi, pair); 89 } 90 91 static inline v64hi 92 __modv64hi3_aux (v64hi a, v64hi b, v64hi __mask) 93 { 94 v64udi pair = __divmodv64hi4_aux (a, b, __mask); 95 return UNPACK_SI_HIGH (v64hi, pair); 96 } 97 98 99 static inline v64uhi 100 __udivv64hi3_aux (v64uhi a, v64uhi b, v64hi 
__mask) 101 { 102 v64udi pair = __udivmodv64hi4_aux (a, b, __mask); 103 return UNPACK_SI_LOW (v64uhi, pair); 104 } 105 106 static inline v64uhi 107 __umodv64hi3_aux (v64uhi a, v64uhi b, v64hi __mask) 108 { 109 v64udi pair = __udivmodv64hi4_aux (a, b, __mask); 110 return UNPACK_SI_HIGH (v64uhi, pair); 111 } 112 113 DEF_VARIANTS (__div, hi3, hi) 114 DEF_VARIANTS (__mod, hi3, hi) 115 DEF_VARIANTS_B (__divmod, hi4, udi, hi) 116 DEF_VARIANTS (__udiv, hi3, uhi) 117 DEF_VARIANTS (__umod, hi3, uhi) 118 DEF_VARIANTS_B (__udivmod, hi4, udi, uhi) 119