/* Copyright (C) 2012-2024 Free Software Foundation, Inc.
   Contributed by Altera and Mentor Graphics, Inc.

   This file is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   This file is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include "lib2-gcn.h"

/* 8-bit V64QI divide and modulo as used in gcn.
   This is a simple conversion from lib2-divmod.c.
*/

/* Select the 8-bit (v64qi) variants of the VECTOR_* helpers below.
   The VECTOR_WHILE/VECTOR_IF/VECTOR_COND_MOVE/PACK/UNPACK macros are all
   defined in amdgcn_veclib.h (not visible here); from their use they
   appear to implement per-lane predicated control flow over 64-lane
   vectors — TODO confirm against that header.  */
#define MASKMODE v64qi
#include "amdgcn_veclib.h"

/* Unsigned 8-bit divide-and-modulo over 64 lanes, classic shift-subtract
   long division.  __MASK presumably limits work to the active lanes set
   up by the DEF_VARIANTS wrappers — verify against amdgcn_veclib.h.
   Returns quotient and remainder packed into one v64udi via
   PACK_SI_PAIR (quotient in the low part, remainder in the high part,
   matching the UNPACK_SI_LOW/UNPACK_SI_HIGH uses below).  */
static v64udi
__udivmodv64qi4_aux (v64uqi num, v64uqi den, v64qi __mask)
{
  v64uqi bit = VECTOR_INIT ((unsigned char)1U);
  v64uqi res = VECTOR_INIT ((unsigned char)0U);

  /* Per lane: shift DEN (and the quotient bit marker) left until DEN
     is >= NUM, the marker falls off the top, or DEN's top bit (bit 7 of
     the 8-bit element) would be shifted out.  */
  VECTOR_WHILE ((den < num) & (bit != 0) & ((den & (1<<7)) == 0),
		cond, NO_COND)
    VECTOR_COND_MOVE (den, den << 1, cond);
    VECTOR_COND_MOVE (bit, bit << 1, cond);
  VECTOR_ENDWHILE
  /* Per lane: walk the marker bit back down, subtracting DEN and setting
     the corresponding quotient bit whenever it fits.  */
  VECTOR_WHILE (bit != 0, loopcond, NO_COND)
    VECTOR_IF2 (num >= den, ifcond, loopcond)
      VECTOR_COND_MOVE (num, num - den, ifcond);
      VECTOR_COND_MOVE (res, res | bit, ifcond);
    VECTOR_ENDIF
    VECTOR_COND_MOVE (bit, bit >> 1, loopcond);
    VECTOR_COND_MOVE (den, den >> 1, loopcond);
  VECTOR_ENDWHILE

  /* RES = quotient, NUM = remainder at this point.  */
  return PACK_SI_PAIR (res, num);
}

/* Signed 8-bit divide-and-modulo: strip the signs, do the unsigned
   division, then fix up the signs of the results.  The quotient is
   negative iff exactly one operand was negative; the remainder takes
   the sign of the dividend (C truncating-division semantics).  */
static v64udi
__divmodv64qi4_aux (v64qi a, v64qi b, v64qi __mask)
{
  v64qi nega = VECTOR_INIT ((char)0);
  v64qi negb = VECTOR_INIT ((char)0);

  /* Record which lanes of A were negative and negate them.  */
  VECTOR_IF (a < 0, cond)
    VECTOR_COND_MOVE (a, -a, cond);
    nega = cond;
  VECTOR_ENDIF

  /* Likewise for B.  */
  VECTOR_IF (b < 0, cond)
    VECTOR_COND_MOVE (b, -b, cond);
    negb = cond;
  VECTOR_ENDIF

  v64uqi ua = __builtin_convertvector (a, v64uqi);
  v64uqi ub = __builtin_convertvector (b, v64uqi);
  v64udi pair = __udivmodv64qi4_aux (ua, ub, __mask);

  v64qi quot = UNPACK_SI_LOW (v64qi, pair);
  v64qi rem = UNPACK_SI_HIGH (v64qi, pair);
  /* Negate the quotient where the operand signs differed, and the
     remainder where the dividend was negative.  */
  VECTOR_COND_MOVE (quot, -quot, nega ^ negb);
  VECTOR_COND_MOVE (rem, -rem, nega);
  pair = PACK_SI_PAIR (quot, rem);

  return pair;
}


/* Signed divide: quotient half of the divmod pair.  */
static inline v64qi
__divv64qi3_aux (v64qi a, v64qi b, v64qi __mask)
{
  v64udi pair = __divmodv64qi4_aux (a, b, __mask);
  return UNPACK_SI_LOW (v64qi, pair);
}

/* Signed modulo: remainder half of the divmod pair.  */
static inline v64qi
__modv64qi3_aux (v64qi a, v64qi b, v64qi __mask)
{
  v64udi pair = __divmodv64qi4_aux (a, b, __mask);
  return UNPACK_SI_HIGH (v64qi, pair);
}


/* Unsigned divide: quotient half of the divmod pair.  */
static inline v64uqi
__udivv64qi3_aux (v64uqi a, v64uqi b, v64qi __mask)
{
  v64udi pair = __udivmodv64qi4_aux (a, b, __mask);
  return UNPACK_SI_LOW (v64uqi, pair);
}

/* Unsigned modulo: remainder half of the divmod pair.  */
static inline v64uqi
__umodv64qi3_aux (v64uqi a, v64uqi b, v64qi __mask)
{
  v64udi pair = __udivmodv64qi4_aux (a, b, __mask);
  return UNPACK_SI_HIGH (v64uqi, pair);
}

/* Emit the public entry points (e.g. __divv64qi3 etc.) that wrap the
   *_aux workers above; DEF_VARIANTS/DEF_VARIANTS_B come from
   amdgcn_veclib.h — presumably they also generate the masked variants
   used by the vectorizer.  TODO confirm against that header.  */
DEF_VARIANTS (__div, qi3, qi)
DEF_VARIANTS (__mod, qi3, qi)
DEF_VARIANTS_B (__divmod, qi4, udi, qi)
DEF_VARIANTS (__udiv, qi3, uqi)
DEF_VARIANTS (__umod, qi3, uqi)
DEF_VARIANTS_B (__udivmod, qi4, udi, uqi)