/*
 * The implementations contained in this file are heavily based on the
 * implementations found in the Berkeley SoftFloat library. As such, they are
 * licensed under the same 3-clause BSD license:
 *
 * License for Berkeley SoftFloat Release 3e
 *
 * John R. Hauser
 * 2018 January 20
 *
 * The following applies to the whole of SoftFloat Release 3e as well as to
 * each source file individually.
 *
 * Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 The Regents of the
 * University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions, and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions, and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the University nor the names of its contributors
 *     may be used to endorse or promote products derived from this software
 *     without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#version 430
#extension GL_ARB_gpu_shader_int64 : enable
#extension GL_ARB_shader_bit_encoding : enable
#extension GL_EXT_shader_integer_mix : enable
#extension GL_MESA_shader_integer_functions : enable

#pragma warning(off)

/* Software IEEE floating-point rounding mode.
 * GLSL spec section "4.7.1 Range and Precision" leaves the rounding mode
 * undefined and provides no way to set it.  Here, however, the rounding mode
 * is fixed at compile time by the definition below.
 */
#define FLOAT_ROUND_NEAREST_EVEN    0
#define FLOAT_ROUND_TO_ZERO         1
#define FLOAT_ROUND_DOWN            2
#define FLOAT_ROUND_UP              3
#define FLOAT_ROUNDING_MODE         FLOAT_ROUND_NEAREST_EVEN
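
/* Layout used throughout this file: a double is carried as a uint64_t whose
 * bits are the IEEE 754 binary64 encoding.  unpackUint2x32() yields a uvec2
 * in which .x holds the low 32 fraction bits and .y holds the sign bit
 * (bit 31), the 11 exponent bits (bits 30-20), and the high 20 fraction bits.
 */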

/* Absolute value of a Float64: clear the sign bit. */
uint64_t
__fabs64(uint64_t __a)
{
   uvec2 a = unpackUint2x32(__a);
   a.y &= 0x7FFFFFFFu;
   return packUint2x32(a);
}

/* Returns true if the double-precision floating-point value `a' is a NaN,
 * and false otherwise.
 */
bool
__is_nan(uint64_t __a)
{
   uvec2 a = unpackUint2x32(__a);
   return (0xFFE00000u <= (a.y<<1)) &&
      ((a.x != 0u) || ((a.y & 0x000FFFFFu) != 0u));
}
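
/* The test above works because (a.y << 1) discards the sign bit, leaving the
 * exponent in the top 11 bits: 0xFFE00000u is 0x7FF (an all-ones exponent)
 * shifted into that position.  The second clause then requires a nonzero
 * fraction, which is what separates NaNs from infinities.  E.g.
 * 0x7FF8000000000000UL (a quiet NaN) yields true, while 0x7FF0000000000000UL
 * (+infinity) yields false.
 */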

/* Negate value of a Float64: toggle the sign bit. */
uint64_t
__fneg64(uint64_t __a)
{
   uvec2 a = unpackUint2x32(__a);
   uint t = a.y;

   t ^= (1u << 31);
   a.y = mix(t, a.y, __is_nan(__a));
   return packUint2x32(a);
}

uint64_t
__fsign64(uint64_t __a)
{
   uvec2 a = unpackUint2x32(__a);
   uvec2 retval;
   retval.x = 0u;
   retval.y = mix((a.y & 0x80000000u) | 0x3FF00000u, 0u, (a.y << 1 | a.x) == 0u);
   return packUint2x32(retval);
}

/* Returns the fraction bits of the double-precision floating-point value `a'.*/
uint
__extractFloat64FracLo(uint64_t a)
{
   return unpackUint2x32(a).x;
}

uint
__extractFloat64FracHi(uint64_t a)
{
   return unpackUint2x32(a).y & 0x000FFFFFu;
}

/* Returns the exponent bits of the double-precision floating-point value `a'.*/
int
__extractFloat64Exp(uint64_t __a)
{
   uvec2 a = unpackUint2x32(__a);
   return int((a.y>>20) & 0x7FFu);
}
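
/* Worked example: for -2.5 (bits 0xC004000000000000UL) these helpers give
 * FracHi = 0x00040000u, FracLo = 0u, and an exponent field of 0x400
 * (unbiased +1); the sign bit, extracted further below, is 1.
 */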

bool
__feq64_nonnan(uint64_t __a, uint64_t __b)
{
   uvec2 a = unpackUint2x32(__a);
   uvec2 b = unpackUint2x32(__b);
   return (a.x == b.x) &&
          ((a.y == b.y) || ((a.x == 0u) && (((a.y | b.y)<<1) == 0u)));
}

/* Returns true if the double-precision floating-point value `a' is equal to the
 * corresponding value `b', and false otherwise.  The comparison is performed
 * according to the IEEE Standard for Floating-Point Arithmetic.
 */
bool
__feq64(uint64_t a, uint64_t b)
{
   if (__is_nan(a) || __is_nan(b))
      return false;

   return __feq64_nonnan(a, b);
}

/* Returns true if the double-precision floating-point value `a' is not equal
 * to the corresponding value `b', and false otherwise.  The comparison is
 * performed according to the IEEE Standard for Floating-Point Arithmetic.
 */
bool
__fne64(uint64_t a, uint64_t b)
{
   if (__is_nan(a) || __is_nan(b))
      return true;

   return !__feq64_nonnan(a, b);
}

/* Returns the sign bit of the double-precision floating-point value `a'.*/
uint
__extractFloat64Sign(uint64_t a)
{
   return unpackUint2x32(a).y >> 31;
}

/* Returns true if the 64-bit value formed by concatenating `a0' and `a1' is less
 * than the 64-bit value formed by concatenating `b0' and `b1'.  Otherwise,
 * returns false.
 */
bool
lt64(uint a0, uint a1, uint b0, uint b1)
{
   return (a0 < b0) || ((a0 == b0) && (a1 < b1));
}

bool
__flt64_nonnan(uint64_t __a, uint64_t __b)
{
   uvec2 a = unpackUint2x32(__a);
   uvec2 b = unpackUint2x32(__b);
   uint aSign = __extractFloat64Sign(__a);
   uint bSign = __extractFloat64Sign(__b);
   if (aSign != bSign)
      return (aSign != 0u) && ((((a.y | b.y)<<1) | a.x | b.x) != 0u);

   return mix(lt64(a.y, a.x, b.y, b.x), lt64(b.y, b.x, a.y, a.x), aSign != 0u);
}

/* Returns true if the double-precision floating-point value `a' is less than
 * the corresponding value `b', and false otherwise.  The comparison is performed
 * according to the IEEE Standard for Floating-Point Arithmetic.
 */
bool
__flt64(uint64_t a, uint64_t b)
{
   if (__is_nan(a) || __is_nan(b))
      return false;

   return __flt64_nonnan(a, b);
}

/* Returns true if the double-precision floating-point value `a' is greater
 * than or equal to the corresponding value `b', and false otherwise.  The
 * comparison is performed according to the IEEE Standard for Floating-Point
 * Arithmetic.
 */
bool
__fge64(uint64_t a, uint64_t b)
{
   if (__is_nan(a) || __is_nan(b))
      return false;

   return !__flt64_nonnan(a, b);
}

/* Adds the 64-bit value formed by concatenating `a0' and `a1' to the 64-bit
 * value formed by concatenating `b0' and `b1'.  Addition is modulo 2^64, so
 * any carry out is lost.  The result is broken into two 32-bit pieces which
 * are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
 */
void
__add64(uint a0, uint a1, uint b0, uint b1,
        out uint z0Ptr,
        out uint z1Ptr)
{
   uint z1 = a1 + b1;
   z1Ptr = z1;
   z0Ptr = a0 + b0 + uint(z1 < a1);
}
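
/* E.g. __add64(0x00000001u, 0xFFFFFFFFu, 0x00000000u, 0x00000001u, z0, z1)
 * wraps the low word to zero and carries into the high word, giving
 * z0 = 0x00000002u and z1 = 0x00000000u.
 */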

/* Subtracts the 64-bit value formed by concatenating `b0' and `b1' from the
 * 64-bit value formed by concatenating `a0' and `a1'.  Subtraction is modulo
 * 2^64, so any borrow out (carry out) is lost.  The result is broken into two
 * 32-bit pieces which are stored at the locations pointed to by `z0Ptr' and
 * `z1Ptr'.
 */
void
__sub64(uint a0, uint a1, uint b0, uint b1,
        out uint z0Ptr,
        out uint z1Ptr)
{
   z1Ptr = a1 - b1;
   z0Ptr = a0 - b0 - uint(a1 < b1);
}

/* Shifts the 64-bit value formed by concatenating `a0' and `a1' right by the
 * number of bits given in `count'.  If any nonzero bits are shifted off, they
 * are "jammed" into the least significant bit of the result by setting the
 * least significant bit to 1.  The value of `count' can be arbitrarily large;
 * in particular, if `count' is greater than 64, the result will be either 0
 * or 1, depending on whether the concatenation of `a0' and `a1' is zero or
 * nonzero.  The result is broken into two 32-bit pieces which are stored at
 * the locations pointed to by `z0Ptr' and `z1Ptr'.
 */
void
__shift64RightJamming(uint a0,
                      uint a1,
                      int count,
                      out uint z0Ptr,
                      out uint z1Ptr)
{
   uint z0;
   uint z1;
   int negCount = (-count) & 31;

   z0 = mix(0u, a0, count == 0);
   z0 = mix(z0, (a0 >> count), count < 32);

   z1 = uint((a0 | a1) != 0u); /* count >= 64 */
   uint z1_lt64 = (a0>>(count & 31)) | uint(((a0<<negCount) | a1) != 0u);
   z1 = mix(z1, z1_lt64, count < 64);
   z1 = mix(z1, (a0 | uint(a1 != 0u)), count == 32);
   uint z1_lt32 = (a0<<negCount) | (a1>>count) | uint ((a1<<negCount) != 0u);
   z1 = mix(z1, z1_lt32, count < 32);
   z1 = mix(z1, a1, count == 0);
   z1Ptr = z1;
   z0Ptr = z0;
}
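
/* E.g. shifting a0:a1 = 0x00000000u:0x00000005u right by one gives
 * 0x00000000u:0x00000002u with a discarded 1 bit, so the result is jammed to
 * 0x00000000u:0x00000003u.  The jam bit only records that nonzero bits were
 * lost; it does not say how many.
 */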

/* Shifts the 96-bit value formed by concatenating `a0', `a1', and `a2' right
 * by 32 _plus_ the number of bits given in `count'.  The shifted result is
 * at most 64 nonzero bits; these are broken into two 32-bit pieces which are
 * stored at the locations pointed to by `z0Ptr' and `z1Ptr'.  The bits shifted
 * off form a third 32-bit result as follows:  The _last_ bit shifted off is
 * the most-significant bit of the extra result, and the other 31 bits of the
 * extra result are all zero if and only if _all_but_the_last_ bits shifted off
 * were all zero.  This extra result is stored in the location pointed to by
 * `z2Ptr'.  The value of `count' can be arbitrarily large.
 *     (This routine makes more sense if `a0', `a1', and `a2' are considered
 * to form a fixed-point value with binary point between `a1' and `a2'.  This
 * fixed-point value is shifted right by the number of bits given in `count',
 * and the integer part of the result is returned at the locations pointed to
 * by `z0Ptr' and `z1Ptr'.  The fractional part of the result may be slightly
 * corrupted as described above, and is returned at the location pointed to by
 * `z2Ptr'.)
 */
void
__shift64ExtraRightJamming(uint a0, uint a1, uint a2,
                           int count,
                           out uint z0Ptr,
                           out uint z1Ptr,
                           out uint z2Ptr)
{
   uint z0 = 0u;
   uint z1;
   uint z2;
   int negCount = (-count) & 31;

   z2 = mix(uint(a0 != 0u), a0, count == 64);
   z2 = mix(z2, a0 << negCount, count < 64);
   z2 = mix(z2, a1 << negCount, count < 32);

   z1 = mix(0u, (a0 >> (count & 31)), count < 64);
   z1 = mix(z1, (a0<<negCount) | (a1>>count), count < 32);

   a2 = mix(a2 | a1, a2, count < 32);
   z0 = mix(z0, a0 >> count, count < 32);
   z2 |= uint(a2 != 0u);

   z0 = mix(z0, 0u, (count == 32));
   z1 = mix(z1, a0, (count == 32));
   z2 = mix(z2, a1, (count == 32));
   z0 = mix(z0, a0, (count == 0));
   z1 = mix(z1, a1, (count == 0));
   z2 = mix(z2, a2, (count == 0));
   z2Ptr = z2;
   z1Ptr = z1;
   z0Ptr = z0;
}

/* Shifts the 64-bit value formed by concatenating `a0' and `a1' left by the
 * number of bits given in `count'.  Any bits shifted off are lost.  The value
 * of `count' must be less than 32.  The result is broken into two 32-bit
 * pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
 */
void
__shortShift64Left(uint a0, uint a1,
                   int count,
                   out uint z0Ptr,
                   out uint z1Ptr)
{
   z1Ptr = a1<<count;
   z0Ptr = mix((a0 << count | (a1 >> ((-count) & 31))), a0, count == 0);
}
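
/* E.g. __shortShift64Left(0x00000001u, 0x80000000u, 1, z0, z1) moves the top
 * bit of the low word into the high word: z0 = 0x00000003u, z1 = 0x00000000u.
 */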

/* Packs the sign `zSign', the exponent `zExp', and the significand formed by
 * the concatenation of `zFrac0' and `zFrac1' into a double-precision floating-
 * point value, returning the result.  After being shifted into the proper
 * positions, the three fields `zSign', `zExp', and `zFrac0' are simply added
 * together to form the most significant 32 bits of the result.  This means
 * that any integer portion of `zFrac0' will be added into the exponent.  Since
 * a properly normalized significand will have an integer portion equal to 1,
 * the `zExp' input should be 1 less than the desired result exponent whenever
 * `zFrac0' and `zFrac1' concatenated form a complete, normalized significand.
 */
uint64_t
__packFloat64(uint zSign, int zExp, uint zFrac0, uint zFrac1)
{
   uvec2 z;

   z.y = (zSign << 31) + (uint(zExp) << 20) + zFrac0;
   z.x = zFrac1;
   return packUint2x32(z);
}
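
/* E.g. __packFloat64(0u, 0x3FF, 0u, 0u) and, with the integer bit still in
 * the significand, __packFloat64(0u, 0x3FE, 0x00100000u, 0u) both produce
 * 0x3FF0000000000000UL (1.0): the significand's integer bit carries into the
 * exponent field, which is why `zExp' is passed in 1 low.
 */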

/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',
 * and extended significand formed by the concatenation of `zFrac0', `zFrac1',
 * and `zFrac2', and returns the proper double-precision floating-point value
 * corresponding to the abstract input.  Ordinarily, the abstract value is
 * simply rounded and packed into the double-precision format, with the inexact
 * exception raised if the abstract input cannot be represented exactly.
 * However, if the abstract value is too large, the overflow and inexact
 * exceptions are raised and an infinity or maximal finite value is returned.
 * If the abstract value is too small, the input value is rounded to a
 * subnormal number, and the underflow and inexact exceptions are raised if the
 * abstract input cannot be represented exactly as a subnormal double-precision
 * floating-point number.
 *     The input significand must be normalized or smaller.  If the input
 * significand is not normalized, `zExp' must be 0; in that case, the result
 * returned is a subnormal number, and it must not require rounding.  In the
 * usual case that the input significand is normalized, `zExp' must be 1 less
 * than the "true" floating-point exponent.  The handling of underflow and
 * overflow follows the IEEE Standard for Floating-Point Arithmetic.
 */
uint64_t
__roundAndPackFloat64(uint zSign,
                      int zExp,
                      uint zFrac0,
                      uint zFrac1,
                      uint zFrac2)
{
   bool roundNearestEven;
   bool increment;

   roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;
   increment = int(zFrac2) < 0;
   if (!roundNearestEven) {
      if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) {
         increment = false;
      } else {
         if (zSign != 0u) {
            increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&
               (zFrac2 != 0u);
         } else {
            increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
               (zFrac2 != 0u);
         }
      }
   }
   /* The unsigned comparison catches both overflow (zExp large and positive)
    * and underflow (negative zExp wraps to a huge unsigned value), matching
    * the unsigned-compare trick in the original SoftFloat code.
    */
   if (0x7FDu <= uint(zExp)) {
      if ((0x7FD < zExp) ||
         ((zExp == 0x7FD) &&
            (0x001FFFFFu == zFrac0 && 0xFFFFFFFFu == zFrac1) &&
               increment)) {
         if ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) ||
            ((zSign != 0u) && (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP)) ||
               ((zSign == 0u) && (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN))) {
            return __packFloat64(zSign, 0x7FE, 0x000FFFFFu, 0xFFFFFFFFu);
         }
         return __packFloat64(zSign, 0x7FF, 0u, 0u);
      }
      if (zExp < 0) {
         __shift64ExtraRightJamming(
            zFrac0, zFrac1, zFrac2, -zExp, zFrac0, zFrac1, zFrac2);
         zExp = 0;
         if (roundNearestEven) {
            increment = int(zFrac2) < 0;
         } else {
            if (zSign != 0u) {
               increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&
                  (zFrac2 != 0u);
            } else {
               increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
                  (zFrac2 != 0u);
            }
         }
      }
   }
   if (increment) {
      __add64(zFrac0, zFrac1, 0u, 1u, zFrac0, zFrac1);
      zFrac1 &= ~((zFrac2 + uint(zFrac2 == 0u)) & uint(roundNearestEven));
   } else {
      zExp = mix(zExp, 0, (zFrac0 | zFrac1) == 0u);
   }
   return __packFloat64(zSign, zExp, zFrac0, zFrac1);
}

uint64_t
__roundAndPackUInt64(uint zSign, uint zFrac0, uint zFrac1, uint zFrac2)
{
   bool roundNearestEven;
   bool increment;
   uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;

   roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;

   /* Round up when the discarded bits are at or above one half; this also
    * initializes `increment' on every path.
    */
   increment = int(zFrac2) < 0;

   if (!roundNearestEven) {
      if (zSign != 0u) {
         if ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) && (zFrac2 != 0u)) {
            increment = false;
         }
      } else {
         increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
            (zFrac2 != 0u);
      }
   }

   if (increment) {
      __add64(zFrac0, zFrac1, 0u, 1u, zFrac0, zFrac1);
      if ((zFrac0 | zFrac1) != 0u)
         zFrac1 &= ~(1u) + uint(zFrac2 == 0u) & uint(roundNearestEven);
   }
   return mix(packUint2x32(uvec2(zFrac1, zFrac0)), default_nan,
              (zSign != 0u && (zFrac0 | zFrac1) != 0u));
}

int64_t
__roundAndPackInt64(uint zSign, uint zFrac0, uint zFrac1, uint zFrac2)
{
   bool roundNearestEven;
   bool increment;
   int64_t default_NegNaN = -0x7FFFFFFFFFFFFFFEL;
   int64_t default_PosNaN = 0xFFFFFFFFFFFFFFFFL;

   roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;

   /* As above, initialize `increment' on every path. */
   increment = int(zFrac2) < 0;

   if (!roundNearestEven) {
      if (zSign != 0u) {
         increment = ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&
            (zFrac2 != 0u));
      } else {
         increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
            (zFrac2 != 0u);
      }
   }

   if (increment) {
      __add64(zFrac0, zFrac1, 0u, 1u, zFrac0, zFrac1);
      if ((zFrac0 | zFrac1) != 0u)
         zFrac1 &= ~(1u) + uint(zFrac2 == 0u) & uint(roundNearestEven);
   }

   int64_t absZ = mix(int64_t(packUint2x32(uvec2(zFrac1, zFrac0))),
                      -int64_t(packUint2x32(uvec2(zFrac1, zFrac0))),
                      (zSign != 0u));
   int64_t nan = mix(default_PosNaN, default_NegNaN, bool(zSign));
   return mix(absZ, nan, bool(zSign ^ uint(absZ < 0)) && bool(absZ));
}

/* Returns the number of leading 0 bits before the most-significant 1 bit of
 * `a'.  If `a' is zero, 32 is returned.
 */
int
__countLeadingZeros32(uint a)
{
   int shiftCount;
   shiftCount = mix(31 - findMSB(a), 32, a == 0u);
   return shiftCount;
}
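
/* E.g. __countLeadingZeros32(0x00010000u) = 15 (findMSB returns 16), and
 * __countLeadingZeros32(0u) = 32.
 */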

/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',
 * and significand formed by the concatenation of `zFrac0' and `zFrac1', and
 * returns the proper double-precision floating-point value corresponding
 * to the abstract input.  This routine is just like `__roundAndPackFloat64'
 * except that the input significand has fewer bits and does not have to be
 * normalized.  In all cases, `zExp' must be 1 less than the "true" floating-
 * point exponent.
 */
uint64_t
__normalizeRoundAndPackFloat64(uint zSign,
                               int zExp,
                               uint zFrac0,
                               uint zFrac1)
{
   int shiftCount;
   uint zFrac2;

   if (zFrac0 == 0u) {
      zExp -= 32;
      zFrac0 = zFrac1;
      zFrac1 = 0u;
   }

   shiftCount = __countLeadingZeros32(zFrac0) - 11;
   if (0 <= shiftCount) {
      zFrac2 = 0u;
      __shortShift64Left(zFrac0, zFrac1, shiftCount, zFrac0, zFrac1);
   } else {
      __shift64ExtraRightJamming(
         zFrac0, zFrac1, 0u, -shiftCount, zFrac0, zFrac1, zFrac2);
   }
   zExp -= shiftCount;
   return __roundAndPackFloat64(zSign, zExp, zFrac0, zFrac1, zFrac2);
}

/* Takes two double-precision floating-point values `a' and `b', one of which
 * is a NaN, and returns the appropriate NaN result.
 */
uint64_t
__propagateFloat64NaN(uint64_t __a, uint64_t __b)
{
   bool aIsNaN = __is_nan(__a);
   bool bIsNaN = __is_nan(__b);
   uvec2 a = unpackUint2x32(__a);
   uvec2 b = unpackUint2x32(__b);
   /* Set the quiet bit (fraction bit 51) on both candidates before picking. */
   a.y |= 0x00080000u;
   b.y |= 0x00080000u;

   return packUint2x32(mix(b, mix(a, b, bvec2(bIsNaN, bIsNaN)), bvec2(aIsNaN, aIsNaN)));
}

/* Returns the result of adding the double-precision floating-point values
 * `a' and `b'.  The operation is performed according to the IEEE Standard for
 * Floating-Point Arithmetic.
 */
uint64_t
__fadd64(uint64_t a, uint64_t b)
{
   uint aSign = __extractFloat64Sign(a);
   uint bSign = __extractFloat64Sign(b);
   uint aFracLo = __extractFloat64FracLo(a);
   uint aFracHi = __extractFloat64FracHi(a);
   uint bFracLo = __extractFloat64FracLo(b);
   uint bFracHi = __extractFloat64FracHi(b);
   int aExp = __extractFloat64Exp(a);
   int bExp = __extractFloat64Exp(b);
   uint zFrac0 = 0u;
   uint zFrac1 = 0u;
   int expDiff = aExp - bExp;
   if (aSign == bSign) {
      uint zFrac2 = 0u;
      int zExp;
      bool orig_exp_diff_is_zero = (expDiff == 0);

      if (orig_exp_diff_is_zero) {
         if (aExp == 0x7FF) {
            bool propagate = (aFracHi | aFracLo | bFracHi | bFracLo) != 0u;
            return mix(a, __propagateFloat64NaN(a, b), propagate);
         }
         __add64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
         if (aExp == 0)
            return __packFloat64(aSign, 0, zFrac0, zFrac1);
         zFrac2 = 0u;
         zFrac0 |= 0x00200000u;
         zExp = aExp;
         __shift64ExtraRightJamming(
            zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);
      } else if (0 < expDiff) {
         if (aExp == 0x7FF) {
            bool propagate = (aFracHi | aFracLo) != 0u;
            return mix(a, __propagateFloat64NaN(a, b), propagate);
         }

         expDiff = mix(expDiff, expDiff - 1, bExp == 0);
         bFracHi = mix(bFracHi | 0x00100000u, bFracHi, bExp == 0);
         __shift64ExtraRightJamming(
            bFracHi, bFracLo, 0u, expDiff, bFracHi, bFracLo, zFrac2);
         zExp = aExp;
      } else if (expDiff < 0) {
         if (bExp == 0x7FF) {
            bool propagate = (bFracHi | bFracLo) != 0u;
            return mix(__packFloat64(aSign, 0x7ff, 0u, 0u), __propagateFloat64NaN(a, b), propagate);
         }
         expDiff = mix(expDiff, expDiff + 1, aExp == 0);
         aFracHi = mix(aFracHi | 0x00100000u, aFracHi, aExp == 0);
         __shift64ExtraRightJamming(
            aFracHi, aFracLo, 0u, -expDiff, aFracHi, aFracLo, zFrac2);
         zExp = bExp;
      }
      if (!orig_exp_diff_is_zero) {
         aFracHi |= 0x00100000u;
         __add64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
         --zExp;
         if (!(zFrac0 < 0x00200000u)) {
            __shift64ExtraRightJamming(zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);
            ++zExp;
         }
      }
      return __roundAndPackFloat64(aSign, zExp, zFrac0, zFrac1, zFrac2);

   } else {
      int zExp;

      __shortShift64Left(aFracHi, aFracLo, 10, aFracHi, aFracLo);
      __shortShift64Left(bFracHi, bFracLo, 10, bFracHi, bFracLo);
      if (0 < expDiff) {
         if (aExp == 0x7FF) {
            bool propagate = (aFracHi | aFracLo) != 0u;
            return mix(a, __propagateFloat64NaN(a, b), propagate);
         }
         expDiff = mix(expDiff, expDiff - 1, bExp == 0);
         bFracHi = mix(bFracHi | 0x40000000u, bFracHi, bExp == 0);
         __shift64RightJamming(bFracHi, bFracLo, expDiff, bFracHi, bFracLo);
         aFracHi |= 0x40000000u;
         __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
         zExp = aExp;
         --zExp;
         return __normalizeRoundAndPackFloat64(aSign, zExp - 10, zFrac0, zFrac1);
      }
      if (expDiff < 0) {
         if (bExp == 0x7FF) {
            bool propagate = (bFracHi | bFracLo) != 0u;
            return mix(__packFloat64(aSign ^ 1u, 0x7ff, 0u, 0u), __propagateFloat64NaN(a, b), propagate);
         }
         expDiff = mix(expDiff, expDiff + 1, aExp == 0);
         aFracHi = mix(aFracHi | 0x40000000u, aFracHi, aExp == 0);
         __shift64RightJamming(aFracHi, aFracLo, -expDiff, aFracHi, aFracLo);
         bFracHi |= 0x40000000u;
         __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);
         zExp = bExp;
         aSign ^= 1u;
         --zExp;
         return __normalizeRoundAndPackFloat64(aSign, zExp - 10, zFrac0, zFrac1);
      }
      if (aExp == 0x7FF) {
         bool propagate = (aFracHi | aFracLo | bFracHi | bFracLo) != 0u;
         return mix(0xFFFFFFFFFFFFFFFFUL, __propagateFloat64NaN(a, b), propagate);
      }
      bExp = mix(bExp, 1, aExp == 0);
      aExp = mix(aExp, 1, aExp == 0);
      bool zexp_normal = false;
      bool blta = true;
      if (bFracHi < aFracHi) {
         __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
         zexp_normal = true;
      }
      else if (aFracHi < bFracHi) {
         __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);
         blta = false;
         zexp_normal = true;
      }
      else if (bFracLo < aFracLo) {
         __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
         zexp_normal = true;
      }
      else if (aFracLo < bFracLo) {
         __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);
         blta = false;
         zexp_normal = true;
      }
      zExp = mix(bExp, aExp, blta);
      aSign = mix(aSign ^ 1u, aSign, blta);
      uint64_t retval_0 = __packFloat64(uint(FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN), 0, 0u, 0u);
      uint64_t retval_1 = __normalizeRoundAndPackFloat64(aSign, zExp - 11, zFrac0, zFrac1);
      return mix(retval_0, retval_1, zexp_normal);
   }
}

/* Multiplies `a' by `b' to obtain a 64-bit product.  The product is broken
 * into two 32-bit pieces which are stored at the locations pointed to by
 * `z0Ptr' and `z1Ptr'.
 */
void
__mul32To64(uint a, uint b, out uint z0Ptr, out uint z1Ptr)
{
   uint aLow = a & 0x0000FFFFu;
   uint aHigh = a>>16;
   uint bLow = b & 0x0000FFFFu;
   uint bHigh = b>>16;
   uint z1 = aLow * bLow;
   uint zMiddleA = aLow * bHigh;
   uint zMiddleB = aHigh * bLow;
   uint z0 = aHigh * bHigh;
   zMiddleA += zMiddleB;
   z0 += ((uint(zMiddleA < zMiddleB)) << 16) + (zMiddleA >> 16);
   zMiddleA <<= 16;
   z1 += zMiddleA;
   z0 += uint(z1 < zMiddleA);
   z1Ptr = z1;
   z0Ptr = z0;
}
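
/* This is the textbook 16x16-bit partial-product decomposition.  On GLSL
 * 4.00+ the built-in umulExtended(a, b, z0Ptr, z1Ptr) computes the same
 * 64-bit product directly; the manual form here is presumably kept so the
 * code can also be lowered for targets without that built-in.
 */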

/* Multiplies the 64-bit value formed by concatenating `a0' and `a1' to the
 * 64-bit value formed by concatenating `b0' and `b1' to obtain a 128-bit
 * product.  The product is broken into four 32-bit pieces which are stored at
 * the locations pointed to by `z0Ptr', `z1Ptr', `z2Ptr', and `z3Ptr'.
 */
void
__mul64To128(uint a0, uint a1, uint b0, uint b1,
             out uint z0Ptr,
             out uint z1Ptr,
             out uint z2Ptr,
             out uint z3Ptr)
{
   uint z0 = 0u;
   uint z1 = 0u;
   uint z2 = 0u;
   uint z3 = 0u;
   uint more1 = 0u;
   uint more2 = 0u;

   __mul32To64(a1, b1, z2, z3);
   __mul32To64(a1, b0, z1, more2);
   __add64(z1, more2, 0u, z2, z1, z2);
   __mul32To64(a0, b0, z0, more1);
   __add64(z0, more1, 0u, z1, z0, z1);
   __mul32To64(a0, b1, more1, more2);
   __add64(more1, more2, 0u, z2, more1, z2);
   __add64(z0, z1, 0u, more1, z0, z1);
   z3Ptr = z3;
   z2Ptr = z2;
   z1Ptr = z1;
   z0Ptr = z0;
}

/* Normalizes the subnormal double-precision floating-point value represented
 * by the denormalized significand formed by the concatenation of `aFrac0' and
 * `aFrac1'.  The normalized exponent is stored at the location pointed to by
 * `zExpPtr'.  The most significant 21 bits of the normalized significand are
 * stored at the location pointed to by `zFrac0Ptr', and the least significant
 * 32 bits of the normalized significand are stored at the location pointed to
 * by `zFrac1Ptr'.
 */
void
__normalizeFloat64Subnormal(uint aFrac0, uint aFrac1,
                            out int zExpPtr,
                            out uint zFrac0Ptr,
                            out uint zFrac1Ptr)
{
   int shiftCount;
   uint temp_zfrac0, temp_zfrac1;
   shiftCount = __countLeadingZeros32(mix(aFrac0, aFrac1, aFrac0 == 0u)) - 11;
   zExpPtr = mix(1 - shiftCount, -shiftCount - 31, aFrac0 == 0u);

   temp_zfrac0 = mix(aFrac1<<shiftCount, aFrac1>>(-shiftCount), shiftCount < 0);
   temp_zfrac1 = mix(0u, aFrac1<<(shiftCount & 31), shiftCount < 0);

   __shortShift64Left(aFrac0, aFrac1, shiftCount, zFrac0Ptr, zFrac1Ptr);

   zFrac0Ptr = mix(zFrac0Ptr, temp_zfrac0, aFrac0 == 0u);
   zFrac1Ptr = mix(zFrac1Ptr, temp_zfrac1, aFrac0 == 0u);
}

/* Returns the result of multiplying the double-precision floating-point values
 * `a' and `b'.  The operation is performed according to the IEEE Standard for
 * Floating-Point Arithmetic.
 */
uint64_t
__fmul64(uint64_t a, uint64_t b)
{
   uint zFrac0 = 0u;
   uint zFrac1 = 0u;
   uint zFrac2 = 0u;
   uint zFrac3 = 0u;
   int zExp;

   uint aFracLo = __extractFloat64FracLo(a);
   uint aFracHi = __extractFloat64FracHi(a);
   uint bFracLo = __extractFloat64FracLo(b);
   uint bFracHi = __extractFloat64FracHi(b);
   int aExp = __extractFloat64Exp(a);
   uint aSign = __extractFloat64Sign(a);
   int bExp = __extractFloat64Exp(b);
   uint bSign = __extractFloat64Sign(b);
   uint zSign = aSign ^ bSign;
   if (aExp == 0x7FF) {
      if (((aFracHi | aFracLo) != 0u) ||
         ((bExp == 0x7FF) && ((bFracHi | bFracLo) != 0u))) {
         return __propagateFloat64NaN(a, b);
      }
      if ((uint(bExp) | bFracHi | bFracLo) == 0u)
            return 0xFFFFFFFFFFFFFFFFUL;
      return __packFloat64(zSign, 0x7FF, 0u, 0u);
   }
   if (bExp == 0x7FF) {
      if ((bFracHi | bFracLo) != 0u)
         return __propagateFloat64NaN(a, b);
      if ((uint(aExp) | aFracHi | aFracLo) == 0u)
         return 0xFFFFFFFFFFFFFFFFUL;
      return __packFloat64(zSign, 0x7FF, 0u, 0u);
   }
   if (aExp == 0) {
      if ((aFracHi | aFracLo) == 0u)
         return __packFloat64(zSign, 0, 0u, 0u);
      __normalizeFloat64Subnormal(aFracHi, aFracLo, aExp, aFracHi, aFracLo);
   }
   if (bExp == 0) {
      if ((bFracHi | bFracLo) == 0u)
         return __packFloat64(zSign, 0, 0u, 0u);
      __normalizeFloat64Subnormal(bFracHi, bFracLo, bExp, bFracHi, bFracLo);
   }
   zExp = aExp + bExp - 0x400;
   aFracHi |= 0x00100000u;
   __shortShift64Left(bFracHi, bFracLo, 12, bFracHi, bFracLo);
   __mul64To128(
      aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1, zFrac2, zFrac3);
   __add64(zFrac0, zFrac1, aFracHi, aFracLo, zFrac0, zFrac1);
   zFrac2 |= uint(zFrac3 != 0u);
   if (0x00200000u <= zFrac0) {
      __shift64ExtraRightJamming(
         zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);
      ++zExp;
   }
   return __roundAndPackFloat64(zSign, zExp, zFrac0, zFrac1, zFrac2);
}

uint64_t
__ffma64(uint64_t a, uint64_t b, uint64_t c)
{
   return __fadd64(__fmul64(a, b), c);
}
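
/* Note that this is a multiply followed by a separate add, so the product is
 * rounded before the addition: it does not provide the single rounding of a
 * true fused multiply-add.
 */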

/* Shifts the 64-bit value formed by concatenating `a0' and `a1' right by the
 * number of bits given in `count'.  Any bits shifted off are lost.  The value
 * of `count' can be arbitrarily large; in particular, if `count' is greater
 * than 64, the result will be 0.  The result is broken into two 32-bit pieces
 * which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
 */
void
__shift64Right(uint a0, uint a1,
               int count,
               out uint z0Ptr,
               out uint z1Ptr)
{
   uint z0;
   uint z1;
   int negCount = (-count) & 31;

   z0 = 0u;
   z0 = mix(z0, (a0 >> count), count < 32);
   z0 = mix(z0, a0, count == 0);

   z1 = mix(0u, (a0 >> (count & 31)), count < 64);
   z1 = mix(z1, (a0<<negCount) | (a1>>count), count < 32);
   z1 = mix(z1, a1, count == 0);

   z1Ptr = z1;
   z0Ptr = z0;
}

/* Returns the result of converting the double-precision floating-point value
 * `a' to the unsigned integer format.  The conversion is performed according
 * to the IEEE Standard for Floating-Point Arithmetic.
 */
uint
__fp64_to_uint(uint64_t a)
{
   uint aFracLo = __extractFloat64FracLo(a);
   uint aFracHi = __extractFloat64FracHi(a);
   int aExp = __extractFloat64Exp(a);
   uint aSign = __extractFloat64Sign(a);

   if ((aExp == 0x7FF) && ((aFracHi | aFracLo) != 0u))
      return 0xFFFFFFFFu;

   aFracHi |= mix(0u, 0x00100000u, aExp != 0);

   int shiftDist = 0x427 - aExp;
   if (0 < shiftDist)
      __shift64RightJamming(aFracHi, aFracLo, shiftDist, aFracHi, aFracLo);

   if ((aFracHi & 0xFFFFF000u) != 0u)
      return mix(~0u, 0u, (aSign != 0u));

   uint z = 0u;
   uint zero = 0u;
   __shift64Right(aFracHi, aFracLo, 12, zero, z);

   uint expt = mix(~0u, 0u, (aSign != 0u));

   return mix(z, expt, (aSign != 0u) && (z != 0u));
}

uint64_t
__uint_to_fp64(uint a)
{
   if (a == 0u)
      return 0ul;

   int shiftDist = __countLeadingZeros32(a) + 21;

   uint aHigh = 0u;
   uint aLow = 0u;
   int negCount = (-shiftDist) & 31;

   aHigh = mix(0u, a << (shiftDist - 32), shiftDist < 64);
   aLow = 0u;
   aHigh = mix(aHigh, 0u, shiftDist == 0);
   aLow = mix(aLow, a, shiftDist == 0);
   aHigh = mix(aHigh, a >> negCount, shiftDist < 32);
   aLow = mix(aLow, a << shiftDist, shiftDist < 32);

   return __packFloat64(0u, 0x432 - shiftDist, aHigh, aLow);
}
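
/* Worked example: for a = 1u, shiftDist = 31 + 21 = 52, so aHigh becomes
 * 1u << 20 = 0x00100000u (the implicit-one position) and the exponent field
 * is 0x432 - 52 = 0x3FE; packing yields 0x3FF0000000000000UL, i.e. 1.0.
 */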

uint64_t
__uint64_to_fp64(uint64_t a)
{
   if (a == 0u)
      return 0ul;

   /* Normalization handles any position of the leading 1 bit, including
    * inputs with bit 63 set (which are shifted right with jamming and
    * rounded).
    */
   uvec2 aFrac = unpackUint2x32(a);
   return __normalizeRoundAndPackFloat64(0u, 0x432, aFrac.y, aFrac.x);
}

uint64_t
__fp64_to_uint64(uint64_t a)
{
   uint aFracLo = __extractFloat64FracLo(a);
   uint aFracHi = __extractFloat64FracHi(a);
   int aExp = __extractFloat64Exp(a);
   uint aSign = __extractFloat64Sign(a);
   uint zFrac2 = 0u;
   uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;

   aFracHi = mix(aFracHi, aFracHi | 0x00100000u, aExp != 0);
   int shiftCount = 0x433 - aExp;

   if (shiftCount <= 0) {
      if (shiftCount < -11 && aExp == 0x7FF) {
         if ((aFracHi | aFracLo) != 0u)
            return __propagateFloat64NaN(a, a);
         return mix(default_nan, a, aSign == 0u);
      }
      __shortShift64Left(aFracHi, aFracLo, -shiftCount, aFracHi, aFracLo);
   } else {
      __shift64ExtraRightJamming(aFracHi, aFracLo, zFrac2, shiftCount,
                                 aFracHi, aFracLo, zFrac2);
   }
   return __roundAndPackUInt64(aSign, aFracHi, aFracLo, zFrac2);
}

int64_t
__fp64_to_int64(uint64_t a)
{
   uint zFrac2 = 0u;
   uint aFracLo = __extractFloat64FracLo(a);
   uint aFracHi = __extractFloat64FracHi(a);
   int aExp = __extractFloat64Exp(a);
   uint aSign = __extractFloat64Sign(a);
   int64_t default_NegNaN = -0x7FFFFFFFFFFFFFFEL;
   int64_t default_PosNaN = 0xFFFFFFFFFFFFFFFFL;

   aFracHi = mix(aFracHi, aFracHi | 0x00100000u, aExp != 0);
   int shiftCount = 0x433 - aExp;

   if (shiftCount <= 0) {
      if (shiftCount < -11 && aExp == 0x7FF) {
         if ((aFracHi | aFracLo) != 0u)
            return default_NegNaN;
         return mix(default_NegNaN, default_PosNaN, aSign == 0u);
      }
      __shortShift64Left(aFracHi, aFracLo, -shiftCount, aFracHi, aFracLo);
   } else {
      __shift64ExtraRightJamming(aFracHi, aFracLo, zFrac2, shiftCount,
                                 aFracHi, aFracLo, zFrac2);
   }

   return __roundAndPackInt64(aSign, aFracHi, aFracLo, zFrac2);
}

uint64_t
__fp32_to_uint64(float f)
{
   uint a = floatBitsToUint(f);
   uint aFrac = a & 0x007FFFFFu;
   int aExp = int((a>>23) & 0xFFu);
   uint aSign = a>>31;
   uint zFrac0 = 0u;
   uint zFrac1 = 0u;
   uint zFrac2 = 0u;
   uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;
   int shiftCount = 0xBE - aExp;

   /* NaN, infinity, and finite values of magnitude >= 2^64 all collapse to
    * the same saturation pattern here; without this early return the jamming
    * shift below would be called with a negative count.
    */
   if (shiftCount < 0)
      return default_nan;

   aFrac = mix(aFrac, aFrac | 0x00800000u, aExp != 0);
   __shortShift64Left(aFrac, 0u, 40, zFrac0, zFrac1);

   if (shiftCount != 0) {
      __shift64ExtraRightJamming(zFrac0, zFrac1, zFrac2, shiftCount,
                                 zFrac0, zFrac1, zFrac2);
   }

   return __roundAndPackUInt64(aSign, zFrac0, zFrac1, zFrac2);
}

int64_t
__fp32_to_int64(float f)
{
   uint a = floatBitsToUint(f);
   uint aFrac = a & 0x007FFFFFu;
   int aExp = int((a>>23) & 0xFFu);
   uint aSign = a>>31;
   uint zFrac0 = 0u;
   uint zFrac1 = 0u;
   uint zFrac2 = 0u;
   int64_t default_NegNaN = -0x7FFFFFFFFFFFFFFEL;
   int64_t default_PosNaN = 0xFFFFFFFFFFFFFFFFL;
   int shiftCount = 0xBE - aExp;

   if (shiftCount < 0) {
      if (aExp == 0xFF && aFrac != 0u)
         return default_NegNaN;
      return mix(default_NegNaN, default_PosNaN, aSign == 0u);
   }

   aFrac = mix(aFrac, aFrac | 0x00800000u, aExp != 0);
   __shortShift64Left(aFrac, 0u, 40, zFrac0, zFrac1);

   if (shiftCount != 0) {
      __shift64ExtraRightJamming(zFrac0, zFrac1, zFrac2, shiftCount,
                                 zFrac0, zFrac1, zFrac2);
   }

   return __roundAndPackInt64(aSign, zFrac0, zFrac1, zFrac2);
}

uint64_t
__int64_to_fp64(int64_t a)
{
   if (a == 0)
      return 0ul;

   uint64_t absA = mix(uint64_t(a), uint64_t(-a), a < 0);
   uvec2 aFrac = unpackUint2x32(absA);
   uint zSign = uint(a < 0);

   /* As in __uint64_to_fp64, normalization handles any position of the
    * leading 1 bit, including INT64_MIN (absA = 2^63).
    */
   return __normalizeRoundAndPackFloat64(zSign, 0x432, aFrac.y, aFrac.x);
}

/* Returns the result of converting the double-precision floating-point value
 * `a' to the 32-bit two's complement integer format.  The conversion is
 * performed according to the IEEE Standard for Floating-Point Arithmetic---
 * which means in particular that the conversion is rounded according to the
 * current rounding mode.  If `a' is a NaN, the largest positive integer is
 * returned.  Otherwise, if the conversion overflows, the largest integer with
 * the same sign as `a' is returned.
 */
int
__fp64_to_int(uint64_t a)
{
   uint aFracLo = __extractFloat64FracLo(a);
   uint aFracHi = __extractFloat64FracHi(a);
   int aExp = __extractFloat64Exp(a);
   uint aSign = __extractFloat64Sign(a);

   uint absZ = 0u;
   uint aFracExtra = 0u;
   int shiftCount = aExp - 0x413;

   if (0 <= shiftCount) {
      if (0x41E < aExp) {
         if ((aExp == 0x7FF) && bool(aFracHi | aFracLo))
            aSign = 0u;
         return mix(0x7FFFFFFF, 0x80000000, bool(aSign));
      }
      __shortShift64Left(aFracHi | 0x00100000u, aFracLo, shiftCount, absZ, aFracExtra);
   } else {
      if (aExp < 0x3FF)
         return 0;

      aFracHi |= 0x00100000u;
      aFracExtra = (aFracHi << (shiftCount & 31)) | aFracLo;
      absZ = aFracHi >> (-shiftCount);
   }

   int z = mix(int(absZ), -int(absZ), (aSign != 0u));
   int nan = mix(0x7FFFFFFF, 0x80000000, bool(aSign));
   return mix(z, nan, bool(aSign ^ uint(z < 0)) && bool(z));
}

/* Returns the result of converting the 32-bit two's complement integer `a'
 * to the double-precision floating-point format.  The conversion is performed
 * according to the IEEE Standard for Floating-Point Arithmetic.
 */
uint64_t
__int_to_fp64(int a)
{
   uint zFrac0 = 0u;
   uint zFrac1 = 0u;
   if (a == 0)
      return __packFloat64(0u, 0, 0u, 0u);
   uint zSign = uint(a < 0);
   uint absA = mix(uint(a), uint(-a), a < 0);
   int shiftCount = __countLeadingZeros32(absA) - 11;
   if (0 <= shiftCount) {
      zFrac0 = absA << shiftCount;
      zFrac1 = 0u;
   } else {
      __shift64Right(absA, 0u, -shiftCount, zFrac0, zFrac1);
   }
   return __packFloat64(zSign, 0x412 - shiftCount, zFrac0, zFrac1);
}
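
/* Worked example: for a = -1, absA = 1u and shiftCount = 31 - 11 = 20, so
 * zFrac0 = 1u << 20 = 0x00100000u and the call packs sign 1 with exponent
 * 0x412 - 20 = 0x3FE, giving 0xBFF0000000000000UL, i.e. -1.0.
 */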

bool
__fp64_to_bool(uint64_t a)
{
   return !__feq64_nonnan(__fabs64(a), 0ul);
}

uint64_t
__bool_to_fp64(bool a)
{
   return __int_to_fp64(int(a));
}
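
/* Illustrative note (not part of the original port): __fp64_to_bool strips
 * the sign with __fabs64 first, so both +0.0 and -0.0 map to false, and
 * __bool_to_fp64(true) produces 1.0.  The helper below is hypothetical.
 */
uint64_t
__example_bool_to_fp64()
{
   return __bool_to_fp64(true);   /* expected: 0x3FF0000000000000ul (1.0) */
}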

/* Packs the sign `zSign', exponent `zExp', and significand `zFrac' into a
 * single-precision floating-point value, returning the result.  After being
 * shifted into the proper positions, the three fields are simply added
 * together to form the result.  This means that any integer portion of
 * `zFrac' will be added into the exponent.  Since a properly normalized
 * significand will have an integer portion equal to 1, the `zExp' input
 * should be 1 less than the desired result exponent whenever `zFrac' is a
 * complete, normalized significand.
 */
float
__packFloat32(uint zSign, int zExp, uint zFrac)
{
   return uintBitsToFloat((zSign<<31) + (uint(zExp)<<23) + zFrac);
}
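
/* Worked example (illustrative, not from the original port): 1.0f can be
 * produced by passing the complete normalized significand with its integer
 * bit set and an exponent one less than the true biased exponent:
 * __packFloat32(0u, 0x7E, 0x00800000u) = (0x7E<<23) + 0x00800000
 * = 0x3F800000, the bit pattern of 1.0f.  The helper below is hypothetical.
 */
float
__example_packFloat32()
{
   return __packFloat32(0u, 0x7E, 0x00800000u);   /* expected: 1.0f */
}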

/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',
 * and significand `zFrac', and returns the proper single-precision floating-
 * point value corresponding to the abstract input.  Ordinarily, the abstract
 * value is simply rounded and packed into the single-precision format, with
 * the inexact exception raised if the abstract input cannot be represented
 * exactly.  However, if the abstract value is too large, the overflow and
 * inexact exceptions are raised and an infinity or maximal finite value is
 * returned.  If the abstract value is too small, the input value is rounded to
 * a subnormal number, and the underflow and inexact exceptions are raised if
 * the abstract input cannot be represented exactly as a subnormal single-
 * precision floating-point number.
 *     The input significand `zFrac' has its binary point between bits 30
 * and 29, which is 7 bits to the left of the usual location.  This shifted
 * significand must be normalized or smaller.  If `zFrac' is not normalized,
 * `zExp' must be 0; in that case, the result returned is a subnormal number,
 * and it must not require rounding.  In the usual case that `zFrac' is
 * normalized, `zExp' must be 1 less than the "true" floating-point exponent.
 * The handling of underflow and overflow follows the IEEE Standard for
 * Floating-Point Arithmetic.
 */
float
__roundAndPackFloat32(uint zSign, int zExp, uint zFrac)
{
   bool roundNearestEven;
   int roundIncrement;
   int roundBits;

   roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;
   roundIncrement = 0x40;
   if (!roundNearestEven) {
      if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) {
         roundIncrement = 0;
      } else {
         roundIncrement = 0x7F;
         if (zSign != 0u) {
            if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP)
               roundIncrement = 0;
         } else {
            if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN)
               roundIncrement = 0;
         }
      }
   }
   roundBits = int(zFrac & 0x7Fu);
   if (0xFDu <= uint(zExp)) {
      if ((0xFD < zExp) || ((zExp == 0xFD) && (int(zFrac) + roundIncrement) < 0))
         return __packFloat32(zSign, 0xFF, 0u) - float(roundIncrement == 0);
      int count = -zExp;
      bool zexp_lt0 = zExp < 0;
      uint zFrac_lt0 = mix(uint(zFrac != 0u), (zFrac>>count) | uint((zFrac<<((-count) & 31)) != 0u), (-zExp) < 32);
      zFrac = mix(zFrac, zFrac_lt0, zexp_lt0);
      roundBits = mix(roundBits, int(zFrac) & 0x7f, zexp_lt0);
      zExp = mix(zExp, 0, zexp_lt0);
   }
   zFrac = (zFrac + uint(roundIncrement))>>7;
   zFrac &= ~uint(((roundBits ^ 0x40) == 0) && roundNearestEven);

   return __packFloat32(zSign, mix(zExp, 0, zFrac == 0u), zFrac);
}
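
/* Worked example (illustrative, not from the original port): a tie case under
 * round-to-nearest-even.  With zSign = 0u, zExp = 0x7E and
 * zFrac = 0x40000040u, roundBits is exactly 0x40, so after adding the
 * increment and shifting right by 7 the result's LSB is cleared:
 * ((0x40000040u + 0x40u)>>7) & ~1u = 0x00800000u, which packs to 1.0f.
 * The helper below is hypothetical.
 */
float
__example_roundAndPackFloat32()
{
   return __roundAndPackFloat32(0u, 0x7E, 0x40000040u);   /* expected: 1.0f */
}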

/* Returns the result of converting the double-precision floating-point value
 * `a' to the single-precision floating-point format.  The conversion is
 * performed according to the IEEE Standard for Floating-Point Arithmetic.
 */
float
__fp64_to_fp32(uint64_t __a)
{
   uvec2 a = unpackUint2x32(__a);
   uint zFrac = 0u;
   uint allZero = 0u;

   uint aFracLo = __extractFloat64FracLo(__a);
   uint aFracHi = __extractFloat64FracHi(__a);
   int aExp = __extractFloat64Exp(__a);
   uint aSign = __extractFloat64Sign(__a);
   if (aExp == 0x7FF) {
      __shortShift64Left(a.y, a.x, 12, a.y, a.x);
      float rval = uintBitsToFloat((aSign<<31) | 0x7FC00000u | (a.y>>9));
      rval = mix(__packFloat32(aSign, 0xFF, 0u), rval, (aFracHi | aFracLo) != 0u);
      return rval;
   }
   __shift64RightJamming(aFracHi, aFracLo, 22, allZero, zFrac);
   zFrac = mix(zFrac, zFrac | 0x40000000u, aExp != 0);
   return __roundAndPackFloat32(aSign, aExp - 0x381, zFrac);
}
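
/* Worked example (illustrative, not from the original port): for a = 1.0
 * (0x3FF0000000000000ul) the 52-bit fraction is zero, jamming it right by 22
 * leaves zFrac = 0u, the hidden bit 0x40000000u is OR'd in, and
 * __roundAndPackFloat32(0u, 0x3FF - 0x381, 0x40000000u) yields 1.0f.
 * The helper below is hypothetical.
 */
float
__example_fp64_to_fp32()
{
   return __fp64_to_fp32(0x3FF0000000000000ul);   /* expected: 1.0f */
}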

float
__uint64_to_fp32(uint64_t __a)
{
   uint zFrac = 0u;
   uvec2 aFrac = unpackUint2x32(__a);
   int shiftCount = __countLeadingZeros32(mix(aFrac.y, aFrac.x, aFrac.y == 0u));
   shiftCount -= mix(40, 8, aFrac.y == 0u);

   if (0 <= shiftCount) {
      __shortShift64Left(aFrac.y, aFrac.x, shiftCount, aFrac.y, aFrac.x);
      bool is_zero = (aFrac.y | aFrac.x) == 0u;
      return mix(__packFloat32(0u, 0x95 - shiftCount, aFrac.x), 0, is_zero);
   }

   shiftCount += 7;
   __shift64RightJamming(aFrac.y, aFrac.x, -shiftCount, aFrac.y, aFrac.x);
   zFrac = mix(aFrac.x<<shiftCount, aFrac.x, shiftCount < 0);
   return __roundAndPackFloat32(0u, 0x9C - shiftCount, zFrac);
}
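
/* Worked example (illustrative, not from the original port): for __a = 1ul,
 * aFrac.y == 0u so shiftCount = __countLeadingZeros32(1u) - 8 = 23, the
 * value is left-shifted into the significand (1u << 23 = 0x00800000u) and
 * packed with exponent 0x95 - 23 = 0x7E, giving 1.0f exactly.
 * The helper below is hypothetical.
 */
float
__example_uint64_to_fp32()
{
   return __uint64_to_fp32(1ul);   /* expected: 1.0f */
}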

float
__int64_to_fp32(int64_t __a)
{
   uint zFrac = 0u;
   uint aSign = uint(__a < 0);
   uint64_t absA = mix(uint64_t(__a), uint64_t(-__a), __a < 0);
   uvec2 aFrac = unpackUint2x32(absA);
   int shiftCount = __countLeadingZeros32(mix(aFrac.y, aFrac.x, aFrac.y == 0u));
   shiftCount -= mix(40, 8, aFrac.y == 0u);

   if (0 <= shiftCount) {
      __shortShift64Left(aFrac.y, aFrac.x, shiftCount, aFrac.y, aFrac.x);
      bool is_zero = (aFrac.y | aFrac.x) == 0u;
      return mix(__packFloat32(aSign, 0x95 - shiftCount, aFrac.x), 0, is_zero);
   }

   shiftCount += 7;
   __shift64RightJamming(aFrac.y, aFrac.x, -shiftCount, aFrac.y, aFrac.x);
   zFrac = mix(aFrac.x<<shiftCount, aFrac.x, shiftCount < 0);
   return __roundAndPackFloat32(aSign, 0x9C - shiftCount, zFrac);
}
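
/* Worked example (illustrative, not from the original port): for __a = -1l,
 * aSign = 1u and absA = 1ul, so the same path as __uint64_to_fp32(1ul) is
 * taken but with the sign bit set: the result is -1.0f (0xBF800000).
 * The helper below is hypothetical.
 */
float
__example_int64_to_fp32()
{
   return __int64_to_fp32(-1l);   /* expected: -1.0f */
}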

/* Returns the result of converting the single-precision floating-point value
 * `a' to the double-precision floating-point format.
 */
uint64_t
__fp32_to_fp64(float f)
{
   uint a = floatBitsToUint(f);
   uint aFrac = a & 0x007FFFFFu;
   int aExp = int((a>>23) & 0xFFu);
   uint aSign = a>>31;
   uint zFrac0 = 0u;
   uint zFrac1 = 0u;

   if (aExp == 0xFF) {
      if (aFrac != 0u) {
         uint nanLo = 0u;
         uint nanHi = a<<9;
         __shift64Right(nanHi, nanLo, 12, nanHi, nanLo);
         nanHi |= ((aSign<<31) | 0x7FF80000u);
         return packUint2x32(uvec2(nanLo, nanHi));
      }
      return __packFloat64(aSign, 0x7FF, 0u, 0u);
   }

   if (aExp == 0) {
      if (aFrac == 0u)
         return __packFloat64(aSign, 0, 0u, 0u);
      /* Normalize subnormal */
      int shiftCount = __countLeadingZeros32(aFrac) - 8;
      aFrac <<= shiftCount;
      aExp = 1 - shiftCount;
      --aExp;
   }

   __shift64Right(aFrac, 0u, 3, zFrac0, zFrac1);
   return __packFloat64(aSign, aExp + 0x380, zFrac0, zFrac1);
}
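
/* Worked example (illustrative, not from the original port): for f = 1.0f
 * (0x3F800000), aFrac = 0u and aExp = 0x7F; the fraction is shifted right by
 * 3 to sit in the double's 52-bit field and the exponent is rebiased:
 * 0x7F + 0x380 = 0x3FF, giving 0x3FF0000000000000ul (1.0).
 * The helper below is hypothetical.
 */
uint64_t
__example_fp32_to_fp64()
{
   return __fp32_to_fp64(1.0f);   /* expected: 0x3FF0000000000000ul */
}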

/* Adds the 96-bit value formed by concatenating `a0', `a1', and `a2' to the
 * 96-bit value formed by concatenating `b0', `b1', and `b2'.  Addition is
 * modulo 2^96, so any carry out is lost.  The result is broken into three
 * 32-bit pieces which are written to the out parameters `z0Ptr', `z1Ptr',
 * and `z2Ptr'.
 */
void
__add96(uint a0, uint a1, uint a2,
        uint b0, uint b1, uint b2,
        out uint z0Ptr,
        out uint z1Ptr,
        out uint z2Ptr)
{
   uint z2 = a2 + b2;
   uint carry1 = uint(z2 < a2);
   uint z1 = a1 + b1;
   uint carry0 = uint(z1 < a1);
   uint z0 = a0 + b0;
   z1 += carry1;
   z0 += uint(z1 < carry1);
   z0 += carry0;
   z2Ptr = z2;
   z1Ptr = z1;
   z0Ptr = z0;
}
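
/* Worked example (illustrative, not from the original port): carry
 * propagation across all three limbs.  Adding 1 to the 96-bit value
 * 0x00000000_FFFFFFFF_FFFFFFFF produces 0x00000001_00000000_00000000:
 * z2 wraps to 0u (carry1 = 1u), z1 wraps to 0u when that carry is added,
 * and the second carry increments z0.  The helper below is hypothetical.
 */
void
__example_add96(out uint z0, out uint z1, out uint z2)
{
   __add96(0u, 0xFFFFFFFFu, 0xFFFFFFFFu,
           0u, 0u, 1u,
           z0, z1, z2);   /* expected: z0 = 1u, z1 = 0u, z2 = 0u */
}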

/* Subtracts the 96-bit value formed by concatenating `b0', `b1', and `b2'
 * from the 96-bit value formed by concatenating `a0', `a1', and `a2'.
 * Subtraction is modulo 2^96, so any borrow out (carry out) is lost.  The
 * result is broken into three 32-bit pieces which are written to the out
 * parameters `z0Ptr', `z1Ptr', and `z2Ptr'.
 */
void
__sub96(uint a0, uint a1, uint a2,
        uint b0, uint b1, uint b2,
        out uint z0Ptr,
        out uint z1Ptr,
        out uint z2Ptr)
{
   uint z2 = a2 - b2;
   uint borrow1 = uint(a2 < b2);
   uint z1 = a1 - b1;
   uint borrow0 = uint(a1 < b1);
   uint z0 = a0 - b0;
   z0 -= uint(z1 < borrow1);
   z1 -= borrow1;
   z0 -= borrow0;
   z2Ptr = z2;
   z1Ptr = z1;
   z0Ptr = z0;
}
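
/* Worked example (illustrative, not from the original port): borrow
 * propagation.  Subtracting 1 from 0x00000001_00000000_00000000 gives
 * 0x00000000_FFFFFFFF_FFFFFFFF: z2 wraps to 0xFFFFFFFFu (borrow1 = 1u),
 * z1 wraps when that borrow is subtracted, and z0 pays for both.
 * The helper below is hypothetical.
 */
void
__example_sub96(out uint z0, out uint z1, out uint z2)
{
   __sub96(1u, 0u, 0u,
           0u, 0u, 1u,
           z0, z1, z2);   /* expected: z0 = 0u, z1 = 0xFFFFFFFFu, z2 = 0xFFFFFFFFu */
}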

/* Returns an approximation to the 32-bit integer quotient obtained by dividing
 * `b' into the 64-bit value formed by concatenating `a0' and `a1'.  The
 * divisor `b' must be at least 2^31.  If q is the exact quotient truncated
 * toward zero, the approximation returned lies between q and q + 2 inclusive.
 * If the exact quotient q is larger than 32 bits, the maximum positive 32-bit
 * unsigned integer is returned.
 */
uint
__estimateDiv64To32(uint a0, uint a1, uint b)
{
   uint b0;
   uint b1;
   uint rem0 = 0u;
   uint rem1 = 0u;
   uint term0 = 0u;
   uint term1 = 0u;
   uint z;

   if (b <= a0)
      return 0xFFFFFFFFu;
   b0 = b>>16;
   z = (b0<<16 <= a0) ? 0xFFFF0000u : (a0 / b0)<<16;
   __mul32To64(b, z, term0, term1);
   __sub64(a0, a1, term0, term1, rem0, rem1);
   while (int(rem0) < 0) {
      z -= 0x10000u;
      b1 = b<<16;
      __add64(rem0, rem1, b0, b1, rem0, rem1);
   }
   rem0 = (rem0<<16) | (rem1>>16);
   z |= (b0<<16 <= rem0) ? 0xFFFFu : rem0 / b0;
   return z;
}
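
/* Worked example (illustrative, not from the original port): dividing the
 * 64-bit value 2^32 (a0 = 1u, a1 = 0u) by b = 0x80000000u.  The first digit
 * estimate is (a0 / (b>>16))<<16 = 0u, the remainder stays non-negative, and
 * the low digit is 0x10000u / 0x8000u = 2u, so the routine returns exactly 2u.
 * The helper below is hypothetical.
 */
uint
__example_estimateDiv64To32()
{
   return __estimateDiv64To32(1u, 0u, 0x80000000u);   /* expected: 2u */
}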

uint
__sqrtOddAdjustments(int index)
{
   uint res = 0u;
   if (index == 0)
      res = 0x0004u;
   if (index == 1)
      res = 0x0022u;
   if (index == 2)
      res = 0x005Du;
   if (index == 3)
      res = 0x00B1u;
   if (index == 4)
      res = 0x011Du;
   if (index == 5)
      res = 0x019Fu;
   if (index == 6)
      res = 0x0236u;
   if (index == 7)
      res = 0x02E0u;
   if (index == 8)
      res = 0x039Cu;
   if (index == 9)
      res = 0x0468u;
   if (index == 10)
      res = 0x0545u;
   if (index == 11)
      res = 0x0631u;
   if (index == 12)
      res = 0x072Bu;
   if (index == 13)
      res = 0x0832u;
   if (index == 14)
      res = 0x0946u;
   if (index == 15)
      res = 0x0A67u;

   return res;
}

uint
__sqrtEvenAdjustments(int index)
{
   uint res = 0u;
   if (index == 0)
      res = 0x0A2Du;
   if (index == 1)
      res = 0x08AFu;
   if (index == 2)
      res = 0x075Au;
   if (index == 3)
      res = 0x0629u;
   if (index == 4)
      res = 0x051Au;
   if (index == 5)
      res = 0x0429u;
   if (index == 6)
      res = 0x0356u;
   if (index == 7)
      res = 0x029Eu;
   if (index == 8)
      res = 0x0200u;
   if (index == 9)
      res = 0x0179u;
   if (index == 10)
      res = 0x0109u;
   if (index == 11)
      res = 0x00AFu;
   if (index == 12)
      res = 0x0068u;
   if (index == 13)
      res = 0x0034u;
   if (index == 14)
      res = 0x0012u;
   if (index == 15)
      res = 0x0002u;

   return res;
}

/* Returns an approximation to the square root of the 32-bit significand given
 * by `a'.  Considered as an integer, `a' must be at least 2^31.  If bit 0 of
 * `aExp' (the least significant bit) is 1, the integer returned approximates
 * 2^31*sqrt(`a'/2^31), where `a' is considered an integer.  If bit 0 of `aExp'
 * is 0, the integer returned approximates 2^31*sqrt(`a'/2^30).  In either
 * case, the approximation returned lies strictly within +/-2 of the exact
 * value.
 */
uint
__estimateSqrt32(int aExp, uint a)
{
   uint z;

   int index = int(a>>27 & 15u);
   if ((aExp & 1) != 0) {
      z = 0x4000u + (a>>17) - __sqrtOddAdjustments(index);
      z = ((a / z)<<14) + (z<<15);
      a >>= 1;
   } else {
      z = 0x8000u + (a>>17) - __sqrtEvenAdjustments(index);
      z = a / z + z;
      z = (0x20000u <= z) ? 0xFFFF8000u : (z<<15);
      if (z <= a)
         return uint(int(a)>>1);
   }
   return ((__estimateDiv64To32(a, 0u, z))>>1) + (z>>1);
}
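
/* Worked example (illustrative, not from the original port): with an even
 * `aExp' and a = 0x80000000u (i.e. a/2^30 = 2.0), the table lookup followed
 * by the Newton-Raphson average lands on roughly 0xB504F334u, within the
 * documented +/-2 of 2^31*sqrt(2) = 0xB504F333.98...  The helper below is
 * hypothetical.
 */
uint
__example_estimateSqrt32()
{
   return __estimateSqrt32(0, 0x80000000u);   /* approximately 0xB504F334u */
}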

/* Returns the square root of the double-precision floating-point value `a'.
 * The operation is performed according to the IEEE Standard for Floating-Point
 * Arithmetic.
 */
uint64_t
__fsqrt64(uint64_t a)
{
   uint zFrac0 = 0u;
   uint zFrac1 = 0u;
   uint zFrac2 = 0u;
   uint doubleZFrac0 = 0u;
   uint rem0 = 0u;
   uint rem1 = 0u;
   uint rem2 = 0u;
   uint rem3 = 0u;
   uint term0 = 0u;
   uint term1 = 0u;
   uint term2 = 0u;
   uint term3 = 0u;
   uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;

   uint aFracLo = __extractFloat64FracLo(a);
   uint aFracHi = __extractFloat64FracHi(a);
   int aExp = __extractFloat64Exp(a);
   uint aSign = __extractFloat64Sign(a);
   if (aExp == 0x7FF) {
      if ((aFracHi | aFracLo) != 0u)
         return __propagateFloat64NaN(a, a);
      if (aSign == 0u)
         return a;
      return default_nan;
   }
   if (aSign != 0u) {
      if ((uint(aExp) | aFracHi | aFracLo) == 0u)
         return a;
      return default_nan;
   }
   if (aExp == 0) {
      if ((aFracHi | aFracLo) == 0u)
         return __packFloat64(0u, 0, 0u, 0u);
      __normalizeFloat64Subnormal(aFracHi, aFracLo, aExp, aFracHi, aFracLo);
   }
   int zExp = ((aExp - 0x3FF)>>1) + 0x3FE;
   aFracHi |= 0x00100000u;
   __shortShift64Left(aFracHi, aFracLo, 11, term0, term1);
   zFrac0 = (__estimateSqrt32(aExp, term0)>>1) + 1u;
   if (zFrac0 == 0u)
      zFrac0 = 0x7FFFFFFFu;
   doubleZFrac0 = zFrac0 + zFrac0;
   __shortShift64Left(aFracHi, aFracLo, 9 - (aExp & 1), aFracHi, aFracLo);
   __mul32To64(zFrac0, zFrac0, term0, term1);
   __sub64(aFracHi, aFracLo, term0, term1, rem0, rem1);
   while (int(rem0) < 0) {
      --zFrac0;
      doubleZFrac0 -= 2u;
      __add64(rem0, rem1, 0u, doubleZFrac0 | 1u, rem0, rem1);
   }
   zFrac1 = __estimateDiv64To32(rem1, 0u, doubleZFrac0);
   if ((zFrac1 & 0x1FFu) <= 5u) {
      if (zFrac1 == 0u)
         zFrac1 = 1u;
      __mul32To64(doubleZFrac0, zFrac1, term1, term2);
      __sub64(rem1, 0u, term1, term2, rem1, rem2);
      __mul32To64(zFrac1, zFrac1, term2, term3);
      __sub96(rem1, rem2, 0u, 0u, term2, term3, rem1, rem2, rem3);
      while (int(rem1) < 0) {
         --zFrac1;
         __shortShift64Left(0u, zFrac1, 1, term2, term3);
         term3 |= 1u;
         term2 |= doubleZFrac0;
         __add96(rem1, rem2, rem3, 0u, term2, term3, rem1, rem2, rem3);
      }
      zFrac1 |= uint((rem1 | rem2 | rem3) != 0u);
   }
   __shift64ExtraRightJamming(zFrac0, zFrac1, 0u, 10, zFrac0, zFrac1, zFrac2);
   return __roundAndPackFloat64(0u, zExp, zFrac0, zFrac1, zFrac2);
}
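
/* Worked example (illustrative, not from the original port): the square root
 * of an exactly representable square is exact under IEEE rules, so
 * __fsqrt64(4.0) must return precisely 2.0.  The helper below is
 * hypothetical.
 */
uint64_t
__example_fsqrt64()
{
   return __fsqrt64(0x4010000000000000ul /* 4.0 */);   /* expected: 0x4000000000000000ul (2.0) */
}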

uint64_t
__ftrunc64(uint64_t __a)
{
   uvec2 a = unpackUint2x32(__a);
   int aExp = __extractFloat64Exp(__a);
   uint zLo;
   uint zHi;

   int unbiasedExp = aExp - 1023;
   int fracBits = 52 - unbiasedExp;
   uint maskLo = mix(~0u << fracBits, 0u, fracBits >= 32);
   uint maskHi = mix(~0u << (fracBits - 32), ~0u, fracBits < 33);
   zLo = maskLo & a.x;
   zHi = maskHi & a.y;

   zLo = mix(zLo, 0u, unbiasedExp < 0);
   zHi = mix(zHi, 0u, unbiasedExp < 0);
   zLo = mix(zLo, a.x, unbiasedExp > 52);
   zHi = mix(zHi, a.y, unbiasedExp > 52);
   return packUint2x32(uvec2(zLo, zHi));
}
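
/* Worked example (illustrative, not from the original port): truncating 2.5
 * (0x4004000000000000ul).  unbiasedExp = 1, so fracBits = 51, maskLo = 0u and
 * maskHi = ~0u << 19 = 0xFFF80000u; masking the high word 0x40040000u gives
 * 0x40000000u, i.e. 2.0.  The helper below is hypothetical.
 */
uint64_t
__example_ftrunc64()
{
   return __ftrunc64(0x4004000000000000ul);   /* expected: 0x4000000000000000ul (2.0) */
}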

uint64_t
__ffloor64(uint64_t a)
{
   bool is_positive = __fge64(a, 0ul);
   uint64_t tr = __ftrunc64(a);

   if (is_positive || __feq64(tr, a)) {
      return tr;
   } else {
      return __fadd64(tr, 0xbff0000000000000ul /* -1.0 */);
   }
}
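
/* Worked example (illustrative, not from the original port): for a = -2.5,
 * is_positive is false and tr = __ftrunc64(-2.5) = -2.0 differs from a, so
 * the result is __fadd64(-2.0, -1.0) = -3.0, as floor() requires for
 * negative non-integers.  The helper below is hypothetical.
 */
uint64_t
__example_ffloor64()
{
   return __ffloor64(0xC004000000000000ul /* -2.5 */);   /* expected: 0xC008000000000000ul (-3.0) */
}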

uint64_t
__fround64(uint64_t __a)
{
   uvec2 a = unpackUint2x32(__a);
   int unbiasedExp = __extractFloat64Exp(__a) - 1023;
   uint aHi = a.y;
   uint aLo = a.x;

   if (unbiasedExp < 20) {
      if (unbiasedExp < 0) {
         if ((aHi & 0x80000000u) != 0u && aLo == 0u) {
            return 0;
         }
         aHi &= 0x80000000u;
         if ((a.y & 0x000FFFFFu) == 0u && a.x == 0u) {
            aLo = 0u;
            return packUint2x32(uvec2(aLo, aHi));
         }
         aHi = mix(aHi, (aHi | 0x3FF00000u), unbiasedExp == -1);
         aLo = 0u;
      } else {
         uint maskExp = 0x000FFFFFu >> unbiasedExp;
         uint lastBit = maskExp + 1;
         aHi += 0x00080000u >> unbiasedExp;
         if ((aHi & maskExp) == 0u)
            aHi &= ~lastBit;
         aHi &= ~maskExp;
         aLo = 0u;
      }
   } else if (unbiasedExp > 51 || unbiasedExp == 1024) {
      return __a;
   } else {
      uint maskExp = 0xFFFFFFFFu >> (unbiasedExp - 20);
      if ((aLo & maskExp) == 0u)
         return __a;
      uint tmp = aLo + (1u << (51 - unbiasedExp));
      if (tmp < aLo)
         aHi += 1u;
      aLo = tmp;
      aLo &= ~maskExp;
   }

   return packUint2x32(uvec2(aLo, aHi));
}
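
/* Worked example (illustrative, not from the original port): rounding 0.75
 * (0x3FE8000000000000ul).  unbiasedExp = -1 and the fraction is nonzero, so
 * the sign-only high word is OR'd with 0x3FF00000u and the result is 1.0
 * (0x3FF0000000000000ul).  The helper below is hypothetical.
 */
uint64_t
__example_fround64()
{
   return __fround64(0x3FE8000000000000ul /* 0.75 */);   /* expected: 1.0 */
}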

uint64_t
__fmin64(uint64_t a, uint64_t b)
{
   if (__is_nan(a)) return b;
   if (__is_nan(b)) return a;

   if (__flt64_nonnan(a, b)) return a;
   return b;
}

uint64_t
__fmax64(uint64_t a, uint64_t b)
{
   if (__is_nan(a)) return b;
   if (__is_nan(b)) return a;

   if (__flt64_nonnan(a, b)) return b;
   return a;
}
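
/* Illustrative note (not from the original port): when exactly one operand is
 * a NaN, the other operand is returned, matching the leniency GLSL allows for
 * min()/max().  The helper below is hypothetical.
 */
uint64_t
__example_fmax64()
{
   return __fmax64(0x7FF8000000000000ul /* NaN */,
                   0x3FF0000000000000ul /* 1.0 */);   /* expected: 1.0 */
}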

uint64_t
__ffract64(uint64_t a)
{
   return __fadd64(a, __fneg64(__ffloor64(a)));
}
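
/* Worked example (illustrative, not from the original port): fract(a) is
 * computed as a - floor(a), so for a = 2.5 the result is 2.5 - 2.0 = 0.5
 * (0x3FE0000000000000ul).  The helper below is hypothetical.
 */
uint64_t
__example_ffract64()
{
   return __ffract64(0x4004000000000000ul /* 2.5 */);   /* expected: 0.5 */
}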