/**************************************************************************
 *
 * Copyright 2008 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * Math utilities and approximations for common math functions.
 * Reduced precision is usually acceptable in shaders...
 *
 * "fast" is used in the names of functions which are low-precision,
 * or at least lower-precision than the normal C lib functions.
 */


#ifndef U_MATH_H
#define U_MATH_H


#include "pipe/p_compiler.h"

#include "c99_math.h"
#include <assert.h>
#include <float.h>
#include <stdarg.h>

#include "bitscan.h"

#ifdef __cplusplus
extern "C" {
#endif


#ifndef M_SQRT2
#define M_SQRT2 1.41421356237309504880
#endif

#define POW2_TABLE_SIZE_LOG2 9
#define POW2_TABLE_SIZE (1 << POW2_TABLE_SIZE_LOG2)
#define POW2_TABLE_OFFSET (POW2_TABLE_SIZE/2)
#define POW2_TABLE_SCALE ((float)(POW2_TABLE_SIZE/2))
extern float pow2_table[POW2_TABLE_SIZE];


/**
 * Initialize math module.  This should be called before using any
 * other functions in this module.
 */
extern void
util_init_math(void);


union fi {
   float f;
   int32_t i;
   uint32_t ui;
};


union di {
   double d;
   int64_t i;
   uint64_t ui;
};


/**
 * Extract the IEEE float32 exponent.
 */
static inline signed
util_get_float32_exponent(float x)
{
   union fi f;

   f.f = x;

   return ((f.ui >> 23) & 0xff) - 127;
}


/**
 * Fast version of 2^x
 * Identity: exp2(a + b) = exp2(a) * exp2(b)
 * Let ipart = int(x)
 * Let fpart = x - ipart;
 * So, exp2(x) = exp2(ipart) * exp2(fpart)
 * Compute exp2(ipart) with i << ipart
 * Compute exp2(fpart) with lookup table.
 */
static inline float
util_fast_exp2(float x)
{
   int32_t ipart;
   float fpart, mpart;
   union fi epart;

   if (x > 129.00000f)
      return 3.402823466e+38f;

   if (x < -126.99999f)
      return 0.0f;

   ipart = (int32_t) x;
   fpart = x - (float) ipart;

   /* same as
    *   epart.f = (float) (1 << ipart)
    * but faster and without integer overflow for ipart > 31
    */
   epart.i = (ipart + 127) << 23;

   mpart = pow2_table[POW2_TABLE_OFFSET + (int)(fpart * POW2_TABLE_SCALE)];

   return epart.f * mpart;
}

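/*
 * Example (illustrative only): util_init_math() must have filled
 * pow2_table before the fast helpers are used, and the result is a
 * low-precision approximation:
 *
 *    util_init_math();
 *    float a = util_fast_exp2(3.5f);     // roughly 11.3, close to exp2f(3.5f)
 *    float b = util_fast_exp2(-130.0f);  // clamped to 0.0f
 */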

/**
 * Fast approximation to exp(x).
 */
static inline float
util_fast_exp(float x)
{
   const float k = 1.44269f; /* = log2(e) */
   return util_fast_exp2(k * x);
}


#define LOG2_TABLE_SIZE_LOG2 16
#define LOG2_TABLE_SCALE (1 << LOG2_TABLE_SIZE_LOG2)
#define LOG2_TABLE_SIZE (LOG2_TABLE_SCALE + 1)
extern float log2_table[LOG2_TABLE_SIZE];


/**
 * Fast approximation to log2(x).
 */
static inline float
util_fast_log2(float x)
{
   union fi num;
   float epart, mpart;
   num.f = x;
   epart = (float)(((num.i & 0x7f800000) >> 23) - 127);
   /* mpart = log2_table[mantissa*LOG2_TABLE_SCALE + 0.5] */
   mpart = log2_table[((num.i & 0x007fffff) + (1 << (22 - LOG2_TABLE_SIZE_LOG2))) >> (23 - LOG2_TABLE_SIZE_LOG2)];
   return epart + mpart;
}


/**
 * Fast approximation to x^y.
 */
static inline float
util_fast_pow(float x, float y)
{
   return util_fast_exp2(util_fast_log2(x) * y);
}

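/*
 * Example (illustrative only): these helpers likewise assume
 * util_init_math() has filled log2_table, and they trade accuracy
 * for speed:
 *
 *    float l = util_fast_log2(8.0f);        // roughly 3.0
 *    float p = util_fast_pow(2.0f, 10.0f);  // roughly 1024.0
 */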

/**
 * Floor(x), returned as int.
 */
static inline int
util_ifloor(float f)
{
   int ai, bi;
   double af, bf;
   union fi u;
   af = (3 << 22) + 0.5 + (double) f;
   bf = (3 << 22) + 0.5 - (double) f;
   u.f = (float) af;  ai = u.i;
   u.f = (float) bf;  bi = u.i;
   return (ai - bi) >> 1;
}

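/*
 * Example (illustrative only): unlike a plain (int) cast, which truncates
 * toward zero, util_ifloor() rounds toward negative infinity:
 *
 *    util_ifloor(2.7f);    //  2
 *    util_ifloor(-2.7f);   // -3  ((int) -2.7f would give -2)
 */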

/**
 * Round float to nearest int.
 */
static inline int
util_iround(float f)
{
#if defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86)
   int r;
   __asm__ ("fistpl %0" : "=m" (r) : "t" (f) : "st");
   return r;
#elif defined(PIPE_CC_MSVC) && defined(PIPE_ARCH_X86)
   int r;
   _asm {
      fld f
      fistp r
   }
   return r;
#else
   if (f >= 0.0f)
      return (int) (f + 0.5f);
   else
      return (int) (f - 0.5f);
#endif
}


/**
 * Approximate floating point comparison
 */
static inline boolean
util_is_approx(float a, float b, float tol)
{
   return fabsf(b - a) <= tol;
}


/**
 * util_is_X_inf_or_nan = test if x is NaN or +/- Inf
 * util_is_X_nan        = test if x is NaN
 * util_X_inf_sign      = return +1 for +Inf, -1 for -Inf, or 0 for not Inf
 *
 * NaN can be checked with x != x, but that check can be optimized away
 * when fast-math compiler flags are enabled.
 */


/**
 * Single-float
 */
static inline boolean
util_is_inf_or_nan(float x)
{
   union fi tmp;
   tmp.f = x;
   return (tmp.ui & 0x7f800000) == 0x7f800000;
}


static inline boolean
util_is_nan(float x)
{
   union fi tmp;
   tmp.f = x;
   return (tmp.ui & 0x7fffffff) > 0x7f800000;
}


static inline int
util_inf_sign(float x)
{
   union fi tmp;
   tmp.f = x;
   if ((tmp.ui & 0x7fffffff) != 0x7f800000) {
      return 0;
   }

   return (x < 0) ? -1 : 1;
}


/**
 * Double-float
 */
static inline boolean
util_is_double_inf_or_nan(double x)
{
   union di tmp;
   tmp.d = x;
   return (tmp.ui & 0x7ff0000000000000ULL) == 0x7ff0000000000000ULL;
}


static inline boolean
util_is_double_nan(double x)
{
   union di tmp;
   tmp.d = x;
   return (tmp.ui & 0x7fffffffffffffffULL) > 0x7ff0000000000000ULL;
}


static inline int
util_double_inf_sign(double x)
{
   union di tmp;
   tmp.d = x;
   if ((tmp.ui & 0x7fffffffffffffffULL) != 0x7ff0000000000000ULL) {
      return 0;
   }

   return (x < 0) ? -1 : 1;
}


/**
 * Half-float
 */
static inline boolean
util_is_half_inf_or_nan(int16_t x)
{
   return (x & 0x7c00) == 0x7c00;
}


static inline boolean
util_is_half_nan(int16_t x)
{
   return (x & 0x7fff) > 0x7c00;
}


static inline int
util_half_inf_sign(int16_t x)
{
   if ((x & 0x7fff) != 0x7c00) {
      return 0;
   }

   return (x < 0) ? -1 : 1;
}


/**
 * Return float bits.
 */
static inline unsigned
fui( float f )
{
   union fi fi;
   fi.f = f;
   return fi.ui;
}

static inline float
uif(uint32_t ui)
{
   union fi fi;
   fi.ui = ui;
   return fi.f;
}


/**
 * Convert ubyte to float in [0, 1].
 */
static inline float
ubyte_to_float(ubyte ub)
{
   return (float) ub * (1.0f / 255.0f);
}


/**
 * Convert float in [0,1] to ubyte in [0,255] with clamping.
 */
static inline ubyte
float_to_ubyte(float f)
{
   /* return 0 for NaN too */
   if (!(f > 0.0f)) {
      return (ubyte) 0;
   }
   else if (f >= 1.0f) {
      return (ubyte) 255;
   }
   else {
      union fi tmp;
      tmp.f = f;
      tmp.f = tmp.f * (255.0f/256.0f) + 32768.0f;
      return (ubyte) tmp.i;
   }
}

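/*
 * Example (illustrative only): out-of-range values and NaN are clamped;
 * in-range values go through a bit trick that approximates rounding
 * f * 255 to the nearest integer:
 *
 *    float_to_ubyte(-1.0f);  // 0
 *    float_to_ubyte(0.0f);   // 0
 *    float_to_ubyte(1.0f);   // 255
 *    float_to_ubyte(2.0f);   // 255
 */
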
static inline float
byte_to_float_tex(int8_t b)
{
   return (b == -128) ? -1.0F : b * 1.0F / 127.0F;
}

static inline int8_t
float_to_byte_tex(float f)
{
   return (int8_t) (127.0F * f);
}

/**
 * Compute floor of the base-2 log of n.  Returns 0 for n == 0.
 */
static inline unsigned
util_logbase2(unsigned n)
{
#if defined(HAVE___BUILTIN_CLZ)
   return ((sizeof(unsigned) * 8 - 1) - __builtin_clz(n | 1));
#else
   unsigned pos = 0;
   if (n >= 1<<16) { n >>= 16; pos += 16; }
   if (n >= 1<< 8) { n >>=  8; pos +=  8; }
   if (n >= 1<< 4) { n >>=  4; pos +=  4; }
   if (n >= 1<< 2) { n >>=  2; pos +=  2; }
   if (n >= 1<< 1) {           pos +=  1; }
   return pos;
#endif
}

static inline uint64_t
util_logbase2_64(uint64_t n)
{
#if defined(HAVE___BUILTIN_CLZLL)
   return ((sizeof(uint64_t) * 8 - 1) - __builtin_clzll(n | 1));
#else
   uint64_t pos = 0ull;
   if (n >= 1ull<<32) { n >>= 32; pos += 32; }
   if (n >= 1ull<<16) { n >>= 16; pos += 16; }
   if (n >= 1ull<< 8) { n >>=  8; pos +=  8; }
   if (n >= 1ull<< 4) { n >>=  4; pos +=  4; }
   if (n >= 1ull<< 2) { n >>=  2; pos +=  2; }
   if (n >= 1ull<< 1) {           pos +=  1; }
   return pos;
#endif
}

/**
 * Returns the ceiling of log n base 2, and 0 when n == 0. Equivalently,
 * returns the smallest x such that n <= 2**x.
 */
static inline unsigned
util_logbase2_ceil(unsigned n)
{
   if (n <= 1)
      return 0;

   return 1 + util_logbase2(n - 1);
}

static inline uint64_t
util_logbase2_ceil64(uint64_t n)
{
   if (n <= 1)
      return 0;

   return 1ull + util_logbase2_64(n - 1);
}

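/*
 * Example (illustrative only): util_logbase2() is the floor form,
 * util_logbase2_ceil() rounds up:
 *
 *    util_logbase2(8);        // 3
 *    util_logbase2(9);        // 3
 *    util_logbase2_ceil(9);   // 4
 *    util_logbase2(0);        // 0 (by convention)
 */
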
/**
 * Returns the smallest power of two >= x
 */
static inline unsigned
util_next_power_of_two(unsigned x)
{
#if defined(HAVE___BUILTIN_CLZ)
   if (x <= 1)
       return 1;

   return (1 << ((sizeof(unsigned) * 8) - __builtin_clz(x - 1)));
#else
   unsigned val = x;

   if (x <= 1)
      return 1;

   if (util_is_power_of_two_or_zero(x))
      return x;

   val--;
   val = (val >> 1) | val;
   val = (val >> 2) | val;
   val = (val >> 4) | val;
   val = (val >> 8) | val;
   val = (val >> 16) | val;
   val++;
   return val;
#endif
}

static inline uint64_t
util_next_power_of_two64(uint64_t x)
{
#if defined(HAVE___BUILTIN_CLZLL)
   if (x <= 1)
       return 1;

   return (1ull << ((sizeof(uint64_t) * 8) - __builtin_clzll(x - 1)));
#else
   uint64_t val = x;

   if (x <= 1)
      return 1;

   if (util_is_power_of_two_or_zero64(x))
      return x;

   val--;
   val = (val >> 1)  | val;
   val = (val >> 2)  | val;
   val = (val >> 4)  | val;
   val = (val >> 8)  | val;
   val = (val >> 16) | val;
   val = (val >> 32) | val;
   val++;
   return val;
#endif
}

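/*
 * Example (illustrative only):
 *
 *    util_next_power_of_two(0);   // 1
 *    util_next_power_of_two(8);   // 8 (already a power of two)
 *    util_next_power_of_two(9);   // 16
 */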

/**
 * Return number of bits set in n.
 */
static inline unsigned
util_bitcount(unsigned n)
{
#if defined(HAVE___BUILTIN_POPCOUNT)
   return __builtin_popcount(n);
#else
   /* K&R classic bitcount.
    *
    * For each iteration, clear the LSB from the bitfield.
    * Requires only one iteration per set bit, instead of
    * one iteration per bit less than highest set bit.
    */
   unsigned bits;
   for (bits = 0; n; bits++) {
      n &= n - 1;
   }
   return bits;
#endif
}


static inline unsigned
util_bitcount64(uint64_t n)
{
#ifdef HAVE___BUILTIN_POPCOUNTLL
   return __builtin_popcountll(n);
#else
   return util_bitcount(n) + util_bitcount(n >> 32);
#endif
}

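/*
 * Example (illustrative only):
 *
 *    util_bitcount(0x0000000fu);        // 4
 *    util_bitcount64(0xf00000000ull);   // 4
 */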

/**
 * Reverse bits in n
 * Algorithm taken from:
 * http://stackoverflow.com/questions/9144800/c-reverse-bits-in-unsigned-integer
 */
static inline unsigned
util_bitreverse(unsigned n)
{
    n = ((n >> 1) & 0x55555555u) | ((n & 0x55555555u) << 1);
    n = ((n >> 2) & 0x33333333u) | ((n & 0x33333333u) << 2);
    n = ((n >> 4) & 0x0f0f0f0fu) | ((n & 0x0f0f0f0fu) << 4);
    n = ((n >> 8) & 0x00ff00ffu) | ((n & 0x00ff00ffu) << 8);
    n = ((n >> 16) & 0xffffu) | ((n & 0xffffu) << 16);
    return n;
}

/**
 * Convert from little endian to CPU byte order.
 */

#ifdef PIPE_ARCH_BIG_ENDIAN
#define util_le64_to_cpu(x) util_bswap64(x)
#define util_le32_to_cpu(x) util_bswap32(x)
#define util_le16_to_cpu(x) util_bswap16(x)
#else
#define util_le64_to_cpu(x) (x)
#define util_le32_to_cpu(x) (x)
#define util_le16_to_cpu(x) (x)
#endif

#define util_cpu_to_le64(x) util_le64_to_cpu(x)
#define util_cpu_to_le32(x) util_le32_to_cpu(x)
#define util_cpu_to_le16(x) util_le16_to_cpu(x)

/**
 * Reverse byte order of a 32 bit word.
 */
static inline uint32_t
util_bswap32(uint32_t n)
{
#if defined(HAVE___BUILTIN_BSWAP32)
   return __builtin_bswap32(n);
#else
   return (n >> 24) |
          ((n >> 8) & 0x0000ff00) |
          ((n << 8) & 0x00ff0000) |
          (n << 24);
#endif
}

/**
 * Reverse byte order of a 64 bit word.
 */
static inline uint64_t
util_bswap64(uint64_t n)
{
#if defined(HAVE___BUILTIN_BSWAP64)
   return __builtin_bswap64(n);
#else
   return ((uint64_t)util_bswap32((uint32_t)n) << 32) |
          util_bswap32((n >> 32));
#endif
}


/**
 * Reverse byte order of a 16 bit word.
 */
static inline uint16_t
util_bswap16(uint16_t n)
{
   return (n >> 8) |
          (n << 8);
}

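/*
 * Example (illustrative only): the byte-swap helpers always reverse byte
 * order, while util_cpu_to_le32()/util_le32_to_cpu() and friends are
 * no-ops on little-endian CPUs and swap only on big-endian ones:
 *
 *    util_bswap32(0x12345678u);   // 0x78563412
 *    util_bswap16(0xbeefu);       // 0xefbe
 */
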
static inline void*
util_memcpy_cpu_to_le32(void * restrict dest, const void * restrict src, size_t n)
{
#ifdef PIPE_ARCH_BIG_ENDIAN
   size_t i, e;
   assert(n % 4 == 0);

   for (i = 0, e = n / 4; i < e; i++) {
      uint32_t * restrict d = (uint32_t* restrict)dest;
      const uint32_t * restrict s = (const uint32_t* restrict)src;
      d[i] = util_bswap32(s[i]);
   }
   return dest;
#else
   return memcpy(dest, src, n);
#endif
}

/**
 * Clamp X to [MIN, MAX].
 * This is a macro to allow float, int, uint, etc. types.
 * We arbitrarily turn NaN into MIN.
 */
#define CLAMP( X, MIN, MAX )  ( (X)>(MIN) ? ((X)>(MAX) ? (MAX) : (X)) : (MIN) )

#define MIN2( A, B )   ( (A)<(B) ? (A) : (B) )
#define MAX2( A, B )   ( (A)>(B) ? (A) : (B) )

#define MIN3( A, B, C ) ((A) < (B) ? MIN2(A, C) : MIN2(B, C))
#define MAX3( A, B, C ) ((A) > (B) ? MAX2(A, C) : MAX2(B, C))

#define MIN4( A, B, C, D ) ((A) < (B) ? MIN3(A, C, D) : MIN3(B, C, D))
#define MAX4( A, B, C, D ) ((A) > (B) ? MAX3(A, C, D) : MAX3(B, C, D))

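/*
 * Example (illustrative only): these are plain macros, so arguments may
 * be evaluated more than once; avoid operands with side effects.
 *
 *    CLAMP(5, 0, 3);            // 3
 *    CLAMP(-1.0f, 0.0f, 1.0f);  // 0.0f
 *    CLAMP(NAN, 0.0f, 1.0f);    // 0.0f (NaN compares false, so MIN wins)
 *    MIN3(4, 2, 7);             // 2
 */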

/**
 * Align a value; only works with power-of-two (POT) alignments.
 */
static inline int
align(int value, int alignment)
{
   return (value + alignment - 1) & ~(alignment - 1);
}

static inline uint64_t
align64(uint64_t value, unsigned alignment)
{
   return (value + alignment - 1) & ~((uint64_t)alignment - 1);
}

/**
 * Works like align() but also handles non-power-of-two (NPOT) alignments.
 */
static inline size_t
util_align_npot(size_t value, size_t alignment)
{
   if (value % alignment)
      return value + (alignment - (value % alignment));
   return value;
}

static inline unsigned
u_minify(unsigned value, unsigned levels)
{
    return MAX2(1, value >> levels);
}

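/*
 * Example (illustrative only):
 *
 *    align(13, 8);             // 16 (alignment must be a power of two)
 *    util_align_npot(13, 6);   // 18 (any alignment)
 *    u_minify(13, 2);          // 3  (mip level 2 of a 13-texel dimension)
 *    u_minify(4, 5);           // 1  (never goes below 1)
 */
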
#ifndef COPY_4V
#define COPY_4V( DST, SRC )         \
do {                                \
   (DST)[0] = (SRC)[0];             \
   (DST)[1] = (SRC)[1];             \
   (DST)[2] = (SRC)[2];             \
   (DST)[3] = (SRC)[3];             \
} while (0)
#endif


#ifndef COPY_4FV
#define COPY_4FV( DST, SRC )  COPY_4V(DST, SRC)
#endif


#ifndef ASSIGN_4V
#define ASSIGN_4V( DST, V0, V1, V2, V3 ) \
do {                                     \
   (DST)[0] = (V0);                      \
   (DST)[1] = (V1);                      \
   (DST)[2] = (V2);                      \
   (DST)[3] = (V3);                      \
} while (0)
#endif


static inline uint32_t
util_unsigned_fixed(float value, unsigned frac_bits)
{
   return value < 0 ? 0 : (uint32_t)(value * (1<<frac_bits));
}

static inline int32_t
util_signed_fixed(float value, unsigned frac_bits)
{
   return (int32_t)(value * (1<<frac_bits));
}

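/*
 * Example (illustrative only): conversion to fixed point with the given
 * number of fractional bits; util_unsigned_fixed() clamps negative input
 * to zero:
 *
 *    util_unsigned_fixed(1.5f, 8);    // 384 (1.5 * 256)
 *    util_unsigned_fixed(-2.0f, 8);   // 0
 *    util_signed_fixed(-1.25f, 4);    // -20 (-1.25 * 16)
 */
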
unsigned
util_fpstate_get(void);
unsigned
util_fpstate_set_denorms_to_zero(unsigned current_fpstate);
void
util_fpstate_set(unsigned fpstate);



#ifdef __cplusplus
}
#endif

#endif /* U_MATH_H */