/* u_math.h, revision d8407755 */
/**************************************************************************
 *
 * Copyright 2008 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * Math utilities and approximations for common math functions.
 * Reduced precision is usually acceptable in shaders...
 *
 * "fast" is used in the names of functions which are low-precision,
 * or at least lower-precision than the normal C lib functions.
 */


#ifndef U_MATH_H
#define U_MATH_H


#include "pipe/p_compiler.h"

#include "c99_math.h"
#include <assert.h>
#include <float.h>
#include <stdarg.h>

#include "bitscan.h"

#ifdef __cplusplus
extern "C" {
#endif


#ifndef M_SQRT2
#define M_SQRT2 1.41421356237309504880
#endif

#define POW2_TABLE_SIZE_LOG2 9
#define POW2_TABLE_SIZE (1 << POW2_TABLE_SIZE_LOG2)
#define POW2_TABLE_OFFSET (POW2_TABLE_SIZE/2)
#define POW2_TABLE_SCALE ((float)(POW2_TABLE_SIZE/2))
extern float pow2_table[POW2_TABLE_SIZE];

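/*
 * Informal usage note (not from the original header): pow2_table[] holds
 * sampled values of exp2() for fractional inputs in roughly [-1, 1), with
 * POW2_TABLE_OFFSET marking the entry for a fractional part of 0 and
 * POW2_TABLE_SCALE entries per unit.  For example, a fractional part of
 * 0.5 inside util_fast_exp2() ends up reading
 * pow2_table[POW2_TABLE_OFFSET + (int)(0.5f * POW2_TABLE_SCALE)], which
 * should be close to sqrt(2) ~= 1.4142.  The table is presumably filled
 * in by util_init_math(), declared below.
 */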
/**
 * Initialize math module. This should be called before using any
 * other functions in this module.
 */
extern void
util_init_math(void);


union fi {
   float f;
   int32_t i;
   uint32_t ui;
};


union di {
   double d;
   int64_t i;
   uint64_t ui;
};


/**
 * Extract the IEEE float32 exponent.
 */
static inline signed
util_get_float32_exponent(float x)
{
   union fi f;

   f.f = x;

   return ((f.ui >> 23) & 0xff) - 127;
}


/**
 * Fast version of 2^x
 * Identity: exp2(a + b) = exp2(a) * exp2(b)
 * Let ipart = int(x)
 * Let fpart = x - ipart;
 * So, exp2(x) = exp2(ipart) * exp2(fpart)
 * Compute exp2(ipart) with i << ipart
 * Compute exp2(fpart) with lookup table.
 */
static inline float
util_fast_exp2(float x)
{
   int32_t ipart;
   float fpart, mpart;
   union fi epart;

   if (x > 129.00000f)
      return FLT_MAX;

   if (x < -126.99999f)
      return 0.0f;

   ipart = (int32_t) x;
   fpart = x - (float) ipart;

   /* same as
    *   epart.f = (float) (1 << ipart)
    * but faster and without integer overflow for ipart > 31
    */
   epart.i = (ipart + 127) << 23;

   mpart = pow2_table[POW2_TABLE_OFFSET + (int)(fpart * POW2_TABLE_SCALE)];

   return epart.f * mpart;
}


/**
 * Fast approximation to exp(x).
 */
static inline float
util_fast_exp(float x)
{
   const float k = 1.44269f; /* = log2(e) */
   return util_fast_exp2(k * x);
}


#define LOG2_TABLE_SIZE_LOG2 16
#define LOG2_TABLE_SCALE (1 << LOG2_TABLE_SIZE_LOG2)
#define LOG2_TABLE_SIZE (LOG2_TABLE_SCALE + 1)
extern float log2_table[LOG2_TABLE_SIZE];


/**
 * Fast approximation to log2(x).
 */
static inline float
util_fast_log2(float x)
{
   union fi num;
   float epart, mpart;
   num.f = x;
   epart = (float)(((num.i & 0x7f800000) >> 23) - 127);
   /* mpart = log2_table[mantissa*LOG2_TABLE_SCALE + 0.5] */
   mpart = log2_table[((num.i & 0x007fffff) + (1 << (22 - LOG2_TABLE_SIZE_LOG2))) >> (23 - LOG2_TABLE_SIZE_LOG2)];
   return epart + mpart;
}

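/*
 * Informal example (not from the original header): because the integer part
 * is handled exactly and only the fractional part goes through a lookup
 * table, util_fast_exp2(3.0f) should come out very close to 8.0f and
 * util_fast_log2(8.0f) very close to 3.0f.  The error is bounded by the
 * table resolutions above (POW2_TABLE_SIZE_LOG2 and LOG2_TABLE_SIZE_LOG2),
 * so treat these as shader-grade approximations, not as replacements for
 * exp2f()/log2f().
 */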
/**
 * Fast approximation to x^y.
 */
static inline float
util_fast_pow(float x, float y)
{
   return util_fast_exp2(util_fast_log2(x) * y);
}


/**
 * Floor(x), returned as int.
 */
static inline int
util_ifloor(float f)
{
   int ai, bi;
   double af, bf;
   union fi u;
   af = (3 << 22) + 0.5 + (double) f;
   bf = (3 << 22) + 0.5 - (double) f;
   u.f = (float) af;  ai = u.i;
   u.f = (float) bf;  bi = u.i;
   return (ai - bi) >> 1;
}


/**
 * Round float to nearest int.
 */
static inline int
util_iround(float f)
{
#if defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86)
   int r;
   __asm__ ("fistpl %0" : "=m" (r) : "t" (f) : "st");
   return r;
#elif defined(PIPE_CC_MSVC) && defined(PIPE_ARCH_X86)
   int r;
   _asm {
      fld f
      fistp r
   }
   return r;
#else
   if (f >= 0.0f)
      return (int) (f + 0.5f);
   else
      return (int) (f - 0.5f);
#endif
}


/**
 * Approximate floating point comparison
 */
static inline boolean
util_is_approx(float a, float b, float tol)
{
   return fabsf(b - a) <= tol;
}


/**
 * util_is_X_inf_or_nan = test if x is NaN or +/- Inf
 * util_is_X_nan        = test if x is NaN
 * util_X_inf_sign      = return +1 for +Inf, -1 for -Inf, or 0 for not Inf
 *
 * NaN can be checked with x != x, however this fails with the fast math flag
 **/


/**
 * Single-float
 */
static inline boolean
util_is_inf_or_nan(float x)
{
   union fi tmp;
   tmp.f = x;
   return (tmp.ui & 0x7f800000) == 0x7f800000;
}


static inline boolean
util_is_nan(float x)
{
   union fi tmp;
   tmp.f = x;
   return (tmp.ui & 0x7fffffff) > 0x7f800000;
}


static inline int
util_inf_sign(float x)
{
   union fi tmp;
   tmp.f = x;
   if ((tmp.ui & 0x7fffffff) != 0x7f800000) {
      return 0;
   }

   return (x < 0) ? -1 : 1;
}
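/*
 * Informal note (not from the original header): the helpers above classify
 * IEEE-754 values purely from their bit patterns, so they keep working even
 * when fast-math optimizations make the x != x test unreliable.  An exponent
 * field of all ones (0x7f800000 after masking off the sign bit) means Inf or
 * NaN; a NaN additionally has a non-zero mantissa.  For example, for a float
 * whose bits are 0x7f800000 (positive infinity), util_is_inf_or_nan()
 * returns TRUE, util_is_nan() returns FALSE, and util_inf_sign() returns +1.
 */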


/**
 * Double-float
 */
static inline boolean
util_is_double_inf_or_nan(double x)
{
   union di tmp;
   tmp.d = x;
   return (tmp.ui & 0x7ff0000000000000ULL) == 0x7ff0000000000000ULL;
}


static inline boolean
util_is_double_nan(double x)
{
   union di tmp;
   tmp.d = x;
   return (tmp.ui & 0x7fffffffffffffffULL) > 0x7ff0000000000000ULL;
}


static inline int
util_double_inf_sign(double x)
{
   union di tmp;
   tmp.d = x;
   if ((tmp.ui & 0x7fffffffffffffffULL) != 0x7ff0000000000000ULL) {
      return 0;
   }

   return (x < 0) ? -1 : 1;
}


/**
 * Half-float
 */
static inline boolean
util_is_half_inf_or_nan(int16_t x)
{
   return (x & 0x7c00) == 0x7c00;
}


static inline boolean
util_is_half_nan(int16_t x)
{
   return (x & 0x7fff) > 0x7c00;
}


static inline int
util_half_inf_sign(int16_t x)
{
   if ((x & 0x7fff) != 0x7c00) {
      return 0;
   }

   return (x < 0) ? -1 : 1;
}


/**
 * Return float bits.
 */
static inline unsigned
fui( float f )
{
   union fi fi;
   fi.f = f;
   return fi.ui;
}

static inline float
uif(uint32_t ui)
{
   union fi fi;
   fi.ui = ui;
   return fi.f;
}


/**
 * Convert ubyte to float in [0, 1].
 */
static inline float
ubyte_to_float(ubyte ub)
{
   return (float) ub * (1.0f / 255.0f);
}


/**
 * Convert float in [0,1] to ubyte in [0,255] with clamping.
 */
static inline ubyte
float_to_ubyte(float f)
{
   /* return 0 for NaN too */
   if (!(f > 0.0f)) {
      return (ubyte) 0;
   }
   else if (f >= 1.0f) {
      return (ubyte) 255;
   }
   else {
      union fi tmp;
      tmp.f = f;
      tmp.f = tmp.f * (255.0f/256.0f) + 32768.0f;
      return (ubyte) tmp.i;
   }
}

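/*
 * Informal note (not from the original header): float_to_ubyte() above and
 * float_to_ushort() below use the same bias trick.  The scaled value is
 * added to a constant chosen so that one mantissa ulp of the sum equals the
 * target quantum (1/256 in the [32768, 32769) range used for bytes, 1/65536
 * in the [128, 129) range used for ushorts).  The FPU's round-to-nearest
 * then leaves round(f * 255) or round(f * 65535) in the low mantissa bits of
 * the float, and the integer cast of tmp.i simply extracts them.  For
 * example, float_to_ubyte(0.5f) should yield 128.
 */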
/**
 * Convert ushort to float in [0, 1].
 */
static inline float
ushort_to_float(ushort us)
{
   return (float) us * (1.0f / 65535.0f);
}


/**
 * Convert float in [0,1] to ushort in [0,65535] with clamping.
 */
static inline ushort
float_to_ushort(float f)
{
   /* return 0 for NaN too */
   if (!(f > 0.0f)) {
      return (ushort) 0;
   }
   else if (f >= 1.0f) {
      return (ushort) 65535;
   }
   else {
      union fi tmp;
      tmp.f = f;
      tmp.f = tmp.f * (65535.0f/65536.0f) + 128.0f;
      return (ushort) tmp.i;
   }
}

static inline float
byte_to_float_tex(int8_t b)
{
   return (b == -128) ? -1.0F : b * 1.0F / 127.0F;
}

static inline int8_t
float_to_byte_tex(float f)
{
   return (int8_t) (127.0F * f);
}

/**
 * Calc log base 2
 */
static inline unsigned
util_logbase2(unsigned n)
{
#if defined(HAVE___BUILTIN_CLZ)
   return ((sizeof(unsigned) * 8 - 1) - __builtin_clz(n | 1));
#else
   unsigned pos = 0;
   if (n >= 1<<16) { n >>= 16; pos += 16; }
   if (n >= 1<< 8) { n >>=  8; pos +=  8; }
   if (n >= 1<< 4) { n >>=  4; pos +=  4; }
   if (n >= 1<< 2) { n >>=  2; pos +=  2; }
   if (n >= 1<< 1) { pos +=  1; }
   return pos;
#endif
}

static inline uint64_t
util_logbase2_64(uint64_t n)
{
#if defined(HAVE___BUILTIN_CLZLL)
   return ((sizeof(uint64_t) * 8 - 1) - __builtin_clzll(n | 1));
#else
   uint64_t pos = 0ull;
   if (n >= 1ull<<32) { n >>= 32; pos += 32; }
   if (n >= 1ull<<16) { n >>= 16; pos += 16; }
   if (n >= 1ull<< 8) { n >>=  8; pos +=  8; }
   if (n >= 1ull<< 4) { n >>=  4; pos +=  4; }
   if (n >= 1ull<< 2) { n >>=  2; pos +=  2; }
   if (n >= 1ull<< 1) { pos +=  1; }
   return pos;
#endif
}

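/*
 * Informal examples (not from the original header): util_logbase2() computes
 * floor(log2(n)) for n > 0, e.g. util_logbase2(1) == 0, util_logbase2(7) == 2
 * and util_logbase2(8) == 3.  Because a low bit is ORed in before counting
 * leading zeros, util_logbase2(0) also returns 0 rather than being undefined.
 */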
/**
 * Returns the ceiling of log n base 2, and 0 when n == 0. Equivalently,
 * returns the smallest x such that n <= 2**x.
 */
static inline unsigned
util_logbase2_ceil(unsigned n)
{
   if (n <= 1)
      return 0;

   return 1 + util_logbase2(n - 1);
}

static inline uint64_t
util_logbase2_ceil64(uint64_t n)
{
   if (n <= 1)
      return 0;

   return 1ull + util_logbase2_64(n - 1);
}

/**
 * Returns the smallest power of two >= x
 */
static inline unsigned
util_next_power_of_two(unsigned x)
{
#if defined(HAVE___BUILTIN_CLZ)
   if (x <= 1)
      return 1;

   return (1 << ((sizeof(unsigned) * 8) - __builtin_clz(x - 1)));
#else
   unsigned val = x;

   if (x <= 1)
      return 1;

   if (util_is_power_of_two_or_zero(x))
      return x;

   val--;
   val = (val >> 1) | val;
   val = (val >> 2) | val;
   val = (val >> 4) | val;
   val = (val >> 8) | val;
   val = (val >> 16) | val;
   val++;
   return val;
#endif
}

static inline uint64_t
util_next_power_of_two64(uint64_t x)
{
#if defined(HAVE___BUILTIN_CLZLL)
   if (x <= 1)
      return 1;

   return (1ull << ((sizeof(uint64_t) * 8) - __builtin_clzll(x - 1)));
#else
   uint64_t val = x;

   if (x <= 1)
      return 1;

   if (util_is_power_of_two_or_zero64(x))
      return x;

   val--;
   val = (val >> 1)  | val;
   val = (val >> 2)  | val;
   val = (val >> 4)  | val;
   val = (val >> 8)  | val;
   val = (val >> 16) | val;
   val = (val >> 32) | val;
   val++;
   return val;
#endif
}

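/*
 * Informal examples (not from the original header):
 *   util_next_power_of_two(5) == 8, util_next_power_of_two(8) == 8,
 *   and both 0 and 1 map to 1.
 *   util_logbase2_ceil(5) == 3 and util_logbase2_ceil(8) == 3,
 *   matching "smallest x such that n <= 2**x" above.
 */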
/**
 * Return number of bits set in n.
 */
static inline unsigned
util_bitcount(unsigned n)
{
#if defined(HAVE___BUILTIN_POPCOUNT)
   return __builtin_popcount(n);
#else
   /* K&R classic bitcount.
    *
    * For each iteration, clear the LSB from the bitfield.
    * Requires only one iteration per set bit, instead of
    * one iteration per bit less than highest set bit.
    */
   unsigned bits;
   for (bits = 0; n; bits++) {
      n &= n - 1;
   }
   return bits;
#endif
}


static inline unsigned
util_bitcount64(uint64_t n)
{
#ifdef HAVE___BUILTIN_POPCOUNTLL
   return __builtin_popcountll(n);
#else
   return util_bitcount(n) + util_bitcount(n >> 32);
#endif
}


/**
 * Reverse bits in n
 * Algorithm taken from:
 * http://stackoverflow.com/questions/9144800/c-reverse-bits-in-unsigned-integer
 */
static inline unsigned
util_bitreverse(unsigned n)
{
   n = ((n >> 1) & 0x55555555u) | ((n & 0x55555555u) << 1);
   n = ((n >> 2) & 0x33333333u) | ((n & 0x33333333u) << 2);
   n = ((n >> 4) & 0x0f0f0f0fu) | ((n & 0x0f0f0f0fu) << 4);
   n = ((n >> 8) & 0x00ff00ffu) | ((n & 0x00ff00ffu) << 8);
   n = ((n >> 16) & 0xffffu) | ((n & 0xffffu) << 16);
   return n;
}

/**
 * Convert from little endian to CPU byte order.
 */

#ifdef PIPE_ARCH_BIG_ENDIAN
#define util_le64_to_cpu(x) util_bswap64(x)
#define util_le32_to_cpu(x) util_bswap32(x)
#define util_le16_to_cpu(x) util_bswap16(x)
#else
#define util_le64_to_cpu(x) (x)
#define util_le32_to_cpu(x) (x)
#define util_le16_to_cpu(x) (x)
#endif

#define util_cpu_to_le64(x) util_le64_to_cpu(x)
#define util_cpu_to_le32(x) util_le32_to_cpu(x)
#define util_cpu_to_le16(x) util_le16_to_cpu(x)

/**
 * Reverse byte order of a 32 bit word.
 */
static inline uint32_t
util_bswap32(uint32_t n)
{
#if defined(HAVE___BUILTIN_BSWAP32)
   return __builtin_bswap32(n);
#else
   return (n >> 24) |
          ((n >> 8) & 0x0000ff00) |
          ((n << 8) & 0x00ff0000) |
          (n << 24);
#endif
}

/**
 * Reverse byte order of a 64bit word.
 */
static inline uint64_t
util_bswap64(uint64_t n)
{
#if defined(HAVE___BUILTIN_BSWAP64)
   return __builtin_bswap64(n);
#else
   return ((uint64_t)util_bswap32((uint32_t)n) << 32) |
          util_bswap32((n >> 32));
#endif
}

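/*
 * Informal example (not from the original header):
 * util_bswap32(0x11223344) == 0x44332211.  On little-endian builds the
 * util_le*_to_cpu() / util_cpu_to_le*() macros above are no-ops; on
 * big-endian builds (PIPE_ARCH_BIG_ENDIAN) they expand to the corresponding
 * byte swap.  Since a byte swap is its own inverse, the cpu_to_le and
 * le_to_cpu macros can safely map to the same operation.
 */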
/**
 * Reverse byte order of a 16 bit word.
 */
static inline uint16_t
util_bswap16(uint16_t n)
{
   return (n >> 8) |
          (n << 8);
}

static inline void*
util_memcpy_cpu_to_le32(void * restrict dest, const void * restrict src, size_t n)
{
#ifdef PIPE_ARCH_BIG_ENDIAN
   size_t i, e;
   assert(n % 4 == 0);

   for (i = 0, e = n / 4; i < e; i++) {
      uint32_t * restrict d = (uint32_t* restrict)dest;
      const uint32_t * restrict s = (const uint32_t* restrict)src;
      d[i] = util_bswap32(s[i]);
   }
   return dest;
#else
   return memcpy(dest, src, n);
#endif
}

/**
 * Clamp X to [MIN, MAX].
 * This is a macro to allow float, int, uint, etc. types.
 * We arbitrarily turn NaN into MIN.
 */
#define CLAMP( X, MIN, MAX )  ( (X)>(MIN) ? ((X)>(MAX) ? (MAX) : (X)) : (MIN) )

#define MIN2( A, B )   ( (A)<(B) ? (A) : (B) )
#define MAX2( A, B )   ( (A)>(B) ? (A) : (B) )

#define MIN3( A, B, C ) ((A) < (B) ? MIN2(A, C) : MIN2(B, C))
#define MAX3( A, B, C ) ((A) > (B) ? MAX2(A, C) : MAX2(B, C))

#define MIN4( A, B, C, D ) ((A) < (B) ? MIN3(A, C, D) : MIN3(B, C, D))
#define MAX4( A, B, C, D ) ((A) > (B) ? MAX3(A, C, D) : MAX3(B, C, D))


/**
 * Align a value; only works with power-of-two alignments.
 */
static inline int
align(int value, int alignment)
{
   return (value + alignment - 1) & ~(alignment - 1);
}

static inline uint64_t
align64(uint64_t value, unsigned alignment)
{
   return (value + alignment - 1) & ~((uint64_t)alignment - 1);
}

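/*
 * Informal examples (not from the original header): align(13, 8) == 16 and
 * align(16, 8) == 16, but the bit-mask trick only works when the alignment
 * is a power of two; for other alignments use util_align_npot() below,
 * e.g. util_align_npot(13, 10) == 20.
 */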
/**
 * Works like align but on npot alignments.
 */
static inline size_t
util_align_npot(size_t value, size_t alignment)
{
   if (value % alignment)
      return value + (alignment - (value % alignment));
   return value;
}

static inline unsigned
u_minify(unsigned value, unsigned levels)
{
   return MAX2(1, value >> levels);
}

#ifndef COPY_4V
#define COPY_4V( DST, SRC )  \
do {                         \
   (DST)[0] = (SRC)[0];      \
   (DST)[1] = (SRC)[1];      \
   (DST)[2] = (SRC)[2];      \
   (DST)[3] = (SRC)[3];      \
} while (0)
#endif


#ifndef COPY_4FV
#define COPY_4FV( DST, SRC )  COPY_4V(DST, SRC)
#endif


#ifndef ASSIGN_4V
#define ASSIGN_4V( DST, V0, V1, V2, V3 ) \
do {                                     \
   (DST)[0] = (V0);                      \
   (DST)[1] = (V1);                      \
   (DST)[2] = (V2);                      \
   (DST)[3] = (V3);                      \
} while (0)
#endif


static inline uint32_t
util_unsigned_fixed(float value, unsigned frac_bits)
{
   return value < 0 ? 0 : (uint32_t)(value * (1<<frac_bits));
}

static inline int32_t
util_signed_fixed(float value, unsigned frac_bits)
{
   return (int32_t)(value * (1<<frac_bits));
}

unsigned
util_fpstate_get(void);
unsigned
util_fpstate_set_denorms_to_zero(unsigned current_fpstate);
void
util_fpstate_set(unsigned fpstate);



#ifdef __cplusplus
}
#endif

#endif /* U_MATH_H */