u_math.h revision 01e04c3f
/**************************************************************************
 *
 * Copyright 2008 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * Math utilities and approximations for common math functions.
 * Reduced precision is usually acceptable in shaders...
 *
 * "fast" is used in the names of functions which are low-precision,
 * or at least lower-precision than the normal C lib functions.
 */


#ifndef U_MATH_H
#define U_MATH_H


#include "pipe/p_compiler.h"

#include "c99_math.h"
#include <assert.h>
#include <float.h>
#include <stdarg.h>

#include "bitscan.h"

#ifdef __cplusplus
extern "C" {
#endif


#ifndef M_SQRT2
#define M_SQRT2 1.41421356237309504880
#endif

#define POW2_TABLE_SIZE_LOG2 9
#define POW2_TABLE_SIZE (1 << POW2_TABLE_SIZE_LOG2)
#define POW2_TABLE_OFFSET (POW2_TABLE_SIZE/2)
#define POW2_TABLE_SCALE ((float)(POW2_TABLE_SIZE/2))
extern float pow2_table[POW2_TABLE_SIZE];


/**
 * Initialize math module. This should be called before using any
 * other functions in this module.
 */
extern void
util_init_math(void);


union fi {
   float f;
   int32_t i;
   uint32_t ui;
};


union di {
   double d;
   int64_t i;
   uint64_t ui;
};


/**
 * Extract the IEEE float32 exponent.
 */
static inline signed
util_get_float32_exponent(float x)
{
   union fi f;

   f.f = x;

   return ((f.ui >> 23) & 0xff) - 127;
}
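
/*
 * Illustrative sketch (not part of the original header): union fi is the
 * type-punning vehicle used throughout this file, and the exponent helper
 * follows directly from the IEEE-754 binary32 layout (1 sign bit, 8
 * exponent bits, 23 mantissa bits, exponent bias 127):
 *
 *    union fi tmp;
 *    tmp.f = 1.0f;
 *    assert(tmp.ui == 0x3f800000);                  // biased exponent 127
 *    assert(util_get_float32_exponent(8.0f) == 3);
 *    assert(util_get_float32_exponent(0.5f) == -1);
 */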

/**
 * Fast version of 2^x
 * Identity: exp2(a + b) = exp2(a) * exp2(b)
 * Let ipart = int(x)
 * Let fpart = x - ipart;
 * So, exp2(x) = exp2(ipart) * exp2(fpart)
 * Compute exp2(ipart) with 1 << ipart
 * Compute exp2(fpart) with lookup table.
 */
static inline float
util_fast_exp2(float x)
{
   int32_t ipart;
   float fpart, mpart;
   union fi epart;

   if (x > 129.00000f)
      return 3.402823466e+38f;

   if (x < -126.99999f)
      return 0.0f;

   ipart = (int32_t) x;
   fpart = x - (float) ipart;

   /* same as
    *   epart.f = (float) (1 << ipart)
    * but faster and without integer overflow for ipart > 31
    */
   epart.i = (ipart + 127) << 23;

   mpart = pow2_table[POW2_TABLE_OFFSET + (int)(fpart * POW2_TABLE_SCALE)];

   return epart.f * mpart;
}


/**
 * Fast approximation to exp(x).
 */
static inline float
util_fast_exp(float x)
{
   const float k = 1.44269f; /* = log2(e) */
   return util_fast_exp2(k * x);
}


#define LOG2_TABLE_SIZE_LOG2 16
#define LOG2_TABLE_SCALE (1 << LOG2_TABLE_SIZE_LOG2)
#define LOG2_TABLE_SIZE (LOG2_TABLE_SCALE + 1)
extern float log2_table[LOG2_TABLE_SIZE];


/**
 * Fast approximation to log2(x).
 */
static inline float
util_fast_log2(float x)
{
   union fi num;
   float epart, mpart;
   num.f = x;
   epart = (float)(((num.i & 0x7f800000) >> 23) - 127);
   /* mpart = log2_table[mantissa*LOG2_TABLE_SCALE + 0.5] */
   mpart = log2_table[((num.i & 0x007fffff) + (1 << (22 - LOG2_TABLE_SIZE_LOG2))) >> (23 - LOG2_TABLE_SIZE_LOG2)];
   return epart + mpart;
}


/**
 * Fast approximation to x^y.
 */
static inline float
util_fast_pow(float x, float y)
{
   return util_fast_exp2(util_fast_log2(x) * y);
}
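
/*
 * Usage sketch (illustrative, not part of the original header).  The fast
 * functions trade accuracy for speed via the identity
 *
 *    x^y = exp2(y * log2(x)),  valid for x > 0,
 *
 * so callers should budget for a small relative error.  The 1% tolerance
 * below is an assumption for illustration, not a documented error bound,
 * and util_init_math() must have filled the lookup tables first:
 *
 *    util_init_math();
 *    float fast = util_fast_pow(2.7f, 1.3f);
 *    float ref  = powf(2.7f, 1.3f);
 *    assert(fabsf(fast - ref) <= 0.01f * ref);
 */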

/**
 * Floor(x), returned as int.
 */
static inline int
util_ifloor(float f)
{
   /* Bias by the magic constant (3 << 22) + 0.5 = 1.5*2^23 + 0.5 so that
    * rounding to float leaves the integer part in the low mantissa bits;
    * the difference of the two bit patterns, shifted right, is floor(f),
    * without touching the FPU rounding mode.
    */
   int ai, bi;
   double af, bf;
   union fi u;
   af = (3 << 22) + 0.5 + (double) f;
   bf = (3 << 22) + 0.5 - (double) f;
   u.f = (float) af; ai = u.i;
   u.f = (float) bf; bi = u.i;
   return (ai - bi) >> 1;
}


/**
 * Round float to nearest int.
 */
static inline int
util_iround(float f)
{
#if defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86)
   /* x87 fistp rounds per the current FPU mode (round-to-nearest-even by
    * default), unlike the generic fallback below which rounds halves away
    * from zero.
    */
   int r;
   __asm__ ("fistpl %0" : "=m" (r) : "t" (f) : "st");
   return r;
#elif defined(PIPE_CC_MSVC) && defined(PIPE_ARCH_X86)
   int r;
   _asm {
      fld f
      fistp r
   }
   return r;
#else
   if (f >= 0.0f)
      return (int) (f + 0.5f);
   else
      return (int) (f - 0.5f);
#endif
}


/**
 * Approximate floating point comparison.
 */
static inline boolean
util_is_approx(float a, float b, float tol)
{
   return fabsf(b - a) <= tol;
}


/**
 * util_is_X_inf_or_nan = test if x is NaN or +/- Inf
 * util_is_X_nan = test if x is NaN
 * util_X_inf_sign = return +1 for +Inf, -1 for -Inf, or 0 for not Inf
 *
 * NaN can be checked with x != x, but that test breaks under fast-math
 * compiler flags.
 */


/**
 * Single-float
 */
static inline boolean
util_is_inf_or_nan(float x)
{
   union fi tmp;
   tmp.f = x;
   return (tmp.ui & 0x7f800000) == 0x7f800000;
}


static inline boolean
util_is_nan(float x)
{
   union fi tmp;
   tmp.f = x;
   return (tmp.ui & 0x7fffffff) > 0x7f800000;
}


static inline int
util_inf_sign(float x)
{
   union fi tmp;
   tmp.f = x;
   if ((tmp.ui & 0x7fffffff) != 0x7f800000) {
      return 0;
   }

   return (x < 0) ? -1 : 1;
}


/**
 * Double-float
 */
static inline boolean
util_is_double_inf_or_nan(double x)
{
   union di tmp;
   tmp.d = x;
   return (tmp.ui & 0x7ff0000000000000ULL) == 0x7ff0000000000000ULL;
}


static inline boolean
util_is_double_nan(double x)
{
   union di tmp;
   tmp.d = x;
   return (tmp.ui & 0x7fffffffffffffffULL) > 0x7ff0000000000000ULL;
}


static inline int
util_double_inf_sign(double x)
{
   union di tmp;
   tmp.d = x;
   if ((tmp.ui & 0x7fffffffffffffffULL) != 0x7ff0000000000000ULL) {
      return 0;
   }

   return (x < 0) ? -1 : 1;
}


/**
 * Half-float
 */
static inline boolean
util_is_half_inf_or_nan(int16_t x)
{
   return (x & 0x7c00) == 0x7c00;
}


static inline boolean
util_is_half_nan(int16_t x)
{
   return (x & 0x7fff) > 0x7c00;
}


static inline int
util_half_inf_sign(int16_t x)
{
   if ((x & 0x7fff) != 0x7c00) {
      return 0;
   }

   return (x < 0) ? -1 : 1;
}


/**
 * Return float bits.
 */
static inline unsigned
fui( float f )
{
   union fi fi;
   fi.f = f;
   return fi.ui;
}

static inline float
uif(uint32_t ui)
{
   union fi fi;
   fi.ui = ui;
   return fi.f;
}


/**
 * Convert ubyte to float in [0, 1].
 */
static inline float
ubyte_to_float(ubyte ub)
{
   return (float) ub * (1.0f / 255.0f);
}


/**
 * Convert float in [0,1] to ubyte in [0,255] with clamping.
 */
static inline ubyte
float_to_ubyte(float f)
{
   /* return 0 for NaN too */
   if (!(f > 0.0f)) {
      return (ubyte) 0;
   }
   else if (f >= 1.0f) {
      return (ubyte) 255;
   }
   else {
      union fi tmp;
      tmp.f = f;
      tmp.f = tmp.f * (255.0f/256.0f) + 32768.0f;
      return (ubyte) tmp.i;
   }
}

static inline float
byte_to_float_tex(int8_t b)
{
   return (b == -128) ? -1.0F : b * 1.0F / 127.0F;
}

static inline int8_t
float_to_byte_tex(float f)
{
   return (int8_t) (127.0F * f);
}
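
/*
 * Worked example (illustrative, not part of the original header) of the
 * bias trick in float_to_ubyte().  After the multiply, the value lies in
 * [0, 255/256); adding 32768.0f (2^15) pins the result's exponent, so one
 * mantissa LSB is worth 2^-8 and the low byte of tmp.i ends up holding
 * round(f * 255) directly:
 *
 *    float_to_ubyte(0.0f)  == 0     // NaN also maps to 0
 *    float_to_ubyte(0.25f) == 64    // round(0.25 * 255) = round(63.75)
 *    float_to_ubyte(1.0f)  == 255
 *    float_to_ubyte(-1.0f) == 0     // clamped
 */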

/**
 * Calc log base 2
 */
static inline unsigned
util_logbase2(unsigned n)
{
#if defined(HAVE___BUILTIN_CLZ)
   return ((sizeof(unsigned) * 8 - 1) - __builtin_clz(n | 1));
#else
   unsigned pos = 0;
   if (n >= 1<<16) { n >>= 16; pos += 16; }
   if (n >= 1<< 8) { n >>=  8; pos +=  8; }
   if (n >= 1<< 4) { n >>=  4; pos +=  4; }
   if (n >= 1<< 2) { n >>=  2; pos +=  2; }
   if (n >= 1<< 1) { pos += 1; }
   return pos;
#endif
}

static inline uint64_t
util_logbase2_64(uint64_t n)
{
#if defined(HAVE___BUILTIN_CLZLL)
   return ((sizeof(uint64_t) * 8 - 1) - __builtin_clzll(n | 1));
#else
   uint64_t pos = 0ull;
   if (n >= 1ull<<32) { n >>= 32; pos += 32; }
   if (n >= 1ull<<16) { n >>= 16; pos += 16; }
   if (n >= 1ull<< 8) { n >>=  8; pos +=  8; }
   if (n >= 1ull<< 4) { n >>=  4; pos +=  4; }
   if (n >= 1ull<< 2) { n >>=  2; pos +=  2; }
   if (n >= 1ull<< 1) { pos += 1; }
   return pos;
#endif
}

/**
 * Returns the ceiling of log n base 2, and 0 when n == 0. Equivalently,
 * returns the smallest x such that n <= 2**x.
 */
static inline unsigned
util_logbase2_ceil(unsigned n)
{
   if (n <= 1)
      return 0;

   return 1 + util_logbase2(n - 1);
}

static inline uint64_t
util_logbase2_ceil64(uint64_t n)
{
   if (n <= 1)
      return 0;

   return 1ull + util_logbase2_64(n - 1);
}

/**
 * Returns the smallest power of two >= x
 */
static inline unsigned
util_next_power_of_two(unsigned x)
{
#if defined(HAVE___BUILTIN_CLZ)
   if (x <= 1)
      return 1;

   return (1 << ((sizeof(unsigned) * 8) - __builtin_clz(x - 1)));
#else
   unsigned val = x;

   if (x <= 1)
      return 1;

   if (util_is_power_of_two_or_zero(x))
      return x;

   val--;
   val = (val >> 1) | val;
   val = (val >> 2) | val;
   val = (val >> 4) | val;
   val = (val >> 8) | val;
   val = (val >> 16) | val;
   val++;
   return val;
#endif
}

static inline uint64_t
util_next_power_of_two64(uint64_t x)
{
#if defined(HAVE___BUILTIN_CLZLL)
   if (x <= 1)
      return 1;

   return (1ull << ((sizeof(uint64_t) * 8) - __builtin_clzll(x - 1)));
#else
   uint64_t val = x;

   if (x <= 1)
      return 1;

   if (util_is_power_of_two_or_zero64(x))
      return x;

   val--;
   val = (val >> 1) | val;
   val = (val >> 2) | val;
   val = (val >> 4) | val;
   val = (val >> 8) | val;
   val = (val >> 16) | val;
   val = (val >> 32) | val;
   val++;
   return val;
#endif
}


/**
 * Return number of bits set in n.
 */
static inline unsigned
util_bitcount(unsigned n)
{
#if defined(HAVE___BUILTIN_POPCOUNT)
   return __builtin_popcount(n);
#else
   /* K&R classic bitcount.
    *
    * For each iteration, clear the LSB from the bitfield.
    * Requires only one iteration per set bit, instead of
    * one iteration per bit less than highest set bit.
    */
   unsigned bits;
   for (bits = 0; n; bits++) {
      n &= n - 1;
   }
   return bits;
#endif
}


static inline unsigned
util_bitcount64(uint64_t n)
{
#ifdef HAVE___BUILTIN_POPCOUNTLL
   return __builtin_popcountll(n);
#else
   return util_bitcount(n) + util_bitcount(n >> 32);
#endif
}


/**
 * Reverse bits in n
 * Algorithm taken from:
 * http://stackoverflow.com/questions/9144800/c-reverse-bits-in-unsigned-integer
 */
static inline unsigned
util_bitreverse(unsigned n)
{
   n = ((n >> 1) & 0x55555555u) | ((n & 0x55555555u) << 1);
   n = ((n >> 2) & 0x33333333u) | ((n & 0x33333333u) << 2);
   n = ((n >> 4) & 0x0f0f0f0fu) | ((n & 0x0f0f0f0fu) << 4);
   n = ((n >> 8) & 0x00ff00ffu) | ((n & 0x00ff00ffu) << 8);
   n = ((n >> 16) & 0xffffu) | ((n & 0xffffu) << 16);
   return n;
}
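
/*
 * Illustrative values (not part of the original header).  The floor/ceil
 * variants differ only for non-powers-of-two, and the n == 0 edge cases
 * return defined results:
 *
 *    util_logbase2(1)    == 0       util_logbase2(0)         == 0
 *    util_logbase2(1000) == 9       util_logbase2_ceil(1000) == 10
 *    util_next_power_of_two(1000) == 1024
 *    util_next_power_of_two(0)    == 1
 *    util_bitcount(0xf0)  == 4
 *    util_bitreverse(0x1) == 0x80000000
 */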

/**
 * Convert from little endian to CPU byte order.
 */

#ifdef PIPE_ARCH_BIG_ENDIAN
#define util_le64_to_cpu(x) util_bswap64(x)
#define util_le32_to_cpu(x) util_bswap32(x)
#define util_le16_to_cpu(x) util_bswap16(x)
#else
#define util_le64_to_cpu(x) (x)
#define util_le32_to_cpu(x) (x)
#define util_le16_to_cpu(x) (x)
#endif

#define util_cpu_to_le64(x) util_le64_to_cpu(x)
#define util_cpu_to_le32(x) util_le32_to_cpu(x)
#define util_cpu_to_le16(x) util_le16_to_cpu(x)

/**
 * Reverse byte order of a 32 bit word.
 */
static inline uint32_t
util_bswap32(uint32_t n)
{
#if defined(HAVE___BUILTIN_BSWAP32)
   return __builtin_bswap32(n);
#else
   return (n >> 24) |
          ((n >> 8) & 0x0000ff00) |
          ((n << 8) & 0x00ff0000) |
          (n << 24);
#endif
}

/**
 * Reverse byte order of a 64bit word.
 */
static inline uint64_t
util_bswap64(uint64_t n)
{
#if defined(HAVE___BUILTIN_BSWAP64)
   return __builtin_bswap64(n);
#else
   return ((uint64_t)util_bswap32((uint32_t)n) << 32) |
          util_bswap32((n >> 32));
#endif
}


/**
 * Reverse byte order of a 16 bit word.
 */
static inline uint16_t
util_bswap16(uint16_t n)
{
   return (n >> 8) |
          (n << 8);
}

static inline void*
util_memcpy_cpu_to_le32(void * restrict dest, const void * restrict src, size_t n)
{
#ifdef PIPE_ARCH_BIG_ENDIAN
   size_t i, e;
   assert(n % 4 == 0);

   for (i = 0, e = n / 4; i < e; i++) {
      uint32_t * restrict d = (uint32_t* restrict)dest;
      const uint32_t * restrict s = (const uint32_t* restrict)src;
      d[i] = util_bswap32(s[i]);
   }
   return dest;
#else
   return memcpy(dest, src, n);
#endif
}

/**
 * Clamp X to [MIN, MAX].
 * This is a macro to allow float, int, uint, etc. types.
 * We arbitrarily turn NaN into MIN.
 */
#define CLAMP( X, MIN, MAX )  ( (X)>(MIN) ? ((X)>(MAX) ? (MAX) : (X)) : (MIN) )

#define MIN2( A, B )   ( (A)<(B) ? (A) : (B) )
#define MAX2( A, B )   ( (A)>(B) ? (A) : (B) )

#define MIN3( A, B, C ) ((A) < (B) ? MIN2(A, C) : MIN2(B, C))
#define MAX3( A, B, C ) ((A) > (B) ? MAX2(A, C) : MAX2(B, C))

#define MIN4( A, B, C, D ) ((A) < (B) ? MIN3(A, C, D) : MIN3(B, C, D))
#define MAX4( A, B, C, D ) ((A) > (B) ? MAX3(A, C, D) : MAX3(B, C, D))


/**
 * Align a value; only works for power-of-two alignments.
 */
static inline int
align(int value, int alignment)
{
   return (value + alignment - 1) & ~(alignment - 1);
}

static inline uint64_t
align64(uint64_t value, unsigned alignment)
{
   return (value + alignment - 1) & ~((uint64_t)alignment - 1);
}

/**
 * Works like align but on non-power-of-two alignments.
 */
static inline size_t
util_align_npot(size_t value, size_t alignment)
{
   if (value % alignment)
      return value + (alignment - (value % alignment));
   return value;
}

static inline unsigned
u_minify(unsigned value, unsigned levels)
{
   return MAX2(1, value >> levels);
}

#ifndef COPY_4V
#define COPY_4V( DST, SRC )      \
do {                             \
   (DST)[0] = (SRC)[0];          \
   (DST)[1] = (SRC)[1];          \
   (DST)[2] = (SRC)[2];          \
   (DST)[3] = (SRC)[3];          \
} while (0)
#endif


#ifndef COPY_4FV
#define COPY_4FV( DST, SRC ) COPY_4V(DST, SRC)
#endif


#ifndef ASSIGN_4V
#define ASSIGN_4V( DST, V0, V1, V2, V3 ) \
do {                                     \
   (DST)[0] = (V0);                      \
   (DST)[1] = (V1);                      \
   (DST)[2] = (V2);                      \
   (DST)[3] = (V3);                      \
} while (0)
#endif
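
/*
 * Illustrative values (not part of the original header):
 *
 *    align(13, 8)            == 16   // power-of-two alignments only
 *    util_align_npot(13, 12) == 24   // handles non-power-of-two
 *    CLAMP(5, 0, 3)          == 3
 *    u_minify(64, 3)         == 8    // size of mip level 3 for a 64-texel axis
 */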

static inline uint32_t
util_unsigned_fixed(float value, unsigned frac_bits)
{
   return value < 0 ? 0 : (uint32_t)(value * (1<<frac_bits));
}

static inline int32_t
util_signed_fixed(float value, unsigned frac_bits)
{
   return (int32_t)(value * (1<<frac_bits));
}

unsigned
util_fpstate_get(void);
unsigned
util_fpstate_set_denorms_to_zero(unsigned current_fpstate);
void
util_fpstate_set(unsigned fpstate);



#ifdef __cplusplus
}
#endif

#endif /* U_MATH_H */