17ec681f3Smrg/* 27ec681f3Smrg * Copyright © 2018 Intel Corporation 37ec681f3Smrg * 47ec681f3Smrg * Permission is hereby granted, free of charge, to any person obtaining a 57ec681f3Smrg * copy of this software and associated documentation files (the "Software"), 67ec681f3Smrg * to deal in the Software without restriction, including without limitation 77ec681f3Smrg * the rights to use, copy, modify, merge, publish, distribute, sublicense, 87ec681f3Smrg * and/or sell copies of the Software, and to permit persons to whom the 97ec681f3Smrg * Software is furnished to do so, subject to the following conditions: 107ec681f3Smrg * 117ec681f3Smrg * The above copyright notice and this permission notice (including the next 127ec681f3Smrg * paragraph) shall be included in all copies or substantial portions of the 137ec681f3Smrg * Software. 147ec681f3Smrg * 157ec681f3Smrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 167ec681f3Smrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 177ec681f3Smrg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 187ec681f3Smrg * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 197ec681f3Smrg * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 207ec681f3Smrg * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 217ec681f3Smrg * IN THE SOFTWARE. 227ec681f3Smrg */ 237ec681f3Smrg#include <math.h> 247ec681f3Smrg#include <float.h> 257ec681f3Smrg#include "nir.h" 267ec681f3Smrg#include "nir_range_analysis.h" 277ec681f3Smrg#include "util/hash_table.h" 287ec681f3Smrg 297ec681f3Smrg/** 307ec681f3Smrg * Analyzes a sequence of operations to determine some aspects of the range of 317ec681f3Smrg * the result. 
 */

/* Range excludes negative values: > 0, >= 0, or == 0. */
static bool
is_not_negative(enum ssa_ranges r)
{
   return r == gt_zero || r == ge_zero || r == eq_zero;
}

/* Range excludes zero: > 0, < 0, or != 0. */
static bool
is_not_zero(enum ssa_ranges r)
{
   return r == gt_zero || r == lt_zero || r == ne_zero;
}

/* Pack a ssa_result_range into a pointer-sized value so that it can be
 * stored directly as a hash table payload: the range enum in the low 8 bits,
 * the three boolean flags in bits 8-10.
 */
static void *
pack_data(const struct ssa_result_range r)
{
   return (void *)(uintptr_t)(r.range | r.is_integral << 8 | r.is_finite << 9 |
                              r.is_a_number << 10);
}

/* Inverse of pack_data: recover a ssa_result_range from a hash table value. */
static struct ssa_result_range
unpack_data(const void *p)
{
   const uintptr_t v = (uintptr_t) p;

   return (struct ssa_result_range){
      .range = v & 0xff,
      .is_integral = (v & 0x00100) != 0,
      .is_finite = (v & 0x00200) != 0,
      .is_a_number = (v & 0x00400) != 0
   };
}

/* Build a hash table key from an ALU instruction pointer and the base type
 * the value is interpreted as.  The pointer's alignment guarantees that the
 * low two bits are zero, so they can carry the 2-bit type encoding.
 */
static void *
pack_key(const struct nir_alu_instr *instr, nir_alu_type type)
{
   uintptr_t type_encoding;
   uintptr_t ptr = (uintptr_t) instr;

   /* The low 2 bits have to be zero or this whole scheme falls apart. */
   assert((ptr & 0x3) == 0);

   /* NIR is typeless in the sense that sequences of bits have whatever
    * meaning is attached to them by the instruction that consumes them.
    * However, the number of bits must match between producer and consumer.
    * As a result, the number of bits does not need to be encoded here.
    */
   switch (nir_alu_type_get_base_type(type)) {
   case nir_type_int:   type_encoding = 0; break;
   case nir_type_uint:  type_encoding = 1; break;
   case nir_type_bool:  type_encoding = 2; break;
   case nir_type_float: type_encoding = 3; break;
   default: unreachable("Invalid base type.");
   }

   return (void *)(ptr | type_encoding);
}

/* Base type of source \p src combined with the bit size actually read. */
static nir_alu_type
nir_alu_src_type(const nir_alu_instr *instr, unsigned src)
{
   return nir_alu_type_get_base_type(nir_op_infos[instr->op].input_types[src]) |
          nir_src_bit_size(instr->src[src].src);
}

/* Compute the result range (and the integral / NaN / finite flags) of a
 * constant ALU source, interpreted as \p use_type.  Only the components
 * selected by the source swizzle are examined.
 */
static struct ssa_result_range
analyze_constant(const struct nir_alu_instr *instr, unsigned src,
                 nir_alu_type use_type)
{
   uint8_t swizzle[NIR_MAX_VEC_COMPONENTS] = { 0, 1, 2, 3,
                                               4, 5, 6, 7,
                                               8, 9, 10, 11,
                                               12, 13, 14, 15 };

   /* If the source is an explicitly sized source, then we need to reset
    * both the number of components and the swizzle.
    */
   const unsigned num_components = nir_ssa_alu_instr_src_components(instr, src);

   for (unsigned i = 0; i < num_components; ++i)
      swizzle[i] = instr->src[src].swizzle[i];

   const nir_load_const_instr *const load =
      nir_instr_as_load_const(instr->src[src].src.ssa->parent_instr);

   struct ssa_result_range r = { unknown, false, false, false };

   switch (nir_alu_type_get_base_type(use_type)) {
   case nir_type_float: {
      double min_value = DBL_MAX;
      double max_value = -DBL_MAX;
      bool any_zero = false;
      bool all_zero = true;

      /* Assume the best until some component proves otherwise. */
      r.is_integral = true;
      r.is_a_number = true;
      r.is_finite = true;

      for (unsigned i = 0; i < num_components; ++i) {
         const double v = nir_const_value_as_float(load->value[swizzle[i]],
                                                   load->def.bit_size);

         if (floor(v) != v)
            r.is_integral = false;

         if (isnan(v))
            r.is_a_number = false;

         if (!isfinite(v))
            r.is_finite = false;

         any_zero = any_zero || (v == 0.0);
         all_zero = all_zero && (v == 0.0);
         min_value = MIN2(min_value, v);
         max_value = MAX2(max_value, v);
      }

      /* all_zero implies any_zero. */
      assert(any_zero >= all_zero);
      assert(isnan(max_value) || max_value >= min_value);

      if (all_zero)
         r.range = eq_zero;
      else if (min_value > 0.0)
         r.range = gt_zero;
      else if (min_value == 0.0)
         r.range = ge_zero;
      else if (max_value < 0.0)
         r.range = lt_zero;
      else if (max_value == 0.0)
         r.range = le_zero;
      else if (!any_zero)
         r.range = ne_zero;
      else
         r.range = unknown;

      return r;
   }

   case nir_type_int:
   case nir_type_bool: {
      /* NOTE(review): the initial limits are INT_MAX / INT_MIN rather than
       * INT64_MAX / INT64_MIN even though the values are int64_t, so the
       * computed min / max can be clamped for 64-bit constants whose
       * magnitude exceeds the 32-bit limits.  The sign classification below
       * is still safe: min_value can only end up positive if every component
       * is positive, and max_value can only end up negative if every
       * component is negative.
       */
      int64_t min_value = INT_MAX;
      int64_t max_value = INT_MIN;
      bool any_zero = false;
      bool all_zero = true;

      for (unsigned i = 0; i < num_components; ++i) {
         const int64_t v = nir_const_value_as_int(load->value[swizzle[i]],
                                                  load->def.bit_size);

         any_zero = any_zero || (v == 0);
         all_zero = all_zero && (v == 0);
         min_value = MIN2(min_value, v);
         max_value = MAX2(max_value, v);
      }

      /* all_zero implies any_zero. */
      assert(any_zero >= all_zero);
      assert(max_value >= min_value);

      if (all_zero)
         r.range = eq_zero;
      else if (min_value > 0)
         r.range = gt_zero;
      else if (min_value == 0)
         r.range = ge_zero;
      else if (max_value < 0)
         r.range = lt_zero;
      else if (max_value == 0)
         r.range = le_zero;
      else if (!any_zero)
         r.range = ne_zero;
      else
         r.range = unknown;

      return r;
   }

   case nir_type_uint: {
      bool any_zero = false;
      bool all_zero = true;

      for (unsigned i = 0; i < num_components; ++i) {
         const uint64_t v = nir_const_value_as_uint(load->value[swizzle[i]],
                                                    load->def.bit_size);

         any_zero = any_zero || (v == 0);
         all_zero = all_zero && (v == 0);
      }

      /* all_zero implies any_zero. */
      assert(any_zero >= all_zero);

      /* An unsigned value is trivially >= 0; refine to == 0 or > 0 when the
       * components allow it.
       */
      if (all_zero)
         r.range = eq_zero;
      else if (any_zero)
         r.range = ge_zero;
      else
         r.range = gt_zero;

      return r;
   }

   default:
      unreachable("Invalid alu source type");
   }
}

/**
 * Short-hand name for use in the tables in analyze_expression.  If this name
 * becomes a problem on some compiler, we can change it to _.
 */
#define _______ unknown


/* Loop-unrolling pragmas for the table-verification macros below.  The
 * spelling differs per compiler, and MSVC has no C99 _Pragma() at all.
 */
#if defined(__clang__)
   /* clang wants _Pragma("unroll X") */
   #define pragma_unroll_5 _Pragma("unroll 5")
   #define pragma_unroll_7 _Pragma("unroll 7")
/* gcc wants _Pragma("GCC unroll X") */
#elif defined(__GNUC__)
   #if __GNUC__ >= 8
      #define pragma_unroll_5 _Pragma("GCC unroll 5")
      #define pragma_unroll_7 _Pragma("GCC unroll 7")
   #else
      #pragma GCC optimize ("unroll-loops")
      #define pragma_unroll_5
      #define pragma_unroll_7
   #endif
#else
   /* MSVC doesn't have C99's _Pragma() */
   #define pragma_unroll_5
   #define pragma_unroll_7
#endif


#ifndef NDEBUG
/* Debug-only sanity check: the table must be symmetric.  Each check runs
 * once per process, guarded by a function-local 'first' flag.
 */
#define ASSERT_TABLE_IS_COMMUTATIVE(t)                        \
   do {                                                       \
      static bool first = true;                               \
      if (first) {                                            \
         first = false;                                       \
         pragma_unroll_7                                      \
         for (unsigned r = 0; r < ARRAY_SIZE(t); r++) {       \
            pragma_unroll_7                                   \
            for (unsigned c = 0; c < ARRAY_SIZE(t[0]); c++)   \
               assert(t[r][c] == t[c][r]);                    \
         }                                                    \
      }                                                       \
   } while (false)

/* Debug-only sanity check: t[r][r] == r for every range r. */
#define ASSERT_TABLE_IS_DIAGONAL(t)                           \
   do {                                                       \
      static bool first = true;                               \
      if (first) {                                            \
         first = false;                                       \
         pragma_unroll_7                                      \
         for (unsigned r = 0; r < ARRAY_SIZE(t); r++)         \
            assert(t[r][r] == r);                             \
      }                                                       \
   } while (false)

#else
#define ASSERT_TABLE_IS_COMMUTATIVE(t)
#define ASSERT_TABLE_IS_DIAGONAL(t)
#endif /* !defined(NDEBUG) */

/* Union (least upper bound) of two value ranges. */
static enum ssa_ranges
union_ranges(enum ssa_ranges a, enum ssa_ranges b)
{
   static const enum ssa_ranges union_table[last_range + 1][last_range + 1] = {
      /* left\right   unknown  lt_zero  le_zero  gt_zero  ge_zero  ne_zero  eq_zero */
      /* unknown */ { _______, _______, _______, _______, _______, _______, _______ },
      /* lt_zero */ { _______, lt_zero, le_zero, ne_zero, _______, ne_zero, le_zero },
      /* le_zero */ { _______, le_zero, le_zero, _______, _______, _______, le_zero },
      /* gt_zero */ { _______, ne_zero, _______, gt_zero, ge_zero, ne_zero, ge_zero },
      /* ge_zero */ { _______, _______, _______, ge_zero, ge_zero, _______, ge_zero },
      /* ne_zero */ { _______, ne_zero, _______, ne_zero, _______, ne_zero, _______ },
      /* eq_zero */ { _______, le_zero, le_zero, ge_zero, ge_zero, _______, eq_zero },
   };

   ASSERT_TABLE_IS_COMMUTATIVE(union_table);
   ASSERT_TABLE_IS_DIAGONAL(union_table);

   return union_table[a][b];
}

#ifndef NDEBUG
/* Verify that the 'unknown' entry in each row (or column) of the table is the
 * union of all the other values in the row (or column).
 */
#define ASSERT_UNION_OF_OTHERS_MATCHES_UNKNOWN_2_SOURCE(t)              \
   do {                                                                 \
      static bool first = true;                                         \
      if (first) {                                                      \
         first = false;                                                 \
         pragma_unroll_7                                                \
         for (unsigned i = 0; i < last_range; i++) {                    \
            enum ssa_ranges col_range = t[i][unknown + 1];              \
            enum ssa_ranges row_range = t[unknown + 1][i];              \
                                                                        \
            pragma_unroll_5                                             \
            for (unsigned j = unknown + 2; j < last_range; j++) {       \
               col_range = union_ranges(col_range, t[i][j]);            \
               row_range = union_ranges(row_range, t[j][i]);            \
            }                                                           \
                                                                        \
            assert(col_range == t[i][unknown]);                         \
            assert(row_range == t[unknown][i]);                         \
         }                                                              \
      }                                                                 \
   } while (false)

/* For most operations, the union of ranges for a strict inequality and
 * equality should be the range of the non-strict inequality (e.g.,
 * union_ranges(range(op(lt_zero), range(op(eq_zero))) == range(op(le_zero)).
 *
 * Does not apply to selection-like opcodes (bcsel, fmin, fmax, etc.).
 */
#define ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_1_SOURCE(t) \
   do {                                                                  \
      assert(union_ranges(t[lt_zero], t[eq_zero]) == t[le_zero]);        \
      assert(union_ranges(t[gt_zero], t[eq_zero]) == t[ge_zero]);        \
   } while (false)

#define ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_2_SOURCE(t)         \
   do {                                                                          \
      static bool first = true;                                                  \
      if (first) {                                                               \
         first = false;                                                          \
         pragma_unroll_7                                                         \
         for (unsigned i = 0; i < last_range; i++) {                             \
            assert(union_ranges(t[i][lt_zero], t[i][eq_zero]) == t[i][le_zero]); \
            assert(union_ranges(t[i][gt_zero], t[i][eq_zero]) == t[i][ge_zero]); \
            assert(union_ranges(t[lt_zero][i], t[eq_zero][i]) == t[le_zero][i]); \
            assert(union_ranges(t[gt_zero][i], t[eq_zero][i]) == t[ge_zero][i]); \
         }                                                                       \
      }                                                                          \
   } while (false)

/* Several other unordered tuples span the range of "everything."  Each should
 * have the same value as unknown: (lt_zero, ge_zero), (le_zero, gt_zero), and
 * (eq_zero, ne_zero).  union_ranges is already commutative, so only one
 * ordering needs to be checked.
 *
 * Does not apply to selection-like opcodes (bcsel, fmin, fmax, etc.).
 *
 * In cases where this can be used, it is unnecessary to also use
 * ASSERT_UNION_OF_OTHERS_MATCHES_UNKNOWN_*_SOURCE.  For any range X,
 * union_ranges(X, X) == X.  The disjoint ranges cover all of the non-unknown
 * possibilities, so the union of all the unions of disjoint ranges is
 * equivalent to the union of "others."
 */
#define ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_1_SOURCE(t)         \
   do {                                                              \
      assert(union_ranges(t[lt_zero], t[ge_zero]) == t[unknown]);    \
      assert(union_ranges(t[le_zero], t[gt_zero]) == t[unknown]);    \
      assert(union_ranges(t[eq_zero], t[ne_zero]) == t[unknown]);    \
   } while (false)

#define ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_2_SOURCE(t)        \
   do {                                                             \
      static bool first = true;                                     \
      if (first) {                                                  \
         first = false;                                             \
         pragma_unroll_7                                            \
         for (unsigned i = 0; i < last_range; i++) {                \
            assert(union_ranges(t[i][lt_zero], t[i][ge_zero]) ==    \
                   t[i][unknown]);                                  \
            assert(union_ranges(t[i][le_zero], t[i][gt_zero]) ==    \
                   t[i][unknown]);                                  \
            assert(union_ranges(t[i][eq_zero], t[i][ne_zero]) ==    \
                   t[i][unknown]);                                  \
                                                                    \
            assert(union_ranges(t[lt_zero][i], t[ge_zero][i]) ==    \
                   t[unknown][i]);                                  \
            assert(union_ranges(t[le_zero][i], t[gt_zero][i]) ==    \
                   t[unknown][i]);                                  \
            assert(union_ranges(t[eq_zero][i], t[ne_zero][i]) ==    \
                   t[unknown][i]);                                  \
         }                                                          \
      }                                                             \
   } while (false)

#else
#define ASSERT_UNION_OF_OTHERS_MATCHES_UNKNOWN_2_SOURCE(t)
#define ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_1_SOURCE(t)
#define ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_2_SOURCE(t)
#define ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_1_SOURCE(t)
#define ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_2_SOURCE(t)
#endif /* !defined(NDEBUG) */

/**
 * Analyze an expression to determine
the range of its result
 *
 * The end result of this analysis is a token that communicates something
 * about the range of values.  There's an implicit grammar that produces
 * tokens from sequences of literal values, other tokens, and operations.
 * This function implements this grammar as a recursive-descent parser.  Some
 * (but not all) of the grammar is listed in-line in the function.
 *
 * \param instr     ALU instruction consuming the value being analyzed
 * \param src       Index of the source of \p instr to analyze
 * \param ht        Cache of previously computed results, keyed by pack_key()
 * \param use_type  Type the value will be interpreted as
 */
static struct ssa_result_range
analyze_expression(const nir_alu_instr *instr, unsigned src,
                   struct hash_table *ht, nir_alu_type use_type)
{
   /* Ensure that the _Pragma("GCC unroll 7") above are correct. */
   STATIC_ASSERT(last_range + 1 == 7);

   if (!instr->src[src].src.is_ssa)
      return (struct ssa_result_range){unknown, false, false, false};

   /* Constants can be fully characterized directly. */
   if (nir_src_is_const(instr->src[src].src))
      return analyze_constant(instr, src, use_type);

   if (instr->src[src].src.ssa->parent_instr->type != nir_instr_type_alu)
      return (struct ssa_result_range){unknown, false, false, false};

   const struct nir_alu_instr *const alu =
      nir_instr_as_alu(instr->src[src].src.ssa->parent_instr);

   /* Bail if the type of the instruction generating the value does not match
    * the type the value will be interpreted as.  int/uint/bool can be
    * reinterpreted trivially.  The most important cases are between float and
    * non-float.
    */
   if (alu->op != nir_op_mov && alu->op != nir_op_bcsel) {
      const nir_alu_type use_base_type =
         nir_alu_type_get_base_type(use_type);
      const nir_alu_type src_base_type =
         nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type);

      if (use_base_type != src_base_type &&
          (use_base_type == nir_type_float ||
           src_base_type == nir_type_float)) {
         return (struct ssa_result_range){unknown, false, false, false};
      }
   }

   /* Return a cached result if this (instruction, type) pair was already
    * analyzed.
    */
   struct hash_entry *he = _mesa_hash_table_search(ht, pack_key(alu, use_type));
   if (he != NULL)
      return unpack_data(he->data);

   struct ssa_result_range r = {unknown, false, false, false};

   /* ge_zero: ge_zero + ge_zero
    *
    * gt_zero: gt_zero + eq_zero
    *        | gt_zero + ge_zero
    *        | eq_zero + gt_zero   # Addition is commutative
    *        | ge_zero + gt_zero   # Addition is commutative
    *        | gt_zero + gt_zero
    *        ;
    *
    * le_zero: le_zero + le_zero
    *
    * lt_zero: lt_zero + eq_zero
    *        | lt_zero + le_zero
    *        | eq_zero + lt_zero   # Addition is commutative
    *        | le_zero + lt_zero   # Addition is commutative
    *        | lt_zero + lt_zero
    *        ;
    *
    * ne_zero: eq_zero + ne_zero
    *        | ne_zero + eq_zero   # Addition is commutative
    *        ;
    *
    * eq_zero: eq_zero + eq_zero
    *        ;
    *
    * All other cases are 'unknown'.  The seemingly odd entry is (ne_zero,
    * ne_zero), but that could be (-5, +5) which is not ne_zero.
    */
   static const enum ssa_ranges fadd_table[last_range + 1][last_range + 1] = {
      /* left\right   unknown  lt_zero  le_zero  gt_zero  ge_zero  ne_zero  eq_zero */
      /* unknown */ { _______, _______, _______, _______, _______, _______, _______ },
      /* lt_zero */ { _______, lt_zero, lt_zero, _______, _______, _______, lt_zero },
      /* le_zero */ { _______, lt_zero, le_zero, _______, _______, _______, le_zero },
      /* gt_zero */ { _______, _______, _______, gt_zero, gt_zero, _______, gt_zero },
      /* ge_zero */ { _______, _______, _______, gt_zero, ge_zero, _______, ge_zero },
      /* ne_zero */ { _______, _______, _______, _______, _______, _______, ne_zero },
      /* eq_zero */ { _______, lt_zero, le_zero, gt_zero, ge_zero, ne_zero, eq_zero },
   };

   ASSERT_TABLE_IS_COMMUTATIVE(fadd_table);
   ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_2_SOURCE(fadd_table);
   ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_2_SOURCE(fadd_table);

   /* Due to flush-to-zero semantics of floating-point numbers with very
    * small magnitudes, we can never really be sure a result will be
    * non-zero.
    *
    * ge_zero: ge_zero * ge_zero
    *        | ge_zero * gt_zero
    *        | ge_zero * eq_zero
    *        | le_zero * lt_zero
    *        | lt_zero * le_zero   # Multiplication is commutative
    *        | le_zero * le_zero
    *        | gt_zero * ge_zero   # Multiplication is commutative
    *        | eq_zero * ge_zero   # Multiplication is commutative
    *        | a * a               # Left source == right source
    *        | gt_zero * gt_zero
    *        | lt_zero * lt_zero
    *        ;
    *
    * le_zero: ge_zero * le_zero
    *        | ge_zero * lt_zero
    *        | lt_zero * ge_zero   # Multiplication is commutative
    *        | le_zero * ge_zero   # Multiplication is commutative
    *        | le_zero * gt_zero
    *        | lt_zero * gt_zero
    *        | gt_zero * lt_zero   # Multiplication is commutative
    *        ;
    *
    * eq_zero: eq_zero * <any>
    *          <any> * eq_zero     # Multiplication is commutative
    *
    * All other cases are 'unknown'.
    */
   static const enum ssa_ranges fmul_table[last_range + 1][last_range + 1] = {
      /* left\right   unknown  lt_zero  le_zero  gt_zero  ge_zero  ne_zero  eq_zero */
      /* unknown */ { _______, _______, _______, _______, _______, _______, eq_zero },
      /* lt_zero */ { _______, ge_zero, ge_zero, le_zero, le_zero, _______, eq_zero },
      /* le_zero */ { _______, ge_zero, ge_zero, le_zero, le_zero, _______, eq_zero },
      /* gt_zero */ { _______, le_zero, le_zero, ge_zero, ge_zero, _______, eq_zero },
      /* ge_zero */ { _______, le_zero, le_zero, ge_zero, ge_zero, _______, eq_zero },
      /* ne_zero */ { _______, _______, _______, _______, _______, _______, eq_zero },
      /* eq_zero */ { eq_zero, eq_zero, eq_zero, eq_zero, eq_zero, eq_zero, eq_zero }
   };

   ASSERT_TABLE_IS_COMMUTATIVE(fmul_table);
   ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_2_SOURCE(fmul_table);
   ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_2_SOURCE(fmul_table);

   /* Negation simply mirrors the range about zero. */
   static const enum ssa_ranges fneg_table[last_range + 1] = {
      /* unknown  lt_zero  le_zero  gt_zero  ge_zero  ne_zero  eq_zero */
         _______, gt_zero, ge_zero, lt_zero, le_zero, ne_zero, eq_zero
   };

   ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_1_SOURCE(fneg_table);
   ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_1_SOURCE(fneg_table);


   switch (alu->op) {
   case nir_op_b2f32:
   case nir_op_b2i32:
      /* b2f32 will generate either 0.0 or 1.0.  This case is trivial.
       *
       * b2i32 will generate either 0x00000000 or 0x00000001.  When those bit
       * patterns are interpreted as floating point, they are 0.0 and
       * 1.401298464324817e-45.  The latter is subnormal, but it is finite and
       * a number.
       */
      r = (struct ssa_result_range){ge_zero, alu->op == nir_op_b2f32, true, true};
      break;

   case nir_op_bcsel: {
      /* Sources 1 and 2 are the two selected values; source 0 is the
       * condition and does not contribute to the result range.
       */
      const struct ssa_result_range left =
         analyze_expression(alu, 1, ht, use_type);
      const struct ssa_result_range right =
         analyze_expression(alu, 2, ht, use_type);

      r.is_integral = left.is_integral && right.is_integral;

      /* This could be better, but it would require a lot of work.  For
       * example, the result of the following is a number:
       *
       *    bcsel(a > 0.0, a, 38.6)
       *
       * If the result of 'a > 0.0' is true, then the use of 'a' in the true
       * part of the bcsel must be a number.
       *
       * Other cases are even more challenging.
       *
       *    bcsel(a > 0.5, a - 0.5, 0.0)
       */
      r.is_a_number = left.is_a_number && right.is_a_number;
      r.is_finite = left.is_finite && right.is_finite;

      r.range = union_ranges(left.range, right.range);
      break;
   }

   case nir_op_i2f32:
   case nir_op_u2f32:
      r = analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));

      /* An int converted to float is an integral, finite number. */
      r.is_integral = true;
      r.is_a_number = true;
      r.is_finite = true;

      /* An unsigned source is never negative even when nothing else is
       * known about it.
       */
      if (r.range == unknown && alu->op == nir_op_u2f32)
         r.range = ge_zero;

      break;

   case nir_op_fabs:
      r = analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));

      switch (r.range) {
      case unknown:
      case le_zero:
      case ge_zero:
         r.range = ge_zero;
         break;

      case lt_zero:
      case gt_zero:
      case ne_zero:
         r.range = gt_zero;
         break;

      case eq_zero:
         break;
      }

      break;

   case nir_op_fadd: {
      const struct ssa_result_range left =
         analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));
      const struct ssa_result_range right =
         analyze_expression(alu, 1, ht, nir_alu_src_type(alu, 1));

      r.is_integral = left.is_integral && right.is_integral;
      r.range = fadd_table[left.range][right.range];

      /* X + Y is NaN if either operand is NaN or if one operand is +Inf and
       * the other is -Inf.  If neither operand is NaN and at least one of the
       * operands is finite, then the result cannot be NaN.
       */
      r.is_a_number = left.is_a_number && right.is_a_number &&
                      (left.is_finite || right.is_finite);
      break;
   }

   case nir_op_fexp2: {
      /* If the parameter might be less than zero, the mathematical result
       * will be on (0, 1).  For sufficiently large magnitude negative
       * parameters, the result will flush to zero.
       */
      static const enum ssa_ranges table[last_range + 1] = {
         /* unknown  lt_zero  le_zero  gt_zero  ge_zero  ne_zero  eq_zero */
            ge_zero, ge_zero, ge_zero, gt_zero, gt_zero, ge_zero, gt_zero
      };

      r = analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));

      ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_1_SOURCE(table);
      ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_1_SOURCE(table);

      /* 2**x is only integral for integral x >= 0. */
      r.is_integral = r.is_integral && is_not_negative(r.range);
      r.range = table[r.range];

      /* Various cases can result in NaN, so assume the worst. */
      r.is_finite = false;
      r.is_a_number = false;
      break;
   }

   case nir_op_fmax: {
      const struct ssa_result_range left =
         analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));
      const struct ssa_result_range right =
         analyze_expression(alu, 1, ht, nir_alu_src_type(alu, 1));

      r.is_integral = left.is_integral && right.is_integral;

      /* This is conservative.  It may be possible to determine that the
       * result must be finite in more cases, but it would take some effort to
       * work out all the corners.  For example, fmax({lt_zero, finite},
       * {lt_zero}) should result in {lt_zero, finite}.
       */
      r.is_finite = left.is_finite && right.is_finite;

      /* If one source is NaN, fmax always picks the other source. */
      r.is_a_number = left.is_a_number || right.is_a_number;

      /* gt_zero: fmax(gt_zero, *)
       *        | fmax(*, gt_zero)        # Treat fmax as commutative
       *        ;
       *
       * ge_zero: fmax(ge_zero, ne_zero)
       *        | fmax(ge_zero, lt_zero)
       *        | fmax(ge_zero, le_zero)
       *        | fmax(ge_zero, eq_zero)
       *        | fmax(ne_zero, ge_zero)  # Treat fmax as commutative
       *        | fmax(lt_zero, ge_zero)  # Treat fmax as commutative
       *        | fmax(le_zero, ge_zero)  # Treat fmax as commutative
       *        | fmax(eq_zero, ge_zero)  # Treat fmax as commutative
       *        | fmax(ge_zero, ge_zero)
       *        ;
       *
       * le_zero: fmax(le_zero, lt_zero)
       *        | fmax(lt_zero, le_zero)  # Treat fmax as commutative
       *        | fmax(le_zero, le_zero)
       *        ;
       *
       * lt_zero: fmax(lt_zero, lt_zero)
       *        ;
       *
       * ne_zero: fmax(ne_zero, lt_zero)
       *        | fmax(lt_zero, ne_zero)  # Treat fmax as commutative
       *        | fmax(ne_zero, ne_zero)
       *        ;
       *
       * eq_zero: fmax(eq_zero, le_zero)
       *        | fmax(eq_zero, lt_zero)
       *        | fmax(le_zero, eq_zero)  # Treat fmax as commutative
       *        | fmax(lt_zero, eq_zero)  # Treat fmax as commutative
       *        | fmax(eq_zero, eq_zero)
       *        ;
       *
       * All other cases are 'unknown'.
       */
      static const enum ssa_ranges table[last_range + 1][last_range + 1] = {
         /* left\right   unknown  lt_zero  le_zero  gt_zero  ge_zero  ne_zero  eq_zero */
         /* unknown */ { _______, _______, _______, gt_zero, ge_zero, _______, _______ },
         /* lt_zero */ { _______, lt_zero, le_zero, gt_zero, ge_zero, ne_zero, eq_zero },
         /* le_zero */ { _______, le_zero, le_zero, gt_zero, ge_zero, _______, eq_zero },
         /* gt_zero */ { gt_zero, gt_zero, gt_zero, gt_zero, gt_zero, gt_zero, gt_zero },
         /* ge_zero */ { ge_zero, ge_zero, ge_zero, gt_zero, ge_zero, ge_zero, ge_zero },
         /* ne_zero */ { _______, ne_zero, _______, gt_zero, ge_zero, ne_zero, _______ },
         /* eq_zero */ { _______, eq_zero, eq_zero, gt_zero, ge_zero, _______, eq_zero }
      };

      /* Treat fmax as commutative. */
      ASSERT_TABLE_IS_COMMUTATIVE(table);
      ASSERT_TABLE_IS_DIAGONAL(table);
      ASSERT_UNION_OF_OTHERS_MATCHES_UNKNOWN_2_SOURCE(table);

      r.range = table[left.range][right.range];

      /* Recall that when either value is NaN, fmax will pick the other value.
       * This means the result range of the fmax will either be the "ideal"
       * result range (calculated above) or the range of the non-NaN value.
       */
      if (!left.is_a_number)
         r.range = union_ranges(r.range, right.range);

      if (!right.is_a_number)
         r.range = union_ranges(r.range, left.range);

      break;
   }

   case nir_op_fmin: {
      const struct ssa_result_range left =
         analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));
      const struct ssa_result_range right =
         analyze_expression(alu, 1, ht, nir_alu_src_type(alu, 1));

      r.is_integral = left.is_integral && right.is_integral;

      /* This is conservative.  It may be possible to determine that the
       * result must be finite in more cases, but it would take some effort to
       * work out all the corners.  For example, fmin({gt_zero, finite},
       * {gt_zero}) should result in {gt_zero, finite}.
       */
      r.is_finite = left.is_finite && right.is_finite;

      /* If one source is NaN, fmin always picks the other source.
*/ 7917ec681f3Smrg r.is_a_number = left.is_a_number || right.is_a_number; 7927ec681f3Smrg 7937ec681f3Smrg /* lt_zero: fmin(lt_zero, *) 7947ec681f3Smrg * | fmin(*, lt_zero) # Treat fmin as commutative 7957ec681f3Smrg * ; 7967ec681f3Smrg * 7977ec681f3Smrg * le_zero: fmin(le_zero, ne_zero) 7987ec681f3Smrg * | fmin(le_zero, gt_zero) 7997ec681f3Smrg * | fmin(le_zero, ge_zero) 8007ec681f3Smrg * | fmin(le_zero, eq_zero) 8017ec681f3Smrg * | fmin(ne_zero, le_zero) # Treat fmin as commutative 8027ec681f3Smrg * | fmin(gt_zero, le_zero) # Treat fmin as commutative 8037ec681f3Smrg * | fmin(ge_zero, le_zero) # Treat fmin as commutative 8047ec681f3Smrg * | fmin(eq_zero, le_zero) # Treat fmin as commutative 8057ec681f3Smrg * | fmin(le_zero, le_zero) 8067ec681f3Smrg * ; 8077ec681f3Smrg * 8087ec681f3Smrg * ge_zero: fmin(ge_zero, gt_zero) 8097ec681f3Smrg * | fmin(gt_zero, ge_zero) # Treat fmin as commutative 8107ec681f3Smrg * | fmin(ge_zero, ge_zero) 8117ec681f3Smrg * ; 8127ec681f3Smrg * 8137ec681f3Smrg * gt_zero: fmin(gt_zero, gt_zero) 8147ec681f3Smrg * ; 8157ec681f3Smrg * 8167ec681f3Smrg * ne_zero: fmin(ne_zero, gt_zero) 8177ec681f3Smrg * | fmin(gt_zero, ne_zero) # Treat fmin as commutative 8187ec681f3Smrg * | fmin(ne_zero, ne_zero) 8197ec681f3Smrg * ; 8207ec681f3Smrg * 8217ec681f3Smrg * eq_zero: fmin(eq_zero, ge_zero) 8227ec681f3Smrg * | fmin(eq_zero, gt_zero) 8237ec681f3Smrg * | fmin(ge_zero, eq_zero) # Treat fmin as commutative 8247ec681f3Smrg * | fmin(gt_zero, eq_zero) # Treat fmin as commutative 8257ec681f3Smrg * | fmin(eq_zero, eq_zero) 8267ec681f3Smrg * ; 8277ec681f3Smrg * 8287ec681f3Smrg * All other cases are 'unknown'. 
8297ec681f3Smrg */ 8307ec681f3Smrg static const enum ssa_ranges table[last_range + 1][last_range + 1] = { 8317ec681f3Smrg /* left\right unknown lt_zero le_zero gt_zero ge_zero ne_zero eq_zero */ 8327ec681f3Smrg /* unknown */ { _______, lt_zero, le_zero, _______, _______, _______, _______ }, 8337ec681f3Smrg /* lt_zero */ { lt_zero, lt_zero, lt_zero, lt_zero, lt_zero, lt_zero, lt_zero }, 8347ec681f3Smrg /* le_zero */ { le_zero, lt_zero, le_zero, le_zero, le_zero, le_zero, le_zero }, 8357ec681f3Smrg /* gt_zero */ { _______, lt_zero, le_zero, gt_zero, ge_zero, ne_zero, eq_zero }, 8367ec681f3Smrg /* ge_zero */ { _______, lt_zero, le_zero, ge_zero, ge_zero, _______, eq_zero }, 8377ec681f3Smrg /* ne_zero */ { _______, lt_zero, le_zero, ne_zero, _______, ne_zero, _______ }, 8387ec681f3Smrg /* eq_zero */ { _______, lt_zero, le_zero, eq_zero, eq_zero, _______, eq_zero } 8397ec681f3Smrg }; 8407ec681f3Smrg 8417ec681f3Smrg /* Treat fmin as commutative. */ 8427ec681f3Smrg ASSERT_TABLE_IS_COMMUTATIVE(table); 8437ec681f3Smrg ASSERT_TABLE_IS_DIAGONAL(table); 8447ec681f3Smrg ASSERT_UNION_OF_OTHERS_MATCHES_UNKNOWN_2_SOURCE(table); 8457ec681f3Smrg 8467ec681f3Smrg r.range = table[left.range][right.range]; 8477ec681f3Smrg 8487ec681f3Smrg /* Recall that when either value is NaN, fmin will pick the other value. 8497ec681f3Smrg * This means the result range of the fmin will either be the "ideal" 8507ec681f3Smrg * result range (calculated above) or the range of the non-NaN value. 
8517ec681f3Smrg */ 8527ec681f3Smrg if (!left.is_a_number) 8537ec681f3Smrg r.range = union_ranges(r.range, right.range); 8547ec681f3Smrg 8557ec681f3Smrg if (!right.is_a_number) 8567ec681f3Smrg r.range = union_ranges(r.range, left.range); 8577ec681f3Smrg 8587ec681f3Smrg break; 8597ec681f3Smrg } 8607ec681f3Smrg 8617ec681f3Smrg case nir_op_fmul: { 8627ec681f3Smrg const struct ssa_result_range left = 8637ec681f3Smrg analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0)); 8647ec681f3Smrg const struct ssa_result_range right = 8657ec681f3Smrg analyze_expression(alu, 1, ht, nir_alu_src_type(alu, 1)); 8667ec681f3Smrg 8677ec681f3Smrg r.is_integral = left.is_integral && right.is_integral; 8687ec681f3Smrg 8697ec681f3Smrg /* x * x => ge_zero */ 8707ec681f3Smrg if (left.range != eq_zero && nir_alu_srcs_equal(alu, alu, 0, 1)) { 8717ec681f3Smrg /* Even if x > 0, the result of x*x can be zero when x is, for 8727ec681f3Smrg * example, a subnormal number. 8737ec681f3Smrg */ 8747ec681f3Smrg r.range = ge_zero; 8757ec681f3Smrg } else if (left.range != eq_zero && nir_alu_srcs_negative_equal(alu, alu, 0, 1)) { 8767ec681f3Smrg /* -x * x => le_zero. */ 8777ec681f3Smrg r.range = le_zero; 8787ec681f3Smrg } else 8797ec681f3Smrg r.range = fmul_table[left.range][right.range]; 8807ec681f3Smrg 8817ec681f3Smrg /* Mulitpliation produces NaN for X * NaN and for 0 * ±Inf. If both 8827ec681f3Smrg * operands are numbers and either both are finite or one is finite and 8837ec681f3Smrg * the other cannot be zero, then the result must be a number. 
8847ec681f3Smrg */ 8857ec681f3Smrg r.is_a_number = (left.is_a_number && right.is_a_number) && 8867ec681f3Smrg ((left.is_finite && right.is_finite) || 8877ec681f3Smrg (!is_not_zero(left.range) && right.is_finite) || 8887ec681f3Smrg (left.is_finite && !is_not_zero(right.range))); 8897ec681f3Smrg 8907ec681f3Smrg break; 8917ec681f3Smrg } 8927ec681f3Smrg 8937ec681f3Smrg case nir_op_frcp: 8947ec681f3Smrg r = (struct ssa_result_range){ 8957ec681f3Smrg analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0)).range, 8967ec681f3Smrg false, 8977ec681f3Smrg false, /* Various cases can result in NaN, so assume the worst. */ 8987ec681f3Smrg false /* " " " " " " " " " " */ 8997ec681f3Smrg }; 9007ec681f3Smrg break; 9017ec681f3Smrg 9027ec681f3Smrg case nir_op_mov: 9037ec681f3Smrg r = analyze_expression(alu, 0, ht, use_type); 9047ec681f3Smrg break; 9057ec681f3Smrg 9067ec681f3Smrg case nir_op_fneg: 9077ec681f3Smrg r = analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0)); 9087ec681f3Smrg 9097ec681f3Smrg r.range = fneg_table[r.range]; 9107ec681f3Smrg break; 9117ec681f3Smrg 9127ec681f3Smrg case nir_op_fsat: { 9137ec681f3Smrg const struct ssa_result_range left = 9147ec681f3Smrg analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0)); 9157ec681f3Smrg 9167ec681f3Smrg /* fsat(NaN) = 0. */ 9177ec681f3Smrg r.is_a_number = true; 9187ec681f3Smrg r.is_finite = true; 9197ec681f3Smrg 9207ec681f3Smrg switch (left.range) { 9217ec681f3Smrg case le_zero: 9227ec681f3Smrg case lt_zero: 9237ec681f3Smrg case eq_zero: 9247ec681f3Smrg r.range = eq_zero; 9257ec681f3Smrg r.is_integral = true; 9267ec681f3Smrg break; 9277ec681f3Smrg 9287ec681f3Smrg case gt_zero: 9297ec681f3Smrg /* fsat is equivalent to fmin(fmax(X, 0.0), 1.0), so if X is not a 9307ec681f3Smrg * number, the result will be 0. 9317ec681f3Smrg */ 9327ec681f3Smrg r.range = left.is_a_number ? 
gt_zero : ge_zero; 9337ec681f3Smrg r.is_integral = left.is_integral; 9347ec681f3Smrg break; 9357ec681f3Smrg 9367ec681f3Smrg case ge_zero: 9377ec681f3Smrg case ne_zero: 9387ec681f3Smrg case unknown: 9397ec681f3Smrg /* Since the result must be in [0, 1], the value must be >= 0. */ 9407ec681f3Smrg r.range = ge_zero; 9417ec681f3Smrg r.is_integral = left.is_integral; 9427ec681f3Smrg break; 9437ec681f3Smrg } 9447ec681f3Smrg break; 9457ec681f3Smrg } 9467ec681f3Smrg 9477ec681f3Smrg case nir_op_fsign: 9487ec681f3Smrg r = (struct ssa_result_range){ 9497ec681f3Smrg analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0)).range, 9507ec681f3Smrg true, 9517ec681f3Smrg true, /* fsign is -1, 0, or 1, even for NaN, so it must be a number. */ 9527ec681f3Smrg true /* fsign is -1, 0, or 1, even for NaN, so it must be finite. */ 9537ec681f3Smrg }; 9547ec681f3Smrg break; 9557ec681f3Smrg 9567ec681f3Smrg case nir_op_fsqrt: 9577ec681f3Smrg case nir_op_frsq: 9587ec681f3Smrg r = (struct ssa_result_range){ge_zero, false, false, false}; 9597ec681f3Smrg break; 9607ec681f3Smrg 9617ec681f3Smrg case nir_op_ffloor: { 9627ec681f3Smrg const struct ssa_result_range left = 9637ec681f3Smrg analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0)); 9647ec681f3Smrg 9657ec681f3Smrg r.is_integral = true; 9667ec681f3Smrg 9677ec681f3Smrg /* In IEEE 754, floor(NaN) is NaN, and floor(±Inf) is ±Inf. 
See 9687ec681f3Smrg * https://pubs.opengroup.org/onlinepubs/9699919799.2016edition/functions/floor.html 9697ec681f3Smrg */ 9707ec681f3Smrg r.is_a_number = left.is_a_number; 9717ec681f3Smrg r.is_finite = left.is_finite; 9727ec681f3Smrg 9737ec681f3Smrg if (left.is_integral || left.range == le_zero || left.range == lt_zero) 9747ec681f3Smrg r.range = left.range; 9757ec681f3Smrg else if (left.range == ge_zero || left.range == gt_zero) 9767ec681f3Smrg r.range = ge_zero; 9777ec681f3Smrg else if (left.range == ne_zero) 9787ec681f3Smrg r.range = unknown; 9797ec681f3Smrg 9807ec681f3Smrg break; 9817ec681f3Smrg } 9827ec681f3Smrg 9837ec681f3Smrg case nir_op_fceil: { 9847ec681f3Smrg const struct ssa_result_range left = 9857ec681f3Smrg analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0)); 9867ec681f3Smrg 9877ec681f3Smrg r.is_integral = true; 9887ec681f3Smrg 9897ec681f3Smrg /* In IEEE 754, ceil(NaN) is NaN, and ceil(±Inf) is ±Inf. See 9907ec681f3Smrg * https://pubs.opengroup.org/onlinepubs/9699919799.2016edition/functions/ceil.html 9917ec681f3Smrg */ 9927ec681f3Smrg r.is_a_number = left.is_a_number; 9937ec681f3Smrg r.is_finite = left.is_finite; 9947ec681f3Smrg 9957ec681f3Smrg if (left.is_integral || left.range == ge_zero || left.range == gt_zero) 9967ec681f3Smrg r.range = left.range; 9977ec681f3Smrg else if (left.range == le_zero || left.range == lt_zero) 9987ec681f3Smrg r.range = le_zero; 9997ec681f3Smrg else if (left.range == ne_zero) 10007ec681f3Smrg r.range = unknown; 10017ec681f3Smrg 10027ec681f3Smrg break; 10037ec681f3Smrg } 10047ec681f3Smrg 10057ec681f3Smrg case nir_op_ftrunc: { 10067ec681f3Smrg const struct ssa_result_range left = 10077ec681f3Smrg analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0)); 10087ec681f3Smrg 10097ec681f3Smrg r.is_integral = true; 10107ec681f3Smrg 10117ec681f3Smrg /* In IEEE 754, trunc(NaN) is NaN, and trunc(±Inf) is ±Inf. 
See 10127ec681f3Smrg * https://pubs.opengroup.org/onlinepubs/9699919799.2016edition/functions/trunc.html 10137ec681f3Smrg */ 10147ec681f3Smrg r.is_a_number = left.is_a_number; 10157ec681f3Smrg r.is_finite = left.is_finite; 10167ec681f3Smrg 10177ec681f3Smrg if (left.is_integral) 10187ec681f3Smrg r.range = left.range; 10197ec681f3Smrg else if (left.range == ge_zero || left.range == gt_zero) 10207ec681f3Smrg r.range = ge_zero; 10217ec681f3Smrg else if (left.range == le_zero || left.range == lt_zero) 10227ec681f3Smrg r.range = le_zero; 10237ec681f3Smrg else if (left.range == ne_zero) 10247ec681f3Smrg r.range = unknown; 10257ec681f3Smrg 10267ec681f3Smrg break; 10277ec681f3Smrg } 10287ec681f3Smrg 10297ec681f3Smrg case nir_op_flt: 10307ec681f3Smrg case nir_op_fge: 10317ec681f3Smrg case nir_op_feq: 10327ec681f3Smrg case nir_op_fneu: 10337ec681f3Smrg case nir_op_ilt: 10347ec681f3Smrg case nir_op_ige: 10357ec681f3Smrg case nir_op_ieq: 10367ec681f3Smrg case nir_op_ine: 10377ec681f3Smrg case nir_op_ult: 10387ec681f3Smrg case nir_op_uge: 10397ec681f3Smrg /* Boolean results are 0 or -1. */ 10407ec681f3Smrg r = (struct ssa_result_range){le_zero, false, true, false}; 10417ec681f3Smrg break; 10427ec681f3Smrg 10437ec681f3Smrg case nir_op_fpow: { 10447ec681f3Smrg /* Due to flush-to-zero semanatics of floating-point numbers with very 10457ec681f3Smrg * small mangnitudes, we can never really be sure a result will be 10467ec681f3Smrg * non-zero. 10477ec681f3Smrg * 10487ec681f3Smrg * NIR uses pow() and powf() to constant evaluate nir_op_fpow. The man 10497ec681f3Smrg * page for that function says: 10507ec681f3Smrg * 10517ec681f3Smrg * If y is 0, the result is 1.0 (even if x is a NaN). 
10527ec681f3Smrg * 10537ec681f3Smrg * gt_zero: pow(*, eq_zero) 10547ec681f3Smrg * | pow(eq_zero, lt_zero) # 0^-y = +inf 10557ec681f3Smrg * | pow(eq_zero, le_zero) # 0^-y = +inf or 0^0 = 1.0 10567ec681f3Smrg * ; 10577ec681f3Smrg * 10587ec681f3Smrg * eq_zero: pow(eq_zero, gt_zero) 10597ec681f3Smrg * ; 10607ec681f3Smrg * 10617ec681f3Smrg * ge_zero: pow(gt_zero, gt_zero) 10627ec681f3Smrg * | pow(gt_zero, ge_zero) 10637ec681f3Smrg * | pow(gt_zero, lt_zero) 10647ec681f3Smrg * | pow(gt_zero, le_zero) 10657ec681f3Smrg * | pow(gt_zero, ne_zero) 10667ec681f3Smrg * | pow(gt_zero, unknown) 10677ec681f3Smrg * | pow(ge_zero, gt_zero) 10687ec681f3Smrg * | pow(ge_zero, ge_zero) 10697ec681f3Smrg * | pow(ge_zero, lt_zero) 10707ec681f3Smrg * | pow(ge_zero, le_zero) 10717ec681f3Smrg * | pow(ge_zero, ne_zero) 10727ec681f3Smrg * | pow(ge_zero, unknown) 10737ec681f3Smrg * | pow(eq_zero, ge_zero) # 0^0 = 1.0 or 0^+y = 0.0 10747ec681f3Smrg * | pow(eq_zero, ne_zero) # 0^-y = +inf or 0^+y = 0.0 10757ec681f3Smrg * | pow(eq_zero, unknown) # union of all other y cases 10767ec681f3Smrg * ; 10777ec681f3Smrg * 10787ec681f3Smrg * All other cases are unknown. 10797ec681f3Smrg * 10807ec681f3Smrg * We could do better if the right operand is a constant, integral 10817ec681f3Smrg * value. 
10827ec681f3Smrg */ 10837ec681f3Smrg static const enum ssa_ranges table[last_range + 1][last_range + 1] = { 10847ec681f3Smrg /* left\right unknown lt_zero le_zero gt_zero ge_zero ne_zero eq_zero */ 10857ec681f3Smrg /* unknown */ { _______, _______, _______, _______, _______, _______, gt_zero }, 10867ec681f3Smrg /* lt_zero */ { _______, _______, _______, _______, _______, _______, gt_zero }, 10877ec681f3Smrg /* le_zero */ { _______, _______, _______, _______, _______, _______, gt_zero }, 10887ec681f3Smrg /* gt_zero */ { ge_zero, ge_zero, ge_zero, ge_zero, ge_zero, ge_zero, gt_zero }, 10897ec681f3Smrg /* ge_zero */ { ge_zero, ge_zero, ge_zero, ge_zero, ge_zero, ge_zero, gt_zero }, 10907ec681f3Smrg /* ne_zero */ { _______, _______, _______, _______, _______, _______, gt_zero }, 10917ec681f3Smrg /* eq_zero */ { ge_zero, gt_zero, gt_zero, eq_zero, ge_zero, ge_zero, gt_zero }, 10927ec681f3Smrg }; 10937ec681f3Smrg 10947ec681f3Smrg const struct ssa_result_range left = 10957ec681f3Smrg analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0)); 10967ec681f3Smrg const struct ssa_result_range right = 10977ec681f3Smrg analyze_expression(alu, 1, ht, nir_alu_src_type(alu, 1)); 10987ec681f3Smrg 10997ec681f3Smrg ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_2_SOURCE(table); 11007ec681f3Smrg ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_2_SOURCE(table); 11017ec681f3Smrg 11027ec681f3Smrg r.is_integral = left.is_integral && right.is_integral && 11037ec681f3Smrg is_not_negative(right.range); 11047ec681f3Smrg r.range = table[left.range][right.range]; 11057ec681f3Smrg 11067ec681f3Smrg /* Various cases can result in NaN, so assume the worst. 
*/ 11077ec681f3Smrg r.is_a_number = false; 11087ec681f3Smrg 11097ec681f3Smrg break; 11107ec681f3Smrg } 11117ec681f3Smrg 11127ec681f3Smrg case nir_op_ffma: { 11137ec681f3Smrg const struct ssa_result_range first = 11147ec681f3Smrg analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0)); 11157ec681f3Smrg const struct ssa_result_range second = 11167ec681f3Smrg analyze_expression(alu, 1, ht, nir_alu_src_type(alu, 1)); 11177ec681f3Smrg const struct ssa_result_range third = 11187ec681f3Smrg analyze_expression(alu, 2, ht, nir_alu_src_type(alu, 2)); 11197ec681f3Smrg 11207ec681f3Smrg r.is_integral = first.is_integral && second.is_integral && 11217ec681f3Smrg third.is_integral; 11227ec681f3Smrg 11237ec681f3Smrg /* Various cases can result in NaN, so assume the worst. */ 11247ec681f3Smrg r.is_a_number = false; 11257ec681f3Smrg 11267ec681f3Smrg enum ssa_ranges fmul_range; 11277ec681f3Smrg 11287ec681f3Smrg if (first.range != eq_zero && nir_alu_srcs_equal(alu, alu, 0, 1)) { 11297ec681f3Smrg /* See handling of nir_op_fmul for explanation of why ge_zero is the 11307ec681f3Smrg * range. 
11317ec681f3Smrg */ 11327ec681f3Smrg fmul_range = ge_zero; 11337ec681f3Smrg } else if (first.range != eq_zero && nir_alu_srcs_negative_equal(alu, alu, 0, 1)) { 11347ec681f3Smrg /* -x * x => le_zero */ 11357ec681f3Smrg fmul_range = le_zero; 11367ec681f3Smrg } else 11377ec681f3Smrg fmul_range = fmul_table[first.range][second.range]; 11387ec681f3Smrg 11397ec681f3Smrg r.range = fadd_table[fmul_range][third.range]; 11407ec681f3Smrg break; 11417ec681f3Smrg } 11427ec681f3Smrg 11437ec681f3Smrg case nir_op_flrp: { 11447ec681f3Smrg const struct ssa_result_range first = 11457ec681f3Smrg analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0)); 11467ec681f3Smrg const struct ssa_result_range second = 11477ec681f3Smrg analyze_expression(alu, 1, ht, nir_alu_src_type(alu, 1)); 11487ec681f3Smrg const struct ssa_result_range third = 11497ec681f3Smrg analyze_expression(alu, 2, ht, nir_alu_src_type(alu, 2)); 11507ec681f3Smrg 11517ec681f3Smrg r.is_integral = first.is_integral && second.is_integral && 11527ec681f3Smrg third.is_integral; 11537ec681f3Smrg 11547ec681f3Smrg /* Various cases can result in NaN, so assume the worst. 
*/ 11557ec681f3Smrg r.is_a_number = false; 11567ec681f3Smrg 11577ec681f3Smrg /* Decompose the flrp to first + third * (second + -first) */ 11587ec681f3Smrg const enum ssa_ranges inner_fadd_range = 11597ec681f3Smrg fadd_table[second.range][fneg_table[first.range]]; 11607ec681f3Smrg 11617ec681f3Smrg const enum ssa_ranges fmul_range = 11627ec681f3Smrg fmul_table[third.range][inner_fadd_range]; 11637ec681f3Smrg 11647ec681f3Smrg r.range = fadd_table[first.range][fmul_range]; 11657ec681f3Smrg break; 11667ec681f3Smrg } 11677ec681f3Smrg 11687ec681f3Smrg default: 11697ec681f3Smrg r = (struct ssa_result_range){unknown, false, false, false}; 11707ec681f3Smrg break; 11717ec681f3Smrg } 11727ec681f3Smrg 11737ec681f3Smrg if (r.range == eq_zero) 11747ec681f3Smrg r.is_integral = true; 11757ec681f3Smrg 11767ec681f3Smrg /* Just like isfinite(), the is_finite flag implies the value is a number. */ 11777ec681f3Smrg assert((int) r.is_finite <= (int) r.is_a_number); 11787ec681f3Smrg 11797ec681f3Smrg _mesa_hash_table_insert(ht, pack_key(alu, use_type), pack_data(r)); 11807ec681f3Smrg return r; 11817ec681f3Smrg} 11827ec681f3Smrg 11837ec681f3Smrg#undef _______ 11847ec681f3Smrg 11857ec681f3Smrgstruct ssa_result_range 11867ec681f3Smrgnir_analyze_range(struct hash_table *range_ht, 11877ec681f3Smrg const nir_alu_instr *instr, unsigned src) 11887ec681f3Smrg{ 11897ec681f3Smrg return analyze_expression(instr, src, range_ht, 11907ec681f3Smrg nir_alu_src_type(instr, src)); 11917ec681f3Smrg} 11927ec681f3Smrg 11937ec681f3Smrgstatic uint32_t bitmask(uint32_t size) { 11947ec681f3Smrg return size >= 32 ? 
0xffffffffu : ((uint32_t)1 << size) - 1u; 11957ec681f3Smrg} 11967ec681f3Smrg 11977ec681f3Smrgstatic uint64_t mul_clamp(uint32_t a, uint32_t b) 11987ec681f3Smrg{ 11997ec681f3Smrg if (a != 0 && (a * b) / a != b) 12007ec681f3Smrg return (uint64_t)UINT32_MAX + 1; 12017ec681f3Smrg else 12027ec681f3Smrg return a * b; 12037ec681f3Smrg} 12047ec681f3Smrg 12057ec681f3Smrg/* recursively gather at most "buf_size" phi/bcsel sources */ 12067ec681f3Smrgstatic unsigned 12077ec681f3Smrgsearch_phi_bcsel(nir_ssa_scalar scalar, nir_ssa_scalar *buf, unsigned buf_size, struct set *visited) 12087ec681f3Smrg{ 12097ec681f3Smrg if (_mesa_set_search(visited, scalar.def)) 12107ec681f3Smrg return 0; 12117ec681f3Smrg _mesa_set_add(visited, scalar.def); 12127ec681f3Smrg 12137ec681f3Smrg if (scalar.def->parent_instr->type == nir_instr_type_phi) { 12147ec681f3Smrg nir_phi_instr *phi = nir_instr_as_phi(scalar.def->parent_instr); 12157ec681f3Smrg unsigned num_sources_left = exec_list_length(&phi->srcs); 12167ec681f3Smrg if (buf_size >= num_sources_left) { 12177ec681f3Smrg unsigned total_added = 0; 12187ec681f3Smrg nir_foreach_phi_src(src, phi) { 12197ec681f3Smrg num_sources_left--; 12207ec681f3Smrg unsigned added = search_phi_bcsel( 12217ec681f3Smrg (nir_ssa_scalar){src->src.ssa, 0}, buf + total_added, buf_size - num_sources_left, visited); 12227ec681f3Smrg assert(added <= buf_size); 12237ec681f3Smrg buf_size -= added; 12247ec681f3Smrg total_added += added; 12257ec681f3Smrg } 12267ec681f3Smrg return total_added; 12277ec681f3Smrg } 12287ec681f3Smrg } 12297ec681f3Smrg 12307ec681f3Smrg if (nir_ssa_scalar_is_alu(scalar)) { 12317ec681f3Smrg nir_op op = nir_ssa_scalar_alu_op(scalar); 12327ec681f3Smrg 12337ec681f3Smrg if ((op == nir_op_bcsel || op == nir_op_b32csel) && buf_size >= 2) { 12347ec681f3Smrg nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0); 12357ec681f3Smrg nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1); 12367ec681f3Smrg 12377ec681f3Smrg unsigned added = 
search_phi_bcsel(src0, buf, buf_size - 1, visited); 12387ec681f3Smrg buf_size -= added; 12397ec681f3Smrg added += search_phi_bcsel(src1, buf + added, buf_size, visited); 12407ec681f3Smrg return added; 12417ec681f3Smrg } 12427ec681f3Smrg } 12437ec681f3Smrg 12447ec681f3Smrg buf[0] = scalar; 12457ec681f3Smrg return 1; 12467ec681f3Smrg} 12477ec681f3Smrg 12487ec681f3Smrgstatic nir_variable * 12497ec681f3Smrglookup_input(nir_shader *shader, unsigned driver_location) 12507ec681f3Smrg{ 12517ec681f3Smrg return nir_find_variable_with_driver_location(shader, nir_var_shader_in, 12527ec681f3Smrg driver_location); 12537ec681f3Smrg} 12547ec681f3Smrg 12557ec681f3Smrg/* The config here should be generic enough to be correct on any HW. */ 12567ec681f3Smrgstatic const nir_unsigned_upper_bound_config default_ub_config = { 12577ec681f3Smrg .min_subgroup_size = 1u, 12587ec681f3Smrg .max_subgroup_size = UINT16_MAX, 12597ec681f3Smrg .max_workgroup_invocations = UINT16_MAX, 12607ec681f3Smrg .max_workgroup_count = {UINT16_MAX, UINT16_MAX, UINT16_MAX}, 12617ec681f3Smrg .max_workgroup_size = {UINT16_MAX, UINT16_MAX, UINT16_MAX}, 12627ec681f3Smrg .vertex_attrib_max = { 12637ec681f3Smrg UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, 12647ec681f3Smrg UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, 12657ec681f3Smrg UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, 12667ec681f3Smrg UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, 12677ec681f3Smrg }, 12687ec681f3Smrg}; 12697ec681f3Smrg 12707ec681f3Smrguint32_t 12717ec681f3Smrgnir_unsigned_upper_bound(nir_shader *shader, struct hash_table *range_ht, 12727ec681f3Smrg nir_ssa_scalar scalar, 12737ec681f3Smrg const nir_unsigned_upper_bound_config *config) 12747ec681f3Smrg{ 12757ec681f3Smrg assert(scalar.def->bit_size <= 32); 12767ec681f3Smrg 12777ec681f3Smrg if 
(!config) 12787ec681f3Smrg config = &default_ub_config; 12797ec681f3Smrg if (nir_ssa_scalar_is_const(scalar)) 12807ec681f3Smrg return nir_ssa_scalar_as_uint(scalar); 12817ec681f3Smrg 12827ec681f3Smrg /* keys can't be 0, so we have to add 1 to the index */ 12837ec681f3Smrg void *key = (void*)(((uintptr_t)(scalar.def->index + 1) << 4) | scalar.comp); 12847ec681f3Smrg struct hash_entry *he = _mesa_hash_table_search(range_ht, key); 12857ec681f3Smrg if (he != NULL) 12867ec681f3Smrg return (uintptr_t)he->data; 12877ec681f3Smrg 12887ec681f3Smrg uint32_t max = bitmask(scalar.def->bit_size); 12897ec681f3Smrg 12907ec681f3Smrg if (scalar.def->parent_instr->type == nir_instr_type_intrinsic) { 12917ec681f3Smrg uint32_t res = max; 12927ec681f3Smrg nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(scalar.def->parent_instr); 12937ec681f3Smrg switch (intrin->intrinsic) { 12947ec681f3Smrg case nir_intrinsic_load_local_invocation_index: 12957ec681f3Smrg /* The local invocation index is used under the hood by RADV for 12967ec681f3Smrg * some non-compute-like shaders (eg. LS and NGG). These technically 12977ec681f3Smrg * run in workgroups on the HW, even though this fact is not exposed 12987ec681f3Smrg * by the API. 12997ec681f3Smrg * They can safely use the same code path here as variable sized 13007ec681f3Smrg * compute-like shader stages. 
13017ec681f3Smrg */ 13027ec681f3Smrg if (!gl_shader_stage_uses_workgroup(shader->info.stage) || 13037ec681f3Smrg shader->info.workgroup_size_variable) { 13047ec681f3Smrg res = config->max_workgroup_invocations - 1; 13057ec681f3Smrg } else { 13067ec681f3Smrg res = (shader->info.workgroup_size[0] * 13077ec681f3Smrg shader->info.workgroup_size[1] * 13087ec681f3Smrg shader->info.workgroup_size[2]) - 1u; 13097ec681f3Smrg } 13107ec681f3Smrg break; 13117ec681f3Smrg case nir_intrinsic_load_local_invocation_id: 13127ec681f3Smrg if (shader->info.workgroup_size_variable) 13137ec681f3Smrg res = config->max_workgroup_size[scalar.comp] - 1u; 13147ec681f3Smrg else 13157ec681f3Smrg res = shader->info.workgroup_size[scalar.comp] - 1u; 13167ec681f3Smrg break; 13177ec681f3Smrg case nir_intrinsic_load_workgroup_id: 13187ec681f3Smrg res = config->max_workgroup_count[scalar.comp] - 1u; 13197ec681f3Smrg break; 13207ec681f3Smrg case nir_intrinsic_load_num_workgroups: 13217ec681f3Smrg res = config->max_workgroup_count[scalar.comp]; 13227ec681f3Smrg break; 13237ec681f3Smrg case nir_intrinsic_load_global_invocation_id: 13247ec681f3Smrg if (shader->info.workgroup_size_variable) { 13257ec681f3Smrg res = mul_clamp(config->max_workgroup_size[scalar.comp], 13267ec681f3Smrg config->max_workgroup_count[scalar.comp]) - 1u; 13277ec681f3Smrg } else { 13287ec681f3Smrg res = (shader->info.workgroup_size[scalar.comp] * 13297ec681f3Smrg config->max_workgroup_count[scalar.comp]) - 1u; 13307ec681f3Smrg } 13317ec681f3Smrg break; 13327ec681f3Smrg case nir_intrinsic_load_invocation_id: 13337ec681f3Smrg if (shader->info.stage == MESA_SHADER_TESS_CTRL) 13347ec681f3Smrg res = shader->info.tess.tcs_vertices_out 13357ec681f3Smrg ? 
(shader->info.tess.tcs_vertices_out - 1) 13367ec681f3Smrg : 511; /* Generous maximum output patch size of 512 */ 13377ec681f3Smrg break; 13387ec681f3Smrg case nir_intrinsic_load_subgroup_invocation: 13397ec681f3Smrg case nir_intrinsic_first_invocation: 13407ec681f3Smrg res = config->max_subgroup_size - 1; 13417ec681f3Smrg break; 13427ec681f3Smrg case nir_intrinsic_mbcnt_amd: { 13437ec681f3Smrg uint32_t src0 = config->max_subgroup_size - 1; 13447ec681f3Smrg uint32_t src1 = nir_unsigned_upper_bound(shader, range_ht, (nir_ssa_scalar){intrin->src[1].ssa, 0}, config); 13457ec681f3Smrg 13467ec681f3Smrg if (src0 + src1 < src0) 13477ec681f3Smrg res = max; /* overflow */ 13487ec681f3Smrg else 13497ec681f3Smrg res = src0 + src1; 13507ec681f3Smrg break; 13517ec681f3Smrg } 13527ec681f3Smrg case nir_intrinsic_load_subgroup_size: 13537ec681f3Smrg res = config->max_subgroup_size; 13547ec681f3Smrg break; 13557ec681f3Smrg case nir_intrinsic_load_subgroup_id: 13567ec681f3Smrg case nir_intrinsic_load_num_subgroups: { 13577ec681f3Smrg uint32_t workgroup_size = config->max_workgroup_invocations; 13587ec681f3Smrg if (gl_shader_stage_uses_workgroup(shader->info.stage) && 13597ec681f3Smrg !shader->info.workgroup_size_variable) { 13607ec681f3Smrg workgroup_size = shader->info.workgroup_size[0] * 13617ec681f3Smrg shader->info.workgroup_size[1] * 13627ec681f3Smrg shader->info.workgroup_size[2]; 13637ec681f3Smrg } 13647ec681f3Smrg res = DIV_ROUND_UP(workgroup_size, config->min_subgroup_size); 13657ec681f3Smrg if (intrin->intrinsic == nir_intrinsic_load_subgroup_id) 13667ec681f3Smrg res--; 13677ec681f3Smrg break; 13687ec681f3Smrg } 13697ec681f3Smrg case nir_intrinsic_load_input: { 13707ec681f3Smrg if (shader->info.stage == MESA_SHADER_VERTEX && nir_src_is_const(intrin->src[0])) { 13717ec681f3Smrg nir_variable *var = lookup_input(shader, nir_intrinsic_base(intrin)); 13727ec681f3Smrg if (var) { 13737ec681f3Smrg int loc = var->data.location - VERT_ATTRIB_GENERIC0; 13747ec681f3Smrg if (loc >= 0) 
/* --- tail of nir_unsigned_upper_bound() (function begins earlier in the file).
 * This continuation finishes the intrinsic switch, then handles phis and ALU
 * ops, computing a conservative unsigned upper bound for a 32-bit-or-smaller
 * scalar SSA value.  Results are memoized in range_ht under `key`.
 */
               res = config->vertex_attrib_max[loc];
            }
         }
         break;
      }
      case nir_intrinsic_reduce:
      case nir_intrinsic_inclusive_scan:
      case nir_intrinsic_exclusive_scan: {
         /* min/max reductions cannot produce a value larger than the bound of
          * the per-invocation input; other reduction ops (iadd, ...) can, so
          * they fall through to the conservative default.
          */
         nir_op op = nir_intrinsic_reduction_op(intrin);
         if (op == nir_op_umin || op == nir_op_umax || op == nir_op_imin || op == nir_op_imax)
            res = nir_unsigned_upper_bound(shader, range_ht, (nir_ssa_scalar){intrin->src[0].ssa, 0}, config);
         break;
      }
      case nir_intrinsic_read_first_invocation:
      case nir_intrinsic_read_invocation:
      case nir_intrinsic_shuffle:
      case nir_intrinsic_shuffle_xor:
      case nir_intrinsic_shuffle_up:
      case nir_intrinsic_shuffle_down:
      case nir_intrinsic_quad_broadcast:
      case nir_intrinsic_quad_swap_horizontal:
      case nir_intrinsic_quad_swap_vertical:
      case nir_intrinsic_quad_swap_diagonal:
      case nir_intrinsic_quad_swizzle_amd:
      case nir_intrinsic_masked_swizzle_amd:
         /* These only move lane values around, so the bound of the data
          * source (src[0]) is preserved.
          */
         res = nir_unsigned_upper_bound(shader, range_ht, (nir_ssa_scalar){intrin->src[0].ssa, 0}, config);
         break;
      case nir_intrinsic_write_invocation_amd: {
         /* Result is either the original value or the written one, so the
          * bound is the max of both source bounds.
          */
         uint32_t src0 = nir_unsigned_upper_bound(shader, range_ht, (nir_ssa_scalar){intrin->src[0].ssa, 0}, config);
         uint32_t src1 = nir_unsigned_upper_bound(shader, range_ht, (nir_ssa_scalar){intrin->src[1].ssa, 0}, config);
         res = MAX2(src0, src1);
         break;
      }
      case nir_intrinsic_load_tess_rel_patch_id_amd:
      case nir_intrinsic_load_tcs_num_patches_amd:
         /* Very generous maximum: TCS/TES executed by largest possible workgroup */
         res = config->max_workgroup_invocations / MAX2(shader->info.tess.tcs_vertices_out, 1u);
         break;
      default:
         break;
      }
      /* Only cache non-trivial results; `max` is the implicit default. */
      if (res != max)
         _mesa_hash_table_insert(range_ht, key, (void*)(uintptr_t)res);
      return res;
   }

   if (scalar.def->parent_instr->type == nir_instr_type_phi) {
      nir_cf_node *prev = nir_cf_node_prev(&scalar.def->parent_instr->block->cf_node);

      uint32_t res = 0;
      if (!prev || prev->type == nir_cf_node_block) {
         /* Ignore loop header phis: seed the cache with `max` first so the
          * recursive walk below terminates instead of looping through the
          * back-edge.
          */
         _mesa_hash_table_insert(range_ht, key, (void*)(uintptr_t)max);

         struct set *visited = _mesa_pointer_set_create(NULL);
         nir_ssa_scalar defs[64];
         unsigned def_count = search_phi_bcsel(scalar, defs, 64, visited);
         _mesa_set_destroy(visited, NULL);

         for (unsigned i = 0; i < def_count; i++)
            res = MAX2(res, nir_unsigned_upper_bound(shader, range_ht, defs[i], config));
      } else {
         /* Phi after an if/loop: the bound is the max over all incoming
          * sources.
          */
         nir_foreach_phi_src(src, nir_instr_as_phi(scalar.def->parent_instr)) {
            res = MAX2(res, nir_unsigned_upper_bound(
               shader, range_ht, (nir_ssa_scalar){src->src.ssa, 0}, config));
         }
      }

      _mesa_hash_table_insert(range_ht, key, (void*)(uintptr_t)res);
      return res;
   }

   if (nir_ssa_scalar_is_alu(scalar)) {
      nir_op op = nir_ssa_scalar_alu_op(scalar);

      /* First switch: filter to the ops we know how to bound (and reject
       * >32-bit sources for the conversions); everything else conservatively
       * returns `max` BEFORE recursing into the sources.
       */
      switch (op) {
      case nir_op_umin:
      case nir_op_imin:
      case nir_op_imax:
      case nir_op_umax:
      case nir_op_iand:
      case nir_op_ior:
      case nir_op_ixor:
      case nir_op_ishl:
      case nir_op_imul:
      case nir_op_ushr:
      case nir_op_ishr:
      case nir_op_iadd:
      case nir_op_umod:
      case nir_op_udiv:
      case nir_op_bcsel:
      case nir_op_b32csel:
      case nir_op_ubfe:
      case nir_op_bfm:
      case nir_op_fmul:
      case nir_op_extract_u8:
      case nir_op_extract_i8:
      case nir_op_extract_u16:
      case nir_op_extract_i16:
         break;
      case nir_op_u2u1:
      case nir_op_u2u8:
      case nir_op_u2u16:
      case nir_op_u2u32:
      case nir_op_f2u32:
         if (nir_ssa_scalar_chase_alu_src(scalar, 0).def->bit_size > 32) {
            /* If src is >32 bits, return max */
            return max;
         }
         break;
      default:
         return max;
      }

      uint32_t src0 = nir_unsigned_upper_bound(shader, range_ht, nir_ssa_scalar_chase_alu_src(scalar, 0), config);
      uint32_t src1 = max, src2 = max;
      if (nir_op_infos[op].num_inputs > 1)
         src1 = nir_unsigned_upper_bound(shader, range_ht, nir_ssa_scalar_chase_alu_src(scalar, 1), config);
      if (nir_op_infos[op].num_inputs > 2)
         src2 = nir_unsigned_upper_bound(shader, range_ht, nir_ssa_scalar_chase_alu_src(scalar, 2), config);

      uint32_t res = max;
      switch (op) {
      case nir_op_umin:
         /* umin result can never exceed the smaller bound. */
         res = src0 < src1 ? src0 : src1;
         break;
      case nir_op_imin:
      case nir_op_imax:
      case nir_op_umax:
         /* Result is one of the two sources, so bounded by the larger bound. */
         res = src0 > src1 ? src0 : src1;
         break;
      case nir_op_iand:
         /* a & b has no bit set above the highest set bit of either bound. */
         res = bitmask(util_last_bit64(src0)) & bitmask(util_last_bit64(src1));
         break;
      case nir_op_ior:
      case nir_op_ixor:
         /* or/xor can set any bit up to the highest bit of either bound. */
         res = bitmask(util_last_bit64(src0)) | bitmask(util_last_bit64(src1));
         break;
      case nir_op_ishl:
         if (util_last_bit64(src0) + src1 > scalar.def->bit_size)
            res = max; /* overflow */
         else
            res = src0 << MIN2(src1, scalar.def->bit_size - 1u);
         break;
      case nir_op_imul:
         /* Fold only when src0 * src1 provably fits in 32 bits. */
         if (src0 != 0 && (src0 * src1) / src0 != src1)
            res = max;
         else
            res = src0 * src1;
         break;
      case nir_op_ushr: {
         nir_ssa_scalar src1_scalar = nir_ssa_scalar_chase_alu_src(scalar, 1);
         if (nir_ssa_scalar_is_const(src1_scalar))
            res = src0 >> nir_ssa_scalar_as_uint(src1_scalar);
         else
            res = src0; /* any shift amount only makes the value smaller */
         break;
      }
      case nir_op_ishr: {
         /* Arithmetic shift: only fold when src0's bound shows the sign bit
          * is clear (src0 <= INT32_MAX), otherwise ones could shift in.
          */
         nir_ssa_scalar src1_scalar = nir_ssa_scalar_chase_alu_src(scalar, 1);
         if (src0 <= 2147483647 && nir_ssa_scalar_is_const(src1_scalar))
            res = src0 >> nir_ssa_scalar_as_uint(src1_scalar);
         else
            res = src0;
         break;
      }
      case nir_op_iadd:
         if (src0 + src1 < src0)
            res = max; /* overflow */
         else
            res = src0 + src1;
         break;
      case nir_op_umod:
         /* a % b <= b - 1 (and 0 when b's bound is 0). */
         res = src1 ? src1 - 1 : 0;
         break;
      case nir_op_udiv: {
         /* Only a constant divisor lets us shrink the bound; division by a
          * variable can be by 1, which keeps the value unchanged.
          */
         nir_ssa_scalar src1_scalar = nir_ssa_scalar_chase_alu_src(scalar, 1);
         if (nir_ssa_scalar_is_const(src1_scalar))
            res = nir_ssa_scalar_as_uint(src1_scalar) ? src0 / nir_ssa_scalar_as_uint(src1_scalar) : 0;
         else
            res = src0;
         break;
      }
      case nir_op_bcsel:
      case nir_op_b32csel:
         /* Selects one of src1/src2, so bounded by the larger of the two. */
         res = src1 > src2 ? src1 : src2;
         break;
      case nir_op_ubfe:
         /* Extracted field is at most src2 (bit count) bits wide. */
         res = bitmask(MIN2(src2, scalar.def->bit_size));
         break;
      case nir_op_bfm: {
         nir_ssa_scalar src1_scalar = nir_ssa_scalar_chase_alu_src(scalar, 1);
         if (nir_ssa_scalar_is_const(src1_scalar)) {
            /* Known offset: bound is `bits` ones shifted to that offset. */
            src0 = MIN2(src0, 31);
            src1 = nir_ssa_scalar_as_uint(src1_scalar) & 0x1fu;
            res = bitmask(src0) << src1;
         } else {
            /* Unknown offset: any bit below bits+offset may be set. */
            src0 = MIN2(src0, 31);
            src1 = MIN2(src1, 31);
            res = bitmask(MIN2(src0 + src1, 32));
         }
         break;
      }
      /* limited floating-point support for f2u32(fmul(load_input(), <constant>)) */
      case nir_op_f2u32:
         /* infinity/NaN starts at 0x7f800000u, negative numbers at 0x80000000 */
         if (src0 < 0x7f800000u) {
            /* src0 is the float's bit pattern here; reinterpret and truncate. */
            float val;
            memcpy(&val, &src0, 4);
            res = (uint32_t)val;
         }
         break;
      case nir_op_fmul:
         /* infinity/NaN starts at 0x7f800000u, negative numbers at 0x80000000 */
         if (src0 < 0x7f800000u && src1 < 0x7f800000u) {
            float src0_f, src1_f;
            memcpy(&src0_f, &src0, 4);
            memcpy(&src1_f, &src1, 4);
            /* not a proper rounding-up multiplication, but should be good enough */
            float max_f = ceilf(src0_f) * ceilf(src1_f);
            memcpy(&res, &max_f, 4);
         }
         break;
      case nir_op_u2u1:
      case nir_op_u2u8:
      case nir_op_u2u16:
      case nir_op_u2u32:
         /* `max` already reflects the destination bit size. */
         res = MIN2(src0, max);
         break;
      case nir_op_sad_u8x4:
         /* NOTE(review): sad_u8x4 is not accepted by the filter switch above,
          * so this case looks unreachable — confirm before relying on it.
          * Sum of four absolute byte differences adds at most 4*255 to src2.
          */
         res = src2 + 4 * 255;
         break;
      case nir_op_extract_u8:
         res = MIN2(src0, UINT8_MAX);
         break;
      case nir_op_extract_i8:
         /* Sign-extended byte: a value with bit 7 set becomes huge as
          * unsigned, so give up unless the bound rules that out.
          */
         res = (src0 >= 0x80) ? max : MIN2(src0, INT8_MAX);
         break;
      case nir_op_extract_u16:
         res = MIN2(src0, UINT16_MAX);
         break;
      case nir_op_extract_i16:
         res = (src0 >= 0x8000) ? max : MIN2(src0, INT16_MAX);
         break;
      default:
         res = max;
         break;
      }
      _mesa_hash_table_insert(range_ht, key, (void*)(uintptr_t)res);
      return res;
   }

   return max;
}

/**
 * Return true if adding the unsigned value `ssa` and the constant `const_val`
 * might overflow 32 bits; false only when overflow is provably impossible.
 *
 * Recognizes two cheap patterns before falling back to the generic
 * upper-bound query:
 *  - iadd(imul/ishl(a, #b), #c): a multiple of a stride plus a constant
 *    smaller than the stride's "gap" below UINT32_MAX cannot wrap.
 *  - iadd(iand(a, #b), #c): a masked value plus a constant below the mask's
 *    lowest set bit cannot carry into the masked bits.
 */
bool
nir_addition_might_overflow(nir_shader *shader, struct hash_table *range_ht,
                            nir_ssa_scalar ssa, unsigned const_val,
                            const nir_unsigned_upper_bound_config *config)
{
   if (nir_ssa_scalar_is_alu(ssa)) {
      nir_op alu_op = nir_ssa_scalar_alu_op(ssa);

      /* iadd(imul(a, #b), #c) */
      if (alu_op == nir_op_imul || alu_op == nir_op_ishl) {
         nir_ssa_scalar mul_src0 = nir_ssa_scalar_chase_alu_src(ssa, 0);
         nir_ssa_scalar mul_src1 = nir_ssa_scalar_chase_alu_src(ssa, 1);
         uint32_t stride = 1;
         if (nir_ssa_scalar_is_const(mul_src0))
            stride = nir_ssa_scalar_as_uint(mul_src0);
         else if (nir_ssa_scalar_is_const(mul_src1))
            stride = nir_ssa_scalar_as_uint(mul_src1);

         /* A shift by #b is a multiplication by 1 << #b. */
         if (alu_op == nir_op_ishl)
            stride = 1u << (stride % 32u);

         if (!stride || const_val <= UINT32_MAX - (UINT32_MAX / stride * stride))
            return false;
      }

      /* iadd(iand(a, #b), #c) */
      if (alu_op == nir_op_iand) {
         nir_ssa_scalar and_src0 = nir_ssa_scalar_chase_alu_src(ssa, 0);
         nir_ssa_scalar and_src1 = nir_ssa_scalar_chase_alu_src(ssa, 1);
         uint32_t mask = 0xffffffff;
         if (nir_ssa_scalar_is_const(and_src0))
            mask = nir_ssa_scalar_as_uint(and_src0);
         else if (nir_ssa_scalar_is_const(and_src1))
            mask = nir_ssa_scalar_as_uint(and_src1);
         if (mask == 0 || const_val < (1u << (ffs(mask) - 1)))
            return false;
      }
   }

   /* Generic fallback: overflow is possible iff const_val + upper_bound
    * wraps around.
    */
   uint32_t ub = nir_unsigned_upper_bound(shader, range_ht, ssa, config);
   return const_val + ub < const_val;
}

/**
 * Compute which bits of `def` its users actually consume, as a mask.
 * Walks every use; any use whose consumption pattern is unknown makes the
 * whole query give up and return the all-ones mask for def's bit size.
 * `recur` bounds recursion through value-preserving users (phis, subgroup
 * ops, bitwise reductions).
 */
static uint64_t
ssa_def_bits_used(nir_ssa_def *def, int recur)
{
   uint64_t bits_used = 0;
   uint64_t all_bits = BITFIELD64_MASK(def->bit_size);

   /* Querying the bits used from a vector is too hard of a question to
    * answer.  Return the conservative answer that all bits are used.  To
    * handle this, the function would need to be extended to be a query of a
    * single component of the vector.  That would also be necessary to fully
    * handle the 'num_components > 1' inside the loop below.
    *
    * FINISHME: This restriction will eventually need to be lifted to be
    * useful for hardware that uses u16vec2 as the native 16-bit integer type.
    */
   if (def->num_components > 1)
      return all_bits;

   /* Limit recursion */
   if (recur-- <= 0)
      return all_bits;

   nir_foreach_use(src, def) {
      switch (src->parent_instr->type) {
      case nir_instr_type_alu: {
         nir_alu_instr *use_alu = nir_instr_as_alu(src->parent_instr);
         unsigned src_idx = container_of(src, nir_alu_src, src) - use_alu->src;

         /* If a user of the value produces a vector result, return the
          * conservative answer that all bits are used.  It is possible to
          * answer this query by looping over the components used.  For example,
          *
          * vec4 32 ssa_5 = load_const(0x0000f000, 0x00000f00, 0x000000f0, 0x0000000f)
          * ...
          * vec4 32 ssa_8 = iand ssa_7.xxxx, ssa_5
          *
          * could conceivably return 0x0000ffff when querying the bits used of
          * ssa_7.  This is unlikely to be worth the effort because the
          * question can eventually be answered after the shader has been
          * scalarized.
          */
         if (use_alu->dest.dest.ssa.num_components > 1)
            return all_bits;

         switch (use_alu->op) {
         case nir_op_u2u8:
         case nir_op_i2i8:
            /* Narrowing to 8 bits reads only the low byte. */
            bits_used |= 0xff;
            break;

         case nir_op_u2u16:
         case nir_op_i2i16:
            bits_used |= all_bits & 0xffff;
            break;

         case nir_op_u2u32:
         case nir_op_i2i32:
            bits_used |= all_bits & 0xffffffff;
            break;

         case nir_op_extract_u8:
         case nir_op_extract_i8:
            /* With a constant chunk index, only that byte is read. */
            if (src_idx == 0 && nir_src_is_const(use_alu->src[1].src)) {
               unsigned chunk = nir_src_comp_as_uint(use_alu->src[1].src,
                                                     use_alu->src[1].swizzle[0]);
               bits_used |= 0xffull << (chunk * 8);
               break;
            } else {
               return all_bits;
            }

         case nir_op_extract_u16:
         case nir_op_extract_i16:
            if (src_idx == 0 && nir_src_is_const(use_alu->src[1].src)) {
               unsigned chunk = nir_src_comp_as_uint(use_alu->src[1].src,
                                                     use_alu->src[1].swizzle[0]);
               bits_used |= 0xffffull << (chunk * 16);
               break;
            } else {
               return all_bits;
            }

         case nir_op_ishl:
         case nir_op_ishr:
         case nir_op_ushr:
            /* As a shift count (src 1), only bits that can index the shifted
             * value's bit positions matter.
             */
            if (src_idx == 1) {
               bits_used |= (nir_src_bit_size(use_alu->src[0].src) - 1);
               break;
            } else {
               return all_bits;
            }

         case nir_op_iand:
            /* AND with a constant reads only the bits set in the constant. */
            assert(src_idx < 2);
            if (nir_src_is_const(use_alu->src[1 - src_idx].src)) {
               uint64_t u64 = nir_src_comp_as_uint(use_alu->src[1 - src_idx].src,
                                                   use_alu->src[1 - src_idx].swizzle[0]);
               bits_used |= u64;
               break;
            } else {
               return all_bits;
            }

         case nir_op_ior:
            /* OR with a constant makes the constant's set bits irrelevant. */
            assert(src_idx < 2);
            if (nir_src_is_const(use_alu->src[1 - src_idx].src)) {
               uint64_t u64 = nir_src_comp_as_uint(use_alu->src[1 - src_idx].src,
                                                   use_alu->src[1 - src_idx].swizzle[0]);
               bits_used |= all_bits & ~u64;
               break;
            } else {
               return all_bits;
            }

         default:
            /* We don't know what this op does */
            return all_bits;
         }
         break;
      }

      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *use_intrin =
            nir_instr_as_intrinsic(src->parent_instr);
         unsigned src_idx = src - use_intrin->src;

         switch (use_intrin->intrinsic) {
         case nir_intrinsic_read_invocation:
         case nir_intrinsic_shuffle:
         case nir_intrinsic_shuffle_up:
         case nir_intrinsic_shuffle_down:
         case nir_intrinsic_shuffle_xor:
         case nir_intrinsic_quad_broadcast:
         case nir_intrinsic_quad_swap_horizontal:
         case nir_intrinsic_quad_swap_vertical:
         case nir_intrinsic_quad_swap_diagonal:
            if (src_idx == 0) {
               /* Data operand: the bits used are whatever the result's users
                * need (these ops just move the value between lanes).
                */
               assert(use_intrin->dest.is_ssa);
               bits_used |= ssa_def_bits_used(&use_intrin->dest.ssa, recur);
            } else {
               if (use_intrin->intrinsic == nir_intrinsic_quad_broadcast) {
                  bits_used |= 3; /* quad lane index is 0..3 */
               } else {
                  /* Subgroups larger than 128 are not a thing */
                  bits_used |= 127;
               }
            }
            break;

         case nir_intrinsic_reduce:
         case nir_intrinsic_inclusive_scan:
         case nir_intrinsic_exclusive_scan:
            assert(src_idx == 0);
            switch (nir_intrinsic_reduction_op(use_intrin)) {
            case nir_op_iadd:
            case nir_op_imul:
            case nir_op_ior:
            case nir_op_iand:
            case nir_op_ixor:
               bits_used |= ssa_def_bits_used(&use_intrin->dest.ssa, recur);
               break;

            default:
               return all_bits;
            }
            break;

         default:
            /* We don't know what this op does */
            return all_bits;
         }
         break;
      }

      case nir_instr_type_phi: {
         /* Phis pass the value through unchanged. */
         nir_phi_instr *use_phi = nir_instr_as_phi(src->parent_instr);
         bits_used |= ssa_def_bits_used(&use_phi->dest.ssa, recur);
         break;
      }

      default:
         return all_bits;
      }

      /* If we've somehow shown that all our bits are used, we're done */
      assert((bits_used & ~all_bits) == 0);
      if (bits_used == all_bits)
         return all_bits;
   }

   return bits_used;
}

/**
 * Public entry point: which bits of `def` are consumed by its users.
 * Recursion through value-preserving users is capped at depth 2.
 */
uint64_t
nir_ssa_def_bits_used(nir_ssa_def *def)
{
   return ssa_def_bits_used(def, 2);
}