nir.h revision 7e102996
101e04c3fSmrg/* 201e04c3fSmrg * Copyright © 2014 Connor Abbott 301e04c3fSmrg * 401e04c3fSmrg * Permission is hereby granted, free of charge, to any person obtaining a 501e04c3fSmrg * copy of this software and associated documentation files (the "Software"), 601e04c3fSmrg * to deal in the Software without restriction, including without limitation 701e04c3fSmrg * the rights to use, copy, modify, merge, publish, distribute, sublicense, 801e04c3fSmrg * and/or sell copies of the Software, and to permit persons to whom the 901e04c3fSmrg * Software is furnished to do so, subject to the following conditions: 1001e04c3fSmrg * 1101e04c3fSmrg * The above copyright notice and this permission notice (including the next 1201e04c3fSmrg * paragraph) shall be included in all copies or substantial portions of the 1301e04c3fSmrg * Software. 1401e04c3fSmrg * 1501e04c3fSmrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 1601e04c3fSmrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 1701e04c3fSmrg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 1801e04c3fSmrg * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 1901e04c3fSmrg * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 2001e04c3fSmrg * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 2101e04c3fSmrg * IN THE SOFTWARE. 2201e04c3fSmrg * 2301e04c3fSmrg * Authors: 2401e04c3fSmrg * Connor Abbott (cwabbott0@gmail.com) 2501e04c3fSmrg * 2601e04c3fSmrg */ 2701e04c3fSmrg 2801e04c3fSmrg#ifndef NIR_H 2901e04c3fSmrg#define NIR_H 3001e04c3fSmrg 3101e04c3fSmrg#include "util/hash_table.h" 3201e04c3fSmrg#include "compiler/glsl/list.h" 3301e04c3fSmrg#include "GL/gl.h" /* GLenum */ 3401e04c3fSmrg#include "util/list.h" 3501e04c3fSmrg#include "util/ralloc.h" 3601e04c3fSmrg#include "util/set.h" 377e102996Smaya#include "util/bitscan.h" 3801e04c3fSmrg#include "util/bitset.h" 3901e04c3fSmrg#include "util/macros.h" 4001e04c3fSmrg#include "compiler/nir_types.h" 4101e04c3fSmrg#include "compiler/shader_enums.h" 4201e04c3fSmrg#include "compiler/shader_info.h" 4301e04c3fSmrg#include <stdio.h> 4401e04c3fSmrg 4501e04c3fSmrg#ifndef NDEBUG 4601e04c3fSmrg#include "util/debug.h" 4701e04c3fSmrg#endif /* NDEBUG */ 4801e04c3fSmrg 4901e04c3fSmrg#include "nir_opcodes.h" 5001e04c3fSmrg 5101e04c3fSmrg#if defined(_WIN32) && !defined(snprintf) 5201e04c3fSmrg#define snprintf _snprintf 5301e04c3fSmrg#endif 5401e04c3fSmrg 5501e04c3fSmrg#ifdef __cplusplus 5601e04c3fSmrgextern "C" { 5701e04c3fSmrg#endif 5801e04c3fSmrg 5901e04c3fSmrg#define NIR_FALSE 0u 6001e04c3fSmrg#define NIR_TRUE (~0u) 6101e04c3fSmrg#define NIR_MAX_VEC_COMPONENTS 4 627e102996Smaya#define NIR_MAX_MATRIX_COLUMNS 4 6301e04c3fSmrgtypedef uint8_t nir_component_mask_t; 6401e04c3fSmrg 6501e04c3fSmrg/** Defines a cast function 6601e04c3fSmrg * 6701e04c3fSmrg * This macro defines a cast function from in_type to out_type where 6801e04c3fSmrg * out_type is some structure type that contains a field of type out_type. 6901e04c3fSmrg * 7001e04c3fSmrg * Note that you have to be a bit careful as the generated cast function 7101e04c3fSmrg * destroys constness. 7201e04c3fSmrg */ 7301e04c3fSmrg#define NIR_DEFINE_CAST(name, in_type, out_type, field, \ 7401e04c3fSmrg type_field, type_value) \ 7501e04c3fSmrgstatic inline out_type * \ 7601e04c3fSmrgname(const in_type *parent) \ 7701e04c3fSmrg{ \ 7801e04c3fSmrg assert(parent && parent->type_field == type_value); \ 7901e04c3fSmrg return exec_node_data(out_type, parent, field); \ 8001e04c3fSmrg} 8101e04c3fSmrg 8201e04c3fSmrgstruct nir_function; 8301e04c3fSmrgstruct nir_shader; 8401e04c3fSmrgstruct nir_instr; 8501e04c3fSmrgstruct nir_builder; 8601e04c3fSmrg 8701e04c3fSmrg 8801e04c3fSmrg/** 8901e04c3fSmrg * Description of built-in state associated with a uniform 9001e04c3fSmrg * 9101e04c3fSmrg * \sa nir_variable::state_slots 9201e04c3fSmrg */ 9301e04c3fSmrgtypedef struct { 9401e04c3fSmrg gl_state_index16 tokens[STATE_LENGTH]; 9501e04c3fSmrg int swizzle; 9601e04c3fSmrg} nir_state_slot; 9701e04c3fSmrg 9801e04c3fSmrgtypedef enum { 9901e04c3fSmrg nir_var_shader_in = (1 << 0), 10001e04c3fSmrg nir_var_shader_out = (1 << 1), 1017e102996Smaya nir_var_shader_temp = (1 << 2), 1027e102996Smaya nir_var_function_temp = (1 << 3), 10301e04c3fSmrg nir_var_uniform = (1 << 4), 1047e102996Smaya nir_var_mem_ubo = (1 << 5), 10501e04c3fSmrg nir_var_system_value = (1 << 6), 1067e102996Smaya nir_var_mem_ssbo = (1 << 7), 1077e102996Smaya nir_var_mem_shared = (1 << 8), 1087e102996Smaya nir_var_mem_global = (1 << 9), 10901e04c3fSmrg nir_var_all = ~0, 11001e04c3fSmrg} nir_variable_mode; 11101e04c3fSmrg 11201e04c3fSmrg/** 11301e04c3fSmrg * Rounding modes. 11401e04c3fSmrg */ 11501e04c3fSmrgtypedef enum { 11601e04c3fSmrg nir_rounding_mode_undef = 0, 11701e04c3fSmrg nir_rounding_mode_rtne = 1, /* round to nearest even */ 11801e04c3fSmrg nir_rounding_mode_ru = 2, /* round up */ 11901e04c3fSmrg nir_rounding_mode_rd = 3, /* round down */ 12001e04c3fSmrg nir_rounding_mode_rtz = 4, /* round towards zero */ 12101e04c3fSmrg} nir_rounding_mode; 12201e04c3fSmrg 12301e04c3fSmrgtypedef union { 1247e102996Smaya bool b; 1257e102996Smaya float f32; 1267e102996Smaya double f64; 1277e102996Smaya int8_t i8; 1287e102996Smaya uint8_t u8; 1297e102996Smaya int16_t i16; 1307e102996Smaya uint16_t u16; 1317e102996Smaya int32_t i32; 1327e102996Smaya uint32_t u32; 1337e102996Smaya int64_t i64; 1347e102996Smaya uint64_t u64; 13501e04c3fSmrg} nir_const_value; 13601e04c3fSmrg 1377e102996Smaya#define nir_const_value_to_array(arr, c, components, m) \ 1387e102996Smaya{ \ 1397e102996Smaya for (unsigned i = 0; i < components; ++i) \ 1407e102996Smaya arr[i] = c[i].m; \ 1417e102996Smaya} while (false) 1427e102996Smaya 1437e102996Smayastatic inline nir_const_value 1447e102996Smayanir_const_value_for_raw_uint(uint64_t x, unsigned bit_size) 1457e102996Smaya{ 1467e102996Smaya nir_const_value v; 1477e102996Smaya memset(&v, 0, sizeof(v)); 1487e102996Smaya 1497e102996Smaya switch (bit_size) { 1507e102996Smaya case 1: v.b = x; break; 1517e102996Smaya case 8: v.u8 = x; break; 1527e102996Smaya case 16: v.u16 = x; break; 1537e102996Smaya case 32: v.u32 = x; break; 1547e102996Smaya case 64: v.u64 = x; break; 1557e102996Smaya default: 1567e102996Smaya unreachable("Invalid bit size"); 1577e102996Smaya } 1587e102996Smaya 1597e102996Smaya return v; 1607e102996Smaya} 1617e102996Smaya 1627e102996Smayastatic inline nir_const_value 1637e102996Smayanir_const_value_for_int(int64_t i, unsigned bit_size) 1647e102996Smaya{ 1657e102996Smaya nir_const_value v; 1667e102996Smaya memset(&v, 0, sizeof(v)); 1677e102996Smaya 1687e102996Smaya assert(bit_size <= 64); 1697e102996Smaya if (bit_size < 64) { 1707e102996Smaya assert(i >= (-(1ll << (bit_size - 1)))); 1717e102996Smaya assert(i < (1ll << (bit_size - 1))); 1727e102996Smaya } 1737e102996Smaya 1747e102996Smaya return nir_const_value_for_raw_uint(i, bit_size); 1757e102996Smaya} 1767e102996Smaya 1777e102996Smayastatic inline nir_const_value 1787e102996Smayanir_const_value_for_uint(uint64_t u, unsigned bit_size) 1797e102996Smaya{ 1807e102996Smaya nir_const_value v; 1817e102996Smaya memset(&v, 0, sizeof(v)); 1827e102996Smaya 1837e102996Smaya assert(bit_size <= 64); 1847e102996Smaya if (bit_size < 64) 1857e102996Smaya assert(u < (1ull << bit_size)); 1867e102996Smaya 1877e102996Smaya return nir_const_value_for_raw_uint(u, bit_size); 1887e102996Smaya} 1897e102996Smaya 1907e102996Smayastatic inline nir_const_value 1917e102996Smayanir_const_value_for_bool(bool b, unsigned bit_size) 1927e102996Smaya{ 1937e102996Smaya /* Booleans use a 0/-1 convention */ 1947e102996Smaya return nir_const_value_for_int(-(int)b, bit_size); 1957e102996Smaya} 1967e102996Smaya 1977e102996Smaya/* This one isn't inline because it requires half-float conversion */ 1987e102996Smayanir_const_value nir_const_value_for_float(double b, unsigned bit_size); 1997e102996Smaya 2007e102996Smayastatic inline int64_t 2017e102996Smayanir_const_value_as_int(nir_const_value value, unsigned bit_size) 2027e102996Smaya{ 2037e102996Smaya switch (bit_size) { 2047e102996Smaya /* int1_t uses 0/-1 convention */ 2057e102996Smaya case 1: return -(int)value.b; 2067e102996Smaya case 8: return value.i8; 2077e102996Smaya case 16: return value.i16; 2087e102996Smaya case 32: return value.i32; 2097e102996Smaya case 64: return value.i64; 2107e102996Smaya default: 2117e102996Smaya unreachable("Invalid bit size"); 2127e102996Smaya } 2137e102996Smaya} 2147e102996Smaya 2157e102996Smayastatic inline int64_t 2167e102996Smayanir_const_value_as_uint(nir_const_value value, unsigned bit_size) 2177e102996Smaya{ 2187e102996Smaya switch (bit_size) { 2197e102996Smaya case 1: return value.b; 2207e102996Smaya case 8: return value.u8; 2217e102996Smaya case 16: return value.u16; 2227e102996Smaya case 32: return value.u32; 2237e102996Smaya case 64: return value.u64; 2247e102996Smaya default: 2257e102996Smaya unreachable("Invalid bit size"); 2267e102996Smaya } 2277e102996Smaya} 2287e102996Smaya 2297e102996Smayastatic inline bool 2307e102996Smayanir_const_value_as_bool(nir_const_value value, unsigned bit_size) 2317e102996Smaya{ 2327e102996Smaya int64_t i = nir_const_value_as_int(value, bit_size); 2337e102996Smaya 2347e102996Smaya /* Booleans of any size use 0/-1 convention */ 2357e102996Smaya assert(i == 0 || i == -1); 2367e102996Smaya 2377e102996Smaya return i; 2387e102996Smaya} 2397e102996Smaya 2407e102996Smaya/* This one isn't inline because it requires half-float conversion */ 2417e102996Smayadouble nir_const_value_as_float(nir_const_value value, unsigned bit_size); 2427e102996Smaya 24301e04c3fSmrgtypedef struct nir_constant { 24401e04c3fSmrg /** 24501e04c3fSmrg * Value of the constant. 24601e04c3fSmrg * 24701e04c3fSmrg * The field used to back the values supplied by the constant is determined 24801e04c3fSmrg * by the type associated with the \c nir_variable. Constants may be 24901e04c3fSmrg * scalars, vectors, or matrices. 25001e04c3fSmrg */ 2517e102996Smaya nir_const_value values[NIR_MAX_MATRIX_COLUMNS][NIR_MAX_VEC_COMPONENTS]; 25201e04c3fSmrg 25301e04c3fSmrg /* we could get this from the var->type but makes clone *much* easier to 25401e04c3fSmrg * not have to care about the type. 25501e04c3fSmrg */ 25601e04c3fSmrg unsigned num_elements; 25701e04c3fSmrg 25801e04c3fSmrg /* Array elements / Structure Fields */ 25901e04c3fSmrg struct nir_constant **elements; 26001e04c3fSmrg} nir_constant; 26101e04c3fSmrg 26201e04c3fSmrg/** 26301e04c3fSmrg * \brief Layout qualifiers for gl_FragDepth. 26401e04c3fSmrg * 26501e04c3fSmrg * The AMD/ARB_conservative_depth extensions allow gl_FragDepth to be redeclared 26601e04c3fSmrg * with a layout qualifier. 26701e04c3fSmrg */ 26801e04c3fSmrgtypedef enum { 26901e04c3fSmrg nir_depth_layout_none, /**< No depth layout is specified. */ 27001e04c3fSmrg nir_depth_layout_any, 27101e04c3fSmrg nir_depth_layout_greater, 27201e04c3fSmrg nir_depth_layout_less, 27301e04c3fSmrg nir_depth_layout_unchanged 27401e04c3fSmrg} nir_depth_layout; 27501e04c3fSmrg 27601e04c3fSmrg/** 27701e04c3fSmrg * Enum keeping track of how a variable was declared. 27801e04c3fSmrg */ 27901e04c3fSmrgtypedef enum { 28001e04c3fSmrg /** 28101e04c3fSmrg * Normal declaration. 28201e04c3fSmrg */ 28301e04c3fSmrg nir_var_declared_normally = 0, 28401e04c3fSmrg 28501e04c3fSmrg /** 28601e04c3fSmrg * Variable is implicitly generated by the compiler and should not be 28701e04c3fSmrg * visible via the API. 28801e04c3fSmrg */ 28901e04c3fSmrg nir_var_hidden, 29001e04c3fSmrg} nir_var_declaration_type; 29101e04c3fSmrg 29201e04c3fSmrg/** 29301e04c3fSmrg * Either a uniform, global variable, shader input, or shader output. Based on 29401e04c3fSmrg * ir_variable - it should be easy to translate between the two. 29501e04c3fSmrg */ 29601e04c3fSmrg 29701e04c3fSmrgtypedef struct nir_variable { 29801e04c3fSmrg struct exec_node node; 29901e04c3fSmrg 30001e04c3fSmrg /** 30101e04c3fSmrg * Declared type of the variable 30201e04c3fSmrg */ 30301e04c3fSmrg const struct glsl_type *type; 30401e04c3fSmrg 30501e04c3fSmrg /** 30601e04c3fSmrg * Declared name of the variable 30701e04c3fSmrg */ 30801e04c3fSmrg char *name; 30901e04c3fSmrg 31001e04c3fSmrg struct nir_variable_data { 31101e04c3fSmrg /** 31201e04c3fSmrg * Storage class of the variable. 31301e04c3fSmrg * 31401e04c3fSmrg * \sa nir_variable_mode 31501e04c3fSmrg */ 31601e04c3fSmrg nir_variable_mode mode; 31701e04c3fSmrg 31801e04c3fSmrg /** 31901e04c3fSmrg * Is the variable read-only? 32001e04c3fSmrg * 32101e04c3fSmrg * This is set for variables declared as \c const, shader inputs, 32201e04c3fSmrg * and uniforms. 32301e04c3fSmrg */ 32401e04c3fSmrg unsigned read_only:1; 32501e04c3fSmrg unsigned centroid:1; 32601e04c3fSmrg unsigned sample:1; 32701e04c3fSmrg unsigned patch:1; 32801e04c3fSmrg unsigned invariant:1; 32901e04c3fSmrg 33001e04c3fSmrg /** 33101e04c3fSmrg * When separate shader programs are enabled, only input/outputs between 33201e04c3fSmrg * the stages of a multi-stage separate program can be safely removed 33301e04c3fSmrg * from the shader interface. Other input/outputs must remains active. 33401e04c3fSmrg * 33501e04c3fSmrg * This is also used to make sure xfb varyings that are unused by the 33601e04c3fSmrg * fragment shader are not removed. 33701e04c3fSmrg */ 33801e04c3fSmrg unsigned always_active_io:1; 33901e04c3fSmrg 34001e04c3fSmrg /** 34101e04c3fSmrg * Interpolation mode for shader inputs / outputs 34201e04c3fSmrg * 34301e04c3fSmrg * \sa glsl_interp_mode 34401e04c3fSmrg */ 34501e04c3fSmrg unsigned interpolation:2; 34601e04c3fSmrg 34701e04c3fSmrg /** 34801e04c3fSmrg * If non-zero, then this variable may be packed along with other variables 34901e04c3fSmrg * into a single varying slot, so this offset should be applied when 35001e04c3fSmrg * accessing components. For example, an offset of 1 means that the x 35101e04c3fSmrg * component of this variable is actually stored in component y of the 35201e04c3fSmrg * location specified by \c location. 35301e04c3fSmrg */ 35401e04c3fSmrg unsigned location_frac:2; 35501e04c3fSmrg 35601e04c3fSmrg /** 35701e04c3fSmrg * If true, this variable represents an array of scalars that should 35801e04c3fSmrg * be tightly packed. In other words, consecutive array elements 35901e04c3fSmrg * should be stored one component apart, rather than one slot apart. 36001e04c3fSmrg */ 36101e04c3fSmrg unsigned compact:1; 36201e04c3fSmrg 36301e04c3fSmrg /** 36401e04c3fSmrg * Whether this is a fragment shader output implicitly initialized with 36501e04c3fSmrg * the previous contents of the specified render target at the 36601e04c3fSmrg * framebuffer location corresponding to this shader invocation. 36701e04c3fSmrg */ 36801e04c3fSmrg unsigned fb_fetch_output:1; 36901e04c3fSmrg 37001e04c3fSmrg /** 37101e04c3fSmrg * Non-zero if this variable is considered bindless as defined by 37201e04c3fSmrg * ARB_bindless_texture. 37301e04c3fSmrg */ 37401e04c3fSmrg unsigned bindless:1; 37501e04c3fSmrg 37601e04c3fSmrg /** 37701e04c3fSmrg * Was an explicit binding set in the shader? 37801e04c3fSmrg */ 37901e04c3fSmrg unsigned explicit_binding:1; 38001e04c3fSmrg 38101e04c3fSmrg /** 38201e04c3fSmrg * Was a transfer feedback buffer set in the shader? 38301e04c3fSmrg */ 38401e04c3fSmrg unsigned explicit_xfb_buffer:1; 38501e04c3fSmrg 38601e04c3fSmrg /** 38701e04c3fSmrg * Was a transfer feedback stride set in the shader? 38801e04c3fSmrg */ 38901e04c3fSmrg unsigned explicit_xfb_stride:1; 39001e04c3fSmrg 39101e04c3fSmrg /** 39201e04c3fSmrg * Was an explicit offset set in the shader? 39301e04c3fSmrg */ 39401e04c3fSmrg unsigned explicit_offset:1; 39501e04c3fSmrg 39601e04c3fSmrg /** 39701e04c3fSmrg * \brief Layout qualifier for gl_FragDepth. 39801e04c3fSmrg * 39901e04c3fSmrg * This is not equal to \c ir_depth_layout_none if and only if this 40001e04c3fSmrg * variable is \c gl_FragDepth and a layout qualifier is specified. 40101e04c3fSmrg */ 40201e04c3fSmrg nir_depth_layout depth_layout; 40301e04c3fSmrg 40401e04c3fSmrg /** 40501e04c3fSmrg * Storage location of the base of this variable 40601e04c3fSmrg * 40701e04c3fSmrg * The precise meaning of this field depends on the nature of the variable. 40801e04c3fSmrg * 40901e04c3fSmrg * - Vertex shader input: one of the values from \c gl_vert_attrib. 41001e04c3fSmrg * - Vertex shader output: one of the values from \c gl_varying_slot. 41101e04c3fSmrg * - Geometry shader input: one of the values from \c gl_varying_slot. 41201e04c3fSmrg * - Geometry shader output: one of the values from \c gl_varying_slot. 41301e04c3fSmrg * - Fragment shader input: one of the values from \c gl_varying_slot. 41401e04c3fSmrg * - Fragment shader output: one of the values from \c gl_frag_result. 41501e04c3fSmrg * - Uniforms: Per-stage uniform slot number for default uniform block. 41601e04c3fSmrg * - Uniforms: Index within the uniform block definition for UBO members. 41701e04c3fSmrg * - Non-UBO Uniforms: uniform slot number. 41801e04c3fSmrg * - Other: This field is not currently used. 41901e04c3fSmrg * 42001e04c3fSmrg * If the variable is a uniform, shader input, or shader output, and the 42101e04c3fSmrg * slot has not been assigned, the value will be -1. 42201e04c3fSmrg */ 42301e04c3fSmrg int location; 42401e04c3fSmrg 42501e04c3fSmrg /** 42601e04c3fSmrg * The actual location of the variable in the IR. Only valid for inputs 42701e04c3fSmrg * and outputs. 42801e04c3fSmrg */ 42901e04c3fSmrg unsigned int driver_location; 43001e04c3fSmrg 43101e04c3fSmrg /** 43201e04c3fSmrg * Vertex stream output identifier. 43301e04c3fSmrg * 43401e04c3fSmrg * For packed outputs, bit 31 is set and bits [2*i+1,2*i] indicate the 43501e04c3fSmrg * stream of the i-th component. 43601e04c3fSmrg */ 43701e04c3fSmrg unsigned stream; 43801e04c3fSmrg 43901e04c3fSmrg /** 44001e04c3fSmrg * output index for dual source blending. 44101e04c3fSmrg */ 44201e04c3fSmrg int index; 44301e04c3fSmrg 44401e04c3fSmrg /** 44501e04c3fSmrg * Descriptor set binding for sampler or UBO. 44601e04c3fSmrg */ 44701e04c3fSmrg int descriptor_set; 44801e04c3fSmrg 44901e04c3fSmrg /** 45001e04c3fSmrg * Initial binding point for a sampler or UBO. 45101e04c3fSmrg * 45201e04c3fSmrg * For array types, this represents the binding point for the first element. 45301e04c3fSmrg */ 45401e04c3fSmrg int binding; 45501e04c3fSmrg 45601e04c3fSmrg /** 45701e04c3fSmrg * Location an atomic counter or transform feedback is stored at. 45801e04c3fSmrg */ 45901e04c3fSmrg unsigned offset; 46001e04c3fSmrg 46101e04c3fSmrg /** 46201e04c3fSmrg * Transform feedback buffer. 46301e04c3fSmrg */ 46401e04c3fSmrg unsigned xfb_buffer; 46501e04c3fSmrg 46601e04c3fSmrg /** 46701e04c3fSmrg * Transform feedback stride. 46801e04c3fSmrg */ 46901e04c3fSmrg unsigned xfb_stride; 47001e04c3fSmrg 47101e04c3fSmrg /** 47201e04c3fSmrg * How the variable was declared. See nir_var_declaration_type. 47301e04c3fSmrg * 47401e04c3fSmrg * This is used to detect variables generated by the compiler, so should 47501e04c3fSmrg * not be visible via the API. 47601e04c3fSmrg */ 47701e04c3fSmrg unsigned how_declared:2; 47801e04c3fSmrg 47901e04c3fSmrg /** 48001e04c3fSmrg * ARB_shader_image_load_store qualifiers. 48101e04c3fSmrg */ 48201e04c3fSmrg struct { 48301e04c3fSmrg enum gl_access_qualifier access; 48401e04c3fSmrg 48501e04c3fSmrg /** Image internal format if specified explicitly, otherwise GL_NONE. */ 48601e04c3fSmrg GLenum format; 48701e04c3fSmrg } image; 48801e04c3fSmrg } data; 48901e04c3fSmrg 49001e04c3fSmrg /** 49101e04c3fSmrg * Built-in state that backs this uniform 49201e04c3fSmrg * 49301e04c3fSmrg * Once set at variable creation, \c state_slots must remain invariant. 49401e04c3fSmrg * This is because, ideally, this array would be shared by all clones of 49501e04c3fSmrg * this variable in the IR tree. In other words, we'd really like for it 49601e04c3fSmrg * to be a fly-weight. 49701e04c3fSmrg * 49801e04c3fSmrg * If the variable is not a uniform, \c num_state_slots will be zero and 49901e04c3fSmrg * \c state_slots will be \c NULL. 50001e04c3fSmrg */ 50101e04c3fSmrg /*@{*/ 50201e04c3fSmrg unsigned num_state_slots; /**< Number of state slots used */ 50301e04c3fSmrg nir_state_slot *state_slots; /**< State descriptors. */ 50401e04c3fSmrg /*@}*/ 50501e04c3fSmrg 50601e04c3fSmrg /** 50701e04c3fSmrg * Constant expression assigned in the initializer of the variable 50801e04c3fSmrg * 50901e04c3fSmrg * This field should only be used temporarily by creators of NIR shaders 51001e04c3fSmrg * and then lower_constant_initializers can be used to get rid of them. 51101e04c3fSmrg * Most of the rest of NIR ignores this field or asserts that it's NULL. 51201e04c3fSmrg */ 51301e04c3fSmrg nir_constant *constant_initializer; 51401e04c3fSmrg 51501e04c3fSmrg /** 51601e04c3fSmrg * For variables that are in an interface block or are an instance of an 51701e04c3fSmrg * interface block, this is the \c GLSL_TYPE_INTERFACE type for that block. 51801e04c3fSmrg * 51901e04c3fSmrg * \sa ir_variable::location 52001e04c3fSmrg */ 52101e04c3fSmrg const struct glsl_type *interface_type; 52201e04c3fSmrg 52301e04c3fSmrg /** 52401e04c3fSmrg * Description of per-member data for per-member struct variables 52501e04c3fSmrg * 52601e04c3fSmrg * This is used for variables which are actually an amalgamation of 52701e04c3fSmrg * multiple entities such as a struct of built-in values or a struct of 52801e04c3fSmrg * inputs each with their own layout specifier. This is only allowed on 52901e04c3fSmrg * variables with a struct or array of array of struct type. 53001e04c3fSmrg */ 53101e04c3fSmrg unsigned num_members; 53201e04c3fSmrg struct nir_variable_data *members; 53301e04c3fSmrg} nir_variable; 53401e04c3fSmrg 53501e04c3fSmrg#define nir_foreach_variable(var, var_list) \ 53601e04c3fSmrg foreach_list_typed(nir_variable, var, node, var_list) 53701e04c3fSmrg 53801e04c3fSmrg#define nir_foreach_variable_safe(var, var_list) \ 53901e04c3fSmrg foreach_list_typed_safe(nir_variable, var, node, var_list) 54001e04c3fSmrg 54101e04c3fSmrgstatic inline bool 54201e04c3fSmrgnir_variable_is_global(const nir_variable *var) 54301e04c3fSmrg{ 5447e102996Smaya return var->data.mode != nir_var_function_temp; 54501e04c3fSmrg} 54601e04c3fSmrg 54701e04c3fSmrgtypedef struct nir_register { 54801e04c3fSmrg struct exec_node node; 54901e04c3fSmrg 55001e04c3fSmrg unsigned num_components; /** < number of vector components */ 55101e04c3fSmrg unsigned num_array_elems; /** < size of array (0 for no array) */ 55201e04c3fSmrg 55301e04c3fSmrg /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */ 55401e04c3fSmrg uint8_t bit_size; 55501e04c3fSmrg 55601e04c3fSmrg /** generic register index. */ 55701e04c3fSmrg unsigned index; 55801e04c3fSmrg 55901e04c3fSmrg /** only for debug purposes, can be NULL */ 56001e04c3fSmrg const char *name; 56101e04c3fSmrg 56201e04c3fSmrg /** set of nir_srcs where this register is used (read from) */ 56301e04c3fSmrg struct list_head uses; 56401e04c3fSmrg 56501e04c3fSmrg /** set of nir_dests where this register is defined (written to) */ 56601e04c3fSmrg struct list_head defs; 56701e04c3fSmrg 56801e04c3fSmrg /** set of nir_ifs where this register is used as a condition */ 56901e04c3fSmrg struct list_head if_uses; 57001e04c3fSmrg} nir_register; 57101e04c3fSmrg 57201e04c3fSmrg#define nir_foreach_register(reg, reg_list) \ 57301e04c3fSmrg foreach_list_typed(nir_register, reg, node, reg_list) 57401e04c3fSmrg#define nir_foreach_register_safe(reg, reg_list) \ 57501e04c3fSmrg foreach_list_typed_safe(nir_register, reg, node, reg_list) 57601e04c3fSmrg 5777e102996Smayatypedef enum PACKED { 57801e04c3fSmrg nir_instr_type_alu, 57901e04c3fSmrg nir_instr_type_deref, 58001e04c3fSmrg nir_instr_type_call, 58101e04c3fSmrg nir_instr_type_tex, 58201e04c3fSmrg nir_instr_type_intrinsic, 58301e04c3fSmrg nir_instr_type_load_const, 58401e04c3fSmrg nir_instr_type_jump, 58501e04c3fSmrg nir_instr_type_ssa_undef, 58601e04c3fSmrg nir_instr_type_phi, 58701e04c3fSmrg nir_instr_type_parallel_copy, 58801e04c3fSmrg} nir_instr_type; 58901e04c3fSmrg 59001e04c3fSmrgtypedef struct nir_instr { 59101e04c3fSmrg struct exec_node node; 59201e04c3fSmrg struct nir_block *block; 5937e102996Smaya nir_instr_type type; 59401e04c3fSmrg 59501e04c3fSmrg /* A temporary for optimization and analysis passes to use for storing 59601e04c3fSmrg * flags. For instance, DCE uses this to store the "dead/live" info. 59701e04c3fSmrg */ 59801e04c3fSmrg uint8_t pass_flags; 5997e102996Smaya 6007e102996Smaya /** generic instruction index. */ 6017e102996Smaya unsigned index; 60201e04c3fSmrg} nir_instr; 60301e04c3fSmrg 60401e04c3fSmrgstatic inline nir_instr * 60501e04c3fSmrgnir_instr_next(nir_instr *instr) 60601e04c3fSmrg{ 60701e04c3fSmrg struct exec_node *next = exec_node_get_next(&instr->node); 60801e04c3fSmrg if (exec_node_is_tail_sentinel(next)) 60901e04c3fSmrg return NULL; 61001e04c3fSmrg else 61101e04c3fSmrg return exec_node_data(nir_instr, next, node); 61201e04c3fSmrg} 61301e04c3fSmrg 61401e04c3fSmrgstatic inline nir_instr * 61501e04c3fSmrgnir_instr_prev(nir_instr *instr) 61601e04c3fSmrg{ 61701e04c3fSmrg struct exec_node *prev = exec_node_get_prev(&instr->node); 61801e04c3fSmrg if (exec_node_is_head_sentinel(prev)) 61901e04c3fSmrg return NULL; 62001e04c3fSmrg else 62101e04c3fSmrg return exec_node_data(nir_instr, prev, node); 62201e04c3fSmrg} 62301e04c3fSmrg 62401e04c3fSmrgstatic inline bool 62501e04c3fSmrgnir_instr_is_first(const nir_instr *instr) 62601e04c3fSmrg{ 62701e04c3fSmrg return exec_node_is_head_sentinel(exec_node_get_prev_const(&instr->node)); 62801e04c3fSmrg} 62901e04c3fSmrg 63001e04c3fSmrgstatic inline bool 63101e04c3fSmrgnir_instr_is_last(const nir_instr *instr) 63201e04c3fSmrg{ 63301e04c3fSmrg return exec_node_is_tail_sentinel(exec_node_get_next_const(&instr->node)); 63401e04c3fSmrg} 63501e04c3fSmrg 63601e04c3fSmrgtypedef struct nir_ssa_def { 63701e04c3fSmrg /** for debugging only, can be NULL */ 63801e04c3fSmrg const char* name; 63901e04c3fSmrg 64001e04c3fSmrg /** generic SSA definition index. */ 64101e04c3fSmrg unsigned index; 64201e04c3fSmrg 64301e04c3fSmrg /** Index into the live_in and live_out bitfields */ 64401e04c3fSmrg unsigned live_index; 64501e04c3fSmrg 64601e04c3fSmrg /** Instruction which produces this SSA value. */ 64701e04c3fSmrg nir_instr *parent_instr; 64801e04c3fSmrg 64901e04c3fSmrg /** set of nir_instrs where this register is used (read from) */ 65001e04c3fSmrg struct list_head uses; 65101e04c3fSmrg 65201e04c3fSmrg /** set of nir_ifs where this register is used as a condition */ 65301e04c3fSmrg struct list_head if_uses; 65401e04c3fSmrg 65501e04c3fSmrg uint8_t num_components; 65601e04c3fSmrg 65701e04c3fSmrg /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */ 65801e04c3fSmrg uint8_t bit_size; 65901e04c3fSmrg} nir_ssa_def; 66001e04c3fSmrg 66101e04c3fSmrgstruct nir_src; 66201e04c3fSmrg 66301e04c3fSmrgtypedef struct { 66401e04c3fSmrg nir_register *reg; 66501e04c3fSmrg struct nir_src *indirect; /** < NULL for no indirect offset */ 66601e04c3fSmrg unsigned base_offset; 66701e04c3fSmrg 66801e04c3fSmrg /* TODO use-def chain goes here */ 66901e04c3fSmrg} nir_reg_src; 67001e04c3fSmrg 67101e04c3fSmrgtypedef struct { 67201e04c3fSmrg nir_instr *parent_instr; 67301e04c3fSmrg struct list_head def_link; 67401e04c3fSmrg 67501e04c3fSmrg nir_register *reg; 67601e04c3fSmrg struct nir_src *indirect; /** < NULL for no indirect offset */ 67701e04c3fSmrg unsigned base_offset; 67801e04c3fSmrg 67901e04c3fSmrg /* TODO def-use chain goes here */ 68001e04c3fSmrg} nir_reg_dest; 68101e04c3fSmrg 68201e04c3fSmrgstruct nir_if; 68301e04c3fSmrg 68401e04c3fSmrgtypedef struct nir_src { 68501e04c3fSmrg union { 68601e04c3fSmrg /** Instruction that consumes this value as a source. */ 68701e04c3fSmrg nir_instr *parent_instr; 68801e04c3fSmrg struct nir_if *parent_if; 68901e04c3fSmrg }; 69001e04c3fSmrg 69101e04c3fSmrg struct list_head use_link; 69201e04c3fSmrg 69301e04c3fSmrg union { 69401e04c3fSmrg nir_reg_src reg; 69501e04c3fSmrg nir_ssa_def *ssa; 69601e04c3fSmrg }; 69701e04c3fSmrg 69801e04c3fSmrg bool is_ssa; 69901e04c3fSmrg} nir_src; 70001e04c3fSmrg 70101e04c3fSmrgstatic inline nir_src 70201e04c3fSmrgnir_src_init(void) 70301e04c3fSmrg{ 70401e04c3fSmrg nir_src src = { { NULL } }; 70501e04c3fSmrg return src; 70601e04c3fSmrg} 70701e04c3fSmrg 70801e04c3fSmrg#define NIR_SRC_INIT nir_src_init() 70901e04c3fSmrg 71001e04c3fSmrg#define nir_foreach_use(src, reg_or_ssa_def) \ 71101e04c3fSmrg list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->uses, use_link) 71201e04c3fSmrg 71301e04c3fSmrg#define nir_foreach_use_safe(src, reg_or_ssa_def) \ 71401e04c3fSmrg list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->uses, use_link) 71501e04c3fSmrg 71601e04c3fSmrg#define nir_foreach_if_use(src, reg_or_ssa_def) \ 71701e04c3fSmrg list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link) 71801e04c3fSmrg 71901e04c3fSmrg#define nir_foreach_if_use_safe(src, reg_or_ssa_def) \ 72001e04c3fSmrg list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link) 72101e04c3fSmrg 72201e04c3fSmrgtypedef struct { 72301e04c3fSmrg union { 72401e04c3fSmrg nir_reg_dest reg; 72501e04c3fSmrg nir_ssa_def ssa; 72601e04c3fSmrg }; 72701e04c3fSmrg 72801e04c3fSmrg bool is_ssa; 72901e04c3fSmrg} nir_dest; 73001e04c3fSmrg 73101e04c3fSmrgstatic inline nir_dest 73201e04c3fSmrgnir_dest_init(void) 73301e04c3fSmrg{ 73401e04c3fSmrg nir_dest dest = { { { NULL } } }; 73501e04c3fSmrg return dest; 73601e04c3fSmrg} 73701e04c3fSmrg 73801e04c3fSmrg#define NIR_DEST_INIT nir_dest_init() 73901e04c3fSmrg 74001e04c3fSmrg#define nir_foreach_def(dest, reg) \ 74101e04c3fSmrg list_for_each_entry(nir_dest, dest, &(reg)->defs, reg.def_link) 74201e04c3fSmrg 74301e04c3fSmrg#define nir_foreach_def_safe(dest, reg) \ 74401e04c3fSmrg list_for_each_entry_safe(nir_dest, dest, &(reg)->defs, reg.def_link) 74501e04c3fSmrg 74601e04c3fSmrgstatic inline nir_src 74701e04c3fSmrgnir_src_for_ssa(nir_ssa_def *def) 74801e04c3fSmrg{ 74901e04c3fSmrg nir_src src = NIR_SRC_INIT; 75001e04c3fSmrg 75101e04c3fSmrg src.is_ssa = true; 75201e04c3fSmrg src.ssa = def; 75301e04c3fSmrg 75401e04c3fSmrg return src; 75501e04c3fSmrg} 75601e04c3fSmrg 75701e04c3fSmrgstatic inline nir_src 75801e04c3fSmrgnir_src_for_reg(nir_register *reg) 75901e04c3fSmrg{ 76001e04c3fSmrg nir_src src = NIR_SRC_INIT; 76101e04c3fSmrg 76201e04c3fSmrg src.is_ssa = false; 76301e04c3fSmrg src.reg.reg = reg; 76401e04c3fSmrg src.reg.indirect = NULL; 76501e04c3fSmrg src.reg.base_offset = 0; 76601e04c3fSmrg 76701e04c3fSmrg return src; 76801e04c3fSmrg} 76901e04c3fSmrg 77001e04c3fSmrgstatic inline nir_dest 77101e04c3fSmrgnir_dest_for_reg(nir_register *reg) 77201e04c3fSmrg{ 77301e04c3fSmrg nir_dest dest = NIR_DEST_INIT; 77401e04c3fSmrg 77501e04c3fSmrg dest.reg.reg = reg; 77601e04c3fSmrg 77701e04c3fSmrg return dest; 77801e04c3fSmrg} 77901e04c3fSmrg 78001e04c3fSmrgstatic inline unsigned 78101e04c3fSmrgnir_src_bit_size(nir_src src) 78201e04c3fSmrg{ 78301e04c3fSmrg return src.is_ssa ? src.ssa->bit_size : src.reg.reg->bit_size; 78401e04c3fSmrg} 78501e04c3fSmrg 78601e04c3fSmrgstatic inline unsigned 78701e04c3fSmrgnir_src_num_components(nir_src src) 78801e04c3fSmrg{ 78901e04c3fSmrg return src.is_ssa ? src.ssa->num_components : src.reg.reg->num_components; 79001e04c3fSmrg} 79101e04c3fSmrg 79201e04c3fSmrgstatic inline bool 79301e04c3fSmrgnir_src_is_const(nir_src src) 79401e04c3fSmrg{ 79501e04c3fSmrg return src.is_ssa && 79601e04c3fSmrg src.ssa->parent_instr->type == nir_instr_type_load_const; 79701e04c3fSmrg} 79801e04c3fSmrg 79901e04c3fSmrgint64_t nir_src_as_int(nir_src src); 80001e04c3fSmrguint64_t nir_src_as_uint(nir_src src); 80101e04c3fSmrgbool nir_src_as_bool(nir_src src); 80201e04c3fSmrgdouble nir_src_as_float(nir_src src); 80301e04c3fSmrgint64_t nir_src_comp_as_int(nir_src src, unsigned component); 80401e04c3fSmrguint64_t nir_src_comp_as_uint(nir_src src, unsigned component); 80501e04c3fSmrgbool nir_src_comp_as_bool(nir_src src, unsigned component); 80601e04c3fSmrgdouble nir_src_comp_as_float(nir_src src, unsigned component); 80701e04c3fSmrg 80801e04c3fSmrgstatic inline unsigned 80901e04c3fSmrgnir_dest_bit_size(nir_dest dest) 81001e04c3fSmrg{ 81101e04c3fSmrg return dest.is_ssa ? dest.ssa.bit_size : dest.reg.reg->bit_size; 81201e04c3fSmrg} 81301e04c3fSmrg 81401e04c3fSmrgstatic inline unsigned 81501e04c3fSmrgnir_dest_num_components(nir_dest dest) 81601e04c3fSmrg{ 81701e04c3fSmrg return dest.is_ssa ? dest.ssa.num_components : dest.reg.reg->num_components; 81801e04c3fSmrg} 81901e04c3fSmrg 82001e04c3fSmrgvoid nir_src_copy(nir_src *dest, const nir_src *src, void *instr_or_if); 82101e04c3fSmrgvoid nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr); 82201e04c3fSmrg 82301e04c3fSmrgtypedef struct { 82401e04c3fSmrg nir_src src; 82501e04c3fSmrg 82601e04c3fSmrg /** 82701e04c3fSmrg * \name input modifiers 82801e04c3fSmrg */ 82901e04c3fSmrg /*@{*/ 83001e04c3fSmrg /** 83101e04c3fSmrg * For inputs interpreted as floating point, flips the sign bit. For 83201e04c3fSmrg * inputs interpreted as integers, performs the two's complement negation. 83301e04c3fSmrg */ 83401e04c3fSmrg bool negate; 83501e04c3fSmrg 83601e04c3fSmrg /** 83701e04c3fSmrg * Clears the sign bit for floating point values, and computes the integer 83801e04c3fSmrg * absolute value for integers. Note that the negate modifier acts after 83901e04c3fSmrg * the absolute value modifier, therefore if both are set then all inputs 84001e04c3fSmrg * will become negative. 84101e04c3fSmrg */ 84201e04c3fSmrg bool abs; 84301e04c3fSmrg /*@}*/ 84401e04c3fSmrg 84501e04c3fSmrg /** 84601e04c3fSmrg * For each input component, says which component of the register it is 84701e04c3fSmrg * chosen from. Note that which elements of the swizzle are used and which 84801e04c3fSmrg * are ignored are based on the write mask for most opcodes - for example, 84901e04c3fSmrg * a statement like "foo.xzw = bar.zyx" would have a writemask of 1101b and 85001e04c3fSmrg * a swizzle of {2, x, 1, 0} where x means "don't care." 85101e04c3fSmrg */ 85201e04c3fSmrg uint8_t swizzle[NIR_MAX_VEC_COMPONENTS]; 85301e04c3fSmrg} nir_alu_src; 85401e04c3fSmrg 85501e04c3fSmrgtypedef struct { 85601e04c3fSmrg nir_dest dest; 85701e04c3fSmrg 85801e04c3fSmrg /** 85901e04c3fSmrg * \name saturate output modifier 86001e04c3fSmrg * 86101e04c3fSmrg * Only valid for opcodes that output floating-point numbers. Clamps the 86201e04c3fSmrg * output to between 0.0 and 1.0 inclusive. 86301e04c3fSmrg */ 86401e04c3fSmrg 86501e04c3fSmrg bool saturate; 86601e04c3fSmrg 86701e04c3fSmrg unsigned write_mask : NIR_MAX_VEC_COMPONENTS; /* ignored if dest.is_ssa is true */ 86801e04c3fSmrg} nir_alu_dest; 86901e04c3fSmrg 8707e102996Smaya/** NIR sized and unsized types 8717e102996Smaya * 8727e102996Smaya * The values in this enum are carefully chosen so that the sized type is 8737e102996Smaya * just the unsized type OR the number of bits. 8747e102996Smaya */ 87501e04c3fSmrgtypedef enum { 87601e04c3fSmrg nir_type_invalid = 0, /* Not a valid type */ 8777e102996Smaya nir_type_int = 2, 8787e102996Smaya nir_type_uint = 4, 8797e102996Smaya nir_type_bool = 6, 8807e102996Smaya nir_type_float = 128, 8817e102996Smaya nir_type_bool1 = 1 | nir_type_bool, 88201e04c3fSmrg nir_type_bool32 = 32 | nir_type_bool, 8837e102996Smaya nir_type_int1 = 1 | nir_type_int, 88401e04c3fSmrg nir_type_int8 = 8 | nir_type_int, 88501e04c3fSmrg nir_type_int16 = 16 | nir_type_int, 88601e04c3fSmrg nir_type_int32 = 32 | nir_type_int, 88701e04c3fSmrg nir_type_int64 = 64 | nir_type_int, 8887e102996Smaya nir_type_uint1 = 1 | nir_type_uint, 88901e04c3fSmrg nir_type_uint8 = 8 | nir_type_uint, 89001e04c3fSmrg nir_type_uint16 = 16 | nir_type_uint, 89101e04c3fSmrg nir_type_uint32 = 32 | nir_type_uint, 89201e04c3fSmrg nir_type_uint64 = 64 | nir_type_uint, 89301e04c3fSmrg nir_type_float16 = 16 | nir_type_float, 89401e04c3fSmrg nir_type_float32 = 32 | nir_type_float, 89501e04c3fSmrg nir_type_float64 = 64 | nir_type_float, 89601e04c3fSmrg} nir_alu_type; 89701e04c3fSmrg 8987e102996Smaya#define NIR_ALU_TYPE_SIZE_MASK 0x79 8997e102996Smaya#define NIR_ALU_TYPE_BASE_TYPE_MASK 0x86 90001e04c3fSmrg 90101e04c3fSmrgstatic inline unsigned 90201e04c3fSmrgnir_alu_type_get_type_size(nir_alu_type type) 90301e04c3fSmrg{ 90401e04c3fSmrg return type & NIR_ALU_TYPE_SIZE_MASK; 90501e04c3fSmrg} 90601e04c3fSmrg 90701e04c3fSmrgstatic inline unsigned 90801e04c3fSmrgnir_alu_type_get_base_type(nir_alu_type type) 90901e04c3fSmrg{ 91001e04c3fSmrg return type & NIR_ALU_TYPE_BASE_TYPE_MASK; 91101e04c3fSmrg} 91201e04c3fSmrg 91301e04c3fSmrgstatic inline nir_alu_type 91401e04c3fSmrgnir_get_nir_type_for_glsl_base_type(enum glsl_base_type base_type) 91501e04c3fSmrg{ 91601e04c3fSmrg switch (base_type) { 91701e04c3fSmrg case GLSL_TYPE_BOOL: 9187e102996Smaya return nir_type_bool1; 91901e04c3fSmrg break; 92001e04c3fSmrg case GLSL_TYPE_UINT: 92101e04c3fSmrg return nir_type_uint32; 92201e04c3fSmrg break; 92301e04c3fSmrg case GLSL_TYPE_INT: 92401e04c3fSmrg return nir_type_int32; 92501e04c3fSmrg break; 92601e04c3fSmrg case GLSL_TYPE_UINT16: 92701e04c3fSmrg return nir_type_uint16; 92801e04c3fSmrg break; 92901e04c3fSmrg case GLSL_TYPE_INT16: 93001e04c3fSmrg return nir_type_int16; 93101e04c3fSmrg break; 93201e04c3fSmrg case GLSL_TYPE_UINT8: 93301e04c3fSmrg return nir_type_uint8; 93401e04c3fSmrg case GLSL_TYPE_INT8: 93501e04c3fSmrg return nir_type_int8; 93601e04c3fSmrg case GLSL_TYPE_UINT64: 93701e04c3fSmrg return nir_type_uint64; 93801e04c3fSmrg break; 93901e04c3fSmrg case GLSL_TYPE_INT64: 94001e04c3fSmrg return nir_type_int64; 94101e04c3fSmrg break; 94201e04c3fSmrg case GLSL_TYPE_FLOAT: 94301e04c3fSmrg return nir_type_float32; 94401e04c3fSmrg break; 94501e04c3fSmrg case GLSL_TYPE_FLOAT16: 94601e04c3fSmrg return nir_type_float16; 94701e04c3fSmrg break; 94801e04c3fSmrg case GLSL_TYPE_DOUBLE: 94901e04c3fSmrg return nir_type_float64; 95001e04c3fSmrg break; 95101e04c3fSmrg default: 95201e04c3fSmrg unreachable("unknown type"); 95301e04c3fSmrg } 95401e04c3fSmrg} 95501e04c3fSmrg 95601e04c3fSmrgstatic inline nir_alu_type 95701e04c3fSmrgnir_get_nir_type_for_glsl_type(const struct glsl_type *type) 95801e04c3fSmrg{ 95901e04c3fSmrg return nir_get_nir_type_for_glsl_base_type(glsl_get_base_type(type)); 96001e04c3fSmrg} 96101e04c3fSmrg 96201e04c3fSmrgnir_op nir_type_conversion_op(nir_alu_type src, nir_alu_type dst, 96301e04c3fSmrg nir_rounding_mode rnd); 96401e04c3fSmrg 9657e102996Smayastatic inline nir_op 9667e102996Smayanir_op_vec(unsigned components) 9677e102996Smaya{ 9687e102996Smaya switch (components) { 9697e102996Smaya case 1: return nir_op_imov; 9707e102996Smaya case 2: return nir_op_vec2; 9717e102996Smaya case 3: return nir_op_vec3; 9727e102996Smaya case 4: return nir_op_vec4; 9737e102996Smaya default: unreachable("bad component count"); 9747e102996Smaya } 9757e102996Smaya} 9767e102996Smaya 97701e04c3fSmrgtypedef enum { 97801e04c3fSmrg NIR_OP_IS_COMMUTATIVE = (1 << 0), 97901e04c3fSmrg NIR_OP_IS_ASSOCIATIVE = (1 << 1), 98001e04c3fSmrg} nir_op_algebraic_property; 98101e04c3fSmrg 98201e04c3fSmrgtypedef struct { 98301e04c3fSmrg const char *name; 98401e04c3fSmrg 98501e04c3fSmrg unsigned num_inputs; 98601e04c3fSmrg 98701e04c3fSmrg /** 98801e04c3fSmrg * The number of components in the output 98901e04c3fSmrg * 99001e04c3fSmrg * If non-zero, this is the size of the output and input sizes are 99101e04c3fSmrg * explicitly given; swizzle and writemask are still in effect, but if 99201e04c3fSmrg * the output component is masked out, then the input component may 99301e04c3fSmrg * still be in use. 99401e04c3fSmrg * 99501e04c3fSmrg * If zero, the opcode acts in the standard, per-component manner; the 99601e04c3fSmrg * operation is performed on each component (except the ones that are 99701e04c3fSmrg * masked out) with the input being taken from the input swizzle for 99801e04c3fSmrg * that component. 99901e04c3fSmrg * 100001e04c3fSmrg * The size of some of the inputs may be given (i.e. non-zero) even 100101e04c3fSmrg * though output_size is zero; in that case, the inputs with a zero 100201e04c3fSmrg * size act per-component, while the inputs with non-zero size don't. 100301e04c3fSmrg */ 100401e04c3fSmrg unsigned output_size; 100501e04c3fSmrg 100601e04c3fSmrg /** 100701e04c3fSmrg * The type of vector that the instruction outputs. Note that the 100801e04c3fSmrg * staurate modifier is only allowed on outputs with the float type. 100901e04c3fSmrg */ 101001e04c3fSmrg 101101e04c3fSmrg nir_alu_type output_type; 101201e04c3fSmrg 101301e04c3fSmrg /** 101401e04c3fSmrg * The number of components in each input 101501e04c3fSmrg */ 101601e04c3fSmrg unsigned input_sizes[NIR_MAX_VEC_COMPONENTS]; 101701e04c3fSmrg 101801e04c3fSmrg /** 101901e04c3fSmrg * The type of vector that each input takes. Note that negate and 102001e04c3fSmrg * absolute value are only allowed on inputs with int or float type and 102101e04c3fSmrg * behave differently on the two. 102201e04c3fSmrg */ 102301e04c3fSmrg nir_alu_type input_types[NIR_MAX_VEC_COMPONENTS]; 102401e04c3fSmrg 102501e04c3fSmrg nir_op_algebraic_property algebraic_properties; 10267e102996Smaya 10277e102996Smaya /* Whether this represents a numeric conversion opcode */ 10287e102996Smaya bool is_conversion; 102901e04c3fSmrg} nir_op_info; 103001e04c3fSmrg 103101e04c3fSmrgextern const nir_op_info nir_op_infos[nir_num_opcodes]; 103201e04c3fSmrg 103301e04c3fSmrgtypedef struct nir_alu_instr { 103401e04c3fSmrg nir_instr instr; 103501e04c3fSmrg nir_op op; 103601e04c3fSmrg 103701e04c3fSmrg /** Indicates that this ALU instruction generates an exact value 103801e04c3fSmrg * 103901e04c3fSmrg * This is kind of a mixture of GLSL "precise" and "invariant" and not 104001e04c3fSmrg * really equivalent to either. This indicates that the value generated by 104101e04c3fSmrg * this operation is high-precision and any code transformations that touch 104201e04c3fSmrg * it must ensure that the resulting value is bit-for-bit identical to the 104301e04c3fSmrg * original. 104401e04c3fSmrg */ 104501e04c3fSmrg bool exact; 104601e04c3fSmrg 104701e04c3fSmrg nir_alu_dest dest; 104801e04c3fSmrg nir_alu_src src[]; 104901e04c3fSmrg} nir_alu_instr; 105001e04c3fSmrg 105101e04c3fSmrgvoid nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src, 105201e04c3fSmrg nir_alu_instr *instr); 105301e04c3fSmrgvoid nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src, 105401e04c3fSmrg nir_alu_instr *instr); 105501e04c3fSmrg 105601e04c3fSmrg/* is this source channel used? */ 105701e04c3fSmrgstatic inline bool 105801e04c3fSmrgnir_alu_instr_channel_used(const nir_alu_instr *instr, unsigned src, 105901e04c3fSmrg unsigned channel) 106001e04c3fSmrg{ 106101e04c3fSmrg if (nir_op_infos[instr->op].input_sizes[src] > 0) 106201e04c3fSmrg return channel < nir_op_infos[instr->op].input_sizes[src]; 106301e04c3fSmrg 106401e04c3fSmrg return (instr->dest.write_mask >> channel) & 1; 106501e04c3fSmrg} 106601e04c3fSmrg 10677e102996Smayastatic inline nir_component_mask_t 10687e102996Smayanir_alu_instr_src_read_mask(const nir_alu_instr *instr, unsigned src) 10697e102996Smaya{ 10707e102996Smaya nir_component_mask_t read_mask = 0; 10717e102996Smaya for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; c++) { 10727e102996Smaya if (!nir_alu_instr_channel_used(instr, src, c)) 10737e102996Smaya continue; 10747e102996Smaya 10757e102996Smaya read_mask |= (1 << instr->src[src].swizzle[c]); 10767e102996Smaya } 10777e102996Smaya return read_mask; 10787e102996Smaya} 10797e102996Smaya 108001e04c3fSmrg/* 108101e04c3fSmrg * For instructions whose destinations are SSA, get the number of channels 108201e04c3fSmrg * used for a source 108301e04c3fSmrg */ 108401e04c3fSmrgstatic inline unsigned 108501e04c3fSmrgnir_ssa_alu_instr_src_components(const nir_alu_instr *instr, unsigned src) 108601e04c3fSmrg{ 108701e04c3fSmrg assert(instr->dest.dest.is_ssa); 108801e04c3fSmrg 108901e04c3fSmrg if (nir_op_infos[instr->op].input_sizes[src] > 0) 109001e04c3fSmrg return nir_op_infos[instr->op].input_sizes[src]; 109101e04c3fSmrg 109201e04c3fSmrg return instr->dest.dest.ssa.num_components; 109301e04c3fSmrg} 109401e04c3fSmrg 10957e102996Smayabool nir_const_value_negative_equal(const nir_const_value *c1, 10967e102996Smaya const nir_const_value *c2, 10977e102996Smaya unsigned components, 10987e102996Smaya nir_alu_type base_type, 10997e102996Smaya unsigned bits); 11007e102996Smaya 110101e04c3fSmrgbool nir_alu_srcs_equal(const nir_alu_instr *alu1, const nir_alu_instr *alu2, 110201e04c3fSmrg unsigned src1, unsigned src2); 110301e04c3fSmrg 11047e102996Smayabool nir_alu_srcs_negative_equal(const nir_alu_instr *alu1, 11057e102996Smaya const nir_alu_instr *alu2, 11067e102996Smaya unsigned src1, unsigned src2); 11077e102996Smaya 110801e04c3fSmrgtypedef enum { 110901e04c3fSmrg nir_deref_type_var, 111001e04c3fSmrg nir_deref_type_array, 111101e04c3fSmrg nir_deref_type_array_wildcard, 11127e102996Smaya nir_deref_type_ptr_as_array, 111301e04c3fSmrg nir_deref_type_struct, 111401e04c3fSmrg nir_deref_type_cast, 111501e04c3fSmrg} nir_deref_type; 111601e04c3fSmrg 111701e04c3fSmrgtypedef struct { 111801e04c3fSmrg nir_instr instr; 111901e04c3fSmrg 112001e04c3fSmrg /** The type of this deref instruction */ 112101e04c3fSmrg nir_deref_type deref_type; 112201e04c3fSmrg 112301e04c3fSmrg /** The mode of the underlying variable */ 112401e04c3fSmrg nir_variable_mode mode; 112501e04c3fSmrg 112601e04c3fSmrg /** The dereferenced type of the resulting pointer value */ 112701e04c3fSmrg const struct glsl_type *type; 112801e04c3fSmrg 112901e04c3fSmrg union { 113001e04c3fSmrg /** Variable being dereferenced if deref_type is a deref_var */ 113101e04c3fSmrg nir_variable *var; 113201e04c3fSmrg 113301e04c3fSmrg /** Parent deref if deref_type is not deref_var */ 113401e04c3fSmrg nir_src parent; 113501e04c3fSmrg }; 113601e04c3fSmrg 113701e04c3fSmrg /** Additional deref parameters */ 113801e04c3fSmrg union { 113901e04c3fSmrg struct { 114001e04c3fSmrg nir_src index; 114101e04c3fSmrg } arr; 114201e04c3fSmrg 114301e04c3fSmrg struct { 114401e04c3fSmrg unsigned index; 114501e04c3fSmrg } strct; 11467e102996Smaya 11477e102996Smaya struct { 11487e102996Smaya unsigned ptr_stride; 11497e102996Smaya } cast; 115001e04c3fSmrg }; 115101e04c3fSmrg 115201e04c3fSmrg /** Destination to store the resulting "pointer" */ 115301e04c3fSmrg nir_dest dest; 115401e04c3fSmrg} nir_deref_instr; 115501e04c3fSmrg 11567e102996Smayastatic inline nir_deref_instr *nir_src_as_deref(nir_src src); 115701e04c3fSmrg 115801e04c3fSmrgstatic inline nir_deref_instr * 115901e04c3fSmrgnir_deref_instr_parent(const nir_deref_instr *instr) 116001e04c3fSmrg{ 116101e04c3fSmrg if (instr->deref_type == nir_deref_type_var) 116201e04c3fSmrg return NULL; 116301e04c3fSmrg else 116401e04c3fSmrg return nir_src_as_deref(instr->parent); 116501e04c3fSmrg} 116601e04c3fSmrg 116701e04c3fSmrgstatic inline nir_variable * 116801e04c3fSmrgnir_deref_instr_get_variable(const nir_deref_instr *instr) 116901e04c3fSmrg{ 117001e04c3fSmrg while (instr->deref_type != nir_deref_type_var) { 117101e04c3fSmrg if (instr->deref_type == nir_deref_type_cast) 117201e04c3fSmrg return NULL; 117301e04c3fSmrg 117401e04c3fSmrg instr = nir_deref_instr_parent(instr); 117501e04c3fSmrg } 117601e04c3fSmrg 117701e04c3fSmrg return instr->var; 117801e04c3fSmrg} 117901e04c3fSmrg 118001e04c3fSmrgbool nir_deref_instr_has_indirect(nir_deref_instr *instr); 118101e04c3fSmrg 118201e04c3fSmrgbool nir_deref_instr_remove_if_unused(nir_deref_instr *instr); 118301e04c3fSmrg 11847e102996Smayaunsigned nir_deref_instr_ptr_as_array_stride(nir_deref_instr *instr); 11857e102996Smaya 118601e04c3fSmrgtypedef struct { 118701e04c3fSmrg nir_instr instr; 118801e04c3fSmrg 118901e04c3fSmrg struct nir_function *callee; 119001e04c3fSmrg 119101e04c3fSmrg unsigned num_params; 119201e04c3fSmrg nir_src params[]; 119301e04c3fSmrg} nir_call_instr; 119401e04c3fSmrg 119501e04c3fSmrg#include "nir_intrinsics.h" 119601e04c3fSmrg 119701e04c3fSmrg#define NIR_INTRINSIC_MAX_CONST_INDEX 4 119801e04c3fSmrg 119901e04c3fSmrg/** Represents an intrinsic 120001e04c3fSmrg * 120101e04c3fSmrg * An intrinsic is an instruction type for handling things that are 120201e04c3fSmrg * more-or-less regular operations but don't just consume and produce SSA 120301e04c3fSmrg * values like ALU operations do. Intrinsics are not for things that have 120401e04c3fSmrg * special semantic meaning such as phi nodes and parallel copies. 120501e04c3fSmrg * Examples of intrinsics include variable load/store operations, system 120601e04c3fSmrg * value loads, and the like. Even though texturing more-or-less falls 120701e04c3fSmrg * under this category, texturing is its own instruction type because 120801e04c3fSmrg * trying to represent texturing with intrinsics would lead to a 120901e04c3fSmrg * combinatorial explosion of intrinsic opcodes. 121001e04c3fSmrg * 121101e04c3fSmrg * By having a single instruction type for handling a lot of different 121201e04c3fSmrg * cases, optimization passes can look for intrinsics and, for the most 121301e04c3fSmrg * part, completely ignore them. Each intrinsic type also has a few 121401e04c3fSmrg * possible flags that govern whether or not they can be reordered or 121501e04c3fSmrg * eliminated. That way passes like dead code elimination can still work 121601e04c3fSmrg * on intrisics without understanding the meaning of each. 121701e04c3fSmrg * 121801e04c3fSmrg * Each intrinsic has some number of constant indices, some number of 121901e04c3fSmrg * variables, and some number of sources. What these sources, variables, 122001e04c3fSmrg * and indices mean depends on the intrinsic and is documented with the 122101e04c3fSmrg * intrinsic declaration in nir_intrinsics.h. Intrinsics and texture 122201e04c3fSmrg * instructions are the only types of instruction that can operate on 122301e04c3fSmrg * variables. 122401e04c3fSmrg */ 122501e04c3fSmrgtypedef struct { 122601e04c3fSmrg nir_instr instr; 122701e04c3fSmrg 122801e04c3fSmrg nir_intrinsic_op intrinsic; 122901e04c3fSmrg 123001e04c3fSmrg nir_dest dest; 123101e04c3fSmrg 123201e04c3fSmrg /** number of components if this is a vectorized intrinsic 123301e04c3fSmrg * 123401e04c3fSmrg * Similarly to ALU operations, some intrinsics are vectorized. 123501e04c3fSmrg * An intrinsic is vectorized if nir_intrinsic_infos.dest_components == 0. 123601e04c3fSmrg * For vectorized intrinsics, the num_components field specifies the 123701e04c3fSmrg * number of destination components and the number of source components 123801e04c3fSmrg * for all sources with nir_intrinsic_infos.src_components[i] == 0. 123901e04c3fSmrg */ 124001e04c3fSmrg uint8_t num_components; 124101e04c3fSmrg 124201e04c3fSmrg int const_index[NIR_INTRINSIC_MAX_CONST_INDEX]; 124301e04c3fSmrg 124401e04c3fSmrg nir_src src[]; 124501e04c3fSmrg} nir_intrinsic_instr; 124601e04c3fSmrg 124701e04c3fSmrgstatic inline nir_variable * 124801e04c3fSmrgnir_intrinsic_get_var(nir_intrinsic_instr *intrin, unsigned i) 124901e04c3fSmrg{ 125001e04c3fSmrg return nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[i])); 125101e04c3fSmrg} 125201e04c3fSmrg 125301e04c3fSmrg/** 125401e04c3fSmrg * \name NIR intrinsics semantic flags 125501e04c3fSmrg * 125601e04c3fSmrg * information about what the compiler can do with the intrinsics. 125701e04c3fSmrg * 125801e04c3fSmrg * \sa nir_intrinsic_info::flags 125901e04c3fSmrg */ 126001e04c3fSmrgtypedef enum { 126101e04c3fSmrg /** 126201e04c3fSmrg * whether the intrinsic can be safely eliminated if none of its output 126301e04c3fSmrg * value is not being used. 126401e04c3fSmrg */ 126501e04c3fSmrg NIR_INTRINSIC_CAN_ELIMINATE = (1 << 0), 126601e04c3fSmrg 126701e04c3fSmrg /** 126801e04c3fSmrg * Whether the intrinsic can be reordered with respect to any other 126901e04c3fSmrg * intrinsic, i.e. whether the only reordering dependencies of the 127001e04c3fSmrg * intrinsic are due to the register reads/writes. 127101e04c3fSmrg */ 127201e04c3fSmrg NIR_INTRINSIC_CAN_REORDER = (1 << 1), 127301e04c3fSmrg} nir_intrinsic_semantic_flag; 127401e04c3fSmrg 127501e04c3fSmrg/** 127601e04c3fSmrg * \name NIR intrinsics const-index flag 127701e04c3fSmrg * 127801e04c3fSmrg * Indicates the usage of a const_index slot. 127901e04c3fSmrg * 128001e04c3fSmrg * \sa nir_intrinsic_info::index_map 128101e04c3fSmrg */ 128201e04c3fSmrgtypedef enum { 128301e04c3fSmrg /** 128401e04c3fSmrg * Generally instructions that take a offset src argument, can encode 128501e04c3fSmrg * a constant 'base' value which is added to the offset. 128601e04c3fSmrg */ 128701e04c3fSmrg NIR_INTRINSIC_BASE = 1, 128801e04c3fSmrg 128901e04c3fSmrg /** 129001e04c3fSmrg * For store instructions, a writemask for the store. 129101e04c3fSmrg */ 129201e04c3fSmrg NIR_INTRINSIC_WRMASK = 2, 129301e04c3fSmrg 129401e04c3fSmrg /** 129501e04c3fSmrg * The stream-id for GS emit_vertex/end_primitive intrinsics. 129601e04c3fSmrg */ 129701e04c3fSmrg NIR_INTRINSIC_STREAM_ID = 3, 129801e04c3fSmrg 129901e04c3fSmrg /** 130001e04c3fSmrg * The clip-plane id for load_user_clip_plane intrinsic. 130101e04c3fSmrg */ 130201e04c3fSmrg NIR_INTRINSIC_UCP_ID = 4, 130301e04c3fSmrg 130401e04c3fSmrg /** 130501e04c3fSmrg * The amount of data, starting from BASE, that this instruction may 130601e04c3fSmrg * access. This is used to provide bounds if the offset is not constant. 130701e04c3fSmrg */ 130801e04c3fSmrg NIR_INTRINSIC_RANGE = 5, 130901e04c3fSmrg 131001e04c3fSmrg /** 131101e04c3fSmrg * The Vulkan descriptor set for vulkan_resource_index intrinsic. 131201e04c3fSmrg */ 131301e04c3fSmrg NIR_INTRINSIC_DESC_SET = 6, 131401e04c3fSmrg 131501e04c3fSmrg /** 131601e04c3fSmrg * The Vulkan descriptor set binding for vulkan_resource_index intrinsic. 131701e04c3fSmrg */ 131801e04c3fSmrg NIR_INTRINSIC_BINDING = 7, 131901e04c3fSmrg 132001e04c3fSmrg /** 132101e04c3fSmrg * Component offset. 132201e04c3fSmrg */ 132301e04c3fSmrg NIR_INTRINSIC_COMPONENT = 8, 132401e04c3fSmrg 132501e04c3fSmrg /** 132601e04c3fSmrg * Interpolation mode (only meaningful for FS inputs). 132701e04c3fSmrg */ 132801e04c3fSmrg NIR_INTRINSIC_INTERP_MODE = 9, 132901e04c3fSmrg 133001e04c3fSmrg /** 133101e04c3fSmrg * A binary nir_op to use when performing a reduction or scan operation 133201e04c3fSmrg */ 133301e04c3fSmrg NIR_INTRINSIC_REDUCTION_OP = 10, 133401e04c3fSmrg 133501e04c3fSmrg /** 133601e04c3fSmrg * Cluster size for reduction operations 133701e04c3fSmrg */ 133801e04c3fSmrg NIR_INTRINSIC_CLUSTER_SIZE = 11, 133901e04c3fSmrg 134001e04c3fSmrg /** 134101e04c3fSmrg * Parameter index for a load_param intrinsic 134201e04c3fSmrg */ 134301e04c3fSmrg NIR_INTRINSIC_PARAM_IDX = 12, 134401e04c3fSmrg 134501e04c3fSmrg /** 134601e04c3fSmrg * Image dimensionality for image intrinsics 134701e04c3fSmrg * 134801e04c3fSmrg * One of GLSL_SAMPLER_DIM_* 134901e04c3fSmrg */ 135001e04c3fSmrg NIR_INTRINSIC_IMAGE_DIM = 13, 135101e04c3fSmrg 135201e04c3fSmrg /** 135301e04c3fSmrg * Non-zero if we are accessing an array image 135401e04c3fSmrg */ 135501e04c3fSmrg NIR_INTRINSIC_IMAGE_ARRAY = 14, 135601e04c3fSmrg 135701e04c3fSmrg /** 135801e04c3fSmrg * Image format for image intrinsics 135901e04c3fSmrg */ 136001e04c3fSmrg NIR_INTRINSIC_FORMAT = 15, 136101e04c3fSmrg 136201e04c3fSmrg /** 13637e102996Smaya * Access qualifiers for image and memory access intrinsics 136401e04c3fSmrg */ 136501e04c3fSmrg NIR_INTRINSIC_ACCESS = 16, 136601e04c3fSmrg 13677e102996Smaya /** 13687e102996Smaya * Alignment for offsets and addresses 13697e102996Smaya * 13707e102996Smaya * These two parameters, specify an alignment in terms of a multiplier and 13717e102996Smaya * an offset. The offset or address parameter X of the intrinsic is 13727e102996Smaya * guaranteed to satisfy the following: 13737e102996Smaya * 13747e102996Smaya * (X - align_offset) % align_mul == 0 13757e102996Smaya */ 13767e102996Smaya NIR_INTRINSIC_ALIGN_MUL = 17, 13777e102996Smaya NIR_INTRINSIC_ALIGN_OFFSET = 18, 13787e102996Smaya 13797e102996Smaya /** 13807e102996Smaya * The Vulkan descriptor type for a vulkan_resource_[re]index intrinsic. 13817e102996Smaya */ 13827e102996Smaya NIR_INTRINSIC_DESC_TYPE = 19, 13837e102996Smaya 13847e102996Smaya /* Separate source/dest access flags for copies */ 13857e102996Smaya NIR_INTRINSIC_SRC_ACCESS, 13867e102996Smaya NIR_INTRINSIC_DST_ACCESS, 13877e102996Smaya 138801e04c3fSmrg NIR_INTRINSIC_NUM_INDEX_FLAGS, 138901e04c3fSmrg 139001e04c3fSmrg} nir_intrinsic_index_flag; 139101e04c3fSmrg 139201e04c3fSmrg#define NIR_INTRINSIC_MAX_INPUTS 5 139301e04c3fSmrg 139401e04c3fSmrgtypedef struct { 139501e04c3fSmrg const char *name; 139601e04c3fSmrg 139701e04c3fSmrg unsigned num_srcs; /** < number of register/SSA inputs */ 139801e04c3fSmrg 139901e04c3fSmrg /** number of components of each input register 140001e04c3fSmrg * 140101e04c3fSmrg * If this value is 0, the number of components is given by the 14027e102996Smaya * num_components field of nir_intrinsic_instr. If this value is -1, the 14037e102996Smaya * intrinsic consumes however many components are provided and it is not 14047e102996Smaya * validated at all. 140501e04c3fSmrg */ 14067e102996Smaya int src_components[NIR_INTRINSIC_MAX_INPUTS]; 140701e04c3fSmrg 140801e04c3fSmrg bool has_dest; 140901e04c3fSmrg 141001e04c3fSmrg /** number of components of the output register 141101e04c3fSmrg * 141201e04c3fSmrg * If this value is 0, the number of components is given by the 141301e04c3fSmrg * num_components field of nir_intrinsic_instr. 141401e04c3fSmrg */ 141501e04c3fSmrg unsigned dest_components; 141601e04c3fSmrg 14177e102996Smaya /** bitfield of legal bit sizes */ 14187e102996Smaya unsigned dest_bit_sizes; 14197e102996Smaya 142001e04c3fSmrg /** the number of constant indices used by the intrinsic */ 142101e04c3fSmrg unsigned num_indices; 142201e04c3fSmrg 142301e04c3fSmrg /** indicates the usage of intr->const_index[n] */ 142401e04c3fSmrg unsigned index_map[NIR_INTRINSIC_NUM_INDEX_FLAGS]; 142501e04c3fSmrg 142601e04c3fSmrg /** semantic flags for calls to this intrinsic */ 142701e04c3fSmrg nir_intrinsic_semantic_flag flags; 142801e04c3fSmrg} nir_intrinsic_info; 142901e04c3fSmrg 143001e04c3fSmrgextern const nir_intrinsic_info nir_intrinsic_infos[nir_num_intrinsics]; 143101e04c3fSmrg 143201e04c3fSmrgstatic inline unsigned 143301e04c3fSmrgnir_intrinsic_src_components(nir_intrinsic_instr *intr, unsigned srcn) 143401e04c3fSmrg{ 143501e04c3fSmrg const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic]; 143601e04c3fSmrg assert(srcn < info->num_srcs); 14377e102996Smaya if (info->src_components[srcn] > 0) 143801e04c3fSmrg return info->src_components[srcn]; 14397e102996Smaya else if (info->src_components[srcn] == 0) 144001e04c3fSmrg return intr->num_components; 14417e102996Smaya else 14427e102996Smaya return nir_src_num_components(intr->src[srcn]); 144301e04c3fSmrg} 144401e04c3fSmrg 144501e04c3fSmrgstatic inline unsigned 144601e04c3fSmrgnir_intrinsic_dest_components(nir_intrinsic_instr *intr) 144701e04c3fSmrg{ 144801e04c3fSmrg const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic]; 144901e04c3fSmrg if (!info->has_dest) 145001e04c3fSmrg return 0; 145101e04c3fSmrg else if (info->dest_components) 145201e04c3fSmrg return info->dest_components; 145301e04c3fSmrg else 145401e04c3fSmrg return intr->num_components; 145501e04c3fSmrg} 145601e04c3fSmrg 145701e04c3fSmrg#define INTRINSIC_IDX_ACCESSORS(name, flag, type) \ 145801e04c3fSmrgstatic inline type \ 145901e04c3fSmrgnir_intrinsic_##name(const nir_intrinsic_instr *instr) \ 146001e04c3fSmrg{ \ 146101e04c3fSmrg const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; \ 146201e04c3fSmrg assert(info->index_map[NIR_INTRINSIC_##flag] > 0); \ 146301e04c3fSmrg return (type)instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1]; \ 146401e04c3fSmrg} \ 146501e04c3fSmrgstatic inline void \ 146601e04c3fSmrgnir_intrinsic_set_##name(nir_intrinsic_instr *instr, type val) \ 146701e04c3fSmrg{ \ 146801e04c3fSmrg const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; \ 146901e04c3fSmrg assert(info->index_map[NIR_INTRINSIC_##flag] > 0); \ 147001e04c3fSmrg instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1] = val; \ 147101e04c3fSmrg} 147201e04c3fSmrg 147301e04c3fSmrgINTRINSIC_IDX_ACCESSORS(write_mask, WRMASK, unsigned) 147401e04c3fSmrgINTRINSIC_IDX_ACCESSORS(base, BASE, int) 147501e04c3fSmrgINTRINSIC_IDX_ACCESSORS(stream_id, STREAM_ID, unsigned) 147601e04c3fSmrgINTRINSIC_IDX_ACCESSORS(ucp_id, UCP_ID, unsigned) 147701e04c3fSmrgINTRINSIC_IDX_ACCESSORS(range, RANGE, unsigned) 147801e04c3fSmrgINTRINSIC_IDX_ACCESSORS(desc_set, DESC_SET, unsigned) 147901e04c3fSmrgINTRINSIC_IDX_ACCESSORS(binding, BINDING, unsigned) 148001e04c3fSmrgINTRINSIC_IDX_ACCESSORS(component, COMPONENT, unsigned) 148101e04c3fSmrgINTRINSIC_IDX_ACCESSORS(interp_mode, INTERP_MODE, unsigned) 148201e04c3fSmrgINTRINSIC_IDX_ACCESSORS(reduction_op, REDUCTION_OP, unsigned) 148301e04c3fSmrgINTRINSIC_IDX_ACCESSORS(cluster_size, CLUSTER_SIZE, unsigned) 148401e04c3fSmrgINTRINSIC_IDX_ACCESSORS(param_idx, PARAM_IDX, unsigned) 148501e04c3fSmrgINTRINSIC_IDX_ACCESSORS(image_dim, IMAGE_DIM, enum glsl_sampler_dim) 148601e04c3fSmrgINTRINSIC_IDX_ACCESSORS(image_array, IMAGE_ARRAY, bool) 148701e04c3fSmrgINTRINSIC_IDX_ACCESSORS(access, ACCESS, enum gl_access_qualifier) 14887e102996SmayaINTRINSIC_IDX_ACCESSORS(src_access, SRC_ACCESS, enum gl_access_qualifier) 14897e102996SmayaINTRINSIC_IDX_ACCESSORS(dst_access, DST_ACCESS, enum gl_access_qualifier) 149001e04c3fSmrgINTRINSIC_IDX_ACCESSORS(format, FORMAT, unsigned) 14917e102996SmayaINTRINSIC_IDX_ACCESSORS(align_mul, ALIGN_MUL, unsigned) 14927e102996SmayaINTRINSIC_IDX_ACCESSORS(align_offset, ALIGN_OFFSET, unsigned) 14937e102996SmayaINTRINSIC_IDX_ACCESSORS(desc_type, DESC_TYPE, unsigned) 14947e102996Smaya 14957e102996Smayastatic inline void 14967e102996Smayanir_intrinsic_set_align(nir_intrinsic_instr *intrin, 14977e102996Smaya unsigned align_mul, unsigned align_offset) 14987e102996Smaya{ 14997e102996Smaya assert(util_is_power_of_two_nonzero(align_mul)); 15007e102996Smaya assert(align_offset < align_mul); 15017e102996Smaya nir_intrinsic_set_align_mul(intrin, align_mul); 15027e102996Smaya nir_intrinsic_set_align_offset(intrin, align_offset); 15037e102996Smaya} 15047e102996Smaya 15057e102996Smaya/** Returns a simple alignment for a load/store intrinsic offset 15067e102996Smaya * 15077e102996Smaya * Instead of the full mul+offset alignment scheme provided by the ALIGN_MUL 15087e102996Smaya * and ALIGN_OFFSET parameters, this helper takes both into account and 15097e102996Smaya * provides a single simple alignment parameter. The offset X is guaranteed 15107e102996Smaya * to satisfy X % align == 0. 15117e102996Smaya */ 15127e102996Smayastatic inline unsigned 15137e102996Smayanir_intrinsic_align(const nir_intrinsic_instr *intrin) 15147e102996Smaya{ 15157e102996Smaya const unsigned align_mul = nir_intrinsic_align_mul(intrin); 15167e102996Smaya const unsigned align_offset = nir_intrinsic_align_offset(intrin); 15177e102996Smaya assert(align_offset < align_mul); 15187e102996Smaya return align_offset ? 1 << (ffs(align_offset) - 1) : align_mul; 15197e102996Smaya} 15207e102996Smaya 15217e102996Smaya/* Converts a image_deref_* intrinsic into a image_* one */ 15227e102996Smayavoid nir_rewrite_image_intrinsic(nir_intrinsic_instr *instr, 15237e102996Smaya nir_ssa_def *handle, bool bindless); 15247e102996Smaya 15257e102996Smaya/* Determine if an intrinsic can be arbitrarily reordered and eliminated. */ 15267e102996Smayastatic inline bool 15277e102996Smayanir_intrinsic_can_reorder(nir_intrinsic_instr *instr) 15287e102996Smaya{ 15297e102996Smaya const nir_intrinsic_info *info = 15307e102996Smaya &nir_intrinsic_infos[instr->intrinsic]; 15317e102996Smaya return (info->flags & NIR_INTRINSIC_CAN_ELIMINATE) && 15327e102996Smaya (info->flags & NIR_INTRINSIC_CAN_REORDER); 15337e102996Smaya} 153401e04c3fSmrg 153501e04c3fSmrg/** 153601e04c3fSmrg * \group texture information 153701e04c3fSmrg * 153801e04c3fSmrg * This gives semantic information about textures which is useful to the 153901e04c3fSmrg * frontend, the backend, and lowering passes, but not the optimizer. 154001e04c3fSmrg */ 154101e04c3fSmrg 154201e04c3fSmrgtypedef enum { 154301e04c3fSmrg nir_tex_src_coord, 154401e04c3fSmrg nir_tex_src_projector, 154501e04c3fSmrg nir_tex_src_comparator, /* shadow comparator */ 154601e04c3fSmrg nir_tex_src_offset, 154701e04c3fSmrg nir_tex_src_bias, 154801e04c3fSmrg nir_tex_src_lod, 15497e102996Smaya nir_tex_src_min_lod, 155001e04c3fSmrg nir_tex_src_ms_index, /* MSAA sample index */ 155101e04c3fSmrg nir_tex_src_ms_mcs, /* MSAA compression value */ 155201e04c3fSmrg nir_tex_src_ddx, 155301e04c3fSmrg nir_tex_src_ddy, 155401e04c3fSmrg nir_tex_src_texture_deref, /* < deref pointing to the texture */ 155501e04c3fSmrg nir_tex_src_sampler_deref, /* < deref pointing to the sampler */ 155601e04c3fSmrg nir_tex_src_texture_offset, /* < dynamically uniform indirect offset */ 155701e04c3fSmrg nir_tex_src_sampler_offset, /* < dynamically uniform indirect offset */ 15587e102996Smaya nir_tex_src_texture_handle, /* < bindless texture handle */ 15597e102996Smaya nir_tex_src_sampler_handle, /* < bindless sampler handle */ 156001e04c3fSmrg nir_tex_src_plane, /* < selects plane for planar textures */ 156101e04c3fSmrg nir_num_tex_src_types 156201e04c3fSmrg} nir_tex_src_type; 156301e04c3fSmrg 156401e04c3fSmrgtypedef struct { 156501e04c3fSmrg nir_src src; 156601e04c3fSmrg nir_tex_src_type src_type; 156701e04c3fSmrg} nir_tex_src; 156801e04c3fSmrg 156901e04c3fSmrgtypedef enum { 157001e04c3fSmrg nir_texop_tex, /**< Regular texture look-up */ 157101e04c3fSmrg nir_texop_txb, /**< Texture look-up with LOD bias */ 157201e04c3fSmrg nir_texop_txl, /**< Texture look-up with explicit LOD */ 157301e04c3fSmrg nir_texop_txd, /**< Texture look-up with partial derivatives */ 157401e04c3fSmrg nir_texop_txf, /**< Texel fetch with explicit LOD */ 15757e102996Smaya nir_texop_txf_ms, /**< Multisample texture fetch */ 15767e102996Smaya nir_texop_txf_ms_fb, /**< Multisample texture fetch from framebuffer */ 157701e04c3fSmrg nir_texop_txf_ms_mcs, /**< Multisample compression value fetch */ 157801e04c3fSmrg nir_texop_txs, /**< Texture size */ 157901e04c3fSmrg nir_texop_lod, /**< Texture lod query */ 158001e04c3fSmrg nir_texop_tg4, /**< Texture gather */ 158101e04c3fSmrg nir_texop_query_levels, /**< Texture levels query */ 158201e04c3fSmrg nir_texop_texture_samples, /**< Texture samples query */ 158301e04c3fSmrg nir_texop_samples_identical, /**< Query whether all samples are definitely 158401e04c3fSmrg * identical. 158501e04c3fSmrg */ 158601e04c3fSmrg} nir_texop; 158701e04c3fSmrg 158801e04c3fSmrgtypedef struct { 158901e04c3fSmrg nir_instr instr; 159001e04c3fSmrg 159101e04c3fSmrg enum glsl_sampler_dim sampler_dim; 159201e04c3fSmrg nir_alu_type dest_type; 159301e04c3fSmrg 159401e04c3fSmrg nir_texop op; 159501e04c3fSmrg nir_dest dest; 159601e04c3fSmrg nir_tex_src *src; 159701e04c3fSmrg unsigned num_srcs, coord_components; 159801e04c3fSmrg bool is_array, is_shadow; 159901e04c3fSmrg 160001e04c3fSmrg /** 160101e04c3fSmrg * If is_shadow is true, whether this is the old-style shadow that outputs 4 160201e04c3fSmrg * components or the new-style shadow that outputs 1 component. 160301e04c3fSmrg */ 160401e04c3fSmrg bool is_new_style_shadow; 160501e04c3fSmrg 160601e04c3fSmrg /* gather component selector */ 160701e04c3fSmrg unsigned component : 2; 160801e04c3fSmrg 16097e102996Smaya /* gather offsets */ 16107e102996Smaya int8_t tg4_offsets[4][2]; 16117e102996Smaya 16127e102996Smaya /* True if the texture index or handle is not dynamically uniform */ 16137e102996Smaya bool texture_non_uniform; 16147e102996Smaya 16157e102996Smaya /* True if the sampler index or handle is not dynamically uniform */ 16167e102996Smaya bool sampler_non_uniform; 16177e102996Smaya 161801e04c3fSmrg /** The texture index 161901e04c3fSmrg * 162001e04c3fSmrg * If this texture instruction has a nir_tex_src_texture_offset source, 162101e04c3fSmrg * then the texture index is given by texture_index + texture_offset. 162201e04c3fSmrg */ 162301e04c3fSmrg unsigned texture_index; 162401e04c3fSmrg 162501e04c3fSmrg /** The size of the texture array or 0 if it's not an array */ 162601e04c3fSmrg unsigned texture_array_size; 162701e04c3fSmrg 162801e04c3fSmrg /** The sampler index 162901e04c3fSmrg * 163001e04c3fSmrg * The following operations do not require a sampler and, as such, this 163101e04c3fSmrg * field should be ignored: 163201e04c3fSmrg * - nir_texop_txf 163301e04c3fSmrg * - nir_texop_txf_ms 163401e04c3fSmrg * - nir_texop_txs 163501e04c3fSmrg * - nir_texop_lod 163601e04c3fSmrg * - nir_texop_query_levels 163701e04c3fSmrg * - nir_texop_texture_samples 163801e04c3fSmrg * - nir_texop_samples_identical 163901e04c3fSmrg * 164001e04c3fSmrg * If this texture instruction has a nir_tex_src_sampler_offset source, 164101e04c3fSmrg * then the sampler index is given by sampler_index + sampler_offset. 164201e04c3fSmrg */ 164301e04c3fSmrg unsigned sampler_index; 164401e04c3fSmrg} nir_tex_instr; 164501e04c3fSmrg 164601e04c3fSmrgstatic inline unsigned 164701e04c3fSmrgnir_tex_instr_dest_size(const nir_tex_instr *instr) 164801e04c3fSmrg{ 164901e04c3fSmrg switch (instr->op) { 165001e04c3fSmrg case nir_texop_txs: { 165101e04c3fSmrg unsigned ret; 165201e04c3fSmrg switch (instr->sampler_dim) { 165301e04c3fSmrg case GLSL_SAMPLER_DIM_1D: 165401e04c3fSmrg case GLSL_SAMPLER_DIM_BUF: 165501e04c3fSmrg ret = 1; 165601e04c3fSmrg break; 165701e04c3fSmrg case GLSL_SAMPLER_DIM_2D: 165801e04c3fSmrg case GLSL_SAMPLER_DIM_CUBE: 165901e04c3fSmrg case GLSL_SAMPLER_DIM_MS: 166001e04c3fSmrg case GLSL_SAMPLER_DIM_RECT: 166101e04c3fSmrg case GLSL_SAMPLER_DIM_EXTERNAL: 166201e04c3fSmrg case GLSL_SAMPLER_DIM_SUBPASS: 166301e04c3fSmrg ret = 2; 166401e04c3fSmrg break; 166501e04c3fSmrg case GLSL_SAMPLER_DIM_3D: 166601e04c3fSmrg ret = 3; 166701e04c3fSmrg break; 166801e04c3fSmrg default: 166901e04c3fSmrg unreachable("not reached"); 167001e04c3fSmrg } 167101e04c3fSmrg if (instr->is_array) 167201e04c3fSmrg ret++; 167301e04c3fSmrg return ret; 167401e04c3fSmrg } 167501e04c3fSmrg 167601e04c3fSmrg case nir_texop_lod: 167701e04c3fSmrg return 2; 167801e04c3fSmrg 167901e04c3fSmrg case nir_texop_texture_samples: 168001e04c3fSmrg case nir_texop_query_levels: 168101e04c3fSmrg case nir_texop_samples_identical: 168201e04c3fSmrg return 1; 168301e04c3fSmrg 168401e04c3fSmrg default: 168501e04c3fSmrg if (instr->is_shadow && instr->is_new_style_shadow) 168601e04c3fSmrg return 1; 168701e04c3fSmrg 168801e04c3fSmrg return 4; 168901e04c3fSmrg } 169001e04c3fSmrg} 169101e04c3fSmrg 169201e04c3fSmrg/* Returns true if this texture operation queries something about the texture 169301e04c3fSmrg * rather than actually sampling it. 169401e04c3fSmrg */ 169501e04c3fSmrgstatic inline bool 169601e04c3fSmrgnir_tex_instr_is_query(const nir_tex_instr *instr) 169701e04c3fSmrg{ 169801e04c3fSmrg switch (instr->op) { 169901e04c3fSmrg case nir_texop_txs: 170001e04c3fSmrg case nir_texop_lod: 170101e04c3fSmrg case nir_texop_texture_samples: 170201e04c3fSmrg case nir_texop_query_levels: 170301e04c3fSmrg case nir_texop_txf_ms_mcs: 170401e04c3fSmrg return true; 170501e04c3fSmrg case nir_texop_tex: 170601e04c3fSmrg case nir_texop_txb: 170701e04c3fSmrg case nir_texop_txl: 170801e04c3fSmrg case nir_texop_txd: 170901e04c3fSmrg case nir_texop_txf: 171001e04c3fSmrg case nir_texop_txf_ms: 17117e102996Smaya case nir_texop_txf_ms_fb: 171201e04c3fSmrg case nir_texop_tg4: 171301e04c3fSmrg return false; 171401e04c3fSmrg default: 171501e04c3fSmrg unreachable("Invalid texture opcode"); 171601e04c3fSmrg } 171701e04c3fSmrg} 171801e04c3fSmrg 171901e04c3fSmrgstatic inline bool 172001e04c3fSmrgnir_alu_instr_is_comparison(const nir_alu_instr *instr) 172101e04c3fSmrg{ 172201e04c3fSmrg switch (instr->op) { 172301e04c3fSmrg case nir_op_flt: 172401e04c3fSmrg case nir_op_fge: 172501e04c3fSmrg case nir_op_feq: 172601e04c3fSmrg case nir_op_fne: 172701e04c3fSmrg case nir_op_ilt: 172801e04c3fSmrg case nir_op_ult: 172901e04c3fSmrg case nir_op_ige: 173001e04c3fSmrg case nir_op_uge: 173101e04c3fSmrg case nir_op_ieq: 173201e04c3fSmrg case nir_op_ine: 17337e102996Smaya case nir_op_i2b1: 17347e102996Smaya case nir_op_f2b1: 173501e04c3fSmrg case nir_op_inot: 173601e04c3fSmrg case nir_op_fnot: 173701e04c3fSmrg return true; 173801e04c3fSmrg default: 173901e04c3fSmrg return false; 174001e04c3fSmrg } 174101e04c3fSmrg} 174201e04c3fSmrg 174301e04c3fSmrgstatic inline nir_alu_type 174401e04c3fSmrgnir_tex_instr_src_type(const nir_tex_instr *instr, unsigned src) 174501e04c3fSmrg{ 174601e04c3fSmrg switch (instr->src[src].src_type) { 174701e04c3fSmrg case nir_tex_src_coord: 174801e04c3fSmrg switch (instr->op) { 174901e04c3fSmrg case nir_texop_txf: 175001e04c3fSmrg case nir_texop_txf_ms: 17517e102996Smaya case nir_texop_txf_ms_fb: 175201e04c3fSmrg case nir_texop_txf_ms_mcs: 175301e04c3fSmrg case nir_texop_samples_identical: 175401e04c3fSmrg return nir_type_int; 175501e04c3fSmrg 175601e04c3fSmrg default: 175701e04c3fSmrg return nir_type_float; 175801e04c3fSmrg } 175901e04c3fSmrg 176001e04c3fSmrg case nir_tex_src_lod: 176101e04c3fSmrg switch (instr->op) { 176201e04c3fSmrg case nir_texop_txs: 176301e04c3fSmrg case nir_texop_txf: 176401e04c3fSmrg return nir_type_int; 176501e04c3fSmrg 176601e04c3fSmrg default: 176701e04c3fSmrg return nir_type_float; 176801e04c3fSmrg } 176901e04c3fSmrg 177001e04c3fSmrg case nir_tex_src_projector: 177101e04c3fSmrg case nir_tex_src_comparator: 177201e04c3fSmrg case nir_tex_src_bias: 177301e04c3fSmrg case nir_tex_src_ddx: 177401e04c3fSmrg case nir_tex_src_ddy: 177501e04c3fSmrg return nir_type_float; 177601e04c3fSmrg 177701e04c3fSmrg case nir_tex_src_offset: 177801e04c3fSmrg case nir_tex_src_ms_index: 177901e04c3fSmrg case nir_tex_src_texture_offset: 178001e04c3fSmrg case nir_tex_src_sampler_offset: 178101e04c3fSmrg return nir_type_int; 178201e04c3fSmrg 178301e04c3fSmrg default: 178401e04c3fSmrg unreachable("Invalid texture source type"); 178501e04c3fSmrg } 178601e04c3fSmrg} 178701e04c3fSmrg 178801e04c3fSmrgstatic inline unsigned 178901e04c3fSmrgnir_tex_instr_src_size(const nir_tex_instr *instr, unsigned src) 179001e04c3fSmrg{ 179101e04c3fSmrg if (instr->src[src].src_type == nir_tex_src_coord) 179201e04c3fSmrg return instr->coord_components; 179301e04c3fSmrg 179401e04c3fSmrg /* The MCS value is expected to be a vec4 returned by a txf_ms_mcs */ 179501e04c3fSmrg if (instr->src[src].src_type == nir_tex_src_ms_mcs) 179601e04c3fSmrg return 4; 179701e04c3fSmrg 179801e04c3fSmrg if (instr->src[src].src_type == nir_tex_src_ddx || 179901e04c3fSmrg instr->src[src].src_type == nir_tex_src_ddy) { 180001e04c3fSmrg if (instr->is_array) 180101e04c3fSmrg return instr->coord_components - 1; 180201e04c3fSmrg else 180301e04c3fSmrg return instr->coord_components; 180401e04c3fSmrg } 180501e04c3fSmrg 180601e04c3fSmrg /* Usual APIs don't allow cube + offset, but we allow it, with 2 coords for 180701e04c3fSmrg * the offset, since a cube maps to a single face. 180801e04c3fSmrg */ 180901e04c3fSmrg if (instr->src[src].src_type == nir_tex_src_offset) { 181001e04c3fSmrg if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) 181101e04c3fSmrg return 2; 181201e04c3fSmrg else if (instr->is_array) 181301e04c3fSmrg return instr->coord_components - 1; 181401e04c3fSmrg else 181501e04c3fSmrg return instr->coord_components; 181601e04c3fSmrg } 181701e04c3fSmrg 181801e04c3fSmrg return 1; 181901e04c3fSmrg} 182001e04c3fSmrg 182101e04c3fSmrgstatic inline int 182201e04c3fSmrgnir_tex_instr_src_index(const nir_tex_instr *instr, nir_tex_src_type type) 182301e04c3fSmrg{ 182401e04c3fSmrg for (unsigned i = 0; i < instr->num_srcs; i++) 182501e04c3fSmrg if (instr->src[i].src_type == type) 182601e04c3fSmrg return (int) i; 182701e04c3fSmrg 182801e04c3fSmrg return -1; 182901e04c3fSmrg} 183001e04c3fSmrg 183101e04c3fSmrgvoid nir_tex_instr_add_src(nir_tex_instr *tex, 183201e04c3fSmrg nir_tex_src_type src_type, 183301e04c3fSmrg nir_src src); 183401e04c3fSmrg 183501e04c3fSmrgvoid nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx); 183601e04c3fSmrg 18377e102996Smayabool nir_tex_instr_has_explicit_tg4_offsets(nir_tex_instr *tex); 18387e102996Smaya 183901e04c3fSmrgtypedef struct { 184001e04c3fSmrg nir_instr instr; 184101e04c3fSmrg 184201e04c3fSmrg nir_ssa_def def; 18437e102996Smaya 18447e102996Smaya nir_const_value value[]; 184501e04c3fSmrg} nir_load_const_instr; 184601e04c3fSmrg 18477e102996Smaya#define nir_const_load_to_arr(arr, l, m) \ 18487e102996Smaya{ \ 18497e102996Smaya nir_const_value_to_array(arr, l->value, l->def.num_components, m); \ 18507e102996Smaya} while (false); 18517e102996Smaya 185201e04c3fSmrgtypedef enum { 185301e04c3fSmrg nir_jump_return, 185401e04c3fSmrg nir_jump_break, 185501e04c3fSmrg nir_jump_continue, 185601e04c3fSmrg} nir_jump_type; 185701e04c3fSmrg 185801e04c3fSmrgtypedef struct { 185901e04c3fSmrg nir_instr instr; 186001e04c3fSmrg nir_jump_type type; 186101e04c3fSmrg} nir_jump_instr; 186201e04c3fSmrg 186301e04c3fSmrg/* creates a new SSA variable in an undefined state */ 186401e04c3fSmrg 186501e04c3fSmrgtypedef struct { 186601e04c3fSmrg nir_instr instr; 186701e04c3fSmrg nir_ssa_def def; 186801e04c3fSmrg} nir_ssa_undef_instr; 186901e04c3fSmrg 187001e04c3fSmrgtypedef struct { 187101e04c3fSmrg struct exec_node node; 187201e04c3fSmrg 187301e04c3fSmrg /* The predecessor block corresponding to this source */ 187401e04c3fSmrg struct nir_block *pred; 187501e04c3fSmrg 187601e04c3fSmrg nir_src src; 187701e04c3fSmrg} nir_phi_src; 187801e04c3fSmrg 187901e04c3fSmrg#define nir_foreach_phi_src(phi_src, phi) \ 188001e04c3fSmrg foreach_list_typed(nir_phi_src, phi_src, node, &(phi)->srcs) 188101e04c3fSmrg#define nir_foreach_phi_src_safe(phi_src, phi) \ 188201e04c3fSmrg foreach_list_typed_safe(nir_phi_src, phi_src, node, &(phi)->srcs) 188301e04c3fSmrg 188401e04c3fSmrgtypedef struct { 188501e04c3fSmrg nir_instr instr; 188601e04c3fSmrg 188701e04c3fSmrg struct exec_list srcs; /** < list of nir_phi_src */ 188801e04c3fSmrg 188901e04c3fSmrg nir_dest dest; 189001e04c3fSmrg} nir_phi_instr; 189101e04c3fSmrg 189201e04c3fSmrgtypedef struct { 189301e04c3fSmrg struct exec_node node; 189401e04c3fSmrg nir_src src; 189501e04c3fSmrg nir_dest dest; 189601e04c3fSmrg} nir_parallel_copy_entry; 189701e04c3fSmrg 189801e04c3fSmrg#define nir_foreach_parallel_copy_entry(entry, pcopy) \ 189901e04c3fSmrg foreach_list_typed(nir_parallel_copy_entry, entry, node, &(pcopy)->entries) 190001e04c3fSmrg 190101e04c3fSmrgtypedef struct { 190201e04c3fSmrg nir_instr instr; 190301e04c3fSmrg 190401e04c3fSmrg /* A list of nir_parallel_copy_entrys. The sources of all of the 190501e04c3fSmrg * entries are copied to the corresponding destinations "in parallel". 190601e04c3fSmrg * In other words, if we have two entries: a -> b and b -> a, the values 190701e04c3fSmrg * get swapped. 190801e04c3fSmrg */ 190901e04c3fSmrg struct exec_list entries; 191001e04c3fSmrg} nir_parallel_copy_instr; 191101e04c3fSmrg 191201e04c3fSmrgNIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr, 191301e04c3fSmrg type, nir_instr_type_alu) 19147e102996SmayaNIR_DEFINE_CAST(nir_instr_as_deref, nir_instr, nir_deref_instr, instr, 19157e102996Smaya type, nir_instr_type_deref) 191601e04c3fSmrgNIR_DEFINE_CAST(nir_instr_as_call, nir_instr, nir_call_instr, instr, 191701e04c3fSmrg type, nir_instr_type_call) 191801e04c3fSmrgNIR_DEFINE_CAST(nir_instr_as_jump, nir_instr, nir_jump_instr, instr, 191901e04c3fSmrg type, nir_instr_type_jump) 192001e04c3fSmrgNIR_DEFINE_CAST(nir_instr_as_tex, nir_instr, nir_tex_instr, instr, 192101e04c3fSmrg type, nir_instr_type_tex) 192201e04c3fSmrgNIR_DEFINE_CAST(nir_instr_as_intrinsic, nir_instr, nir_intrinsic_instr, instr, 192301e04c3fSmrg type, nir_instr_type_intrinsic) 192401e04c3fSmrgNIR_DEFINE_CAST(nir_instr_as_load_const, nir_instr, nir_load_const_instr, instr, 192501e04c3fSmrg type, nir_instr_type_load_const) 192601e04c3fSmrgNIR_DEFINE_CAST(nir_instr_as_ssa_undef, nir_instr, nir_ssa_undef_instr, instr, 192701e04c3fSmrg type, nir_instr_type_ssa_undef) 192801e04c3fSmrgNIR_DEFINE_CAST(nir_instr_as_phi, nir_instr, nir_phi_instr, instr, 192901e04c3fSmrg type, nir_instr_type_phi) 193001e04c3fSmrgNIR_DEFINE_CAST(nir_instr_as_parallel_copy, nir_instr, 193101e04c3fSmrg nir_parallel_copy_instr, instr, 193201e04c3fSmrg type, nir_instr_type_parallel_copy) 193301e04c3fSmrg 19347e102996Smayatypedef struct { 19357e102996Smaya nir_ssa_def *def; 19367e102996Smaya unsigned comp; 19377e102996Smaya} nir_ssa_scalar; 19387e102996Smaya 19397e102996Smayastatic inline bool 19407e102996Smayanir_ssa_scalar_is_const(nir_ssa_scalar s) 19417e102996Smaya{ 19427e102996Smaya return s.def->parent_instr->type == nir_instr_type_load_const; 19437e102996Smaya} 19447e102996Smaya 19457e102996Smayastatic inline nir_const_value 19467e102996Smayanir_ssa_scalar_as_const_value(nir_ssa_scalar s) 19477e102996Smaya{ 19487e102996Smaya assert(s.comp < s.def->num_components); 19497e102996Smaya nir_load_const_instr *load = nir_instr_as_load_const(s.def->parent_instr); 19507e102996Smaya return load->value[s.comp]; 19517e102996Smaya} 19527e102996Smaya 19537e102996Smaya#define NIR_DEFINE_SCALAR_AS_CONST(type, suffix) \ 19547e102996Smayastatic inline type \ 19557e102996Smayanir_ssa_scalar_as_##suffix(nir_ssa_scalar s) \ 19567e102996Smaya{ \ 19577e102996Smaya return nir_const_value_as_##suffix( \ 19587e102996Smaya nir_ssa_scalar_as_const_value(s), s.def->bit_size); \ 19597e102996Smaya} 19607e102996Smaya 19617e102996SmayaNIR_DEFINE_SCALAR_AS_CONST(int64_t, int) 19627e102996SmayaNIR_DEFINE_SCALAR_AS_CONST(uint64_t, uint) 19637e102996SmayaNIR_DEFINE_SCALAR_AS_CONST(bool, bool) 19647e102996SmayaNIR_DEFINE_SCALAR_AS_CONST(double, float) 19657e102996Smaya 19667e102996Smaya#undef NIR_DEFINE_SCALAR_AS_CONST 19677e102996Smaya 19687e102996Smayastatic inline bool 19697e102996Smayanir_ssa_scalar_is_alu(nir_ssa_scalar s) 19707e102996Smaya{ 19717e102996Smaya return s.def->parent_instr->type == nir_instr_type_alu; 19727e102996Smaya} 19737e102996Smaya 19747e102996Smayastatic inline nir_op 19757e102996Smayanir_ssa_scalar_alu_op(nir_ssa_scalar s) 19767e102996Smaya{ 19777e102996Smaya return nir_instr_as_alu(s.def->parent_instr)->op; 19787e102996Smaya} 19797e102996Smaya 19807e102996Smayastatic inline nir_ssa_scalar 19817e102996Smayanir_ssa_scalar_chase_alu_src(nir_ssa_scalar s, unsigned alu_src_idx) 19827e102996Smaya{ 19837e102996Smaya nir_ssa_scalar out = { NULL, 0 }; 19847e102996Smaya 19857e102996Smaya nir_alu_instr *alu = nir_instr_as_alu(s.def->parent_instr); 19867e102996Smaya assert(alu_src_idx < nir_op_infos[alu->op].num_inputs); 19877e102996Smaya 19887e102996Smaya /* Our component must be written */ 19897e102996Smaya assert(s.comp < s.def->num_components); 19907e102996Smaya assert(alu->dest.write_mask & (1u << s.comp)); 19917e102996Smaya 19927e102996Smaya assert(alu->src[alu_src_idx].src.is_ssa); 19937e102996Smaya out.def = alu->src[alu_src_idx].src.ssa; 19947e102996Smaya 19957e102996Smaya if (nir_op_infos[alu->op].input_sizes[alu_src_idx] == 0) { 19967e102996Smaya /* The ALU src is unsized so the source component follows the 19977e102996Smaya * destination component. 19987e102996Smaya */ 19997e102996Smaya out.comp = alu->src[alu_src_idx].swizzle[s.comp]; 20007e102996Smaya } else { 20017e102996Smaya /* This is a sized source so all source components work together to 20027e102996Smaya * produce all the destination components. Since we need to return a 20037e102996Smaya * scalar, this only works if the source is a scalar. 20047e102996Smaya */ 20057e102996Smaya assert(nir_op_infos[alu->op].input_sizes[alu_src_idx] == 1); 20067e102996Smaya out.comp = alu->src[alu_src_idx].swizzle[0]; 20077e102996Smaya } 20087e102996Smaya assert(out.comp < out.def->num_components); 20097e102996Smaya 20107e102996Smaya return out; 20117e102996Smaya} 20127e102996Smaya 201301e04c3fSmrg/* 201401e04c3fSmrg * Control flow 201501e04c3fSmrg * 201601e04c3fSmrg * Control flow consists of a tree of control flow nodes, which include 201701e04c3fSmrg * if-statements and loops. The leaves of the tree are basic blocks, lists of 201801e04c3fSmrg * instructions that always run start-to-finish. Each basic block also keeps 201901e04c3fSmrg * track of its successors (blocks which may run immediately after the current 202001e04c3fSmrg * block) and predecessors (blocks which could have run immediately before the 202101e04c3fSmrg * current block). Each function also has a start block and an end block which 202201e04c3fSmrg * all return statements point to (which is always empty). Together, all the 202301e04c3fSmrg * blocks with their predecessors and successors make up the control flow 202401e04c3fSmrg * graph (CFG) of the function. There are helpers that modify the tree of 202501e04c3fSmrg * control flow nodes while modifying the CFG appropriately; these should be 202601e04c3fSmrg * used instead of modifying the tree directly. 202701e04c3fSmrg */ 202801e04c3fSmrg 202901e04c3fSmrgtypedef enum { 203001e04c3fSmrg nir_cf_node_block, 203101e04c3fSmrg nir_cf_node_if, 203201e04c3fSmrg nir_cf_node_loop, 203301e04c3fSmrg nir_cf_node_function 203401e04c3fSmrg} nir_cf_node_type; 203501e04c3fSmrg 203601e04c3fSmrgtypedef struct nir_cf_node { 203701e04c3fSmrg struct exec_node node; 203801e04c3fSmrg nir_cf_node_type type; 203901e04c3fSmrg struct nir_cf_node *parent; 204001e04c3fSmrg} nir_cf_node; 204101e04c3fSmrg 204201e04c3fSmrgtypedef struct nir_block { 204301e04c3fSmrg nir_cf_node cf_node; 204401e04c3fSmrg 204501e04c3fSmrg struct exec_list instr_list; /** < list of nir_instr */ 204601e04c3fSmrg 204701e04c3fSmrg /** generic block index; generated by nir_index_blocks */ 204801e04c3fSmrg unsigned index; 204901e04c3fSmrg 205001e04c3fSmrg /* 205101e04c3fSmrg * Each block can only have up to 2 successors, so we put them in a simple 205201e04c3fSmrg * array - no need for anything more complicated. 205301e04c3fSmrg */ 205401e04c3fSmrg struct nir_block *successors[2]; 205501e04c3fSmrg 205601e04c3fSmrg /* Set of nir_block predecessors in the CFG */ 205701e04c3fSmrg struct set *predecessors; 205801e04c3fSmrg 205901e04c3fSmrg /* 206001e04c3fSmrg * this node's immediate dominator in the dominance tree - set to NULL for 206101e04c3fSmrg * the start block. 206201e04c3fSmrg */ 206301e04c3fSmrg struct nir_block *imm_dom; 206401e04c3fSmrg 206501e04c3fSmrg /* This node's children in the dominance tree */ 206601e04c3fSmrg unsigned num_dom_children; 206701e04c3fSmrg struct nir_block **dom_children; 206801e04c3fSmrg 206901e04c3fSmrg /* Set of nir_blocks on the dominance frontier of this block */ 207001e04c3fSmrg struct set *dom_frontier; 207101e04c3fSmrg 207201e04c3fSmrg /* 207301e04c3fSmrg * These two indices have the property that dom_{pre,post}_index for each 207401e04c3fSmrg * child of this block in the dominance tree will always be between 207501e04c3fSmrg * dom_pre_index and dom_post_index for this block, which makes testing if 207601e04c3fSmrg * a given block is dominated by another block an O(1) operation. 207701e04c3fSmrg */ 207801e04c3fSmrg unsigned dom_pre_index, dom_post_index; 207901e04c3fSmrg 208001e04c3fSmrg /* live in and out for this block; used for liveness analysis */ 208101e04c3fSmrg BITSET_WORD *live_in; 208201e04c3fSmrg BITSET_WORD *live_out; 208301e04c3fSmrg} nir_block; 208401e04c3fSmrg 208501e04c3fSmrgstatic inline nir_instr * 208601e04c3fSmrgnir_block_first_instr(nir_block *block) 208701e04c3fSmrg{ 208801e04c3fSmrg struct exec_node *head = exec_list_get_head(&block->instr_list); 208901e04c3fSmrg return exec_node_data(nir_instr, head, node); 209001e04c3fSmrg} 209101e04c3fSmrg 209201e04c3fSmrgstatic inline nir_instr * 209301e04c3fSmrgnir_block_last_instr(nir_block *block) 209401e04c3fSmrg{ 209501e04c3fSmrg struct exec_node *tail = exec_list_get_tail(&block->instr_list); 209601e04c3fSmrg return exec_node_data(nir_instr, tail, node); 209701e04c3fSmrg} 209801e04c3fSmrg 209901e04c3fSmrgstatic inline bool 210001e04c3fSmrgnir_block_ends_in_jump(nir_block *block) 210101e04c3fSmrg{ 210201e04c3fSmrg return !exec_list_is_empty(&block->instr_list) && 210301e04c3fSmrg nir_block_last_instr(block)->type == nir_instr_type_jump; 210401e04c3fSmrg} 210501e04c3fSmrg 210601e04c3fSmrg#define nir_foreach_instr(instr, block) \ 210701e04c3fSmrg foreach_list_typed(nir_instr, instr, node, &(block)->instr_list) 210801e04c3fSmrg#define nir_foreach_instr_reverse(instr, block) \ 210901e04c3fSmrg foreach_list_typed_reverse(nir_instr, instr, node, &(block)->instr_list) 211001e04c3fSmrg#define nir_foreach_instr_safe(instr, block) \ 211101e04c3fSmrg foreach_list_typed_safe(nir_instr, instr, node, &(block)->instr_list) 211201e04c3fSmrg#define nir_foreach_instr_reverse_safe(instr, block) \ 211301e04c3fSmrg foreach_list_typed_reverse_safe(nir_instr, instr, node, &(block)->instr_list) 211401e04c3fSmrg 21157e102996Smayatypedef enum { 21167e102996Smaya nir_selection_control_none = 0x0, 21177e102996Smaya nir_selection_control_flatten = 0x1, 21187e102996Smaya nir_selection_control_dont_flatten = 0x2, 21197e102996Smaya} nir_selection_control; 21207e102996Smaya 212101e04c3fSmrgtypedef struct nir_if { 212201e04c3fSmrg nir_cf_node cf_node; 212301e04c3fSmrg nir_src condition; 21247e102996Smaya nir_selection_control control; 212501e04c3fSmrg 212601e04c3fSmrg struct exec_list then_list; /** < list of nir_cf_node */ 212701e04c3fSmrg struct exec_list else_list; /** < list of nir_cf_node */ 212801e04c3fSmrg} nir_if; 212901e04c3fSmrg 213001e04c3fSmrgtypedef struct { 213101e04c3fSmrg nir_if *nif; 213201e04c3fSmrg 21337e102996Smaya /** Instruction that generates nif::condition. */ 213401e04c3fSmrg nir_instr *conditional_instr; 213501e04c3fSmrg 21367e102996Smaya /** Block within ::nif that has the break instruction. */ 213701e04c3fSmrg nir_block *break_block; 21387e102996Smaya 21397e102996Smaya /** Last block for the then- or else-path that does not contain the break. */ 214001e04c3fSmrg nir_block *continue_from_block; 214101e04c3fSmrg 21427e102996Smaya /** True when ::break_block is in the else-path of ::nif. */ 214301e04c3fSmrg bool continue_from_then; 21447e102996Smaya bool induction_rhs; 21457e102996Smaya 21467e102996Smaya /* This is true if the terminators exact trip count is unknown. For 21477e102996Smaya * example: 21487e102996Smaya * 21497e102996Smaya * for (int i = 0; i < imin(x, 4); i++) 21507e102996Smaya * ... 21517e102996Smaya * 21527e102996Smaya * Here loop analysis would have set a max_trip_count of 4 however we dont 21537e102996Smaya * know for sure that this is the exact trip count. 21547e102996Smaya */ 21557e102996Smaya bool exact_trip_count_unknown; 215601e04c3fSmrg 215701e04c3fSmrg struct list_head loop_terminator_link; 215801e04c3fSmrg} nir_loop_terminator; 215901e04c3fSmrg 216001e04c3fSmrgtypedef struct { 21617e102996Smaya /* Estimated cost (in number of instructions) of the loop */ 21627e102996Smaya unsigned instr_cost; 21637e102996Smaya 21647e102996Smaya /* Guessed trip count based on array indexing */ 21657e102996Smaya unsigned guessed_trip_count; 216601e04c3fSmrg 21677e102996Smaya /* Maximum number of times the loop is run (if known) */ 21687e102996Smaya unsigned max_trip_count; 21697e102996Smaya 21707e102996Smaya /* Do we know the exact number of times the loop will be run */ 21717e102996Smaya bool exact_trip_count_known; 217201e04c3fSmrg 217301e04c3fSmrg /* Unroll the loop regardless of its size */ 217401e04c3fSmrg bool force_unroll; 217501e04c3fSmrg 217601e04c3fSmrg /* Does the loop contain complex loop terminators, continues or other 217701e04c3fSmrg * complex behaviours? If this is true we can't rely on 217801e04c3fSmrg * loop_terminator_list to be complete or accurate. 217901e04c3fSmrg */ 218001e04c3fSmrg bool complex_loop; 218101e04c3fSmrg 218201e04c3fSmrg nir_loop_terminator *limiting_terminator; 218301e04c3fSmrg 218401e04c3fSmrg /* A list of loop_terminators terminating this loop. */ 218501e04c3fSmrg struct list_head loop_terminator_list; 218601e04c3fSmrg} nir_loop_info; 218701e04c3fSmrg 21887e102996Smayatypedef enum { 21897e102996Smaya nir_loop_control_none = 0x0, 21907e102996Smaya nir_loop_control_unroll = 0x1, 21917e102996Smaya nir_loop_control_dont_unroll = 0x2, 21927e102996Smaya} nir_loop_control; 21937e102996Smaya 219401e04c3fSmrgtypedef struct { 219501e04c3fSmrg nir_cf_node cf_node; 219601e04c3fSmrg 219701e04c3fSmrg struct exec_list body; /** < list of nir_cf_node */ 219801e04c3fSmrg 219901e04c3fSmrg nir_loop_info *info; 22007e102996Smaya nir_loop_control control; 22017e102996Smaya bool partially_unrolled; 220201e04c3fSmrg} nir_loop; 220301e04c3fSmrg 220401e04c3fSmrg/** 220501e04c3fSmrg * Various bits of metadata that can may be created or required by 220601e04c3fSmrg * optimization and analysis passes 220701e04c3fSmrg */ 220801e04c3fSmrgtypedef enum { 220901e04c3fSmrg nir_metadata_none = 0x0, 221001e04c3fSmrg nir_metadata_block_index = 0x1, 221101e04c3fSmrg nir_metadata_dominance = 0x2, 221201e04c3fSmrg nir_metadata_live_ssa_defs = 0x4, 221301e04c3fSmrg nir_metadata_not_properly_reset = 0x8, 221401e04c3fSmrg nir_metadata_loop_analysis = 0x10, 221501e04c3fSmrg} nir_metadata; 221601e04c3fSmrg 221701e04c3fSmrgtypedef struct { 221801e04c3fSmrg nir_cf_node cf_node; 221901e04c3fSmrg 222001e04c3fSmrg /** pointer to the function of which this is an implementation */ 222101e04c3fSmrg struct nir_function *function; 222201e04c3fSmrg 222301e04c3fSmrg struct exec_list body; /** < list of nir_cf_node */ 222401e04c3fSmrg 222501e04c3fSmrg nir_block *end_block; 222601e04c3fSmrg 222701e04c3fSmrg /** list for all local variables in the function */ 222801e04c3fSmrg struct exec_list locals; 222901e04c3fSmrg 223001e04c3fSmrg /** list of local registers in the function */ 223101e04c3fSmrg struct exec_list registers; 223201e04c3fSmrg 223301e04c3fSmrg /** next available local register index */ 223401e04c3fSmrg unsigned reg_alloc; 223501e04c3fSmrg 223601e04c3fSmrg /** next available SSA value index */ 223701e04c3fSmrg unsigned ssa_alloc; 223801e04c3fSmrg 223901e04c3fSmrg /* total number of basic blocks, only valid when block_index_dirty = false */ 224001e04c3fSmrg unsigned num_blocks; 224101e04c3fSmrg 224201e04c3fSmrg nir_metadata valid_metadata; 224301e04c3fSmrg} nir_function_impl; 224401e04c3fSmrg 224501e04c3fSmrgATTRIBUTE_RETURNS_NONNULL static inline nir_block * 224601e04c3fSmrgnir_start_block(nir_function_impl *impl) 224701e04c3fSmrg{ 224801e04c3fSmrg return (nir_block *) impl->body.head_sentinel.next; 224901e04c3fSmrg} 225001e04c3fSmrg 225101e04c3fSmrgATTRIBUTE_RETURNS_NONNULL static inline nir_block * 225201e04c3fSmrgnir_impl_last_block(nir_function_impl *impl) 225301e04c3fSmrg{ 225401e04c3fSmrg return (nir_block *) impl->body.tail_sentinel.prev; 225501e04c3fSmrg} 225601e04c3fSmrg 225701e04c3fSmrgstatic inline nir_cf_node * 225801e04c3fSmrgnir_cf_node_next(nir_cf_node *node) 225901e04c3fSmrg{ 226001e04c3fSmrg struct exec_node *next = exec_node_get_next(&node->node); 226101e04c3fSmrg if (exec_node_is_tail_sentinel(next)) 226201e04c3fSmrg return NULL; 226301e04c3fSmrg else 226401e04c3fSmrg return exec_node_data(nir_cf_node, next, node); 226501e04c3fSmrg} 226601e04c3fSmrg 226701e04c3fSmrgstatic inline nir_cf_node * 226801e04c3fSmrgnir_cf_node_prev(nir_cf_node *node) 226901e04c3fSmrg{ 227001e04c3fSmrg struct exec_node *prev = exec_node_get_prev(&node->node); 227101e04c3fSmrg if (exec_node_is_head_sentinel(prev)) 227201e04c3fSmrg return NULL; 227301e04c3fSmrg else 227401e04c3fSmrg return exec_node_data(nir_cf_node, prev, node); 227501e04c3fSmrg} 227601e04c3fSmrg 227701e04c3fSmrgstatic inline bool 227801e04c3fSmrgnir_cf_node_is_first(const nir_cf_node *node) 227901e04c3fSmrg{ 228001e04c3fSmrg return exec_node_is_head_sentinel(node->node.prev); 228101e04c3fSmrg} 228201e04c3fSmrg 228301e04c3fSmrgstatic inline bool 228401e04c3fSmrgnir_cf_node_is_last(const nir_cf_node *node) 228501e04c3fSmrg{ 228601e04c3fSmrg return exec_node_is_tail_sentinel(node->node.next); 228701e04c3fSmrg} 228801e04c3fSmrg 228901e04c3fSmrgNIR_DEFINE_CAST(nir_cf_node_as_block, nir_cf_node, nir_block, cf_node, 229001e04c3fSmrg type, nir_cf_node_block) 229101e04c3fSmrgNIR_DEFINE_CAST(nir_cf_node_as_if, nir_cf_node, nir_if, cf_node, 229201e04c3fSmrg type, nir_cf_node_if) 229301e04c3fSmrgNIR_DEFINE_CAST(nir_cf_node_as_loop, nir_cf_node, nir_loop, cf_node, 229401e04c3fSmrg type, nir_cf_node_loop) 229501e04c3fSmrgNIR_DEFINE_CAST(nir_cf_node_as_function, nir_cf_node, 229601e04c3fSmrg nir_function_impl, cf_node, type, nir_cf_node_function) 229701e04c3fSmrg 229801e04c3fSmrgstatic inline nir_block * 229901e04c3fSmrgnir_if_first_then_block(nir_if *if_stmt) 230001e04c3fSmrg{ 230101e04c3fSmrg struct exec_node *head = exec_list_get_head(&if_stmt->then_list); 230201e04c3fSmrg return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node)); 230301e04c3fSmrg} 230401e04c3fSmrg 230501e04c3fSmrgstatic inline nir_block * 230601e04c3fSmrgnir_if_last_then_block(nir_if *if_stmt) 230701e04c3fSmrg{ 230801e04c3fSmrg struct exec_node *tail = exec_list_get_tail(&if_stmt->then_list); 230901e04c3fSmrg return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node)); 231001e04c3fSmrg} 231101e04c3fSmrg 231201e04c3fSmrgstatic inline nir_block * 231301e04c3fSmrgnir_if_first_else_block(nir_if *if_stmt) 231401e04c3fSmrg{ 231501e04c3fSmrg struct exec_node *head = exec_list_get_head(&if_stmt->else_list); 231601e04c3fSmrg return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node)); 231701e04c3fSmrg} 231801e04c3fSmrg 231901e04c3fSmrgstatic inline nir_block * 232001e04c3fSmrgnir_if_last_else_block(nir_if *if_stmt) 232101e04c3fSmrg{ 232201e04c3fSmrg struct exec_node *tail = exec_list_get_tail(&if_stmt->else_list); 232301e04c3fSmrg return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node)); 232401e04c3fSmrg} 232501e04c3fSmrg 232601e04c3fSmrgstatic inline nir_block * 232701e04c3fSmrgnir_loop_first_block(nir_loop *loop) 232801e04c3fSmrg{ 232901e04c3fSmrg struct exec_node *head = exec_list_get_head(&loop->body); 233001e04c3fSmrg return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node)); 233101e04c3fSmrg} 233201e04c3fSmrg 233301e04c3fSmrgstatic inline nir_block * 233401e04c3fSmrgnir_loop_last_block(nir_loop *loop) 233501e04c3fSmrg{ 233601e04c3fSmrg struct exec_node *tail = exec_list_get_tail(&loop->body); 233701e04c3fSmrg return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node)); 233801e04c3fSmrg} 233901e04c3fSmrg 23407e102996Smaya/** 23417e102996Smaya * Return true if this list of cf_nodes contains a single empty block. 23427e102996Smaya */ 23437e102996Smayastatic inline bool 23447e102996Smayanir_cf_list_is_empty_block(struct exec_list *cf_list) 23457e102996Smaya{ 23467e102996Smaya if (exec_list_is_singular(cf_list)) { 23477e102996Smaya struct exec_node *head = exec_list_get_head(cf_list); 23487e102996Smaya nir_block *block = 23497e102996Smaya nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node)); 23507e102996Smaya return exec_list_is_empty(&block->instr_list); 23517e102996Smaya } 23527e102996Smaya return false; 23537e102996Smaya} 23547e102996Smaya 235501e04c3fSmrgtypedef struct { 235601e04c3fSmrg uint8_t num_components; 235701e04c3fSmrg uint8_t bit_size; 235801e04c3fSmrg} nir_parameter; 235901e04c3fSmrg 236001e04c3fSmrgtypedef struct nir_function { 236101e04c3fSmrg struct exec_node node; 236201e04c3fSmrg 236301e04c3fSmrg const char *name; 236401e04c3fSmrg struct nir_shader *shader; 236501e04c3fSmrg 236601e04c3fSmrg unsigned num_params; 236701e04c3fSmrg nir_parameter *params; 236801e04c3fSmrg 236901e04c3fSmrg /** The implementation of this function. 237001e04c3fSmrg * 237101e04c3fSmrg * If the function is only declared and not implemented, this is NULL. 237201e04c3fSmrg */ 237301e04c3fSmrg nir_function_impl *impl; 23747e102996Smaya 23757e102996Smaya bool is_entrypoint; 237601e04c3fSmrg} nir_function; 237701e04c3fSmrg 23787e102996Smayatypedef enum { 23797e102996Smaya nir_lower_imul64 = (1 << 0), 23807e102996Smaya nir_lower_isign64 = (1 << 1), 23817e102996Smaya /** Lower all int64 modulus and division opcodes */ 23827e102996Smaya nir_lower_divmod64 = (1 << 2), 23837e102996Smaya /** Lower all 64-bit umul_high and imul_high opcodes */ 23847e102996Smaya nir_lower_imul_high64 = (1 << 3), 23857e102996Smaya nir_lower_mov64 = (1 << 4), 23867e102996Smaya nir_lower_icmp64 = (1 << 5), 23877e102996Smaya nir_lower_iadd64 = (1 << 6), 23887e102996Smaya nir_lower_iabs64 = (1 << 7), 23897e102996Smaya nir_lower_ineg64 = (1 << 8), 23907e102996Smaya nir_lower_logic64 = (1 << 9), 23917e102996Smaya nir_lower_minmax64 = (1 << 10), 23927e102996Smaya nir_lower_shift64 = (1 << 11), 23937e102996Smaya nir_lower_imul_2x32_64 = (1 << 12), 23947e102996Smaya nir_lower_extract64 = (1 << 13), 23957e102996Smaya} nir_lower_int64_options; 23967e102996Smaya 23977e102996Smayatypedef enum { 23987e102996Smaya nir_lower_drcp = (1 << 0), 23997e102996Smaya nir_lower_dsqrt = (1 << 1), 24007e102996Smaya nir_lower_drsq = (1 << 2), 24017e102996Smaya nir_lower_dtrunc = (1 << 3), 24027e102996Smaya nir_lower_dfloor = (1 << 4), 24037e102996Smaya nir_lower_dceil = (1 << 5), 24047e102996Smaya nir_lower_dfract = (1 << 6), 24057e102996Smaya nir_lower_dround_even = (1 << 7), 24067e102996Smaya nir_lower_dmod = (1 << 8), 24077e102996Smaya nir_lower_fp64_full_software = (1 << 9), 24087e102996Smaya} nir_lower_doubles_options; 24097e102996Smaya 241001e04c3fSmrgtypedef struct nir_shader_compiler_options { 241101e04c3fSmrg bool lower_fdiv; 241201e04c3fSmrg bool lower_ffma; 241301e04c3fSmrg bool fuse_ffma; 24147e102996Smaya bool lower_flrp16; 241501e04c3fSmrg bool lower_flrp32; 241601e04c3fSmrg /** Lowers flrp when it does not support doubles */ 241701e04c3fSmrg bool lower_flrp64; 241801e04c3fSmrg bool lower_fpow; 241901e04c3fSmrg bool lower_fsat; 242001e04c3fSmrg bool lower_fsqrt; 24217e102996Smaya bool lower_fmod16; 242201e04c3fSmrg bool lower_fmod32; 242301e04c3fSmrg bool lower_fmod64; 242401e04c3fSmrg /** Lowers ibitfield_extract/ubitfield_extract to ibfe/ubfe. */ 242501e04c3fSmrg bool lower_bitfield_extract; 242601e04c3fSmrg /** Lowers ibitfield_extract/ubitfield_extract to bfm, compares, shifts. */ 242701e04c3fSmrg bool lower_bitfield_extract_to_shifts; 242801e04c3fSmrg /** Lowers bitfield_insert to bfi/bfm */ 242901e04c3fSmrg bool lower_bitfield_insert; 243001e04c3fSmrg /** Lowers bitfield_insert to bfm, compares, and shifts. */ 243101e04c3fSmrg bool lower_bitfield_insert_to_shifts; 243201e04c3fSmrg /** Lowers bitfield_reverse to shifts. */ 243301e04c3fSmrg bool lower_bitfield_reverse; 243401e04c3fSmrg /** Lowers bit_count to shifts. */ 243501e04c3fSmrg bool lower_bit_count; 243601e04c3fSmrg /** Lowers bfm to shifts and subtracts. */ 243701e04c3fSmrg bool lower_bfm; 243801e04c3fSmrg /** Lowers ifind_msb to compare and ufind_msb */ 243901e04c3fSmrg bool lower_ifind_msb; 244001e04c3fSmrg /** Lowers find_lsb to ufind_msb and logic ops */ 244101e04c3fSmrg bool lower_find_lsb; 244201e04c3fSmrg bool lower_uadd_carry; 244301e04c3fSmrg bool lower_usub_borrow; 244401e04c3fSmrg /** Lowers imul_high/umul_high to 16-bit multiplies and carry operations. */ 244501e04c3fSmrg bool lower_mul_high; 244601e04c3fSmrg /** lowers fneg and ineg to fsub and isub. */ 244701e04c3fSmrg bool lower_negate; 244801e04c3fSmrg /** lowers fsub and isub to fadd+fneg and iadd+ineg. */ 244901e04c3fSmrg bool lower_sub; 245001e04c3fSmrg 245101e04c3fSmrg /* lower {slt,sge,seq,sne} to {flt,fge,feq,fne} + b2f: */ 245201e04c3fSmrg bool lower_scmp; 245301e04c3fSmrg 245401e04c3fSmrg /** enables rules to lower idiv by power-of-two: */ 245501e04c3fSmrg bool lower_idiv; 245601e04c3fSmrg 24577e102996Smaya /** enables rules to lower isign to imin+imax */ 24587e102996Smaya bool lower_isign; 24597e102996Smaya 24607e102996Smaya /** enables rules to lower fsign to fsub and flt */ 24617e102996Smaya bool lower_fsign; 246201e04c3fSmrg 246301e04c3fSmrg /* Does the native fdot instruction replicate its result for four 246401e04c3fSmrg * components? If so, then opt_algebraic_late will turn all fdotN 246501e04c3fSmrg * instructions into fdot_replicatedN instructions. 246601e04c3fSmrg */ 246701e04c3fSmrg bool fdot_replicates; 246801e04c3fSmrg 24697e102996Smaya /** lowers ffloor to fsub+ffract: */ 24707e102996Smaya bool lower_ffloor; 24717e102996Smaya 247201e04c3fSmrg /** lowers ffract to fsub+ffloor: */ 247301e04c3fSmrg bool lower_ffract; 247401e04c3fSmrg 24757e102996Smaya /** lowers fceil to fneg+ffloor+fneg: */ 24767e102996Smaya bool lower_fceil; 24777e102996Smaya 24787e102996Smaya bool lower_ftrunc; 24797e102996Smaya 248001e04c3fSmrg bool lower_ldexp; 248101e04c3fSmrg 248201e04c3fSmrg bool lower_pack_half_2x16; 248301e04c3fSmrg bool lower_pack_unorm_2x16; 248401e04c3fSmrg bool lower_pack_snorm_2x16; 248501e04c3fSmrg bool lower_pack_unorm_4x8; 248601e04c3fSmrg bool lower_pack_snorm_4x8; 248701e04c3fSmrg bool lower_unpack_half_2x16; 248801e04c3fSmrg bool lower_unpack_unorm_2x16; 248901e04c3fSmrg bool lower_unpack_snorm_2x16; 249001e04c3fSmrg bool lower_unpack_unorm_4x8; 249101e04c3fSmrg bool lower_unpack_snorm_4x8; 249201e04c3fSmrg 249301e04c3fSmrg bool lower_extract_byte; 249401e04c3fSmrg bool lower_extract_word; 249501e04c3fSmrg 249601e04c3fSmrg bool lower_all_io_to_temps; 24977e102996Smaya bool lower_all_io_to_elements; 249801e04c3fSmrg 249901e04c3fSmrg /** 250001e04c3fSmrg * Does the driver support real 32-bit integers? (Otherwise, integers 250101e04c3fSmrg * are simulated by floats.) 250201e04c3fSmrg */ 250301e04c3fSmrg bool native_integers; 250401e04c3fSmrg 250501e04c3fSmrg /* Indicates that the driver only has zero-based vertex id */ 250601e04c3fSmrg bool vertex_id_zero_based; 250701e04c3fSmrg 250801e04c3fSmrg /** 250901e04c3fSmrg * If enabled, gl_BaseVertex will be lowered as: 251001e04c3fSmrg * is_indexed_draw (~0/0) & firstvertex 251101e04c3fSmrg */ 251201e04c3fSmrg bool lower_base_vertex; 251301e04c3fSmrg 251401e04c3fSmrg /** 251501e04c3fSmrg * If enabled, gl_HelperInvocation will be lowered as: 251601e04c3fSmrg * 251701e04c3fSmrg * !((1 << sample_id) & sample_mask_in)) 251801e04c3fSmrg * 251901e04c3fSmrg * This depends on some possibly hw implementation details, which may 252001e04c3fSmrg * not be true for all hw. In particular that the FS is only executed 252101e04c3fSmrg * for covered samples or for helper invocations. So, do not blindly 252201e04c3fSmrg * enable this option. 252301e04c3fSmrg * 252401e04c3fSmrg * Note: See also issue #22 in ARB_shader_image_load_store 252501e04c3fSmrg */ 252601e04c3fSmrg bool lower_helper_invocation; 252701e04c3fSmrg 25287e102996Smaya /** 25297e102996Smaya * Convert gl_SampleMaskIn to gl_HelperInvocation as follows: 25307e102996Smaya * 25317e102996Smaya * gl_SampleMaskIn == 0 ---> gl_HelperInvocation 25327e102996Smaya * gl_SampleMaskIn != 0 ---> !gl_HelperInvocation 25337e102996Smaya */ 25347e102996Smaya bool optimize_sample_mask_in; 25357e102996Smaya 253601e04c3fSmrg bool lower_cs_local_index_from_id; 25377e102996Smaya bool lower_cs_local_id_from_index; 253801e04c3fSmrg 253901e04c3fSmrg bool lower_device_index_to_zero; 254001e04c3fSmrg 254101e04c3fSmrg /* Set if nir_lower_wpos_ytransform() should also invert gl_PointCoord. */ 254201e04c3fSmrg bool lower_wpos_pntc; 254301e04c3fSmrg 25447e102996Smaya bool lower_hadd; 25457e102996Smaya bool lower_add_sat; 25467e102996Smaya 254701e04c3fSmrg /** 254801e04c3fSmrg * Should nir_lower_io() create load_interpolated_input intrinsics? 254901e04c3fSmrg * 255001e04c3fSmrg * If not, it generates regular load_input intrinsics and interpolation 255101e04c3fSmrg * information must be inferred from the list of input nir_variables. 255201e04c3fSmrg */ 255301e04c3fSmrg bool use_interpolated_input_intrinsics; 255401e04c3fSmrg 25557e102996Smaya /* Lowers when 32x32->64 bit multiplication is not supported */ 25567e102996Smaya bool lower_mul_2x32_64; 25577e102996Smaya 255801e04c3fSmrg unsigned max_unroll_iterations; 25597e102996Smaya 25607e102996Smaya nir_lower_int64_options lower_int64_options; 25617e102996Smaya nir_lower_doubles_options lower_doubles_options; 256201e04c3fSmrg} nir_shader_compiler_options; 256301e04c3fSmrg 256401e04c3fSmrgtypedef struct nir_shader { 256501e04c3fSmrg /** list of uniforms (nir_variable) */ 256601e04c3fSmrg struct exec_list uniforms; 256701e04c3fSmrg 256801e04c3fSmrg /** list of inputs (nir_variable) */ 256901e04c3fSmrg struct exec_list inputs; 257001e04c3fSmrg 257101e04c3fSmrg /** list of outputs (nir_variable) */ 257201e04c3fSmrg struct exec_list outputs; 257301e04c3fSmrg 257401e04c3fSmrg /** list of shared compute variables (nir_variable) */ 257501e04c3fSmrg struct exec_list shared; 257601e04c3fSmrg 257701e04c3fSmrg /** Set of driver-specific options for the shader. 257801e04c3fSmrg * 257901e04c3fSmrg * The memory for the options is expected to be kept in a single static 258001e04c3fSmrg * copy by the driver. 258101e04c3fSmrg */ 258201e04c3fSmrg const struct nir_shader_compiler_options *options; 258301e04c3fSmrg 258401e04c3fSmrg /** Various bits of compile-time information about a given shader */ 258501e04c3fSmrg struct shader_info info; 258601e04c3fSmrg 258701e04c3fSmrg /** list of global variables in the shader (nir_variable) */ 258801e04c3fSmrg struct exec_list globals; 258901e04c3fSmrg 259001e04c3fSmrg /** list of system value variables in the shader (nir_variable) */ 259101e04c3fSmrg struct exec_list system_values; 259201e04c3fSmrg 259301e04c3fSmrg struct exec_list functions; /** < list of nir_function */ 259401e04c3fSmrg 259501e04c3fSmrg /** 259601e04c3fSmrg * the highest index a load_input_*, load_uniform_*, etc. intrinsic can 259701e04c3fSmrg * access plus one 259801e04c3fSmrg */ 259901e04c3fSmrg unsigned num_inputs, num_uniforms, num_outputs, num_shared; 260001e04c3fSmrg 26017e102996Smaya /** Size in bytes of required scratch space */ 26027e102996Smaya unsigned scratch_size; 26037e102996Smaya 260401e04c3fSmrg /** Constant data associated with this shader. 260501e04c3fSmrg * 260601e04c3fSmrg * Constant data is loaded through load_constant intrinsics. See also 260701e04c3fSmrg * nir_opt_large_constants. 260801e04c3fSmrg */ 260901e04c3fSmrg void *constant_data; 261001e04c3fSmrg unsigned constant_data_size; 261101e04c3fSmrg} nir_shader; 261201e04c3fSmrg 26137e102996Smaya#define nir_foreach_function(func, shader) \ 26147e102996Smaya foreach_list_typed(nir_function, func, node, &(shader)->functions) 26157e102996Smaya 261601e04c3fSmrgstatic inline nir_function_impl * 261701e04c3fSmrgnir_shader_get_entrypoint(nir_shader *shader) 261801e04c3fSmrg{ 26197e102996Smaya nir_function *func = NULL; 26207e102996Smaya 26217e102996Smaya nir_foreach_function(function, shader) { 26227e102996Smaya assert(func == NULL); 26237e102996Smaya if (function->is_entrypoint) { 26247e102996Smaya func = function; 26257e102996Smaya#ifndef NDEBUG 26267e102996Smaya break; 26277e102996Smaya#endif 26287e102996Smaya } 26297e102996Smaya } 26307e102996Smaya 26317e102996Smaya if (!func) 26327e102996Smaya return NULL; 26337e102996Smaya 263401e04c3fSmrg assert(func->num_params == 0); 263501e04c3fSmrg assert(func->impl); 263601e04c3fSmrg return func->impl; 263701e04c3fSmrg} 263801e04c3fSmrg 263901e04c3fSmrgnir_shader *nir_shader_create(void *mem_ctx, 264001e04c3fSmrg gl_shader_stage stage, 264101e04c3fSmrg const nir_shader_compiler_options *options, 264201e04c3fSmrg shader_info *si); 264301e04c3fSmrg 264401e04c3fSmrgnir_register *nir_local_reg_create(nir_function_impl *impl); 264501e04c3fSmrg 264601e04c3fSmrgvoid nir_reg_remove(nir_register *reg); 264701e04c3fSmrg 264801e04c3fSmrg/** Adds a variable to the appropriate list in nir_shader */ 264901e04c3fSmrgvoid nir_shader_add_variable(nir_shader *shader, nir_variable *var); 265001e04c3fSmrg 265101e04c3fSmrgstatic inline void 265201e04c3fSmrgnir_function_impl_add_variable(nir_function_impl *impl, nir_variable *var) 265301e04c3fSmrg{ 26547e102996Smaya assert(var->data.mode == nir_var_function_temp); 265501e04c3fSmrg exec_list_push_tail(&impl->locals, &var->node); 265601e04c3fSmrg} 265701e04c3fSmrg 265801e04c3fSmrg/** creates a variable, sets a few defaults, and adds it to the list */ 265901e04c3fSmrgnir_variable *nir_variable_create(nir_shader *shader, 266001e04c3fSmrg nir_variable_mode mode, 266101e04c3fSmrg const struct glsl_type *type, 266201e04c3fSmrg const char *name); 266301e04c3fSmrg/** creates a local variable and adds it to the list */ 266401e04c3fSmrgnir_variable *nir_local_variable_create(nir_function_impl *impl, 266501e04c3fSmrg const struct glsl_type *type, 266601e04c3fSmrg const char *name); 266701e04c3fSmrg 266801e04c3fSmrg/** creates a function and adds it to the shader's list of functions */ 266901e04c3fSmrgnir_function *nir_function_create(nir_shader *shader, const char *name); 267001e04c3fSmrg 267101e04c3fSmrgnir_function_impl *nir_function_impl_create(nir_function *func); 267201e04c3fSmrg/** creates a function_impl that isn't tied to any particular function */ 267301e04c3fSmrgnir_function_impl *nir_function_impl_create_bare(nir_shader *shader); 267401e04c3fSmrg 267501e04c3fSmrgnir_block *nir_block_create(nir_shader *shader); 267601e04c3fSmrgnir_if *nir_if_create(nir_shader *shader); 267701e04c3fSmrgnir_loop *nir_loop_create(nir_shader *shader); 267801e04c3fSmrg 267901e04c3fSmrgnir_function_impl *nir_cf_node_get_function(nir_cf_node *node); 268001e04c3fSmrg 268101e04c3fSmrg/** requests that the given pieces of metadata be generated */ 268201e04c3fSmrgvoid nir_metadata_require(nir_function_impl *impl, nir_metadata required, ...); 268301e04c3fSmrg/** dirties all but the preserved metadata */ 268401e04c3fSmrgvoid nir_metadata_preserve(nir_function_impl *impl, nir_metadata preserved); 268501e04c3fSmrg 268601e04c3fSmrg/** creates an instruction with default swizzle/writemask/etc. with NULL registers */ 268701e04c3fSmrgnir_alu_instr *nir_alu_instr_create(nir_shader *shader, nir_op op); 268801e04c3fSmrg 268901e04c3fSmrgnir_deref_instr *nir_deref_instr_create(nir_shader *shader, 269001e04c3fSmrg nir_deref_type deref_type); 269101e04c3fSmrg 269201e04c3fSmrgnir_jump_instr *nir_jump_instr_create(nir_shader *shader, nir_jump_type type); 269301e04c3fSmrg 269401e04c3fSmrgnir_load_const_instr *nir_load_const_instr_create(nir_shader *shader, 269501e04c3fSmrg unsigned num_components, 269601e04c3fSmrg unsigned bit_size); 269701e04c3fSmrg 269801e04c3fSmrgnir_intrinsic_instr *nir_intrinsic_instr_create(nir_shader *shader, 269901e04c3fSmrg nir_intrinsic_op op); 270001e04c3fSmrg 270101e04c3fSmrgnir_call_instr *nir_call_instr_create(nir_shader *shader, 270201e04c3fSmrg nir_function *callee); 270301e04c3fSmrg 270401e04c3fSmrgnir_tex_instr *nir_tex_instr_create(nir_shader *shader, unsigned num_srcs); 270501e04c3fSmrg 270601e04c3fSmrgnir_phi_instr *nir_phi_instr_create(nir_shader *shader); 270701e04c3fSmrg 270801e04c3fSmrgnir_parallel_copy_instr *nir_parallel_copy_instr_create(nir_shader *shader); 270901e04c3fSmrg 271001e04c3fSmrgnir_ssa_undef_instr *nir_ssa_undef_instr_create(nir_shader *shader, 271101e04c3fSmrg unsigned num_components, 271201e04c3fSmrg unsigned bit_size); 271301e04c3fSmrg 271401e04c3fSmrgnir_const_value nir_alu_binop_identity(nir_op binop, unsigned bit_size); 271501e04c3fSmrg 271601e04c3fSmrg/** 271701e04c3fSmrg * NIR Cursors and Instruction Insertion API 271801e04c3fSmrg * @{ 271901e04c3fSmrg * 272001e04c3fSmrg * A tiny struct representing a point to insert/extract instructions or 272101e04c3fSmrg * control flow nodes. Helps reduce the combinatorial explosion of possible 272201e04c3fSmrg * points to insert/extract. 272301e04c3fSmrg * 272401e04c3fSmrg * \sa nir_control_flow.h 272501e04c3fSmrg */ 272601e04c3fSmrgtypedef enum { 272701e04c3fSmrg nir_cursor_before_block, 272801e04c3fSmrg nir_cursor_after_block, 272901e04c3fSmrg nir_cursor_before_instr, 273001e04c3fSmrg nir_cursor_after_instr, 273101e04c3fSmrg} nir_cursor_option; 273201e04c3fSmrg 273301e04c3fSmrgtypedef struct { 273401e04c3fSmrg nir_cursor_option option; 273501e04c3fSmrg union { 273601e04c3fSmrg nir_block *block; 273701e04c3fSmrg nir_instr *instr; 273801e04c3fSmrg }; 273901e04c3fSmrg} nir_cursor; 274001e04c3fSmrg 274101e04c3fSmrgstatic inline nir_block * 274201e04c3fSmrgnir_cursor_current_block(nir_cursor cursor) 274301e04c3fSmrg{ 274401e04c3fSmrg if (cursor.option == nir_cursor_before_instr || 274501e04c3fSmrg cursor.option == nir_cursor_after_instr) { 274601e04c3fSmrg return cursor.instr->block; 274701e04c3fSmrg } else { 274801e04c3fSmrg return cursor.block; 274901e04c3fSmrg } 275001e04c3fSmrg} 275101e04c3fSmrg 275201e04c3fSmrgbool nir_cursors_equal(nir_cursor a, nir_cursor b); 275301e04c3fSmrg 275401e04c3fSmrgstatic inline nir_cursor 275501e04c3fSmrgnir_before_block(nir_block *block) 275601e04c3fSmrg{ 275701e04c3fSmrg nir_cursor cursor; 275801e04c3fSmrg cursor.option = nir_cursor_before_block; 275901e04c3fSmrg cursor.block = block; 276001e04c3fSmrg return cursor; 276101e04c3fSmrg} 276201e04c3fSmrg 276301e04c3fSmrgstatic inline nir_cursor 276401e04c3fSmrgnir_after_block(nir_block *block) 276501e04c3fSmrg{ 276601e04c3fSmrg nir_cursor cursor; 276701e04c3fSmrg cursor.option = nir_cursor_after_block; 276801e04c3fSmrg cursor.block = block; 276901e04c3fSmrg return cursor; 277001e04c3fSmrg} 277101e04c3fSmrg 277201e04c3fSmrgstatic inline nir_cursor 277301e04c3fSmrgnir_before_instr(nir_instr *instr) 277401e04c3fSmrg{ 277501e04c3fSmrg nir_cursor cursor; 277601e04c3fSmrg cursor.option = nir_cursor_before_instr; 277701e04c3fSmrg cursor.instr = instr; 277801e04c3fSmrg return cursor; 277901e04c3fSmrg} 278001e04c3fSmrg 278101e04c3fSmrgstatic inline nir_cursor 278201e04c3fSmrgnir_after_instr(nir_instr *instr) 278301e04c3fSmrg{ 278401e04c3fSmrg nir_cursor cursor; 278501e04c3fSmrg cursor.option = nir_cursor_after_instr; 278601e04c3fSmrg cursor.instr = instr; 278701e04c3fSmrg return cursor; 278801e04c3fSmrg} 278901e04c3fSmrg 279001e04c3fSmrgstatic inline nir_cursor 279101e04c3fSmrgnir_after_block_before_jump(nir_block *block) 279201e04c3fSmrg{ 279301e04c3fSmrg nir_instr *last_instr = nir_block_last_instr(block); 279401e04c3fSmrg if (last_instr && last_instr->type == nir_instr_type_jump) { 279501e04c3fSmrg return nir_before_instr(last_instr); 279601e04c3fSmrg } else { 279701e04c3fSmrg return nir_after_block(block); 279801e04c3fSmrg } 279901e04c3fSmrg} 280001e04c3fSmrg 280101e04c3fSmrgstatic inline nir_cursor 280201e04c3fSmrgnir_before_src(nir_src *src, bool is_if_condition) 280301e04c3fSmrg{ 280401e04c3fSmrg if (is_if_condition) { 280501e04c3fSmrg nir_block *prev_block = 280601e04c3fSmrg nir_cf_node_as_block(nir_cf_node_prev(&src->parent_if->cf_node)); 280701e04c3fSmrg assert(!nir_block_ends_in_jump(prev_block)); 280801e04c3fSmrg return nir_after_block(prev_block); 280901e04c3fSmrg } else if (src->parent_instr->type == nir_instr_type_phi) { 281001e04c3fSmrg#ifndef NDEBUG 281101e04c3fSmrg nir_phi_instr *cond_phi = nir_instr_as_phi(src->parent_instr); 281201e04c3fSmrg bool found = false; 281301e04c3fSmrg nir_foreach_phi_src(phi_src, cond_phi) { 281401e04c3fSmrg if (phi_src->src.ssa == src->ssa) { 281501e04c3fSmrg found = true; 281601e04c3fSmrg break; 281701e04c3fSmrg } 281801e04c3fSmrg } 281901e04c3fSmrg assert(found); 282001e04c3fSmrg#endif 282101e04c3fSmrg /* The LIST_ENTRY macro is a generic container-of macro, it just happens 282201e04c3fSmrg * to have a more specific name. 282301e04c3fSmrg */ 282401e04c3fSmrg nir_phi_src *phi_src = LIST_ENTRY(nir_phi_src, src, src); 282501e04c3fSmrg return nir_after_block_before_jump(phi_src->pred); 282601e04c3fSmrg } else { 282701e04c3fSmrg return nir_before_instr(src->parent_instr); 282801e04c3fSmrg } 282901e04c3fSmrg} 283001e04c3fSmrg 283101e04c3fSmrgstatic inline nir_cursor 283201e04c3fSmrgnir_before_cf_node(nir_cf_node *node) 283301e04c3fSmrg{ 283401e04c3fSmrg if (node->type == nir_cf_node_block) 283501e04c3fSmrg return nir_before_block(nir_cf_node_as_block(node)); 283601e04c3fSmrg 283701e04c3fSmrg return nir_after_block(nir_cf_node_as_block(nir_cf_node_prev(node))); 283801e04c3fSmrg} 283901e04c3fSmrg 284001e04c3fSmrgstatic inline nir_cursor 284101e04c3fSmrgnir_after_cf_node(nir_cf_node *node) 284201e04c3fSmrg{ 284301e04c3fSmrg if (node->type == nir_cf_node_block) 284401e04c3fSmrg return nir_after_block(nir_cf_node_as_block(node)); 284501e04c3fSmrg 284601e04c3fSmrg return nir_before_block(nir_cf_node_as_block(nir_cf_node_next(node))); 284701e04c3fSmrg} 284801e04c3fSmrg 284901e04c3fSmrgstatic inline nir_cursor 285001e04c3fSmrgnir_after_phis(nir_block *block) 285101e04c3fSmrg{ 285201e04c3fSmrg nir_foreach_instr(instr, block) { 285301e04c3fSmrg if (instr->type != nir_instr_type_phi) 285401e04c3fSmrg return nir_before_instr(instr); 285501e04c3fSmrg } 285601e04c3fSmrg return nir_after_block(block); 285701e04c3fSmrg} 285801e04c3fSmrg 285901e04c3fSmrgstatic inline nir_cursor 286001e04c3fSmrgnir_after_cf_node_and_phis(nir_cf_node *node) 286101e04c3fSmrg{ 286201e04c3fSmrg if (node->type == nir_cf_node_block) 286301e04c3fSmrg return nir_after_block(nir_cf_node_as_block(node)); 286401e04c3fSmrg 286501e04c3fSmrg nir_block *block = nir_cf_node_as_block(nir_cf_node_next(node)); 286601e04c3fSmrg 286701e04c3fSmrg return nir_after_phis(block); 286801e04c3fSmrg} 286901e04c3fSmrg 287001e04c3fSmrgstatic inline nir_cursor 287101e04c3fSmrgnir_before_cf_list(struct exec_list *cf_list) 287201e04c3fSmrg{ 287301e04c3fSmrg nir_cf_node *first_node = exec_node_data(nir_cf_node, 287401e04c3fSmrg exec_list_get_head(cf_list), node); 287501e04c3fSmrg return nir_before_cf_node(first_node); 287601e04c3fSmrg} 287701e04c3fSmrg 287801e04c3fSmrgstatic inline nir_cursor 287901e04c3fSmrgnir_after_cf_list(struct exec_list *cf_list) 288001e04c3fSmrg{ 288101e04c3fSmrg nir_cf_node *last_node = exec_node_data(nir_cf_node, 288201e04c3fSmrg exec_list_get_tail(cf_list), node); 288301e04c3fSmrg return nir_after_cf_node(last_node); 288401e04c3fSmrg} 288501e04c3fSmrg 288601e04c3fSmrg/** 288701e04c3fSmrg * Insert a NIR instruction at the given cursor. 288801e04c3fSmrg * 288901e04c3fSmrg * Note: This does not update the cursor. 289001e04c3fSmrg */ 289101e04c3fSmrgvoid nir_instr_insert(nir_cursor cursor, nir_instr *instr); 289201e04c3fSmrg 289301e04c3fSmrgstatic inline void 289401e04c3fSmrgnir_instr_insert_before(nir_instr *instr, nir_instr *before) 289501e04c3fSmrg{ 289601e04c3fSmrg nir_instr_insert(nir_before_instr(instr), before); 289701e04c3fSmrg} 289801e04c3fSmrg 289901e04c3fSmrgstatic inline void 290001e04c3fSmrgnir_instr_insert_after(nir_instr *instr, nir_instr *after) 290101e04c3fSmrg{ 290201e04c3fSmrg nir_instr_insert(nir_after_instr(instr), after); 290301e04c3fSmrg} 290401e04c3fSmrg 290501e04c3fSmrgstatic inline void 290601e04c3fSmrgnir_instr_insert_before_block(nir_block *block, nir_instr *before) 290701e04c3fSmrg{ 290801e04c3fSmrg nir_instr_insert(nir_before_block(block), before); 290901e04c3fSmrg} 291001e04c3fSmrg 291101e04c3fSmrgstatic inline void 291201e04c3fSmrgnir_instr_insert_after_block(nir_block *block, nir_instr *after) 291301e04c3fSmrg{ 291401e04c3fSmrg nir_instr_insert(nir_after_block(block), after); 291501e04c3fSmrg} 291601e04c3fSmrg 291701e04c3fSmrgstatic inline void 291801e04c3fSmrgnir_instr_insert_before_cf(nir_cf_node *node, nir_instr *before) 291901e04c3fSmrg{ 292001e04c3fSmrg nir_instr_insert(nir_before_cf_node(node), before); 292101e04c3fSmrg} 292201e04c3fSmrg 292301e04c3fSmrgstatic inline void 292401e04c3fSmrgnir_instr_insert_after_cf(nir_cf_node *node, nir_instr *after) 292501e04c3fSmrg{ 292601e04c3fSmrg nir_instr_insert(nir_after_cf_node(node), after); 292701e04c3fSmrg} 292801e04c3fSmrg 292901e04c3fSmrgstatic inline void 293001e04c3fSmrgnir_instr_insert_before_cf_list(struct exec_list *list, nir_instr *before) 293101e04c3fSmrg{ 293201e04c3fSmrg nir_instr_insert(nir_before_cf_list(list), before); 293301e04c3fSmrg} 293401e04c3fSmrg 293501e04c3fSmrgstatic inline void 293601e04c3fSmrgnir_instr_insert_after_cf_list(struct exec_list *list, nir_instr *after) 293701e04c3fSmrg{ 293801e04c3fSmrg nir_instr_insert(nir_after_cf_list(list), after); 293901e04c3fSmrg} 294001e04c3fSmrg 294101e04c3fSmrgvoid nir_instr_remove_v(nir_instr *instr); 294201e04c3fSmrg 294301e04c3fSmrgstatic inline nir_cursor 294401e04c3fSmrgnir_instr_remove(nir_instr *instr) 294501e04c3fSmrg{ 294601e04c3fSmrg nir_cursor cursor; 294701e04c3fSmrg nir_instr *prev = nir_instr_prev(instr); 294801e04c3fSmrg if (prev) { 294901e04c3fSmrg cursor = nir_after_instr(prev); 295001e04c3fSmrg } else { 295101e04c3fSmrg cursor = nir_before_block(instr->block); 295201e04c3fSmrg } 295301e04c3fSmrg nir_instr_remove_v(instr); 295401e04c3fSmrg return cursor; 295501e04c3fSmrg} 295601e04c3fSmrg 295701e04c3fSmrg/** @} */ 295801e04c3fSmrg 295901e04c3fSmrgtypedef bool (*nir_foreach_ssa_def_cb)(nir_ssa_def *def, void *state); 296001e04c3fSmrgtypedef bool (*nir_foreach_dest_cb)(nir_dest *dest, void *state); 296101e04c3fSmrgtypedef bool (*nir_foreach_src_cb)(nir_src *src, void *state); 296201e04c3fSmrgbool nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb, 296301e04c3fSmrg void *state); 296401e04c3fSmrgbool nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state); 296501e04c3fSmrgbool nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state); 296601e04c3fSmrg 296701e04c3fSmrgnir_const_value *nir_src_as_const_value(nir_src src); 296801e04c3fSmrg 296901e04c3fSmrg#define NIR_SRC_AS_(name, c_type, type_enum, cast_macro) \ 297001e04c3fSmrgstatic inline c_type * \ 29717e102996Smayanir_src_as_ ## name (nir_src src) \ 297201e04c3fSmrg{ \ 29737e102996Smaya return src.is_ssa && src.ssa->parent_instr->type == type_enum \ 29747e102996Smaya ? cast_macro(src.ssa->parent_instr) : NULL; \ 297501e04c3fSmrg} 297601e04c3fSmrg 297701e04c3fSmrgNIR_SRC_AS_(alu_instr, nir_alu_instr, nir_instr_type_alu, nir_instr_as_alu) 29787e102996SmayaNIR_SRC_AS_(intrinsic, nir_intrinsic_instr, 29797e102996Smaya nir_instr_type_intrinsic, nir_instr_as_intrinsic) 29807e102996SmayaNIR_SRC_AS_(deref, nir_deref_instr, nir_instr_type_deref, nir_instr_as_deref) 298101e04c3fSmrg 298201e04c3fSmrgbool nir_src_is_dynamically_uniform(nir_src src); 298301e04c3fSmrgbool nir_srcs_equal(nir_src src1, nir_src src2); 29847e102996Smayabool nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2); 298501e04c3fSmrgvoid nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src); 298601e04c3fSmrgvoid nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src); 298701e04c3fSmrgvoid nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src); 298801e04c3fSmrgvoid nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest, 298901e04c3fSmrg nir_dest new_dest); 299001e04c3fSmrg 299101e04c3fSmrgvoid nir_ssa_dest_init(nir_instr *instr, nir_dest *dest, 299201e04c3fSmrg unsigned num_components, unsigned bit_size, 299301e04c3fSmrg const char *name); 299401e04c3fSmrgvoid nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def, 299501e04c3fSmrg unsigned num_components, unsigned bit_size, 299601e04c3fSmrg const char *name); 299701e04c3fSmrgstatic inline void 299801e04c3fSmrgnir_ssa_dest_init_for_type(nir_instr *instr, nir_dest *dest, 299901e04c3fSmrg const struct glsl_type *type, 300001e04c3fSmrg const char *name) 300101e04c3fSmrg{ 300201e04c3fSmrg assert(glsl_type_is_vector_or_scalar(type)); 300301e04c3fSmrg nir_ssa_dest_init(instr, dest, glsl_get_components(type), 300401e04c3fSmrg glsl_get_bit_size(type), name); 300501e04c3fSmrg} 300601e04c3fSmrgvoid nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src); 300701e04c3fSmrgvoid nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src, 300801e04c3fSmrg nir_instr *after_me); 300901e04c3fSmrg 301001e04c3fSmrgnir_component_mask_t nir_ssa_def_components_read(const nir_ssa_def *def); 301101e04c3fSmrg 301201e04c3fSmrg/* 301301e04c3fSmrg * finds the next basic block in source-code order, returns NULL if there is 301401e04c3fSmrg * none 301501e04c3fSmrg */ 301601e04c3fSmrg 301701e04c3fSmrgnir_block *nir_block_cf_tree_next(nir_block *block); 301801e04c3fSmrg 301901e04c3fSmrg/* Performs the opposite of nir_block_cf_tree_next() */ 302001e04c3fSmrg 302101e04c3fSmrgnir_block *nir_block_cf_tree_prev(nir_block *block); 302201e04c3fSmrg 302301e04c3fSmrg/* Gets the first block in a CF node in source-code order */ 302401e04c3fSmrg 302501e04c3fSmrgnir_block *nir_cf_node_cf_tree_first(nir_cf_node *node); 302601e04c3fSmrg 302701e04c3fSmrg/* Gets the last block in a CF node in source-code order */ 302801e04c3fSmrg 302901e04c3fSmrgnir_block *nir_cf_node_cf_tree_last(nir_cf_node *node); 303001e04c3fSmrg 303101e04c3fSmrg/* Gets the next block after a CF node in source-code order */ 303201e04c3fSmrg 303301e04c3fSmrgnir_block *nir_cf_node_cf_tree_next(nir_cf_node *node); 303401e04c3fSmrg 303501e04c3fSmrg/* Macros for loops that visit blocks in source-code order */ 303601e04c3fSmrg 303701e04c3fSmrg#define nir_foreach_block(block, impl) \ 303801e04c3fSmrg for (nir_block *block = nir_start_block(impl); block != NULL; \ 303901e04c3fSmrg block = nir_block_cf_tree_next(block)) 304001e04c3fSmrg 304101e04c3fSmrg#define nir_foreach_block_safe(block, impl) \ 304201e04c3fSmrg for (nir_block *block = nir_start_block(impl), \ 304301e04c3fSmrg *next = nir_block_cf_tree_next(block); \ 304401e04c3fSmrg block != NULL; \ 304501e04c3fSmrg block = next, next = nir_block_cf_tree_next(block)) 304601e04c3fSmrg 304701e04c3fSmrg#define nir_foreach_block_reverse(block, impl) \ 304801e04c3fSmrg for (nir_block *block = nir_impl_last_block(impl); block != NULL; \ 304901e04c3fSmrg block = nir_block_cf_tree_prev(block)) 305001e04c3fSmrg 305101e04c3fSmrg#define nir_foreach_block_reverse_safe(block, impl) \ 305201e04c3fSmrg for (nir_block *block = nir_impl_last_block(impl), \ 305301e04c3fSmrg *prev = nir_block_cf_tree_prev(block); \ 305401e04c3fSmrg block != NULL; \ 305501e04c3fSmrg block = prev, prev = nir_block_cf_tree_prev(block)) 305601e04c3fSmrg 305701e04c3fSmrg#define nir_foreach_block_in_cf_node(block, node) \ 305801e04c3fSmrg for (nir_block *block = nir_cf_node_cf_tree_first(node); \ 305901e04c3fSmrg block != nir_cf_node_cf_tree_next(node); \ 306001e04c3fSmrg block = nir_block_cf_tree_next(block)) 306101e04c3fSmrg 306201e04c3fSmrg/* If the following CF node is an if, this function returns that if. 306301e04c3fSmrg * Otherwise, it returns NULL. 306401e04c3fSmrg */ 306501e04c3fSmrgnir_if *nir_block_get_following_if(nir_block *block); 306601e04c3fSmrg 306701e04c3fSmrgnir_loop *nir_block_get_following_loop(nir_block *block); 306801e04c3fSmrg 306901e04c3fSmrgvoid nir_index_local_regs(nir_function_impl *impl); 307001e04c3fSmrgvoid nir_index_ssa_defs(nir_function_impl *impl); 307101e04c3fSmrgunsigned nir_index_instrs(nir_function_impl *impl); 307201e04c3fSmrg 307301e04c3fSmrgvoid nir_index_blocks(nir_function_impl *impl); 307401e04c3fSmrg 307501e04c3fSmrgvoid nir_print_shader(nir_shader *shader, FILE *fp); 307601e04c3fSmrgvoid nir_print_shader_annotated(nir_shader *shader, FILE *fp, struct hash_table *errors); 307701e04c3fSmrgvoid nir_print_instr(const nir_instr *instr, FILE *fp); 30787e102996Smayavoid nir_print_deref(const nir_deref_instr *deref, FILE *fp); 307901e04c3fSmrg 308001e04c3fSmrgnir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s); 30817e102996Smayanir_function_impl *nir_function_impl_clone(nir_shader *shader, 30827e102996Smaya const nir_function_impl *fi); 308301e04c3fSmrgnir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var); 308401e04c3fSmrgnir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader); 308501e04c3fSmrg 308601e04c3fSmrgnir_shader *nir_shader_serialize_deserialize(void *mem_ctx, nir_shader *s); 308701e04c3fSmrg 308801e04c3fSmrg#ifndef NDEBUG 308901e04c3fSmrgvoid nir_validate_shader(nir_shader *shader, const char *when); 309001e04c3fSmrgvoid nir_metadata_set_validation_flag(nir_shader *shader); 309101e04c3fSmrgvoid nir_metadata_check_validation_flag(nir_shader *shader); 309201e04c3fSmrg 30937e102996Smayastatic inline bool 30947e102996Smayashould_skip_nir(const char *name) 30957e102996Smaya{ 30967e102996Smaya static const char *list = NULL; 30977e102996Smaya if (!list) { 30987e102996Smaya /* Comma separated list of names to skip. */ 30997e102996Smaya list = getenv("NIR_SKIP"); 31007e102996Smaya if (!list) 31017e102996Smaya list = ""; 31027e102996Smaya } 31037e102996Smaya 31047e102996Smaya if (!list[0]) 31057e102996Smaya return false; 31067e102996Smaya 31077e102996Smaya return comma_separated_list_contains(list, name); 31087e102996Smaya} 31097e102996Smaya 311001e04c3fSmrgstatic inline bool 311101e04c3fSmrgshould_clone_nir(void) 311201e04c3fSmrg{ 311301e04c3fSmrg static int should_clone = -1; 311401e04c3fSmrg if (should_clone < 0) 311501e04c3fSmrg should_clone = env_var_as_boolean("NIR_TEST_CLONE", false); 311601e04c3fSmrg 311701e04c3fSmrg return should_clone; 311801e04c3fSmrg} 311901e04c3fSmrg 312001e04c3fSmrgstatic inline bool 312101e04c3fSmrgshould_serialize_deserialize_nir(void) 312201e04c3fSmrg{ 312301e04c3fSmrg static int test_serialize = -1; 312401e04c3fSmrg if (test_serialize < 0) 312501e04c3fSmrg test_serialize = env_var_as_boolean("NIR_TEST_SERIALIZE", false); 312601e04c3fSmrg 312701e04c3fSmrg return test_serialize; 312801e04c3fSmrg} 312901e04c3fSmrg 313001e04c3fSmrgstatic inline bool 313101e04c3fSmrgshould_print_nir(void) 313201e04c3fSmrg{ 313301e04c3fSmrg static int should_print = -1; 313401e04c3fSmrg if (should_print < 0) 313501e04c3fSmrg should_print = env_var_as_boolean("NIR_PRINT", false); 313601e04c3fSmrg 313701e04c3fSmrg return should_print; 313801e04c3fSmrg} 313901e04c3fSmrg#else 314001e04c3fSmrgstatic inline void nir_validate_shader(nir_shader *shader, const char *when) { (void) shader; (void)when; } 314101e04c3fSmrgstatic inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; } 314201e04c3fSmrgstatic inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; } 31437e102996Smayastatic inline bool should_skip_nir(UNUSED const char *pass_name) { return false; } 314401e04c3fSmrgstatic inline bool should_clone_nir(void) { return false; } 314501e04c3fSmrgstatic inline bool should_serialize_deserialize_nir(void) { return false; } 314601e04c3fSmrgstatic inline bool should_print_nir(void) { return false; } 314701e04c3fSmrg#endif /* NDEBUG */ 314801e04c3fSmrg 314901e04c3fSmrg#define _PASS(pass, nir, do_pass) do { \ 31507e102996Smaya if (should_skip_nir(#pass)) { \ 31517e102996Smaya printf("skipping %s\n", #pass); \ 31527e102996Smaya break; \ 31537e102996Smaya } \ 315401e04c3fSmrg do_pass \ 315501e04c3fSmrg nir_validate_shader(nir, "after " #pass); \ 315601e04c3fSmrg if (should_clone_nir()) { \ 315701e04c3fSmrg nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \ 315801e04c3fSmrg ralloc_free(nir); \ 315901e04c3fSmrg nir = clone; \ 316001e04c3fSmrg } \ 316101e04c3fSmrg if (should_serialize_deserialize_nir()) { \ 316201e04c3fSmrg void *mem_ctx = ralloc_parent(nir); \ 316301e04c3fSmrg nir = nir_shader_serialize_deserialize(mem_ctx, nir); \ 316401e04c3fSmrg } \ 316501e04c3fSmrg} while (0) 316601e04c3fSmrg 316701e04c3fSmrg#define NIR_PASS(progress, nir, pass, ...) _PASS(pass, nir, \ 316801e04c3fSmrg nir_metadata_set_validation_flag(nir); \ 316901e04c3fSmrg if (should_print_nir()) \ 317001e04c3fSmrg printf("%s\n", #pass); \ 317101e04c3fSmrg if (pass(nir, ##__VA_ARGS__)) { \ 317201e04c3fSmrg progress = true; \ 317301e04c3fSmrg if (should_print_nir()) \ 317401e04c3fSmrg nir_print_shader(nir, stdout); \ 317501e04c3fSmrg nir_metadata_check_validation_flag(nir); \ 317601e04c3fSmrg } \ 317701e04c3fSmrg) 317801e04c3fSmrg 317901e04c3fSmrg#define NIR_PASS_V(nir, pass, ...) _PASS(pass, nir, \ 318001e04c3fSmrg if (should_print_nir()) \ 318101e04c3fSmrg printf("%s\n", #pass); \ 318201e04c3fSmrg pass(nir, ##__VA_ARGS__); \ 318301e04c3fSmrg if (should_print_nir()) \ 318401e04c3fSmrg nir_print_shader(nir, stdout); \ 318501e04c3fSmrg) 318601e04c3fSmrg 31877e102996Smaya#define NIR_SKIP(name) should_skip_nir(#name) 31887e102996Smaya 318901e04c3fSmrgvoid nir_calc_dominance_impl(nir_function_impl *impl); 319001e04c3fSmrgvoid nir_calc_dominance(nir_shader *shader); 319101e04c3fSmrg 319201e04c3fSmrgnir_block *nir_dominance_lca(nir_block *b1, nir_block *b2); 319301e04c3fSmrgbool nir_block_dominates(nir_block *parent, nir_block *child); 31947e102996Smayabool nir_block_is_unreachable(nir_block *block); 319501e04c3fSmrg 319601e04c3fSmrgvoid nir_dump_dom_tree_impl(nir_function_impl *impl, FILE *fp); 319701e04c3fSmrgvoid nir_dump_dom_tree(nir_shader *shader, FILE *fp); 319801e04c3fSmrg 319901e04c3fSmrgvoid nir_dump_dom_frontier_impl(nir_function_impl *impl, FILE *fp); 320001e04c3fSmrgvoid nir_dump_dom_frontier(nir_shader *shader, FILE *fp); 320101e04c3fSmrg 320201e04c3fSmrgvoid nir_dump_cfg_impl(nir_function_impl *impl, FILE *fp); 320301e04c3fSmrgvoid nir_dump_cfg(nir_shader *shader, FILE *fp); 320401e04c3fSmrg 320501e04c3fSmrgint nir_gs_count_vertices(const nir_shader *shader); 320601e04c3fSmrg 320701e04c3fSmrgbool nir_shrink_vec_array_vars(nir_shader *shader, nir_variable_mode modes); 320801e04c3fSmrgbool nir_split_array_vars(nir_shader *shader, nir_variable_mode modes); 320901e04c3fSmrgbool nir_split_var_copies(nir_shader *shader); 321001e04c3fSmrgbool nir_split_per_member_structs(nir_shader *shader); 321101e04c3fSmrgbool nir_split_struct_vars(nir_shader *shader, nir_variable_mode modes); 321201e04c3fSmrg 321301e04c3fSmrgbool nir_lower_returns_impl(nir_function_impl *impl); 321401e04c3fSmrgbool nir_lower_returns(nir_shader *shader); 321501e04c3fSmrg 32167e102996Smayavoid nir_inline_function_impl(struct nir_builder *b, 32177e102996Smaya const nir_function_impl *impl, 32187e102996Smaya nir_ssa_def **params); 321901e04c3fSmrgbool nir_inline_functions(nir_shader *shader); 322001e04c3fSmrg 322101e04c3fSmrgbool nir_propagate_invariant(nir_shader *shader); 322201e04c3fSmrg 322301e04c3fSmrgvoid nir_lower_var_copy_instr(nir_intrinsic_instr *copy, nir_shader *shader); 322401e04c3fSmrgvoid nir_lower_deref_copy_instr(struct nir_builder *b, 322501e04c3fSmrg nir_intrinsic_instr *copy); 322601e04c3fSmrgbool nir_lower_var_copies(nir_shader *shader); 322701e04c3fSmrg 322801e04c3fSmrgvoid nir_fixup_deref_modes(nir_shader *shader); 322901e04c3fSmrg 323001e04c3fSmrgbool nir_lower_global_vars_to_local(nir_shader *shader); 323101e04c3fSmrg 32327e102996Smayatypedef enum { 32337e102996Smaya nir_lower_direct_array_deref_of_vec_load = (1 << 0), 32347e102996Smaya nir_lower_indirect_array_deref_of_vec_load = (1 << 1), 32357e102996Smaya nir_lower_direct_array_deref_of_vec_store = (1 << 2), 32367e102996Smaya nir_lower_indirect_array_deref_of_vec_store = (1 << 3), 32377e102996Smaya} nir_lower_array_deref_of_vec_options; 32387e102996Smaya 32397e102996Smayabool nir_lower_array_deref_of_vec(nir_shader *shader, nir_variable_mode modes, 32407e102996Smaya nir_lower_array_deref_of_vec_options options); 32417e102996Smaya 324201e04c3fSmrgbool nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes); 324301e04c3fSmrg 324401e04c3fSmrgbool nir_lower_locals_to_regs(nir_shader *shader); 324501e04c3fSmrg 324601e04c3fSmrgvoid nir_lower_io_to_temporaries(nir_shader *shader, 324701e04c3fSmrg nir_function_impl *entrypoint, 324801e04c3fSmrg bool outputs, bool inputs); 324901e04c3fSmrg 32507e102996Smayabool nir_lower_vars_to_scratch(nir_shader *shader, 32517e102996Smaya nir_variable_mode modes, 32527e102996Smaya int size_threshold, 32537e102996Smaya glsl_type_size_align_func size_align); 32547e102996Smaya 325501e04c3fSmrgvoid nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint); 325601e04c3fSmrg 32577e102996Smayavoid nir_gather_ssa_types(nir_function_impl *impl, 32587e102996Smaya BITSET_WORD *float_types, 32597e102996Smaya BITSET_WORD *int_types); 32607e102996Smaya 326101e04c3fSmrgvoid nir_assign_var_locations(struct exec_list *var_list, unsigned *size, 32627e102996Smaya int (*type_size)(const struct glsl_type *, bool)); 326301e04c3fSmrg 326401e04c3fSmrg/* Some helpers to do very simple linking */ 326501e04c3fSmrgbool nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer); 326601e04c3fSmrgbool nir_remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list, 326701e04c3fSmrg uint64_t *used_by_other_stage, 326801e04c3fSmrg uint64_t *used_by_other_stage_patches); 326901e04c3fSmrgvoid nir_compact_varyings(nir_shader *producer, nir_shader *consumer, 327001e04c3fSmrg bool default_to_smooth_interp); 327101e04c3fSmrgvoid nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer); 32727e102996Smayabool nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer); 327301e04c3fSmrg 327401e04c3fSmrgtypedef enum { 327501e04c3fSmrg /* If set, this forces all non-flat fragment shader inputs to be 327601e04c3fSmrg * interpolated as if with the "sample" qualifier. This requires 327701e04c3fSmrg * nir_shader_compiler_options::use_interpolated_input_intrinsics. 327801e04c3fSmrg */ 327901e04c3fSmrg nir_lower_io_force_sample_interpolation = (1 << 1), 328001e04c3fSmrg} nir_lower_io_options; 328101e04c3fSmrgbool nir_lower_io(nir_shader *shader, 328201e04c3fSmrg nir_variable_mode modes, 32837e102996Smaya int (*type_size)(const struct glsl_type *, bool), 328401e04c3fSmrg nir_lower_io_options); 32857e102996Smaya 32867e102996Smayatypedef enum { 32877e102996Smaya /** 32887e102996Smaya * An address format which is a simple 32-bit global GPU address. 32897e102996Smaya */ 32907e102996Smaya nir_address_format_32bit_global, 32917e102996Smaya 32927e102996Smaya /** 32937e102996Smaya * An address format which is a simple 64-bit global GPU address. 32947e102996Smaya */ 32957e102996Smaya nir_address_format_64bit_global, 32967e102996Smaya 32977e102996Smaya /** 32987e102996Smaya * An address format which is a bounds-checked 64-bit global GPU address. 32997e102996Smaya * 33007e102996Smaya * The address is comprised as a 32-bit vec4 where .xy are a uint64_t base 33017e102996Smaya * address stored with the low bits in .x and high bits in .y, .z is a 33027e102996Smaya * size, and .w is an offset. When the final I/O operation is lowered, .w 33037e102996Smaya * is checked against .z and the operation is predicated on the result. 33047e102996Smaya */ 33057e102996Smaya nir_address_format_64bit_bounded_global, 33067e102996Smaya 33077e102996Smaya /** 33087e102996Smaya * An address format which is comprised of a vec2 where the first 33097e102996Smaya * component is a buffer index and the second is an offset. 33107e102996Smaya */ 33117e102996Smaya nir_address_format_32bit_index_offset, 33127e102996Smaya} nir_address_format; 33137e102996Smaya 33147e102996Smayastatic inline unsigned 33157e102996Smayanir_address_format_bit_size(nir_address_format addr_format) 33167e102996Smaya{ 33177e102996Smaya switch (addr_format) { 33187e102996Smaya case nir_address_format_32bit_global: return 32; 33197e102996Smaya case nir_address_format_64bit_global: return 64; 33207e102996Smaya case nir_address_format_64bit_bounded_global: return 32; 33217e102996Smaya case nir_address_format_32bit_index_offset: return 32; 33227e102996Smaya } 33237e102996Smaya unreachable("Invalid address format"); 33247e102996Smaya} 33257e102996Smaya 33267e102996Smayastatic inline unsigned 33277e102996Smayanir_address_format_num_components(nir_address_format addr_format) 33287e102996Smaya{ 33297e102996Smaya switch (addr_format) { 33307e102996Smaya case nir_address_format_32bit_global: return 1; 33317e102996Smaya case nir_address_format_64bit_global: return 1; 33327e102996Smaya case nir_address_format_64bit_bounded_global: return 4; 33337e102996Smaya case nir_address_format_32bit_index_offset: return 2; 33347e102996Smaya } 33357e102996Smaya unreachable("Invalid address format"); 33367e102996Smaya} 33377e102996Smaya 33387e102996Smayastatic inline const struct glsl_type * 33397e102996Smayanir_address_format_to_glsl_type(nir_address_format addr_format) 33407e102996Smaya{ 33417e102996Smaya unsigned bit_size = nir_address_format_bit_size(addr_format); 33427e102996Smaya assert(bit_size == 32 || bit_size == 64); 33437e102996Smaya return glsl_vector_type(bit_size == 32 ? GLSL_TYPE_UINT : GLSL_TYPE_UINT64, 33447e102996Smaya nir_address_format_num_components(addr_format)); 33457e102996Smaya} 33467e102996Smaya 33477e102996Smayanir_ssa_def * nir_explicit_io_address_from_deref(struct nir_builder *b, 33487e102996Smaya nir_deref_instr *deref, 33497e102996Smaya nir_ssa_def *base_addr, 33507e102996Smaya nir_address_format addr_format); 33517e102996Smayavoid nir_lower_explicit_io_instr(struct nir_builder *b, 33527e102996Smaya nir_intrinsic_instr *io_instr, 33537e102996Smaya nir_ssa_def *addr, 33547e102996Smaya nir_address_format addr_format); 33557e102996Smaya 33567e102996Smayabool nir_lower_explicit_io(nir_shader *shader, 33577e102996Smaya nir_variable_mode modes, 33587e102996Smaya nir_address_format); 33597e102996Smaya 336001e04c3fSmrgnir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr); 336101e04c3fSmrgnir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr); 336201e04c3fSmrg 336301e04c3fSmrgbool nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage); 336401e04c3fSmrg 336501e04c3fSmrgbool nir_lower_regs_to_ssa_impl(nir_function_impl *impl); 336601e04c3fSmrgbool nir_lower_regs_to_ssa(nir_shader *shader); 336701e04c3fSmrgbool nir_lower_vars_to_ssa(nir_shader *shader); 336801e04c3fSmrg 336901e04c3fSmrgbool nir_remove_dead_derefs(nir_shader *shader); 337001e04c3fSmrgbool nir_remove_dead_derefs_impl(nir_function_impl *impl); 337101e04c3fSmrgbool nir_remove_dead_variables(nir_shader *shader, nir_variable_mode modes); 337201e04c3fSmrgbool nir_lower_constant_initializers(nir_shader *shader, 337301e04c3fSmrg nir_variable_mode modes); 337401e04c3fSmrg 337501e04c3fSmrgbool nir_move_load_const(nir_shader *shader); 337601e04c3fSmrgbool nir_move_vec_src_uses_to_dest(nir_shader *shader); 337701e04c3fSmrgbool nir_lower_vec_to_movs(nir_shader *shader); 337801e04c3fSmrgvoid nir_lower_alpha_test(nir_shader *shader, enum compare_func func, 337901e04c3fSmrg bool alpha_to_one); 338001e04c3fSmrgbool nir_lower_alu(nir_shader *shader); 338101e04c3fSmrgbool nir_lower_alu_to_scalar(nir_shader *shader); 33827e102996Smayabool nir_lower_bool_to_float(nir_shader *shader); 33837e102996Smayabool nir_lower_bool_to_int32(nir_shader *shader); 338401e04c3fSmrgbool nir_lower_load_const_to_scalar(nir_shader *shader); 338501e04c3fSmrgbool nir_lower_read_invocation_to_scalar(nir_shader *shader); 338601e04c3fSmrgbool nir_lower_phis_to_scalar(nir_shader *shader); 338701e04c3fSmrgvoid nir_lower_io_arrays_to_elements(nir_shader *producer, nir_shader *consumer); 338801e04c3fSmrgvoid nir_lower_io_arrays_to_elements_no_indirects(nir_shader *shader, 338901e04c3fSmrg bool outputs_only); 339001e04c3fSmrgvoid nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask); 339101e04c3fSmrgvoid nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask); 33927e102996Smayabool nir_lower_io_to_vector(nir_shader *shader, nir_variable_mode mask); 33937e102996Smaya 33947e102996Smayavoid nir_lower_fragcoord_wtrans(nir_shader *shader); 33957e102996Smayavoid nir_lower_viewport_transform(nir_shader *shader); 33967e102996Smayabool nir_lower_uniforms_to_ubo(nir_shader *shader, int multiplier); 339701e04c3fSmrg 339801e04c3fSmrgtypedef struct nir_lower_subgroups_options { 339901e04c3fSmrg uint8_t subgroup_size; 340001e04c3fSmrg uint8_t ballot_bit_size; 340101e04c3fSmrg bool lower_to_scalar:1; 340201e04c3fSmrg bool lower_vote_trivial:1; 340301e04c3fSmrg bool lower_vote_eq_to_ballot:1; 340401e04c3fSmrg bool lower_subgroup_masks:1; 340501e04c3fSmrg bool lower_shuffle:1; 340601e04c3fSmrg bool lower_shuffle_to_32bit:1; 340701e04c3fSmrg bool lower_quad:1; 340801e04c3fSmrg} nir_lower_subgroups_options; 340901e04c3fSmrg 341001e04c3fSmrgbool nir_lower_subgroups(nir_shader *shader, 341101e04c3fSmrg const nir_lower_subgroups_options *options); 341201e04c3fSmrg 341301e04c3fSmrgbool nir_lower_system_values(nir_shader *shader); 341401e04c3fSmrg 34157e102996Smayaenum PACKED nir_lower_tex_packing { 34167e102996Smaya nir_lower_tex_packing_none = 0, 34177e102996Smaya /* The sampler returns up to 2 32-bit words of half floats or 16-bit signed 34187e102996Smaya * or unsigned ints based on the sampler type 34197e102996Smaya */ 34207e102996Smaya nir_lower_tex_packing_16, 34217e102996Smaya /* The sampler returns 1 32-bit word of 4x8 unorm */ 34227e102996Smaya nir_lower_tex_packing_8, 34237e102996Smaya}; 34247e102996Smaya 342501e04c3fSmrgtypedef struct nir_lower_tex_options { 342601e04c3fSmrg /** 342701e04c3fSmrg * bitmask of (1 << GLSL_SAMPLER_DIM_x) to control for which 342801e04c3fSmrg * sampler types a texture projector is lowered. 342901e04c3fSmrg */ 343001e04c3fSmrg unsigned lower_txp; 343101e04c3fSmrg 343201e04c3fSmrg /** 343301e04c3fSmrg * If true, lower away nir_tex_src_offset for all texelfetch instructions. 343401e04c3fSmrg */ 343501e04c3fSmrg bool lower_txf_offset; 343601e04c3fSmrg 343701e04c3fSmrg /** 343801e04c3fSmrg * If true, lower away nir_tex_src_offset for all rect textures. 343901e04c3fSmrg */ 344001e04c3fSmrg bool lower_rect_offset; 344101e04c3fSmrg 344201e04c3fSmrg /** 344301e04c3fSmrg * If true, lower rect textures to 2D, using txs to fetch the 344401e04c3fSmrg * texture dimensions and dividing the texture coords by the 344501e04c3fSmrg * texture dims to normalize. 344601e04c3fSmrg */ 344701e04c3fSmrg bool lower_rect; 344801e04c3fSmrg 344901e04c3fSmrg /** 345001e04c3fSmrg * If true, convert yuv to rgb. 345101e04c3fSmrg */ 345201e04c3fSmrg unsigned lower_y_uv_external; 345301e04c3fSmrg unsigned lower_y_u_v_external; 345401e04c3fSmrg unsigned lower_yx_xuxv_external; 345501e04c3fSmrg unsigned lower_xy_uxvx_external; 34567e102996Smaya unsigned lower_ayuv_external; 34577e102996Smaya unsigned lower_xyuv_external; 345801e04c3fSmrg 345901e04c3fSmrg /** 346001e04c3fSmrg * To emulate certain texture wrap modes, this can be used 346101e04c3fSmrg * to saturate the specified tex coord to [0.0, 1.0]. The 346201e04c3fSmrg * bits are according to sampler #, ie. if, for example: 346301e04c3fSmrg * 346401e04c3fSmrg * (conf->saturate_s & (1 << n)) 346501e04c3fSmrg * 346601e04c3fSmrg * is true, then the s coord for sampler n is saturated. 346701e04c3fSmrg * 346801e04c3fSmrg * Note that clamping must happen *after* projector lowering 346901e04c3fSmrg * so any projected texture sample instruction with a clamped 347001e04c3fSmrg * coordinate gets automatically lowered, regardless of the 347101e04c3fSmrg * 'lower_txp' setting. 347201e04c3fSmrg */ 347301e04c3fSmrg unsigned saturate_s; 347401e04c3fSmrg unsigned saturate_t; 347501e04c3fSmrg unsigned saturate_r; 347601e04c3fSmrg 347701e04c3fSmrg /* Bitmask of textures that need swizzling. 347801e04c3fSmrg * 347901e04c3fSmrg * If (swizzle_result & (1 << texture_index)), then the swizzle in 348001e04c3fSmrg * swizzles[texture_index] is applied to the result of the texturing 348101e04c3fSmrg * operation. 348201e04c3fSmrg */ 348301e04c3fSmrg unsigned swizzle_result; 348401e04c3fSmrg 348501e04c3fSmrg /* A swizzle for each texture. Values 0-3 represent x, y, z, or w swizzles 348601e04c3fSmrg * while 4 and 5 represent 0 and 1 respectively. 348701e04c3fSmrg */ 348801e04c3fSmrg uint8_t swizzles[32][4]; 348901e04c3fSmrg 34907e102996Smaya /* Can be used to scale sampled values in range required by the format. */ 34917e102996Smaya float scale_factors[32]; 34927e102996Smaya 349301e04c3fSmrg /** 349401e04c3fSmrg * Bitmap of textures that need srgb to linear conversion. If 349501e04c3fSmrg * (lower_srgb & (1 << texture_index)) then the rgb (xyz) components 349601e04c3fSmrg * of the texture are lowered to linear. 349701e04c3fSmrg */ 349801e04c3fSmrg unsigned lower_srgb; 349901e04c3fSmrg 35007e102996Smaya /** 35017e102996Smaya * If true, lower nir_texop_tex on shaders that doesn't support implicit 35027e102996Smaya * LODs to nir_texop_txl. 35037e102996Smaya */ 35047e102996Smaya bool lower_tex_without_implicit_lod; 35057e102996Smaya 350601e04c3fSmrg /** 350701e04c3fSmrg * If true, lower nir_texop_txd on cube maps with nir_texop_txl. 350801e04c3fSmrg */ 350901e04c3fSmrg bool lower_txd_cube_map; 351001e04c3fSmrg 35117e102996Smaya /** 35127e102996Smaya * If true, lower nir_texop_txd on 3D surfaces with nir_texop_txl. 35137e102996Smaya */ 35147e102996Smaya bool lower_txd_3d; 35157e102996Smaya 351601e04c3fSmrg /** 351701e04c3fSmrg * If true, lower nir_texop_txd on shadow samplers (except cube maps) 351801e04c3fSmrg * with nir_texop_txl. Notice that cube map shadow samplers are lowered 351901e04c3fSmrg * with lower_txd_cube_map. 352001e04c3fSmrg */ 352101e04c3fSmrg bool lower_txd_shadow; 352201e04c3fSmrg 352301e04c3fSmrg /** 352401e04c3fSmrg * If true, lower nir_texop_txd on all samplers to a nir_texop_txl. 352501e04c3fSmrg * Implies lower_txd_cube_map and lower_txd_shadow. 352601e04c3fSmrg */ 352701e04c3fSmrg bool lower_txd; 35287e102996Smaya 35297e102996Smaya /** 35307e102996Smaya * If true, lower nir_texop_txb that try to use shadow compare and min_lod 35317e102996Smaya * at the same time to a nir_texop_lod, some math, and nir_texop_tex. 35327e102996Smaya */ 35337e102996Smaya bool lower_txb_shadow_clamp; 35347e102996Smaya 35357e102996Smaya /** 35367e102996Smaya * If true, lower nir_texop_txd on shadow samplers when it uses min_lod 35377e102996Smaya * with nir_texop_txl. This includes cube maps. 35387e102996Smaya */ 35397e102996Smaya bool lower_txd_shadow_clamp; 35407e102996Smaya 35417e102996Smaya /** 35427e102996Smaya * If true, lower nir_texop_txd on when it uses both offset and min_lod 35437e102996Smaya * with nir_texop_txl. This includes cube maps. 35447e102996Smaya */ 35457e102996Smaya bool lower_txd_offset_clamp; 35467e102996Smaya 35477e102996Smaya /** 35487e102996Smaya * If true, lower nir_texop_txd with min_lod to a nir_texop_txl if the 35497e102996Smaya * sampler is bindless. 35507e102996Smaya */ 35517e102996Smaya bool lower_txd_clamp_bindless_sampler; 35527e102996Smaya 35537e102996Smaya /** 35547e102996Smaya * If true, lower nir_texop_txd with min_lod to a nir_texop_txl if the 35557e102996Smaya * sampler index is not statically determinable to be less than 16. 35567e102996Smaya */ 35577e102996Smaya bool lower_txd_clamp_if_sampler_index_not_lt_16; 35587e102996Smaya 35597e102996Smaya /** 35607e102996Smaya * If true, apply a .bagr swizzle on tg4 results to handle Broadcom's 35617e102996Smaya * mixed-up tg4 locations. 35627e102996Smaya */ 35637e102996Smaya bool lower_tg4_broadcom_swizzle; 35647e102996Smaya 35657e102996Smaya /** 35667e102996Smaya * If true, lowers tg4 with 4 constant offsets to 4 tg4 calls 35677e102996Smaya */ 35687e102996Smaya bool lower_tg4_offsets; 35697e102996Smaya 35707e102996Smaya enum nir_lower_tex_packing lower_tex_packing[32]; 357101e04c3fSmrg} nir_lower_tex_options; 357201e04c3fSmrg 357301e04c3fSmrgbool nir_lower_tex(nir_shader *shader, 357401e04c3fSmrg const nir_lower_tex_options *options); 357501e04c3fSmrg 35767e102996Smayaenum nir_lower_non_uniform_access_type { 35777e102996Smaya nir_lower_non_uniform_ubo_access = (1 << 0), 35787e102996Smaya nir_lower_non_uniform_ssbo_access = (1 << 1), 35797e102996Smaya nir_lower_non_uniform_texture_access = (1 << 2), 35807e102996Smaya nir_lower_non_uniform_image_access = (1 << 3), 35817e102996Smaya}; 35827e102996Smaya 35837e102996Smayabool nir_lower_non_uniform_access(nir_shader *shader, 35847e102996Smaya enum nir_lower_non_uniform_access_type); 35857e102996Smaya 358601e04c3fSmrgbool nir_lower_idiv(nir_shader *shader); 358701e04c3fSmrg 35887e102996Smayabool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables, bool use_vars); 358901e04c3fSmrgbool nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables); 359001e04c3fSmrgbool nir_lower_clip_cull_distance_arrays(nir_shader *nir); 359101e04c3fSmrg 35927e102996Smayabool nir_lower_frexp(nir_shader *nir); 35937e102996Smaya 359401e04c3fSmrgvoid nir_lower_two_sided_color(nir_shader *shader); 359501e04c3fSmrg 359601e04c3fSmrgbool nir_lower_clamp_color_outputs(nir_shader *shader); 359701e04c3fSmrg 359801e04c3fSmrgvoid nir_lower_passthrough_edgeflags(nir_shader *shader); 359901e04c3fSmrgbool nir_lower_patch_vertices(nir_shader *nir, unsigned static_count, 360001e04c3fSmrg const gl_state_index16 *uniform_state_tokens); 360101e04c3fSmrg 360201e04c3fSmrgtypedef struct nir_lower_wpos_ytransform_options { 360301e04c3fSmrg gl_state_index16 state_tokens[STATE_LENGTH]; 360401e04c3fSmrg bool fs_coord_origin_upper_left :1; 360501e04c3fSmrg bool fs_coord_origin_lower_left :1; 360601e04c3fSmrg bool fs_coord_pixel_center_integer :1; 360701e04c3fSmrg bool fs_coord_pixel_center_half_integer :1; 360801e04c3fSmrg} nir_lower_wpos_ytransform_options; 360901e04c3fSmrg 361001e04c3fSmrgbool nir_lower_wpos_ytransform(nir_shader *shader, 361101e04c3fSmrg const nir_lower_wpos_ytransform_options *options); 361201e04c3fSmrgbool nir_lower_wpos_center(nir_shader *shader, const bool for_sample_shading); 361301e04c3fSmrg 36147e102996Smayabool nir_lower_fb_read(nir_shader *shader); 36157e102996Smaya 361601e04c3fSmrgtypedef struct nir_lower_drawpixels_options { 361701e04c3fSmrg gl_state_index16 texcoord_state_tokens[STATE_LENGTH]; 361801e04c3fSmrg gl_state_index16 scale_state_tokens[STATE_LENGTH]; 361901e04c3fSmrg gl_state_index16 bias_state_tokens[STATE_LENGTH]; 362001e04c3fSmrg unsigned drawpix_sampler; 362101e04c3fSmrg unsigned pixelmap_sampler; 362201e04c3fSmrg bool pixel_maps :1; 362301e04c3fSmrg bool scale_and_bias :1; 362401e04c3fSmrg} nir_lower_drawpixels_options; 362501e04c3fSmrg 362601e04c3fSmrgvoid nir_lower_drawpixels(nir_shader *shader, 362701e04c3fSmrg const nir_lower_drawpixels_options *options); 362801e04c3fSmrg 362901e04c3fSmrgtypedef struct nir_lower_bitmap_options { 363001e04c3fSmrg unsigned sampler; 363101e04c3fSmrg bool swizzle_xxxx; 363201e04c3fSmrg} nir_lower_bitmap_options; 363301e04c3fSmrg 363401e04c3fSmrgvoid nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options); 363501e04c3fSmrg 363601e04c3fSmrgbool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset); 36377e102996Smaya 36387e102996Smayatypedef enum { 36397e102996Smaya nir_lower_int_source_mods = 1 << 0, 36407e102996Smaya nir_lower_float_source_mods = 1 << 1, 36417e102996Smaya nir_lower_triop_abs = 1 << 2, 36427e102996Smaya nir_lower_all_source_mods = (1 << 3) - 1 36437e102996Smaya} nir_lower_to_source_mods_flags; 36447e102996Smaya 36457e102996Smaya 36467e102996Smayabool nir_lower_to_source_mods(nir_shader *shader, nir_lower_to_source_mods_flags options); 364701e04c3fSmrg 364801e04c3fSmrgbool nir_lower_gs_intrinsics(nir_shader *shader); 364901e04c3fSmrg 365001e04c3fSmrgtypedef unsigned (*nir_lower_bit_size_callback)(const nir_alu_instr *, void *); 365101e04c3fSmrg 365201e04c3fSmrgbool nir_lower_bit_size(nir_shader *shader, 365301e04c3fSmrg nir_lower_bit_size_callback callback, 365401e04c3fSmrg void *callback_data); 365501e04c3fSmrg 36567e102996Smayanir_lower_int64_options nir_lower_int64_op_to_options_mask(nir_op opcode); 365701e04c3fSmrgbool nir_lower_int64(nir_shader *shader, nir_lower_int64_options options); 365801e04c3fSmrg 36597e102996Smayanir_lower_doubles_options nir_lower_doubles_op_to_options_mask(nir_op opcode); 36607e102996Smayabool nir_lower_doubles(nir_shader *shader, const nir_shader *softfp64, 36617e102996Smaya nir_lower_doubles_options options); 366201e04c3fSmrgbool nir_lower_pack(nir_shader *shader); 366301e04c3fSmrg 366401e04c3fSmrgbool nir_normalize_cubemap_coords(nir_shader *shader); 366501e04c3fSmrg 366601e04c3fSmrgvoid nir_live_ssa_defs_impl(nir_function_impl *impl); 366701e04c3fSmrg 366801e04c3fSmrgvoid nir_loop_analyze_impl(nir_function_impl *impl, 366901e04c3fSmrg nir_variable_mode indirect_mask); 367001e04c3fSmrg 367101e04c3fSmrgbool nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b); 367201e04c3fSmrg 367301e04c3fSmrgbool nir_repair_ssa_impl(nir_function_impl *impl); 367401e04c3fSmrgbool nir_repair_ssa(nir_shader *shader); 367501e04c3fSmrg 367601e04c3fSmrgvoid nir_convert_loop_to_lcssa(nir_loop *loop); 367701e04c3fSmrg 367801e04c3fSmrg/* If phi_webs_only is true, only convert SSA values involved in phi nodes to 367901e04c3fSmrg * registers. If false, convert all values (even those not involved in a phi 368001e04c3fSmrg * node) to registers. 368101e04c3fSmrg */ 368201e04c3fSmrgbool nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only); 368301e04c3fSmrg 368401e04c3fSmrgbool nir_lower_phis_to_regs_block(nir_block *block); 368501e04c3fSmrgbool nir_lower_ssa_defs_to_regs_block(nir_block *block); 368601e04c3fSmrgbool nir_rematerialize_derefs_in_use_blocks_impl(nir_function_impl *impl); 368701e04c3fSmrg 36887e102996Smaya/* This is here for unit tests. */ 36897e102996Smayabool nir_opt_comparison_pre_impl(nir_function_impl *impl); 36907e102996Smaya 36917e102996Smayabool nir_opt_comparison_pre(nir_shader *shader); 36927e102996Smaya 369301e04c3fSmrgbool nir_opt_algebraic(nir_shader *shader); 369401e04c3fSmrgbool nir_opt_algebraic_before_ffma(nir_shader *shader); 369501e04c3fSmrgbool nir_opt_algebraic_late(nir_shader *shader); 369601e04c3fSmrgbool nir_opt_constant_folding(nir_shader *shader); 369701e04c3fSmrg 36987e102996Smayabool nir_opt_combine_stores(nir_shader *shader, nir_variable_mode modes); 369901e04c3fSmrg 370001e04c3fSmrgbool nir_copy_prop(nir_shader *shader); 370101e04c3fSmrg 370201e04c3fSmrgbool nir_opt_copy_prop_vars(nir_shader *shader); 370301e04c3fSmrg 370401e04c3fSmrgbool nir_opt_cse(nir_shader *shader); 370501e04c3fSmrg 370601e04c3fSmrgbool nir_opt_dce(nir_shader *shader); 370701e04c3fSmrg 370801e04c3fSmrgbool nir_opt_dead_cf(nir_shader *shader); 370901e04c3fSmrg 371001e04c3fSmrgbool nir_opt_dead_write_vars(nir_shader *shader); 371101e04c3fSmrg 37127e102996Smayabool nir_opt_deref_impl(nir_function_impl *impl); 37137e102996Smayabool nir_opt_deref(nir_shader *shader); 37147e102996Smaya 371501e04c3fSmrgbool nir_opt_find_array_copies(nir_shader *shader); 371601e04c3fSmrg 371701e04c3fSmrgbool nir_opt_gcm(nir_shader *shader, bool value_number); 371801e04c3fSmrg 37197e102996Smayabool nir_opt_idiv_const(nir_shader *shader, unsigned min_bit_size); 37207e102996Smaya 37217e102996Smayabool nir_opt_if(nir_shader *shader, bool aggressive_last_continue); 372201e04c3fSmrg 372301e04c3fSmrgbool nir_opt_intrinsics(nir_shader *shader); 372401e04c3fSmrg 372501e04c3fSmrgbool nir_opt_large_constants(nir_shader *shader, 372601e04c3fSmrg glsl_type_size_align_func size_align, 372701e04c3fSmrg unsigned threshold); 372801e04c3fSmrg 372901e04c3fSmrgbool nir_opt_loop_unroll(nir_shader *shader, nir_variable_mode indirect_mask); 373001e04c3fSmrg 373101e04c3fSmrgbool nir_opt_move_comparisons(nir_shader *shader); 373201e04c3fSmrg 373301e04c3fSmrgbool nir_opt_move_load_ubo(nir_shader *shader); 373401e04c3fSmrg 37357e102996Smayabool nir_opt_peephole_select(nir_shader *shader, unsigned limit, 37367e102996Smaya bool indirect_load_ok, bool expensive_alu_ok); 373701e04c3fSmrg 373801e04c3fSmrgbool nir_opt_remove_phis(nir_shader *shader); 37397e102996Smayabool nir_opt_remove_phis_block(nir_block *block); 374001e04c3fSmrg 374101e04c3fSmrgbool nir_opt_shrink_load(nir_shader *shader); 374201e04c3fSmrg 374301e04c3fSmrgbool nir_opt_trivial_continues(nir_shader *shader); 374401e04c3fSmrg 374501e04c3fSmrgbool nir_opt_undef(nir_shader *shader); 374601e04c3fSmrg 374701e04c3fSmrgbool nir_opt_conditional_discard(nir_shader *shader); 374801e04c3fSmrg 37497e102996Smayavoid nir_strip(nir_shader *shader); 37507e102996Smaya 375101e04c3fSmrgvoid nir_sweep(nir_shader *shader); 375201e04c3fSmrg 375301e04c3fSmrgvoid nir_remap_dual_slot_attributes(nir_shader *shader, 375401e04c3fSmrg uint64_t *dual_slot_inputs); 375501e04c3fSmrguint64_t nir_get_single_slot_attribs_mask(uint64_t attribs, uint64_t dual_slot); 375601e04c3fSmrg 375701e04c3fSmrgnir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val); 375801e04c3fSmrggl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin); 375901e04c3fSmrg 376001e04c3fSmrg#ifdef __cplusplus 376101e04c3fSmrg} /* extern "C" */ 376201e04c3fSmrg#endif 376301e04c3fSmrg 376401e04c3fSmrg#endif /* NIR_H */ 3765