/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
22 */ 23 24#ifndef BRW_SHADER_H 25#define BRW_SHADER_H 26 27#include <stdint.h> 28#include "brw_cfg.h" 29#include "brw_compiler.h" 30#include "compiler/nir/nir.h" 31 32#ifdef __cplusplus 33#include "brw_ir_analysis.h" 34#include "brw_ir_allocator.h" 35 36enum instruction_scheduler_mode { 37 SCHEDULE_PRE, 38 SCHEDULE_PRE_NON_LIFO, 39 SCHEDULE_PRE_LIFO, 40 SCHEDULE_POST, 41}; 42 43#define UBO_START ((1 << 16) - 4) 44 45struct backend_shader { 46protected: 47 48 backend_shader(const struct brw_compiler *compiler, 49 void *log_data, 50 void *mem_ctx, 51 const nir_shader *shader, 52 struct brw_stage_prog_data *stage_prog_data, 53 bool debug_enabled); 54 55public: 56 virtual ~backend_shader(); 57 58 const struct brw_compiler *compiler; 59 void *log_data; /* Passed to compiler->*_log functions */ 60 61 const struct intel_device_info * const devinfo; 62 const nir_shader *nir; 63 struct brw_stage_prog_data * const stage_prog_data; 64 65 /** ralloc context for temporary data used during compile */ 66 void *mem_ctx; 67 68 /** 69 * List of either fs_inst or vec4_instruction (inheriting from 70 * backend_instruction) 71 */ 72 exec_list instructions; 73 74 cfg_t *cfg; 75 brw_analysis<brw::idom_tree, backend_shader> idom_analysis; 76 77 gl_shader_stage stage; 78 bool debug_enabled; 79 const char *stage_name; 80 const char *stage_abbrev; 81 82 brw::simple_allocator alloc; 83 84 virtual void dump_instruction(const backend_instruction *inst) const = 0; 85 virtual void dump_instruction(const backend_instruction *inst, FILE *file) const = 0; 86 virtual void dump_instructions() const; 87 virtual void dump_instructions(const char *name) const; 88 89 void calculate_cfg(); 90 91 virtual void invalidate_analysis(brw::analysis_dependency_class c); 92}; 93 94#else 95struct backend_shader; 96#endif /* __cplusplus */ 97 98enum brw_reg_type brw_type_for_base_type(const struct glsl_type *type); 99enum brw_conditional_mod brw_conditional_for_comparison(unsigned int op); 100uint32_t 
brw_math_function(enum opcode op); 101const char *brw_instruction_name(const struct intel_device_info *devinfo, 102 enum opcode op); 103bool brw_saturate_immediate(enum brw_reg_type type, struct brw_reg *reg); 104bool brw_negate_immediate(enum brw_reg_type type, struct brw_reg *reg); 105bool brw_abs_immediate(enum brw_reg_type type, struct brw_reg *reg); 106 107bool opt_predicated_break(struct backend_shader *s); 108 109#ifdef __cplusplus 110extern "C" { 111#endif 112 113/* brw_fs_reg_allocate.cpp */ 114void brw_fs_alloc_reg_sets(struct brw_compiler *compiler); 115 116/* brw_vec4_reg_allocate.cpp */ 117void brw_vec4_alloc_reg_set(struct brw_compiler *compiler); 118 119/* brw_disasm.c */ 120extern const char *const conditional_modifier[16]; 121extern const char *const pred_ctrl_align16[16]; 122 123/* Per-thread scratch space is a power-of-two multiple of 1KB. */ 124static inline int 125brw_get_scratch_size(int size) 126{ 127 return MAX2(1024, util_next_power_of_two(size)); 128} 129 130 131static inline nir_variable_mode 132brw_nir_no_indirect_mask(const struct brw_compiler *compiler, 133 gl_shader_stage stage) 134{ 135 const struct intel_device_info *devinfo = compiler->devinfo; 136 const bool is_scalar = compiler->scalar_stage[stage]; 137 nir_variable_mode indirect_mask = (nir_variable_mode) 0; 138 139 switch (stage) { 140 case MESA_SHADER_VERTEX: 141 case MESA_SHADER_FRAGMENT: 142 indirect_mask |= nir_var_shader_in; 143 break; 144 145 case MESA_SHADER_GEOMETRY: 146 if (!is_scalar) 147 indirect_mask |= nir_var_shader_in; 148 break; 149 150 default: 151 /* Everything else can handle indirect inputs */ 152 break; 153 } 154 155 if (is_scalar && stage != MESA_SHADER_TESS_CTRL) 156 indirect_mask |= nir_var_shader_out; 157 158 /* On HSW+, we allow indirects in scalar shaders. They get implemented 159 * using nir_lower_vars_to_explicit_types and nir_lower_explicit_io in 160 * brw_postprocess_nir. 
161 * 162 * We haven't plumbed through the indirect scratch messages on gfx6 or 163 * earlier so doing indirects via scratch doesn't work there. On gfx7 and 164 * earlier the scratch space size is limited to 12kB. If we allowed 165 * indirects as scratch all the time, we may easily exceed this limit 166 * without having any fallback. 167 */ 168 if (is_scalar && devinfo->verx10 <= 70) 169 indirect_mask |= nir_var_function_temp; 170 171 return indirect_mask; 172} 173 174bool brw_texture_offset(const nir_tex_instr *tex, unsigned src, 175 uint32_t *offset_bits); 176 177/** 178 * Scratch data used when compiling a GLSL geometry shader. 179 */ 180struct brw_gs_compile 181{ 182 struct brw_gs_prog_key key; 183 struct brw_vue_map input_vue_map; 184 185 unsigned control_data_bits_per_vertex; 186 unsigned control_data_header_size_bits; 187}; 188 189#ifdef __cplusplus 190} 191#endif 192 193#endif /* BRW_SHADER_H */ 194