/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "nir/nir_builder.h"

/**
 * This file implements the lowering required for VK_KHR_multiview.  We
 * implement multiview using instanced rendering.  The number of instances in
 * each draw call is multiplied by the number of views in the subpass.  Then,
 * in the shader, we divide gl_InstanceId by the number of views to recover
 * the application's instance ID and use gl_InstanceId % view_count to
 * compute the actual ViewIndex.
 */

struct lower_multiview_state {
   nir_builder builder;

   uint32_t view_mask;

   nir_ssa_def *instance_id;
   nir_ssa_def *view_index;
};

static nir_ssa_def *
build_instance_id(struct lower_multiview_state *state)
{
   assert(state->builder.shader->info.stage == MESA_SHADER_VERTEX);

   if (state->instance_id == NULL) {
      nir_builder *b = &state->builder;

      b->cursor = nir_before_block(nir_start_block(b->impl));

      /* We use instancing for implementing multiview.  The actual instance
       * id is given by dividing instance_id by the number of views in this
       * subpass.
       */
      state->instance_id =
         nir_idiv(b, nir_load_instance_id(b),
                     nir_imm_int(b, util_bitcount(state->view_mask)));
   }

   return state->instance_id;
}

static nir_ssa_def *
build_view_index(struct lower_multiview_state *state)
{
   if (state->view_index == NULL) {
      nir_builder *b = &state->builder;

      b->cursor = nir_before_block(nir_start_block(b->impl));

      assert(state->view_mask != 0);
      if (util_bitcount(state->view_mask) == 1) {
         /* Set the view index directly. */
         state->view_index = nir_imm_int(b, ffs(state->view_mask) - 1);
      } else if (state->builder.shader->info.stage == MESA_SHADER_VERTEX) {
         /* We only support 16 views. */
         assert((state->view_mask & 0xffff0000) == 0);

         /* We use instancing for implementing multiview.  The compacted
          * view id is given by instance_id % view_count.  We then have to
          * convert that to an actual view id.
          */
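         /* Illustrative example (values are ours, not from the original
          * source): with view_mask = 0x5 the subpass has two views, 0 and
          * 2.  A draw of N instances is issued as 2*N hardware instances,
          * so instance_id % 2 yields the compacted sequence 0,1,0,1,...
          * which the remapping below turns into view indices 0,2,0,2,...
          */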
         nir_ssa_def *compacted =
            nir_umod(b, nir_load_instance_id(b),
                        nir_imm_int(b, util_bitcount(state->view_mask)));

         if (util_is_power_of_two_or_zero(state->view_mask + 1)) {
            /* If we have a full view mask (view_mask + 1 is a power of two,
             * i.e. every view from 0 to view_count - 1 is enabled), then
             * compacted is already the actual view index.
             */
            state->view_index = compacted;
         } else {
            /* Now we define a map from compacted view index to the actual
             * view index that's based on the view_mask.  The map is given
             * by 16 nibbles, each of which is a value from 0 to 15.  For
             * example, view_mask = 0xd (views 0, 2, and 3) gives
             * remap = 0x320: compacted index 0 maps to view 0, 1 to view 2,
             * and 2 to view 3.
             */
            uint64_t remap = 0;
            uint32_t bit, i = 0;
            for_each_bit(bit, state->view_mask) {
               assert(bit < 16);
               remap |= (uint64_t)bit << (i++ * 4);
            }

            nir_ssa_def *shift = nir_imul(b, compacted, nir_imm_int(b, 4));

            /* One of these days, when we have int64 everywhere, this will
             * be easier.
             */
            nir_ssa_def *shifted;
            if (remap <= UINT32_MAX) {
               shifted = nir_ushr(b, nir_imm_int(b, remap), shift);
            } else {
               /* The remap constant doesn't fit in 32 bits, so shift both
                * halves and select the right one based on the shift count.
                */
               nir_ssa_def *shifted_low =
                  nir_ushr(b, nir_imm_int(b, remap), shift);
               nir_ssa_def *shifted_high =
                  nir_ushr(b, nir_imm_int(b, remap >> 32),
                              nir_isub(b, shift, nir_imm_int(b, 32)));
               shifted = nir_bcsel(b, nir_ilt(b, shift, nir_imm_int(b, 32)),
                                      shifted_low, shifted_high);
            }
            state->view_index = nir_iand(b, shifted, nir_imm_int(b, 0xf));
         }
      } else {
         const struct glsl_type *type = glsl_int_type();
         if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
             b->shader->info.stage == MESA_SHADER_GEOMETRY)
            type = glsl_array_type(type, 1, 0);

         nir_variable *idx_var =
            nir_variable_create(b->shader, nir_var_shader_in,
                                type, "view index");
         idx_var->data.location = VARYING_SLOT_VIEW_INDEX;
         if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
            idx_var->data.interpolation = INTERP_MODE_FLAT;

         nir_deref_instr *deref = nir_build_deref_var(b, idx_var);
         if (glsl_type_is_array(type))
            deref = nir_build_deref_array_imm(b, deref, 0);

         state->view_index = nir_load_deref(b, deref);
      }
   }

   return state->view_index;
}

bool
anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask)
{
   assert(shader->info.stage != MESA_SHADER_COMPUTE);

   /* If multiview isn't enabled, we have nothing to do. */
   if (view_mask == 0)
      return false;

   struct lower_multiview_state state = {
      .view_mask = view_mask,
   };

   /* This pass assumes a single entrypoint. */
   nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader);

   nir_builder_init(&state.builder, entrypoint);

   bool progress = false;
   nir_foreach_block(block, entrypoint) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);

         if (load->intrinsic != nir_intrinsic_load_instance_id &&
             load->intrinsic != nir_intrinsic_load_view_index)
            continue;

         assert(load->dest.is_ssa);

         nir_ssa_def *value;
         if (load->intrinsic == nir_intrinsic_load_instance_id) {
            value = build_instance_id(&state);
         } else {
            assert(load->intrinsic == nir_intrinsic_load_view_index);
            value = build_view_index(&state);
         }

         nir_ssa_def_rewrite_uses(&load->dest.ssa, nir_src_for_ssa(value));

         nir_instr_remove(&load->instr);
         progress = true;
      }
   }

   /* The view index is available in all stages, but the instance id is only
    * available in the VS.  If this is not a fragment shader, we need to pass
    * the view index on to the next stage.
    */
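   /* A sketch of what the block below appends for a shader with more than
    * one view enabled (our illustration, not from the original source); in
    * GLSL terms it is roughly:
    *
    *    out int gl_ViewIndex;   // VARYING_SLOT_VIEW_INDEX, read downstream
    *    out int gl_Layer;       // VARYING_SLOT_LAYER, steers the output to
    *                            // the matching render-target array layer
    *    gl_ViewIndex = view_index;
    *    gl_Layer     = view_index;
    */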
   if (shader->info.stage != MESA_SHADER_FRAGMENT) {
      nir_ssa_def *view_index = build_view_index(&state);

      nir_builder *b = &state.builder;

      assert(view_index->parent_instr->block == nir_start_block(entrypoint));
      b->cursor = nir_after_instr(view_index->parent_instr);

      /* Unless there is only one possible view index (that would be set
       * directly), pass it to the next stage.
       */
      if (util_bitcount(state.view_mask) != 1) {
         nir_variable *view_index_out =
            nir_variable_create(shader, nir_var_shader_out,
                                glsl_int_type(), "view index");
         view_index_out->data.location = VARYING_SLOT_VIEW_INDEX;
         nir_store_var(b, view_index_out, view_index, 0x1);
      }

      nir_variable *layer_id_out =
         nir_variable_create(shader, nir_var_shader_out,
                             glsl_int_type(), "layer ID");
      layer_id_out->data.location = VARYING_SLOT_LAYER;
      nir_store_var(b, layer_id_out, view_index, 0x1);

      progress = true;
   }

   if (progress) {
      nir_metadata_preserve(entrypoint, nir_metadata_block_index |
                                        nir_metadata_dominance);
   }

   return progress;
}
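
/* Example call site (a sketch under our assumptions, not from this file):
 * a pipeline-compile path would typically run this pass per stage with the
 * subpass's view mask, e.g.
 *
 *    NIR_PASS(progress, nir, anv_nir_lower_multiview, subpass->view_mask);
 *
 * where subpass->view_mask is a hypothetical handle to the render pass
 * subpass's VK_KHR_multiview view mask.
 */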