/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file brw_vec4_gs_visitor.cpp
 *
 * Geometry-shader-specific code derived from the vec4_visitor class.
 */

#include "brw_vec4_gs_visitor.h"
#include "gen6_gs_visitor.h"
#include "brw_cfg.h"
#include "brw_fs.h"
#include "brw_nir.h"
#include "dev/gen_debug.h"

namespace brw {

vec4_gs_visitor::vec4_gs_visitor(const struct brw_compiler *compiler,
                                 void *log_data,
                                 struct brw_gs_compile *c,
                                 struct brw_gs_prog_data *prog_data,
                                 const nir_shader *shader,
                                 void *mem_ctx,
                                 bool no_spills,
                                 int shader_time_index)
   : vec4_visitor(compiler, log_data, &c->key.tex,
                  &prog_data->base, shader, mem_ctx,
                  no_spills, shader_time_index),
     c(c),
     gs_prog_data(prog_data)
{
}


static inline struct brw_reg
attribute_to_hw_reg(int attr, brw_reg_type type, bool interleaved)
{
   struct brw_reg reg;

   unsigned width = REG_SIZE / 2 / MAX2(4, type_sz(type));
   if (interleaved) {
      reg = stride(brw_vecn_grf(width, attr / 2, (attr % 2) * 4), 0, width, 1);
   } else {
      reg = brw_vecn_grf(width, attr, 0);
   }

   reg.type = type;
   return reg;
}
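/* Worked example: with 32-byte GRFs (REG_SIZE == 32) and a 4-byte type such
 * as float, width == 32 / 2 / 4 == 4, i.e. one vec4 per attribute slot.  In
 * interleaved mode, attribute 5 then maps to the second half of GRF 2
 * (5 / 2 == 2 selects the register, (5 % 2) * 4 == 4 selects the high four
 * dwords); in non-interleaved mode, attribute 5 occupies the low half of
 * GRF 5.
 */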
/**
 * Replace each register of type ATTR in this->instructions with a reference
 * to a fixed HW register.
 *
 * If interleaved is true, then each attribute takes up half a register, with
 * register N containing attribute 2*N in its first half and attribute 2*N+1
 * in its second half (this corresponds to the payload setup used by geometry
 * shaders in "single" or "dual instanced" dispatch mode).  If interleaved is
 * false, then each attribute takes up a whole register, with register N
 * containing attribute N (this corresponds to the payload setup used by
 * vertex shaders, and by geometry shaders in "dual object" dispatch mode).
 */
int
vec4_gs_visitor::setup_varying_inputs(int payload_reg,
                                      int attributes_per_reg)
{
   /* For geometry shaders there are N copies of the input attributes, where N
    * is the number of input vertices.  attribute_map[BRW_VARYING_SLOT_COUNT *
    * i + j] represents attribute j for vertex i.
    *
    * Note that GS inputs are read from the VUE 256 bits (2 vec4's) at a time,
    * so the total number of input slots that will be delivered to the GS (and
    * thus the stride of the input arrays) is urb_read_length * 2.
    */
   const unsigned num_input_vertices = nir->info.gs.vertices_in;
   assert(num_input_vertices <= MAX_GS_INPUT_VERTICES);
   unsigned input_array_stride = prog_data->urb_read_length * 2;

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != ATTR)
            continue;

         assert(inst->src[i].offset % REG_SIZE == 0);
         int grf = payload_reg * attributes_per_reg +
                   inst->src[i].nr + inst->src[i].offset / REG_SIZE;

         struct brw_reg reg =
            attribute_to_hw_reg(grf, inst->src[i].type, attributes_per_reg > 1);
         reg.swizzle = inst->src[i].swizzle;
         if (inst->src[i].abs)
            reg = brw_abs(reg);
         if (inst->src[i].negate)
            reg = negate(reg);

         inst->src[i] = reg;
      }
   }

   int regs_used = ALIGN(input_array_stride * num_input_vertices,
                         attributes_per_reg) / attributes_per_reg;
   return payload_reg + regs_used;
}
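/* Worked example: a triangle GS (3 input vertices) whose inputs need
 * urb_read_length == 2 has input_array_stride == 4, so ATTR slot i * 4 + j
 * holds attribute j of vertex i.  In an interleaved mode
 * (attributes_per_reg == 2) those 12 slots occupy ALIGN(12, 2) / 2 == 6
 * payload GRFs; in dual object mode they occupy 12.
 */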
void
vec4_gs_visitor::setup_payload()
{
   /* If we are in dual instanced or single mode, then attributes are going
    * to be interleaved, so one register contains two attribute slots.
    */
   int attributes_per_reg =
      prog_data->dispatch_mode == DISPATCH_MODE_4X2_DUAL_OBJECT ? 1 : 2;

   int reg = 0;

   /* The payload always contains important data in r0, which contains
    * the URB handles that are passed on to the URB write at the end
    * of the thread.
    */
   reg++;

   /* If the shader uses gl_PrimitiveIDIn, that goes in r1. */
   if (gs_prog_data->include_primitive_id)
      reg++;

   reg = setup_uniforms(reg);

   reg = setup_varying_inputs(reg, attributes_per_reg);

   this->first_non_payload_grf = reg;
}
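/* Resulting payload layout, for illustration: r0 holds the URB handles,
 * r1 holds gl_PrimitiveIDIn when the shader reads it, push constants follow,
 * and the per-vertex inputs come last.  first_non_payload_grf marks the
 * first GRF the register allocator may use for temporaries.
 */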
void
vec4_gs_visitor::emit_prolog()
{
   /* In vertex shaders, r0.2 is guaranteed to be initialized to zero.  In
    * geometry shaders, it isn't (it contains a bunch of information we don't
    * need, like the input primitive type).  We need r0.2 to be zero in order
    * to build scratch read/write messages correctly (otherwise this value
    * will be interpreted as a global offset, causing us to do our scratch
    * reads/writes to garbage memory).  So just set it to zero at the top of
    * the shader.
    */
   this->current_annotation = "clear r0.2";
   dst_reg r0(retype(brw_vec4_grf(0, 0), BRW_REGISTER_TYPE_UD));
   vec4_instruction *inst = emit(GS_OPCODE_SET_DWORD_2, r0, brw_imm_ud(0u));
   inst->force_writemask_all = true;

   /* Create a virtual register to hold the vertex count */
   this->vertex_count = src_reg(this, glsl_type::uint_type);

   /* Initialize the vertex_count register to 0 */
   this->current_annotation = "initialize vertex_count";
   inst = emit(MOV(dst_reg(this->vertex_count), brw_imm_ud(0u)));
   inst->force_writemask_all = true;

   if (c->control_data_header_size_bits > 0) {
      /* Create a virtual register to hold the current set of control data
       * bits.
       */
      this->control_data_bits = src_reg(this, glsl_type::uint_type);

      /* If we're outputting more than 32 control data bits, then EmitVertex()
       * will set control_data_bits to 0 after emitting the first vertex.
       * Otherwise, we need to initialize it to 0 here.
       */
      if (c->control_data_header_size_bits <= 32) {
         this->current_annotation = "initialize control data bits";
         inst = emit(MOV(dst_reg(this->control_data_bits), brw_imm_ud(0u)));
         inst->force_writemask_all = true;
      }
   }

   this->current_annotation = NULL;
}

void
vec4_gs_visitor::emit_thread_end()
{
   if (c->control_data_header_size_bits > 0) {
      /* During shader execution, we only ever call emit_control_data_bits()
       * just prior to outputting a vertex.  Therefore, the control data bits
       * corresponding to the most recently output vertex still need to be
       * emitted.
       */
      current_annotation = "thread end: emit control data bits";
      emit_control_data_bits();
   }

   /* MRF 0 is reserved for the debugger, so start with message header
    * in MRF 1.
    */
   int base_mrf = 1;

   bool static_vertex_count = gs_prog_data->static_vertex_count != -1;

   /* If the previous instruction was a URB write, we don't need to issue
    * a second one - we can just set the EOT bit on the previous write.
    *
    * Skip this on Gen8+ unless there's a static vertex count, as we also
    * need to write the vertex count out, and combining the two may not be
    * possible (or at least not straightforward).  (On Gen7 the merge never
    * applies, since the vertex count must be packed into the header of the
    * thread-ending URB write.)
    */
   vec4_instruction *last = (vec4_instruction *) instructions.get_tail();
   if (last && last->opcode == GS_OPCODE_URB_WRITE &&
       !(INTEL_DEBUG & DEBUG_SHADER_TIME) &&
       devinfo->gen >= 8 && static_vertex_count) {
      last->urb_write_flags = BRW_URB_WRITE_EOT | last->urb_write_flags;
      return;
   }

   current_annotation = "thread end";
   dst_reg mrf_reg(MRF, base_mrf);
   src_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   vec4_instruction *inst = emit(MOV(mrf_reg, r0));
   inst->force_writemask_all = true;
   if (devinfo->gen < 8 || !static_vertex_count)
      emit(GS_OPCODE_SET_VERTEX_COUNT, mrf_reg, this->vertex_count);
   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      emit_shader_time_end();
   inst = emit(GS_OPCODE_THREAD_END);
   inst->base_mrf = base_mrf;
   inst->mlen = devinfo->gen >= 8 && !static_vertex_count ? 2 : 1;
}


void
vec4_gs_visitor::emit_urb_write_header(int mrf)
{
   /* The SEND instruction that writes the vertex data to the VUE will use
    * per_slot_offset=true, which means that DWORDs 3 and 4 of the message
    * header specify an offset (in multiples of 256 bits) into the URB entry
    * at which the write should take place.
    *
    * So we have to prepare a message header with the appropriate offset
    * values.
    */
   dst_reg mrf_reg(MRF, mrf);
   src_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   this->current_annotation = "URB write header";
   vec4_instruction *inst = emit(MOV(mrf_reg, r0));
   inst->force_writemask_all = true;
   emit(GS_OPCODE_SET_WRITE_OFFSET, mrf_reg, this->vertex_count,
        brw_imm_ud(gs_prog_data->output_vertex_size_hwords));
}


vec4_instruction *
vec4_gs_visitor::emit_urb_write_opcode(bool complete)
{
   /* We don't care whether the vertex is complete, because in general
    * geometry shaders output multiple vertices, and we don't terminate the
    * thread until all vertices are complete.
    */
   (void) complete;

   vec4_instruction *inst = emit(GS_OPCODE_URB_WRITE);
   inst->offset = gs_prog_data->control_data_header_size_hwords;

   /* We need to increment Global Offset by 1 to make room for Broadwell's
    * extra "Vertex Count" payload at the beginning of the URB entry.
    */
   if (devinfo->gen >= 8 && gs_prog_data->static_vertex_count == -1)
      inst->offset++;

   inst->urb_write_flags = BRW_URB_WRITE_PER_SLOT_OFFSET;
   return inst;
}
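/* Worked example: with a one-HWord control data header, per-vertex data
 * starts at Global Offset 1; on Gen8+ with a dynamic vertex count the extra
 * "Vertex Count" slot shifts that to 2.  GS_OPCODE_SET_WRITE_OFFSET (emitted
 * by emit_urb_write_header above) then adds vertex_count *
 * output_vertex_size_hwords on top through the per-slot offset.
 */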
/**
 * Write out a batch of 32 control data bits from the control_data_bits
 * register to the URB.
 *
 * The current value of the vertex_count register determines which DWORD in
 * the URB receives the control data bits.  The control_data_bits register is
 * assumed to contain the correct data for the vertex that was most recently
 * output, and all previous vertices that share the same DWORD.
 *
 * This function takes care of ensuring that if no vertices have been output
 * yet, no control bits are emitted.
 */
void
vec4_gs_visitor::emit_control_data_bits()
{
   assert(c->control_data_bits_per_vertex != 0);

   /* Since the URB_WRITE_OWORD message operates with 128-bit (vec4 sized)
    * granularity, we need to use two tricks to ensure that the batch of 32
    * control data bits is written to the appropriate DWORD in the URB.  To
    * select which vec4 we are writing to, we use the "slot {0,1} offset"
    * fields of the message header.  To select which DWORD in the vec4 we are
    * writing to, we use the channel mask fields of the message header.  To
    * avoid penalizing geometry shaders that emit a small number of vertices
    * with extra bookkeeping, we only do each of these tricks when
    * c->control_data_header_size_bits is large enough to make it necessary.
    *
    * Note: this means that if we're outputting just a single DWORD of control
    * data bits, we'll actually replicate it four times since we won't do any
    * channel masking.  But that's not a problem since in this case the
    * hardware only pays attention to the first DWORD.
    */
   enum brw_urb_write_flags urb_write_flags = BRW_URB_WRITE_OWORD;
   if (c->control_data_header_size_bits > 32)
      urb_write_flags = urb_write_flags | BRW_URB_WRITE_USE_CHANNEL_MASKS;
   if (c->control_data_header_size_bits > 128)
      urb_write_flags = urb_write_flags | BRW_URB_WRITE_PER_SLOT_OFFSET;

   /* If we are using either channel masks or a per-slot offset, then we
    * need to figure out which DWORD we are trying to write to, using the
    * formula:
    *
    *     dword_index = (vertex_count - 1) * bits_per_vertex / 32
    *
    * Since bits_per_vertex is a power of two, and is known at compile
    * time, this can be optimized to:
    *
    *     dword_index = (vertex_count - 1) >> (6 - log2(bits_per_vertex))
    */
   src_reg dword_index(this, glsl_type::uint_type);
   if (urb_write_flags) {
      src_reg prev_count(this, glsl_type::uint_type);
      emit(ADD(dst_reg(prev_count), this->vertex_count,
               brw_imm_ud(0xffffffffu)));
      unsigned log2_bits_per_vertex =
         util_last_bit(c->control_data_bits_per_vertex);
      emit(SHR(dst_reg(dword_index), prev_count,
               brw_imm_ud(6 - log2_bits_per_vertex)));
   }

   /* Start building the URB write message.  The first MRF gets a copy of
    * R0.
    */
   int base_mrf = 1;
   dst_reg mrf_reg(MRF, base_mrf);
   src_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   vec4_instruction *inst = emit(MOV(mrf_reg, r0));
   inst->force_writemask_all = true;

   if (urb_write_flags & BRW_URB_WRITE_PER_SLOT_OFFSET) {
      /* Set the per-slot offset to dword_index / 4, so that we'll write to
       * the appropriate OWORD within the control data header.
       */
      src_reg per_slot_offset(this, glsl_type::uint_type);
      emit(SHR(dst_reg(per_slot_offset), dword_index, brw_imm_ud(2u)));
      emit(GS_OPCODE_SET_WRITE_OFFSET, mrf_reg, per_slot_offset,
           brw_imm_ud(1u));
   }

   if (urb_write_flags & BRW_URB_WRITE_USE_CHANNEL_MASKS) {
      /* Set the channel masks to 1 << (dword_index % 4), so that we'll
       * write to the appropriate DWORD within the OWORD.  We need to do
       * this computation with force_writemask_all, otherwise garbage data
       * from invocation 0 might clobber the mask for invocation 1 when
       * GS_OPCODE_PREPARE_CHANNEL_MASKS tries to OR the two masks
       * together.
       */
      src_reg channel(this, glsl_type::uint_type);
      inst = emit(AND(dst_reg(channel), dword_index, brw_imm_ud(3u)));
      inst->force_writemask_all = true;
      src_reg one(this, glsl_type::uint_type);
      inst = emit(MOV(dst_reg(one), brw_imm_ud(1u)));
      inst->force_writemask_all = true;
      src_reg channel_mask(this, glsl_type::uint_type);
      inst = emit(SHL(dst_reg(channel_mask), one, channel));
      inst->force_writemask_all = true;
      emit(GS_OPCODE_PREPARE_CHANNEL_MASKS, dst_reg(channel_mask),
           channel_mask);
      emit(GS_OPCODE_SET_CHANNEL_MASKS, mrf_reg, channel_mask);
   }

   /* Store the control data bits in the message payload and send it. */
   dst_reg mrf_reg2(MRF, base_mrf + 1);
   inst = emit(MOV(mrf_reg2, this->control_data_bits));
   inst->force_writemask_all = true;
   inst = emit(GS_OPCODE_URB_WRITE);
   inst->urb_write_flags = urb_write_flags;
   /* We need to increment Global Offset by 256-bits to make room for
    * Broadwell's extra "Vertex Count" payload at the beginning of the
    * URB entry.  Since this is an OWord message, Global Offset is counted
    * in 128-bit units, so we must set it to 2.
    */
   if (devinfo->gen >= 8 && gs_prog_data->static_vertex_count == -1)
      inst->offset = 2;
   inst->base_mrf = base_mrf;
   inst->mlen = 2;
}
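/* Worked example: with bits_per_vertex == 2, util_last_bit(2) == 2, so
 * dword_index == (vertex_count - 1) >> 4.  When the most recent vertex is
 * number 49 (vertex_count == 49), dword_index == 48 >> 4 == 3: the per-slot
 * offset is 3 / 4 == 0 (the first OWord) and the channel mask selects
 * DWORD 3 % 4 == 3 within it.
 */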
void
vec4_gs_visitor::set_stream_control_data_bits(unsigned stream_id)
{
   /* control_data_bits |= stream_id << ((2 * (vertex_count - 1)) % 32) */

   /* Note: we are calling this *before* increasing vertex_count, so
    * this->vertex_count == vertex_count - 1 in the formula above.
    */

   /* Stream mode uses 2 bits per vertex */
   assert(c->control_data_bits_per_vertex == 2);

   /* Must be a valid stream */
   assert(stream_id < MAX_VERTEX_STREAMS);

   /* Control data bits are initialized to 0 so we don't have to set any
    * bits when sending vertices to stream 0.
    */
   if (stream_id == 0)
      return;

   /* reg::sid = stream_id */
   src_reg sid(this, glsl_type::uint_type);
   emit(MOV(dst_reg(sid), brw_imm_ud(stream_id)));

   /* reg:shift_count = 2 * (vertex_count - 1) */
   src_reg shift_count(this, glsl_type::uint_type);
   emit(SHL(dst_reg(shift_count), this->vertex_count, brw_imm_ud(1u)));

   /* Note: we're relying on the fact that the GEN SHL instruction only pays
    * attention to the lower 5 bits of its second source argument, so on this
    * architecture, stream_id << 2 * (vertex_count - 1) is equivalent to
    * stream_id << ((2 * (vertex_count - 1)) % 32).
    */
   src_reg mask(this, glsl_type::uint_type);
   emit(SHL(dst_reg(mask), sid, shift_count));
   emit(OR(dst_reg(this->control_data_bits), this->control_data_bits, mask));
}
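/* Worked example: emitting the fourth vertex (this->vertex_count == 3) to
 * stream 2 computes shift_count == 6 and ORs 2 << 6 == 0x80 into
 * control_data_bits, i.e. the stream ID lands in bit pair [7:6] of the
 * current 32-bit batch.
 */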
void
vec4_gs_visitor::gs_emit_vertex(int stream_id)
{
   this->current_annotation = "emit vertex: safety check";

   /* Haswell and later hardware ignores the "Render Stream Select" bits
    * from the 3DSTATE_STREAMOUT packet when the SOL stage is disabled,
    * and instead sends all primitives down the pipeline for rasterization.
    * If the SOL stage is enabled, "Render Stream Select" is honored and
    * primitives bound to non-zero streams are discarded after stream output.
    *
    * Since the only purpose of primitives sent to non-zero streams is to
    * be recorded by transform feedback, we can simply discard all geometry
    * bound to these streams when transform feedback is disabled.
    */
   if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
      return;

   /* If we're outputting 32 control data bits or less, then we can wait
    * until the shader is over to output them all.  Otherwise we need to
    * output them as we go.  Now is the time to do it, since we're about to
    * output the vertex_count'th vertex, so it's guaranteed that the
    * control data bits associated with the (vertex_count - 1)th vertex are
    * correct.
    */
   if (c->control_data_header_size_bits > 32) {
      this->current_annotation = "emit vertex: emit control data bits";
      /* Only emit control data bits if we've finished accumulating a batch
       * of 32 bits.  This is the case when:
       *
       *     (vertex_count * bits_per_vertex) % 32 == 0
       *
       * (in other words, when the last 5 bits of vertex_count *
       * bits_per_vertex are 0).  Assuming bits_per_vertex == 2^n for some
       * integer n (which is always the case, since bits_per_vertex is
       * always 1 or 2), this is equivalent to requiring that the last 5-n
       * bits of vertex_count are 0:
       *
       *     vertex_count & (2^(5-n) - 1) == 0
       *
       * 2^(5-n) == 2^5 / 2^n == 32 / bits_per_vertex, so this is
       * equivalent to:
       *
       *     vertex_count & (32 / bits_per_vertex - 1) == 0
       */
      vec4_instruction *inst =
         emit(AND(dst_null_ud(), this->vertex_count,
                  brw_imm_ud(32 / c->control_data_bits_per_vertex - 1)));
      inst->conditional_mod = BRW_CONDITIONAL_Z;

      emit(IF(BRW_PREDICATE_NORMAL));
      {
         /* If vertex_count is 0, then no control data bits have been
          * accumulated yet, so we skip emitting them.
          */
         emit(CMP(dst_null_ud(), this->vertex_count, brw_imm_ud(0u),
                  BRW_CONDITIONAL_NEQ));
         emit(IF(BRW_PREDICATE_NORMAL));
         emit_control_data_bits();
         emit(BRW_OPCODE_ENDIF);

         /* Reset control_data_bits to 0 so we can start accumulating a new
          * batch.
          *
          * Note: in the case where vertex_count == 0, this neutralizes the
          * effect of any call to EndPrimitive() that the shader may have
          * made before outputting its first vertex.
          */
         inst = emit(MOV(dst_reg(this->control_data_bits), brw_imm_ud(0u)));
         inst->force_writemask_all = true;
      }
      emit(BRW_OPCODE_ENDIF);
   }

   this->current_annotation = "emit vertex: vertex data";
   emit_vertex();

   /* In stream mode we have to set control data bits for all vertices
    * unless we have disabled control data bits completely (which we do
    * for GL_POINTS outputs that don't use streams).
    */
   if (c->control_data_header_size_bits > 0 &&
       gs_prog_data->control_data_format ==
          GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID) {
      this->current_annotation = "emit vertex: Stream control data bits";
      set_stream_control_data_bits(stream_id);
   }

   this->current_annotation = NULL;
}
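/* Worked example: with bits_per_vertex == 2 the mask is 32 / 2 - 1 == 15,
 * so a batch is flushed whenever vertex_count & 15 == 0, i.e. just before
 * vertices 16, 32, 48, ... are written (the vertex_count == 0 case is
 * filtered out by the inner CMP/IF).
 */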
void
vec4_gs_visitor::gs_end_primitive()
{
   /* We can only do EndPrimitive() functionality when the control data
    * consists of cut bits.  Fortunately, the only time it isn't is when the
    * output type is points, in which case EndPrimitive() is a no-op.
    */
   if (gs_prog_data->control_data_format !=
       GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT) {
      return;
   }

   if (c->control_data_header_size_bits == 0)
      return;

   /* Cut bits use one bit per vertex. */
   assert(c->control_data_bits_per_vertex == 1);

   /* Cut bit n should be set to 1 if EndPrimitive() was called after emitting
    * vertex n, 0 otherwise.  So all we need to do here is mark bit
    * (vertex_count - 1) % 32 in the cut_bits register to indicate that
    * EndPrimitive() was called after emitting vertex (vertex_count - 1);
    * vec4_gs_visitor::emit_control_data_bits() will take care of the rest.
    *
    * Note that if EndPrimitive() is called before emitting any vertices, this
    * will cause us to set bit 31 of the control_data_bits register to 1.
    * That's fine because:
    *
    * - If max_vertices < 32, then vertex number 31 (zero-based) will never be
    *   output, so the hardware will ignore cut bit 31.
    *
    * - If max_vertices == 32, then vertex number 31 is guaranteed to be the
    *   last vertex, so setting cut bit 31 has no effect (since the primitive
    *   is automatically ended when the GS terminates).
    *
    * - If max_vertices > 32, then the ir_emit_vertex visitor will reset the
    *   control_data_bits register to 0 when the first vertex is emitted.
    */

   /* control_data_bits |= 1 << ((vertex_count - 1) % 32) */
   src_reg one(this, glsl_type::uint_type);
   emit(MOV(dst_reg(one), brw_imm_ud(1u)));
   src_reg prev_count(this, glsl_type::uint_type);
   emit(ADD(dst_reg(prev_count), this->vertex_count, brw_imm_ud(0xffffffffu)));
   src_reg mask(this, glsl_type::uint_type);
   /* Note: we're relying on the fact that the GEN SHL instruction only pays
    * attention to the lower 5 bits of its second source argument, so on this
    * architecture, 1 << (vertex_count - 1) is equivalent to 1 <<
    * ((vertex_count - 1) % 32).
    */
   emit(SHL(dst_reg(mask), one, prev_count));
   emit(OR(dst_reg(this->control_data_bits), this->control_data_bits, mask));
}
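/* Worked example: calling EndPrimitive() right after emitting vertex 5
 * (vertex_count == 6) computes prev_count == 5 and ORs 1 << 5 into
 * control_data_bits, marking a cut after the sixth vertex; the hardware
 * then starts a new strip with the next vertex emitted.
 */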
static const GLuint gl_prim_to_hw_prim[GL_TRIANGLE_STRIP_ADJACENCY+1] = {
   [GL_POINTS] = _3DPRIM_POINTLIST,
   [GL_LINES] = _3DPRIM_LINELIST,
   [GL_LINE_LOOP] = _3DPRIM_LINELOOP,
   [GL_LINE_STRIP] = _3DPRIM_LINESTRIP,
   [GL_TRIANGLES] = _3DPRIM_TRILIST,
   [GL_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
   [GL_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
   [GL_QUADS] = _3DPRIM_QUADLIST,
   [GL_QUAD_STRIP] = _3DPRIM_QUADSTRIP,
   [GL_POLYGON] = _3DPRIM_POLYGON,
   [GL_LINES_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
   [GL_LINE_STRIP_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
   [GL_TRIANGLES_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
   [GL_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
};

extern "C" const unsigned *
brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
               void *mem_ctx,
               const struct brw_gs_prog_key *key,
               struct brw_gs_prog_data *prog_data,
               nir_shader *shader,
               struct gl_program *prog,
               int shader_time_index,
               char **error_str)
{
   struct brw_gs_compile c;
   memset(&c, 0, sizeof(c));
   c.key = *key;

   const bool is_scalar = compiler->scalar_stage[MESA_SHADER_GEOMETRY];

   /* The GLSL linker will have already matched up GS inputs and the outputs
    * of prior stages.  The driver does extend VS outputs in some cases, but
    * only for legacy OpenGL or Gen4-5 hardware, neither of which offers
    * geometry shader support.  So we can safely ignore that.
    *
    * For SSO pipelines, we use a fixed VUE map layout based on variable
    * locations, so we can rely on rendezvous-by-location making this work.
    */
   GLbitfield64 inputs_read = shader->info.inputs_read;
   brw_compute_vue_map(compiler->devinfo,
                       &c.input_vue_map, inputs_read,
                       shader->info.separate_shader);

   shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, is_scalar);
   brw_nir_lower_vue_inputs(shader, &c.input_vue_map);
   brw_nir_lower_vue_outputs(shader);
   shader = brw_postprocess_nir(shader, compiler, is_scalar);

   prog_data->base.clip_distance_mask =
      ((1 << shader->info.clip_distance_array_size) - 1);
   prog_data->base.cull_distance_mask =
      ((1 << shader->info.cull_distance_array_size) - 1) <<
         shader->info.clip_distance_array_size;

   prog_data->include_primitive_id =
      (shader->info.system_values_read & (1 << SYSTEM_VALUE_PRIMITIVE_ID)) != 0;

   prog_data->invocations = shader->info.gs.invocations;

   if (compiler->devinfo->gen >= 8)
      prog_data->static_vertex_count = nir_gs_count_vertices(shader);

   if (compiler->devinfo->gen >= 7) {
      if (shader->info.gs.output_primitive == GL_POINTS) {
         /* When the output type is points, the geometry shader may output data
          * to multiple streams, and EndPrimitive() has no effect.  So we
          * configure the hardware to interpret the control data as stream ID.
          */
         prog_data->control_data_format = GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID;

         /* We only have to emit control bits if we are using streams */
         if (shader->info.gs.uses_streams)
            c.control_data_bits_per_vertex = 2;
         else
            c.control_data_bits_per_vertex = 0;
      } else {
         /* When the output type is triangle_strip or line_strip,
          * EndPrimitive() may be used to terminate the current strip and
          * start a new one (similar to primitive restart), and outputting
          * data to multiple streams is not supported.  So we configure the
          * hardware to interpret the control data as EndPrimitive information
          * (a.k.a. "cut bits").
          */
         prog_data->control_data_format = GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT;

         /* We only need to output control data if the shader actually calls
          * EndPrimitive().
          */
         c.control_data_bits_per_vertex =
            shader->info.gs.uses_end_primitive ? 1 : 0;
      }
   } else {
      /* There are no control data bits in gen6. */
      c.control_data_bits_per_vertex = 0;
   }
   c.control_data_header_size_bits =
      shader->info.gs.vertices_out * c.control_data_bits_per_vertex;

   /* 1 HWORD = 32 bytes = 256 bits */
   prog_data->control_data_header_size_hwords =
      ALIGN(c.control_data_header_size_bits, 256) / 256;
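   /* Worked example: a strip-output GS with max_vertices == 100 that calls
    * EndPrimitive() uses 1 cut bit per vertex, so the header needs 100 bits,
    * which rounds up to ALIGN(100, 256) / 256 == 1 HWord.  A streams-using
    * points GS with the same max_vertices needs 200 bits, also 1 HWord.
    */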
   /* Compute the output vertex size.
    *
    * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 3DSTATE_GS - Output Vertex
    * Size (p168):
    *
    *     [0,62] indicating [1,63] 16B units
    *
    *     Specifies the size of each vertex stored in the GS output entry
    *     (following any Control Header data) as a number of 128-bit units
    *     (minus one).
    *
    *     Programming Restrictions: The vertex size must be programmed as a
    *     multiple of 32B units with the following exception: Rendering is
    *     disabled (as per SOL stage state) and the vertex size output by the
    *     GS thread is 16B.
    *
    *     If rendering is enabled (as per SOL state) the vertex size must be
    *     programmed as a multiple of 32B units.  In other words, the only
    *     time software can program a vertex size with an odd number of 16B
    *     units is when rendering is disabled.
    *
    * Note: B=bytes in the above text.
    *
    * It doesn't seem worth the extra trouble to optimize the case where the
    * vertex size is 16B (especially since this would require special-casing
    * the GEN assembly that writes to the URB).  So we just set the vertex
    * size to a multiple of 32B (2 vec4's) in all cases.
    *
    * The maximum output vertex size is 62*16 = 992 bytes (31 hwords).  We
    * budget that as follows:
    *
    *   512 bytes for varyings (a varying component is 4 bytes and
    *             gl_MaxGeometryOutputComponents = 128)
    *    16 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *             bytes)
    *    16 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *             even if it's not used)
    *    32 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *             whenever clip planes are enabled, even if the shader doesn't
    *             write to gl_ClipDistance)
    *    16 bytes overhead since the VUE size must be a multiple of 32 bytes
    *             (see below)--this causes up to 1 VUE slot to be wasted
    *   400 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot (12 bytes)
    * per interpolation type, so this is plenty.
    */
   unsigned output_vertex_size_bytes = prog_data->base.vue_map.num_slots * 16;
   assert(compiler->devinfo->gen == 6 ||
          output_vertex_size_bytes <= GEN7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES);
   prog_data->output_vertex_size_hwords =
      ALIGN(output_vertex_size_bytes, 32) / 32;
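   /* Worked example: a VUE map with 7 slots (header, position, and five
    * varyings) gives output_vertex_size_bytes == 112, which rounds up to
    * ALIGN(112, 32) / 32 == 4 HWords per vertex.
    */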
   /* Compute URB entry size.  The maximum allowed URB entry size is 32k.
    * That divides up as follows:
    *
    *     64 bytes for the control data header (cut indices or StreamID bits)
    *   4096 bytes for varyings (a varying component is 4 bytes and
    *              gl_MaxGeometryTotalOutputComponents = 1024)
    *   4096 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *              bytes/vertex and gl_MaxGeometryOutputVertices is 256)
    *   4096 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *              even if it's not used)
    *   8192 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *              whenever clip planes are enabled, even if the shader doesn't
    *              write to gl_ClipDistance)
    *   4096 bytes overhead since the VUE size must be a multiple of 32
    *              bytes (see above)--this causes up to 1 VUE slot to be wasted
    *   8128 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot per
    * interpolation type, which works out to 3072 bytes, so this would allow
    * us to accommodate 2 interpolation types without any danger of running
    * out of URB space.
    *
    * In practice, the risk of running out of URB space is very small, since
    * the above figures are all worst-case, and most of them scale with the
    * number of output vertices.  So we'll just calculate the amount of space
    * we need, and if it's too large, fail to compile.
    *
    * The above is for gen7+ where we have a single URB entry that will hold
    * all the output.  In gen6, we will have to allocate URB entries for every
    * vertex we emit, so our URB entries only need to be large enough to hold
    * a single vertex.  Also, gen6 does not have a control data header.
    */
   unsigned output_size_bytes;
   if (compiler->devinfo->gen >= 7) {
      output_size_bytes =
         prog_data->output_vertex_size_hwords * 32 * shader->info.gs.vertices_out;
      output_size_bytes += 32 * prog_data->control_data_header_size_hwords;
   } else {
      output_size_bytes = prog_data->output_vertex_size_hwords * 32;
   }

   /* Broadwell stores "Vertex Count" as a full 8 DWord (32 byte) URB output,
    * which comes before the control header.
    */
   if (compiler->devinfo->gen >= 8)
      output_size_bytes += 32;

   /* Shaders can technically set max_vertices = 0, at which point we
    * may have a URB size of 0 bytes.  Nothing good can come from that,
    * so enforce a minimum size.
    */
   if (output_size_bytes == 0)
      output_size_bytes = 1;

   unsigned max_output_size_bytes = GEN7_MAX_GS_URB_ENTRY_SIZE_BYTES;
   if (compiler->devinfo->gen == 6)
      max_output_size_bytes = GEN6_MAX_GS_URB_ENTRY_SIZE_BYTES;
   if (output_size_bytes > max_output_size_bytes)
      return NULL;


   /* URB entry sizes are stored as a multiple of 64 bytes in gen7+ and
    * a multiple of 128 bytes in gen6.
    */
   if (compiler->devinfo->gen >= 7) {
      prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 64) / 64;
      /* On Cannonlake software shall not program an allocation size that
       * specifies a size that is a multiple of 3 64B (512-bit) cachelines.
       */
      if (compiler->devinfo->gen == 10 &&
          prog_data->base.urb_entry_size % 3 == 0)
         prog_data->base.urb_entry_size++;
   } else {
      prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 128) / 128;
   }
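   /* Worked example: on Gen10 with 4 HWords per vertex, max_vertices == 100,
    * and a 1-HWord control header, output_size_bytes == 4 * 32 * 100 + 32
    * (header) + 32 (vertex count) == 12864, giving urb_entry_size ==
    * ALIGN(12864, 64) / 64 == 201; since 201 is a multiple of 3, the
    * Cannonlake workaround bumps it to 202.
    */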
   assert(shader->info.gs.output_primitive < ARRAY_SIZE(gl_prim_to_hw_prim));
   prog_data->output_topology =
      gl_prim_to_hw_prim[shader->info.gs.output_primitive];

   prog_data->vertices_in = shader->info.gs.vertices_in;

   /* GS inputs are read from the VUE 256 bits (2 vec4's) at a time, so we
    * need to program a URB read length of ceiling(num_slots / 2).
    */
   prog_data->base.urb_read_length = (c.input_vue_map.num_slots + 1) / 2;

   /* Now that prog_data setup is done, we are ready to actually compile the
    * program.
    */
   if (unlikely(INTEL_DEBUG & DEBUG_GS)) {
      fprintf(stderr, "GS Input ");
      brw_print_vue_map(stderr, &c.input_vue_map);
      fprintf(stderr, "GS Output ");
      brw_print_vue_map(stderr, &prog_data->base.vue_map);
   }

   if (is_scalar) {
      fs_visitor v(compiler, log_data, mem_ctx, &c, prog_data, shader,
                   shader_time_index);
      if (v.run_gs()) {
         prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;
         prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;

         fs_generator g(compiler, log_data, mem_ctx,
                        &prog_data->base.base, v.promoted_constants,
                        false, MESA_SHADER_GEOMETRY);
         if (unlikely(INTEL_DEBUG & DEBUG_GS)) {
            const char *label =
               shader->info.label ? shader->info.label : "unnamed";
            char *name = ralloc_asprintf(mem_ctx, "%s geometry shader %s",
                                         label, shader->info.name);
            g.enable_debug(name);
         }
         g.generate_code(v.cfg, 8);
         return g.get_assembly();
      }
   }

   if (compiler->devinfo->gen >= 7) {
      /* Compile the geometry shader in DUAL_OBJECT dispatch mode, if we can
       * do so without spilling.  If the GS invocations count > 1, then we
       * can't use dual object mode.
       */
      if (prog_data->invocations <= 1 &&
          likely(!(INTEL_DEBUG & DEBUG_NO_DUAL_OBJECT_GS))) {
         prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_OBJECT;

         vec4_gs_visitor v(compiler, log_data, &c, prog_data, shader,
                           mem_ctx, true /* no_spills */, shader_time_index);

         /* Backup 'nr_params' and 'param' as they can be modified by the
          * DUAL_OBJECT visitor.  If it fails, we will run the fallback
          * (DUAL_INSTANCED or SINGLE mode) and we need to restore original
          * values.
          */
         const unsigned param_count = prog_data->base.base.nr_params;
         uint32_t *param = ralloc_array(NULL, uint32_t, param_count);
         memcpy(param, prog_data->base.base.param,
                sizeof(uint32_t) * param_count);

         if (v.run()) {
            /* Success! Backup is not needed */
            ralloc_free(param);
            return brw_vec4_generate_assembly(compiler, log_data, mem_ctx,
                                              shader, &prog_data->base, v.cfg);
         } else {
            /* These variables could be modified by the execution of the GS
             * visitor if it packed the uniforms in the push constant buffer.
             * As it failed, we need to restore them so we can start again
             * with DUAL_INSTANCED or SINGLE mode.
             *
             * FIXME: Could more variables be modified by this execution?
             */
            memcpy(prog_data->base.base.param, param,
                   sizeof(uint32_t) * param_count);
            prog_data->base.base.nr_params = param_count;
            prog_data->base.base.nr_pull_params = 0;
            ralloc_free(param);
         }
      }
   }

   /* Either we failed to compile in DUAL_OBJECT mode (probably because it
    * would have required spilling) or DUAL_OBJECT mode is disabled.  So fall
    * back to DUAL_INSTANCED or SINGLE mode, which consumes fewer registers.
    *
    * FIXME: Single dispatch mode requires that the driver can handle
    * interleaving of input registers, but this is already supported (dual
    * instance mode has the same requirement).  However, to take full
    * advantage of single dispatch mode to reduce register pressure we would
    * also need to do interleaved outputs, but currently, the vec4 visitor and
    * generator classes do not support this, so at the moment register
    * pressure in single and dual instance modes is the same.
    *
    * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 "3DSTATE_GS":
    * "If InstanceCount>1, DUAL_OBJECT mode is invalid.  Software will likely
    * want to use DUAL_INSTANCE mode for higher performance, but SINGLE mode
    * is also supported.  When InstanceCount=1 (one instance per object)
    * software can decide which dispatch mode to use.  DUAL_OBJECT mode would
    * likely be the best choice for performance, followed by SINGLE mode."
    *
    * So SINGLE mode is more performant when invocations == 1 and
    * DUAL_INSTANCE mode is more performant when invocations > 1.  Gen6 only
    * supports SINGLE mode.
    */
   if (prog_data->invocations <= 1 || compiler->devinfo->gen < 7)
      prog_data->base.dispatch_mode = DISPATCH_MODE_4X1_SINGLE;
   else
      prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_INSTANCE;

   vec4_gs_visitor *gs = NULL;
   const unsigned *ret = NULL;

   if (compiler->devinfo->gen >= 7)
      gs = new vec4_gs_visitor(compiler, log_data, &c, prog_data,
                               shader, mem_ctx, false /* no_spills */,
                               shader_time_index);
   else
      gs = new gen6_gs_visitor(compiler, log_data, &c, prog_data, prog,
                               shader, mem_ctx, false /* no_spills */,
                               shader_time_index);

   if (!gs->run()) {
      if (error_str)
         *error_str = ralloc_strdup(mem_ctx, gs->fail_msg);
   } else {
      ret = brw_vec4_generate_assembly(compiler, log_data, mem_ctx, shader,
                                       &prog_data->base, gs->cfg);
   }

   delete gs;
   return ret;
}


} /* namespace brw */