1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24#include "ac_llvm_cull.h"
25#include "si_pipe.h"
26#include "si_shader_internal.h"
27#include "sid.h"
28#include "util/u_memory.h"
29#include "util/u_prim.h"
30
31static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx)
32{
33   return si_unpack_param(ctx, ctx->args.merged_wave_info, 24, 4);
34}
35
36static LLVMValueRef get_tgsize(struct si_shader_context *ctx)
37{
38   return si_unpack_param(ctx, ctx->args.merged_wave_info, 28, 4);
39}
40
41static LLVMValueRef get_thread_id_in_tg(struct si_shader_context *ctx)
42{
43   LLVMBuilderRef builder = ctx->ac.builder;
44   LLVMValueRef tmp;
45   tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
46                      LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, false), "");
47   return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
48}
49
50static LLVMValueRef ngg_get_vtx_cnt(struct si_shader_context *ctx)
51{
52   return si_unpack_param(ctx, ctx->args.gs_tg_info, 12, 9);
53}
54
55static LLVMValueRef ngg_get_prim_cnt(struct si_shader_context *ctx)
56{
57   return si_unpack_param(ctx, ctx->args.gs_tg_info, 22, 9);
58}
59
60static LLVMValueRef ngg_get_ordered_id(struct si_shader_context *ctx)
61{
62   return si_unpack_param(ctx, ctx->args.gs_tg_info, 0, 12);
63}
64
65static LLVMValueRef ngg_get_query_buf(struct si_shader_context *ctx)
66{
67   LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->internal_bindings);
68
69   return ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
70                                LLVMConstInt(ctx->ac.i32, GFX10_GS_QUERY_BUF, false));
71}
72
73/**
74 * Return the number of vertices as a constant in \p num_vertices,
75 * and return a more precise value as LLVMValueRef from the function.
76 */
77static LLVMValueRef ngg_get_vertices_per_prim(struct si_shader_context *ctx, unsigned *num_vertices)
78{
79   const struct si_shader_info *info = &ctx->shader->selector->info;
80
81   if (ctx->stage == MESA_SHADER_VERTEX) {
82      if (info->base.vs.blit_sgprs_amd) {
83         /* Blits always use axis-aligned rectangles with 3 vertices. */
84         *num_vertices = 3;
85         return LLVMConstInt(ctx->ac.i32, 3, 0);
86      } else if (ctx->shader->key.opt.ngg_culling & SI_NGG_CULL_LINES) {
87         *num_vertices = 2;
88         return LLVMConstInt(ctx->ac.i32, 2, 0);
89      } else {
90         /* We always build up all three indices for the prim export
91          * independent of the primitive type. The additional garbage
92          * data shouldn't hurt. This is used by exports and streamout.
93          */
94         *num_vertices = 3;
95
96         /* Extract OUTPRIM field. */
97         LLVMValueRef num = si_unpack_param(ctx, ctx->vs_state_bits, 2, 2);
98         return LLVMBuildAdd(ctx->ac.builder, num, ctx->ac.i32_1, "");
99      }
100   } else {
101      assert(ctx->stage == MESA_SHADER_TESS_EVAL);
102
103      if (info->base.tess.point_mode)
104         *num_vertices = 1;
105      else if (info->base.tess.primitive_mode == GL_LINES)
106         *num_vertices = 2;
107      else
108         *num_vertices = 3;
109
110      return LLVMConstInt(ctx->ac.i32, *num_vertices, false);
111   }
112}
113
114bool gfx10_ngg_export_prim_early(struct si_shader *shader)
115{
116   struct si_shader_selector *sel = shader->selector;
117
118   assert(shader->key.as_ngg && !shader->key.as_es);
119
120   return sel->info.stage != MESA_SHADER_GEOMETRY &&
121          !gfx10_ngg_writes_user_edgeflags(shader);
122}
123
124void gfx10_ngg_build_sendmsg_gs_alloc_req(struct si_shader_context *ctx)
125{
126   /* Newer chips can use PRIMGEN_PASSTHRU_NO_MSG to skip gs_alloc_req for NGG passthrough. */
127   if (gfx10_is_ngg_passthrough(ctx->shader) &&
128       ctx->screen->info.family >= CHIP_DIMGREY_CAVEFISH)
129      return;
130
131   ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx), ngg_get_vtx_cnt(ctx),
132                                 ngg_get_prim_cnt(ctx));
133}
134
135void gfx10_ngg_build_export_prim(struct si_shader_context *ctx, LLVMValueRef user_edgeflags[3],
136                                 LLVMValueRef prim_passthrough)
137{
138   LLVMBuilderRef builder = ctx->ac.builder;
139
140   if (gfx10_is_ngg_passthrough(ctx->shader) || ctx->shader->key.opt.ngg_culling) {
141      ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 6001);
142      {
143         struct ac_ngg_prim prim = {};
144
145         if (prim_passthrough)
146            prim.passthrough = prim_passthrough;
147         else
148            prim.passthrough = ac_get_arg(&ctx->ac, ctx->args.gs_vtx_offset[0]);
149
150         /* This is only used with NGG culling, which returns the NGG
151          * passthrough prim export encoding.
152          */
153         if (gfx10_ngg_writes_user_edgeflags(ctx->shader)) {
154            unsigned all_bits_no_edgeflags = ~SI_NGG_PRIM_EDGE_FLAG_BITS;
155            LLVMValueRef edgeflags = LLVMConstInt(ctx->ac.i32, all_bits_no_edgeflags, 0);
156
157            unsigned num_vertices;
158            ngg_get_vertices_per_prim(ctx, &num_vertices);
159
160            for (unsigned i = 0; i < num_vertices; i++) {
161               unsigned shift = 9 + i * 10;
162               LLVMValueRef edge;
163
164               edge = LLVMBuildLoad(builder, user_edgeflags[i], "");
165               edge = LLVMBuildZExt(builder, edge, ctx->ac.i32, "");
166               edge = LLVMBuildShl(builder, edge, LLVMConstInt(ctx->ac.i32, shift, 0), "");
167               edgeflags = LLVMBuildOr(builder, edgeflags, edge, "");
168            }
169            prim.passthrough = LLVMBuildAnd(builder, prim.passthrough, edgeflags, "");
170         }
171
172         ac_build_export_prim(&ctx->ac, &prim);
173      }
174      ac_build_endif(&ctx->ac, 6001);
175      return;
176   }
177
178   ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 6001);
179   {
180      struct ac_ngg_prim prim = {};
181
182      ngg_get_vertices_per_prim(ctx, &prim.num_vertices);
183
184      prim.isnull = ctx->ac.i1false;
185
186      if (gfx10_edgeflags_have_effect(ctx->shader))
187         prim.edgeflags = ac_pack_edgeflags_for_export(&ctx->ac, &ctx->args);
188      else
189         prim.edgeflags = ctx->ac.i32_0;
190
191      for (unsigned i = 0; i < prim.num_vertices; ++i)
192         prim.index[i] = si_unpack_param(ctx, ctx->args.gs_vtx_offset[i / 2], (i & 1) * 16, 16);
193
194      if (gfx10_ngg_writes_user_edgeflags(ctx->shader)) {
195         LLVMValueRef edgeflags = ctx->ac.i32_0;
196
197         for (unsigned i = 0; i < prim.num_vertices; ++i) {
198            LLVMValueRef edge;
199
200            edge = LLVMBuildLoad(ctx->ac.builder, user_edgeflags[i], "");
201            edge = LLVMBuildZExt(ctx->ac.builder, edge, ctx->ac.i32, "");
202            edge = LLVMBuildShl(ctx->ac.builder, edge, LLVMConstInt(ctx->ac.i32, 9 + i*10, 0), "");
203            edgeflags = LLVMBuildOr(ctx->ac.builder, edgeflags, edge, "");
204         }
205         prim.edgeflags = LLVMBuildAnd(ctx->ac.builder, prim.edgeflags, edgeflags, "");
206      }
207
208      ac_build_export_prim(&ctx->ac, &prim);
209   }
210   ac_build_endif(&ctx->ac, 6001);
211}
212
213static void build_streamout_vertex(struct si_shader_context *ctx, LLVMValueRef *so_buffer,
214                                   LLVMValueRef *wg_offset_dw, unsigned stream,
215                                   LLVMValueRef offset_vtx, LLVMValueRef vertexptr)
216{
217   struct si_shader_info *info = &ctx->shader->selector->info;
218   struct pipe_stream_output_info *so = &ctx->shader->selector->so;
219   LLVMBuilderRef builder = ctx->ac.builder;
220   LLVMValueRef offset[4] = {};
221   LLVMValueRef tmp;
222
223   for (unsigned buffer = 0; buffer < 4; ++buffer) {
224      if (!wg_offset_dw[buffer])
225         continue;
226
227      tmp = LLVMBuildMul(builder, offset_vtx, LLVMConstInt(ctx->ac.i32, so->stride[buffer], false),
228                         "");
229      tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
230      offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 2, false), "");
231   }
232
233   for (unsigned i = 0; i < so->num_outputs; ++i) {
234      if (so->output[i].stream != stream)
235         continue;
236
237      unsigned reg = so->output[i].register_index;
238      struct si_shader_output_values out;
239      out.semantic = info->output_semantic[reg];
240
241      for (unsigned comp = 0; comp < 4; comp++) {
242         tmp = ac_build_gep0(&ctx->ac, vertexptr, LLVMConstInt(ctx->ac.i32, 4 * reg + comp, false));
243         out.values[comp] = LLVMBuildLoad(builder, tmp, "");
244         out.vertex_stream[comp] = (info->output_streams[reg] >> (2 * comp)) & 3;
245      }
246
247      si_llvm_streamout_store_output(ctx, so_buffer, offset, &so->output[i], &out);
248   }
249}
250
251struct ngg_streamout {
252   LLVMValueRef num_vertices;
253
254   /* per-thread data */
255   LLVMValueRef prim_enable[4]; /* i1 per stream */
256   LLVMValueRef vertices[3];    /* [N x i32] addrspace(LDS)* */
257
258   /* Output */
259   LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
260};
261
262/**
263 * Build streamout logic.
264 *
265 * Implies a barrier.
266 *
267 * Writes number of emitted primitives to gs_ngg_scratch[4:8].
268 *
269 * Clobbers gs_ngg_scratch[8:].
270 */
271static void build_streamout(struct si_shader_context *ctx, struct ngg_streamout *nggso)
272{
273   struct si_shader_info *info = &ctx->shader->selector->info;
274   struct pipe_stream_output_info *so = &ctx->shader->selector->so;
275   LLVMBuilderRef builder = ctx->ac.builder;
276   LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->internal_bindings);
277   LLVMValueRef tid = get_thread_id_in_tg(ctx);
278   LLVMValueRef tmp, tmp2;
279   LLVMValueRef i32_2 = LLVMConstInt(ctx->ac.i32, 2, false);
280   LLVMValueRef i32_4 = LLVMConstInt(ctx->ac.i32, 4, false);
281   LLVMValueRef i32_8 = LLVMConstInt(ctx->ac.i32, 8, false);
282   LLVMValueRef so_buffer[4] = {};
283   unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) + (nggso->vertices[2] ? 1 : 0);
284   LLVMValueRef prim_stride_dw[4] = {};
285   LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->ac.i32);
286   int stream_for_buffer[4] = {-1, -1, -1, -1};
287   unsigned bufmask_for_stream[4] = {};
288   bool isgs = ctx->stage == MESA_SHADER_GEOMETRY;
289   unsigned scratch_emit_base = isgs ? 4 : 0;
290   LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->ac.i32_0;
291   unsigned scratch_offset_base = isgs ? 8 : 4;
292   LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;
293
294   ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size", 256);
295
296   /* Determine the mapping of streamout buffers to vertex streams. */
297   for (unsigned i = 0; i < so->num_outputs; ++i) {
298      unsigned buf = so->output[i].output_buffer;
299      unsigned stream = so->output[i].stream;
300      assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
301      stream_for_buffer[buf] = stream;
302      bufmask_for_stream[stream] |= 1 << buf;
303   }
304
305   for (unsigned buffer = 0; buffer < 4; ++buffer) {
306      if (stream_for_buffer[buffer] == -1)
307         continue;
308
309      assert(so->stride[buffer]);
310
311      tmp = LLVMConstInt(ctx->ac.i32, so->stride[buffer], false);
312      prim_stride_dw[buffer] = LLVMBuildMul(builder, tmp, nggso->num_vertices, "");
313      prim_stride_dw_vgpr =
314         ac_build_writelane(&ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
315                            LLVMConstInt(ctx->ac.i32, buffer, false));
316
317      so_buffer[buffer] = ac_build_load_to_sgpr(
318         &ctx->ac, buf_ptr, LLVMConstInt(ctx->ac.i32, SI_VS_STREAMOUT_BUF0 + buffer, false));
319   }
320
321   tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
322   ac_build_ifcc(&ctx->ac, tmp, 5200);
323   {
324      LLVMTypeRef gdsptr = LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS);
325      LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->ac.i32_0, gdsptr, "");
326
327      /* Advance the streamout offsets in GDS. */
328      LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
329      LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
330
331      tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
332      ac_build_ifcc(&ctx->ac, tmp, 5210);
333      {
334         if (isgs) {
335            tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
336            tmp = LLVMBuildLoad(builder, tmp, "");
337         } else {
338            tmp = ac_build_writelane(&ctx->ac, ctx->ac.i32_0, ngg_get_prim_cnt(ctx), ctx->ac.i32_0);
339         }
340         LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);
341
342         unsigned swizzle[4];
343         int unused_stream = -1;
344         for (unsigned stream = 0; stream < 4; ++stream) {
345            if (!info->num_stream_output_components[stream]) {
346               unused_stream = stream;
347               break;
348            }
349         }
350         for (unsigned buffer = 0; buffer < 4; ++buffer) {
351            if (stream_for_buffer[buffer] >= 0) {
352               swizzle[buffer] = stream_for_buffer[buffer];
353            } else {
354               assert(unused_stream >= 0);
355               swizzle[buffer] = unused_stream;
356            }
357         }
358
359         tmp = ac_build_quad_swizzle(&ctx->ac, tmp, swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
360         tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
361
362         LLVMValueRef args[] = {
363            LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
364            tmp,
365            ctx->ac.i32_0,                             // ordering
366            ctx->ac.i32_0,                             // scope
367            ctx->ac.i1false,                           // isVolatile
368            LLVMConstInt(ctx->ac.i32, 4 << 24, false), // OA index
369            ctx->ac.i1true,                            // wave release
370            ctx->ac.i1true,                            // wave done
371         };
372         tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add", ctx->ac.i32, args,
373                                  ARRAY_SIZE(args), 0);
374
375         /* Keep offsets in a VGPR for quick retrieval via readlane by
376          * the first wave for bounds checking, and also store in LDS
377          * for retrieval by all waves later. */
378         LLVMBuildStore(builder, tmp, offsets_vgpr);
379
380         tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac), scratch_offset_basev, "");
381         tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
382         LLVMBuildStore(builder, tmp, tmp2);
383      }
384      ac_build_endif(&ctx->ac, 5210);
385
386      /* Determine the max emit per buffer. This is done via the SALU, in part
387       * because LLVM can't generate divide-by-multiply if we try to do this
388       * via VALU with one lane per buffer.
389       */
390      LLVMValueRef max_emit[4] = {};
391      for (unsigned buffer = 0; buffer < 4; ++buffer) {
392         if (stream_for_buffer[buffer] == -1)
393            continue;
394
395         LLVMValueRef bufsize_dw = LLVMBuildLShr(
396            builder, LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""), i32_2, "");
397
398         tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
399         LLVMValueRef offset_dw =
400            ac_build_readlane(&ctx->ac, tmp, LLVMConstInt(ctx->ac.i32, buffer, false));
401
402         tmp = LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
403         tmp = LLVMBuildUDiv(builder, tmp, prim_stride_dw[buffer], "");
404
405         tmp2 = LLVMBuildICmp(builder, LLVMIntULT, bufsize_dw, offset_dw, "");
406         max_emit[buffer] = LLVMBuildSelect(builder, tmp2, ctx->ac.i32_0, tmp, "");
407      }
408
409      /* Determine the number of emitted primitives per stream and fixup the
410       * GDS counter if necessary.
411       *
412       * This is complicated by the fact that a single stream can emit to
413       * multiple buffers (but luckily not vice versa).
414       */
415      LLVMValueRef emit_vgpr = ctx->ac.i32_0;
416
417      for (unsigned stream = 0; stream < 4; ++stream) {
418         if (!info->num_stream_output_components[stream])
419            continue;
420
421         tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
422         LLVMValueRef generated =
423            ac_build_readlane(&ctx->ac, tmp, LLVMConstInt(ctx->ac.i32, stream, false));
424
425         LLVMValueRef emit = generated;
426         for (unsigned buffer = 0; buffer < 4; ++buffer) {
427            if (stream_for_buffer[buffer] == stream)
428               emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
429         }
430
431         emit_vgpr =
432            ac_build_writelane(&ctx->ac, emit_vgpr, emit, LLVMConstInt(ctx->ac.i32, stream, false));
433
434         /* Fixup the offset using a plain GDS atomic if we overflowed. */
435         tmp = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
436         ac_build_ifcc(&ctx->ac, tmp, 5221); /* scalar branch */
437         tmp = LLVMBuildLShr(builder, LLVMConstInt(ctx->ac.i32, bufmask_for_stream[stream], false),
438                             ac_get_thread_id(&ctx->ac), "");
439         tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
440         ac_build_ifcc(&ctx->ac, tmp, 5222);
441         {
442            tmp = LLVMBuildSub(builder, generated, emit, "");
443            tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
444            tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
445            LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
446                               LLVMAtomicOrderingMonotonic, false);
447         }
448         ac_build_endif(&ctx->ac, 5222);
449         ac_build_endif(&ctx->ac, 5221);
450      }
451
452      tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
453      ac_build_ifcc(&ctx->ac, tmp, 5225);
454      {
455         tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac), scratch_emit_basev, "");
456         tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
457         LLVMBuildStore(builder, emit_vgpr, tmp);
458      }
459      ac_build_endif(&ctx->ac, 5225);
460   }
461   ac_build_endif(&ctx->ac, 5200);
462
463   /* Determine the workgroup-relative per-thread / primitive offset into
464    * the streamout buffers */
465   struct ac_wg_scan primemit_scan[4] = {};
466
467   if (isgs) {
468      for (unsigned stream = 0; stream < 4; ++stream) {
469         if (!info->num_stream_output_components[stream])
470            continue;
471
472         primemit_scan[stream].enable_exclusive = true;
473         primemit_scan[stream].op = nir_op_iadd;
474         primemit_scan[stream].src = nggso->prim_enable[stream];
475         primemit_scan[stream].scratch = ac_build_gep0(
476            &ctx->ac, ctx->gs_ngg_scratch, LLVMConstInt(ctx->ac.i32, 12 + 8 * stream, false));
477         primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
478         primemit_scan[stream].numwaves = get_tgsize(ctx);
479         if (ctx->stage == MESA_SHADER_GEOMETRY) {
480            /* ngg_subgroup_size is only the input size. GS can always generate up to 256 vertices. */
481            primemit_scan[stream].maxwaves = DIV_ROUND_UP(256, ctx->ac.wave_size);
482         } else {
483            primemit_scan[stream].maxwaves = DIV_ROUND_UP(ctx->screen->ngg_subgroup_size,
484                                                          ctx->ac.wave_size);
485         }
486         ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
487      }
488   }
489
490   ac_build_s_barrier(&ctx->ac);
491
492   /* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
493   LLVMValueRef wgoffset_dw[4] = {};
494
495   {
496      LLVMValueRef scratch_vgpr;
497
498      tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
499      scratch_vgpr = LLVMBuildLoad(builder, tmp, "");
500
501      for (unsigned buffer = 0; buffer < 4; ++buffer) {
502         if (stream_for_buffer[buffer] >= 0) {
503            wgoffset_dw[buffer] =
504               ac_build_readlane(&ctx->ac, scratch_vgpr,
505                                 LLVMConstInt(ctx->ac.i32, scratch_offset_base + buffer, false));
506         }
507      }
508
509      for (unsigned stream = 0; stream < 4; ++stream) {
510         if (info->num_stream_output_components[stream]) {
511            nggso->emit[stream] =
512               ac_build_readlane(&ctx->ac, scratch_vgpr,
513                                 LLVMConstInt(ctx->ac.i32, scratch_emit_base + stream, false));
514         }
515      }
516   }
517
518   /* Write out primitive data */
519   for (unsigned stream = 0; stream < 4; ++stream) {
520      if (!info->num_stream_output_components[stream])
521         continue;
522
523      if (isgs) {
524         ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
525      } else {
526         primemit_scan[stream].result_exclusive = tid;
527      }
528
529      tmp = LLVMBuildICmp(builder, LLVMIntULT, primemit_scan[stream].result_exclusive,
530                          nggso->emit[stream], "");
531      tmp = LLVMBuildAnd(builder, tmp, nggso->prim_enable[stream], "");
532      ac_build_ifcc(&ctx->ac, tmp, 5240);
533      {
534         LLVMValueRef offset_vtx =
535            LLVMBuildMul(builder, primemit_scan[stream].result_exclusive, nggso->num_vertices, "");
536
537         for (unsigned i = 0; i < max_num_vertices; ++i) {
538            tmp = LLVMBuildICmp(builder, LLVMIntULT, LLVMConstInt(ctx->ac.i32, i, false),
539                                nggso->num_vertices, "");
540            ac_build_ifcc(&ctx->ac, tmp, 5241);
541            build_streamout_vertex(ctx, so_buffer, wgoffset_dw, stream, offset_vtx,
542                                   nggso->vertices[i]);
543            ac_build_endif(&ctx->ac, 5241);
544            offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->ac.i32_1, "");
545         }
546      }
547      ac_build_endif(&ctx->ac, 5240);
548   }
549}
550
551/* LDS layout of ES vertex data for NGG culling. */
552enum
553{
554   /* Byte 0: Boolean ES thread accepted (unculled) flag, and later the old
555    *         ES thread ID. After vertex compaction, compacted ES threads
556    *         store the old thread ID here to copy input VGPRs from uncompacted
557    *         ES threads.
558    * Byte 1: New ES thread ID, loaded by GS to prepare the prim export value.
559    * Byte 2: TES rel patch ID
560    * Byte 3: Unused
561    */
562   lds_byte0_accept_flag = 0,
563   lds_byte1_new_thread_id,
564   lds_byte2_tes_rel_patch_id,
565   lds_byte3_unused,
566
567   lds_packed_data = 0, /* lds_byteN_... */
568   lds_pos_cull_x_div_w,
569   lds_pos_cull_y_div_w,
570   lds_pos_cull_w,
571
572   lds_pos_x = lds_packed_data + 1,
573   lds_pos_y,
574   lds_pos_z,
575   lds_pos_w,
576   /* If VS: */
577   lds_vertex_id,
578   lds_instance_id, /* optional */
579   /* If TES: */
580   lds_tes_u = lds_vertex_id,
581   lds_tes_v = lds_instance_id,
582   lds_tes_patch_id, /* optional */
583};
584
585static LLVMValueRef si_build_gep_i8_var(struct si_shader_context *ctx, LLVMValueRef ptr,
586                                        LLVMValueRef index)
587{
588   LLVMTypeRef pi8 = LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_LDS);
589
590   return LLVMBuildGEP(ctx->ac.builder, LLVMBuildPointerCast(ctx->ac.builder, ptr, pi8, ""), &index,
591                       1, "");
592}
593
594static LLVMValueRef si_build_gep_i8(struct si_shader_context *ctx, LLVMValueRef ptr,
595                                    unsigned byte_index)
596{
597   assert(byte_index < 4);
598   return si_build_gep_i8_var(ctx, ptr, LLVMConstInt(ctx->ac.i32, byte_index, 0));
599}
600
601static unsigned ngg_nogs_vertex_size(struct si_shader *shader)
602{
603   unsigned lds_vertex_size = 0;
604
605   /* The edgeflag is always stored in the last element that's also
606    * used for padding to reduce LDS bank conflicts. */
607   if (shader->selector->so.num_outputs)
608      lds_vertex_size = 4 * shader->selector->info.num_outputs + 1;
609   if (gfx10_ngg_writes_user_edgeflags(shader))
610      lds_vertex_size = MAX2(lds_vertex_size, 1);
611
612   /* LDS size for passing data from GS to ES.
613    * GS stores Primitive IDs into LDS at the address corresponding
614    * to the ES thread of the provoking vertex. All ES threads
615    * load and export PrimitiveID for their thread.
616    */
617   if (shader->selector->info.stage == MESA_SHADER_VERTEX && shader->key.mono.u.vs_export_prim_id)
618      lds_vertex_size = MAX2(lds_vertex_size, 1);
619
620   if (shader->key.opt.ngg_culling) {
621      if (shader->selector->info.stage == MESA_SHADER_VERTEX) {
622         STATIC_ASSERT(lds_instance_id + 1 == 7);
623         lds_vertex_size = MAX2(lds_vertex_size, 7);
624      } else {
625         assert(shader->selector->info.stage == MESA_SHADER_TESS_EVAL);
626
627         if (shader->selector->info.uses_primid || shader->key.mono.u.vs_export_prim_id) {
628            STATIC_ASSERT(lds_tes_patch_id + 2 == 9); /* +1 for LDS padding */
629            lds_vertex_size = MAX2(lds_vertex_size, 9);
630         } else {
631            STATIC_ASSERT(lds_tes_v + 1 == 7);
632            lds_vertex_size = MAX2(lds_vertex_size, 7);
633         }
634      }
635   }
636
637   return lds_vertex_size;
638}
639
640/**
641 * Returns an `[N x i32] addrspace(LDS)*` pointing at contiguous LDS storage
642 * for the vertex outputs.
643 */
644static LLVMValueRef ngg_nogs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vtxid)
645{
646   /* The extra dword is used to avoid LDS bank conflicts. */
647   unsigned vertex_size = ngg_nogs_vertex_size(ctx->shader);
648   LLVMTypeRef ai32 = LLVMArrayType(ctx->ac.i32, vertex_size);
649   LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
650   LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
651   return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
652}
653
654static LLVMValueRef si_insert_input_v4i32(struct si_shader_context *ctx, LLVMValueRef ret,
655                                          struct ac_arg param, unsigned return_index)
656{
657   LLVMValueRef v = ac_get_arg(&ctx->ac, param);
658
659   for (unsigned i = 0; i < 4; i++) {
660      ret = LLVMBuildInsertValue(ctx->ac.builder, ret, ac_llvm_extract_elem(&ctx->ac, v, i),
661                                 return_index + i, "");
662   }
663   return ret;
664}
665
666static void load_vertex_counts(struct si_shader_context *ctx, LLVMValueRef lds,
667                               unsigned max_waves, LLVMValueRef tid,
668                               LLVMValueRef *total_count,
669                               LLVMValueRef *prefix_sum)
670{
671   LLVMBuilderRef builder = ctx->ac.builder;
672   LLVMValueRef i8vec4_lane = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
673   unsigned num_i8vec4 = DIV_ROUND_UP(max_waves, 4);
674
675   /* If all threads loaded the vertex counts, it would cause many LDS bank conflicts
676    * and the performance could decrease up to WaveSize times (32x or 64x).
677    *
678    * Therefore, only load the i-th tuple of vertex counts in the i-th thread. Other threads will
679    * get them through readlane. 4 8-bit vertex counts are loaded per thread.
680    */
681   ac_build_ifcc(&ctx->ac, LLVMBuildICmp(builder, LLVMIntULT, tid,
682                                         LLVMConstInt(ctx->ac.i32, num_i8vec4, 0), ""), 17771);
683   LLVMBuildStore(builder, LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, lds, tid), ""), i8vec4_lane);
684   ac_build_endif(&ctx->ac, 17771);
685
686   /* Compute the number of ES waves. */
687   LLVMValueRef num_waves = get_tgsize(ctx);
688
689   /* Compute a byte mask where each byte is either 0 or 0xff depending on whether the wave
690    * exists. We need the mask to clear uninitialized bytes in LDS and to compute the prefix sum.
691    *
692    * 8 waves: valid_mask = ~0ull >> (64 - num_waves * 8)
693    * 4 waves: valid_mask = ~0 >> (32 - num_waves * 8)
694    */
695   LLVMValueRef num_waves8 = LLVMBuildShl(builder, num_waves, LLVMConstInt(ctx->ac.i32, 3, 0), "");
696   LLVMValueRef valid_mask;
697
698   if (max_waves > 4) {
699      LLVMValueRef num_waves8_rev = LLVMBuildSub(builder, LLVMConstInt(ctx->ac.i32, 64, 0),
700                                                 num_waves8, "");
701      valid_mask = LLVMBuildLShr(builder, LLVMConstInt(ctx->ac.i64, ~0ull, 0),
702                                 LLVMBuildZExt(builder, num_waves8_rev, ctx->ac.i64, ""), "");
703   } else {
704      LLVMValueRef num_waves8_rev = LLVMBuildSub(builder, LLVMConstInt(ctx->ac.i32, 32, 0),
705                                                 num_waves8, "");
706      valid_mask = LLVMBuildLShr(builder, LLVMConstInt(ctx->ac.i32, ~0, 0), num_waves8_rev, "");
707   }
708
709   /* Compute a byte mask where bytes below wave_id are 0xff, else they are 0.
710    *
711    * prefix_mask = ~(~0 << (wave_id * 8))
712    */
713   LLVMTypeRef type = max_waves > 4 ? ctx->ac.i64 : ctx->ac.i32;
714   LLVMValueRef wave_id8 = LLVMBuildShl(builder, get_wave_id_in_tg(ctx),
715                                        LLVMConstInt(ctx->ac.i32, 3, 0), "");
716   LLVMValueRef prefix_mask =
717      LLVMBuildNot(builder, LLVMBuildShl(builder, LLVMConstInt(type, ~0ull, 0),
718                                         LLVMBuildZExt(builder, wave_id8, type, ""), ""), "");
719
720   /* Compute the total vertex count and the vertex count of previous waves (prefix). */
721   *total_count = ctx->ac.i32_0;
722   *prefix_sum = ctx->ac.i32_0;
723
724   for (unsigned i = 0; i < num_i8vec4; i++) {
725      LLVMValueRef i8vec4;
726
727      i8vec4 = ac_build_readlane_no_opt_barrier(&ctx->ac, LLVMBuildLoad(builder, i8vec4_lane, ""),
728                                                LLVMConstInt(ctx->ac.i32, i, 0));
729      /* Inactive waves have uninitialized vertex counts. Set them to 0 using this. */
730      i8vec4 = LLVMBuildAnd(builder, i8vec4,
731                            ac_unpack_param(&ctx->ac, valid_mask, 32 * i, 32), "");
732      /* Compute the sum of all i8vec4 components and add it to the result. */
733      *total_count = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.sad.u8", ctx->ac.i32,
734                                        (LLVMValueRef[]){i8vec4, ctx->ac.i32_0, *total_count},
735                                        3, AC_FUNC_ATTR_READNONE);
736      ac_set_range_metadata(&ctx->ac, *total_count, 0, 64*4 + 1); /* the result is at most 64*4 */
737
738      /* Compute the sum of the vertex counts of all previous waves. */
739      i8vec4 = LLVMBuildAnd(builder, i8vec4,
740                                ac_unpack_param(&ctx->ac, prefix_mask, 32 * i, 32), "");
741      *prefix_sum = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.sad.u8", ctx->ac.i32,
742                                       (LLVMValueRef[]){i8vec4, ctx->ac.i32_0, *prefix_sum},
743                                       3, AC_FUNC_ATTR_READNONE);
744      ac_set_range_metadata(&ctx->ac, *prefix_sum, 0, 64*4 + 1); /* the result is at most 64*4 */
745   }
746   *total_count = ac_build_readlane_no_opt_barrier(&ctx->ac, *total_count, NULL);
747}
748
749/**
750 * Given a total thread count, update total and per-wave thread counts in input SGPRs
751 * and return the per-wave thread count.
752 *
753 * \param new_num_threads    Total thread count on the input, per-wave thread count on the output.
754 * \param tg_info            tg_info SGPR value
755 * \param tg_info_num_bits   the bit size of thread count field in tg_info
756 * \param tg_info_shift      the bit offset of the thread count field in tg_info
757 * \param wave_info          merged_wave_info SGPR value
758 * \param wave_info_num_bits the bit size of thread count field in merged_wave_info
759 * \param wave_info_shift    the bit offset of the thread count field in merged_wave_info
760 */
761static void update_thread_counts(struct si_shader_context *ctx, LLVMValueRef *new_num_threads,
762                                 LLVMValueRef *tg_info, unsigned tg_info_num_bits,
763                                 unsigned tg_info_shift, LLVMValueRef *wave_info,
764                                 unsigned wave_info_num_bits, unsigned wave_info_shift)
765{
766   LLVMBuilderRef builder = ctx->ac.builder;
767
768   /* Update the total thread count. */
769   unsigned tg_info_mask = ~(u_bit_consecutive(0, tg_info_num_bits) << tg_info_shift);
770   *tg_info = LLVMBuildAnd(builder, *tg_info, LLVMConstInt(ctx->ac.i32, tg_info_mask, 0), "");
771   *tg_info = LLVMBuildOr(
772      builder, *tg_info,
773      LLVMBuildShl(builder, *new_num_threads, LLVMConstInt(ctx->ac.i32, tg_info_shift, 0), ""), "");
774
775   /* Update the per-wave thread count. */
776   LLVMValueRef prev_threads = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
777                                            LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, 0), "");
778   *new_num_threads = LLVMBuildSub(builder, *new_num_threads, prev_threads, "");
779   *new_num_threads = ac_build_imax(&ctx->ac, *new_num_threads, ctx->ac.i32_0);
780   *new_num_threads =
781      ac_build_imin(&ctx->ac, *new_num_threads, LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, 0));
782   unsigned wave_info_mask = ~(u_bit_consecutive(0, wave_info_num_bits) << wave_info_shift);
783   *wave_info = LLVMBuildAnd(builder, *wave_info, LLVMConstInt(ctx->ac.i32, wave_info_mask, 0), "");
784   *wave_info = LLVMBuildOr(
785      builder, *wave_info,
786      LLVMBuildShl(builder, *new_num_threads, LLVMConstInt(ctx->ac.i32, wave_info_shift, 0), ""),
787      "");
788}
789
790static void gfx10_build_primitive_accepted(struct ac_llvm_context *ac, LLVMValueRef accepted,
791                                           void *userdata)
792{
793   struct si_shader_context *ctx = container_of(ac, struct si_shader_context, ac);
794   LLVMValueRef *params = (LLVMValueRef *)userdata;
795   LLVMValueRef gs_accepted = params[0];
796   LLVMValueRef *gs_vtxptr = (LLVMValueRef *)params[1];
797
798   unsigned num_vertices;
799   ngg_get_vertices_per_prim(ctx, &num_vertices);
800
801   ac_build_ifcc(&ctx->ac, accepted, 0);
802   LLVMBuildStore(ctx->ac.builder, ctx->ac.i32_1, gs_accepted);
803   for (unsigned vtx = 0; vtx < num_vertices; vtx++) {
804      LLVMBuildStore(ctx->ac.builder, ctx->ac.i8_1,
805                     si_build_gep_i8(ctx, gs_vtxptr[vtx], lds_byte0_accept_flag));
806   }
807   ac_build_endif(&ctx->ac, 0);
808}
809
810/**
811 * Cull primitives for NGG VS or TES, then compact vertices, which happens
812 * before the VS or TES main function. Return values for the main function.
813 * Also return the position, which is passed to the shader as an input,
814 * so that we don't compute it twice.
815 */
816void gfx10_emit_ngg_culling_epilogue(struct ac_shader_abi *abi)
817{
818   struct si_shader_context *ctx = si_shader_context_from_abi(abi);
819   struct si_shader *shader = ctx->shader;
820   struct si_shader_selector *sel = shader->selector;
821   struct si_shader_info *info = &sel->info;
822   LLVMBuilderRef builder = ctx->ac.builder;
823   LLVMValueRef *addrs = abi->outputs;
824   unsigned max_waves = DIV_ROUND_UP(ctx->screen->ngg_subgroup_size, ctx->ac.wave_size);
825
826   assert(shader->key.opt.ngg_culling);
827   assert(shader->key.as_ngg);
828   assert(sel->info.stage == MESA_SHADER_VERTEX ||
829          (sel->info.stage == MESA_SHADER_TESS_EVAL && !shader->key.as_es));
830
831   LLVMValueRef es_vtxptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
832   unsigned pos_index = 0;
833
834   for (unsigned i = 0; i < info->num_outputs; i++) {
835      LLVMValueRef position[4];
836
837      switch (info->output_semantic[i]) {
838      case VARYING_SLOT_POS:
839         /* If we are going to cull everything (rasterizer_discard), discard
840          * the position. This is useful for analyzing maximum theoretical
841          * performance without VS input loads.
842          */
843         if (shader->key.opt.ngg_culling & SI_NGG_CULL_FRONT_FACE &&
844             shader->key.opt.ngg_culling & SI_NGG_CULL_BACK_FACE) {
845            for (unsigned j = 0; j < 4; j++)
846               LLVMBuildStore(builder, LLVMGetUndef(ctx->ac.f32), addrs[4 * i + j]);
847            break;
848         }
849
850         pos_index = i;
851         for (unsigned j = 0; j < 4; j++) {
852            position[j] = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + j], "");
853         }
854
855         /* Store Position.W into LDS. */
856         LLVMBuildStore(
857            builder, ac_to_integer(&ctx->ac, position[3]),
858            ac_build_gep0(&ctx->ac, es_vtxptr, LLVMConstInt(ctx->ac.i32, lds_pos_cull_w, 0)));
859
860         /* Store Position.XY / W into LDS. */
861         for (unsigned chan = 0; chan < 2; chan++) {
862            LLVMValueRef val = ac_build_fdiv(&ctx->ac, position[chan], position[3]);
863            LLVMBuildStore(
864               builder, ac_to_integer(&ctx->ac, val),
865               ac_build_gep0(&ctx->ac, es_vtxptr, LLVMConstInt(ctx->ac.i32, lds_pos_cull_x_div_w + chan, 0)));
866         }
867         break;
868      }
869   }
870
871   /* Initialize the packed data. */
872   LLVMBuildStore(
873      builder, ctx->ac.i32_0,
874      ac_build_gep0(&ctx->ac, es_vtxptr, LLVMConstInt(ctx->ac.i32, lds_packed_data, 0)));
875   ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
876   ac_build_s_barrier(&ctx->ac);
877
878   LLVMValueRef tid = ac_get_thread_id(&ctx->ac);
879
880   unsigned num_vertices;
881   ngg_get_vertices_per_prim(ctx, &num_vertices);
882
883   /* The hardware requires that there are no holes between unculled vertices,
884    * which means we have to pack ES threads, i.e. reduce the ES thread count
885    * and move ES input VGPRs to lower threads. The upside is that varyings
886    * are only fetched and computed for unculled vertices.
887    *
888    * Vertex compaction:
889    *
890    * Part 1: Store the surviving vertex count for each wave in LDS.
891    *   - The GS culling code notifies ES threads which vertices were accepted.
892    *   - Barrier
893    *   - ES threads will compute the vertex count and store it in LDS.
894    * - Barrier
895    * - Each wave loads the vertex counts from LDS.
896    *
897    * Part 2: Compact ES threads:
898    * - Compute the prefix sum for each surviving vertex. This is the new thread ID
899    *   of the vertex.
900    * - Write input VGPRs and vertex positions for each surviving vertex into the LDS
901    *   address of the new thread ID.
902    * - Now kill all waves that have inactive threads.
903    * - Barrier
904    * - Update vertex indices and null flag in the GS input VGPRs.
905    *
906    * Part 3: Update inputs GPRs
907    * - For all waves, update per-wave thread counts in input SGPRs.
908    * - In ES threads, update the ES input VGPRs (VertexID, InstanceID, TES inputs).
909    */
910
911   LLVMValueRef vtxindex[3];
912   for (unsigned i = 0; i < num_vertices; ++i)
913      vtxindex[i] = si_unpack_param(ctx, ctx->args.gs_vtx_offset[i / 2], (i & 1) * 16, 16);
914
915   LLVMValueRef gs_vtxptr[3];
916   for (unsigned i = 0; i < num_vertices; i++)
917      gs_vtxptr[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);
918
919   es_vtxptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
920
921   /* Adding these optimization barriers improves the generated code as follows. Crazy right?
922    *
923    * - s_mov_b32 s4, 0xffff
924    * - v_lshrrev_b32_e32 v10, 16, v0
925    * - v_and_b32_e32 v12, s4, v0
926    * - v_and_b32_e32 v11, s4, v1
927    *   s_bfe_u32 s4, s3, 0x80008
928    * - s_mov_b64 s[8:9], 0
929    * - v_mul_u32_u24_e32 v0, 28, v10
930    * - v_mul_u32_u24_e32 v9, 28, v12
931    * - v_mul_u32_u24_e32 v1, 28, v11
932    * + v_mov_b32_e32 v11, 28
933    *   v_cmp_gt_u32_e32 vcc, s4, v2
934    * + s_mov_b64 s[8:9], 0
935    *   s_waitcnt lgkmcnt(0)
936    *   s_barrier
937    * + v_mul_u32_u24_sdwa v10, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
938    * + v_mul_u32_u24_sdwa v23, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
939    * + v_mul_u32_u24_sdwa v0, v1, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
940    *   s_and_saveexec_b64 s[44:45], vcc
941    *   s_cbranch_execz BB2_8
942    * - v_mul_u32_u24_e32 v16, 28, v12
943    * - v_mul_u32_u24_e32 v17, 28, v11
944    * - v_mul_u32_u24_e32 v18, 28, v10
945    */
946   for (unsigned i = 0; i < num_vertices; i++)
947      ac_build_optimization_barrier(&ctx->ac, &gs_vtxptr[i], false);
948
949   LLVMValueRef gs_accepted = ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
950
951   /* Do culling in GS threads. */
952   ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 16002);
953   {
954      /* Load positions. */
955      LLVMValueRef pos[3][4] = {};
956      for (unsigned vtx = 0; vtx < num_vertices; vtx++) {
957         for (unsigned chan = 0; chan < 4; chan++) {
958            unsigned index;
959            if (chan == 0 || chan == 1)
960               index = lds_pos_cull_x_div_w + chan;
961            else if (chan == 3)
962               index = lds_pos_cull_w;
963            else
964               continue;
965
966            LLVMValueRef addr =
967               ac_build_gep0(&ctx->ac, gs_vtxptr[vtx], LLVMConstInt(ctx->ac.i32, index, 0));
968            pos[vtx][chan] = LLVMBuildLoad(builder, addr, "");
969            pos[vtx][chan] = ac_to_float(&ctx->ac, pos[vtx][chan]);
970         }
971      }
972
973      /* Load the viewport state for small prim culling. */
974      LLVMValueRef vp = ac_build_load_invariant(
975         &ctx->ac, ac_get_arg(&ctx->ac, ctx->small_prim_cull_info), ctx->ac.i32_0);
976      vp = LLVMBuildBitCast(builder, vp, ctx->ac.v4f32, "");
977      LLVMValueRef vp_scale[2], vp_translate[2];
978      vp_scale[0] = ac_llvm_extract_elem(&ctx->ac, vp, 0);
979      vp_scale[1] = ac_llvm_extract_elem(&ctx->ac, vp, 1);
980      vp_translate[0] = ac_llvm_extract_elem(&ctx->ac, vp, 2);
981      vp_translate[1] = ac_llvm_extract_elem(&ctx->ac, vp, 3);
982
983      /* Get the small prim filter precision. */
984      LLVMValueRef small_prim_precision = si_unpack_param(ctx, ctx->vs_state_bits, 7, 4);
985      small_prim_precision =
986         LLVMBuildOr(builder, small_prim_precision, LLVMConstInt(ctx->ac.i32, 0x70, 0), "");
987      small_prim_precision =
988         LLVMBuildShl(builder, small_prim_precision, LLVMConstInt(ctx->ac.i32, 23, 0), "");
989      small_prim_precision = LLVMBuildBitCast(builder, small_prim_precision, ctx->ac.f32, "");
990
991      /* Execute culling code. */
992      struct ac_cull_options options = {};
993      options.cull_view_xy = true;
994      options.cull_w = true;
995
996      if (shader->key.opt.ngg_culling & SI_NGG_CULL_LINES) {
997         options.num_vertices = 2;
998
999         assert(!(shader->key.opt.ngg_culling & SI_NGG_CULL_BACK_FACE));
1000         assert(!(shader->key.opt.ngg_culling & SI_NGG_CULL_FRONT_FACE));
1001      } else {
1002         options.num_vertices = 3;
1003         options.cull_front = shader->key.opt.ngg_culling & SI_NGG_CULL_FRONT_FACE;
1004         options.cull_back = shader->key.opt.ngg_culling & SI_NGG_CULL_BACK_FACE;
1005         options.cull_small_prims = true; /* this would only be false with conservative rasterization */
1006         options.cull_zero_area = options.cull_front || options.cull_back;
1007      }
1008
1009      /* Tell ES threads whether their vertex survived. */
1010      LLVMValueRef params[] = {
1011         gs_accepted,
1012         (void*)gs_vtxptr,
1013      };
1014      ac_cull_primitive(&ctx->ac, pos, ctx->ac.i1true, vp_scale, vp_translate,
1015                        small_prim_precision, &options,
1016                        gfx10_build_primitive_accepted, params);
1017   }
1018   ac_build_endif(&ctx->ac, 16002);
1019   ac_build_s_barrier(&ctx->ac);
1020
1021   gs_accepted = LLVMBuildLoad(builder, gs_accepted, "");
1022
1023   LLVMValueRef vertex_accepted = ac_build_alloca(&ctx->ac, ctx->ac.i1, "");
1024   LLVMValueRef vertex_mask = ac_build_alloca(&ctx->ac, ctx->ac.iN_wavemask, "");
1025
1026   /* Convert the per-vertex accept flag to a vertex thread mask, store it in registers. */
1027   ac_build_ifcc(&ctx->ac, si_is_es_thread(ctx), 16007);
1028   {
1029      LLVMValueRef accepted =
1030         LLVMBuildLoad(builder, si_build_gep_i8(ctx, es_vtxptr, lds_byte0_accept_flag), "");
1031      accepted = LLVMBuildICmp(builder, LLVMIntNE, accepted, ctx->ac.i8_0, "");
1032      LLVMValueRef mask = ac_get_i1_sgpr_mask(&ctx->ac, accepted);
1033
1034      LLVMBuildStore(builder, accepted, vertex_accepted);
1035      LLVMBuildStore(builder, mask, vertex_mask);
1036   }
1037   ac_build_endif(&ctx->ac, 16007);
1038
1039   /* Store the per-wave vertex count to LDS. Non-ES waves store 0. */
1040   vertex_mask = LLVMBuildLoad(builder, vertex_mask, "");
1041   ac_build_ifcc(&ctx->ac, LLVMBuildICmp(builder, LLVMIntEQ, tid, ctx->ac.i32_0, ""), 16008);
1042   {
1043      LLVMValueRef vertex_count = ac_build_bit_count(&ctx->ac, vertex_mask);
1044      LLVMBuildStore(builder, LLVMBuildTrunc(builder, vertex_count, ctx->ac.i8, ""),
1045                     si_build_gep_i8_var(ctx, ctx->gs_ngg_scratch, get_wave_id_in_tg(ctx)));
1046   }
1047   ac_build_endif(&ctx->ac, 16008);
1048
1049   ac_build_s_barrier(&ctx->ac);
1050
1051   /* Load the vertex masks and compute the new ES thread count. */
1052   LLVMValueRef new_num_es_threads, prefix_sum, kill_wave;
1053   load_vertex_counts(ctx, ctx->gs_ngg_scratch, max_waves, tid, &new_num_es_threads,
1054                      &prefix_sum);
1055
1056   bool uses_instance_id = ctx->stage == MESA_SHADER_VERTEX &&
1057                           (sel->info.uses_instanceid ||
1058                            shader->key.part.vs.prolog.instance_divisor_is_one ||
1059                            shader->key.part.vs.prolog.instance_divisor_is_fetched);
1060   bool uses_tes_prim_id = ctx->stage == MESA_SHADER_TESS_EVAL &&
1061                           (sel->info.uses_primid || shader->key.mono.u.vs_export_prim_id);
1062
1063   /* ES threads compute their prefix sum, which is the new ES thread ID.
1064    * Then they write the vertex position and input VGPRs into the LDS address
1065    * of the new thread ID. It will be used to load input VGPRs by compacted
1066    * threads.
1067    */
1068   vertex_accepted = LLVMBuildLoad(builder, vertex_accepted, "");
1069   ac_build_ifcc(&ctx->ac, vertex_accepted, 16009);
1070   {
1071      /* Add the number of bits set in vertex_mask up to the current thread ID - 1
1072       * to get the prefix sum.
1073       */
1074      prefix_sum = LLVMBuildAdd(builder, prefix_sum, ac_build_mbcnt(&ctx->ac, vertex_mask), "");
1075
1076      LLVMValueRef new_id = prefix_sum;
1077      LLVMValueRef new_vtx = ngg_nogs_vertex_ptr(ctx, new_id);
1078
1079      LLVMBuildStore(builder, LLVMBuildTrunc(builder, new_id, ctx->ac.i8, ""),
1080                     si_build_gep_i8(ctx, es_vtxptr, lds_byte1_new_thread_id));
1081
1082      /* Store Position.XYZW into LDS. */
1083      for (unsigned chan = 0; chan < 4; chan++) {
1084         LLVMBuildStore(
1085            builder, ac_to_integer(&ctx->ac, LLVMBuildLoad(builder, addrs[4 * pos_index + chan], "")),
1086            ac_build_gep0(&ctx->ac, new_vtx, LLVMConstInt(ctx->ac.i32, lds_pos_x + chan, 0)));
1087      }
1088
1089      /* Store VertexID and InstanceID into LDS. ES threads will have to load them
1090       * from LDS after vertex compaction and use them instead of their own
1091       * system values.
1092       */
1093      if (ctx->stage == MESA_SHADER_VERTEX) {
1094         LLVMBuildStore(
1095            builder, ctx->abi.vertex_id,
1096            ac_build_gep0(&ctx->ac, new_vtx, LLVMConstInt(ctx->ac.i32, lds_vertex_id, 0)));
1097         if (uses_instance_id) {
1098            LLVMBuildStore(
1099               builder, ctx->abi.instance_id,
1100               ac_build_gep0(&ctx->ac, new_vtx, LLVMConstInt(ctx->ac.i32, lds_instance_id, 0)));
1101         }
1102      } else {
1103         assert(ctx->stage == MESA_SHADER_TESS_EVAL);
1104         LLVMBuildStore(builder, ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args.tes_u)),
1105                        ac_build_gep0(&ctx->ac, new_vtx, LLVMConstInt(ctx->ac.i32, lds_tes_u, 0)));
1106         LLVMBuildStore(builder, ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args.tes_v)),
1107                        ac_build_gep0(&ctx->ac, new_vtx, LLVMConstInt(ctx->ac.i32, lds_tes_v, 0)));
1108         LLVMBuildStore(builder, LLVMBuildTrunc(builder, ac_get_arg(&ctx->ac, ctx->args.tes_rel_patch_id), ctx->ac.i8, ""),
1109                        si_build_gep_i8(ctx, new_vtx, lds_byte2_tes_rel_patch_id));
1110         if (uses_tes_prim_id) {
1111            LLVMBuildStore(
1112               builder, ac_get_arg(&ctx->ac, ctx->args.tes_patch_id),
1113               ac_build_gep0(&ctx->ac, new_vtx, LLVMConstInt(ctx->ac.i32, lds_tes_patch_id, 0)));
1114         }
1115      }
1116   }
1117   ac_build_endif(&ctx->ac, 16009);
1118
1119   /* If all vertices are culled, set the primitive count to 0, so that all waves are culled here. */
1120   LLVMValueRef num_primitives = ngg_get_prim_cnt(ctx);
1121   num_primitives = LLVMBuildSelect(builder,
1122                                    LLVMBuildICmp(builder, LLVMIntEQ, new_num_es_threads,
1123                                                  ctx->ac.i32_0, ""),
1124                                    ctx->ac.i32_0, num_primitives, "");
1125   /* Kill waves that have inactive threads. */
1126   kill_wave = LLVMBuildICmp(builder, LLVMIntULE,
1127                             ac_build_imax(&ctx->ac, new_num_es_threads, num_primitives),
1128                             LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
1129                                          LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, 0), ""),
1130                             "");
1131   ac_build_ifcc(&ctx->ac, kill_wave, 19202);
1132   {
1133      /* If we are killing wave 0, send that there are no primitives
1134       * in this threadgroup.
1135       */
1136      ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx), ctx->ac.i32_0, ctx->ac.i32_0);
1137      ac_build_s_endpgm(&ctx->ac);
1138   }
1139   ac_build_endif(&ctx->ac, 19202);
1140   ac_build_s_barrier(&ctx->ac);
1141
1142   /* Send the final vertex and primitive counts. */
1143   ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx), new_num_es_threads,
1144                                 ngg_get_prim_cnt(ctx));
1145
1146   /* Update thread counts in SGPRs. */
1147   LLVMValueRef new_gs_tg_info = ac_get_arg(&ctx->ac, ctx->args.gs_tg_info);
1148   LLVMValueRef new_merged_wave_info = ac_get_arg(&ctx->ac, ctx->args.merged_wave_info);
1149
1150   /* This also converts the thread count from the total count to the per-wave count. */
1151   update_thread_counts(ctx, &new_num_es_threads, &new_gs_tg_info, 9, 12, &new_merged_wave_info, 8,
1152                        0);
1153
1154   /* Update vertex indices in VGPR0 (same format as NGG passthrough).
1155    *
1156    * Set the null flag at the beginning (culled), and then
1157    * overwrite it for accepted primitives.
1158    */
1159   LLVMValueRef new_vgpr0 =
1160      ac_build_alloca_init(&ctx->ac, LLVMConstInt(ctx->ac.i32, 1u << 31, 0), "");
1161
1162   /* Get vertex indices after vertex compaction. */
1163   ac_build_ifcc(&ctx->ac, LLVMBuildTrunc(builder, gs_accepted, ctx->ac.i1, ""), 16011);
1164   {
1165      struct ac_ngg_prim prim = {};
1166      prim.num_vertices = num_vertices;
1167      prim.isnull = ctx->ac.i1false;
1168
1169      if (gfx10_edgeflags_have_effect(shader))
1170         prim.edgeflags = ac_pack_edgeflags_for_export(&ctx->ac, &ctx->args);
1171      else
1172         prim.edgeflags = ctx->ac.i32_0;
1173
1174      for (unsigned vtx = 0; vtx < num_vertices; vtx++) {
1175         prim.index[vtx] = LLVMBuildLoad(
1176            builder, si_build_gep_i8(ctx, gs_vtxptr[vtx], lds_byte1_new_thread_id), "");
1177         prim.index[vtx] = LLVMBuildZExt(builder, prim.index[vtx], ctx->ac.i32, "");
1178      }
1179
1180      /* Set the new GS input VGPR. */
1181      LLVMBuildStore(builder, ac_pack_prim_export(&ctx->ac, &prim), new_vgpr0);
1182   }
1183   ac_build_endif(&ctx->ac, 16011);
1184
1185   if (gfx10_ngg_export_prim_early(shader))
1186      gfx10_ngg_build_export_prim(ctx, NULL, LLVMBuildLoad(builder, new_vgpr0, ""));
1187
1188   /* Prepare LDS addresses of the new ES input VGPRs. */
1189   LLVMValueRef input_vgpr_addresses[4] = {
1190      ac_build_gep0(&ctx->ac, es_vtxptr, LLVMConstInt(ctx->ac.i32, lds_vertex_id, 0)),
1191      ac_build_gep0(&ctx->ac, es_vtxptr, LLVMConstInt(ctx->ac.i32, lds_instance_id, 0)),
1192   };
1193   if (ctx->stage == MESA_SHADER_TESS_EVAL) {
1194      input_vgpr_addresses[2] = si_build_gep_i8(ctx, es_vtxptr, lds_byte2_tes_rel_patch_id);
1195      if (uses_tes_prim_id) {
1196         input_vgpr_addresses[3] = ac_build_gep0(&ctx->ac, es_vtxptr,
1197                                                 LLVMConstInt(ctx->ac.i32, lds_tes_patch_id, 0));
1198      }
1199   }
1200
1201   /* Return values for the main function. */
1202   LLVMValueRef ret = ctx->return_value;
1203   LLVMValueRef val;
1204
1205   ret = LLVMBuildInsertValue(ctx->ac.builder, ret, new_gs_tg_info, 2, "");
1206   ret = LLVMBuildInsertValue(ctx->ac.builder, ret, new_merged_wave_info, 3, "");
1207   if (ctx->stage == MESA_SHADER_TESS_EVAL)
1208      ret = si_insert_input_ret(ctx, ret, ctx->args.tess_offchip_offset, 4);
1209
1210   ret = si_insert_input_ptr(ctx, ret, ctx->internal_bindings, 8 + SI_SGPR_INTERNAL_BINDINGS);
1211   ret = si_insert_input_ptr(ctx, ret, ctx->bindless_samplers_and_images,
1212                             8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);
1213   ret = si_insert_input_ptr(ctx, ret, ctx->const_and_shader_buffers,
1214                             8 + SI_SGPR_CONST_AND_SHADER_BUFFERS);
1215   ret = si_insert_input_ptr(ctx, ret, ctx->samplers_and_images, 8 + SI_SGPR_SAMPLERS_AND_IMAGES);
1216   ret = si_insert_input_ptr(ctx, ret, ctx->vs_state_bits, 8 + SI_SGPR_VS_STATE_BITS);
1217
1218   if (ctx->stage == MESA_SHADER_VERTEX) {
1219      ret = si_insert_input_ptr(ctx, ret, ctx->args.base_vertex, 8 + SI_SGPR_BASE_VERTEX);
1220      ret = si_insert_input_ptr(ctx, ret, ctx->args.draw_id, 8 + SI_SGPR_DRAWID);
1221      ret = si_insert_input_ptr(ctx, ret, ctx->args.start_instance, 8 + SI_SGPR_START_INSTANCE);
1222      ret = si_insert_input_ptr(ctx, ret, ctx->args.vertex_buffers, 8 + SI_VS_NUM_USER_SGPR);
1223
1224      for (unsigned i = 0; i < shader->selector->num_vbos_in_user_sgprs; i++) {
1225         ret = si_insert_input_v4i32(ctx, ret, ctx->vb_descriptors[i],
1226                                     8 + SI_SGPR_VS_VB_DESCRIPTOR_FIRST + i * 4);
1227      }
1228   } else {
1229      assert(ctx->stage == MESA_SHADER_TESS_EVAL);
1230      ret = si_insert_input_ptr(ctx, ret, ctx->tcs_offchip_layout, 8 + SI_SGPR_TES_OFFCHIP_LAYOUT);
1231      ret = si_insert_input_ptr(ctx, ret, ctx->tes_offchip_addr, 8 + SI_SGPR_TES_OFFCHIP_ADDR);
1232   }
1233
1234   unsigned vgpr;
1235   if (ctx->stage == MESA_SHADER_VERTEX) {
1236      if (shader->selector->num_vbos_in_user_sgprs) {
1237         vgpr = 8 + SI_SGPR_VS_VB_DESCRIPTOR_FIRST + shader->selector->num_vbos_in_user_sgprs * 4;
1238      } else {
1239         vgpr = 8 + GFX9_VSGS_NUM_USER_SGPR + 1;
1240      }
1241   } else {
1242      vgpr = 8 + GFX9_TESGS_NUM_USER_SGPR;
1243   }
1244
1245   val = LLVMBuildLoad(builder, new_vgpr0, "");
1246   ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), vgpr++, "");
1247   vgpr++; /* gs_vtx_offset[1] = offsets of vertices 2-3  */
1248
1249   ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_prim_id, vgpr++);
1250   ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_invocation_id, vgpr++);
1251   vgpr++; /* gs_vtx_offset[2] = offsets of vertices 4-5 */
1252
1253   /* Set the input VPGRs to the corresponding LDS addresses where the VGPR values are
1254    * stored. The VS prolog will load them.
1255    */
1256   if (ctx->stage == MESA_SHADER_VERTEX) {
1257      val = LLVMBuildPtrToInt(builder, input_vgpr_addresses[0], ctx->ac.i32, "");
1258      ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), vgpr++,
1259                                 ""); /* VGPR5 - VertexID */
1260      vgpr += 2;
1261      if (uses_instance_id) {
1262         val = LLVMBuildPtrToInt(builder, input_vgpr_addresses[1], ctx->ac.i32, "");
1263         ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), vgpr++,
1264                                    ""); /* VGPR8 - InstanceID */
1265      } else {
1266         vgpr++;
1267      }
1268   } else {
1269      assert(ctx->stage == MESA_SHADER_TESS_EVAL);
1270      unsigned num_vgprs = uses_tes_prim_id ? 4 : 3;
1271      for (unsigned i = 0; i < num_vgprs; i++) {
1272         val = LLVMBuildPtrToInt(builder, input_vgpr_addresses[i], ctx->ac.i32, "");
1273         ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), vgpr++, "");
1274      }
1275      if (num_vgprs == 3)
1276         vgpr++;
1277   }
1278
1279   /* These two also use LDS. */
1280   if (gfx10_ngg_writes_user_edgeflags(shader) ||
1281       (ctx->stage == MESA_SHADER_VERTEX && shader->key.mono.u.vs_export_prim_id))
1282      ac_build_s_barrier(&ctx->ac);
1283
1284   ctx->return_value = ret;
1285}
1286
1287/**
1288 * Emit the epilogue of an API VS or TES shader compiled as ESGS shader.
1289 */
1290void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi)
1291{
1292   struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1293   struct si_shader_selector *sel = ctx->shader->selector;
1294   struct si_shader_info *info = &sel->info;
1295   struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
1296   LLVMBuilderRef builder = ctx->ac.builder;
1297   LLVMValueRef *addrs = abi->outputs;
1298   LLVMValueRef tmp, tmp2;
1299
1300   assert(!ctx->shader->is_gs_copy_shader);
1301   assert(info->num_outputs <= AC_LLVM_MAX_OUTPUTS);
1302
1303   LLVMValueRef vertex_ptr = NULL;
1304
1305   if (sel->so.num_outputs || gfx10_ngg_writes_user_edgeflags(ctx->shader))
1306      vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
1307
1308   for (unsigned i = 0; i < info->num_outputs; i++) {
1309      outputs[i].semantic = info->output_semantic[i];
1310
1311      for (unsigned j = 0; j < 4; j++) {
1312         outputs[i].vertex_stream[j] = (info->output_streams[i] >> (2 * j)) & 3;
1313
1314         /* TODO: we may store more outputs than streamout needs,
1315          * but streamout performance isn't that important.
1316          */
1317         if (sel->so.num_outputs) {
1318            tmp = ac_build_gep0(&ctx->ac, vertex_ptr, LLVMConstInt(ctx->ac.i32, 4 * i + j, false));
1319            tmp2 = LLVMBuildLoad(builder, addrs[4 * i + j], "");
1320            tmp2 = ac_to_integer(&ctx->ac, tmp2);
1321            LLVMBuildStore(builder, tmp2, tmp);
1322         }
1323      }
1324
1325      /* Store the edgeflag at the end (if streamout is enabled) */
1326      if (info->output_semantic[i] == VARYING_SLOT_EDGE && gfx10_ngg_writes_user_edgeflags(ctx->shader)) {
1327         LLVMValueRef edgeflag = LLVMBuildLoad(builder, addrs[4 * i], "");
1328         /* The output is a float, but the hw expects a 1-bit integer. */
1329         edgeflag = LLVMBuildFPToUI(ctx->ac.builder, edgeflag, ctx->ac.i32, "");
1330         edgeflag = ac_build_umin(&ctx->ac, edgeflag, ctx->ac.i32_1);
1331
1332         tmp = LLVMConstInt(ctx->ac.i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
1333         tmp = ac_build_gep0(&ctx->ac, vertex_ptr, tmp);
1334         LLVMBuildStore(builder, edgeflag, tmp);
1335      }
1336   }
1337
1338   bool unterminated_es_if_block =
1339      !sel->so.num_outputs && !gfx10_ngg_writes_user_edgeflags(ctx->shader) &&
1340      !ctx->screen->use_ngg_streamout && /* no query buffer */
1341      (ctx->stage != MESA_SHADER_VERTEX || !ctx->shader->key.mono.u.vs_export_prim_id);
1342
1343   if (!unterminated_es_if_block)
1344      ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
1345
1346   LLVMValueRef is_gs_thread = si_is_gs_thread(ctx);
1347   LLVMValueRef is_es_thread = si_is_es_thread(ctx);
1348   LLVMValueRef vtxindex[3];
1349
1350   if (ctx->shader->key.opt.ngg_culling || gfx10_is_ngg_passthrough(ctx->shader)) {
1351      for (unsigned i = 0; i < 3; ++i)
1352         vtxindex[i] = si_unpack_param(ctx, ctx->args.gs_vtx_offset[0], 10 * i, 9);
1353   } else {
1354      for (unsigned i = 0; i < 3; ++i)
1355         vtxindex[i] = si_unpack_param(ctx, ctx->args.gs_vtx_offset[i / 2], (i & 1) * 16, 16);
1356   }
1357
1358   /* Determine the number of vertices per primitive. */
1359   unsigned num_vertices;
1360   LLVMValueRef num_vertices_val = ngg_get_vertices_per_prim(ctx, &num_vertices);
1361
1362   /* Streamout */
1363   LLVMValueRef emitted_prims = NULL;
1364
1365   if (sel->so.num_outputs) {
1366      assert(!unterminated_es_if_block);
1367
1368      struct ngg_streamout nggso = {};
1369      nggso.num_vertices = num_vertices_val;
1370      nggso.prim_enable[0] = is_gs_thread;
1371
1372      for (unsigned i = 0; i < num_vertices; ++i)
1373         nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);
1374
1375      build_streamout(ctx, &nggso);
1376      emitted_prims = nggso.emit[0];
1377   }
1378
1379   LLVMValueRef user_edgeflags[3] = {};
1380
1381   if (gfx10_ngg_writes_user_edgeflags(ctx->shader)) {
1382      assert(!unterminated_es_if_block);
1383
1384      /* Streamout already inserted the barrier, so don't insert it again. */
1385      if (!sel->so.num_outputs)
1386         ac_build_s_barrier(&ctx->ac);
1387
1388      ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
1389      /* Load edge flags from ES threads and store them into VGPRs in GS threads. */
1390      for (unsigned i = 0; i < num_vertices; i++) {
1391         tmp = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);
1392         tmp2 = LLVMConstInt(ctx->ac.i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
1393         tmp = ac_build_gep0(&ctx->ac, tmp, tmp2);
1394         tmp = LLVMBuildLoad(builder, tmp, "");
1395         tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
1396
1397         user_edgeflags[i] = ac_build_alloca_init(&ctx->ac, tmp, "");
1398      }
1399      ac_build_endif(&ctx->ac, 5400);
1400   }
1401
1402   /* Copy Primitive IDs from GS threads to the LDS address corresponding
1403    * to the ES thread of the provoking vertex.
1404    */
1405   if (ctx->stage == MESA_SHADER_VERTEX && ctx->shader->key.mono.u.vs_export_prim_id) {
1406      assert(!unterminated_es_if_block);
1407
1408      /* Streamout and edge flags use LDS. Make it idle, so that we can reuse it. */
1409      if (sel->so.num_outputs || gfx10_ngg_writes_user_edgeflags(ctx->shader))
1410         ac_build_s_barrier(&ctx->ac);
1411
1412      ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
1413      /* Extract the PROVOKING_VTX_INDEX field. */
1414      LLVMValueRef provoking_vtx_in_prim = si_unpack_param(ctx, ctx->vs_state_bits, 4, 2);
1415
1416      /* provoking_vtx_index = vtxindex[provoking_vtx_in_prim]; */
1417      LLVMValueRef indices = ac_build_gather_values(&ctx->ac, vtxindex, 3);
1418      LLVMValueRef provoking_vtx_index =
1419         LLVMBuildExtractElement(builder, indices, provoking_vtx_in_prim, "");
1420      LLVMValueRef vertex_ptr = ngg_nogs_vertex_ptr(ctx, provoking_vtx_index);
1421
1422      LLVMBuildStore(builder, ac_get_arg(&ctx->ac, ctx->args.gs_prim_id),
1423                     ac_build_gep0(&ctx->ac, vertex_ptr, ctx->ac.i32_0));
1424      ac_build_endif(&ctx->ac, 5400);
1425   }
1426
1427   /* Update query buffer */
1428   if (ctx->screen->use_ngg_streamout && !info->base.vs.blit_sgprs_amd) {
1429      assert(!unterminated_es_if_block);
1430
1431      tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
1432      tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
1433      ac_build_ifcc(&ctx->ac, tmp, 5029); /* if (STREAMOUT_QUERY_ENABLED) */
1434      tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
1435      ac_build_ifcc(&ctx->ac, tmp, 5030);
1436      tmp = LLVMBuildICmp(builder, LLVMIntULE, ac_get_thread_id(&ctx->ac),
1437                          sel->so.num_outputs ? ctx->ac.i32_1 : ctx->ac.i32_0, "");
1438      ac_build_ifcc(&ctx->ac, tmp, 5031);
1439      {
1440         LLVMValueRef args[] = {
1441            ngg_get_prim_cnt(ctx),
1442            ngg_get_query_buf(ctx),
1443            LLVMConstInt(ctx->ac.i32, 16, false), /* offset of stream[0].generated_primitives */
1444            ctx->ac.i32_0,                        /* soffset */
1445            ctx->ac.i32_0,                        /* cachepolicy */
1446         };
1447
1448         if (sel->so.num_outputs) {
1449            args[0] = ac_build_writelane(&ctx->ac, args[0], emitted_prims, ctx->ac.i32_1);
1450            args[2] = ac_build_writelane(&ctx->ac, args[2], LLVMConstInt(ctx->ac.i32, 24, false),
1451                                         ctx->ac.i32_1);
1452         }
1453
1454         /* TODO: should this be 64-bit atomics? */
1455         ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32", ctx->ac.i32, args, 5,
1456                            0);
1457      }
1458      ac_build_endif(&ctx->ac, 5031);
1459      ac_build_endif(&ctx->ac, 5030);
1460      ac_build_endif(&ctx->ac, 5029);
1461   }
1462
1463   /* Build the primitive export. */
1464   if (!gfx10_ngg_export_prim_early(ctx->shader)) {
1465      assert(!unterminated_es_if_block);
1466      gfx10_ngg_build_export_prim(ctx, user_edgeflags, NULL);
1467   }
1468
1469   /* Export per-vertex data (positions and parameters). */
1470   if (!unterminated_es_if_block)
1471      ac_build_ifcc(&ctx->ac, is_es_thread, 6002);
1472   {
1473      unsigned i;
1474
1475      /* Unconditionally (re-)load the values for proper SSA form. */
1476      for (i = 0; i < info->num_outputs; i++) {
1477         /* If the NGG cull shader part computed the position, don't
1478          * use the position from the current shader part. Instead,
1479          * load it from LDS.
1480          */
1481         if (info->output_semantic[i] == VARYING_SLOT_POS &&
1482             ctx->shader->key.opt.ngg_culling) {
1483            vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
1484
1485            for (unsigned j = 0; j < 4; j++) {
1486               tmp = LLVMConstInt(ctx->ac.i32, lds_pos_x + j, 0);
1487               tmp = ac_build_gep0(&ctx->ac, vertex_ptr, tmp);
1488               tmp = LLVMBuildLoad(builder, tmp, "");
1489               outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
1490            }
1491         } else {
1492            for (unsigned j = 0; j < 4; j++) {
1493               outputs[i].values[j] = LLVMBuildLoad(builder, addrs[4 * i + j], "");
1494            }
1495         }
1496      }
1497
1498      if (ctx->shader->key.mono.u.vs_export_prim_id) {
1499         outputs[i].semantic = VARYING_SLOT_PRIMITIVE_ID;
1500
1501         if (ctx->stage == MESA_SHADER_VERTEX) {
1502            /* Wait for GS stores to finish. */
1503            ac_build_s_barrier(&ctx->ac);
1504
1505            tmp = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
1506            tmp = ac_build_gep0(&ctx->ac, tmp, ctx->ac.i32_0);
1507            outputs[i].values[0] = LLVMBuildLoad(builder, tmp, "");
1508         } else {
1509            assert(ctx->stage == MESA_SHADER_TESS_EVAL);
1510            outputs[i].values[0] = si_get_primitive_id(ctx, 0);
1511         }
1512
1513         outputs[i].values[0] = ac_to_float(&ctx->ac, outputs[i].values[0]);
1514         for (unsigned j = 1; j < 4; j++)
1515            outputs[i].values[j] = LLVMGetUndef(ctx->ac.f32);
1516
1517         memset(outputs[i].vertex_stream, 0, sizeof(outputs[i].vertex_stream));
1518         i++;
1519      }
1520
1521      si_llvm_build_vs_exports(ctx, outputs, i);
1522   }
1523   ac_build_endif(&ctx->ac, 6002);
1524}
1525
1526static LLVMValueRef ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
1527{
1528   const struct si_shader_selector *sel = ctx->shader->selector;
1529   const struct si_shader_info *info = &sel->info;
1530
1531   LLVMTypeRef elements[2] = {
1532      LLVMArrayType(ctx->ac.i32, 4 * info->num_outputs),
1533      LLVMArrayType(ctx->ac.i8, 4),
1534   };
1535   LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
1536   type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
1537   return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
1538}
1539
1540/**
1541 * Return a pointer to the LDS storage reserved for the N'th vertex, where N
1542 * is in emit order; that is:
1543 * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
1544 * - during vertex emit, i.e. while the API GS shader invocation is running,
1545 *   N = threadidx * gs.vertices_out + emitidx
1546 *
1547 * Goals of the LDS memory layout:
1548 * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
1549 *    in uniform control flow
1550 * 2. Eliminate bank conflicts on read for export if, additionally, there is no
1551 *    culling
1552 * 3. Agnostic to the number of waves (since we don't know it before compiling)
1553 * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
1554 * 5. Avoid wasting memory.
1555 *
1556 * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
1557 * layout, elimination of bank conflicts requires that each vertex occupy an
1558 * odd number of dwords. We use the additional dword to store the output stream
1559 * index as well as a flag to indicate whether this vertex ends a primitive
1560 * for rasterization.
1561 *
1562 * Swizzling is required to satisfy points 1 and 2 simultaneously.
1563 *
1564 * Vertices are stored in export order (gsthread * gs.vertices_out + emitidx).
1565 * Indices are swizzled in groups of 32, which ensures point 1 without
1566 * disturbing point 2.
1567 *
1568 * \return an LDS pointer to type {[N x i32], [4 x i8]}
1569 */
1570static LLVMValueRef ngg_gs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vertexidx)
1571{
1572   struct si_shader_selector *sel = ctx->shader->selector;
1573   LLVMBuilderRef builder = ctx->ac.builder;
1574   LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);
1575
1576   /* gs.vertices_out = 2^(write_stride_2exp) * some odd number */
1577   unsigned write_stride_2exp = ffs(sel->info.base.gs.vertices_out) - 1;
1578   if (write_stride_2exp) {
1579      LLVMValueRef row = LLVMBuildLShr(builder, vertexidx, LLVMConstInt(ctx->ac.i32, 5, false), "");
1580      LLVMValueRef swizzle = LLVMBuildAnd(
1581         builder, row, LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1, false), "");
1582      vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
1583   }
1584
1585   return ac_build_gep0(&ctx->ac, storage, vertexidx);
1586}
1587
1588static LLVMValueRef ngg_gs_emit_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef gsthread,
1589                                           LLVMValueRef emitidx)
1590{
1591   struct si_shader_selector *sel = ctx->shader->selector;
1592   LLVMBuilderRef builder = ctx->ac.builder;
1593   LLVMValueRef tmp;
1594
1595   tmp = LLVMConstInt(ctx->ac.i32, sel->info.base.gs.vertices_out, false);
1596   tmp = LLVMBuildMul(builder, tmp, gsthread, "");
1597   const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
1598   return ngg_gs_vertex_ptr(ctx, vertexidx);
1599}
1600
1601static LLVMValueRef ngg_gs_get_emit_output_ptr(struct si_shader_context *ctx,
1602                                               LLVMValueRef vertexptr, unsigned out_idx)
1603{
1604   LLVMValueRef gep_idx[3] = {
1605      ctx->ac.i32_0, /* implied C-style array */
1606      ctx->ac.i32_0, /* first struct entry */
1607      LLVMConstInt(ctx->ac.i32, out_idx, false),
1608   };
1609   return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
1610}
1611
1612static LLVMValueRef ngg_gs_get_emit_primflag_ptr(struct si_shader_context *ctx,
1613                                                 LLVMValueRef vertexptr, unsigned stream)
1614{
1615   LLVMValueRef gep_idx[3] = {
1616      ctx->ac.i32_0, /* implied C-style array */
1617      ctx->ac.i32_1, /* second struct entry */
1618      LLVMConstInt(ctx->ac.i32, stream, false),
1619   };
1620   return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
1621}
1622
1623void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx, unsigned stream, LLVMValueRef *addrs)
1624{
1625   const struct si_shader_selector *sel = ctx->shader->selector;
1626   const struct si_shader_info *info = &sel->info;
1627   LLVMBuilderRef builder = ctx->ac.builder;
1628   LLVMValueRef tmp;
1629   const LLVMValueRef vertexidx = LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
1630
1631   /* If this thread has already emitted the declared maximum number of
1632    * vertices, skip the write: excessive vertex emissions are not
1633    * supposed to have any effect.
1634    */
1635   const LLVMValueRef can_emit =
1636      LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
1637                    LLVMConstInt(ctx->ac.i32, sel->info.base.gs.vertices_out, false), "");
1638
1639   tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
1640   tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
1641   LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);
1642
1643   ac_build_ifcc(&ctx->ac, can_emit, 9001);
1644
1645   const LLVMValueRef vertexptr = ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
1646   unsigned out_idx = 0;
1647   for (unsigned i = 0; i < info->num_outputs; i++) {
1648      for (unsigned chan = 0; chan < 4; chan++, out_idx++) {
1649         if (!(info->output_usagemask[i] & (1 << chan)) ||
1650             ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
1651            continue;
1652
1653         LLVMValueRef out_val = LLVMBuildLoad(builder, addrs[4 * i + chan], "");
1654         out_val = ac_to_integer(&ctx->ac, out_val);
1655         LLVMBuildStore(builder, out_val, ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx));
1656      }
1657   }
1658   assert(out_idx * 4 == sel->gsvs_vertex_size);
1659
1660   /* Determine and store whether this vertex completed a primitive. */
1661   const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");
1662
1663   tmp = LLVMConstInt(ctx->ac.i32, u_vertices_per_prim(sel->info.base.gs.output_primitive) - 1, false);
1664   const LLVMValueRef iscompleteprim = LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");
1665
1666   /* Since the geometry shader emits triangle strips, we need to
1667    * track which primitive is odd and swap vertex indices to get
1668    * the correct vertex order.
1669    */
1670   LLVMValueRef is_odd = ctx->ac.i1false;
1671   if (stream == 0 && u_vertices_per_prim(sel->info.base.gs.output_primitive) == 3) {
1672      tmp = LLVMBuildAnd(builder, curverts, ctx->ac.i32_1, "");
1673      is_odd = LLVMBuildICmp(builder, LLVMIntEQ, tmp, ctx->ac.i32_1, "");
1674   }
1675
1676   tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
1677   LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);
1678
1679   /* The per-vertex primitive flag encoding:
1680    *   bit 0: whether this vertex finishes a primitive
1681    *   bit 1: whether the primitive is odd (if we are emitting triangle strips)
1682    */
1683   tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
1684   tmp = LLVMBuildOr(
1685      builder, tmp,
1686      LLVMBuildShl(builder, LLVMBuildZExt(builder, is_odd, ctx->ac.i8, ""), ctx->ac.i8_1, ""), "");
1687   LLVMBuildStore(builder, tmp, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream));
1688
1689   tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
1690   tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
1691   LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);
1692
1693   ac_build_endif(&ctx->ac, 9001);
1694}
1695
1696void gfx10_ngg_gs_emit_prologue(struct si_shader_context *ctx)
1697{
1698   /* Zero out the part of LDS scratch that is used to accumulate the
1699    * per-stream generated primitive count.
1700    */
1701   LLVMBuilderRef builder = ctx->ac.builder;
1702   LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
1703   LLVMValueRef tid = get_thread_id_in_tg(ctx);
1704   LLVMValueRef tmp;
1705
1706   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->ac.i32, 4, false), "");
1707   ac_build_ifcc(&ctx->ac, tmp, 5090);
1708   {
1709      LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
1710      LLVMBuildStore(builder, ctx->ac.i32_0, ptr);
1711   }
1712   ac_build_endif(&ctx->ac, 5090);
1713
1714   ac_build_s_barrier(&ctx->ac);
1715}
1716
1717void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx)
1718{
1719   const struct si_shader_selector *sel = ctx->shader->selector;
1720   const struct si_shader_info *info = &sel->info;
1721   const unsigned verts_per_prim = u_vertices_per_prim(sel->info.base.gs.output_primitive);
1722   LLVMBuilderRef builder = ctx->ac.builder;
1723   LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
1724   LLVMValueRef tmp, tmp2;
1725
1726   /* Zero out remaining (non-emitted) primitive flags.
1727    *
1728    * Note: Alternatively, we could pass the relevant gs_next_vertex to
1729    *       the emit threads via LDS. This is likely worse in the expected
1730    *       typical case where each GS thread emits the full set of
1731    *       vertices.
1732    */
1733   for (unsigned stream = 0; stream < 4; ++stream) {
1734      if (!info->num_stream_output_components[stream])
1735         continue;
1736
1737      const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);
1738
1739      ac_build_bgnloop(&ctx->ac, 5100);
1740
1741      const LLVMValueRef vertexidx = LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
1742      tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
1743                          LLVMConstInt(ctx->ac.i32, sel->info.base.gs.vertices_out, false), "");
1744      ac_build_ifcc(&ctx->ac, tmp, 5101);
1745      ac_build_break(&ctx->ac);
1746      ac_build_endif(&ctx->ac, 5101);
1747
1748      tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
1749      LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);
1750
1751      tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
1752      LLVMBuildStore(builder, i8_0, ngg_gs_get_emit_primflag_ptr(ctx, tmp, stream));
1753
1754      ac_build_endloop(&ctx->ac, 5100);
1755   }
1756
1757   /* Accumulate generated primitives counts across the entire threadgroup. */
1758   for (unsigned stream = 0; stream < 4; ++stream) {
1759      if (!info->num_stream_output_components[stream])
1760         continue;
1761
1762      LLVMValueRef numprims = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
1763      numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, ctx->ac.wave_size);
1764
1765      tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->ac.i32_0, "");
1766      ac_build_ifcc(&ctx->ac, tmp, 5105);
1767      {
1768         LLVMBuildAtomicRMW(
1769            builder, LLVMAtomicRMWBinOpAdd,
1770            ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, LLVMConstInt(ctx->ac.i32, stream, false)),
1771            numprims, LLVMAtomicOrderingMonotonic, false);
1772      }
1773      ac_build_endif(&ctx->ac, 5105);
1774   }
1775
1776   ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
1777
1778   ac_build_s_barrier(&ctx->ac);
1779
1780   const LLVMValueRef tid = get_thread_id_in_tg(ctx);
1781   LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);
1782
1783   /* Streamout */
1784   if (sel->so.num_outputs) {
1785      struct ngg_streamout nggso = {};
1786
1787      nggso.num_vertices = LLVMConstInt(ctx->ac.i32, verts_per_prim, false);
1788
1789      LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
1790      for (unsigned stream = 0; stream < 4; ++stream) {
1791         if (!info->num_stream_output_components[stream])
1792            continue;
1793
1794         tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream), "");
1795         tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
1796         tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
1797         nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
1798      }
1799
1800      for (unsigned i = 0; i < verts_per_prim; ++i) {
1801         tmp = LLVMBuildSub(builder, tid, LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false),
1802                            "");
1803         tmp = ngg_gs_vertex_ptr(ctx, tmp);
1804         nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->ac.i32_0);
1805      }
1806
1807      build_streamout(ctx, &nggso);
1808   }
1809
1810   /* Write shader query data. */
1811   if (ctx->screen->use_ngg_streamout) {
1812      tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
1813      tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
1814      ac_build_ifcc(&ctx->ac, tmp, 5109); /* if (STREAMOUT_QUERY_ENABLED) */
1815      unsigned num_query_comps = sel->so.num_outputs ? 8 : 4;
1816      tmp = LLVMBuildICmp(builder, LLVMIntULT, tid,
1817                          LLVMConstInt(ctx->ac.i32, num_query_comps, false), "");
1818      ac_build_ifcc(&ctx->ac, tmp, 5110);
1819      {
1820         LLVMValueRef offset;
1821         tmp = tid;
1822         if (sel->so.num_outputs)
1823            tmp = LLVMBuildAnd(builder, tmp, LLVMConstInt(ctx->ac.i32, 3, false), "");
1824         offset = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->ac.i32, 32, false), "");
1825         if (sel->so.num_outputs) {
1826            tmp = LLVMBuildLShr(builder, tid, LLVMConstInt(ctx->ac.i32, 2, false), "");
1827            tmp = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->ac.i32, 8, false), "");
1828            offset = LLVMBuildAdd(builder, offset, tmp, "");
1829         }
1830
1831         tmp = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid), "");
1832         LLVMValueRef args[] = {
1833            tmp,           ngg_get_query_buf(ctx),
1834            offset,        LLVMConstInt(ctx->ac.i32, 16, false), /* soffset */
1835            ctx->ac.i32_0,                                       /* cachepolicy */
1836         };
1837         ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32", ctx->ac.i32, args, 5,
1838                            0);
1839      }
1840      ac_build_endif(&ctx->ac, 5110);
1841      ac_build_endif(&ctx->ac, 5109);
1842   }
1843
1844   /* Determine vertex liveness. */
1845   LLVMValueRef vertliveptr = ac_build_alloca(&ctx->ac, ctx->ac.i1, "vertexlive");
1846
1847   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
1848   ac_build_ifcc(&ctx->ac, tmp, 5120);
1849   {
1850      for (unsigned i = 0; i < verts_per_prim; ++i) {
1851         const LLVMValueRef primidx =
1852            LLVMBuildAdd(builder, tid, LLVMConstInt(ctx->ac.i32, i, false), "");
1853
1854         if (i > 0) {
1855            tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
1856            ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
1857         }
1858
1859         /* Load primitive liveness */
1860         tmp = ngg_gs_vertex_ptr(ctx, primidx);
1861         tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
1862         const LLVMValueRef primlive = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
1863
1864         tmp = LLVMBuildLoad(builder, vertliveptr, "");
1865         tmp = LLVMBuildOr(builder, tmp, primlive, ""), LLVMBuildStore(builder, tmp, vertliveptr);
1866
1867         if (i > 0)
1868            ac_build_endif(&ctx->ac, 5121 + i);
1869      }
1870   }
1871   ac_build_endif(&ctx->ac, 5120);
1872
1873   /* Inclusive scan addition across the current wave. */
1874   LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
1875   struct ac_wg_scan vertlive_scan = {};
1876   vertlive_scan.op = nir_op_iadd;
1877   vertlive_scan.enable_reduce = true;
1878   vertlive_scan.enable_exclusive = true;
1879   vertlive_scan.src = vertlive;
1880   vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->ac.i32_0);
1881   vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
1882   vertlive_scan.numwaves = get_tgsize(ctx);
1883   vertlive_scan.maxwaves = DIV_ROUND_UP(256, ctx->ac.wave_size);
1884
1885   ac_build_wg_scan(&ctx->ac, &vertlive_scan);
1886
1887   /* Skip all exports (including index exports) when possible. */
1888   LLVMValueRef have_exports =
1889      LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
1890   num_emit_threads = LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");
1891
1892   /* Allocate export space. Send this message as early as possible, to
1893    * hide the latency of the SQ <-> SPI roundtrip.
1894    */
1895   ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx), vertlive_scan.result_reduce,
1896                                 num_emit_threads);
1897
1898   /* Setup the reverse vertex compaction permutation. We re-use stream 1
1899    * of the primitive liveness flags, relying on the fact that each
1900    * threadgroup can have at most 256 threads. */
1901   ac_build_ifcc(&ctx->ac, vertlive, 5130);
1902   {
1903      tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
1904      tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
1905      LLVMBuildStore(builder, tmp2, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1));
1906   }
1907   ac_build_endif(&ctx->ac, 5130);
1908
1909   ac_build_s_barrier(&ctx->ac);
1910
1911   /* Export primitive data */
1912   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
1913   ac_build_ifcc(&ctx->ac, tmp, 5140);
1914   {
1915      LLVMValueRef flags;
1916      struct ac_ngg_prim prim = {};
1917      prim.num_vertices = verts_per_prim;
1918
1919      tmp = ngg_gs_vertex_ptr(ctx, tid);
1920      flags = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
1921      prim.isnull = LLVMBuildNot(builder, LLVMBuildTrunc(builder, flags, ctx->ac.i1, ""), "");
1922      prim.edgeflags = ctx->ac.i32_0;
1923
1924      for (unsigned i = 0; i < verts_per_prim; ++i) {
1925         prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
1926                                      LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
1927      }
1928
1929      /* Geometry shaders output triangle strips, but NGG expects triangles. */
1930      if (verts_per_prim == 3) {
1931         LLVMValueRef is_odd = LLVMBuildLShr(builder, flags, ctx->ac.i8_1, "");
1932         is_odd = LLVMBuildTrunc(builder, is_odd, ctx->ac.i1, "");
1933         LLVMValueRef flatshade_first = LLVMBuildICmp(
1934            builder, LLVMIntEQ, si_unpack_param(ctx, ctx->vs_state_bits, 4, 2), ctx->ac.i32_0, "");
1935
1936         ac_build_triangle_strip_indices_to_triangle(&ctx->ac, is_odd, flatshade_first, prim.index);
1937      }
1938
1939      ac_build_export_prim(&ctx->ac, &prim);
1940   }
1941   ac_build_endif(&ctx->ac, 5140);
1942
1943   /* Export position and parameter data */
1944   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
1945   ac_build_ifcc(&ctx->ac, tmp, 5145);
1946   {
1947      struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
1948
1949      tmp = ngg_gs_vertex_ptr(ctx, tid);
1950      tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1), "");
1951      tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
1952      const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);
1953
1954      unsigned out_idx = 0;
1955      for (unsigned i = 0; i < info->num_outputs; i++) {
1956         outputs[i].semantic = info->output_semantic[i];
1957
1958         for (unsigned j = 0; j < 4; j++, out_idx++) {
1959            tmp = ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx);
1960            tmp = LLVMBuildLoad(builder, tmp, "");
1961            outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
1962            outputs[i].vertex_stream[j] = (info->output_streams[i] >> (2 * j)) & 3;
1963         }
1964      }
1965
1966      si_llvm_build_vs_exports(ctx, outputs, info->num_outputs);
1967   }
1968   ac_build_endif(&ctx->ac, 5145);
1969}
1970
1971static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
1972                                     unsigned min_verts_per_prim, bool use_adjacency)
1973{
1974   unsigned max_reuse = max_esverts - min_verts_per_prim;
1975   if (use_adjacency)
1976      max_reuse /= 2;
1977   *max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
1978}
1979
1980unsigned gfx10_ngg_get_scratch_dw_size(struct si_shader *shader)
1981{
1982   const struct si_shader_selector *sel = shader->selector;
1983
1984   if (sel->info.stage == MESA_SHADER_GEOMETRY && sel->so.num_outputs)
1985      return 44;
1986
1987   return 8;
1988}
1989
1990/**
1991 * Determine subgroup information like maximum number of vertices and prims.
1992 *
1993 * This happens before the shader is uploaded, since LDS relocations during
1994 * upload depend on the subgroup size.
1995 */
1996bool gfx10_ngg_calculate_subgroup_info(struct si_shader *shader)
1997{
1998   const struct si_shader_selector *gs_sel = shader->selector;
1999   const struct si_shader_selector *es_sel =
2000      shader->previous_stage_sel ? shader->previous_stage_sel : gs_sel;
2001   const gl_shader_stage gs_stage = gs_sel->info.stage;
2002   const unsigned gs_num_invocations = MAX2(gs_sel->info.base.gs.invocations, 1);
2003   const unsigned input_prim = si_get_input_prim(gs_sel, &shader->key);
2004   const bool use_adjacency =
2005      input_prim >= PIPE_PRIM_LINES_ADJACENCY && input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
2006   const unsigned max_verts_per_prim = u_vertices_per_prim(input_prim);
2007   const unsigned min_verts_per_prim = gs_stage == MESA_SHADER_GEOMETRY ? max_verts_per_prim : 1;
2008
2009   /* All these are in dwords: */
2010   /* GE can only use 8K dwords (32KB) of LDS per workgroup.
2011    */
2012   const unsigned max_lds_size = 8 * 1024 - gfx10_ngg_get_scratch_dw_size(shader);
2013   const unsigned target_lds_size = max_lds_size;
2014   unsigned esvert_lds_size = 0;
2015   unsigned gsprim_lds_size = 0;
2016
2017   /* All these are per subgroup: */
2018   const unsigned min_esverts = gs_sel->screen->info.chip_class >= GFX10_3 ? 29 : 24;
2019   bool max_vert_out_per_gs_instance = false;
2020   unsigned max_gsprims_base = gs_sel->screen->ngg_subgroup_size; /* default prim group size clamp */
2021   unsigned max_esverts_base = gs_sel->screen->ngg_subgroup_size;
2022
2023   if (gs_stage == MESA_SHADER_GEOMETRY) {
2024      bool force_multi_cycling = false;
2025      unsigned max_out_verts_per_gsprim = gs_sel->info.base.gs.vertices_out * gs_num_invocations;
2026
2027retry_select_mode:
2028      if (max_out_verts_per_gsprim <= 256 && !force_multi_cycling) {
2029         if (max_out_verts_per_gsprim) {
2030            max_gsprims_base = MIN2(max_gsprims_base, 256 / max_out_verts_per_gsprim);
2031         }
2032      } else {
2033         /* Use special multi-cycling mode in which each GS
2034          * instance gets its own subgroup. Does not work with
2035          * tessellation. */
2036         max_vert_out_per_gs_instance = true;
2037         max_gsprims_base = 1;
2038         max_out_verts_per_gsprim = gs_sel->info.base.gs.vertices_out;
2039      }
2040
2041      esvert_lds_size = es_sel->esgs_itemsize / 4;
2042      gsprim_lds_size = (gs_sel->gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
2043
2044      if (gsprim_lds_size > target_lds_size && !force_multi_cycling) {
2045         if (gs_sel->tess_turns_off_ngg || es_sel->info.stage != MESA_SHADER_TESS_EVAL) {
2046            force_multi_cycling = true;
2047            goto retry_select_mode;
2048         }
2049      }
2050   } else {
2051      /* VS and TES. */
2052      /* LDS size for passing data from ES to GS. */
2053      esvert_lds_size = ngg_nogs_vertex_size(shader);
2054   }
2055
2056   unsigned max_gsprims = max_gsprims_base;
2057   unsigned max_esverts = max_esverts_base;
2058
2059   if (esvert_lds_size)
2060      max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
2061   if (gsprim_lds_size)
2062      max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);
2063
2064   max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
2065   clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
2066   assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
2067
2068   if (esvert_lds_size || gsprim_lds_size) {
2069      /* Now that we have a rough proportionality between esverts
2070       * and gsprims based on the primitive type, scale both of them
2071       * down simultaneously based on required LDS space.
2072       *
2073       * We could be smarter about this if we knew how much vertex
2074       * reuse to expect.
2075       */
2076      unsigned lds_total = max_esverts * esvert_lds_size + max_gsprims * gsprim_lds_size;
2077      if (lds_total > target_lds_size) {
2078         max_esverts = max_esverts * target_lds_size / lds_total;
2079         max_gsprims = max_gsprims * target_lds_size / lds_total;
2080
2081         max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
2082         clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
2083         assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
2084      }
2085   }
2086
2087   /* Round up towards full wave sizes for better ALU utilization. */
2088   if (!max_vert_out_per_gs_instance) {
2089      const unsigned wavesize = si_get_shader_wave_size(shader);
2090      unsigned orig_max_esverts;
2091      unsigned orig_max_gsprims;
2092      do {
2093         orig_max_esverts = max_esverts;
2094         orig_max_gsprims = max_gsprims;
2095
2096         max_esverts = align(max_esverts, wavesize);
2097         max_esverts = MIN2(max_esverts, max_esverts_base);
2098         if (esvert_lds_size)
2099            max_esverts =
2100               MIN2(max_esverts, (max_lds_size - max_gsprims * gsprim_lds_size) / esvert_lds_size);
2101         max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
2102
2103         /* Hardware restriction: minimum value of max_esverts */
2104         if (gs_sel->screen->info.chip_class == GFX10)
2105            max_esverts = MAX2(max_esverts, min_esverts - 1 + max_verts_per_prim);
2106         else
2107            max_esverts = MAX2(max_esverts, min_esverts);
2108
2109         max_gsprims = align(max_gsprims, wavesize);
2110         max_gsprims = MIN2(max_gsprims, max_gsprims_base);
2111         if (gsprim_lds_size) {
2112            /* Don't count unusable vertices to the LDS size. Those are vertices above
2113             * the maximum number of vertices that can occur in the workgroup,
2114             * which is e.g. max_gsprims * 3 for triangles.
2115             */
2116            unsigned usable_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
2117            max_gsprims =
2118               MIN2(max_gsprims, (max_lds_size - usable_esverts * esvert_lds_size) / gsprim_lds_size);
2119         }
2120         clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
2121         assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
2122      } while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
2123
2124      /* Verify the restriction. */
2125      if (gs_sel->screen->info.chip_class == GFX10)
2126         assert(max_esverts >= min_esverts - 1 + max_verts_per_prim);
2127      else
2128         assert(max_esverts >= min_esverts);
2129   } else {
2130      /* Hardware restriction: minimum value of max_esverts */
2131      if (gs_sel->screen->info.chip_class == GFX10)
2132         max_esverts = MAX2(max_esverts, min_esverts - 1 + max_verts_per_prim);
2133      else
2134         max_esverts = MAX2(max_esverts, min_esverts);
2135   }
2136
2137   unsigned max_out_vertices =
2138      max_vert_out_per_gs_instance
2139         ? gs_sel->info.base.gs.vertices_out
2140         : gs_stage == MESA_SHADER_GEOMETRY
2141              ? max_gsprims * gs_num_invocations * gs_sel->info.base.gs.vertices_out
2142              : max_esverts;
2143   assert(max_out_vertices <= 256);
2144
2145   unsigned prim_amp_factor = 1;
2146   if (gs_stage == MESA_SHADER_GEOMETRY) {
2147      /* Number of output primitives per GS input primitive after
2148       * GS instancing. */
2149      prim_amp_factor = gs_sel->info.base.gs.vertices_out;
2150   }
2151
2152   shader->ngg.hw_max_esverts = max_esverts;
2153   shader->ngg.max_gsprims = max_gsprims;
2154   shader->ngg.max_out_verts = max_out_vertices;
2155   shader->ngg.prim_amp_factor = prim_amp_factor;
2156   shader->ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;
2157
2158   /* Don't count unusable vertices. */
2159   shader->gs_info.esgs_ring_size = MIN2(max_esverts, max_gsprims * max_verts_per_prim) *
2160                                    esvert_lds_size;
2161   shader->ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;
2162
2163   assert(shader->ngg.hw_max_esverts >= min_esverts); /* HW limitation */
2164
2165   /* If asserts are disabled, we use the same conditions to return false */
2166   return max_esverts >= max_verts_per_prim && max_gsprims >= 1 &&
2167          max_out_vertices <= 256 &&
2168          shader->ngg.hw_max_esverts >= min_esverts;
2169}
2170