/*
 * Copyright 2008 Ben Skeggs
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"

#include "nv50/nv50_context.h"
#include "nv50/nv50_query_hw.h"

#include "nv50/nv50_compute.xml.h"

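/* Bind or update all constant buffers marked dirty for each 3D shader stage.
 * User (client memory) constbufs, only allowed in slot 0, are streamed inline
 * into a fixed per-stage buffer; buffer resources are bound by GPU address.
 * The SET_PROGRAM_CB word, as packed below, holds the buffer binding index in
 * bits 12+, the logical slot in bits 8..11, the stage select bits from p, and
 * the enable bit in bit 0.
 */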
void
nv50_constbufs_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   unsigned s;

   for (s = 0; s < NV50_MAX_3D_SHADER_STAGES; ++s) {
      unsigned p;

      if (s == NV50_SHADER_STAGE_FRAGMENT)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_FRAGMENT;
      else
      if (s == NV50_SHADER_STAGE_GEOMETRY)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_GEOMETRY;
      else
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_VERTEX;

      while (nv50->constbuf_dirty[s]) {
         const unsigned i = (unsigned)ffs(nv50->constbuf_dirty[s]) - 1;

         assert(i < NV50_MAX_PIPE_CONSTBUFS);
         nv50->constbuf_dirty[s] &= ~(1 << i);

         if (nv50->constbuf[s][i].user) {
            const unsigned b = NV50_CB_PVP + s;
            unsigned start = 0;
            unsigned words = nv50->constbuf[s][0].size / 4;
            if (i) {
               NOUVEAU_ERR("user constbufs only supported in slot 0\n");
               continue;
            }
            if (!nv50->state.uniform_buffer_bound[s]) {
               nv50->state.uniform_buffer_bound[s] = true;
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);
            }
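            /* Stream the user buffer contents inline through the CB write
             * cursor, at most NV04_PFIFO_MAX_PACKET_LEN words per packet. */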
            while (words) {
               unsigned nr = MIN2(words, NV04_PFIFO_MAX_PACKET_LEN);

               PUSH_SPACE(push, nr + 3);
               BEGIN_NV04(push, NV50_3D(CB_ADDR), 1);
               PUSH_DATA (push, (start << 8) | b);
               BEGIN_NI04(push, NV50_3D(CB_DATA(0)), nr);
               PUSH_DATAp(push, &nv50->constbuf[s][0].u.data[start * 4], nr);

               start += nr;
               words -= nr;
            }
         } else {
            struct nv04_resource *res =
               nv04_resource(nv50->constbuf[s][i].u.buf);
            if (res) {
               /* TODO: allocate persistent bindings */
               const unsigned b = s * 16 + i;

               assert(nouveau_resource_mapped_by_gpu(&res->base));

               BEGIN_NV04(push, NV50_3D(CB_DEF_ADDRESS_HIGH), 3);
               PUSH_DATAh(push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, (b << 16) |
                          (nv50->constbuf[s][i].size & 0xffff));
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);

               BCTX_REFN(nv50->bufctx_3d, 3D_CB(s, i), res, RD);

               nv50->cb_dirty = 1; /* Force cache flush for UBO. */
               res->cb_bindings[s] |= 1 << i;
            } else {
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (i << 8) | p | 0);
            }
            if (i == 0)
               nv50->state.uniform_buffer_bound[s] = false;
         }
      }
   }

   /* Invalidate all COMPUTE constbufs because they are aliased with 3D. */
   nv50->dirty_cp |= NV50_NEW_CP_CONSTBUF;
   nv50->constbuf_dirty[NV50_SHADER_STAGE_COMPUTE] |= nv50->constbuf_valid[NV50_SHADER_STAGE_COMPUTE];
   nv50->state.uniform_buffer_bound[NV50_SHADER_STAGE_COMPUTE] = false;
}

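/* Make sure the program is translated and its code is resident in the code
 * segment, (re)uploading it if necessary. */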
static bool
nv50_program_validate(struct nv50_context *nv50, struct nv50_program *prog)
{
   if (!prog->translated) {
      prog->translated = nv50_program_translate(
         prog, nv50->screen->base.device->chipset, &nv50->base.debug);
      if (!prog->translated)
         return false;
   } else
   if (prog->mem)
      return true;

   return nv50_program_upload_code(nv50, prog);
}

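/* Track which stages need thread-local (scratch) storage. The TLS buffer is
 * referenced while at least one bound program uses it, and re-referenced
 * whenever it has been reallocated (new_tls_space). */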
static inline void
nv50_program_update_context_state(struct nv50_context *nv50,
                                  struct nv50_program *prog, int stage)
{
   const unsigned flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR;

   if (prog && prog->tls_space) {
      if (nv50->state.new_tls_space)
         nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_TLS);
      if (!nv50->state.tls_required || nv50->state.new_tls_space)
         BCTX_REFN_bo(nv50->bufctx_3d, 3D_TLS, flags, nv50->screen->tls_bo);
      nv50->state.new_tls_space = false;
      nv50->state.tls_required |= 1 << stage;
   } else {
      if (nv50->state.tls_required == (1 << stage))
         nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_TLS);
      nv50->state.tls_required &= ~(1 << stage);
   }
}

void
nv50_vertprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *vp = nv50->vertprog;

   if (!nv50_program_validate(nv50, vp))
      return;
   nv50_program_update_context_state(nv50, vp, 0);

   BEGIN_NV04(push, NV50_3D(VP_ATTR_EN(0)), 2);
   PUSH_DATA (push, vp->vp.attrs[0]);
   PUSH_DATA (push, vp->vp.attrs[1]);
   BEGIN_NV04(push, NV50_3D(VP_REG_ALLOC_RESULT), 1);
   PUSH_DATA (push, vp->max_out);
   BEGIN_NV04(push, NV50_3D(VP_REG_ALLOC_TEMP), 1);
   PUSH_DATA (push, vp->max_gpr);
   BEGIN_NV04(push, NV50_3D(VP_START_ID), 1);
   PUSH_DATA (push, vp->code_base);
}

void
nv50_fragprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *fp = nv50->fragprog;
   struct pipe_rasterizer_state *rast = &nv50->rast->pipe;

   if (!fp || !rast)
      return;

   if (nv50->zsa && nv50->zsa->pipe.alpha_enabled) {
      struct pipe_framebuffer_state *fb = &nv50->framebuffer;
      bool blendable = fb->nr_cbufs == 0 || !fb->cbufs[0] ||
         nv50->screen->base.base.is_format_supported(
               &nv50->screen->base.base,
               fb->cbufs[0]->format,
               fb->cbufs[0]->texture->target,
               fb->cbufs[0]->texture->nr_samples,
               fb->cbufs[0]->texture->nr_storage_samples,
               PIPE_BIND_BLENDABLE);
      /* If we already have alphatest code, we have to keep updating
       * it. However we only have to have different code if the current RT0 is
       * non-blendable. Otherwise we just set it to always pass and use the
       * hardware alpha test.
       */
      if (fp->fp.alphatest || !blendable) {
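         /* Alpha funcs are stored biased by 1 so that a value of 0 can mean
          * "no alphatest code baked into the shader". */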
         uint8_t alphatest = PIPE_FUNC_ALWAYS + 1;
         if (!blendable)
            alphatest = nv50->zsa->pipe.alpha_func + 1;
         if (!fp->fp.alphatest)
            nv50_program_destroy(nv50, fp);
         else if (fp->mem && fp->fp.alphatest != alphatest)
            nouveau_heap_free(&fp->mem);

         fp->fp.alphatest = alphatest;
      }
   } else if (fp->fp.alphatest && fp->fp.alphatest != PIPE_FUNC_ALWAYS + 1) {
      /* Alpha test is disabled but we have a shader where it's filled
       * in. Make sure to reset the function to 'always', otherwise it'll end
       * up discarding fragments incorrectly.
       */
      if (fp->mem)
         nouveau_heap_free(&fp->mem);

      fp->fp.alphatest = PIPE_FUNC_ALWAYS + 1;
   }

   if (fp->fp.force_persample_interp != rast->force_persample_interp) {
      /* Force the program to be reuploaded, which will trigger interp fixups
       * to get applied.
       */
      if (fp->mem)
         nouveau_heap_free(&fp->mem);

      fp->fp.force_persample_interp = rast->force_persample_interp;
   }

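   /* Nothing to push if the code is resident and neither the fragprog nor
    * the sample count changed. */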
   if (fp->mem && !(nv50->dirty_3d & (NV50_NEW_3D_FRAGPROG | NV50_NEW_3D_MIN_SAMPLES)))
      return;

   if (!nv50_program_validate(nv50, fp))
      return;
   nv50_program_update_context_state(nv50, fp, 1);

   BEGIN_NV04(push, NV50_3D(FP_REG_ALLOC_TEMP), 1);
   PUSH_DATA (push, fp->max_gpr);
   BEGIN_NV04(push, NV50_3D(FP_RESULT_COUNT), 1);
   PUSH_DATA (push, fp->max_out);
   BEGIN_NV04(push, NV50_3D(FP_CONTROL), 1);
   PUSH_DATA (push, fp->fp.flags[0]);
   BEGIN_NV04(push, NV50_3D(FP_CTRL_UNK196C), 1);
   PUSH_DATA (push, fp->fp.flags[1]);
   BEGIN_NV04(push, NV50_3D(FP_START_ID), 1);
   PUSH_DATA (push, fp->code_base);

   if (nv50->screen->tesla->oclass >= NVA3_3D_CLASS) {
      BEGIN_NV04(push, SUBC_3D(NVA3_3D_FP_MULTISAMPLE), 1);
      if (nv50->min_samples > 1 || fp->fp.has_samplemask)
         PUSH_DATA(push,
                   NVA3_3D_FP_MULTISAMPLE_FORCE_PER_SAMPLE |
                   (NVA3_3D_FP_MULTISAMPLE_EXPORT_SAMPLE_MASK *
                    fp->fp.has_samplemask));
      else
         PUSH_DATA(push, 0);
   }
}

void
nv50_gmtyprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *gp = nv50->gmtyprog;

   if (gp) {
      if (!nv50_program_validate(nv50, gp))
         return;
      BEGIN_NV04(push, NV50_3D(GP_REG_ALLOC_TEMP), 1);
      PUSH_DATA (push, gp->max_gpr);
      BEGIN_NV04(push, NV50_3D(GP_REG_ALLOC_RESULT), 1);
      PUSH_DATA (push, gp->max_out);
      BEGIN_NV04(push, NV50_3D(GP_OUTPUT_PRIMITIVE_TYPE), 1);
      PUSH_DATA (push, gp->gp.prim_type);
      BEGIN_NV04(push, NV50_3D(GP_VERTEX_OUTPUT_COUNT), 1);
      PUSH_DATA (push, gp->gp.vert_count);
      BEGIN_NV04(push, NV50_3D(GP_START_ID), 1);
      PUSH_DATA (push, gp->code_base);

      nv50->state.prim_size = gp->gp.prim_type; /* enum matches vertex count */
   }
   nv50_program_update_context_state(nv50, gp, 2);

   /* GP_ENABLE is updated in linkage validation */
}

void
nv50_compprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *cp = nv50->compprog;

   if (cp && !nv50_program_validate(nv50, cp))
      return;

   BEGIN_NV04(push, NV50_CP(CODE_CB_FLUSH), 1);
   PUSH_DATA (push, 0);
}

static void
nv50_sprite_coords_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   uint32_t pntc[8], mode;
   struct nv50_program *fp = nv50->fragprog;
   unsigned i, c;
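   /* Generic FP inputs start at the map index stored in bits 8..15 of the
    * interpolant control word (set up in nv50_fp_linkage_validate). */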
   unsigned m = (nv50->state.interpolant_ctrl >> 8) & 0xff;

   if (!nv50->rast->pipe.point_quad_rasterization) {
      if (nv50->state.point_sprite) {
         BEGIN_NV04(push, NV50_3D(POINT_COORD_REPLACE_MAP(0)), 8);
         for (i = 0; i < 8; ++i)
            PUSH_DATA(push, 0);

         nv50->state.point_sprite = false;
      }
      return;
   } else {
      nv50->state.point_sprite = true;
   }

   memset(pntc, 0, sizeof(pntc));

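   /* Build the coord replacement map: eight 4-bit entries per word, one per
    * interpolant slot; a value of c + 1 presumably tells the hardware to
    * replace that input with point coordinate component c, while 0 leaves
    * the input untouched. */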
   for (i = 0; i < fp->in_nr; i++) {
      unsigned n = util_bitcount(fp->in[i].mask);

      if (fp->in[i].sn != TGSI_SEMANTIC_GENERIC) {
         m += n;
         continue;
      }
      if (!(nv50->rast->pipe.sprite_coord_enable & (1 << fp->in[i].si))) {
         m += n;
         continue;
      }

      for (c = 0; c < 4; ++c) {
         if (fp->in[i].mask & (1 << c)) {
            pntc[m / 8] |= (c + 1) << ((m % 8) * 4);
            ++m;
         }
      }
   }

   if (nv50->rast->pipe.sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT)
      mode = 0x00;
   else
      mode = 0x10;

   BEGIN_NV04(push, NV50_3D(POINT_SPRITE_CTRL), 1);
   PUSH_DATA (push, mode);

   BEGIN_NV04(push, NV50_3D(POINT_COORD_REPLACE_MAP(0)), 8);
   PUSH_DATAp(push, pntc, 8);
}

/* Validate state derived from shaders and the rasterizer cso. */
void
nv50_validate_derived_rs(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   uint32_t color, psize;

   nv50_sprite_coords_validate(nv50);

   if (nv50->state.rasterizer_discard != nv50->rast->pipe.rasterizer_discard) {
      nv50->state.rasterizer_discard = nv50->rast->pipe.rasterizer_discard;
      BEGIN_NV04(push, NV50_3D(RASTERIZE_ENABLE), 1);
      PUSH_DATA (push, !nv50->rast->pipe.rasterizer_discard);
   }

   if (nv50->dirty_3d & NV50_NEW_3D_FRAGPROG)
      return;
   psize = nv50->state.semantic_psize & ~NV50_3D_SEMANTIC_PTSZ_PTSZ_EN__MASK;
   color = nv50->state.semantic_color & ~NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (nv50->rast->pipe.clamp_vertex_color)
      color |= NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (color != nv50->state.semantic_color) {
      nv50->state.semantic_color = color;
      BEGIN_NV04(push, NV50_3D(SEMANTIC_COLOR), 1);
      PUSH_DATA (push, color);
   }

   if (nv50->rast->pipe.point_size_per_vertex)
      psize |= NV50_3D_SEMANTIC_PTSZ_PTSZ_EN__MASK;

   if (psize != nv50->state.semantic_psize) {
      nv50->state.semantic_psize = psize;
      BEGIN_NV04(push, NV50_3D(SEMANTIC_PTSZ), 1);
      PUSH_DATA (push, psize);
   }
}

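/* Append result map entries for one FP input vector: for each component the
 * FP reads (mf), store the VP output register (oid) if the VP writes it (mv);
 * otherwise keep the filler value, setting its low bit for the w component
 * (0x41 instead of 0x40, which apparently yields 1.0 instead of 0.0). Bits in
 * lin mark entries that want linear (non-perspective) interpolation. */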
static int
nv50_vec4_map(uint8_t *map, int mid, uint32_t lin[4],
              struct nv50_varying *in, struct nv50_varying *out)
{
   int c;
   uint8_t mv = out->mask, mf = in->mask, oid = out->hw;

   for (c = 0; c < 4; ++c) {
      if (mf & 1) {
         if (in->linear)
            lin[mid / 32] |= 1 << (mid % 32);
         if (mv & 1)
            map[mid] = oid;
         else
         if (c == 3)
            map[mid] |= 1;
         ++mid;
      }

      oid += mv & 1;
      mf >>= 1;
      mv >>= 1;
   }

   return mid;
}

void
nv50_fp_linkage_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *vp = nv50->gmtyprog ? nv50->gmtyprog : nv50->vertprog;
   struct nv50_program *fp = nv50->fragprog;
   struct nv50_varying dummy;
   int i, n, c, m;
   uint32_t primid = 0;
   uint32_t layerid = 0;
   uint32_t viewportid = 0;
   uint32_t psiz = 0x000;
   uint32_t interp = fp->fp.interp;
   uint32_t colors = fp->fp.colors;
   uint32_t clpd_nr = util_last_bit(vp->vp.clip_enable | vp->vp.cull_enable);
   uint32_t lin[4];
   uint8_t map[64];
   uint8_t so_map[64];

   if (!(nv50->dirty_3d & (NV50_NEW_3D_VERTPROG |
                           NV50_NEW_3D_FRAGPROG |
                           NV50_NEW_3D_GMTYPROG))) {
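      /* None of the programs changed: only re-link if the two-sided lighting
       * state disagrees with what SEMANTIC_COLOR was last programmed with
       * (FFC0_ID == BFC0_ID means two-sided lighting is off). */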
      uint8_t bfc, ffc;
      ffc = (nv50->state.semantic_color & NV50_3D_SEMANTIC_COLOR_FFC0_ID__MASK);
      bfc = (nv50->state.semantic_color & NV50_3D_SEMANTIC_COLOR_BFC0_ID__MASK)
         >> 8;
      if (nv50->rast->pipe.light_twoside == ((ffc == bfc) ? 0 : 1))
         return;
   }

   memset(lin, 0x00, sizeof(lin));

   /* XXX: in buggy-endian mode, is the first element of map (u32)0x000000xx
    *  or is it the first byte ?
    */
   memset(map, nv50->gmtyprog ? 0x80 : 0x40, sizeof(map));
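   /* Result map layout: slot 0 is HPOS (4 components), then the enabled
    * clip/cull distances, then back-face colors if two-sided lighting is on,
    * then the remaining FP inputs. 0x40 (0x80 with a GP) is the filler for
    * components without a VP/GP result; see nv50_vec4_map(). */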

   dummy.mask = 0xf; /* map all components of HPOS */
   dummy.linear = 0;
   m = nv50_vec4_map(map, 0, lin, &dummy, &vp->out[0]);

   for (c = 0; c < clpd_nr; ++c)
      map[m++] = vp->vp.clpd[c / 4] + (c % 4);

   colors |= m << 8; /* adjust BFC0 id */

   dummy.mask = 0x0;

   /* if light_twoside is active, FFC0_ID == BFC0_ID is invalid */
   if (nv50->rast->pipe.light_twoside) {
      for (i = 0; i < 2; ++i) {
         n = vp->vp.bfc[i];
         if (fp->vp.bfc[i] >= fp->in_nr)
            continue;
         m = nv50_vec4_map(map, m, lin, &fp->in[fp->vp.bfc[i]],
                           (n < vp->out_nr) ? &vp->out[n] : &dummy);
      }
   }
   colors += m - 4; /* adjust FFC0 id */
   interp |= m << 8; /* set map id where 'normal' FP inputs start */

   for (i = 0; i < fp->in_nr; ++i) {
      for (n = 0; n < vp->out_nr; ++n)
         if (vp->out[n].sn == fp->in[i].sn &&
             vp->out[n].si == fp->in[i].si)
            break;
      switch (fp->in[i].sn) {
      case TGSI_SEMANTIC_PRIMID:
         primid = m;
         break;
      case TGSI_SEMANTIC_LAYER:
         layerid = m;
         break;
      case TGSI_SEMANTIC_VIEWPORT_INDEX:
         viewportid = m;
         break;
      }
      m = nv50_vec4_map(map, m, lin,
                        &fp->in[i], (n < vp->out_nr) ? &vp->out[n] : &dummy);
   }

   if (vp->gp.has_layer && !layerid) {
      layerid = m;
      map[m++] = vp->gp.layerid;
   }

   if (vp->gp.has_viewport && !viewportid) {
      viewportid = m;
      map[m++] = vp->gp.viewportid;
   }

   if (nv50->rast->pipe.point_size_per_vertex) {
      psiz = (m << 4) | 1;
      map[m++] = vp->vp.psiz;
   }

   if (nv50->rast->pipe.clamp_vertex_color)
      colors |= NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (unlikely(vp->so)) {
      /* Slot i in STRMOUT_MAP specifies the offset where slot i in RESULT_MAP
       * gets written.
       *
       * TODO:
       * Inverting vp->so->map (output -> offset) would probably speed this up.
       */
      memset(so_map, 0, sizeof(so_map));
      for (i = 0; i < vp->so->map_size; ++i) {
         if (vp->so->map[i] == 0xff)
            continue;
         for (c = 0; c < m; ++c)
            if (map[c] == vp->so->map[i] && !so_map[c])
               break;
         if (c == m)
            map[m++] = vp->so->map[i];
         so_map[c] = 0x80 | i;
      }
      for (c = m; c & 3; ++c)
         so_map[c] = 0;
   }

   n = (m + 3) / 4;
   assert(m <= 64);

   if (unlikely(nv50->gmtyprog)) {
      BEGIN_NV04(push, NV50_3D(GP_RESULT_MAP_SIZE), 1);
      PUSH_DATA (push, m);
      BEGIN_NV04(push, NV50_3D(GP_RESULT_MAP(0)), n);
      PUSH_DATAp(push, map, n);
   } else {
      BEGIN_NV04(push, NV50_3D(VP_GP_BUILTIN_ATTR_EN), 1);
      PUSH_DATA (push, vp->vp.attrs[2] | fp->vp.attrs[2]);

      BEGIN_NV04(push, NV50_3D(SEMANTIC_PRIM_ID), 1);
      PUSH_DATA (push, primid);

      assert(m > 0);
      BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP_SIZE), 1);
      PUSH_DATA (push, m);
      BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP(0)), n);
      PUSH_DATAp(push, map, n);
   }

   BEGIN_NV04(push, NV50_3D(GP_VIEWPORT_ID_ENABLE), 5);
   PUSH_DATA (push, vp->gp.has_viewport);
   PUSH_DATA (push, colors);
   PUSH_DATA (push, (clpd_nr << 8) | 4);
   PUSH_DATA (push, layerid);
   PUSH_DATA (push, psiz);

   BEGIN_NV04(push, NV50_3D(SEMANTIC_VIEWPORT), 1);
   PUSH_DATA (push, viewportid);

   BEGIN_NV04(push, NV50_3D(LAYER), 1);
   PUSH_DATA (push, vp->gp.has_layer << 16);

   BEGIN_NV04(push, NV50_3D(FP_INTERPOLANT_CTRL), 1);
   PUSH_DATA (push, interp);

   nv50->state.interpolant_ctrl = interp;

   nv50->state.semantic_color = colors;
   nv50->state.semantic_psize = psiz;

   BEGIN_NV04(push, NV50_3D(NOPERSPECTIVE_BITMAP(0)), 4);
   PUSH_DATAp(push, lin, 4);

   BEGIN_NV04(push, NV50_3D(GP_ENABLE), 1);
   PUSH_DATA (push, nv50->gmtyprog ? 1 : 0);

   if (vp->so) {
      BEGIN_NV04(push, NV50_3D(STRMOUT_MAP(0)), n);
      PUSH_DATAp(push, so_map, n);
   }
}

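/* Build the VP result map as consumed by the GP: VP outputs are matched to GP
 * inputs by semantic name and index; components the GP reads but the VP never
 * writes get the 0x40 filler (0x41 for w). */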
static int
nv50_vp_gp_mapping(uint8_t *map, int m,
                   struct nv50_program *vp, struct nv50_program *gp)
{
   int i, j, c;

   for (i = 0; i < gp->in_nr; ++i) {
      uint8_t oid = 0, mv = 0, mg = gp->in[i].mask;

      for (j = 0; j < vp->out_nr; ++j) {
         if (vp->out[j].sn == gp->in[i].sn &&
             vp->out[j].si == gp->in[i].si) {
            mv = vp->out[j].mask;
            oid = vp->out[j].hw;
            break;
         }
      }

      for (c = 0; c < 4; ++c, mv >>= 1, mg >>= 1) {
         if (mg & mv & 1)
            map[m++] = oid;
         else
         if (mg & 1)
            map[m++] = (c == 3) ? 0x41 : 0x40;
         oid += mv & 1;
      }
   }
   if (!m)
      map[m++] = 0;
   return m;
}

void
nv50_gp_linkage_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *vp = nv50->vertprog;
   struct nv50_program *gp = nv50->gmtyprog;
   int m = 0;
   int n;
   uint8_t map[64];

   if (!gp)
      return;
   memset(map, 0, sizeof(map));

   m = nv50_vp_gp_mapping(map, m, vp, gp);

   n = (m + 3) / 4;

   BEGIN_NV04(push, NV50_3D(VP_GP_BUILTIN_ATTR_EN), 1);
   PUSH_DATA (push, vp->vp.attrs[2] | gp->vp.attrs[2]);

   assert(m > 0);
   BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP_SIZE), 1);
   PUSH_DATA (push, m);
   BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP(0)), n);
   PUSH_DATAp(push, map, n);
}

void
nv50_stream_output_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_stream_output_state *so;
   uint32_t ctrl;
   unsigned i;
   unsigned prims = ~0;

   so = nv50->gmtyprog ? nv50->gmtyprog->so : nv50->vertprog->so;

   BEGIN_NV04(push, NV50_3D(STRMOUT_ENABLE), 1);
   PUSH_DATA (push, 0);
   if (!so || !nv50->num_so_targets) {
      if (nv50->screen->base.class_3d < NVA0_3D_CLASS) {
         BEGIN_NV04(push, NV50_3D(STRMOUT_PRIMITIVE_LIMIT), 1);
         PUSH_DATA (push, 0);
      }
      BEGIN_NV04(push, NV50_3D(STRMOUT_PARAMS_LATCH), 1);
      PUSH_DATA (push, 1);
      return;
   }

   /* previous TFB needs to complete */
   if (nv50->screen->base.class_3d < NVA0_3D_CLASS) {
      BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
      PUSH_DATA (push, 0);
   }

   ctrl = so->ctrl;
   if (nv50->screen->base.class_3d >= NVA0_3D_CLASS)
      ctrl |= NVA0_3D_STRMOUT_BUFFERS_CTRL_LIMIT_MODE_OFFSET;

   BEGIN_NV04(push, NV50_3D(STRMOUT_BUFFERS_CTRL), 1);
   PUSH_DATA (push, ctrl);

   for (i = 0; i < nv50->num_so_targets; ++i) {
      struct nv50_so_target *targ = nv50_so_target(nv50->so_target[i]);
      struct nv04_resource *buf = nv04_resource(targ->pipe.buffer);

      const unsigned n = nv50->screen->base.class_3d >= NVA0_3D_CLASS ? 4 : 3;

      uint32_t so_used = 0;

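      /* For buffers that already have data in them: NVA0+ restores the write
       * offset on the GPU from the target's query; pre-NVA0 tracks the byte
       * offset on the CPU in so_used. */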
      if (!targ->clean) {
         if (n == 4)
            nv84_hw_query_fifo_wait(push, nv50_query(targ->pq));
         else
            so_used = nv50->so_used[i];
      }
      BEGIN_NV04(push, NV50_3D(STRMOUT_ADDRESS_HIGH(i)), n);
      PUSH_DATAh(push, buf->address + targ->pipe.buffer_offset + so_used);
      PUSH_DATA (push, buf->address + targ->pipe.buffer_offset + so_used);
      PUSH_DATA (push, so->num_attribs[i]);
      if (n == 4) {
         PUSH_DATA(push, targ->pipe.buffer_size);
         if (!targ->clean) {
            assert(targ->pq);
            nv50_hw_query_pushbuf_submit(push, NVA0_3D_STRMOUT_OFFSET(i),
                                         nv50_query(targ->pq), 0x4);
         } else {
            BEGIN_NV04(push, NVA0_3D(STRMOUT_OFFSET(i)), 1);
            PUSH_DATA(push, 0);
            targ->clean = false;
         }
      } else {
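         /* Pre-NVA0 has no per-buffer size register, so clamp the amount of
          * output via the global primitive limit instead. */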
         const unsigned limit = (targ->pipe.buffer_size - so_used) /
            (so->stride[i] * nv50->state.prim_size);
         prims = MIN2(prims, limit);
         targ->clean = false;
      }
      targ->stride = so->stride[i];
      BCTX_REFN(nv50->bufctx_3d, 3D_SO, buf, WR);
   }
   if (prims != ~0) {
      BEGIN_NV04(push, NV50_3D(STRMOUT_PRIMITIVE_LIMIT), 1);
      PUSH_DATA (push, prims);
   }
   BEGIN_NV04(push, NV50_3D(STRMOUT_PARAMS_LATCH), 1);
   PUSH_DATA (push, 1);
   BEGIN_NV04(push, NV50_3D(STRMOUT_ENABLE), 1);
   PUSH_DATA (push, 1);
}