/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */



#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "compiler/brw_eu_defines.h"

#include "main/framebuffer.h"
#include "main/fbobject.h"
#include "main/format_utils.h"
#include "main/glformats.h"

/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void
upload_pipelined_state_pointers(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen == 5) {
      /* Need to flush before changing clip max threads for errata. */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }

   BEGIN_BATCH(7);
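   /* Hardware command length fields are biased by two, so a 7-dword packet
    * is encoded as (7 - 2).
    */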
   OUT_BATCH(_3DSTATE_PIPELINED_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->batch.state.bo, 0, brw->vs.base.state_offset);
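   /* The low bit of the GS and CLIP unit pointers below is the unit's
    * enable bit.
    */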
   if (brw->ff_gs.prog_active)
      OUT_RELOC(brw->batch.state.bo, 0, brw->ff_gs.state_offset | 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->batch.state.bo, 0, brw->clip.state_offset | 1);
   OUT_RELOC(brw->batch.state.bo, 0, brw->sf.state_offset);
   OUT_RELOC(brw->batch.state.bo, 0, brw->wm.base.state_offset);
   OUT_RELOC(brw->batch.state.bo, 0, brw->cc.state_offset);
   ADVANCE_BATCH();

   brw->ctx.NewDriverState |= BRW_NEW_PSP;
}

static void
upload_psp_urb_cbs(struct brw_context *brw)
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}

const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FF_GS_PROG_DATA |
             BRW_NEW_GEN4_UNIT_STATE |
             BRW_NEW_STATE_BASE_ADDRESS |
             BRW_NEW_URB_FENCE,
   },
   .emit = upload_psp_urb_cbs,
};
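/* As with every brw_tracked_state atom, .emit is invoked by the state-upload
 * loop whenever one of the dirty bits listed above is set in the accumulated
 * Mesa/driver state.
 */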

uint32_t
brw_depthbuffer_format(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *drb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *srb;

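   /* With no depth attachment, a packed depth/stencil renderbuffer bound to
    * stencil alone (and lacking a separate stencil miptree) determines the
    * depth format instead.
    */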
   if (!drb &&
       (srb = intel_get_renderbuffer(fb, BUFFER_STENCIL)) &&
       !srb->mt->stencil_mt &&
       (intel_rb_format(srb) == MESA_FORMAT_Z24_UNORM_S8_UINT ||
        intel_rb_format(srb) == MESA_FORMAT_Z32_FLOAT_S8X24_UINT)) {
      drb = srb;
   }

   if (!drb)
      return BRW_DEPTHFORMAT_D32_FLOAT;

   return brw_depth_format(brw, drb->mt->format);
}

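/**
 * Return the miptree that actually holds a renderbuffer's stencil data.
 *
 * When depth and stencil are stored separately (stencil lives in its own
 * W-tiled miptree), that data is in mt->stencil_mt rather than in the
 * renderbuffer's primary miptree.
 */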
static struct intel_mipmap_tree *
get_stencil_miptree(struct intel_renderbuffer *irb)
{
   if (!irb)
      return NULL;
   if (irb->mt->stencil_mt)
      return irb->mt->stencil_mt;
   return intel_renderbuffer_get_mt(irb);
}

static bool
rebase_depth_stencil(struct brw_context *brw, struct intel_renderbuffer *irb,
                     bool invalidate)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   uint32_t tile_mask_x = 0, tile_mask_y = 0;

   intel_get_tile_masks(irb->mt->surf.tiling, irb->mt->cpp,
                        &tile_mask_x, &tile_mask_y);
   assert(!intel_miptree_level_has_hiz(irb->mt, irb->mt_level));

   uint32_t tile_x = irb->draw_x & tile_mask_x;
   uint32_t tile_y = irb->draw_y & tile_mask_y;
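   /* draw_x/draw_y is the renderbuffer's offset within its miptree; the
    * tile masks split it into a tile-aligned base (programmed via
    * depth_offset) and an intra-tile offset that must meet the alignment
    * rules below.
    */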

   /* According to the Sandy Bridge PRM, volume 2 part 1, pp. 326-327
    * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
    * Coordinate Offset X/Y":
    *
    *   "The 3 LSBs of both offsets must be zero to ensure correct
    *   alignment"
    */
   bool rebase = tile_x & 7 || tile_y & 7;

   /* We didn't even have intra-tile offsets before g45. */
   rebase |= (!devinfo->has_surface_tile_offset && (tile_x || tile_y));

   if (rebase) {
      perf_debug("HW workaround: blitting depth level %d to a temporary "
                 "to fix alignment (depth tile offset %d,%d)\n",
                 irb->mt_level, tile_x, tile_y);
      intel_renderbuffer_move_to_temp(brw, irb, invalidate);

      /* There is now only a single-slice miptree. */
      brw->depthstencil.tile_x = 0;
      brw->depthstencil.tile_y = 0;
      brw->depthstencil.depth_offset = 0;
      return true;
   }

   /* While we just tried to get everything aligned, we may have failed to do
    * so in the case of rendering to array or 3D textures, where nonzero faces
    * will still have an offset post-rebase.  At least give an informative
    * warning.
    */
   WARN_ONCE((tile_x & 7) || (tile_y & 7),
             "Depth/stencil buffer needs alignment to 8-pixel boundaries.\n"
             "Truncating offset (%u:%u), bad rendering may occur.\n",
             tile_x, tile_y);
   tile_x &= ~7;
   tile_y &= ~7;

   brw->depthstencil.tile_x = tile_x;
   brw->depthstencil.tile_y = tile_y;
   brw->depthstencil.depth_offset = intel_miptree_get_aligned_offset(
                                       irb->mt,
                                       irb->draw_x & ~tile_mask_x,
                                       irb->draw_y & ~tile_mask_y);

   return false;
}

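/**
 * Pre-Gen6, 3DSTATE_DEPTH_BUFFER's intra-tile offset fields come with
 * alignment requirements the bound renderbuffers may not satisfy.  Rebase
 * any misaligned depth/stencil renderbuffer into a temporary miptree so
 * that the offsets we end up programming are legal.
 */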
void
brw_workaround_depthstencil_alignment(struct brw_context *brw,
                                      GLbitfield clear_mask)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL;
   bool invalidate_depth = clear_mask & BUFFER_BIT_DEPTH;
   bool invalidate_stencil = clear_mask & BUFFER_BIT_STENCIL;

   if (depth_irb)
      depth_mt = depth_irb->mt;

   /* Initialize brw->depthstencil to 'nop' workaround state. */
   brw->depthstencil.tile_x = 0;
   brw->depthstencil.tile_y = 0;
   brw->depthstencil.depth_offset = 0;

   /* Gen6+ doesn't require the workarounds, since we always program the
    * surface state at the start of the whole surface.
    */
   if (devinfo->gen >= 6)
      return;

   /* Check whether the depth buffer is in a depth/stencil format.  If so,
    * it's only safe to invalidate it if we're also clearing stencil.
    */
   if (depth_irb && invalidate_depth &&
       _mesa_get_format_base_format(depth_mt->format) == GL_DEPTH_STENCIL)
      invalidate_depth = invalidate_stencil && stencil_irb;

   if (depth_irb) {
      if (rebase_depth_stencil(brw, depth_irb, invalidate_depth)) {
         /* In the case of stencil_irb being the same packed depth/stencil
          * texture but not the same rb, make it point at our rebased mt, too.
          */
         if (stencil_irb &&
             stencil_irb != depth_irb &&
             stencil_irb->mt == depth_mt) {
            intel_miptree_reference(&stencil_irb->mt, depth_irb->mt);
            intel_renderbuffer_set_draw_offset(stencil_irb);
         }
      }

      if (stencil_irb) {
         assert(stencil_irb->mt == depth_irb->mt);
         assert(stencil_irb->mt_level == depth_irb->mt_level);
         assert(stencil_irb->mt_layer == depth_irb->mt_layer);
      }
   }

   /* If there is no depth attachment, consider whether stencil needs a rebase. */
   if (!depth_irb && stencil_irb)
      rebase_depth_stencil(brw, stencil_irb, invalidate_stencil);
}

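/* Emit the Gen4-5 3DSTATE_DEPTH_BUFFER packet.  This pre-Gen6 path has no
 * separate stencil or HiZ buffer packets to emit; a NULL surface is
 * programmed when no depth/stencil buffer is bound.
 */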
static void
brw_emit_depth_stencil_hiz(struct brw_context *brw,
                           struct intel_renderbuffer *depth_irb,
                           struct intel_mipmap_tree *depth_mt,
                           struct intel_renderbuffer *stencil_irb,
                           struct intel_mipmap_tree *stencil_mt)
{
   uint32_t tile_x = brw->depthstencil.tile_x;
   uint32_t tile_y = brw->depthstencil.tile_y;
   uint32_t depth_surface_type = BRW_SURFACE_NULL;
   uint32_t depthbuffer_format = BRW_DEPTHFORMAT_D32_FLOAT;
   uint32_t depth_offset = 0;
   uint32_t width = 1, height = 1;
   bool tiled_surface = true;

   /* If there's a packed depth/stencil bound to stencil only, we need to
    * emit the packed depth/stencil buffer packet.
    */
   if (!depth_irb && stencil_irb) {
      depth_irb = stencil_irb;
      depth_mt = stencil_mt;
   }

   if (depth_irb && depth_mt) {
      depthbuffer_format = brw_depthbuffer_format(brw);
      depth_surface_type = BRW_SURFACE_2D;
      depth_offset = brw->depthstencil.depth_offset;
      width = depth_irb->Base.Base.Width;
      height = depth_irb->Base.Base.Height;
      tiled_surface = depth_mt->surf.tiling != ISL_TILING_LINEAR;
   }

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const unsigned len = (devinfo->is_g4x || devinfo->gen == 5) ? 6 : 5;

   BEGIN_BATCH(len);
   OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
   OUT_BATCH((depth_mt ? depth_mt->surf.row_pitch_B - 1 : 0) |
             (depthbuffer_format << 18) |
             (BRW_TILEWALK_YMAJOR << 26) |
             (tiled_surface << 27) |
             (depth_surface_type << 29));

   if (depth_mt) {
      OUT_RELOC(depth_mt->bo, RELOC_WRITE, depth_offset);
   } else {
      OUT_BATCH(0);
   }

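   /* Width and height are programmed minus one, with the intra-tile offsets
    * folded in since rendering is shifted by (tile_x, tile_y).
    */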
   OUT_BATCH(((width + tile_x - 1) << 6) |
             ((height + tile_y - 1) << 19));
   OUT_BATCH(0);

   if (devinfo->is_g4x || devinfo->gen >= 5)
      OUT_BATCH(tile_x | (tile_y << 16));
   else
      assert(tile_x == 0 && tile_y == 0);

   if (devinfo->gen >= 6)
      OUT_BATCH(0);

   ADVANCE_BATCH();
}

void
brw_emit_depthbuffer(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   /* _NEW_BUFFERS */
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = intel_renderbuffer_get_mt(depth_irb);
   struct intel_mipmap_tree *stencil_mt = get_stencil_miptree(stencil_irb);

   if (depth_mt)
      brw_cache_flush_for_depth(brw, depth_mt->bo);
   if (stencil_mt)
      brw_cache_flush_for_depth(brw, stencil_mt->bo);

   if (devinfo->gen < 6) {
      brw_emit_depth_stencil_hiz(brw, depth_irb, depth_mt,
                                 stencil_irb, stencil_mt);
      return;
   }

   /* Skip repeated NULL depth/stencil emits (think 2D rendering). */
   if (!depth_mt && !stencil_mt && brw->no_depth_or_stencil) {
      assert(brw->hw_ctx);
      return;
   }

   brw_emit_depth_stall_flushes(brw);

   const unsigned ds_dwords = brw->isl_dev.ds.size / 4;
   intel_batchbuffer_begin(brw, ds_dwords);
   uint32_t *ds_map = brw->batch.map_next;
   const uint32_t ds_offset = (char *)ds_map - (char *)brw->batch.batch.map;
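   /* Remember the packet's offset within the batch so that the relocations
    * below can patch addresses into the dwords isl packs.
    */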

   struct isl_view view = {
      /* Some nice defaults */
      .base_level = 0,
      .levels = 1,
      .base_array_layer = 0,
      .array_len = 1,
      .swizzle = ISL_SWIZZLE_IDENTITY,
   };

   struct isl_depth_stencil_hiz_emit_info info = {
      .view = &view,
   };

   if (depth_mt) {
      view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
      info.depth_surf = &depth_mt->surf;

      info.depth_address =
         brw_batch_reloc(&brw->batch,
                         ds_offset + brw->isl_dev.ds.depth_offset,
                         depth_mt->bo, depth_mt->offset, RELOC_WRITE);

      info.mocs = brw_get_bo_mocs(devinfo, depth_mt->bo);
      view.base_level = depth_irb->mt_level - depth_irb->mt->first_level;
      view.base_array_layer = depth_irb->mt_layer;
      view.array_len = MAX2(depth_irb->layer_count, 1);
      view.format = depth_mt->surf.format;

      info.hiz_usage = depth_mt->aux_usage;
      if (!intel_renderbuffer_has_hiz(depth_irb)) {
         /* Just because a miptree has ISL_AUX_USAGE_HIZ does not mean that
          * all miplevels of that miptree are guaranteed to support HiZ.  See
          * intel_miptree_level_enable_hiz for details.
          */
         info.hiz_usage = ISL_AUX_USAGE_NONE;
      }

      if (info.hiz_usage == ISL_AUX_USAGE_HIZ) {
         info.hiz_surf = &depth_mt->aux_buf->surf;

         uint32_t hiz_offset = 0;
         if (devinfo->gen == 6) {
            /* HiZ surfaces on Sandy Bridge technically don't support
             * mip-mapping.  However, we can fake it by offsetting to the
             * first slice of LOD0 in the HiZ surface.
             */
            isl_surf_get_image_offset_B_tile_sa(&depth_mt->aux_buf->surf,
                                                view.base_level, 0, 0,
                                                &hiz_offset, NULL, NULL);
         }

         info.hiz_address =
            brw_batch_reloc(&brw->batch,
                            ds_offset + brw->isl_dev.ds.hiz_offset,
                            depth_mt->aux_buf->bo,
                            depth_mt->aux_buf->offset + hiz_offset,
                            RELOC_WRITE);
      }

      info.depth_clear_value = depth_mt->fast_clear_color.f32[0];
   }

   if (stencil_mt) {
      view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
      info.stencil_surf = &stencil_mt->surf;

      if (!depth_mt) {
         info.mocs = brw_get_bo_mocs(devinfo, stencil_mt->bo);
         view.base_level = stencil_irb->mt_level - stencil_irb->mt->first_level;
         view.base_array_layer = stencil_irb->mt_layer;
         view.array_len = MAX2(stencil_irb->layer_count, 1);
         view.format = stencil_mt->surf.format;
      }

      uint32_t stencil_offset = 0;
      if (devinfo->gen == 6) {
         /* Stencil surfaces on Sandy Bridge technically don't support
          * mip-mapping.  However, we can fake it by offsetting to the
          * first slice of LOD0 in the stencil surface.
          */
         isl_surf_get_image_offset_B_tile_sa(&stencil_mt->surf,
                                             view.base_level, 0, 0,
                                             &stencil_offset, NULL, NULL);
      }

      info.stencil_address =
         brw_batch_reloc(&brw->batch,
                         ds_offset + brw->isl_dev.ds.stencil_offset,
                         stencil_mt->bo,
                         stencil_mt->offset + stencil_offset,
                         RELOC_WRITE);
   }

   isl_emit_depth_stencil_hiz_s(&brw->isl_dev, ds_map, &info);

   brw->batch.map_next += ds_dwords;
   intel_batchbuffer_advance(brw);

   brw->no_depth_or_stencil = !depth_mt && !stencil_mt;
}

const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_AUX_STATE |
             BRW_NEW_BATCH |
             BRW_NEW_BLORP,
   },
   .emit = brw_emit_depthbuffer,
};

void
brw_emit_select_pipeline(struct brw_context *brw, enum brw_pipeline pipeline)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const bool is_965 = devinfo->gen == 4 && !devinfo->is_g4x;
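   /* The PIPELINE_SELECT opcode differs between the original 965 and
    * G45/GM45 and later parts.
    */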
   const uint32_t _3DSTATE_PIPELINE_SELECT =
      is_965 ? CMD_PIPELINE_SELECT_965 : CMD_PIPELINE_SELECT_GM45;

   if (devinfo->gen >= 8 && devinfo->gen < 10) {
      /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
       *
       *   Software must clear the COLOR_CALC_STATE Valid field in
       *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
       *   with Pipeline Select set to GPGPU.
       *
       * The internal hardware docs recommend the same workaround for Gen9
       * hardware too.
       */
      if (pipeline == BRW_COMPUTE_PIPELINE) {
         BEGIN_BATCH(2);
         OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (2 - 2));
         OUT_BATCH(0);
         ADVANCE_BATCH();

         brw->ctx.NewDriverState |= BRW_NEW_CC_STATE;
      }
   }

   if (devinfo->gen == 9 && pipeline == BRW_RENDER_PIPELINE) {
      /* We seem to have issues with geometry flickering when 3D and compute
       * are combined in the same batch and this appears to fix it.
       */
      const uint32_t subslices = MAX2(brw->screen->subslice_total, 1);
      const uint32_t maxNumberofThreads =
         devinfo->max_cs_threads * subslices - 1;

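      /* Emit a minimal MEDIA_VFE_STATE: the maximum thread count together
       * with what appears to be a small URB entry count and allocation
       * size; all other fields stay zero.
       */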
      BEGIN_BATCH(9);
      OUT_BATCH(MEDIA_VFE_STATE << 16 | (9 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(2 << 8 | maxNumberofThreads << 16);
      OUT_BATCH(0);
      OUT_BATCH(2 << 16);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   if (devinfo->gen >= 6) {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: DEVSNB+
       *
       *   Software must ensure all the write caches are flushed through a
       *   stalling PIPE_CONTROL command followed by another PIPE_CONTROL
       *   command to invalidate read only caches prior to programming
       *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
       */
      const unsigned dc_flush =
         devinfo->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  dc_flush |
                                  PIPE_CONTROL_CS_STALL);

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE);

   } else {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: PRE-DEVSNB
       *
       *   Software must ensure the current pipeline is flushed via an
       *   MI_FLUSH or PIPE_CONTROL prior to the execution of PIPELINE_SELECT.
       */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }

   /* Select the pipeline */
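   /* On Gen9+ the pipeline-select field is masked: (3 << 8) must be set for
    * the low bits to take effect.  A value of 2 in bits 1:0 selects the
    * GPGPU pipeline, 0 the 3D pipeline.
    */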
   BEGIN_BATCH(1);
   OUT_BATCH(_3DSTATE_PIPELINE_SELECT << 16 |
             (devinfo->gen >= 9 ? (3 << 8) : 0) |
             (pipeline == BRW_COMPUTE_PIPELINE ? 2 : 0));
   ADVANCE_BATCH();

   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       pipeline == BRW_RENDER_PIPELINE) {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: DEVIVB, DEVHSW:GT3:A0
       *
       *   Software must send a pipe_control with a CS stall and a post sync
       *   operation and then a dummy DRAW after every MI_SET_CONTEXT and
       *   after any PIPELINE_SELECT that is enabling 3D mode.
       */
      gen7_emit_cs_stall_flush(brw);

      BEGIN_BATCH(7);
      OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
      OUT_BATCH(_3DPRIM_POINTLIST);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   if (devinfo->is_geminilake) {
      /* Project: DevGLK
       *
       * "This chicken bit works around a hardware issue with barrier logic
       *  encountered when switching between GPGPU and 3D pipelines.  To
       *  workaround the issue, this mode bit should be set after a pipeline
       *  is selected."
       */
      const unsigned barrier_mode =
         pipeline == BRW_RENDER_PIPELINE ? GLK_SCEC_BARRIER_MODE_3D_HULL
                                         : GLK_SCEC_BARRIER_MODE_GPGPU;
      brw_load_register_imm32(brw, SLICE_COMMON_ECO_CHICKEN1,
                              barrier_mode | GLK_SCEC_BARRIER_MODE_MASK);
   }
}

/**
 * Misc invariant state packets
 */
void
brw_upload_invariant_state(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const bool is_965 = devinfo->gen == 4 && !devinfo->is_g4x;

   brw_emit_select_pipeline(brw, BRW_RENDER_PIPELINE);
   brw->last_pipeline = BRW_RENDER_PIPELINE;

   if (devinfo->gen >= 8) {
      BEGIN_BATCH(3);
      OUT_BATCH(CMD_STATE_SIP << 16 | (3 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(2);
      OUT_BATCH(CMD_STATE_SIP << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* Original Gen4 doesn't have 3DSTATE_AA_LINE_PARAMETERS. */
   if (!is_965) {
      BEGIN_BATCH(3);
      OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
      /* use legacy aa line coverage computation */
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}

/**
 * Define the base addresses which some state is referenced from.
 *
 * This allows us to avoid having to emit relocations for the objects,
 * and is actually required for binding table pointers on gen6.
 *
 * Surface state base address covers binding table pointers and
 * surface state objects, but not the surfaces that the surface state
 * objects point to.
 */
void
brw_upload_state_base_address(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (brw->batch.state_base_address_emitted)
      return;

   /* FINISHME: According to section 3.6.1 "STATE_BASE_ADDRESS" of
    * vol1a of the G45 PRM, MI_FLUSH with the ISC invalidate should be
    * programmed prior to STATE_BASE_ADDRESS.
    *
    * However, given that the instruction SBA (general state base
    * address) on this chipset is always set to 0 across X and GL,
    * maybe this isn't required for us in particular.
    */

   if (devinfo->gen >= 6) {
      const unsigned dc_flush =
         devinfo->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;

      /* Emit a render target cache flush.
       *
       * This isn't documented anywhere in the PRM.  However, it seems to be
       * necessary prior to changing the surface state base address.  We've
       * seen issues in Vulkan where we get GPU hangs when using multi-level
       * command buffers which clear depth, reset state base address, and then
       * go render stuff.
       *
       * Normally, in GL, we would trust the kernel to do sufficient stalls
       * and flushes prior to executing our batch.  However, it doesn't seem
       * as if the kernel's flushing is always sufficient and we don't want to
       * rely on it.
       *
       * We make this an end-of-pipe sync instead of a normal flush because we
       * do not know the current status of the GPU.  On Haswell at least,
       * having a fast-clear operation in flight at the same time as a normal
       * rendering operation can cause hangs.  Since the kernel's flushing is
       * insufficient, we need to ensure that any rendering operations from
       * other processes are definitely complete before we try to do our own
       * rendering.  It's a bit of a big hammer but it appears to work.
       */
      brw_emit_end_of_pipe_sync(brw,
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                dc_flush);
   }

   if (devinfo->gen >= 8) {
      /* STATE_BASE_ADDRESS has issues with 48-bit address spaces.  If the
       * address + size as seen by STATE_BASE_ADDRESS overflows 48 bits,
       * the GPU appears to treat all accesses to the buffer as being out
       * of bounds and returns zero.  To work around this, we pin all SBAs
       * to the bottom 4GB.
       */
      uint32_t mocs_wb = devinfo->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
      int pkt_len = devinfo->gen >= 10 ? 22 : (devinfo->gen >= 9 ? 19 : 16);

      BEGIN_BATCH(pkt_len);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (pkt_len - 2));
      /* General state base address: stateless DP read/write requests */
      OUT_BATCH(mocs_wb << 4 | 1);
      OUT_BATCH(0);
      OUT_BATCH(mocs_wb << 16);
      /* Surface state base address: */
      OUT_RELOC64(brw->batch.state.bo, RELOC_32BIT, mocs_wb << 4 | 1);
      /* Dynamic state base address: */
      OUT_RELOC64(brw->batch.state.bo, RELOC_32BIT, mocs_wb << 4 | 1);
      /* Indirect object base address: MEDIA_OBJECT data */
      OUT_BATCH(mocs_wb << 4 | 1);
      OUT_BATCH(0);
      /* Instruction base address: shader kernels (incl. SIP) */
      OUT_RELOC64(brw->cache.bo, RELOC_32BIT, mocs_wb << 4 | 1);
      /* General state buffer size */
      OUT_BATCH(0xfffff001);
      /* Dynamic state buffer size */
      OUT_BATCH(ALIGN(MAX_STATE_SIZE, 4096) | 1);
      /* Indirect object upper bound */
      OUT_BATCH(0xfffff001);
      /* Instruction access upper bound */
      OUT_BATCH(ALIGN(brw->cache.bo->size, 4096) | 1);
      if (devinfo->gen >= 9) {
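         /* Gen9 added bindless surface state base address fields here:
          * program a zero base (with the modify-enable bit set) and a
          * zero size.
          */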
         OUT_BATCH(1);
         OUT_BATCH(0);
         OUT_BATCH(0);
      }
      if (devinfo->gen >= 10) {
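         /* Gen10 similarly added bindless sampler state base address
          * fields; again a zero base with modify enable set.
          */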
         OUT_BATCH(1);
         OUT_BATCH(0);
         OUT_BATCH(0);
      }
      ADVANCE_BATCH();
   } else if (devinfo->gen >= 6) {
      uint8_t mocs = devinfo->gen == 7 ? GEN7_MOCS_L3 : 0;

      BEGIN_BATCH(10);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
      OUT_BATCH(mocs << 8 | /* General State Memory Object Control State */
                mocs << 4 | /* Stateless Data Port Access Memory Object Control State */
                1); /* General State Base Address Modify Enable */
      /* Surface state base address:
       * BINDING_TABLE_STATE
       * SURFACE_STATE
       */
      OUT_RELOC(brw->batch.state.bo, 0, 1);
      /* Dynamic state base address:
       * SAMPLER_STATE
       * SAMPLER_BORDER_COLOR_STATE
       * CLIP, SF, WM/CC viewport state
       * COLOR_CALC_STATE
       * DEPTH_STENCIL_STATE
       * BLEND_STATE
       * Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
       * Disable is clear, which we rely on)
       */
      OUT_RELOC(brw->batch.state.bo, 0, 1);

      OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */

      /* Instruction base address: shader kernels (incl. SIP) */
      OUT_RELOC(brw->cache.bo, 0, 1);

      OUT_BATCH(1); /* General state upper bound */
      /* Dynamic state upper bound.  Although the documentation says that
       * programming it to zero will cause it to be ignored, that is a lie.
       * If this isn't programmed to a real bound, the sampler border color
       * pointer is rejected, causing border color to mysteriously fail.
       */
      OUT_BATCH(0xfffff001);
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else if (devinfo->gen == 5) {
      BEGIN_BATCH(8);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(brw->batch.state.bo, 0, 1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_RELOC(brw->cache.bo, 0, 1); /* Instruction base address */
      OUT_BATCH(0xfffff001); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(6);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(brw->batch.state.bo, 0, 1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      ADVANCE_BATCH();
   }

   if (devinfo->gen >= 6) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
   }

   /* According to section 3.6.1 of VOL1 of the 965 PRM,
    * STATE_BASE_ADDRESS updates require a reissue of:
    *
    * 3DSTATE_PIPELINE_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * and this continues through Ironlake.  The Sandy Bridge PRM, vol
    * 1 part 1 says that the following packets must be reissued:
    *
    * 3DSTATE_CC_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * 3DSTATE_SAMPLER_STATE_POINTERS
    * 3DSTATE_VIEWPORT_STATE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * Those are always reissued following SBA updates anyway (new
    * batch time), except in the case of the program cache BO
    * changing.  Having a separate state flag makes the sequence more
    * obvious.
    */

   brw->ctx.NewDriverState |= BRW_NEW_STATE_BASE_ADDRESS;
   brw->batch.state_base_address_emitted = true;
}
