/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_pipe_control.c
 *
 * PIPE_CONTROL is the main flushing and synchronization primitive on Intel
 * GPUs.  It can invalidate caches, stall until rendering reaches various
 * stages of completion, write to memory, and more.  It's something of a
 * Swiss Army knife command: it has all kinds of capabilities, but some
 * significant limitations as well.
 *
 * Unfortunately, it's notoriously complicated and difficult to use.  Many
 * sub-commands can't be used together.  Some are meant to be used at the
 * top of the pipeline (invalidating caches before drawing), while some are
 * meant to be used at the end (stalling or flushing after drawing).
 *
 * Also, there's a list of restrictions a mile long, which vary by generation.
 * Do this before doing that, or suffer the consequences (usually a GPU hang).
 *
 * This file contains helpers for emitting them safely.  You can simply call
 * iris_emit_pipe_control_flush() with the desired operations (as logical
 * PIPE_CONTROL_* bits), and it will take care of splitting the request into
 * multiple PIPE_CONTROL commands as necessary.  The per-generation
 * workarounds are applied in iris_emit_raw_pipe_control() in iris_state.c.
 */
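
/*
 * For example, a caller that has just rendered to a surface and wants to
 * sample from it might request both a flush and an invalidate in a single
 * call (a hypothetical sketch; real call sites live throughout the driver):
 *
 *    iris_emit_pipe_control_flush(batch, "example: render-to-texture",
 *                                 PIPE_CONTROL_RENDER_TARGET_FLUSH |
 *                                 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 *
 * Because flush and invalidate bits are combined here, the helper below
 * will split the request into an end-of-pipe sync followed by a separate
 * invalidating PIPE_CONTROL.
 */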

#include "iris_context.h"
#include "util/hash_table.h"
#include "util/set.h"

/**
 * Emit a PIPE_CONTROL with various flushing flags.
 *
 * The caller is responsible for deciding what flags are appropriate for the
 * given generation.
 */
void
iris_emit_pipe_control_flush(struct iris_batch *batch,
                             const char *reason,
                             uint32_t flags)
{
   if ((flags & PIPE_CONTROL_CACHE_FLUSH_BITS) &&
       (flags & PIPE_CONTROL_CACHE_INVALIDATE_BITS)) {
      /* A pipe control command with flush and invalidate bits set
       * simultaneously is an inherently racy operation on Gfx6+ if the
       * contents of the flushed caches were intended to become visible from
       * any of the invalidated caches.  Split it in two PIPE_CONTROLs: the
       * first one should stall the pipeline to make sure that the flushed
       * R/W caches are coherent with memory once the specified R/O caches
       * are invalidated.  On pre-Gfx6 hardware the (implicit) R/O cache
       * invalidation seems to happen at the bottom of the pipeline together
       * with any write cache flush, so this shouldn't be a concern.  In
       * order to ensure a full stall, we do an end-of-pipe sync.
       */
      iris_emit_end_of_pipe_sync(batch, reason,
                                 flags & PIPE_CONTROL_CACHE_FLUSH_BITS);
      flags &= ~(PIPE_CONTROL_CACHE_FLUSH_BITS | PIPE_CONTROL_CS_STALL);
   }

   batch->screen->vtbl.emit_raw_pipe_control(batch, reason, flags, NULL, 0, 0);
}

/**
 * Emit a PIPE_CONTROL that writes to a buffer object.
 *
 * \p flags should contain one of the following items:
 *  - PIPE_CONTROL_WRITE_IMMEDIATE
 *  - PIPE_CONTROL_WRITE_TIMESTAMP
 *  - PIPE_CONTROL_WRITE_DEPTH_COUNT
 */
void
iris_emit_pipe_control_write(struct iris_batch *batch,
                             const char *reason, uint32_t flags,
                             struct iris_bo *bo, uint32_t offset,
                             uint64_t imm)
{
   batch->screen->vtbl.emit_raw_pipe_control(batch, reason, flags, bo, offset, imm);
}
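
/*
 * For instance, a (hypothetical) fence implementation could ask for a
 * 64-bit value to be written to memory once all prior work reaches the
 * end of the pipe; fence_bo, fence_offset and fence_value below are
 * stand-ins for caller-provided state:
 *
 *    iris_emit_pipe_control_write(batch, "example: fence signal",
 *                                 PIPE_CONTROL_WRITE_IMMEDIATE |
 *                                 PIPE_CONTROL_CS_STALL,
 *                                 fence_bo, fence_offset, fence_value);
 */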

/**
 * From Sandybridge PRM, volume 2, "1.7.2 End-of-Pipe Synchronization":
 *
 *  Write synchronization is a special case of end-of-pipe
 *  synchronization that requires that the render cache and/or depth
 *  related caches are flushed to memory, where the data will become
 *  globally visible. This type of synchronization is required prior to
 *  SW (CPU) actually reading the result data from memory, or initiating
 *  an operation that will use as a read surface (such as a texture
 *  surface) a previous render target and/or depth/stencil buffer
 *
 * From Haswell PRM, volume 2, part 1, "End-of-Pipe Synchronization":
 *
 *  Exercising the write cache flush bits (Render Target Cache Flush
 *  Enable, Depth Cache Flush Enable, DC Flush) in PIPE_CONTROL only
 *  ensures the write caches are flushed and doesn't guarantee the data
 *  is globally visible.
 *
 *  SW can track the completion of the end-of-pipe-synchronization by
 *  using "Notify Enable" and "PostSync Operation - Write Immediate
 *  Data" in the PIPE_CONTROL command.
 */
void
iris_emit_end_of_pipe_sync(struct iris_batch *batch,
                           const char *reason, uint32_t flags)
{
   /* From Sandybridge PRM, volume 2, "1.7.3.1 Writing a Value to Memory":
    *
    *    "The most common action to perform upon reaching a synchronization
    *    point is to write a value out to memory. An immediate value
    *    (included with the synchronization command) may be written."
    *
    * From Broadwell PRM, volume 7, "End-of-Pipe Synchronization":
    *
    *    "In case the data flushed out by the render engine is to be read
    *    back in to the render engine in coherent manner, then the render
    *    engine has to wait for the fence completion before accessing the
    *    flushed data. This can be achieved by following means on various
    *    products: PIPE_CONTROL command with CS Stall and the required
    *    write caches flushed with Post-Sync-Operation as Write Immediate
    *    Data.
    *
    *    Example:
    *       - Workload-1 (3D/GPGPU/MEDIA)
    *       - PIPE_CONTROL (CS Stall, Post-Sync-Operation Write Immediate
    *         Data, Required Write Cache Flush bits set)
    *       - Workload-2 (Can use the data produce or output by Workload-1)
    */
   iris_emit_pipe_control_write(batch, reason,
                                flags | PIPE_CONTROL_CS_STALL |
                                PIPE_CONTROL_WRITE_IMMEDIATE,
                                batch->screen->workaround_address.bo,
                                batch->screen->workaround_address.offset, 0);
}
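
/*
 * In terms of this helper, the PRM's example sequence above would look
 * roughly like (a sketch, not a verbatim call site):
 *
 *    ... emit Workload-1 ...
 *    iris_emit_end_of_pipe_sync(batch, "example: post-workload-1 sync",
 *                               PIPE_CONTROL_RENDER_TARGET_FLUSH);
 *    ... emit Workload-2, which may now read Workload-1's output ...
 */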

/**
 * Emits appropriate flushes and invalidations for any previous memory
 * operations on \p bo to be strictly ordered relative to any subsequent
 * memory operations performed from the caching domain \p access.
 *
 * This is useful because the GPU has separate incoherent caches for the
 * render target, sampler, etc., which need to be explicitly invalidated or
 * flushed in order to obtain the expected memory ordering in cases where the
 * same surface is accessed through multiple caches (e.g. due to
 * render-to-texture).
 *
 * This provides the expected memory ordering guarantees whether the
 * previous access was performed from the same batch or a different one,
 * but only the former case needs to be handled explicitly here, since the
 * kernel already inserts implicit flushes and synchronization in order to
 * guarantee that any data dependencies between batches are satisfied.
 *
 * Even though no flushing or invalidation is required in order to account
 * for concurrent updates from other batches, we provide the guarantee that a
 * required synchronization operation due to a previous batch-local update
 * will never be omitted due to the influence of another thread accessing the
 * same buffer concurrently from the same caching domain: Such a concurrent
 * update will only ever change the seqno of the last update to a value
 * greater than the local value (see iris_bo_bump_seqno()), which means that
 * we will always emit at least as much flushing and invalidation as we would
 * have for the local seqno (see the coherent_seqnos comparisons below).
 */
void
iris_emit_buffer_barrier_for(struct iris_batch *batch,
                             struct iris_bo *bo,
                             enum iris_domain access)
{
   const uint32_t all_flush_bits = (PIPE_CONTROL_CACHE_FLUSH_BITS |
                                    PIPE_CONTROL_STALL_AT_SCOREBOARD |
                                    PIPE_CONTROL_FLUSH_ENABLE);
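   /* Bits required to "flush" a previous access from each domain: a write
    * cache flush for the write domains, or a stall for the read-only
    * domains (so that pending reads complete before a subsequent write).
    */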
   const uint32_t flush_bits[NUM_IRIS_DOMAINS] = {
      [IRIS_DOMAIN_RENDER_WRITE] = PIPE_CONTROL_RENDER_TARGET_FLUSH,
      [IRIS_DOMAIN_DEPTH_WRITE] = PIPE_CONTROL_DEPTH_CACHE_FLUSH,
      [IRIS_DOMAIN_DATA_WRITE] = PIPE_CONTROL_DATA_CACHE_FLUSH,
      [IRIS_DOMAIN_OTHER_WRITE] = PIPE_CONTROL_FLUSH_ENABLE,
      [IRIS_DOMAIN_VF_READ] = PIPE_CONTROL_STALL_AT_SCOREBOARD,
      [IRIS_DOMAIN_OTHER_READ] = PIPE_CONTROL_STALL_AT_SCOREBOARD,
   };
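   /* Bits required to invalidate a domain's cache before it observes data
    * written through another domain.  For the read/write domains the same
    * flush bit is used, since flushing those caches also discards any
    * stale data they may hold.
    */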
   const uint32_t invalidate_bits[NUM_IRIS_DOMAINS] = {
      [IRIS_DOMAIN_RENDER_WRITE] = PIPE_CONTROL_RENDER_TARGET_FLUSH,
      [IRIS_DOMAIN_DEPTH_WRITE] = PIPE_CONTROL_DEPTH_CACHE_FLUSH,
      [IRIS_DOMAIN_DATA_WRITE] = PIPE_CONTROL_DATA_CACHE_FLUSH,
      [IRIS_DOMAIN_OTHER_WRITE] = PIPE_CONTROL_FLUSH_ENABLE,
      [IRIS_DOMAIN_VF_READ] = PIPE_CONTROL_VF_CACHE_INVALIDATE,
      [IRIS_DOMAIN_OTHER_READ] = (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE),
   };
   uint32_t bits = 0;

   /* Iterate over all read/write domains first in order to handle RaW
    * and WaW dependencies, which might involve flushing the domain of
    * the previous access and invalidating the specified domain.
    */
   for (unsigned i = 0; i < IRIS_DOMAIN_OTHER_WRITE; i++) {
      assert(!iris_domain_is_read_only(i));
      if (i != access) {
         const uint64_t seqno = READ_ONCE(bo->last_seqnos[i]);

         /* Invalidate unless the most recent read/write access from
          * this domain is already guaranteed to be visible to the
          * specified domain.  Flush if the most recent access from
          * this domain occurred after its most recent flush.
          */
         if (seqno > batch->coherent_seqnos[access][i]) {
            bits |= invalidate_bits[access];

            if (seqno > batch->coherent_seqnos[i][i])
               bits |= flush_bits[i];
         }
      }
   }

   /* All read-only domains can be considered mutually coherent since
    * the order of read-only memory operations is immaterial.  If the
    * specified domain is read/write we need to iterate over them too,
    * in order to handle any WaR dependencies.
    */
   if (!iris_domain_is_read_only(access)) {
      for (unsigned i = IRIS_DOMAIN_VF_READ; i < NUM_IRIS_DOMAINS; i++) {
         assert(iris_domain_is_read_only(i));
         const uint64_t seqno = READ_ONCE(bo->last_seqnos[i]);

         /* Flush if the most recent access from this domain occurred
          * after its most recent flush.
          */
         if (seqno > batch->coherent_seqnos[i][i])
            bits |= flush_bits[i];
      }
   }

   /* The IRIS_DOMAIN_OTHER_WRITE kitchen-sink domain cannot be
    * considered coherent with itself since it's really a collection
    * of multiple incoherent read/write domains, so we special-case it
    * here.
    */
   const unsigned i = IRIS_DOMAIN_OTHER_WRITE;
   const uint64_t seqno = READ_ONCE(bo->last_seqnos[i]);

   /* Invalidate unless the most recent read/write access from this
    * domain is already guaranteed to be visible to the specified
    * domain.  Flush if the most recent access from this domain
    * occurred after its most recent flush.
    */
   if (seqno > batch->coherent_seqnos[access][i]) {
      bits |= invalidate_bits[access];

      if (seqno > batch->coherent_seqnos[i][i])
         bits |= flush_bits[i];
   }

   if (bits) {
      /* Stall-at-scoreboard is not expected to work in combination with other
       * flush bits.
       */
      if (bits & PIPE_CONTROL_CACHE_FLUSH_BITS)
         bits &= ~PIPE_CONTROL_STALL_AT_SCOREBOARD;

      /* Emit any required flushes and invalidations. */
      if (bits & all_flush_bits)
         iris_emit_end_of_pipe_sync(batch, "cache tracker: flush",
                                    bits & all_flush_bits);

      if (bits & ~all_flush_bits)
         iris_emit_pipe_control_flush(batch, "cache tracker: invalidate",
                                      bits & ~all_flush_bits);
   }
}
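
/*
 * A typical (hypothetical) use of the barrier helper above: before sourcing
 * vertex data from a buffer that a shader may have just written, a caller
 * could do
 *
 *    iris_emit_buffer_barrier_for(batch, res->bo, IRIS_DOMAIN_VF_READ);
 *
 * where res->bo stands in for the buffer's backing iris_bo.  The seqno
 * tracking then decides whether a data cache flush and VF cache invalidate
 * are actually needed, or whether the PIPE_CONTROLs can be skipped entirely.
 */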

/**
 * Flush and invalidate all caches (for debugging purposes).
 */
void
iris_flush_all_caches(struct iris_batch *batch)
{
   iris_emit_pipe_control_flush(batch, "debug: flush all caches",
                                PIPE_CONTROL_CS_STALL |
                                PIPE_CONTROL_DATA_CACHE_FLUSH |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_TILE_CACHE_FLUSH |
                                PIPE_CONTROL_VF_CACHE_INVALIDATE |
                                PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                                PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                                PIPE_CONTROL_STATE_CACHE_INVALIDATE);
}

static void
iris_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct iris_context *ice = (void *) ctx;
   struct iris_batch *render_batch = &ice->batches[IRIS_BATCH_RENDER];
   struct iris_batch *compute_batch = &ice->batches[IRIS_BATCH_COMPUTE];

   if (render_batch->contains_draw) {
      iris_batch_maybe_flush(render_batch, 48);
      iris_emit_pipe_control_flush(render_batch,
                                   "API: texture barrier (1/2)",
                                   PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                   PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                   PIPE_CONTROL_CS_STALL);
      iris_emit_pipe_control_flush(render_batch,
                                   "API: texture barrier (2/2)",
                                   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
   }

   if (compute_batch->contains_draw) {
      iris_batch_maybe_flush(compute_batch, 48);
      iris_emit_pipe_control_flush(compute_batch,
                                   "API: texture barrier (1/2)",
                                   PIPE_CONTROL_CS_STALL);
      iris_emit_pipe_control_flush(compute_batch,
                                   "API: texture barrier (2/2)",
                                   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
   }
}

static void
iris_memory_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct iris_context *ice = (void *) ctx;
   unsigned bits = PIPE_CONTROL_DATA_CACHE_FLUSH | PIPE_CONTROL_CS_STALL;

   if (flags & (PIPE_BARRIER_VERTEX_BUFFER |
                PIPE_BARRIER_INDEX_BUFFER |
                PIPE_BARRIER_INDIRECT_BUFFER)) {
      bits |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
   }

   if (flags & PIPE_BARRIER_CONSTANT_BUFFER) {
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
              PIPE_CONTROL_CONST_CACHE_INVALIDATE;
   }

   if (flags & (PIPE_BARRIER_TEXTURE | PIPE_BARRIER_FRAMEBUFFER)) {
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
              PIPE_CONTROL_RENDER_TARGET_FLUSH |
              PIPE_CONTROL_TILE_CACHE_FLUSH;
   }

   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
      if (ice->batches[i].contains_draw) {
         iris_batch_maybe_flush(&ice->batches[i], 24);
         iris_emit_pipe_control_flush(&ice->batches[i], "API: memory barrier",
                                      bits);
      }
   }
}

void
iris_init_flush_functions(struct pipe_context *ctx)
{
   ctx->memory_barrier = iris_memory_barrier;
   ctx->texture_barrier = iris_texture_barrier;
}