/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef IRIS_BATCH_DOT_H
#define IRIS_BATCH_DOT_H

#include <stdint.h>
#include <stdbool.h>
#include <string.h>

#include "util/u_dynarray.h"

#include "drm-uapi/i915_drm.h"
#include "common/gen_decoder.h"

#include "iris_fence.h"

/* The kernel assumes batchbuffers are smaller than 256kB. */
#define MAX_BATCH_SIZE (256 * 1024)

/* Our target batch size - flush approximately at this point. */
#define BATCH_SZ (20 * 1024)

enum iris_batch_name {
   IRIS_BATCH_RENDER,
   IRIS_BATCH_COMPUTE,
};

/* Must match the number of values in enum iris_batch_name. */
#define IRIS_BATCH_COUNT 2

struct iris_address {
   struct iris_bo *bo;
   uint64_t offset;
   bool write;
};
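
/*
 * Example (illustrative sketch, not part of the interface): an address pairs
 * a buffer object with a byte offset into it, with `write` saying whether
 * the GPU will write through this reference:
 *
 *    struct iris_address addr = {
 *       .bo = some_bo,     // hypothetical buffer object
 *       .offset = 0,
 *       .write = true,     // the GPU writes via this address
 *    };
 */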

struct iris_batch {
   struct iris_screen *screen;
   struct iris_vtable *vtbl;
   struct pipe_debug_callback *dbg;

   /** What batch is this? (e.g. IRIS_BATCH_RENDER/COMPUTE) */
   enum iris_batch_name name;

   /** Current batchbuffer being queued up. */
   struct iris_bo *bo;
   void *map;
   void *map_next;
   /** Size of the primary batch if we've moved on to a secondary. */
   unsigned primary_batch_size;

   /** Last Surface State Base Address set in this hardware context. */
   uint64_t last_surface_base_address;

   uint32_t hw_ctx_id;

   /** Which engine this batch targets - an I915_EXEC_RING_MASK value */
   uint8_t engine;

   /** The validation list */
   struct drm_i915_gem_exec_object2 *validation_list;
   struct iris_bo **exec_bos;
   int exec_count;
   int exec_array_size;

   /**
    * A list of iris_syncpts associated with this batch.
    *
    * The first list entry will always be a signalling sync-point, indicating
    * that this batch has completed.  The others are likely to be sync-points
    * to wait on before executing the batch.
    */
   struct util_dynarray syncpts;

   /** A list of drm_i915_exec_fences to have execbuf signal or wait on */
   struct util_dynarray exec_fences;

   /** The amount of aperture space (in bytes) used by all exec_bos */
   int aperture_space;

   /** A sync-point for the last batch that was submitted. */
   struct iris_syncpt *last_syncpt;

   /** List of other batches which we might need to flush to use a BO */
   struct iris_batch *other_batches[IRIS_BATCH_COUNT - 1];

   struct {
      /**
       * Set of struct iris_bo * that have been rendered to within this
       * batchbuffer and would need flushing before being used from another
       * cache domain that isn't coherent with it (i.e. the sampler).
       */
      struct hash_table *render;

      /**
       * Set of struct iris_bo * that have been used as a depth buffer within
       * this batchbuffer and would need flushing before being used from
       * another cache domain that isn't coherent with it (i.e. the sampler).
       */
      struct set *depth;
   } cache;

   struct gen_batch_decode_ctx decoder;

   /** Have we emitted any draw calls to this batch? */
   bool contains_draw;
};
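
/*
 * Illustrative sketch (not part of the interface): the syncpts dynarray
 * stores struct iris_syncpt * elements, so walking it with the helper from
 * util/u_dynarray.h looks like:
 *
 *    util_dynarray_foreach(&batch->syncpts, struct iris_syncpt *, s) {
 *       // *s is the signalling syncpt (first entry) or one to wait on
 *    }
 */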

void iris_init_batch(struct iris_batch *batch,
                     struct iris_screen *screen,
                     struct iris_vtable *vtbl,
                     struct pipe_debug_callback *dbg,
                     struct iris_batch *all_batches,
                     enum iris_batch_name name,
                     uint8_t ring,
                     int priority);
void iris_chain_to_new_batch(struct iris_batch *batch);
void iris_batch_free(struct iris_batch *batch);
void iris_batch_maybe_flush(struct iris_batch *batch, unsigned estimate);

void _iris_batch_flush(struct iris_batch *batch, const char *file, int line);
#define iris_batch_flush(batch) _iris_batch_flush((batch), __FILE__, __LINE__)

bool iris_batch_references(struct iris_batch *batch, struct iris_bo *bo);

#define RELOC_WRITE EXEC_OBJECT_WRITE

void iris_use_pinned_bo(struct iris_batch *batch, struct iris_bo *bo,
                        bool writable);
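
/*
 * Typical usage sketch (illustrative only; the exact sequence and the space
 * estimate are assumptions, not a prescribed flow):
 *
 *    iris_batch_maybe_flush(batch, 1500);   // flush first if nearly full
 *    iris_use_pinned_bo(batch, bo, false);  // put bo on the validation list
 *    ...emit commands that reference bo...
 *    iris_batch_flush(batch);               // submit to the kernel
 */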

static inline unsigned
iris_batch_bytes_used(struct iris_batch *batch)
{
   return batch->map_next - batch->map;
}

/**
 * Ensure the current command buffer has \p size bytes of space
 * remaining.  If not, this creates a secondary batch buffer and emits
 * a jump from the primary batch to the start of the secondary.
 *
 * Most callers want iris_get_command_space() instead.
 */
static inline void
iris_require_command_space(struct iris_batch *batch, unsigned size)
{
   const unsigned required_bytes = iris_batch_bytes_used(batch) + size;

   if (required_bytes >= BATCH_SZ) {
      iris_chain_to_new_batch(batch);
   }
}

/**
 * Allocate space in the current command buffer, and return a pointer
 * to the mapped area so the caller can write commands there.
 *
 * This should be called whenever emitting commands.
 */
static inline void *
iris_get_command_space(struct iris_batch *batch, unsigned bytes)
{
   iris_require_command_space(batch, bytes);
   void *map = batch->map_next;
   batch->map_next += bytes;
   return map;
}
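
/*
 * Example (illustrative sketch): callers typically allocate space for a
 * fixed number of DWords and fill them in through the returned pointer.
 * EXAMPLE_OPCODE below is a placeholder, not a real command encoding:
 *
 *    uint32_t *dw = iris_get_command_space(batch, 4 * sizeof(uint32_t));
 *    dw[0] = EXAMPLE_OPCODE;   // hypothetical packed command header
 *    dw[1] = dw[2] = dw[3] = 0;
 */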

/**
 * Helper to emit GPU commands - allocates space, copies them there.
 */
static inline void
iris_batch_emit(struct iris_batch *batch, const void *data, unsigned size)
{
   void *map = iris_get_command_space(batch, size);
   memcpy(map, data, size);
}
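
/*
 * Example (illustrative sketch): copying a command that was packed into a
 * local array ahead of time.  The encoding here is a placeholder:
 *
 *    uint32_t cmd[2] = { EXAMPLE_OPCODE, 0 };   // hypothetical encoding
 *    iris_batch_emit(batch, cmd, sizeof(cmd));
 */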

/**
 * Take a reference to the batch's signalling syncpt.
 *
 * Callers can use this to wait for the current batch under construction
 * to complete (after flushing it).
 */
static inline void
iris_batch_reference_signal_syncpt(struct iris_batch *batch,
                                   struct iris_syncpt **out_syncpt)
{
   /* The signalling syncpt is the first one in the list. */
   struct iris_syncpt *syncpt =
      ((struct iris_syncpt **) util_dynarray_begin(&batch->syncpts))[0];
   iris_syncpt_reference(batch->screen, out_syncpt, syncpt);
}
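
/*
 * Usage sketch (illustrative; the wait step depends on the syncpt API in
 * iris_fence.h and is only hinted at here):
 *
 *    struct iris_syncpt *syncpt = NULL;
 *    iris_batch_reference_signal_syncpt(batch, &syncpt);
 *    iris_batch_flush(batch);
 *    ...wait on syncpt, then drop the reference, e.g. with
 *       iris_syncpt_reference(batch->screen, &syncpt, NULL)...
 */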

#endif