1/*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#ifndef IRIS_BATCH_DOT_H
25#define IRIS_BATCH_DOT_H
26
27#include <stdint.h>
28#include <stdbool.h>
29#include <string.h>
30
31#include "util/u_dynarray.h"
32
33#include "drm-uapi/i915_drm.h"
34#include "common/gen_decoder.h"
35
36#include "iris_fence.h"
37
38/* The kernel assumes batchbuffers are smaller than 256kB. */
39#define MAX_BATCH_SIZE (256 * 1024)
40
41/* Our target batch size - flush approximately at this point. */
42#define BATCH_SZ (20 * 1024)
43
/**
 * Identifies which hardware queue a batch is destined for.
 */
enum iris_batch_name {
   IRIS_BATCH_RENDER,
   IRIS_BATCH_COMPUTE,
};

/* Number of enumerants in iris_batch_name - keep in sync with the enum. */
#define IRIS_BATCH_COUNT 2
50
/**
 * An address for GPU commands/state: a buffer object plus a byte offset
 * within it.
 */
struct iris_address {
   /** The backing buffer object. */
   struct iris_bo *bo;
   /** Byte offset into \c bo. */
   uint64_t offset;
   /* NOTE(review): presumably marks the reference as a GPU write (cf.
    * RELOC_WRITE / EXEC_OBJECT_WRITE below) - confirm against users. */
   bool write;
};
56
/**
 * A batchbuffer under construction, plus the bookkeeping needed to submit
 * it to the kernel (validation list, fences, sync-points) and to track
 * cross-batch and cache dependencies.
 */
struct iris_batch {
   struct iris_screen *screen;
   struct iris_vtable *vtbl;
   struct pipe_debug_callback *dbg;

   /** What batch is this? (e.g. IRIS_BATCH_RENDER/COMPUTE) */
   enum iris_batch_name name;

   /** Current batchbuffer being queued up. */
   struct iris_bo *bo;
   /** CPU mapping of \c bo. */
   void *map;
   /** Next free byte in \c map - where new commands are packed. */
   void *map_next;
   /** Size of the primary batch if we've moved on to a secondary. */
   unsigned primary_batch_size;

   /** Last Surface State Base Address set in this hardware context. */
   uint64_t last_surface_base_address;

   /** Kernel hardware context ID used when submitting this batch. */
   uint32_t hw_ctx_id;

   /** Which engine this batch targets - a I915_EXEC_RING_MASK value */
   uint8_t engine;

   /** The validation list */
   struct drm_i915_gem_exec_object2 *validation_list;
   /* NOTE(review): appears to be a parallel array to validation_list,
    * holding the iris_bo for each entry - confirm in iris_batch.c. */
   struct iris_bo **exec_bos;
   /** Number of entries currently in use in the two arrays above. */
   int exec_count;
   /** Allocated capacity of the two arrays above. */
   int exec_array_size;

   /**
    * A list of iris_syncpts associated with this batch.
    *
    * The first list entry will always be a signalling sync-point, indicating
    * that this batch has completed.  The others are likely to be sync-points
    * to wait on before executing the batch.
    */
   struct util_dynarray syncpts;

   /** A list of drm_i915_exec_fences to have execbuf signal or wait on */
   struct util_dynarray exec_fences;

   /** The amount of aperture space (in bytes) used by all exec_bos */
   int aperture_space;

   /** A sync-point for the last batch that was submitted. */
   struct iris_syncpt *last_syncpt;

   /** List of other batches which we might need to flush to use a BO */
   struct iris_batch *other_batches[IRIS_BATCH_COUNT - 1];

   struct {
      /**
       * Set of struct iris_bo * that have been rendered to within this
       * batchbuffer and would need flushing before being used from another
       * cache domain that isn't coherent with it (i.e. the sampler).
       */
      struct hash_table *render;

      /**
       * Set of struct iris_bo * that have been used as a depth buffer within
       * this batchbuffer and would need flushing before being used from
       * another cache domain that isn't coherent with it (i.e. the sampler).
       */
      struct set *depth;
   } cache;

   /** Batch decoder state, for debug disassembly of the batch contents. */
   struct gen_batch_decode_ctx decoder;

   /** Have we emitted any draw calls to this batch? */
   bool contains_draw;
};
128
/* Set up a batch targeting the given engine (\p ring, an I915_EXEC_* value)
 * at the given kernel scheduling \p priority.  \p all_batches is the full
 * batch array, used to populate other_batches[]. */
void iris_init_batch(struct iris_batch *batch,
                     struct iris_screen *screen,
                     struct iris_vtable *vtbl,
                     struct pipe_debug_callback *dbg,
                     struct iris_batch *all_batches,
                     enum iris_batch_name name,
                     uint8_t ring,
                     int priority);
/* Start a secondary batch buffer and jump to it from the current one. */
void iris_chain_to_new_batch(struct iris_batch *batch);
/* Release all resources owned by the batch. */
void iris_batch_free(struct iris_batch *batch);
/* Flush the batch if \p estimate additional bytes might not fit.
 * NOTE(review): exact threshold semantics live in the implementation. */
void iris_batch_maybe_flush(struct iris_batch *batch, unsigned estimate);

/* Submit the batch to the kernel.  Use the iris_batch_flush() macro so the
 * call site's file/line are captured for debugging. */
void _iris_batch_flush(struct iris_batch *batch, const char *file, int line);
#define iris_batch_flush(batch) _iris_batch_flush((batch), __FILE__, __LINE__)

/* Returns true if \p bo is on this batch's validation list. */
bool iris_batch_references(struct iris_batch *batch, struct iris_bo *bo);

/* Flag for marking a BO reference as written by the GPU. */
#define RELOC_WRITE EXEC_OBJECT_WRITE

/* Add \p bo to the batch's validation list (pinned: no relocations). */
void iris_use_pinned_bo(struct iris_batch *batch, struct iris_bo *bo,
                        bool writable);
150
151static inline unsigned
152iris_batch_bytes_used(struct iris_batch *batch)
153{
154   return batch->map_next - batch->map;
155}
156
157/**
158 * Ensure the current command buffer has \param size bytes of space
159 * remaining.  If not, this creates a secondary batch buffer and emits
160 * a jump from the primary batch to the start of the secondary.
161 *
162 * Most callers want iris_get_command_space() instead.
163 */
164static inline void
165iris_require_command_space(struct iris_batch *batch, unsigned size)
166{
167   const unsigned required_bytes = iris_batch_bytes_used(batch) + size;
168
169   if (required_bytes >= BATCH_SZ) {
170      iris_chain_to_new_batch(batch);
171   }
172}
173
174/**
175 * Allocate space in the current command buffer, and return a pointer
176 * to the mapped area so the caller can write commands there.
177 *
178 * This should be called whenever emitting commands.
179 */
180static inline void *
181iris_get_command_space(struct iris_batch *batch, unsigned bytes)
182{
183   iris_require_command_space(batch, bytes);
184   void *map = batch->map_next;
185   batch->map_next += bytes;
186   return map;
187}
188
189/**
190 * Helper to emit GPU commands - allocates space, copies them there.
191 */
192static inline void
193iris_batch_emit(struct iris_batch *batch, const void *data, unsigned size)
194{
195   void *map = iris_get_command_space(batch, size);
196   memcpy(map, data, size);
197}
198
199/**
200 * Take a reference to the batch's signalling syncpt.
201 *
202 * Callers can use this to wait for the the current batch under construction
203 * to complete (after flushing it).
204 */
205static inline void
206iris_batch_reference_signal_syncpt(struct iris_batch *batch,
207                                   struct iris_syncpt **out_syncpt)
208{
209   /* The signalling syncpt is the first one in the list. */
210   struct iris_syncpt *syncpt =
211      ((struct iris_syncpt **) util_dynarray_begin(&batch->syncpts))[0];
212   iris_syncpt_reference(batch->screen, out_syncpt, syncpt);
213}
214
215#endif
216