/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/list.h"
#include "util/set.h"
#include "util/hash_table.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_resource.h"
#include "freedreno_query_hw.h"

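/* (Re)initialize per-batch state: allocate the submit and its ringbuffer(s),
 * create the fence, and reset the bookkeeping that is torn down again in
 * batch_fini().
 */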
static void
batch_init(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	unsigned size = 0;

	if (ctx->screen->reorder)
		util_queue_fence_init(&batch->flush_fence);

	/* if kernel is too old to support unlimited # of cmd buffers, we
	 * have no option but to allocate large worst-case sizes so that
	 * we don't need to grow the ringbuffer.  Performance is likely to
	 * suffer, but there is no good alternative.
	 *
	 * XXX I think we can just require new enough kernel for this?
	 */
	if ((fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) ||
			(fd_mesa_debug & FD_DBG_NOGROW)) {
		size = 0x100000;
	}

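	/* A nondraw (compute/internal) batch only needs a single ring.  Draw
	 * batches get a gmem ring for the per-tile commands emitted at flush
	 * time plus a draw ring, and, on GPUs before a6xx (gpu_id < 600), a
	 * separate ring for the binning pass.
	 */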
	batch->submit = fd_submit_new(ctx->pipe);
	if (batch->nondraw) {
		batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
				FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);
	} else {
		batch->gmem = fd_submit_new_ringbuffer(batch->submit, size,
				FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);
		batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
				FD_RINGBUFFER_GROWABLE);

		if (ctx->screen->gpu_id < 600) {
			batch->binning = fd_submit_new_ringbuffer(batch->submit,
					size, FD_RINGBUFFER_GROWABLE);
		}
	}

	batch->in_fence_fd = -1;
	batch->fence = fd_fence_create(batch);

	batch->cleared = 0;
	batch->fast_cleared = 0;
	batch->invalidated = 0;
	batch->restore = batch->resolve = 0;
	batch->needs_flush = false;
	batch->flushed = false;
	batch->gmem_reason = 0;
	batch->num_draws = 0;
	batch->num_vertices = 0;
	batch->stage = FD_STAGE_NULL;

	fd_reset_wfi(batch);

	util_dynarray_init(&batch->draw_patches, NULL);
	util_dynarray_init(&batch->fb_read_patches, NULL);

	if (is_a2xx(ctx->screen)) {
		util_dynarray_init(&batch->shader_patches, NULL);
		util_dynarray_init(&batch->gmem_patches, NULL);
	}

	if (is_a3xx(ctx->screen))
		util_dynarray_init(&batch->rbrc_patches, NULL);

	assert(batch->resources->entries == 0);

	util_dynarray_init(&batch->samples, NULL);
}

struct fd_batch *
fd_batch_create(struct fd_context *ctx, bool nondraw)
{
	struct fd_batch *batch = CALLOC_STRUCT(fd_batch);

	if (!batch)
		return NULL;

	DBG("%p", batch);

	pipe_reference_init(&batch->reference, 1);
	batch->ctx = ctx;
	batch->nondraw = nondraw;

	batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer,
			_mesa_key_pointer_equal);

	batch_init(batch);

	return batch;
}

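/* Release everything allocated by batch_init().  Called both on final
 * destroy and from batch_reset(), which immediately re-inits the batch.
 */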
static void
batch_fini(struct fd_batch *batch)
{
	DBG("%p", batch);

	pipe_resource_reference(&batch->query_buf, NULL);

	if (batch->in_fence_fd != -1)
		close(batch->in_fence_fd);

	/* in case batch wasn't flushed but fence was created: */
	fd_fence_populate(batch->fence, 0, -1);

	fd_fence_ref(NULL, &batch->fence, NULL);

	fd_ringbuffer_del(batch->draw);
	if (!batch->nondraw) {
		if (batch->binning)
			fd_ringbuffer_del(batch->binning);
		fd_ringbuffer_del(batch->gmem);
	} else {
		debug_assert(!batch->binning);
		debug_assert(!batch->gmem);
	}

	if (batch->lrz_clear) {
		fd_ringbuffer_del(batch->lrz_clear);
		batch->lrz_clear = NULL;
	}

	if (batch->tile_setup) {
		fd_ringbuffer_del(batch->tile_setup);
		batch->tile_setup = NULL;
	}

	if (batch->tile_fini) {
		fd_ringbuffer_del(batch->tile_fini);
		batch->tile_fini = NULL;
	}

	fd_submit_del(batch->submit);

	util_dynarray_fini(&batch->draw_patches);
	util_dynarray_fini(&batch->fb_read_patches);

	if (is_a2xx(batch->ctx->screen)) {
		util_dynarray_fini(&batch->shader_patches);
		util_dynarray_fini(&batch->gmem_patches);
	}

	if (is_a3xx(batch->ctx->screen))
		util_dynarray_fini(&batch->rbrc_patches);

	while (batch->samples.size > 0) {
		struct fd_hw_sample *samp =
			util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
		fd_hw_sample_reference(batch->ctx, &samp, NULL);
	}
	util_dynarray_fini(&batch->samples);

	if (batch->ctx->screen->reorder)
		util_queue_fence_destroy(&batch->flush_fence);
}

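/* Flush (if requested) and unreference every batch that this batch depends
 * on, then clear the dependency mask.  The references being dropped here
 * are the ones taken in fd_batch_add_dep().
 */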
static void
batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	foreach_batch(dep, cache, batch->dependents_mask) {
		if (flush)
			fd_batch_flush(dep, false, false);
		fd_batch_reference(&dep, NULL);
	}

	batch->dependents_mask = 0;
}

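/* Remove this batch from the tracking state of every resource it touched:
 * clear the batch's bit in each rsc->batch_mask and drop rsc->write_batch
 * if it points at this batch.
 */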
static void
batch_reset_resources_locked(struct fd_batch *batch)
{
	pipe_mutex_assert_locked(batch->ctx->screen->lock);

	set_foreach(batch->resources, entry) {
		struct fd_resource *rsc = (struct fd_resource *)entry->key;
		_mesa_set_remove(batch->resources, entry);
		debug_assert(rsc->batch_mask & (1 << batch->idx));
		rsc->batch_mask &= ~(1 << batch->idx);
		if (rsc->write_batch == batch)
			fd_batch_reference_locked(&rsc->write_batch, NULL);
	}
}

static void
batch_reset_resources(struct fd_batch *batch)
{
	mtx_lock(&batch->ctx->screen->lock);
	batch_reset_resources_locked(batch);
	mtx_unlock(&batch->ctx->screen->lock);
}

static void
batch_reset(struct fd_batch *batch)
{
	DBG("%p", batch);

	fd_batch_sync(batch);

	batch_flush_reset_dependencies(batch, false);
	batch_reset_resources(batch);

	batch_fini(batch);
	batch_init(batch);
}

void
fd_batch_reset(struct fd_batch *batch)
{
	if (batch->needs_flush)
		batch_reset(batch);
}

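/* Called (via fd_batch_reference()/fd_batch_reference_locked()) once the
 * last reference to the batch is dropped.  The context lock must already
 * be held by the caller.
 */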
void
__fd_batch_destroy(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;

	DBG("%p", batch);

	fd_context_assert_locked(batch->ctx);

	fd_bc_invalidate_batch(batch, true);

	batch_reset_resources_locked(batch);
	debug_assert(batch->resources->entries == 0);
	_mesa_set_destroy(batch->resources, NULL);

	fd_context_unlock(ctx);
	batch_flush_reset_dependencies(batch, false);
	debug_assert(batch->dependents_mask == 0);

	util_copy_framebuffer_state(&batch->framebuffer, NULL);
	batch_fini(batch);
	free(batch);
	fd_context_lock(ctx);
}

void
__fd_batch_describe(char* buf, const struct fd_batch *batch)
{
	util_sprintf(buf, "fd_batch<%u>", batch->seqno);
}

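/* Wait for this batch's asynchronous flush job to complete.  No-op unless
 * the screen was created with reordering (threaded flush_queue) enabled.
 */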
void
fd_batch_sync(struct fd_batch *batch)
{
	if (!batch->ctx->screen->reorder)
		return;
	util_queue_fence_wait(&batch->flush_fence);
}

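/* Job callbacks that run on the context's flush_queue thread when
 * reordering is enabled: batch_flush_func() does the actual tile
 * rendering, and batch_cleanup_func() drops the reference taken for the
 * queued job in batch_flush().
 */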
static void
batch_flush_func(void *job, int id)
{
	struct fd_batch *batch = job;

	DBG("%p", batch);

	fd_gmem_render_tiles(batch);
	batch_reset_resources(batch);
}

static void
batch_cleanup_func(void *job, int id)
{
	struct fd_batch *batch = job;
	fd_batch_reference(&batch, NULL);
}

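/* Flush the batch: either queue the render job on the flush_queue (reorder
 * case) or render the tiles synchronously, then invalidate the batch in
 * the batch cache.  No-op if the batch was already flushed.
 */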
static void
batch_flush(struct fd_batch *batch, bool force)
{
	DBG("%p: needs_flush=%d", batch, batch->needs_flush);

	if (batch->flushed)
		return;

	batch->needs_flush = false;

	/* close out the draw cmds by making sure any active queries are
	 * paused:
	 */
	fd_batch_set_stage(batch, FD_STAGE_NULL);

	batch_flush_reset_dependencies(batch, true);

	batch->flushed = true;

	if (batch->ctx->screen->reorder) {
		struct fd_batch *tmp = NULL;
		fd_batch_reference(&tmp, batch);

		if (!util_queue_is_initialized(&batch->ctx->flush_queue))
			util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1, 0);

		util_queue_add_job(&batch->ctx->flush_queue,
				batch, &batch->flush_fence,
				batch_flush_func, batch_cleanup_func);
	} else {
		fd_gmem_render_tiles(batch);
		batch_reset_resources(batch);
	}

	debug_assert(batch->reference.count > 0);

	mtx_lock(&batch->ctx->screen->lock);
	fd_bc_invalidate_batch(batch, false);
	mtx_unlock(&batch->ctx->screen->lock);
}

/* NOTE: could drop the last ref to batch
 *
 * @sync: synchronize with flush_queue, ensures batch is *actually* flushed
 *   to kernel before this returns, as opposed to just being queued to be
 *   flushed
 * @force: force a flush even if no rendering, mostly useful if you need
 *   a fence to sync on
 */
void
fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
{
	struct fd_batch *tmp = NULL;
	bool newbatch = false;

	/* NOTE: we need to hold an extra ref across the body of flush,
	 * since the last ref to this batch could be dropped when cleaning
	 * up used_resources
	 */
	fd_batch_reference(&tmp, batch);

	if (batch == batch->ctx->batch) {
		batch->ctx->batch = NULL;
		newbatch = true;
	}

	batch_flush(tmp, force);

	if (newbatch) {
		struct fd_context *ctx = batch->ctx;
		struct fd_batch *new_batch;

		if (ctx->screen->reorder) {
			/* defer allocating new batch until one is needed for rendering
			 * to avoid unused batches for apps that create many contexts
			 */
			new_batch = NULL;
		} else {
			new_batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, false);
			util_copy_framebuffer_state(&new_batch->framebuffer, &batch->framebuffer);
		}

		fd_batch_reference(&batch, NULL);
		ctx->batch = new_batch;
		fd_context_all_dirty(ctx);
	}

	if (sync)
		fd_batch_sync(tmp);

	fd_batch_reference(&tmp, NULL);
}

/* does 'batch' depend directly or indirectly on 'other' ? */
static bool
batch_depends_on(struct fd_batch *batch, struct fd_batch *other)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	if (batch->dependents_mask & (1 << other->idx))
		return true;

	/* recurse into our direct dependencies to find indirect ones: */
	foreach_batch(dep, cache, batch->dependents_mask)
		if (batch_depends_on(dep, other))
			return true;

	return false;
}

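/* Record that 'batch' depends on 'dep' (ie. 'dep' must be flushed before
 * 'batch').  Caller must hold the screen lock; the reference taken here is
 * dropped in batch_flush_reset_dependencies().
 */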
void
fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
	if (batch->dependents_mask & (1 << dep->idx))
		return;

	/* a loop should not be possible */
	debug_assert(!batch_depends_on(dep, batch));

	struct fd_batch *other = NULL;
	fd_batch_reference_locked(&other, dep);
	batch->dependents_mask |= (1 << dep->idx);
	DBG("%p: added dependency on %p", batch, dep);
}

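/* Flush the batch that currently writes 'rsc'.  The screen lock must be
 * dropped around the flush, since flushing takes it again.
 */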
static void
flush_write_batch(struct fd_resource *rsc)
{
	struct fd_batch *b = NULL;
	fd_batch_reference_locked(&b, rsc->write_batch);

	mtx_unlock(&b->ctx->screen->lock);
	fd_batch_flush(b, true, false);
	mtx_lock(&b->ctx->screen->lock);

	fd_bc_invalidate_batch(b, false);
	fd_batch_reference_locked(&b, NULL);
}

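/* Track that 'rsc' is read or written by 'batch': add it to the batch's
 * resource set and set the batch's bit in rsc->batch_mask.  For writes,
 * resolve hazards against other batches touching the resource by flushing
 * the current writer and adding dependencies; for reads, flush any pending
 * writer first.  Caller must hold the screen lock.
 */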
void
fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
{
	pipe_mutex_assert_locked(batch->ctx->screen->lock);

	if (rsc->stencil)
		fd_batch_resource_used(batch, rsc->stencil, write);

	DBG("%p: %s %p", batch, write ? "write" : "read", rsc);

	if (write)
		rsc->valid = true;

	/* note, invalidate write batch, to avoid further writes to rsc
	 * resulting in a write-after-read hazard.
	 */

	if (write) {
		/* if we are pending read or write by any other batch: */
		if (rsc->batch_mask & ~(1 << batch->idx)) {
			struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
			struct fd_batch *dep;

			if (rsc->write_batch && rsc->write_batch != batch)
				flush_write_batch(rsc);

			foreach_batch(dep, cache, rsc->batch_mask) {
				struct fd_batch *b = NULL;
				if (dep == batch)
					continue;
				/* note that batch_add_dep could flush and unref dep, so
				 * we need to hold a reference to keep it live for the
				 * fd_bc_invalidate_batch()
				 */
				fd_batch_reference(&b, dep);
				fd_batch_add_dep(batch, b);
				fd_bc_invalidate_batch(b, false);
				fd_batch_reference_locked(&b, NULL);
			}
		}
		fd_batch_reference_locked(&rsc->write_batch, batch);
	} else {
		/* If reading a resource pending a write, go ahead and flush the
		 * writer.  This avoids situations where we end up having to
		 * flush the current batch in _resource_used()
		 */
		if (rsc->write_batch && rsc->write_batch != batch)
			flush_write_batch(rsc);
	}

	if (rsc->batch_mask & (1 << batch->idx)) {
		debug_assert(_mesa_set_search(batch->resources, rsc));
		return;
	}

	debug_assert(!_mesa_set_search(batch->resources, rsc));

	_mesa_set_add(batch->resources, rsc);
	rsc->batch_mask |= (1 << batch->idx);
}

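/* If the kernel cannot grow cmdstream buffers, flush before the fixed-size
 * draw ring gets too close to full (ring->size is in bytes while cur/start
 * advance in dwords, hence the size/4 with some headroom).
 */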
void
fd_batch_check_size(struct fd_batch *batch)
{
	debug_assert(!batch->flushed);

	if (unlikely(fd_mesa_debug & FD_DBG_FLUSH)) {
		fd_batch_flush(batch, true, false);
		return;
	}

	if (fd_device_version(batch->ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS)
		return;

	struct fd_ringbuffer *ring = batch->draw;
	if ((ring->cur - ring->start) > (ring->size/4 - 0x1000))
		fd_batch_flush(batch, true, false);
}

/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
 * been one since last draw:
 */
void
fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	if (batch->needs_wfi) {
		if (batch->ctx->screen->gpu_id >= 500)
			OUT_WFI5(ring);
		else
			OUT_WFI(ring);
		batch->needs_wfi = false;
	}
}