/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

27848b8605Smrg#include "pipe/p_state.h"
28b8e80941Smrg#include "util/u_draw.h"
29848b8605Smrg#include "util/u_string.h"
30848b8605Smrg#include "util/u_memory.h"
31848b8605Smrg#include "util/u_prim.h"
32848b8605Smrg#include "util/u_format.h"
33b8e80941Smrg#include "util/u_helpers.h"
34848b8605Smrg
35b8e80941Smrg#include "freedreno_blitter.h"
36848b8605Smrg#include "freedreno_draw.h"
37848b8605Smrg#include "freedreno_context.h"
38b8e80941Smrg#include "freedreno_fence.h"
39848b8605Smrg#include "freedreno_state.h"
40848b8605Smrg#include "freedreno_resource.h"
41b8e80941Smrg#include "freedreno_query_acc.h"
42848b8605Smrg#include "freedreno_query_hw.h"
43848b8605Smrg#include "freedreno_util.h"
44848b8605Smrg
45b8e80941Smrgstatic void
46b8e80941Smrgresource_read(struct fd_batch *batch, struct pipe_resource *prsc)
47b8e80941Smrg{
48b8e80941Smrg	if (!prsc)
49b8e80941Smrg		return;
50b8e80941Smrg	fd_batch_resource_used(batch, fd_resource(prsc), false);
51b8e80941Smrg}
52b8e80941Smrg
53b8e80941Smrgstatic void
54b8e80941Smrgresource_written(struct fd_batch *batch, struct pipe_resource *prsc)
55b8e80941Smrg{
56b8e80941Smrg	if (!prsc)
57b8e80941Smrg		return;
58b8e80941Smrg	fd_batch_resource_used(batch, fd_resource(prsc), true);
59b8e80941Smrg}
60848b8605Smrg
/*
 * Common draw entry point: validates/trims the draw, records which GMEM
 * buffers and resources the batch touches, updates stats, then hands off
 * to the per-generation backend via ctx->draw_vbo().
 */
static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_batch *batch = fd_context_batch(ctx);
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
	unsigned i, prims, buffers = 0, restore_buffers = 0;

	/* for debugging problems with indirect draw, it is convenient
	 * to be able to emulate it, to determine if game is feeding us
	 * bogus data:
	 */
	if (info->indirect && (fd_mesa_debug & FD_DBG_NOINDR)) {
		util_draw_indirect(pctx, info);
		return;
	}

	/* Skip draws whose vertex count is too small to form any primitive,
	 * unless the real count comes from elsewhere (stream-out / indirect)
	 * or primitive-restart might change the effective count.
	 * NOTE(review): the cast writes through a const pipe_draw_info; this
	 * mirrors the u_trim_pipe_prim() calling convention used upstream.
	 */
	if (!info->count_from_stream_output && !info->indirect &&
	    !info->primitive_restart &&
	    !u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
		return;

	/* if we supported transform feedback, we'd have to disable this: */
	if (((scissor->maxx - scissor->minx) *
			(scissor->maxy - scissor->miny)) == 0) {
		return;
	}

	/* TODO: push down the region versions into the tiles */
	if (!fd_render_condition_check(pctx))
		return;

	/* emulate unsupported primitives: */
	if (!fd_supported_prim(ctx, info->mode)) {
		if (ctx->streamout.num_targets > 0)
			debug_error("stream-out with emulated prims");
		util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
		util_primconvert_draw_vbo(ctx->primconvert, info);
		return;
	}

	/* a new draw invalidates the previously-returned fence: */
	fd_fence_ref(pctx->screen, &ctx->last_fence, NULL);

	/* Upload a user index buffer. */
	struct pipe_resource *indexbuf = NULL;
	unsigned index_offset = 0;
	struct pipe_draw_info new_info;
	if (info->index_size) {
		if (info->has_user_indices) {
			/* copy user indices into a real buffer; on success 'info' is
			 * redirected to a local copy referencing that buffer:
			 */
			if (!util_upload_index_buffer(pctx, info, &indexbuf, &index_offset))
				return;
			new_info = *info;
			new_info.index.resource = indexbuf;
			new_info.has_user_indices = false;
			info = &new_info;
		} else {
			indexbuf = info->index.resource;
		}
	}

	if (ctx->in_blit) {
		fd_batch_reset(batch);
		fd_context_all_dirty(ctx);
	}

	batch->blit = ctx->in_blit;
	batch->back_blit = ctx->in_shadow;

	/* NOTE: needs to be before resource_written(batch->query_buf), otherwise
	 * query_buf may not be created yet.
	 */
	fd_batch_set_stage(batch, FD_STAGE_DRAW);

	/*
	 * Figure out the buffers/features we need:
	 */

	/* screen lock is held across all the resource_read()/resource_written()
	 * tracking below:
	 */
	mtx_lock(&ctx->screen->lock);

	if (ctx->dirty & (FD_DIRTY_FRAMEBUFFER | FD_DIRTY_ZSA)) {
		if (fd_depth_enabled(ctx)) {
			/* only restore depth contents from sysmem if they are valid: */
			if (fd_resource(pfb->zsbuf->texture)->valid) {
				restore_buffers |= FD_BUFFER_DEPTH;
			} else {
				batch->invalidated |= FD_BUFFER_DEPTH;
			}
			batch->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
			if (fd_depth_write_enabled(ctx)) {
				buffers |= FD_BUFFER_DEPTH;
				resource_written(batch, pfb->zsbuf->texture);
			} else {
				resource_read(batch, pfb->zsbuf->texture);
			}
		}

		if (fd_stencil_enabled(ctx)) {
			if (fd_resource(pfb->zsbuf->texture)->valid) {
				restore_buffers |= FD_BUFFER_STENCIL;
			} else {
				batch->invalidated |= FD_BUFFER_STENCIL;
			}
			batch->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
			buffers |= FD_BUFFER_STENCIL;
			resource_written(batch, pfb->zsbuf->texture);
		}
	}

	if (fd_logicop_enabled(ctx))
		batch->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;

	for (i = 0; i < pfb->nr_cbufs; i++) {
		struct pipe_resource *surf;

		if (!pfb->cbufs[i])
			continue;

		surf = pfb->cbufs[i]->texture;

		if (fd_resource(surf)->valid) {
			restore_buffers |= PIPE_CLEAR_COLOR0 << i;
		} else {
			batch->invalidated |= PIPE_CLEAR_COLOR0 << i;
		}

		buffers |= PIPE_CLEAR_COLOR0 << i;

		if (fd_blend_enabled(ctx, i))
			batch->gmem_reason |= FD_GMEM_BLEND_ENABLED;

		if (ctx->dirty & FD_DIRTY_FRAMEBUFFER)
			resource_written(batch, pfb->cbufs[i]->texture);
	}

	/* Mark SSBOs as being written.. we don't actually know which ones are
	 * read vs written, so just assume the worst
	 */
	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_SSBO) {
		foreach_bit(i, ctx->shaderbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
				resource_written(batch, ctx->shaderbuf[PIPE_SHADER_FRAGMENT].sb[i].buffer);
	}

	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_IMAGE) {
		foreach_bit(i, ctx->shaderimg[PIPE_SHADER_FRAGMENT].enabled_mask) {
			struct pipe_image_view *img =
					&ctx->shaderimg[PIPE_SHADER_FRAGMENT].si[i];
			if (img->access & PIPE_IMAGE_ACCESS_WRITE)
				resource_written(batch, img->resource);
			else
				resource_read(batch, img->resource);
		}
	}

	if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_CONST) {
		foreach_bit(i, ctx->constbuf[PIPE_SHADER_VERTEX].enabled_mask)
			resource_read(batch, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
	}

	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_CONST) {
		foreach_bit(i, ctx->constbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
			resource_read(batch, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);
	}

	/* Mark VBOs as being read */
	if (ctx->dirty & FD_DIRTY_VTXBUF) {
		foreach_bit(i, ctx->vtx.vertexbuf.enabled_mask) {
			assert(!ctx->vtx.vertexbuf.vb[i].is_user_buffer);
			resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer.resource);
		}
	}

	/* Mark index buffer as being read */
	resource_read(batch, indexbuf);

	/* Mark indirect draw buffer as being read */
	if (info->indirect)
		resource_read(batch, info->indirect->buffer);

	/* Mark textures as being read */
	if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_TEX) {
		foreach_bit(i, ctx->tex[PIPE_SHADER_VERTEX].valid_textures)
			resource_read(batch, ctx->tex[PIPE_SHADER_VERTEX].textures[i]->texture);
	}

	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_TEX) {
		foreach_bit(i, ctx->tex[PIPE_SHADER_FRAGMENT].valid_textures)
			resource_read(batch, ctx->tex[PIPE_SHADER_FRAGMENT].textures[i]->texture);
	}

	/* Mark streamout buffers as being written.. */
	if (ctx->dirty & FD_DIRTY_STREAMOUT) {
		for (i = 0; i < ctx->streamout.num_targets; i++)
			if (ctx->streamout.targets[i])
				resource_written(batch, ctx->streamout.targets[i]->buffer);
	}

	resource_written(batch, batch->query_buf);

	/* active accumulating queries also land results in their buffers: */
	list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
		resource_written(batch, aq->prsc);

	mtx_unlock(&ctx->screen->lock);

	batch->num_draws++;

	prims = u_reduced_prims_for_vertices(info->mode, info->count);

	ctx->stats.draw_calls++;

	/* TODO prims_emitted should be clipped when the stream-out buffer is
	 * not large enough.  See max_tf_vtx().. probably need to move that
	 * into common code.  Although a bit more annoying since a2xx doesn't
	 * use ir3 so no common way to get at the pipe_stream_output_info
	 * which is needed for this calculation.
	 */
	if (ctx->streamout.num_targets > 0)
		ctx->stats.prims_emitted += prims;
	ctx->stats.prims_generated += prims;

	/* any buffers that haven't been cleared yet, we need to restore: */
	batch->restore |= restore_buffers & (FD_BUFFER_ALL & ~batch->invalidated);
	/* and any buffers used, need to be resolved: */
	batch->resolve |= buffers;

	DBG("%p: %x %ux%u num_draws=%u (%s/%s)", batch, buffers,
		pfb->width, pfb->height, batch->num_draws,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

	/* per-generation backend does the actual cmdstream emit: */
	if (ctx->draw_vbo(ctx, info, index_offset))
		batch->needs_flush = true;

	batch->num_vertices += info->count * info->instance_count;

	for (i = 0; i < ctx->streamout.num_targets; i++)
		ctx->streamout.offsets[i] += info->count;

	if (fd_mesa_debug & FD_DBG_DDRAW)
		fd_context_all_dirty(ctx);

	fd_batch_check_size(batch);

	/* drop the reference taken by util_upload_index_buffer(): */
	if (info == &new_info)
		pipe_resource_reference(&indexbuf, NULL);
}
306848b8605Smrg
/*
 * Full-surface clear entry point (pctx->clear).  Records clear state on the
 * current batch and tries the per-generation ctx->clear() fast path, falling
 * back to a blitter-based clear.
 */
static void
fd_clear(struct pipe_context *pctx, unsigned buffers,
		const union pipe_color_union *color, double depth, unsigned stencil)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_batch *batch = fd_context_batch(ctx);
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	unsigned cleared_buffers;
	int i;

	/* TODO: push down the region versions into the tiles */
	if (!fd_render_condition_check(pctx))
		return;

	/* a new clear invalidates the previously-returned fence: */
	fd_fence_ref(pctx->screen, &ctx->last_fence, NULL);

	if (ctx->in_blit) {
		fd_batch_reset(batch);
		fd_context_all_dirty(ctx);
	}

	/* pctx->clear() is only for full-surface clears, so scissor is
	 * equivalent to having GL_SCISSOR_TEST disabled:
	 */
	batch->max_scissor.minx = 0;
	batch->max_scissor.miny = 0;
	batch->max_scissor.maxx = pfb->width;
	batch->max_scissor.maxy = pfb->height;

	/* for bookkeeping about which buffers have been cleared (and thus
	 * can fully or partially skip mem2gmem) we need to ignore buffers
	 * that have already had a draw, in case apps do silly things like
	 * clear after draw (ie. if you only clear the color buffer, but
	 * something like alpha-test causes side effects from the draw in
	 * the depth buffer, etc)
	 */
	cleared_buffers = buffers & (FD_BUFFER_ALL & ~batch->restore);
	batch->cleared |= buffers;
	batch->invalidated |= cleared_buffers;

	batch->resolve |= buffers;
	batch->needs_flush = true;

	/* screen lock held across resource tracking, as in fd_draw_vbo(): */
	mtx_lock(&ctx->screen->lock);

	if (buffers & PIPE_CLEAR_COLOR)
		for (i = 0; i < pfb->nr_cbufs; i++)
			if (buffers & (PIPE_CLEAR_COLOR0 << i))
				resource_written(batch, pfb->cbufs[i]->texture);

	if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
		resource_written(batch, pfb->zsbuf->texture);
		batch->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
	}

	resource_written(batch, batch->query_buf);

	/* active accumulating queries also land results in their buffers: */
	list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
		resource_written(batch, aq->prsc);

	mtx_unlock(&ctx->screen->lock);

	DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch, buffers,
		pfb->width, pfb->height, depth, stencil,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

	/* if per-gen backend doesn't implement ctx->clear() generic
	 * blitter clear:
	 */
	bool fallback = true;

	if (ctx->clear) {
		fd_batch_set_stage(batch, FD_STAGE_CLEAR);

		/* backend returns true if it handled the clear: */
		if (ctx->clear(ctx, buffers, color, depth, stencil)) {
			if (fd_mesa_debug & FD_DBG_DCLEAR)
				fd_context_all_dirty(ctx);

			fallback = false;
		}
	}

	if (fallback) {
		fd_blitter_clear(pctx, buffers, color, depth, stencil);
	}

	fd_batch_check_size(batch);
}
396848b8605Smrg
/*
 * pctx->clear_render_target hook — scissored color clear.
 * Not implemented yet; currently just logs the request.
 */
static void
fd_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
		const union pipe_color_union *color,
		unsigned x, unsigned y, unsigned w, unsigned h,
		bool render_condition_enabled)
{
	DBG("TODO: x=%u, y=%u, w=%u, h=%u", x, y, w, h);
}
405848b8605Smrg
/*
 * pctx->clear_depth_stencil hook — scissored depth/stencil clear.
 * Not implemented yet; currently just logs the request.
 */
static void
fd_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
		unsigned buffers, double depth, unsigned stencil,
		unsigned x, unsigned y, unsigned w, unsigned h,
		bool render_condition_enabled)
{
	DBG("TODO: buffers=%u, depth=%f, stencil=%u, x=%u, y=%u, w=%u, h=%u",
			buffers, depth, stencil, x, y, w, h);
}
415848b8605Smrg
/*
 * Compute dispatch entry point (pctx->launch_grid).  Runs the grid on a
 * fresh batch (temporarily swapped in as ctx->batch), tracks the resources
 * it touches, and flushes the batch before restoring the previous one.
 */
static void
fd_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_batch *batch, *save_batch = NULL;
	unsigned i;

	/* compute gets a dedicated (non-tiling) batch: */
	batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, true);
	fd_batch_reference(&save_batch, ctx->batch);
	fd_batch_reference(&ctx->batch, batch);
	fd_context_all_dirty(ctx);

	/* screen lock held across resource tracking, as in fd_draw_vbo(): */
	mtx_lock(&ctx->screen->lock);

	/* Mark SSBOs as being written.. we don't actually know which ones are
	 * read vs written, so just assume the worst
	 */
	foreach_bit(i, ctx->shaderbuf[PIPE_SHADER_COMPUTE].enabled_mask)
		resource_written(batch, ctx->shaderbuf[PIPE_SHADER_COMPUTE].sb[i].buffer);

	foreach_bit(i, ctx->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask) {
		struct pipe_image_view *img =
			&ctx->shaderimg[PIPE_SHADER_COMPUTE].si[i];
		if (img->access & PIPE_IMAGE_ACCESS_WRITE)
			resource_written(batch, img->resource);
		else
			resource_read(batch, img->resource);
	}

	/* UBO's are read */
	foreach_bit(i, ctx->constbuf[PIPE_SHADER_COMPUTE].enabled_mask)
		resource_read(batch, ctx->constbuf[PIPE_SHADER_COMPUTE].cb[i].buffer);

	/* Mark textures as being read */
	foreach_bit(i, ctx->tex[PIPE_SHADER_COMPUTE].valid_textures)
		resource_read(batch, ctx->tex[PIPE_SHADER_COMPUTE].textures[i]->texture);

	/* For global buffers, we don't really know if read or written, so assume
	 * the worst:
	 */
	foreach_bit(i, ctx->global_bindings.enabled_mask)
		resource_written(batch, ctx->global_bindings.buf[i]);

	/* indirect dispatch reads the grid dimensions from a buffer: */
	if (info->indirect)
		resource_read(batch, info->indirect);

	mtx_unlock(&ctx->screen->lock);

	batch->needs_flush = true;
	/* per-generation backend emits the actual dispatch: */
	ctx->launch_grid(ctx, info);

	fd_batch_flush(batch, false, false);

	/* restore the previous batch and drop our references: */
	fd_batch_reference(&ctx->batch, save_batch);
	fd_context_all_dirty(ctx);
	fd_batch_reference(&save_batch, NULL);
	fd_batch_reference(&batch, NULL);
}
474b8e80941Smrg
475848b8605Smrgvoid
476848b8605Smrgfd_draw_init(struct pipe_context *pctx)
477848b8605Smrg{
478848b8605Smrg	pctx->draw_vbo = fd_draw_vbo;
479848b8605Smrg	pctx->clear = fd_clear;
480848b8605Smrg	pctx->clear_render_target = fd_clear_render_target;
481848b8605Smrg	pctx->clear_depth_stencil = fd_clear_depth_stencil;
482b8e80941Smrg
483b8e80941Smrg	if (has_compute(fd_screen(pctx->screen))) {
484b8e80941Smrg		pctx->launch_grid = fd_launch_grid;
485b8e80941Smrg	}
486848b8605Smrg}
487