/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "radeonsi/si_pipe.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/u_transfer.h"
#include <inttypes.h>
#include <stdio.h>

bool si_rings_is_buffer_referenced(struct si_context *sctx,
				   struct pb_buffer *buf,
				   enum radeon_bo_usage usage)
{
	if (sctx->ws->cs_is_buffer_referenced(sctx->gfx_cs, buf, usage)) {
		return true;
	}
	if (radeon_emitted(sctx->dma_cs, 0) &&
	    sctx->ws->cs_is_buffer_referenced(sctx->dma_cs, buf, usage)) {
		return true;
	}
	return false;
}
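
/* Example (illustrative sketch): a caller that wants to know whether a
 * wait-free CPU map is possible can combine this reference check with a
 * zero-timeout busy query, which is the pattern si_invalidate_buffer and
 * si_buffer_transfer_map use below:
 *
 *	bool busy = si_rings_is_buffer_referenced(sctx, res->buf,
 *						  RADEON_USAGE_READWRITE) ||
 *		    !sctx->ws->buffer_wait(res->buf, 0, RADEON_USAGE_READWRITE);
 */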

void *si_buffer_map_sync_with_rings(struct si_context *sctx,
				    struct si_resource *resource,
				    unsigned usage)
{
	enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
	bool busy = false;

	assert(!(resource->flags & RADEON_FLAG_SPARSE));

	if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
		return sctx->ws->buffer_map(resource->buf, NULL, usage);
	}

	if (!(usage & PIPE_TRANSFER_WRITE)) {
		/* have to wait for the last write */
		rusage = RADEON_USAGE_WRITE;
	}

	if (radeon_emitted(sctx->gfx_cs, sctx->initial_gfx_cs_size) &&
	    sctx->ws->cs_is_buffer_referenced(sctx->gfx_cs,
						resource->buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
			return NULL;
		} else {
			si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
			busy = true;
		}
	}
	if (radeon_emitted(sctx->dma_cs, 0) &&
	    sctx->ws->cs_is_buffer_referenced(sctx->dma_cs,
						resource->buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			si_flush_dma_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			si_flush_dma_cs(sctx, 0, NULL);
			busy = true;
		}
	}

	if (busy || !sctx->ws->buffer_wait(resource->buf, 0, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			return NULL;
		} else {
			/* We will be waiting for the GPU. Wait for any offloaded
			 * CS flush to complete to avoid busy-waiting in the winsys. */
			sctx->ws->cs_sync_flush(sctx->gfx_cs);
			if (sctx->dma_cs)
				sctx->ws->cs_sync_flush(sctx->dma_cs);
		}
	}

	/* Setting the CS to NULL will prevent doing checks we have done already. */
	return sctx->ws->buffer_map(resource->buf, NULL, usage);
}
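
/* Example (illustrative sketch): callers that cannot stall pass
 * PIPE_TRANSFER_DONTBLOCK and must handle a NULL result:
 *
 *	void *ptr = si_buffer_map_sync_with_rings(sctx, res,
 *						  PIPE_TRANSFER_READ |
 *						  PIPE_TRANSFER_DONTBLOCK);
 *	if (!ptr) {
 *		// Busy: the flush above was already kicked off
 *		// asynchronously, so retry later or take a slow path.
 *	}
 */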

void si_init_resource_fields(struct si_screen *sscreen,
			     struct si_resource *res,
			     uint64_t size, unsigned alignment)
{
	struct si_texture *tex = (struct si_texture*)res;

	res->bo_size = size;
	res->bo_alignment = alignment;
	res->flags = 0;
	res->texture_handle_allocated = false;
	res->image_handle_allocated = false;

	switch (res->b.b.usage) {
	case PIPE_USAGE_STREAM:
		res->flags = RADEON_FLAG_GTT_WC;
		/* fall through */
	case PIPE_USAGE_STAGING:
		/* Transfers are likely to occur more often with these
		 * resources. */
		res->domains = RADEON_DOMAIN_GTT;
		break;
	case PIPE_USAGE_DYNAMIC:
		/* Older kernels didn't always flush the HDP cache before
		 * CS execution.
		 */
		if (!sscreen->info.kernel_flushes_hdp_before_ib) {
			res->domains = RADEON_DOMAIN_GTT;
			res->flags |= RADEON_FLAG_GTT_WC;
			break;
		}
		/* fall through */
	case PIPE_USAGE_DEFAULT:
	case PIPE_USAGE_IMMUTABLE:
	default:
		/* Not listing GTT here improves performance in some
		 * apps. */
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags |= RADEON_FLAG_GTT_WC;
		break;
	}

	if (res->b.b.target == PIPE_BUFFER &&
	    res->b.b.flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) {
		/* Use GTT for all persistent mappings with older
		 * kernels, because they didn't always flush the HDP
		 * cache before CS execution.
		 *
		 * Write-combined CPU mappings are fine, the kernel
		 * ensures all CPU writes finish before the GPU
		 * executes a command stream.
		 *
		 * radeon doesn't have good BO move throttling, so put all
		 * persistent buffers into GTT to prevent VRAM CPU page faults.
		 */
		if (!sscreen->info.kernel_flushes_hdp_before_ib ||
		    sscreen->info.drm_major == 2)
			res->domains = RADEON_DOMAIN_GTT;
	}

	/* Tiled textures are unmappable. Always put them in VRAM. */
	if ((res->b.b.target != PIPE_BUFFER && !tex->surface.is_linear) ||
	    res->b.b.flags & SI_RESOURCE_FLAG_UNMAPPABLE) {
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags |= RADEON_FLAG_NO_CPU_ACCESS |
			 RADEON_FLAG_GTT_WC;
	}

	/* Displayable and shareable surfaces are not suballocated. */
	if (res->b.b.bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT))
		res->flags |= RADEON_FLAG_NO_SUBALLOC; /* shareable */
	else
		res->flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;

	if (sscreen->debug_flags & DBG(NO_WC))
		res->flags &= ~RADEON_FLAG_GTT_WC;

	if (res->b.b.flags & SI_RESOURCE_FLAG_READ_ONLY)
		res->flags |= RADEON_FLAG_READ_ONLY;

	if (res->b.b.flags & SI_RESOURCE_FLAG_32BIT)
		res->flags |= RADEON_FLAG_32BIT;

	/* Set expected VRAM and GART usage for the buffer. */
	res->vram_usage = 0;
	res->gart_usage = 0;
	res->max_forced_staging_uploads = 0;
	res->b.max_forced_staging_uploads = 0;

	if (res->domains & RADEON_DOMAIN_VRAM) {
		res->vram_usage = size;

		res->max_forced_staging_uploads =
		res->b.max_forced_staging_uploads =
			sscreen->info.has_dedicated_vram &&
			size >= sscreen->info.vram_vis_size / 4 ? 1 : 0;
	} else if (res->domains & RADEON_DOMAIN_GTT) {
		res->gart_usage = size;
	}
}
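
/* Example (hypothetical template): tracing the switch above for a
 * streaming vertex buffer:
 *
 *	struct pipe_resource templ = {0};
 *	templ.target = PIPE_BUFFER;
 *	templ.usage = PIPE_USAGE_STREAM;
 *	// after si_init_resource_fields():
 *	//   res->domains == RADEON_DOMAIN_GTT
 *	//   res->flags   == RADEON_FLAG_GTT_WC |
 *	//                   RADEON_FLAG_NO_INTERPROCESS_SHARING
 */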

bool si_alloc_resource(struct si_screen *sscreen,
		       struct si_resource *res)
{
	struct pb_buffer *old_buf, *new_buf;

	/* Allocate a new resource. */
	new_buf = sscreen->ws->buffer_create(sscreen->ws, res->bo_size,
					     res->bo_alignment,
					     res->domains, res->flags);
	if (!new_buf) {
		return false;
	}

	/* Replace the pointer such that if res->buf wasn't NULL, it won't be
	 * NULL. This should prevent crashes with multiple contexts using
	 * the same buffer where one of the contexts invalidates it while
	 * the others are using it. */
	old_buf = res->buf;
	res->buf = new_buf; /* should be atomic */
	res->gpu_address = sscreen->ws->buffer_get_virtual_address(res->buf);

	if (res->flags & RADEON_FLAG_32BIT) {
		uint64_t start = res->gpu_address;
		uint64_t last = start + res->bo_size - 1;
		(void)start;
		(void)last;

		assert((start >> 32) == sscreen->info.address32_hi);
		assert((last >> 32) == sscreen->info.address32_hi);
	}

	pb_reference(&old_buf, NULL);

	util_range_set_empty(&res->valid_buffer_range);
	res->TC_L2_dirty = false;

	/* Print debug information. */
	if (sscreen->debug_flags & DBG(VM) && res->b.b.target == PIPE_BUFFER) {
		fprintf(stderr, "VM start=0x%"PRIX64"  end=0x%"PRIX64" | Buffer %"PRIu64" bytes\n",
			res->gpu_address, res->gpu_address + res->buf->size,
			res->buf->size);
	}

	if (res->b.b.flags & SI_RESOURCE_FLAG_CLEAR)
		si_screen_clear_buffer(sscreen, &res->b.b, 0, res->bo_size, 0);

	return true;
}
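
/* Example (sketch): si_buffer_create below drives allocation in two steps,
 * the same pattern si_invalidate_buffer relies on to swap storage in place:
 *
 *	si_init_resource_fields(sscreen, buf, templ->width0, alignment);
 *	if (!si_alloc_resource(sscreen, buf))
 *		return NULL; // allocation failed
 */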

static void si_buffer_destroy(struct pipe_screen *screen,
			      struct pipe_resource *buf)
{
	struct si_resource *buffer = si_resource(buf);

	threaded_resource_deinit(buf);
	util_range_destroy(&buffer->valid_buffer_range);
	pb_reference(&buffer->buf, NULL);
	FREE(buffer);
}

/* Reallocate the buffer and update all resource bindings where the buffer is
 * bound.
 *
 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
 * idle by discarding its contents.
 */
static bool
si_invalidate_buffer(struct si_context *sctx,
		     struct si_resource *buf)
{
	/* Shared buffers can't be reallocated. */
	if (buf->b.is_shared)
		return false;

	/* Sparse buffers can't be reallocated. */
	if (buf->flags & RADEON_FLAG_SPARSE)
		return false;

	/* In AMD_pinned_memory, the user pointer association only gets
	 * broken when the buffer is explicitly re-allocated.
	 */
	if (buf->b.is_user_ptr)
		return false;

	/* Check if mapping this buffer would cause waiting for the GPU. */
	if (si_rings_is_buffer_referenced(sctx, buf->buf, RADEON_USAGE_READWRITE) ||
	    !sctx->ws->buffer_wait(buf->buf, 0, RADEON_USAGE_READWRITE)) {
		/* Reallocate the buffer in the same pipe_resource. */
		si_alloc_resource(sctx->screen, buf);
		si_rebind_buffer(sctx, &buf->b.b);
	} else {
		util_range_set_empty(&buf->valid_buffer_range);
	}

	return true;
}
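
/* Example (sketch): this backs pipe_context::invalidate_resource (wired up
 * in si_init_buffer_functions) and the DISCARD_WHOLE_RESOURCE fast path in
 * si_buffer_transfer_map. A state tracker typically triggers it when the
 * application orphans a buffer:
 *
 *	pipe->invalidate_resource(pipe, &buf->b.b);
 *	// The buffer now has fresh, idle storage; subsequent maps can use
 *	// PIPE_TRANSFER_UNSYNCHRONIZED.
 */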

/* Replace the storage of dst with src. */
void si_replace_buffer_storage(struct pipe_context *ctx,
				 struct pipe_resource *dst,
				 struct pipe_resource *src)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_resource *sdst = si_resource(dst);
	struct si_resource *ssrc = si_resource(src);

	pb_reference(&sdst->buf, ssrc->buf);
	sdst->gpu_address = ssrc->gpu_address;
	sdst->b.b.bind = ssrc->b.b.bind;
	sdst->b.max_forced_staging_uploads = ssrc->b.max_forced_staging_uploads;
	sdst->max_forced_staging_uploads = ssrc->max_forced_staging_uploads;
	sdst->flags = ssrc->flags;

	assert(sdst->vram_usage == ssrc->vram_usage);
	assert(sdst->gart_usage == ssrc->gart_usage);
	assert(sdst->bo_size == ssrc->bo_size);
	assert(sdst->bo_alignment == ssrc->bo_alignment);
	assert(sdst->domains == ssrc->domains);

	si_rebind_buffer(sctx, dst);
}

static void si_invalidate_resource(struct pipe_context *ctx,
				   struct pipe_resource *resource)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_resource *buf = si_resource(resource);

	/* We currently only do anything here for buffers. */
	if (resource->target == PIPE_BUFFER)
		(void)si_invalidate_buffer(sctx, buf);
}

static void *si_buffer_get_transfer(struct pipe_context *ctx,
				    struct pipe_resource *resource,
				    unsigned usage,
				    const struct pipe_box *box,
				    struct pipe_transfer **ptransfer,
				    void *data, struct si_resource *staging,
				    unsigned offset)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_transfer *transfer;

	if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
		transfer = slab_alloc(&sctx->pool_transfers_unsync);
	else
		transfer = slab_alloc(&sctx->pool_transfers);

	transfer->b.b.resource = NULL;
	pipe_resource_reference(&transfer->b.b.resource, resource);
	transfer->b.b.level = 0;
	transfer->b.b.usage = usage;
	transfer->b.b.box = *box;
	transfer->b.b.stride = 0;
	transfer->b.b.layer_stride = 0;
	transfer->b.staging = NULL;
	transfer->offset = offset;
	transfer->staging = staging;
	*ptransfer = &transfer->b.b;
	return data;
}

static void *si_buffer_transfer_map(struct pipe_context *ctx,
				    struct pipe_resource *resource,
				    unsigned level,
				    unsigned usage,
				    const struct pipe_box *box,
				    struct pipe_transfer **ptransfer)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_resource *buf = si_resource(resource);
	uint8_t *data;

	assert(box->x + box->width <= resource->width0);

	/* From GL_AMD_pinned_memory issues:
	 *
	 *     4) Is glMapBuffer on a shared buffer guaranteed to return the
	 *        same system address which was specified at creation time?
	 *
	 *        RESOLVED: NO. The GL implementation might return a different
	 *        virtual mapping of that memory, although the same physical
	 *        page will be used.
	 *
	 * So don't ever use staging buffers.
	 */
	if (buf->b.is_user_ptr)
		usage |= PIPE_TRANSFER_PERSISTENT;

	/* See if the buffer range being mapped has never been initialized,
	 * in which case it can be mapped unsynchronized. */
	if (!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
		       TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
	    usage & PIPE_TRANSFER_WRITE &&
	    !buf->b.is_shared &&
	    !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width)) {
		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
	}

	/* If discarding the entire range, discard the whole resource instead. */
	if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
	    box->x == 0 && box->width == resource->width0) {
		usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
	}

	/* If a buffer in VRAM is too large and the range is discarded, don't
	 * map it directly. This makes sure that the buffer stays in VRAM.
	 */
	bool force_discard_range = false;
	if (usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
		     PIPE_TRANSFER_DISCARD_RANGE) &&
	    !(usage & PIPE_TRANSFER_PERSISTENT) &&
	    /* Try not to decrement the counter if it's not positive. Still racy,
	     * but it makes it harder to wrap the counter from INT_MIN to INT_MAX. */
	    buf->max_forced_staging_uploads > 0 &&
	    p_atomic_dec_return(&buf->max_forced_staging_uploads) >= 0) {
		usage &= ~(PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
			   PIPE_TRANSFER_UNSYNCHRONIZED);
		usage |= PIPE_TRANSFER_DISCARD_RANGE;
		force_discard_range = true;
	}

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
	    !(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
		       TC_TRANSFER_MAP_NO_INVALIDATE))) {
		assert(usage & PIPE_TRANSFER_WRITE);

		if (si_invalidate_buffer(sctx, buf)) {
			/* At this point, the buffer is always idle. */
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
		} else {
			/* Fall back to a temporary buffer. */
			usage |= PIPE_TRANSFER_DISCARD_RANGE;
		}
	}

	if (usage & PIPE_TRANSFER_FLUSH_EXPLICIT &&
	    buf->b.b.flags & SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA) {
		usage &= ~(PIPE_TRANSFER_UNSYNCHRONIZED |
			   PIPE_TRANSFER_PERSISTENT);
		usage |= PIPE_TRANSFER_DISCARD_RANGE;
		force_discard_range = true;
	}

	if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
	    ((!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
			 PIPE_TRANSFER_PERSISTENT))) ||
	     (buf->flags & RADEON_FLAG_SPARSE))) {
		assert(usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU.
		 */
		if (buf->flags & RADEON_FLAG_SPARSE ||
		    force_discard_range ||
		    si_rings_is_buffer_referenced(sctx, buf->buf, RADEON_USAGE_READWRITE) ||
		    !sctx->ws->buffer_wait(buf->buf, 0, RADEON_USAGE_READWRITE)) {
			/* Do a wait-free write-only transfer using a temporary buffer. */
			struct u_upload_mgr *uploader;
			struct si_resource *staging = NULL;
			unsigned offset;

			/* If we are not called from the driver thread, we have
			 * to use the uploader from u_threaded_context, which is
			 * local to the calling thread.
			 */
			if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
				uploader = sctx->tc->base.stream_uploader;
			else
				uploader = sctx->b.stream_uploader;

			u_upload_alloc(uploader, 0,
				       box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT),
				       sctx->screen->info.tcc_cache_line_size,
				       &offset, (struct pipe_resource**)&staging,
				       (void**)&data);

			if (staging) {
				data += box->x % SI_MAP_BUFFER_ALIGNMENT;
				return si_buffer_get_transfer(ctx, resource, usage, box,
								ptransfer, data, staging, offset);
			} else if (buf->flags & RADEON_FLAG_SPARSE) {
				return NULL;
			}
		} else {
			/* At this point, the buffer is always idle (we checked it above). */
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
		}
	}
	/* Use a staging buffer in cached GTT for reads. */
	else if (((usage & PIPE_TRANSFER_READ) &&
		  !(usage & PIPE_TRANSFER_PERSISTENT) &&
		  (buf->domains & RADEON_DOMAIN_VRAM ||
		   buf->flags & RADEON_FLAG_GTT_WC)) ||
		 (buf->flags & RADEON_FLAG_SPARSE)) {
		struct si_resource *staging;

		assert(!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC));
		staging = si_resource(pipe_buffer_create(
				ctx->screen, 0, PIPE_USAGE_STAGING,
				box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT)));
		if (staging) {
			/* Copy the VRAM buffer to the staging buffer. */
			sctx->dma_copy(ctx, &staging->b.b, 0,
				       box->x % SI_MAP_BUFFER_ALIGNMENT,
				       0, 0, resource, 0, box);

			data = si_buffer_map_sync_with_rings(sctx, staging,
							     usage & ~PIPE_TRANSFER_UNSYNCHRONIZED);
			if (!data) {
				si_resource_reference(&staging, NULL);
				return NULL;
			}
			data += box->x % SI_MAP_BUFFER_ALIGNMENT;

			return si_buffer_get_transfer(ctx, resource, usage, box,
							ptransfer, data, staging, 0);
		} else if (buf->flags & RADEON_FLAG_SPARSE) {
			return NULL;
		}
	}

	data = si_buffer_map_sync_with_rings(sctx, buf, usage);
	if (!data) {
		return NULL;
	}
	data += box->x;

	return si_buffer_get_transfer(ctx, resource, usage, box,
					ptransfer, data, NULL, 0);
}
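
/* Example (sketch): with the logic above, a CPU readback of a VRAM buffer
 * transparently goes through a cached-GTT staging copy:
 *
 *	struct pipe_transfer *t;
 *	struct pipe_box box;
 *	u_box_1d(0, size, &box);
 *	void *p = pipe->transfer_map(pipe, &buf->b.b, 0,
 *				     PIPE_TRANSFER_READ, &box, &t);
 *	memcpy(dst, p, size);
 *	pipe->transfer_unmap(pipe, t);
 */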

static void si_buffer_do_flush_region(struct pipe_context *ctx,
				      struct pipe_transfer *transfer,
				      const struct pipe_box *box)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_transfer *stransfer = (struct si_transfer*)transfer;
	struct si_resource *buf = si_resource(transfer->resource);

	if (stransfer->staging) {
		unsigned src_offset = stransfer->offset +
				      transfer->box.x % SI_MAP_BUFFER_ALIGNMENT +
				      (box->x - transfer->box.x);

		if (buf->b.b.flags & SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA) {
			/* This should be true for all uploaders. */
			assert(transfer->box.x == 0);

			/* Find a previous upload and extend its range. The last
			 * upload is likely to be at the end of the list.
			 */
			for (int i = sctx->num_sdma_uploads - 1; i >= 0; i--) {
				struct si_sdma_upload *up = &sctx->sdma_uploads[i];

				if (up->dst != buf)
					continue;

				assert(up->src == stransfer->staging);
				assert(box->x > up->dst_offset);
				up->size = box->x + box->width - up->dst_offset;
				return;
			}

			/* Enlarge the array if it's full. */
			if (sctx->num_sdma_uploads == sctx->max_sdma_uploads) {
				unsigned size;

				sctx->max_sdma_uploads += 4;
				size = sctx->max_sdma_uploads * sizeof(sctx->sdma_uploads[0]);
				sctx->sdma_uploads = realloc(sctx->sdma_uploads, size);
			}

			/* Add a new upload. */
			struct si_sdma_upload *up =
				&sctx->sdma_uploads[sctx->num_sdma_uploads++];
			up->dst = up->src = NULL;
			si_resource_reference(&up->dst, buf);
			si_resource_reference(&up->src, stransfer->staging);
			up->dst_offset = box->x;
			up->src_offset = src_offset;
			up->size = box->width;
			return;
		}

		/* Copy the staging buffer into the original one. */
		si_copy_buffer(sctx, transfer->resource, &stransfer->staging->b.b,
			       box->x, src_offset, box->width);
	}

	util_range_add(&buf->valid_buffer_range, box->x,
		       box->x + box->width);
}
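
/* Example (hypothetical offsets): the SDMA merge path above assumes
 * explicit flushes arrive in increasing offset order for a given buffer
 * (hence the box->x > up->dst_offset assertion), so two flushes coalesce
 * into one upload record:
 *
 *	flush(x=0,   width=256) -> new upload:  {dst_offset=0, size=256}
 *	flush(x=256, width=128) -> extended to: {dst_offset=0, size=384}
 */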

static void si_buffer_flush_region(struct pipe_context *ctx,
				   struct pipe_transfer *transfer,
				   const struct pipe_box *rel_box)
{
	unsigned required_usage = PIPE_TRANSFER_WRITE |
				  PIPE_TRANSFER_FLUSH_EXPLICIT;

	if ((transfer->usage & required_usage) == required_usage) {
		struct pipe_box box;

		u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
		si_buffer_do_flush_region(ctx, transfer, &box);
	}
}

static void si_buffer_transfer_unmap(struct pipe_context *ctx,
				     struct pipe_transfer *transfer)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_transfer *stransfer = (struct si_transfer*)transfer;

	if (transfer->usage & PIPE_TRANSFER_WRITE &&
	    !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
		si_buffer_do_flush_region(ctx, transfer, &transfer->box);

	si_resource_reference(&stransfer->staging, NULL);
	assert(stransfer->b.staging == NULL); /* for threaded context only */
	pipe_resource_reference(&transfer->resource, NULL);

	/* Don't use pool_transfers_unsync. We are always in the driver
	 * thread. */
	slab_free(&sctx->pool_transfers, transfer);
}

static void si_buffer_subdata(struct pipe_context *ctx,
			      struct pipe_resource *buffer,
			      unsigned usage, unsigned offset,
			      unsigned size, const void *data)
{
	struct pipe_transfer *transfer = NULL;
	struct pipe_box box;
	uint8_t *map = NULL;

	u_box_1d(offset, size, &box);
	map = si_buffer_transfer_map(ctx, buffer, 0,
				       PIPE_TRANSFER_WRITE |
				       PIPE_TRANSFER_DISCARD_RANGE |
				       usage,
				       &box, &transfer);
	if (!map)
		return;

	memcpy(map, data, size);
	si_buffer_transfer_unmap(ctx, transfer);
}
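
/* Example (sketch): si_buffer_subdata backs pipe_context::buffer_subdata,
 * so a small constant-buffer update reduces to the map/memcpy/unmap above
 * with DISCARD_RANGE semantics:
 *
 *	float consts[4] = {0.0f, 1.0f, 2.0f, 3.0f};
 *	pipe->buffer_subdata(pipe, cbuf, PIPE_TRANSFER_WRITE, 0,
 *			     sizeof(consts), consts);
 */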

static const struct u_resource_vtbl si_buffer_vtbl =
{
	NULL,				/* get_handle */
	si_buffer_destroy,		/* resource_destroy */
	si_buffer_transfer_map,		/* transfer_map */
	si_buffer_flush_region,		/* transfer_flush_region */
	si_buffer_transfer_unmap,	/* transfer_unmap */
};

static struct si_resource *
si_alloc_buffer_struct(struct pipe_screen *screen,
		       const struct pipe_resource *templ)
{
	struct si_resource *buf;

	buf = MALLOC_STRUCT(si_resource);

	buf->b.b = *templ;
	buf->b.b.next = NULL;
	pipe_reference_init(&buf->b.b.reference, 1);
	buf->b.b.screen = screen;

	buf->b.vtbl = &si_buffer_vtbl;
	threaded_resource_init(&buf->b.b);

	buf->buf = NULL;
	buf->bind_history = 0;
	buf->TC_L2_dirty = false;
	util_range_init(&buf->valid_buffer_range);
	return buf;
}

static struct pipe_resource *si_buffer_create(struct pipe_screen *screen,
					      const struct pipe_resource *templ,
					      unsigned alignment)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct si_resource *buf = si_alloc_buffer_struct(screen, templ);

	if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
		buf->b.b.flags |= SI_RESOURCE_FLAG_UNMAPPABLE;

	si_init_resource_fields(sscreen, buf, templ->width0, alignment);

	if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
		buf->flags |= RADEON_FLAG_SPARSE;

	if (!si_alloc_resource(sscreen, buf)) {
		FREE(buf);
		return NULL;
	}
	return &buf->b.b;
}

struct pipe_resource *pipe_aligned_buffer_create(struct pipe_screen *screen,
						 unsigned flags, unsigned usage,
						 unsigned size, unsigned alignment)
{
	struct pipe_resource buffer;

	memset(&buffer, 0, sizeof buffer);
	buffer.target = PIPE_BUFFER;
	buffer.format = PIPE_FORMAT_R8_UNORM;
	buffer.bind = 0;
	buffer.usage = usage;
	buffer.flags = flags;
	buffer.width0 = size;
	buffer.height0 = 1;
	buffer.depth0 = 1;
	buffer.array_size = 1;
	return si_buffer_create(screen, &buffer, alignment);
}
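
/* Example (sketch, hypothetical parameters): allocating an internal 4 KiB
 * scratch buffer that must never be CPU-mapped:
 *
 *	struct pipe_resource *scratch =
 *		pipe_aligned_buffer_create(screen, SI_RESOURCE_FLAG_UNMAPPABLE,
 *					   PIPE_USAGE_DEFAULT, 4096, 256);
 *
 * The si_aligned_buffer_create wrapper below returns the same allocation
 * as a struct si_resource.
 */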

struct si_resource *si_aligned_buffer_create(struct pipe_screen *screen,
					       unsigned flags, unsigned usage,
					       unsigned size, unsigned alignment)
{
	return si_resource(pipe_aligned_buffer_create(screen, flags, usage,
							size, alignment));
}

static struct pipe_resource *
si_buffer_from_user_memory(struct pipe_screen *screen,
			   const struct pipe_resource *templ,
			   void *user_memory)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct radeon_winsys *ws = sscreen->ws;
	struct si_resource *buf = si_alloc_buffer_struct(screen, templ);

	buf->domains = RADEON_DOMAIN_GTT;
	buf->flags = 0;
	buf->b.is_user_ptr = true;
	util_range_add(&buf->valid_buffer_range, 0, templ->width0);
	util_range_add(&buf->b.valid_buffer_range, 0, templ->width0);

	/* Convert a user pointer to a buffer. */
	buf->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
	if (!buf->buf) {
		FREE(buf);
		return NULL;
	}

	buf->gpu_address = ws->buffer_get_virtual_address(buf->buf);
	buf->vram_usage = 0;
	buf->gart_usage = templ->width0;

	return &buf->b.b;
}
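
/* Example (sketch): this implements GL_AMD_pinned_memory-style wrapping.
 * The pointer must remain valid for the buffer's lifetime, and the winsys
 * may reject pointers it cannot pin (page alignment is an assumption here,
 * not something this function checks):
 *
 *	void *mem = aligned_alloc(4096, size);
 *	struct pipe_resource *buf =
 *		screen->resource_from_user_memory(screen, &templ, mem);
 */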

static struct pipe_resource *si_resource_create(struct pipe_screen *screen,
						const struct pipe_resource *templ)
{
	if (templ->target == PIPE_BUFFER) {
		return si_buffer_create(screen, templ, 256);
	} else {
		return si_texture_create(screen, templ);
	}
}

static bool si_resource_commit(struct pipe_context *pctx,
			       struct pipe_resource *resource,
			       unsigned level, struct pipe_box *box,
			       bool commit)
{
	struct si_context *ctx = (struct si_context *)pctx;
	struct si_resource *res = si_resource(resource);

	/*
	 * Since buffer commitment changes cannot be pipelined, we need to
	 * (a) flush any pending commands that refer to the buffer we're about
	 *     to change, and
	 * (b) wait for threaded submit to finish, including those that were
	 *     triggered by some other, earlier operation.
	 */
	if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs,
					       res->buf, RADEON_USAGE_READWRITE)) {
		si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
	}
	if (radeon_emitted(ctx->dma_cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->dma_cs,
					       res->buf, RADEON_USAGE_READWRITE)) {
		si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
	}

	ctx->ws->cs_sync_flush(ctx->dma_cs);
	ctx->ws->cs_sync_flush(ctx->gfx_cs);

	assert(resource->target == PIPE_BUFFER);

	return ctx->ws->buffer_commit(res->buf, box->x, box->width, commit);
}
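
/* Example (sketch): resource_commit is only meaningful for sparse buffers
 * (created with PIPE_RESOURCE_FLAG_SPARSE above). A caller commits physical
 * pages for a range before using it:
 *
 *	struct pipe_box box;
 *	u_box_1d(offset, length, &box);
 *	pctx->resource_commit(pctx, res, 0, &box, true);
 */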

void si_init_screen_buffer_functions(struct si_screen *sscreen)
{
	sscreen->b.resource_create = si_resource_create;
	sscreen->b.resource_destroy = u_resource_destroy_vtbl;
	sscreen->b.resource_from_user_memory = si_buffer_from_user_memory;
}

void si_init_buffer_functions(struct si_context *sctx)
{
	sctx->b.invalidate_resource = si_invalidate_resource;
	sctx->b.transfer_map = u_transfer_map_vtbl;
	sctx->b.transfer_flush_region = u_transfer_flush_region_vtbl;
	sctx->b.transfer_unmap = u_transfer_unmap_vtbl;
	sctx->b.texture_subdata = u_default_texture_subdata;
	sctx->b.buffer_subdata = si_buffer_subdata;
	sctx->b.resource_commit = si_resource_commit;
}