/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "radeonsi/si_pipe.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/u_transfer.h"
#include <inttypes.h>
#include <stdio.h>

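/* Return true if either the GFX or the SDMA command stream built so far
 * references "buf" with the given usage. */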
bool si_rings_is_buffer_referenced(struct si_context *sctx,
				   struct pb_buffer *buf,
				   enum radeon_bo_usage usage)
{
	if (sctx->ws->cs_is_buffer_referenced(sctx->gfx_cs, buf, usage)) {
		return true;
	}
	if (radeon_emitted(sctx->dma_cs, 0) &&
	    sctx->ws->cs_is_buffer_referenced(sctx->dma_cs, buf, usage)) {
		return true;
	}
	return false;
}

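/* Map a buffer for CPU access, flushing and/or waiting for the GFX and SDMA
 * rings as required by "usage" (PIPE_TRANSFER_*), so that the CPU does not
 * race with pending GPU work. Returns NULL on failure or when
 * PIPE_TRANSFER_DONTBLOCK would have to block. */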
void *si_buffer_map_sync_with_rings(struct si_context *sctx,
				    struct si_resource *resource,
				    unsigned usage)
{
	enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
	bool busy = false;

	assert(!(resource->flags & RADEON_FLAG_SPARSE));

	if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
		return sctx->ws->buffer_map(resource->buf, NULL, usage);
	}

	if (!(usage & PIPE_TRANSFER_WRITE)) {
		/* have to wait for the last write */
		rusage = RADEON_USAGE_WRITE;
	}

	if (radeon_emitted(sctx->gfx_cs, sctx->initial_gfx_cs_size) &&
	    sctx->ws->cs_is_buffer_referenced(sctx->gfx_cs,
						resource->buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
			return NULL;
		} else {
			si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
			busy = true;
		}
	}
	if (radeon_emitted(sctx->dma_cs, 0) &&
	    sctx->ws->cs_is_buffer_referenced(sctx->dma_cs,
						resource->buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			si_flush_dma_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			si_flush_dma_cs(sctx, 0, NULL);
			busy = true;
		}
	}

	if (busy || !sctx->ws->buffer_wait(resource->buf, 0, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			return NULL;
		} else {
			/* We will be waiting for the GPU. Wait for any offloaded
			 * CS flush to complete to avoid busy-waiting in the winsys. */
			sctx->ws->cs_sync_flush(sctx->gfx_cs);
			if (sctx->dma_cs)
				sctx->ws->cs_sync_flush(sctx->dma_cs);
		}
	}

	/* Setting the CS to NULL will prevent doing checks we have done already. */
	return sctx->ws->buffer_map(resource->buf, NULL, usage);
}

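/* Fill in the allocation parameters (size, alignment, domains, flags) of a
 * buffer or texture resource based on its pipe_resource usage and flags,
 * before the BO is actually allocated. */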
void si_init_resource_fields(struct si_screen *sscreen,
			     struct si_resource *res,
			     uint64_t size, unsigned alignment)
{
	struct si_texture *tex = (struct si_texture*)res;

	res->bo_size = size;
	res->bo_alignment = alignment;
	res->flags = 0;
	res->texture_handle_allocated = false;
	res->image_handle_allocated = false;

	switch (res->b.b.usage) {
	case PIPE_USAGE_STREAM:
		res->flags = RADEON_FLAG_GTT_WC;
		/* fall through */
	case PIPE_USAGE_STAGING:
		/* Transfers are likely to occur more often with these
		 * resources. */
		res->domains = RADEON_DOMAIN_GTT;
		break;
	case PIPE_USAGE_DYNAMIC:
		/* Older kernels didn't always flush the HDP cache before
		 * CS execution.
		 */
		if (!sscreen->info.kernel_flushes_hdp_before_ib) {
			res->domains = RADEON_DOMAIN_GTT;
			res->flags |= RADEON_FLAG_GTT_WC;
			break;
		}
		/* fall through */
	case PIPE_USAGE_DEFAULT:
	case PIPE_USAGE_IMMUTABLE:
	default:
		/* Not listing GTT here improves performance in some
		 * apps. */
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags |= RADEON_FLAG_GTT_WC;
		break;
	}

	if (res->b.b.target == PIPE_BUFFER &&
	    res->b.b.flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) {
		/* Use GTT for all persistent mappings with older
		 * kernels, because they didn't always flush the HDP
		 * cache before CS execution.
		 *
		 * Write-combined CPU mappings are fine, the kernel
		 * ensures all CPU writes finish before the GPU
		 * executes a command stream.
		 *
		 * radeon doesn't have good BO move throttling, so put all
		 * persistent buffers into GTT to prevent VRAM CPU page faults.
		 */
		if (!sscreen->info.kernel_flushes_hdp_before_ib ||
		    sscreen->info.drm_major == 2)
			res->domains = RADEON_DOMAIN_GTT;
	}

	/* Tiled textures are unmappable. Always put them in VRAM. */
	if ((res->b.b.target != PIPE_BUFFER && !tex->surface.is_linear) ||
	    res->b.b.flags & SI_RESOURCE_FLAG_UNMAPPABLE) {
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags |= RADEON_FLAG_NO_CPU_ACCESS |
			 RADEON_FLAG_GTT_WC;
	}

	/* Displayable and shareable surfaces are not suballocated. */
	if (res->b.b.bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT))
		res->flags |= RADEON_FLAG_NO_SUBALLOC; /* shareable */
	else
		res->flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;

	if (sscreen->debug_flags & DBG(NO_WC))
		res->flags &= ~RADEON_FLAG_GTT_WC;

	if (res->b.b.flags & SI_RESOURCE_FLAG_READ_ONLY)
		res->flags |= RADEON_FLAG_READ_ONLY;

	if (res->b.b.flags & SI_RESOURCE_FLAG_32BIT)
		res->flags |= RADEON_FLAG_32BIT;

	/* Set expected VRAM and GART usage for the buffer. */
	res->vram_usage = 0;
	res->gart_usage = 0;
	res->max_forced_staging_uploads = 0;
	res->b.max_forced_staging_uploads = 0;

	if (res->domains & RADEON_DOMAIN_VRAM) {
		res->vram_usage = size;

		res->max_forced_staging_uploads =
		res->b.max_forced_staging_uploads =
			sscreen->info.has_dedicated_vram &&
			size >= sscreen->info.vram_vis_size / 4 ? 1 : 0;
	} else if (res->domains & RADEON_DOMAIN_GTT) {
		res->gart_usage = size;
	}
}

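/* Allocate (or reallocate) the winsys buffer backing "res" according to the
 * fields set up by si_init_resource_fields. Safe to call on a resource that
 * already has a buffer; the old one is released. */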
bool si_alloc_resource(struct si_screen *sscreen,
		       struct si_resource *res)
{
	struct pb_buffer *old_buf, *new_buf;

	/* Allocate a new resource. */
	new_buf = sscreen->ws->buffer_create(sscreen->ws, res->bo_size,
					     res->bo_alignment,
					     res->domains, res->flags);
	if (!new_buf) {
		return false;
	}

	/* Replace the pointer such that if res->buf wasn't NULL, it won't be
	 * NULL. This should prevent crashes with multiple contexts using
	 * the same buffer where one of the contexts invalidates it while
	 * the others are using it. */
	old_buf = res->buf;
	res->buf = new_buf; /* should be atomic */
	res->gpu_address = sscreen->ws->buffer_get_virtual_address(res->buf);

	if (res->flags & RADEON_FLAG_32BIT) {
		uint64_t start = res->gpu_address;
		uint64_t last = start + res->bo_size - 1;
		(void)start;
		(void)last;

		assert((start >> 32) == sscreen->info.address32_hi);
		assert((last >> 32) == sscreen->info.address32_hi);
	}

	pb_reference(&old_buf, NULL);

	util_range_set_empty(&res->valid_buffer_range);
	res->TC_L2_dirty = false;

	/* Print debug information. */
	if (sscreen->debug_flags & DBG(VM) && res->b.b.target == PIPE_BUFFER) {
		fprintf(stderr, "VM start=0x%"PRIX64"  end=0x%"PRIX64" | Buffer %"PRIu64" bytes\n",
			res->gpu_address, res->gpu_address + res->buf->size,
			res->buf->size);
	}

	if (res->b.b.flags & SI_RESOURCE_FLAG_CLEAR)
		si_screen_clear_buffer(sscreen, &res->b.b, 0, res->bo_size, 0);

	return true;
}

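/* Destroy a buffer: free the CPU-side bookkeeping and drop the reference to
 * the winsys BO. */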
static void si_buffer_destroy(struct pipe_screen *screen,
			      struct pipe_resource *buf)
{
	struct si_resource *buffer = si_resource(buf);

	threaded_resource_deinit(buf);
	util_range_destroy(&buffer->valid_buffer_range);
	pb_reference(&buffer->buf, NULL);
	FREE(buffer);
}

/* Reallocate the buffer and update all resource bindings where the buffer is
 * bound.
 *
 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
 * idle by discarding its contents.
 */
static bool
si_invalidate_buffer(struct si_context *sctx,
		     struct si_resource *buf)
{
	/* Shared buffers can't be reallocated. */
	if (buf->b.is_shared)
		return false;

	/* Sparse buffers can't be reallocated. */
	if (buf->flags & RADEON_FLAG_SPARSE)
		return false;

	/* In AMD_pinned_memory, the user pointer association only gets
	 * broken when the buffer is explicitly re-allocated.
	 */
	if (buf->b.is_user_ptr)
		return false;

	/* Check if mapping this buffer would cause waiting for the GPU. */
	if (si_rings_is_buffer_referenced(sctx, buf->buf, RADEON_USAGE_READWRITE) ||
	    !sctx->ws->buffer_wait(buf->buf, 0, RADEON_USAGE_READWRITE)) {
		/* Reallocate the buffer in the same pipe_resource. */
		si_alloc_resource(sctx->screen, buf);
		si_rebind_buffer(sctx, &buf->b.b);
	} else {
		util_range_set_empty(&buf->valid_buffer_range);
	}

	return true;
}

/* Replace the storage of dst with src. */
void si_replace_buffer_storage(struct pipe_context *ctx,
				 struct pipe_resource *dst,
				 struct pipe_resource *src)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_resource *sdst = si_resource(dst);
	struct si_resource *ssrc = si_resource(src);

	pb_reference(&sdst->buf, ssrc->buf);
	sdst->gpu_address = ssrc->gpu_address;
	sdst->b.b.bind = ssrc->b.b.bind;
	sdst->b.max_forced_staging_uploads = ssrc->b.max_forced_staging_uploads;
	sdst->max_forced_staging_uploads = ssrc->max_forced_staging_uploads;
	sdst->flags = ssrc->flags;

	assert(sdst->vram_usage == ssrc->vram_usage);
	assert(sdst->gart_usage == ssrc->gart_usage);
	assert(sdst->bo_size == ssrc->bo_size);
	assert(sdst->bo_alignment == ssrc->bo_alignment);
	assert(sdst->domains == ssrc->domains);

	si_rebind_buffer(sctx, dst);
}

static void si_invalidate_resource(struct pipe_context *ctx,
				   struct pipe_resource *resource)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_resource *buf = si_resource(resource);

	/* We currently only do anything here for buffers. */
	if (resource->target == PIPE_BUFFER)
		(void)si_invalidate_buffer(sctx, buf);
}

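/* Allocate and fill a struct si_transfer describing a completed buffer map.
 * "data" is the CPU pointer returned to the caller, "staging" (optional) is
 * the temporary buffer holding the mapping, and "offset" is the byte offset
 * of the mapped range within that staging buffer. */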
static void *si_buffer_get_transfer(struct pipe_context *ctx,
				    struct pipe_resource *resource,
				    unsigned usage,
				    const struct pipe_box *box,
				    struct pipe_transfer **ptransfer,
				    void *data, struct si_resource *staging,
				    unsigned offset)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_transfer *transfer;

	if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
		transfer = slab_alloc(&sctx->pool_transfers_unsync);
	else
		transfer = slab_alloc(&sctx->pool_transfers);

	transfer->b.b.resource = NULL;
	pipe_resource_reference(&transfer->b.b.resource, resource);
	transfer->b.b.level = 0;
	transfer->b.b.usage = usage;
	transfer->b.b.box = *box;
	transfer->b.b.stride = 0;
	transfer->b.b.layer_stride = 0;
	transfer->b.staging = NULL;
	transfer->offset = offset;
	transfer->staging = staging;
	*ptransfer = &transfer->b.b;
	return data;
}

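/* transfer_map for buffers.
 *
 * Depending on "usage", this either maps the buffer directly, reallocates it
 * (discard-whole-resource), or bounces the transfer through a temporary
 * buffer: an upload buffer for write-only discarded ranges, or a staging
 * buffer in cached GTT for reads from VRAM or write-combined memory. */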
static void *si_buffer_transfer_map(struct pipe_context *ctx,
				    struct pipe_resource *resource,
				    unsigned level,
				    unsigned usage,
				    const struct pipe_box *box,
				    struct pipe_transfer **ptransfer)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_resource *buf = si_resource(resource);
	uint8_t *data;

	assert(box->x + box->width <= resource->width0);

	/* From GL_AMD_pinned_memory issues:
	 *
	 *     4) Is glMapBuffer on a shared buffer guaranteed to return the
	 *        same system address which was specified at creation time?
	 *
	 *        RESOLVED: NO. The GL implementation might return a different
	 *        virtual mapping of that memory, although the same physical
	 *        page will be used.
	 *
	 * So don't ever use staging buffers.
	 */
	if (buf->b.is_user_ptr)
		usage |= PIPE_TRANSFER_PERSISTENT;

	/* See if the buffer range being mapped has never been initialized,
	 * in which case it can be mapped unsynchronized. */
	if (!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
		       TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
	    usage & PIPE_TRANSFER_WRITE &&
	    !buf->b.is_shared &&
	    !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width)) {
		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
	}

	/* If discarding the entire range, discard the whole resource instead. */
	if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
	    box->x == 0 && box->width == resource->width0) {
		usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
	}

	/* If a buffer in VRAM is too large and the range is discarded, don't
	 * map it directly. This makes sure that the buffer stays in VRAM.
	 */
	bool force_discard_range = false;
	if (usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
		     PIPE_TRANSFER_DISCARD_RANGE) &&
	    !(usage & PIPE_TRANSFER_PERSISTENT) &&
	    /* Try not to decrement the counter if it's not positive. Still racy,
	     * but it makes it harder to wrap the counter from INT_MIN to INT_MAX. */
	    buf->max_forced_staging_uploads > 0 &&
	    p_atomic_dec_return(&buf->max_forced_staging_uploads) >= 0) {
		usage &= ~(PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
			   PIPE_TRANSFER_UNSYNCHRONIZED);
		usage |= PIPE_TRANSFER_DISCARD_RANGE;
		force_discard_range = true;
	}

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
	    !(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
		       TC_TRANSFER_MAP_NO_INVALIDATE))) {
		assert(usage & PIPE_TRANSFER_WRITE);

		if (si_invalidate_buffer(sctx, buf)) {
			/* At this point, the buffer is always idle. */
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
		} else {
			/* Fall back to a temporary buffer. */
			usage |= PIPE_TRANSFER_DISCARD_RANGE;
		}
	}

	if (usage & PIPE_TRANSFER_FLUSH_EXPLICIT &&
	    buf->b.b.flags & SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA) {
		usage &= ~(PIPE_TRANSFER_UNSYNCHRONIZED |
			   PIPE_TRANSFER_PERSISTENT);
		usage |= PIPE_TRANSFER_DISCARD_RANGE;
		force_discard_range = true;
	}

	if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
	    ((!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
			 PIPE_TRANSFER_PERSISTENT))) ||
	     (buf->flags & RADEON_FLAG_SPARSE))) {
		assert(usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU.
		 */
		if (buf->flags & RADEON_FLAG_SPARSE ||
		    force_discard_range ||
		    si_rings_is_buffer_referenced(sctx, buf->buf, RADEON_USAGE_READWRITE) ||
		    !sctx->ws->buffer_wait(buf->buf, 0, RADEON_USAGE_READWRITE)) {
			/* Do a wait-free write-only transfer using a temporary buffer. */
			struct u_upload_mgr *uploader;
			struct si_resource *staging = NULL;
			unsigned offset;

			/* If we are not called from the driver thread, we have
			 * to use the uploader from u_threaded_context, which is
			 * local to the calling thread.
			 */
			if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
				uploader = sctx->tc->base.stream_uploader;
			else
				uploader = sctx->b.stream_uploader;

			u_upload_alloc(uploader, 0,
				       box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT),
				       sctx->screen->info.tcc_cache_line_size,
				       &offset, (struct pipe_resource**)&staging,
				       (void**)&data);

			if (staging) {
				data += box->x % SI_MAP_BUFFER_ALIGNMENT;
				return si_buffer_get_transfer(ctx, resource, usage, box,
								ptransfer, data, staging, offset);
			} else if (buf->flags & RADEON_FLAG_SPARSE) {
				return NULL;
			}
		} else {
			/* At this point, the buffer is always idle (we checked it above). */
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
		}
	}
	/* Use a staging buffer in cached GTT for reads. */
	else if (((usage & PIPE_TRANSFER_READ) &&
		  !(usage & PIPE_TRANSFER_PERSISTENT) &&
		  (buf->domains & RADEON_DOMAIN_VRAM ||
		   buf->flags & RADEON_FLAG_GTT_WC)) ||
		 (buf->flags & RADEON_FLAG_SPARSE)) {
		struct si_resource *staging;

		assert(!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC));
		staging = si_resource(pipe_buffer_create(
				ctx->screen, 0, PIPE_USAGE_STAGING,
				box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT)));
		if (staging) {
			/* Copy the VRAM buffer to the staging buffer. */
			sctx->dma_copy(ctx, &staging->b.b, 0,
				       box->x % SI_MAP_BUFFER_ALIGNMENT,
				       0, 0, resource, 0, box);

			data = si_buffer_map_sync_with_rings(sctx, staging,
							     usage & ~PIPE_TRANSFER_UNSYNCHRONIZED);
			if (!data) {
				si_resource_reference(&staging, NULL);
				return NULL;
			}
			data += box->x % SI_MAP_BUFFER_ALIGNMENT;

			return si_buffer_get_transfer(ctx, resource, usage, box,
							ptransfer, data, staging, 0);
		} else if (buf->flags & RADEON_FLAG_SPARSE) {
			return NULL;
		}
	}

	data = si_buffer_map_sync_with_rings(sctx, buf, usage);
	if (!data) {
		return NULL;
	}
	data += box->x;

	return si_buffer_get_transfer(ctx, resource, usage, box,
					ptransfer, data, NULL, 0);
}

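/* Write back a region of a mapped buffer. If the transfer used a staging
 * buffer, either queue an SDMA upload (for buffers marked
 * SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA) or copy the staging data
 * back into the real buffer, then mark the region as initialized. */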
static void si_buffer_do_flush_region(struct pipe_context *ctx,
				      struct pipe_transfer *transfer,
				      const struct pipe_box *box)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_transfer *stransfer = (struct si_transfer*)transfer;
	struct si_resource *buf = si_resource(transfer->resource);

	if (stransfer->staging) {
		unsigned src_offset = stransfer->offset +
				      transfer->box.x % SI_MAP_BUFFER_ALIGNMENT +
				      (box->x - transfer->box.x);

		if (buf->b.b.flags & SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA) {
			/* This should be true for all uploaders. */
			assert(transfer->box.x == 0);

			/* Find a previous upload and extend its range. The last
			 * upload is likely to be at the end of the list.
			 */
			for (int i = sctx->num_sdma_uploads - 1; i >= 0; i--) {
				struct si_sdma_upload *up = &sctx->sdma_uploads[i];

				if (up->dst != buf)
					continue;

				assert(up->src == stransfer->staging);
				assert(box->x > up->dst_offset);
				up->size = box->x + box->width - up->dst_offset;
				return;
			}

			/* Enlarge the array if it's full. */
			if (sctx->num_sdma_uploads == sctx->max_sdma_uploads) {
				unsigned size;

				sctx->max_sdma_uploads += 4;
				size = sctx->max_sdma_uploads * sizeof(sctx->sdma_uploads[0]);
				sctx->sdma_uploads = realloc(sctx->sdma_uploads, size);
			}

			/* Add a new upload. */
			struct si_sdma_upload *up =
				&sctx->sdma_uploads[sctx->num_sdma_uploads++];
			up->dst = up->src = NULL;
			si_resource_reference(&up->dst, buf);
			si_resource_reference(&up->src, stransfer->staging);
			up->dst_offset = box->x;
			up->src_offset = src_offset;
			up->size = box->width;
			return;
		}

		/* Copy the staging buffer into the original one. */
		si_copy_buffer(sctx, transfer->resource, &stransfer->staging->b.b,
			       box->x, src_offset, box->width);
	}

	util_range_add(&buf->valid_buffer_range, box->x,
		       box->x + box->width);
}

static void si_buffer_flush_region(struct pipe_context *ctx,
				   struct pipe_transfer *transfer,
				   const struct pipe_box *rel_box)
{
	unsigned required_usage = PIPE_TRANSFER_WRITE |
				  PIPE_TRANSFER_FLUSH_EXPLICIT;

	if ((transfer->usage & required_usage) == required_usage) {
		struct pipe_box box;

		u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
		si_buffer_do_flush_region(ctx, transfer, &box);
	}
}

static void si_buffer_transfer_unmap(struct pipe_context *ctx,
				     struct pipe_transfer *transfer)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_transfer *stransfer = (struct si_transfer*)transfer;

	if (transfer->usage & PIPE_TRANSFER_WRITE &&
	    !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
		si_buffer_do_flush_region(ctx, transfer, &transfer->box);

	si_resource_reference(&stransfer->staging, NULL);
	assert(stransfer->b.staging == NULL); /* for threaded context only */
	pipe_resource_reference(&transfer->resource, NULL);

	/* Don't use pool_transfers_unsync. We are always in the driver
	 * thread. */
	slab_free(&sctx->pool_transfers, transfer);
}

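/* buffer_subdata: upload "size" bytes of "data" at "offset" by mapping the
 * range with PIPE_TRANSFER_DISCARD_RANGE and copying into it. */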
static void si_buffer_subdata(struct pipe_context *ctx,
			      struct pipe_resource *buffer,
			      unsigned usage, unsigned offset,
			      unsigned size, const void *data)
{
	struct pipe_transfer *transfer = NULL;
	struct pipe_box box;
	uint8_t *map = NULL;

	u_box_1d(offset, size, &box);
	map = si_buffer_transfer_map(ctx, buffer, 0,
				       PIPE_TRANSFER_WRITE |
				       PIPE_TRANSFER_DISCARD_RANGE |
				       usage,
				       &box, &transfer);
	if (!map)
		return;

	memcpy(map, data, size);
	si_buffer_transfer_unmap(ctx, transfer);
}

static const struct u_resource_vtbl si_buffer_vtbl =
{
	NULL,				/* get_handle */
	si_buffer_destroy,		/* resource_destroy */
	si_buffer_transfer_map,		/* transfer_map */
	si_buffer_flush_region,		/* transfer_flush_region */
	si_buffer_transfer_unmap,	/* transfer_unmap */
};

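/* Allocate and initialize the CPU-side si_resource struct for a buffer;
 * the winsys BO itself is allocated separately by si_alloc_resource. */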
static struct si_resource *
si_alloc_buffer_struct(struct pipe_screen *screen,
		       const struct pipe_resource *templ)
{
	struct si_resource *buf;

	buf = MALLOC_STRUCT(si_resource);

	buf->b.b = *templ;
	buf->b.b.next = NULL;
	pipe_reference_init(&buf->b.b.reference, 1);
	buf->b.b.screen = screen;

	buf->b.vtbl = &si_buffer_vtbl;
	threaded_resource_init(&buf->b.b);

	buf->buf = NULL;
	buf->bind_history = 0;
	buf->TC_L2_dirty = false;
	util_range_init(&buf->valid_buffer_range);
	return buf;
}

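/* Create a buffer resource from a pipe_resource template with an explicit
 * BO alignment in bytes. */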
static struct pipe_resource *si_buffer_create(struct pipe_screen *screen,
					      const struct pipe_resource *templ,
					      unsigned alignment)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct si_resource *buf = si_alloc_buffer_struct(screen, templ);

	if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
		buf->b.b.flags |= SI_RESOURCE_FLAG_UNMAPPABLE;

	si_init_resource_fields(sscreen, buf, templ->width0, alignment);

	if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
		buf->flags |= RADEON_FLAG_SPARSE;

	if (!si_alloc_resource(sscreen, buf)) {
		FREE(buf);
		return NULL;
	}
	return &buf->b.b;
}

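/* Convenience helper: build a minimal PIPE_BUFFER template and create an
 * aligned buffer from it. A typical internal use (values are illustrative
 * only, not taken from this file) might look like:
 *
 *   struct pipe_resource *scratch =
 *      pipe_aligned_buffer_create(&sscreen->b, SI_RESOURCE_FLAG_UNMAPPABLE,
 *                                 PIPE_USAGE_DEFAULT, 64 * 1024, 256);
 */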
struct pipe_resource *pipe_aligned_buffer_create(struct pipe_screen *screen,
						 unsigned flags, unsigned usage,
						 unsigned size, unsigned alignment)
{
	struct pipe_resource buffer;

	memset(&buffer, 0, sizeof buffer);
	buffer.target = PIPE_BUFFER;
	buffer.format = PIPE_FORMAT_R8_UNORM;
	buffer.bind = 0;
	buffer.usage = usage;
	buffer.flags = flags;
	buffer.width0 = size;
	buffer.height0 = 1;
	buffer.depth0 = 1;
	buffer.array_size = 1;
	return si_buffer_create(screen, &buffer, alignment);
}

struct si_resource *si_aligned_buffer_create(struct pipe_screen *screen,
					       unsigned flags, unsigned usage,
					       unsigned size, unsigned alignment)
{
	return si_resource(pipe_aligned_buffer_create(screen, flags, usage,
							size, alignment));
}

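/* resource_from_user_memory: wrap an existing user pointer as a GTT buffer
 * (GL_AMD_pinned_memory). The whole range is marked valid because the
 * application already owns the contents. */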
static struct pipe_resource *
si_buffer_from_user_memory(struct pipe_screen *screen,
			   const struct pipe_resource *templ,
			   void *user_memory)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct radeon_winsys *ws = sscreen->ws;
	struct si_resource *buf = si_alloc_buffer_struct(screen, templ);

	buf->domains = RADEON_DOMAIN_GTT;
	buf->flags = 0;
	buf->b.is_user_ptr = true;
	util_range_add(&buf->valid_buffer_range, 0, templ->width0);
	util_range_add(&buf->b.valid_buffer_range, 0, templ->width0);

	/* Convert a user pointer to a buffer. */
	buf->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
	if (!buf->buf) {
		FREE(buf);
		return NULL;
	}

	buf->gpu_address = ws->buffer_get_virtual_address(buf->buf);
	buf->vram_usage = 0;
	buf->gart_usage = templ->width0;

	return &buf->b.b;
}

static struct pipe_resource *si_resource_create(struct pipe_screen *screen,
						const struct pipe_resource *templ)
{
	if (templ->target == PIPE_BUFFER) {
		return si_buffer_create(screen, templ, 256);
	} else {
		return si_texture_create(screen, templ);
	}
}

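/* resource_commit (used for sparse buffers): commit or de-commit the physical
 * backing of the given range after flushing and synchronizing both rings. */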
static bool si_resource_commit(struct pipe_context *pctx,
			       struct pipe_resource *resource,
			       unsigned level, struct pipe_box *box,
			       bool commit)
{
	struct si_context *ctx = (struct si_context *)pctx;
	struct si_resource *res = si_resource(resource);

	/*
	 * Since buffer commitment changes cannot be pipelined, we need to
	 * (a) flush any pending commands that refer to the buffer we're about
	 *     to change, and
	 * (b) wait for threaded submit to finish, including those that were
	 *     triggered by some other, earlier operation.
	 */
	if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs,
					       res->buf, RADEON_USAGE_READWRITE)) {
		si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
	}
	if (radeon_emitted(ctx->dma_cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->dma_cs,
					       res->buf, RADEON_USAGE_READWRITE)) {
		si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
	}

	ctx->ws->cs_sync_flush(ctx->dma_cs);
	ctx->ws->cs_sync_flush(ctx->gfx_cs);

	assert(resource->target == PIPE_BUFFER);

	return ctx->ws->buffer_commit(res->buf, box->x, box->width, commit);
}

void si_init_screen_buffer_functions(struct si_screen *sscreen)
{
	sscreen->b.resource_create = si_resource_create;
	sscreen->b.resource_destroy = u_resource_destroy_vtbl;
	sscreen->b.resource_from_user_memory = si_buffer_from_user_memory;
}

void si_init_buffer_functions(struct si_context *sctx)
{
	sctx->b.invalidate_resource = si_invalidate_resource;
	sctx->b.transfer_map = u_transfer_map_vtbl;
	sctx->b.transfer_flush_region = u_transfer_flush_region_vtbl;
	sctx->b.transfer_unmap = u_transfer_unmap_vtbl;
	sctx->b.texture_subdata = u_default_texture_subdata;
	sctx->b.buffer_subdata = si_buffer_subdata;
	sctx->b.resource_commit = si_resource_commit;
}
