/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "sid.h"

/* Set this if you want the ME to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define CP_DMA_SYNC		(1 << 0)

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define CP_DMA_RAW_WAIT		(1 << 1)
#define CP_DMA_DST_IS_GDS	(1 << 2)
#define CP_DMA_CLEAR		(1 << 3)
#define CP_DMA_PFP_SYNC_ME	(1 << 4)
#define CP_DMA_SRC_IS_GDS	(1 << 5)

/* The max number of bytes that can be copied per packet. */
static inline unsigned cp_dma_max_byte_count(struct si_context *sctx)
{
	unsigned max = sctx->chip_class >= GFX9 ?
		       S_414_BYTE_COUNT_GFX9(~0u) :
		       S_414_BYTE_COUNT_GFX6(~0u);

	/* make it aligned for optimal performance */
	return max & ~(SI_CPDMA_ALIGNMENT - 1);
}
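
/* A worked example, assuming SI_CPDMA_ALIGNMENT == 32 as defined in
 * si_pipe.h: on GFX6-8 the byte-count field is bits [20:0], so the raw
 * maximum is 0x1fffff; clearing the low five alignment bits gives 0x1fffe0,
 * roughly 2 MB per packet. GFX9 has a wider byte-count field, so its aligned
 * per-packet maximum is correspondingly larger.
 */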

/* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
 * a buffer. The size must fit in bits [20:0]. If CP_DMA_CLEAR is set,
 * src_va is a 32-bit clear value.
 */
static void si_emit_cp_dma(struct si_context *sctx, struct radeon_cmdbuf *cs,
			   uint64_t dst_va, uint64_t src_va, unsigned size,
			   unsigned flags, enum si_cache_policy cache_policy)
{
	uint32_t header = 0, command = 0;

	assert(size <= cp_dma_max_byte_count(sctx));
	assert(sctx->chip_class != SI || cache_policy == L2_BYPASS);

	if (sctx->chip_class >= GFX9)
		command |= S_414_BYTE_COUNT_GFX9(size);
	else
		command |= S_414_BYTE_COUNT_GFX6(size);

	/* Sync flags. */
	if (flags & CP_DMA_SYNC)
		header |= S_411_CP_SYNC(1);
	else {
		if (sctx->chip_class >= GFX9)
			command |= S_414_DISABLE_WR_CONFIRM_GFX9(1);
		else
			command |= S_414_DISABLE_WR_CONFIRM_GFX6(1);
	}

	if (flags & CP_DMA_RAW_WAIT)
		command |= S_414_RAW_WAIT(1);

	/* Src and dst flags. */
	if (sctx->chip_class >= GFX9 && !(flags & CP_DMA_CLEAR) &&
	    src_va == dst_va) {
		header |= S_411_DST_SEL(V_411_NOWHERE); /* prefetch only */
	} else if (flags & CP_DMA_DST_IS_GDS) {
		header |= S_411_DST_SEL(V_411_GDS);
		/* GDS increments the address, not CP. */
		command |= S_414_DAS(V_414_REGISTER) |
			   S_414_DAIC(V_414_NO_INCREMENT);
	} else if (sctx->chip_class >= CIK && cache_policy != L2_BYPASS) {
		header |= S_411_DST_SEL(V_411_DST_ADDR_TC_L2) |
			  S_500_DST_CACHE_POLICY(cache_policy == L2_STREAM);
	}

	if (flags & CP_DMA_CLEAR) {
		header |= S_411_SRC_SEL(V_411_DATA);
	} else if (flags & CP_DMA_SRC_IS_GDS) {
		header |= S_411_SRC_SEL(V_411_GDS);
		/* Both of these are required for GDS. It does increment the address. */
		command |= S_414_SAS(V_414_REGISTER) |
			   S_414_SAIC(V_414_NO_INCREMENT);
	} else if (sctx->chip_class >= CIK && cache_policy != L2_BYPASS) {
		header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2) |
			  S_500_SRC_CACHE_POLICY(cache_policy == L2_STREAM);
	}

	if (sctx->chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
		radeon_emit(cs, header);
		radeon_emit(cs, src_va);	/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, src_va >> 32);	/* SRC_ADDR_HI [31:0] */
		radeon_emit(cs, dst_va);	/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, dst_va >> 32);	/* DST_ADDR_HI [31:0] */
		radeon_emit(cs, command);
	} else {
		header |= S_411_SRC_ADDR_HI(src_va >> 32);

		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, src_va);	/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, header);	/* SRC_ADDR_HI [15:0] + flags. */
		radeon_emit(cs, dst_va);	/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
		radeon_emit(cs, command);
	}

	/* CP DMA is executed in ME, but index buffers are read by PFP.
	 * This ensures that ME (CP DMA) is idle before PFP starts fetching
	 * indices. If we wanted to execute CP DMA in PFP, this packet
	 * should precede it.
	 */
	if (sctx->has_graphics && flags & CP_DMA_PFP_SYNC_ME) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
		radeon_emit(cs, 0);
	}
}

void si_cp_dma_wait_for_idle(struct si_context *sctx)
{
	/* Issue a dummy DMA that copies zero bytes.
	 *
	 * The DMA engine will see that there's no work to do and skip this
	 * DMA request, however, the CP will see the sync flag and still wait
	 * for all DMAs to complete.
	 */
	si_emit_cp_dma(sctx, sctx->gfx_cs, 0, 0, 0, CP_DMA_SYNC, L2_BYPASS);
}

static void si_cp_dma_prepare(struct si_context *sctx, struct pipe_resource *dst,
			      struct pipe_resource *src, unsigned byte_count,
			      uint64_t remaining_size, unsigned user_flags,
			      enum si_coherency coher, bool *is_first,
			      unsigned *packet_flags)
{
	/* Fast exit for a CPDMA prefetch. */
	if ((user_flags & SI_CPDMA_SKIP_ALL) == SI_CPDMA_SKIP_ALL) {
		*is_first = false;
		return;
	}

	if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
		/* Count memory usage so that need_cs_space can take it into account. */
		if (dst)
			si_context_add_resource_size(sctx, dst);
		if (src)
			si_context_add_resource_size(sctx, src);
	}

	if (!(user_flags & SI_CPDMA_SKIP_CHECK_CS_SPACE))
		si_need_gfx_cs_space(sctx);

	/* This must be done after need_cs_space. */
	if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
		if (dst)
			radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
						  si_resource(dst),
						  RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
		if (src)
			radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
						  si_resource(src),
						  RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
	}

	/* Flush the caches for the first copy only.
	 * Also wait for the previous CP DMA operations.
	 */
	if (!(user_flags & SI_CPDMA_SKIP_GFX_SYNC) && sctx->flags)
		si_emit_cache_flush(sctx);

	if (!(user_flags & SI_CPDMA_SKIP_SYNC_BEFORE) && *is_first &&
	    !(*packet_flags & CP_DMA_CLEAR))
		*packet_flags |= CP_DMA_RAW_WAIT;

	*is_first = false;

	/* Do the synchronization after the last dma, so that all data
	 * is written to memory.
	 */
	if (!(user_flags & SI_CPDMA_SKIP_SYNC_AFTER) &&
	    byte_count == remaining_size) {
		*packet_flags |= CP_DMA_SYNC;

		if (coher == SI_COHERENCY_SHADER)
			*packet_flags |= CP_DMA_PFP_SYNC_ME;
	}
}

void si_cp_dma_clear_buffer(struct si_context *sctx, struct radeon_cmdbuf *cs,
			    struct pipe_resource *dst, uint64_t offset,
			    uint64_t size, unsigned value, unsigned user_flags,
			    enum si_coherency coher, enum si_cache_policy cache_policy)
{
	struct si_resource *sdst = si_resource(dst);
	uint64_t va = (sdst ? sdst->gpu_address : 0) + offset;
	bool is_first = true;

	assert(size && size % 4 == 0);

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	if (sdst)
		util_range_add(&sdst->valid_buffer_range, offset, offset + size);

	/* Flush the caches. */
	if (sdst && !(user_flags & SI_CPDMA_SKIP_GFX_SYNC)) {
		sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
			       SI_CONTEXT_CS_PARTIAL_FLUSH |
			       si_get_flush_flags(sctx, coher, cache_policy);
	}

	while (size) {
		unsigned byte_count = MIN2(size, cp_dma_max_byte_count(sctx));
		unsigned dma_flags = CP_DMA_CLEAR | (sdst ? 0 : CP_DMA_DST_IS_GDS);

		si_cp_dma_prepare(sctx, dst, NULL, byte_count, size, user_flags,
				  coher, &is_first, &dma_flags);

		/* Emit the clear packet. */
		si_emit_cp_dma(sctx, cs, va, value, byte_count, dma_flags, cache_policy);

		size -= byte_count;
		va += byte_count;
	}

	if (sdst && cache_policy != L2_BYPASS)
		sdst->TC_L2_dirty = true;

	/* If it's not a framebuffer fast clear... */
	if (coher == SI_COHERENCY_SHADER)
		sctx->num_cp_dma_calls++;
}
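
/* Illustrative usage (a sketch, not a call taken from this file): zero-fill
 * the first 256 bytes of a buffer through L2, letting the helper split the
 * range into packets and tack the sync onto the last one:
 *
 *	si_cp_dma_clear_buffer(sctx, sctx->gfx_cs, buf, 0, 256, 0,
 *			       0, SI_COHERENCY_SHADER, L2_LRU);
 */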

/**
 * Realign the CP DMA engine. This must be done after a copy with an unaligned
 * size.
 *
 * \param size  Remaining size to the CP DMA alignment.
 */
static void si_cp_dma_realign_engine(struct si_context *sctx, unsigned size,
				     unsigned user_flags, enum si_coherency coher,
				     enum si_cache_policy cache_policy,
				     bool *is_first)
{
	uint64_t va;
	unsigned dma_flags = 0;
	unsigned scratch_size = SI_CPDMA_ALIGNMENT * 2;

	assert(size < SI_CPDMA_ALIGNMENT);

	/* Use the scratch buffer as the dummy buffer. The 3D engine should be
	 * idle at this point.
	 */
	if (!sctx->scratch_buffer ||
	    sctx->scratch_buffer->b.b.width0 < scratch_size) {
		si_resource_reference(&sctx->scratch_buffer, NULL);
		sctx->scratch_buffer =
			si_aligned_buffer_create(&sctx->screen->b,
						 SI_RESOURCE_FLAG_UNMAPPABLE,
						 PIPE_USAGE_DEFAULT,
						 scratch_size, 256);
		if (!sctx->scratch_buffer)
			return;

		si_mark_atom_dirty(sctx, &sctx->atoms.s.scratch_state);
	}

	si_cp_dma_prepare(sctx, &sctx->scratch_buffer->b.b,
			  &sctx->scratch_buffer->b.b, size, size, user_flags,
			  coher, is_first, &dma_flags);

	va = sctx->scratch_buffer->gpu_address;
	si_emit_cp_dma(sctx, sctx->gfx_cs, va, va + SI_CPDMA_ALIGNMENT, size, dma_flags,
		       cache_policy);
}

/**
 * Do memcpy between buffers using CP DMA.
 * If src or dst is NULL, it means read or write GDS, respectively.
 *
 * \param user_flags	bitmask of SI_CPDMA_*
 */
void si_cp_dma_copy_buffer(struct si_context *sctx,
			   struct pipe_resource *dst, struct pipe_resource *src,
			   uint64_t dst_offset, uint64_t src_offset, unsigned size,
			   unsigned user_flags, enum si_coherency coher,
			   enum si_cache_policy cache_policy)
{
	uint64_t main_dst_offset, main_src_offset;
	unsigned skipped_size = 0;
	unsigned realign_size = 0;
	unsigned gds_flags = (dst ? 0 : CP_DMA_DST_IS_GDS) |
			     (src ? 0 : CP_DMA_SRC_IS_GDS);
	bool is_first = true;

	assert(size);

	if (dst) {
		/* Skip this for the L2 prefetch. */
		if (dst != src || dst_offset != src_offset) {
			/* Mark the buffer range of destination as valid (initialized),
			 * so that transfer_map knows it should wait for the GPU when mapping
			 * that range. */
			util_range_add(&si_resource(dst)->valid_buffer_range, dst_offset,
				       dst_offset + size);
		}

		dst_offset += si_resource(dst)->gpu_address;
	}
	if (src)
		src_offset += si_resource(src)->gpu_address;

	/* The workarounds aren't needed on Fiji and beyond. */
	if (sctx->family <= CHIP_CARRIZO ||
	    sctx->family == CHIP_STONEY) {
		/* If the size is not aligned, we must add a dummy copy at the end
		 * just to align the internal counter. Otherwise, the DMA engine
		 * would slow down by an order of magnitude for following copies.
		 */
		if (size % SI_CPDMA_ALIGNMENT)
			realign_size = SI_CPDMA_ALIGNMENT - (size % SI_CPDMA_ALIGNMENT);

		/* If the copy begins unaligned, we must start copying from the next
		 * aligned block and the skipped part should be copied after everything
		 * else has been copied. Only the src alignment matters, not dst.
		 *
		 * GDS doesn't need the source address to be aligned.
		 */
		if (src && src_offset % SI_CPDMA_ALIGNMENT) {
			skipped_size = SI_CPDMA_ALIGNMENT - (src_offset % SI_CPDMA_ALIGNMENT);
			/* The main part will be skipped if the size is too small. */
			skipped_size = MIN2(skipped_size, size);
			size -= skipped_size;
		}
	}
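
	/* Worked example of the two workarounds above, assuming
	 * SI_CPDMA_ALIGNMENT == 32: for src_offset = 13 and size = 100,
	 * realign_size = 32 - (100 % 32) = 28 and skipped_size = 32 - 13 = 19,
	 * so the main loop copies the remaining 81 bytes from the aligned
	 * source offset, the 19 skipped bytes are copied afterwards, and a
	 * 28-byte dummy copy realigns the engine.
	 */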

	/* Flush the caches. */
	if ((dst || src) && !(user_flags & SI_CPDMA_SKIP_GFX_SYNC)) {
		sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
			       SI_CONTEXT_CS_PARTIAL_FLUSH |
			       si_get_flush_flags(sctx, coher, cache_policy);
	}

	/* This is the main part doing the copying. Src is always aligned. */
	main_dst_offset = dst_offset + skipped_size;
	main_src_offset = src_offset + skipped_size;

	while (size) {
		unsigned byte_count = MIN2(size, cp_dma_max_byte_count(sctx));
		unsigned dma_flags = gds_flags;

		si_cp_dma_prepare(sctx, dst, src, byte_count,
				  size + skipped_size + realign_size,
				  user_flags, coher, &is_first, &dma_flags);

		si_emit_cp_dma(sctx, sctx->gfx_cs, main_dst_offset, main_src_offset,
			       byte_count, dma_flags, cache_policy);

		size -= byte_count;
		main_src_offset += byte_count;
		main_dst_offset += byte_count;
	}

	/* Copy the part we skipped because src wasn't aligned. */
	if (skipped_size) {
		unsigned dma_flags = gds_flags;

		si_cp_dma_prepare(sctx, dst, src, skipped_size,
				  skipped_size + realign_size, user_flags,
				  coher, &is_first, &dma_flags);

		si_emit_cp_dma(sctx, sctx->gfx_cs, dst_offset, src_offset, skipped_size,
			       dma_flags, cache_policy);
	}

	/* Finally, realign the engine if the size wasn't aligned. */
	if (realign_size) {
		si_cp_dma_realign_engine(sctx, realign_size, user_flags, coher,
					 cache_policy, &is_first);
	}

	if (dst && cache_policy != L2_BYPASS)
		si_resource(dst)->TC_L2_dirty = true;

	/* If it's not a prefetch or GDS copy... */
	if (dst && src && (dst != src || dst_offset != src_offset))
		sctx->num_cp_dma_calls++;
}

void cik_prefetch_TC_L2_async(struct si_context *sctx, struct pipe_resource *buf,
			      uint64_t offset, unsigned size)
{
	assert(sctx->chip_class >= CIK);

	si_cp_dma_copy_buffer(sctx, buf, buf, offset, offset, size,
			      SI_CPDMA_SKIP_ALL, SI_COHERENCY_SHADER, L2_LRU);
}
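
/* Note that a copy where dst == src and dst_offset == src_offset is treated
 * by si_emit_cp_dma() on GFX9 as a prefetch (DST_SEL = NOWHERE), so the data
 * is pulled into L2 without being written anywhere; on CIK-VI with L2_LRU,
 * the buffer is simply copied onto itself through L2.
 */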

static void cik_prefetch_shader_async(struct si_context *sctx,
				      struct si_pm4_state *state)
{
	struct pipe_resource *bo = &state->bo[0]->b.b;
	assert(state->nbo == 1);

	cik_prefetch_TC_L2_async(sctx, bo, 0, bo->width0);
}

static void cik_prefetch_VBO_descriptors(struct si_context *sctx)
{
	if (!sctx->vertex_elements || !sctx->vertex_elements->desc_list_byte_size)
		return;

	cik_prefetch_TC_L2_async(sctx, &sctx->vb_descriptors_buffer->b.b,
				 sctx->vb_descriptors_offset,
				 sctx->vertex_elements->desc_list_byte_size);
}

/**
 * Prefetch shaders and VBO descriptors.
 *
 * \param vertex_stage_only  Whether only the API VS and VBO descriptors
 *                           should be prefetched.
 */
void cik_emit_prefetch_L2(struct si_context *sctx, bool vertex_stage_only)
{
	unsigned mask = sctx->prefetch_L2_mask;
	assert(mask);

	/* Prefetch shaders and VBO descriptors to TC L2. */
	if (sctx->chip_class >= GFX9) {
		/* Choose the right spot for the VBO prefetch. */
		if (sctx->tes_shader.cso) {
			if (mask & SI_PREFETCH_HS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
			if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (vertex_stage_only) {
				sctx->prefetch_L2_mask &= ~(SI_PREFETCH_HS |
							    SI_PREFETCH_VBO_DESCRIPTORS);
				return;
			}

			if (mask & SI_PREFETCH_GS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
			if (mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
		} else if (sctx->gs_shader.cso) {
			if (mask & SI_PREFETCH_GS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
			if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (vertex_stage_only) {
				sctx->prefetch_L2_mask &= ~(SI_PREFETCH_GS |
							    SI_PREFETCH_VBO_DESCRIPTORS);
				return;
			}

			if (mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
		} else {
			if (mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
			if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (vertex_stage_only) {
				sctx->prefetch_L2_mask &= ~(SI_PREFETCH_VS |
							    SI_PREFETCH_VBO_DESCRIPTORS);
				return;
			}
		}
	} else {
		/* SI-CI-VI */
		/* Choose the right spot for the VBO prefetch. */
		if (sctx->tes_shader.cso) {
			if (mask & SI_PREFETCH_LS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.ls);
			if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (vertex_stage_only) {
				sctx->prefetch_L2_mask &= ~(SI_PREFETCH_LS |
							    SI_PREFETCH_VBO_DESCRIPTORS);
				return;
			}

			if (mask & SI_PREFETCH_HS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
			if (mask & SI_PREFETCH_ES)
				cik_prefetch_shader_async(sctx, sctx->queued.named.es);
			if (mask & SI_PREFETCH_GS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
			if (mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
		} else if (sctx->gs_shader.cso) {
			if (mask & SI_PREFETCH_ES)
				cik_prefetch_shader_async(sctx, sctx->queued.named.es);
			if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (vertex_stage_only) {
				sctx->prefetch_L2_mask &= ~(SI_PREFETCH_ES |
							    SI_PREFETCH_VBO_DESCRIPTORS);
				return;
			}

			if (mask & SI_PREFETCH_GS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
			if (mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
		} else {
			if (mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
			if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (vertex_stage_only) {
				sctx->prefetch_L2_mask &= ~(SI_PREFETCH_VS |
							    SI_PREFETCH_VBO_DESCRIPTORS);
				return;
			}
		}
	}

	if (mask & SI_PREFETCH_PS)
		cik_prefetch_shader_async(sctx, sctx->queued.named.ps);

	sctx->prefetch_L2_mask = 0;
}

void si_test_gds(struct si_context *sctx)
{
	struct pipe_context *ctx = &sctx->b;
	struct pipe_resource *src, *dst;
	unsigned r[4] = {};
	unsigned offset = debug_get_num_option("OFFSET", 16);

	src = pipe_buffer_create(ctx->screen, 0, PIPE_USAGE_DEFAULT, 16);
	dst = pipe_buffer_create(ctx->screen, 0, PIPE_USAGE_DEFAULT, 16);
	si_cp_dma_clear_buffer(sctx, sctx->gfx_cs, src, 0, 4, 0xabcdef01, 0, SI_COHERENCY_SHADER, L2_BYPASS);
	si_cp_dma_clear_buffer(sctx, sctx->gfx_cs, src, 4, 4, 0x23456789, 0, SI_COHERENCY_SHADER, L2_BYPASS);
	si_cp_dma_clear_buffer(sctx, sctx->gfx_cs, src, 8, 4, 0x87654321, 0, SI_COHERENCY_SHADER, L2_BYPASS);
	si_cp_dma_clear_buffer(sctx, sctx->gfx_cs, src, 12, 4, 0xfedcba98, 0, SI_COHERENCY_SHADER, L2_BYPASS);
	si_cp_dma_clear_buffer(sctx, sctx->gfx_cs, dst, 0, 16, 0xdeadbeef, 0, SI_COHERENCY_SHADER, L2_BYPASS);

	si_cp_dma_copy_buffer(sctx, NULL, src, offset, 0, 16, 0, SI_COHERENCY_NONE, L2_BYPASS);
	si_cp_dma_copy_buffer(sctx, dst, NULL, 0, offset, 16, 0, SI_COHERENCY_NONE, L2_BYPASS);

	pipe_buffer_read(ctx, dst, 0, sizeof(r), r);
	printf("GDS copy  = %08x %08x %08x %08x -> %s\n", r[0], r[1], r[2], r[3],
	       r[0] == 0xabcdef01 && r[1] == 0x23456789 &&
	       r[2] == 0x87654321 && r[3] == 0xfedcba98 ? "pass" : "fail");

	si_cp_dma_clear_buffer(sctx, sctx->gfx_cs, NULL, offset, 16, 0xc1ea4146, 0, SI_COHERENCY_NONE, L2_BYPASS);
	si_cp_dma_copy_buffer(sctx, dst, NULL, 0, offset, 16, 0, SI_COHERENCY_NONE, L2_BYPASS);

	pipe_buffer_read(ctx, dst, 0, sizeof(r), r);
	printf("GDS clear = %08x %08x %08x %08x -> %s\n", r[0], r[1], r[2], r[3],
	       r[0] == 0xc1ea4146 && r[1] == 0xc1ea4146 &&
	       r[2] == 0xc1ea4146 && r[3] == 0xc1ea4146 ? "pass" : "fail");

	pipe_resource_reference(&src, NULL);
	pipe_resource_reference(&dst, NULL);
	exit(0);
}

void si_cp_write_data(struct si_context *sctx, struct si_resource *buf,
		      unsigned offset, unsigned size, unsigned dst_sel,
		      unsigned engine, const void *data)
{
	struct radeon_cmdbuf *cs = sctx->gfx_cs;

	assert(offset % 4 == 0);
	assert(size % 4 == 0);

	if (sctx->chip_class == SI && dst_sel == V_370_MEM)
		dst_sel = V_370_MEM_GRBM;

	radeon_add_to_buffer_list(sctx, cs, buf,
				  RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
	uint64_t va = buf->gpu_address + offset;

	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + size/4, 0));
	radeon_emit(cs, S_370_DST_SEL(dst_sel) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(engine));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit_array(cs, (const uint32_t*)data, size/4);
}
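
/* Illustrative usage (a sketch): write a single dword from the ME, assuming
 * V_370_MEM and V_370_ME are the destination/engine selectors used with
 * WRITE_DATA elsewhere in the driver:
 *
 *	uint32_t value = 0xdeadbeef;
 *	si_cp_write_data(sctx, buf, 0, 4, V_370_MEM, V_370_ME, &value);
 */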

void si_cp_copy_data(struct si_context *sctx,
		     unsigned dst_sel, struct si_resource *dst, unsigned dst_offset,
		     unsigned src_sel, struct si_resource *src, unsigned src_offset)
{
	struct radeon_cmdbuf *cs = sctx->gfx_cs;

	if (dst) {
		radeon_add_to_buffer_list(sctx, cs, dst,
					  RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
	}
	if (src) {
		radeon_add_to_buffer_list(sctx, cs, src,
					  RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
	}

	uint64_t dst_va = (dst ? dst->gpu_address : 0ull) + dst_offset;
	uint64_t src_va = (src ? src->gpu_address : 0ull) + src_offset;

	radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
	radeon_emit(cs, COPY_DATA_SRC_SEL(src_sel) |
			COPY_DATA_DST_SEL(dst_sel) |
			COPY_DATA_WR_CONFIRM);
	radeon_emit(cs, src_va);
	radeon_emit(cs, src_va >> 32);
	radeon_emit(cs, dst_va);
	radeon_emit(cs, dst_va >> 32);
}
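/* Illustrative usage (a sketch): copy one dword between two buffers entirely
 * on the CP. COPY_DATA_SRC_MEM and COPY_DATA_DST_MEM are assumed to be the
 * memory selectors declared alongside COPY_DATA_SRC_SEL/COPY_DATA_DST_SEL:
 *
 *	si_cp_copy_data(sctx, COPY_DATA_DST_MEM, dst_buf, 0,
 *			COPY_DATA_SRC_MEM, src_buf, 0);
 */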