1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 */
25
26/* This file implements randomized SDMA texture blit tests. */
27
28#include "si_pipe.h"
29#include "util/u_surface.h"
30#include "util/rand_xor.h"
31
/* PRNG state for generating random pixel data (seeded in si_test_dma). */
static uint64_t seed_xorshift128plus[2];

/* xorshift128+ produces 8 random bytes (one uint64_t) per call; strides are
 * aligned to this so rows can be filled 8 bytes at a time. */
#define RAND_NUM_SIZE 8
35
36/* The GPU blits are emulated on the CPU using these CPU textures. */
37
/* CPU-side mirror of a GPU texture; holds the reference pixels that GPU
 * blit results are compared against. */
struct cpu_texture {
	uint8_t *ptr;		/* malloc'd pixel storage */
	uint64_t size;		/* total allocation size in bytes */
	uint64_t layer_stride;	/* bytes between consecutive array layers */
	unsigned stride;	/* bytes between consecutive rows */
};
44
45static void alloc_cpu_texture(struct cpu_texture *tex,
46			      struct pipe_resource *templ, int bpp)
47{
48	tex->stride = align(templ->width0 * bpp, RAND_NUM_SIZE);
49	tex->layer_stride = (uint64_t)tex->stride * templ->height0;
50	tex->size = tex->layer_stride * templ->array_size;
51	tex->ptr = malloc(tex->size);
52	assert(tex->ptr);
53}
54
55static void set_random_pixels(struct pipe_context *ctx,
56			      struct pipe_resource *tex,
57			      struct cpu_texture *cpu)
58{
59	struct pipe_transfer *t;
60	uint8_t *map;
61	int x,y,z;
62
63	map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_TRANSFER_WRITE,
64				   0, 0, 0, tex->width0, tex->height0,
65				   tex->array_size, &t);
66	assert(map);
67
68	for (z = 0; z < tex->array_size; z++) {
69		for (y = 0; y < tex->height0; y++) {
70			uint64_t *ptr = (uint64_t*)
71				(map + t->layer_stride*z + t->stride*y);
72			uint64_t *ptr_cpu = (uint64_t*)
73				(cpu->ptr + cpu->layer_stride*z + cpu->stride*y);
74			unsigned size = cpu->stride / RAND_NUM_SIZE;
75
76			assert(t->stride % RAND_NUM_SIZE == 0);
77			assert(cpu->stride % RAND_NUM_SIZE == 0);
78
79			for (x = 0; x < size; x++) {
80				*ptr++ = *ptr_cpu++ =
81					rand_xorshift128plus(seed_xorshift128plus);
82			}
83		}
84	}
85
86	pipe_transfer_unmap(ctx, t);
87}
88
89static bool compare_textures(struct pipe_context *ctx,
90			     struct pipe_resource *tex,
91			     struct cpu_texture *cpu, int bpp)
92{
93	struct pipe_transfer *t;
94	uint8_t *map;
95	int y,z;
96	bool pass = true;
97
98	map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_TRANSFER_READ,
99				   0, 0, 0, tex->width0, tex->height0,
100				   tex->array_size, &t);
101	assert(map);
102
103	for (z = 0; z < tex->array_size; z++) {
104		for (y = 0; y < tex->height0; y++) {
105			uint8_t *ptr = map + t->layer_stride*z + t->stride*y;
106			uint8_t *cpu_ptr = cpu->ptr +
107					   cpu->layer_stride*z + cpu->stride*y;
108
109			if (memcmp(ptr, cpu_ptr, tex->width0 * bpp)) {
110				pass = false;
111				goto done;
112			}
113		}
114	}
115done:
116	pipe_transfer_unmap(ctx, t);
117	return pass;
118}
119
120static enum pipe_format get_format_from_bpp(int bpp)
121{
122	switch (bpp) {
123	case 1:
124		return PIPE_FORMAT_R8_UINT;
125	case 2:
126		return PIPE_FORMAT_R16_UINT;
127	case 4:
128		return PIPE_FORMAT_R32_UINT;
129	case 8:
130		return PIPE_FORMAT_R32G32_UINT;
131	case 16:
132		return PIPE_FORMAT_R32G32B32A32_UINT;
133	default:
134		assert(0);
135		return PIPE_FORMAT_NONE;
136	}
137}
138
139static const char *array_mode_to_string(struct si_screen *sscreen,
140					struct radeon_surf *surf)
141{
142	if (sscreen->info.chip_class >= GFX9) {
143		switch (surf->u.gfx9.surf.swizzle_mode) {
144		case 0:
145			return "  LINEAR";
146		case 21:
147			return " 4KB_S_X";
148		case 22:
149			return " 4KB_D_X";
150		case 25:
151			return "64KB_S_X";
152		case 26:
153			return "64KB_D_X";
154		default:
155			printf("Unhandled swizzle mode = %u\n",
156			       surf->u.gfx9.surf.swizzle_mode);
157			return " UNKNOWN";
158		}
159	} else {
160		switch (surf->u.legacy.level[0].mode) {
161		case RADEON_SURF_MODE_LINEAR_ALIGNED:
162			return "LINEAR_ALIGNED";
163		case RADEON_SURF_MODE_1D:
164			return "1D_TILED_THIN1";
165		case RADEON_SURF_MODE_2D:
166			return "2D_TILED_THIN1";
167		default:
168			assert(0);
169			return "       UNKNOWN";
170		}
171	}
172}
173
/* Pick a random upper bound for texture dimensions, biasing the
 * distribution toward interesting cases: 1/4 of the time the hardware
 * maximum, 1/4 a small size (tends to get 1D tiling), 2/4 a common
 * medium size.
 */
static unsigned generate_max_tex_side(unsigned max_tex_side)
{
	unsigned bucket = rand() % 4;

	if (bucket == 0)
		return max_tex_side;	/* large sizes */
	if (bucket == 1)
		return 128;		/* small, likely 1D-tiled */
	return 2048;			/* common sizes */
}
188
/**
 * Run the randomized SDMA/blit stress test.
 *
 * Each iteration builds a random src/dst texture pair (plus CPU mirrors),
 * fills src with random pixels, performs either one whole-surface copy or
 * many random sub-rectangle copies on both the GPU (via sctx->dma_copy) and
 * the CPU (via util_copy_box), then reads back the GPU dst and compares it
 * with the CPU dst. A pass/fail line is printed per iteration. The function
 * never returns to the caller: it calls exit(0) after the (practically
 * unbounded) iteration count is exhausted.
 */
void si_test_dma(struct si_screen *sscreen)
{
	struct pipe_screen *screen = &sscreen->b;
	struct pipe_context *ctx = screen->context_create(screen, NULL, 0);
	struct si_context *sctx = (struct si_context*)ctx;
	uint64_t max_alloc_size;
	unsigned i, iterations, num_partial_copies, max_levels, max_tex_side;
	unsigned num_pass = 0, num_fail = 0;

	/* Derive the maximum 2D texture side from the mip level cap. */
	max_levels = screen->get_param(screen, PIPE_CAP_MAX_TEXTURE_2D_LEVELS);
	max_tex_side = 1 << (max_levels - 1);

	/* Max 128 MB allowed for both textures. */
	max_alloc_size = 128 * 1024 * 1024;

	/* the seed for random test parameters */
	srand(0x9b47d95b);
	/* the seed for random pixel data */
	s_rand_xorshift128plus(seed_xorshift128plus, false);

	iterations = 1000000000; /* just kill it when you are bored */
	num_partial_copies = 30;

	/* These parameters are randomly generated per test:
	 * - whether to do one whole-surface copy or N partial copies per test
	 * - which tiling modes to use (LINEAR_ALIGNED, 1D, 2D)
	 * - which texture dimensions to use
	 * - whether to use VRAM (all tiling modes) and GTT (staging, linear
	 *   only) allocations
	 * - random initial pixels in src
	 * - generate random subrectangle copies for partial blits
	 */
	for (i = 0; i < iterations; i++) {
		struct pipe_resource tsrc = {}, tdst = {}, *src, *dst;
		struct si_texture *sdst;
		struct si_texture *ssrc;
		struct cpu_texture src_cpu, dst_cpu;
		unsigned bpp, max_width, max_height, max_depth, j, num;
		unsigned gfx_blits = 0, dma_blits = 0, max_tex_side_gen;
		unsigned max_tex_layers;
		bool pass;
		bool do_partial_copies = rand() & 1;

		/* generate a random test case */
		tsrc.target = tdst.target = PIPE_TEXTURE_2D_ARRAY;
		tsrc.depth0 = tdst.depth0 = 1;

		/* bpp in {1, 2, 4, 8, 16}; picks the matching UINT format */
		bpp = 1 << (rand() % 5);
		tsrc.format = tdst.format = get_format_from_bpp(bpp);

		max_tex_side_gen = generate_max_tex_side(max_tex_side);
		/* mostly single-layer; 1/4 of the time up to 5 layers */
		max_tex_layers = rand() % 4 ? 1 : 5;

		tsrc.width0 = (rand() % max_tex_side_gen) + 1;
		tsrc.height0 = (rand() % max_tex_side_gen) + 1;
		tsrc.array_size = (rand() % max_tex_layers) + 1;

		/* Have a 1/4 chance of getting power-of-two dimensions. */
		if (rand() % 4 == 0) {
			tsrc.width0 = util_next_power_of_two(tsrc.width0);
			tsrc.height0 = util_next_power_of_two(tsrc.height0);
		}

		if (!do_partial_copies) {
			/* whole-surface copies only, same dimensions */
			tdst = tsrc;
		} else {
			max_tex_side_gen = generate_max_tex_side(max_tex_side);
			max_tex_layers = rand() % 4 ? 1 : 5;

			/* many partial copies, dimensions can be different */
			tdst.width0 = (rand() % max_tex_side_gen) + 1;
			tdst.height0 = (rand() % max_tex_side_gen) + 1;
			tdst.array_size = (rand() % max_tex_layers) + 1;

			/* Have a 1/4 chance of getting power-of-two dimensions. */
			if (rand() % 4 == 0) {
				tdst.width0 = util_next_power_of_two(tdst.width0);
				tdst.height0 = util_next_power_of_two(tdst.height0);
			}
		}

		/* check texture sizes */
		if ((uint64_t)tsrc.width0 * tsrc.height0 * tsrc.array_size * bpp +
		    (uint64_t)tdst.width0 * tdst.height0 * tdst.array_size * bpp >
		    max_alloc_size) {
			/* too large, try again */
			i--;
			continue;
		}

		/* VRAM + the tiling mode depends on dimensions (3/4 of cases),
		 * or GTT + linear only (1/4 of cases)
		 */
		tsrc.usage = rand() % 4 ? PIPE_USAGE_DEFAULT : PIPE_USAGE_STAGING;
		tdst.usage = rand() % 4 ? PIPE_USAGE_DEFAULT : PIPE_USAGE_STAGING;

		/* Allocate textures (both the GPU and CPU copies).
		 * The CPU will emulate what the GPU should be doing.
		 */
		src = screen->resource_create(screen, &tsrc);
		dst = screen->resource_create(screen, &tdst);
		assert(src);
		assert(dst);
		sdst = (struct si_texture*)dst;
		ssrc = (struct si_texture*)src;
		alloc_cpu_texture(&src_cpu, &tsrc, bpp);
		alloc_cpu_texture(&dst_cpu, &tdst, bpp);

		printf("%4u: dst = (%5u x %5u x %u, %s), "
		       " src = (%5u x %5u x %u, %s), bpp = %2u, ",
		       i, tdst.width0, tdst.height0, tdst.array_size,
		       array_mode_to_string(sscreen, &sdst->surface),
		       tsrc.width0, tsrc.height0, tsrc.array_size,
		       array_mode_to_string(sscreen, &ssrc->surface), bpp);
		fflush(stdout);

		/* set src pixels */
		set_random_pixels(ctx, src, &src_cpu);

		/* clear dst pixels */
		uint32_t zero = 0;
		si_clear_buffer(sctx, dst, 0, sdst->surface.surf_size, &zero, 4,
		                SI_COHERENCY_SHADER, false);
		memset(dst_cpu.ptr, 0, dst_cpu.layer_stride * tdst.array_size);

		/* preparation: clamp copy extents to the smaller texture */
		max_width = MIN2(tsrc.width0, tdst.width0);
		max_height = MIN2(tsrc.height0, tdst.height0);
		max_depth = MIN2(tsrc.array_size, tdst.array_size);

		num = do_partial_copies ? num_partial_copies : 1;
		for (j = 0; j < num; j++) {
			int width, height, depth;
			int srcx, srcy, srcz, dstx, dsty, dstz;
			struct pipe_box box;
			/* Snapshot call counters so we can tell below which
			 * engine (GFX draw vs DMA) serviced this blit. */
			unsigned old_num_draw_calls = sctx->num_draw_calls;
			unsigned old_num_dma_calls = sctx->num_dma_calls;

			if (!do_partial_copies) {
				/* copy whole src to dst */
				width = max_width;
				height = max_height;
				depth = max_depth;

				srcx = srcy = srcz = dstx = dsty = dstz = 0;
			} else {
				/* random sub-rectangle copies from src to dst */
				depth = (rand() % max_depth) + 1;
				srcz = rand() % (tsrc.array_size - depth + 1);
				dstz = rand() % (tdst.array_size - depth + 1);

				/* special code path to hit the tiled partial copies */
				if (!ssrc->surface.is_linear &&
				    !sdst->surface.is_linear &&
				    rand() & 1) {
					if (max_width < 8 || max_height < 8)
						continue;
					/* sizes and offsets aligned to 8 pixels
					 * (the & ~0x7 below) */
					width = ((rand() % (max_width / 8)) + 1) * 8;
					height = ((rand() % (max_height / 8)) + 1) * 8;

					srcx = rand() % (tsrc.width0 - width + 1) & ~0x7;
					srcy = rand() % (tsrc.height0 - height + 1) & ~0x7;

					dstx = rand() % (tdst.width0 - width + 1) & ~0x7;
					dsty = rand() % (tdst.height0 - height + 1) & ~0x7;
				} else {
					/* just make sure that it doesn't divide by zero */
					assert(max_width > 0 && max_height > 0);

					width = (rand() % max_width) + 1;
					height = (rand() % max_height) + 1;

					srcx = rand() % (tsrc.width0 - width + 1);
					srcy = rand() % (tsrc.height0 - height + 1);

					dstx = rand() % (tdst.width0 - width + 1);
					dsty = rand() % (tdst.height0 - height + 1);
				}

				/* special code path to hit out-of-bounds reads in L2T */
				if (ssrc->surface.is_linear &&
				    !sdst->surface.is_linear &&
				    rand() % 4 == 0) {
					srcx = 0;
					srcy = 0;
					srcz = 0;
				}
			}

			/* GPU copy */
			u_box_3d(srcx, srcy, srcz, width, height, depth, &box);
			sctx->dma_copy(ctx, dst, 0, dstx, dsty, dstz, src, 0, &box);

			/* See which engine was used. */
			gfx_blits += sctx->num_draw_calls > old_num_draw_calls;
			dma_blits += sctx->num_dma_calls > old_num_dma_calls;

			/* CPU copy */
			util_copy_box(dst_cpu.ptr, tdst.format, dst_cpu.stride,
				      dst_cpu.layer_stride,
				      dstx, dsty, dstz, width, height, depth,
				      src_cpu.ptr, src_cpu.stride,
				      src_cpu.layer_stride,
				      srcx, srcy, srcz);
		}

		/* GPU dst vs CPU dst: this is the actual test verdict. */
		pass = compare_textures(ctx, dst, &dst_cpu, bpp);
		if (pass)
			num_pass++;
		else
			num_fail++;

		printf("BLITs: GFX = %2u, DMA = %2u, %s [%u/%u]\n",
		       gfx_blits, dma_blits, pass ? "pass" : "fail",
		       num_pass, num_pass+num_fail);

		/* cleanup */
		pipe_resource_reference(&src, NULL);
		pipe_resource_reference(&dst, NULL);
		free(src_cpu.ptr);
		free(dst_cpu.ptr);
	}

	ctx->destroy(ctx);
	exit(0);
}
416