/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "radeonsi/si_pipe.h"
#include "radeonsi/si_query.h"
#include "util/u_format.h"
#include "util/u_log.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include "util/u_resource.h"
#include "util/u_surface.h"
#include "util/u_transfer.h"
#include "util/os_time.h"
#include <errno.h>
#include <inttypes.h>
#include "state_tracker/drm_driver.h"
#include "amd/common/sid.h"
#include "amd/common/gfx9d.h"

static enum radeon_surf_mode
si_choose_tiling(struct si_screen *sscreen,
		 const struct pipe_resource *templ, bool tc_compatible_htile);


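/* Check whether a texture copy can be handled by the SDMA engine and prepare
 * both textures for it. Returns false when the 3D (blit) path must be used
 * instead, e.g. for MSAA, depth-stencil, or DCC-compressed surfaces.
 * As a side effect, this may discard the destination's CMASK and flush the
 * source so that both textures end up in a decompressed state.
 */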
bool si_prepare_for_dma_blit(struct si_context *sctx,
			     struct si_texture *dst,
			     unsigned dst_level, unsigned dstx,
			     unsigned dsty, unsigned dstz,
			     struct si_texture *src,
			     unsigned src_level,
			     const struct pipe_box *src_box)
{
	if (!sctx->dma_cs)
		return false;

	if (dst->surface.bpe != src->surface.bpe)
		return false;

	/* MSAA: Blits don't exist in the real world. */
	if (src->buffer.b.b.nr_samples > 1 ||
	    dst->buffer.b.b.nr_samples > 1)
		return false;

	/* Depth-stencil surfaces:
	 *   When dst is linear, the DB->CB copy preserves HTILE.
	 *   When dst is tiled, the 3D path must be used to update HTILE.
	 */
	if (src->is_depth || dst->is_depth)
		return false;

	/* DCC as:
	 *   src: Use the 3D path. DCC decompression is expensive.
	 *   dst: Use the 3D path to compress the pixels with DCC.
	 */
	if (vi_dcc_enabled(src, src_level) ||
	    vi_dcc_enabled(dst, dst_level))
		return false;

	/* CMASK as:
	 *   src: Both texture and SDMA paths need decompression. Use SDMA.
	 *   dst: If overwriting the whole texture, discard CMASK and use
	 *        SDMA. Otherwise, use the 3D path.
	 */
	if (dst->cmask_buffer && dst->dirty_level_mask & (1 << dst_level)) {
		/* The CMASK clear is only enabled for the first level. */
		assert(dst_level == 0);
		if (!util_texrange_covers_whole_level(&dst->buffer.b.b, dst_level,
						      dstx, dsty, dstz, src_box->width,
						      src_box->height, src_box->depth))
			return false;

		si_texture_discard_cmask(sctx->screen, dst);
	}

	/* All requirements are met. Prepare textures for SDMA. */
	if (src->cmask_buffer && src->dirty_level_mask & (1 << src_level))
		sctx->b.flush_resource(&sctx->b, &src->buffer.b.b);

	assert(!(src->dirty_level_mask & (1 << src_level)));
	assert(!(dst->dirty_level_mask & (1 << dst_level)));

	return true;
}

/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
static void si_copy_region_with_blit(struct pipe_context *pipe,
				     struct pipe_resource *dst,
				     unsigned dst_level,
				     unsigned dstx, unsigned dsty, unsigned dstz,
				     struct pipe_resource *src,
				     unsigned src_level,
				     const struct pipe_box *src_box)
{
	struct pipe_blit_info blit;

	memset(&blit, 0, sizeof(blit));
	blit.src.resource = src;
	blit.src.format = src->format;
	blit.src.level = src_level;
	blit.src.box = *src_box;
	blit.dst.resource = dst;
	blit.dst.format = dst->format;
	blit.dst.level = dst_level;
	blit.dst.box.x = dstx;
	blit.dst.box.y = dsty;
	blit.dst.box.z = dstz;
	blit.dst.box.width = src_box->width;
	blit.dst.box.height = src_box->height;
	blit.dst.box.depth = src_box->depth;
	blit.mask = util_format_get_mask(src->format) &
		    util_format_get_mask(dst->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

	if (blit.mask) {
		pipe->blit(pipe, &blit);
	}
}

/* Copy from a full GPU texture to a transfer's staging one. */
static void si_copy_to_staging_texture(struct pipe_context *ctx, struct si_transfer *stransfer)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)stransfer;
	struct pipe_resource *dst = &stransfer->staging->b.b;
	struct pipe_resource *src = transfer->resource;

	if (src->nr_samples > 1) {
		si_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
					   src, transfer->level, &transfer->box);
		return;
	}

	sctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
		       &transfer->box);
}

/* Copy from a transfer's staging texture to a full GPU one. */
static void si_copy_from_staging_texture(struct pipe_context *ctx, struct si_transfer *stransfer)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)stransfer;
	struct pipe_resource *dst = transfer->resource;
	struct pipe_resource *src = &stransfer->staging->b.b;
	struct pipe_box sbox;

	u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);

	if (dst->nr_samples > 1) {
		si_copy_region_with_blit(ctx, dst, transfer->level,
					   transfer->box.x, transfer->box.y, transfer->box.z,
					   src, 0, &sbox);
		return;
	}

	sctx->dma_copy(ctx, dst, transfer->level,
		       transfer->box.x, transfer->box.y, transfer->box.z,
		       src, 0, &sbox);
}

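/* Return the byte offset of (box->x, box->y, box->z) at the given mipmap
 * level, and store the row and layer strides in *stride and *layer_stride.
 * If box is NULL, only the level offset (always 0 on GFX9) and the strides
 * are returned. Note that GFX9 interleaves the mipmap levels within each
 * slice, while older chips store all slices of a level contiguously.
 */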
static unsigned si_texture_get_offset(struct si_screen *sscreen,
				      struct si_texture *tex, unsigned level,
				      const struct pipe_box *box,
				      unsigned *stride,
				      unsigned *layer_stride)
{
	if (sscreen->info.chip_class >= GFX9) {
		*stride = tex->surface.u.gfx9.surf_pitch * tex->surface.bpe;
		*layer_stride = tex->surface.u.gfx9.surf_slice_size;

		if (!box)
			return 0;

		/* Each texture is an array of slices. Each slice is an array
		 * of mipmap levels. */
		return box->z * tex->surface.u.gfx9.surf_slice_size +
		       tex->surface.u.gfx9.offset[level] +
		       (box->y / tex->surface.blk_h *
			tex->surface.u.gfx9.surf_pitch +
			box->x / tex->surface.blk_w) * tex->surface.bpe;
	} else {
		*stride = tex->surface.u.legacy.level[level].nblk_x *
			  tex->surface.bpe;
		assert((uint64_t)tex->surface.u.legacy.level[level].slice_size_dw * 4 <= UINT_MAX);
		*layer_stride = (uint64_t)tex->surface.u.legacy.level[level].slice_size_dw * 4;

		if (!box)
			return tex->surface.u.legacy.level[level].offset;

		/* Each texture is an array of mipmap levels. Each level is
		 * an array of slices. */
		return tex->surface.u.legacy.level[level].offset +
		       box->z * (uint64_t)tex->surface.u.legacy.level[level].slice_size_dw * 4 +
		       (box->y / tex->surface.blk_h *
		        tex->surface.u.legacy.level[level].nblk_x +
		        box->x / tex->surface.blk_w) * tex->surface.bpe;
	}
}

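/* Compute the radeon_surf layout for a texture template. This translates
 * pipe_resource properties into RADEON_SURF_* flags (Z/S buffers,
 * TC-compatible HTILE, DCC disablement, scanout, sharing) before calling
 * the winsys surface allocator, and then applies the pitch and offset
 * overrides used by imported textures.
 */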
static int si_init_surface(struct si_screen *sscreen,
			   struct radeon_surf *surface,
			   const struct pipe_resource *ptex,
			   enum radeon_surf_mode array_mode,
			   unsigned pitch_in_bytes_override,
			   unsigned offset,
			   bool is_imported,
			   bool is_scanout,
			   bool is_flushed_depth,
			   bool tc_compatible_htile)
{
	const struct util_format_description *desc =
		util_format_description(ptex->format);
	bool is_depth, is_stencil;
	int r;
	unsigned i, bpe, flags = 0;

	is_depth = util_format_has_depth(desc);
	is_stencil = util_format_has_stencil(desc);

	if (!is_flushed_depth &&
	    ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
		bpe = 4; /* stencil is allocated separately */
	} else {
		bpe = util_format_get_blocksize(ptex->format);
		assert(util_is_power_of_two_or_zero(bpe));
	}

	if (!is_flushed_depth && is_depth) {
		flags |= RADEON_SURF_ZBUFFER;

		if (tc_compatible_htile &&
		    (sscreen->info.chip_class >= GFX9 ||
		     array_mode == RADEON_SURF_MODE_2D)) {
			/* TC-compatible HTILE only supports Z32_FLOAT.
			 * GFX9 also supports Z16_UNORM.
			 * On VI, promote Z16 to Z32. DB->CB copies will convert
			 * the format for transfers.
			 */
			if (sscreen->info.chip_class == VI)
				bpe = 4;

			flags |= RADEON_SURF_TC_COMPATIBLE_HTILE;
		}

		if (is_stencil)
			flags |= RADEON_SURF_SBUFFER;
	}

	if (sscreen->info.chip_class >= VI &&
	    (ptex->flags & SI_RESOURCE_FLAG_DISABLE_DCC ||
	     ptex->format == PIPE_FORMAT_R9G9B9E5_FLOAT ||
	     (ptex->nr_samples >= 2 && !sscreen->dcc_msaa_allowed)))
		flags |= RADEON_SURF_DISABLE_DCC;

	/* Stoney: 128bpp MSAA textures randomly fail piglit tests with DCC. */
	if (sscreen->info.family == CHIP_STONEY &&
	    bpe == 16 && ptex->nr_samples >= 2)
		flags |= RADEON_SURF_DISABLE_DCC;

	/* VI: DCC clear for 4x and 8x MSAA array textures unimplemented. */
	if (sscreen->info.chip_class == VI &&
	    ptex->nr_storage_samples >= 4 &&
	    ptex->array_size > 1)
		flags |= RADEON_SURF_DISABLE_DCC;

	/* GFX9: DCC clear for 4x and 8x MSAA textures unimplemented. */
	if (sscreen->info.chip_class >= GFX9 &&
	    ptex->nr_storage_samples >= 4)
		flags |= RADEON_SURF_DISABLE_DCC;

	if (ptex->bind & PIPE_BIND_SCANOUT || is_scanout) {
		/* This should catch bugs in gallium users setting incorrect flags. */
		assert(ptex->nr_samples <= 1 &&
		       ptex->array_size == 1 &&
		       ptex->depth0 == 1 &&
		       ptex->last_level == 0 &&
		       !(flags & RADEON_SURF_Z_OR_SBUFFER));

		flags |= RADEON_SURF_SCANOUT;
	}

	if (ptex->bind & PIPE_BIND_SHARED)
		flags |= RADEON_SURF_SHAREABLE;
	if (is_imported)
		flags |= RADEON_SURF_IMPORTED | RADEON_SURF_SHAREABLE;
	if (!(ptex->flags & SI_RESOURCE_FLAG_FORCE_MSAA_TILING))
		flags |= RADEON_SURF_OPTIMIZE_FOR_SPACE;

	r = sscreen->ws->surface_init(sscreen->ws, ptex, flags, bpe,
				      array_mode, surface);
	if (r) {
		return r;
	}

	unsigned pitch = pitch_in_bytes_override / bpe;

	if (sscreen->info.chip_class >= GFX9) {
		if (pitch) {
			surface->u.gfx9.surf_pitch = pitch;
			surface->u.gfx9.surf_slice_size =
				(uint64_t)pitch * surface->u.gfx9.surf_height * bpe;
		}
		surface->u.gfx9.surf_offset = offset;
	} else {
		if (pitch) {
			surface->u.legacy.level[0].nblk_x = pitch;
			surface->u.legacy.level[0].slice_size_dw =
				((uint64_t)pitch * surface->u.legacy.level[0].nblk_y * bpe) / 4;
		}
		if (offset) {
			for (i = 0; i < ARRAY_SIZE(surface->u.legacy.level); ++i)
				surface->u.legacy.level[i].offset += offset;
		}
	}
	return 0;
}

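/* Translate the tiling metadata of an imported buffer into a radeon_surf
 * and the array mode / scanout flag expected by si_init_surface.
 */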
static void si_get_display_metadata(struct si_screen *sscreen,
				    struct radeon_surf *surf,
				    struct radeon_bo_metadata *metadata,
				    enum radeon_surf_mode *array_mode,
				    bool *is_scanout)
{
	if (sscreen->info.chip_class >= GFX9) {
		if (metadata->u.gfx9.swizzle_mode > 0)
			*array_mode = RADEON_SURF_MODE_2D;
		else
			*array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

		*is_scanout = metadata->u.gfx9.swizzle_mode == 0 ||
			      metadata->u.gfx9.swizzle_mode % 4 == 2;

		surf->u.gfx9.surf.swizzle_mode = metadata->u.gfx9.swizzle_mode;

		if (metadata->u.gfx9.dcc_offset_256B) {
			surf->u.gfx9.display_dcc_pitch_max = metadata->u.gfx9.dcc_pitch_max;
			assert(metadata->u.gfx9.dcc_independent_64B == 1);
		}
	} else {
		surf->u.legacy.pipe_config = metadata->u.legacy.pipe_config;
		surf->u.legacy.bankw = metadata->u.legacy.bankw;
		surf->u.legacy.bankh = metadata->u.legacy.bankh;
		surf->u.legacy.tile_split = metadata->u.legacy.tile_split;
		surf->u.legacy.mtilea = metadata->u.legacy.mtilea;
		surf->u.legacy.num_banks = metadata->u.legacy.num_banks;

		if (metadata->u.legacy.macrotile == RADEON_LAYOUT_TILED)
			*array_mode = RADEON_SURF_MODE_2D;
		else if (metadata->u.legacy.microtile == RADEON_LAYOUT_TILED)
			*array_mode = RADEON_SURF_MODE_1D;
		else
			*array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

		*is_scanout = metadata->u.legacy.scanout;
	}
}

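/* Eliminate fast color clears (CMASK and DCC) by flushing the resource,
 * so that the texture contents become readable by other processes and
 * engines. The context is flushed only if a decompress pass actually ran.
 */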
void si_eliminate_fast_color_clear(struct si_context *sctx,
				   struct si_texture *tex)
{
	struct si_screen *sscreen = sctx->screen;
	struct pipe_context *ctx = &sctx->b;

	if (ctx == sscreen->aux_context)
		mtx_lock(&sscreen->aux_context_lock);

	unsigned n = sctx->num_decompress_calls;
	ctx->flush_resource(ctx, &tex->buffer.b.b);

	/* Flush only if any fast clear elimination took place. */
	if (n != sctx->num_decompress_calls)
		ctx->flush(ctx, NULL, 0);

	if (ctx == sscreen->aux_context)
		mtx_unlock(&sscreen->aux_context_lock);
}

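/* Drop the CMASK buffer of a texture, disabling fast color clears for it.
 * Callers are expected to have eliminated any fast-cleared data first, or
 * to be about to overwrite the whole texture.
 */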
void si_texture_discard_cmask(struct si_screen *sscreen,
			      struct si_texture *tex)
{
	if (!tex->cmask_buffer)
		return;

	assert(tex->buffer.b.b.nr_samples <= 1);

	/* Disable CMASK. */
	tex->cmask_base_address_reg = tex->buffer.gpu_address >> 8;
	tex->dirty_level_mask = 0;

	tex->cb_color_info &= ~S_028C70_FAST_CLEAR(1);

	if (tex->cmask_buffer != &tex->buffer)
	    si_resource_reference(&tex->cmask_buffer, NULL);

	tex->cmask_buffer = NULL;

	/* Notify all contexts about the change. */
	p_atomic_inc(&sscreen->dirty_tex_counter);
	p_atomic_inc(&sscreen->compressed_colortex_counter);
}

static bool si_can_disable_dcc(struct si_texture *tex)
{
	/* We can't disable DCC if it can be written by another process. */
	return tex->dcc_offset &&
	       (!tex->buffer.b.is_shared ||
		!(tex->buffer.external_usage & PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE));
}

static bool si_texture_discard_dcc(struct si_screen *sscreen,
				   struct si_texture *tex)
{
	if (!si_can_disable_dcc(tex)) {
		assert(tex->display_dcc_offset == 0);
		return false;
	}

	assert(tex->dcc_separate_buffer == NULL);

	/* Disable DCC. */
	tex->dcc_offset = 0;
	tex->display_dcc_offset = 0;
	tex->dcc_retile_map_offset = 0;

	/* Notify all contexts about the change. */
	p_atomic_inc(&sscreen->dirty_tex_counter);
	return true;
}

/**
 * Disable DCC for the texture (first decompress, then discard metadata).
 *
 * There is an unresolved multi-context synchronization issue between
 * screen::aux_context and the current context. If applications do this with
 * multiple contexts, it's already undefined behavior for them and we don't
 * have to worry about that. The scenario is:
 *
 * If context 1 disables DCC and context 2 has queued commands that write
 * to the texture via CB with DCC enabled, and the order of operations is
 * as follows:
 *   context 2 queues draw calls rendering to the texture, but doesn't flush
 *   context 1 disables DCC and flushes
 *   context 1 & 2 reset descriptors and FB state
 *   context 2 flushes (new compressed tiles written by the draw calls)
 *   context 1 & 2 read garbage, because DCC is disabled, yet there are
 *   compressed tiles
 *
 * \param sctx  the current context if you have one, or sscreen->aux_context
 *              if you don't.
 */
bool si_texture_disable_dcc(struct si_context *sctx,
			    struct si_texture *tex)
{
	struct si_screen *sscreen = sctx->screen;

	if (!sctx->has_graphics)
		return si_texture_discard_dcc(sscreen, tex);

	if (!si_can_disable_dcc(tex))
		return false;

	if (&sctx->b == sscreen->aux_context)
		mtx_lock(&sscreen->aux_context_lock);

	/* Decompress DCC. */
	si_decompress_dcc(sctx, tex);
	sctx->b.flush(&sctx->b, NULL, 0);

	if (&sctx->b == sscreen->aux_context)
		mtx_unlock(&sscreen->aux_context_lock);

	return si_texture_discard_dcc(sscreen, tex);
}

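/* Reallocate the texture with new bind flags (e.g. PIPE_BIND_SHARED or
 * PIPE_BIND_LINEAR) and, unless invalidate_storage is set, copy the old
 * contents over. The new storage then replaces the old one in place, so
 * existing pipe_resource pointers remain valid.
 */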
static void si_reallocate_texture_inplace(struct si_context *sctx,
					  struct si_texture *tex,
					  unsigned new_bind_flag,
					  bool invalidate_storage)
{
	struct pipe_screen *screen = sctx->b.screen;
	struct si_texture *new_tex;
	struct pipe_resource templ = tex->buffer.b.b;
	unsigned i;

	templ.bind |= new_bind_flag;

	if (tex->buffer.b.is_shared)
		return;

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		if (tex->surface.is_linear)
			return;

		/* This fails with MSAA, depth, and compressed textures. */
		if (si_choose_tiling(sctx->screen, &templ, false) !=
		    RADEON_SURF_MODE_LINEAR_ALIGNED)
			return;
	}

	new_tex = (struct si_texture*)screen->resource_create(screen, &templ);
	if (!new_tex)
		return;

	/* Copy the pixels to the new texture. */
	if (!invalidate_storage) {
		for (i = 0; i <= templ.last_level; i++) {
			struct pipe_box box;

			u_box_3d(0, 0, 0,
				 u_minify(templ.width0, i), u_minify(templ.height0, i),
				 util_num_layers(&templ, i), &box);

			sctx->dma_copy(&sctx->b, &new_tex->buffer.b.b, i, 0, 0, 0,
				       &tex->buffer.b.b, i, &box);
		}
	}

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		si_texture_discard_cmask(sctx->screen, tex);
		si_texture_discard_dcc(sctx->screen, tex);
	}

	/* Replace the structure fields of tex. */
	tex->buffer.b.b.bind = templ.bind;
	pb_reference(&tex->buffer.buf, new_tex->buffer.buf);
	tex->buffer.gpu_address = new_tex->buffer.gpu_address;
	tex->buffer.vram_usage = new_tex->buffer.vram_usage;
	tex->buffer.gart_usage = new_tex->buffer.gart_usage;
	tex->buffer.bo_size = new_tex->buffer.bo_size;
	tex->buffer.bo_alignment = new_tex->buffer.bo_alignment;
	tex->buffer.domains = new_tex->buffer.domains;
	tex->buffer.flags = new_tex->buffer.flags;

	tex->surface = new_tex->surface;
	tex->size = new_tex->size;
	si_texture_reference(&tex->flushed_depth_texture,
			     new_tex->flushed_depth_texture);

	tex->fmask_offset = new_tex->fmask_offset;
	tex->cmask_offset = new_tex->cmask_offset;
	tex->cmask_base_address_reg = new_tex->cmask_base_address_reg;

	if (tex->cmask_buffer == &tex->buffer)
		tex->cmask_buffer = NULL;
	else
		si_resource_reference(&tex->cmask_buffer, NULL);

	if (new_tex->cmask_buffer == &new_tex->buffer)
		tex->cmask_buffer = &tex->buffer;
	else
		si_resource_reference(&tex->cmask_buffer, new_tex->cmask_buffer);

	tex->dcc_offset = new_tex->dcc_offset;
	tex->cb_color_info = new_tex->cb_color_info;
	memcpy(tex->color_clear_value, new_tex->color_clear_value,
	       sizeof(tex->color_clear_value));
	tex->last_msaa_resolve_target_micro_mode = new_tex->last_msaa_resolve_target_micro_mode;

	tex->htile_offset = new_tex->htile_offset;
	tex->depth_clear_value = new_tex->depth_clear_value;
	tex->dirty_level_mask = new_tex->dirty_level_mask;
	tex->stencil_dirty_level_mask = new_tex->stencil_dirty_level_mask;
	tex->db_render_format = new_tex->db_render_format;
	tex->stencil_clear_value = new_tex->stencil_clear_value;
	tex->tc_compatible_htile = new_tex->tc_compatible_htile;
	tex->depth_cleared = new_tex->depth_cleared;
	tex->stencil_cleared = new_tex->stencil_cleared;
	tex->upgraded_depth = new_tex->upgraded_depth;
	tex->db_compatible = new_tex->db_compatible;
	tex->can_sample_z = new_tex->can_sample_z;
	tex->can_sample_s = new_tex->can_sample_s;

	tex->separate_dcc_dirty = new_tex->separate_dcc_dirty;
	tex->dcc_gather_statistics = new_tex->dcc_gather_statistics;
	si_resource_reference(&tex->dcc_separate_buffer,
				new_tex->dcc_separate_buffer);
	si_resource_reference(&tex->last_dcc_separate_buffer,
				new_tex->last_dcc_separate_buffer);

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		assert(!tex->htile_offset);
		assert(!tex->cmask_buffer);
		assert(!tex->surface.fmask_size);
		assert(!tex->dcc_offset);
		assert(!tex->is_depth);
	}

	si_texture_reference(&new_tex, NULL);

	p_atomic_inc(&sctx->screen->dirty_tex_counter);
}

static uint32_t si_get_bo_metadata_word1(struct si_screen *sscreen)
{
	return (ATI_VENDOR_ID << 16) | sscreen->info.pci_id;
}

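/* Pack the tiling and DCC parameters plus an opaque image descriptor into
 * the BO metadata, so that other processes importing this buffer can
 * reconstruct the layout. The metadata format is documented inside the
 * function.
 */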
static void si_set_tex_bo_metadata(struct si_screen *sscreen,
				   struct si_texture *tex)
{
	struct radeon_surf *surface = &tex->surface;
	struct pipe_resource *res = &tex->buffer.b.b;
	struct radeon_bo_metadata md;

	memset(&md, 0, sizeof(md));

	if (sscreen->info.chip_class >= GFX9) {
		md.u.gfx9.swizzle_mode = surface->u.gfx9.surf.swizzle_mode;

		if (tex->dcc_offset && !tex->dcc_separate_buffer) {
			uint64_t dcc_offset =
				tex->display_dcc_offset ? tex->display_dcc_offset
							: tex->dcc_offset;

			assert((dcc_offset >> 8) != 0 && (dcc_offset >> 8) < (1 << 24));
			md.u.gfx9.dcc_offset_256B = dcc_offset >> 8;
			md.u.gfx9.dcc_pitch_max = tex->surface.u.gfx9.display_dcc_pitch_max;
			md.u.gfx9.dcc_independent_64B = 1;
		}
	} else {
		md.u.legacy.microtile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D ?
					   RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
		md.u.legacy.macrotile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D ?
					   RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
		md.u.legacy.pipe_config = surface->u.legacy.pipe_config;
		md.u.legacy.bankw = surface->u.legacy.bankw;
		md.u.legacy.bankh = surface->u.legacy.bankh;
		md.u.legacy.tile_split = surface->u.legacy.tile_split;
		md.u.legacy.mtilea = surface->u.legacy.mtilea;
		md.u.legacy.num_banks = surface->u.legacy.num_banks;
		md.u.legacy.stride = surface->u.legacy.level[0].nblk_x * surface->bpe;
		md.u.legacy.scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
	}

	assert(tex->dcc_separate_buffer == NULL);
	assert(tex->surface.fmask_size == 0);

	/* Metadata image format version 1:
	 * [0] = 1 (metadata format identifier)
	 * [1] = (VENDOR_ID << 16) | PCI_ID
	 * [2:9] = image descriptor for the whole resource
	 *         [2] is always 0, because the base address is cleared
	 *         [9] is the DCC offset bits [39:8] from the beginning of
	 *             the buffer
	 * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
	 */

	md.metadata[0] = 1; /* metadata image format version 1 */

	/* TILE_MODE_INDEX is ambiguous without a PCI ID. */
	md.metadata[1] = si_get_bo_metadata_word1(sscreen);

	static const unsigned char swizzle[] = {
		PIPE_SWIZZLE_X,
		PIPE_SWIZZLE_Y,
		PIPE_SWIZZLE_Z,
		PIPE_SWIZZLE_W
	};
	bool is_array = util_texture_is_array(res->target);
	uint32_t desc[8];

	si_make_texture_descriptor(sscreen, tex, true,
				   res->target, res->format,
				   swizzle, 0, res->last_level, 0,
				   is_array ? res->array_size - 1 : 0,
				   res->width0, res->height0, res->depth0,
				   desc, NULL);

	si_set_mutable_tex_desc_fields(sscreen, tex, &tex->surface.u.legacy.level[0],
				       0, 0, tex->surface.blk_w, false, desc);

	/* Clear the base address and set the relative DCC offset. */
	desc[0] = 0;
	desc[1] &= C_008F14_BASE_ADDRESS_HI;
	desc[7] = tex->dcc_offset >> 8;

	/* Dwords [2:9] contain the image descriptor. */
	memcpy(&md.metadata[2], desc, sizeof(desc));
	md.size_metadata = 10 * 4;

	/* Dwords [10:..] contain the mipmap level offsets. */
	if (sscreen->info.chip_class <= VI) {
		for (unsigned i = 0; i <= res->last_level; i++)
			md.metadata[10+i] = tex->surface.u.legacy.level[i].offset >> 8;

		md.size_metadata += (1 + res->last_level) * 4;
	}

	sscreen->ws->buffer_set_metadata(tex->buffer.buf, &md);
}

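/* Parse the opaque metadata of an imported texture (the counterpart of
 * si_set_tex_bo_metadata) and enable DCC if the exporter left it enabled
 * in the image descriptor; otherwise make sure DCC stays disabled.
 */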
static void si_get_opaque_metadata(struct si_screen *sscreen,
				   struct si_texture *tex,
				   struct radeon_bo_metadata *md)
{
	uint32_t *desc = &md->metadata[2];

	if (sscreen->info.chip_class < VI)
		return;

	/* Return if DCC is enabled. The texture should be set up with it
	 * already.
	 */
	if (md->size_metadata >= 10 * 4 && /* at least 2(header) + 8(desc) dwords */
	    md->metadata[0] != 0 &&
	    md->metadata[1] == si_get_bo_metadata_word1(sscreen) &&
	    G_008F28_COMPRESSION_EN(desc[6])) {
		tex->dcc_offset = (uint64_t)desc[7] << 8;

		if (sscreen->info.chip_class >= GFX9) {
			/* Fix up parameters for displayable DCC. Some state
			 * trackers don't set the SCANOUT flag when importing
			 * displayable images, so we have to recover the correct
			 * parameters here.
			 */
			tex->surface.u.gfx9.dcc.pipe_aligned =
				G_008F24_META_PIPE_ALIGNED(desc[5]);
			tex->surface.u.gfx9.dcc.rb_aligned =
				G_008F24_META_RB_ALIGNED(desc[5]);

			/* If DCC is unaligned, this can only be a displayable image. */
			if (!tex->surface.u.gfx9.dcc.pipe_aligned &&
			    !tex->surface.u.gfx9.dcc.rb_aligned)
				tex->surface.is_displayable = true;
		}
		return;
	}

	/* Disable DCC. These are always set by texture_from_handle and must
	 * be cleared here.
	 */
	tex->dcc_offset = 0;
}

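/* Return whether the texture keeps DCC in a form a display engine can
 * consume, which requires a flush before scanout in both cases below.
 */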
static bool si_has_displayable_dcc(struct si_texture *tex)
{
	struct si_screen *sscreen = (struct si_screen*)tex->buffer.b.b.screen;

	if (sscreen->info.chip_class <= VI)
		return false;

	/* This needs a cache flush before scanout.
	 * (it can't be scanned out and rendered to simultaneously)
	 */
	if (sscreen->info.use_display_dcc_unaligned &&
	    tex->dcc_offset &&
	    !tex->surface.u.gfx9.dcc.pipe_aligned &&
	    !tex->surface.u.gfx9.dcc.rb_aligned)
		return true;

	/* This needs an explicit flush (flush_resource). */
	if (sscreen->info.use_display_dcc_with_retile_blit &&
	    tex->display_dcc_offset)
		return true;

	return false;
}

static void si_texture_get_info(struct pipe_screen* screen,
				struct pipe_resource *resource,
				unsigned *pstride,
				unsigned *poffset)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct si_texture *tex = (struct si_texture*)resource;
	unsigned stride = 0;
	unsigned offset = 0;

	if (!sscreen || !tex)
		return;

	if (resource->target != PIPE_BUFFER) {
		if (sscreen->info.chip_class >= GFX9) {
			offset = tex->surface.u.gfx9.surf_offset;
			stride = tex->surface.u.gfx9.surf_pitch *
					tex->surface.bpe;
		} else {
			offset = tex->surface.u.legacy.level[0].offset;
			stride = tex->surface.u.legacy.level[0].nblk_x *
					tex->surface.bpe;
		}
	}

	if (pstride)
		*pstride = stride;

	if (poffset)
		*poffset = offset;
}

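/* Export a texture or buffer through a winsys handle (e.g. for DMABUF or
 * DRI image sharing). Suballocated or local BOs are first reallocated as
 * shareable, fast clears are eliminated when the consumer won't flush
 * explicitly, and the BO metadata is updated when needed.
 */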
static boolean si_texture_get_handle(struct pipe_screen* screen,
				     struct pipe_context *ctx,
				     struct pipe_resource *resource,
				     struct winsys_handle *whandle,
				     unsigned usage)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct si_context *sctx;
	struct si_resource *res = si_resource(resource);
	struct si_texture *tex = (struct si_texture*)resource;
	bool update_metadata = false;
	unsigned stride, offset, slice_size;
	bool flush = false;

	ctx = threaded_context_unwrap_sync(ctx);
	sctx = (struct si_context*)(ctx ? ctx : sscreen->aux_context);

	if (resource->target != PIPE_BUFFER) {
		/* This is not supported now, but it might be required for OpenCL
		 * interop in the future.
		 */
		if (resource->nr_samples > 1 || tex->is_depth)
			return false;

		/* Move a suballocated texture into a non-suballocated allocation. */
		if (sscreen->ws->buffer_is_suballocated(res->buf) ||
		    tex->surface.tile_swizzle ||
		    (tex->buffer.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
		     sscreen->info.has_local_buffers)) {
			assert(!res->b.is_shared);
			si_reallocate_texture_inplace(sctx, tex,
							PIPE_BIND_SHARED, false);
			flush = true;
			assert(res->b.b.bind & PIPE_BIND_SHARED);
			assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
			assert(!(res->flags & RADEON_FLAG_NO_INTERPROCESS_SHARING));
			assert(tex->surface.tile_swizzle == 0);
		}

		/* Since shader image stores don't support DCC on VI,
		 * disable it for external clients that want write
		 * access.
		 */
		if ((usage & PIPE_HANDLE_USAGE_SHADER_WRITE && tex->dcc_offset) ||
		    /* Displayable DCC requires an explicit flush. */
		    (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
		     si_has_displayable_dcc(tex))) {
			if (si_texture_disable_dcc(sctx, tex)) {
				update_metadata = true;
				/* si_texture_disable_dcc flushes the context */
				flush = false;
			}
		}

		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
		    (tex->cmask_buffer || tex->dcc_offset)) {
			/* Eliminate fast clear (both CMASK and DCC) */
			si_eliminate_fast_color_clear(sctx, tex);
			/* eliminate_fast_color_clear flushes the context */
			flush = false;

			/* Disable CMASK if flush_resource isn't going
			 * to be called.
			 */
			if (tex->cmask_buffer)
				si_texture_discard_cmask(sscreen, tex);
		}

		/* Set metadata. */
		if (!res->b.is_shared || update_metadata)
			si_set_tex_bo_metadata(sscreen, tex);

		if (sscreen->info.chip_class >= GFX9) {
			slice_size = tex->surface.u.gfx9.surf_slice_size;
		} else {
			slice_size = (uint64_t)tex->surface.u.legacy.level[0].slice_size_dw * 4;
		}
	} else {
		/* Buffer exports are for the OpenCL interop. */
		/* Move a suballocated buffer into a non-suballocated allocation. */
		if (sscreen->ws->buffer_is_suballocated(res->buf) ||
		    /* A DMABUF export always fails if the BO is local. */
		    (tex->buffer.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
		     sscreen->info.has_local_buffers)) {
			assert(!res->b.is_shared);

			/* Allocate a new buffer with PIPE_BIND_SHARED. */
			struct pipe_resource templ = res->b.b;
			templ.bind |= PIPE_BIND_SHARED;

			struct pipe_resource *newb =
				screen->resource_create(screen, &templ);
			if (!newb)
				return false;

			/* Copy the old buffer contents to the new one. */
			struct pipe_box box;
			u_box_1d(0, newb->width0, &box);
			sctx->b.resource_copy_region(&sctx->b, newb, 0, 0, 0, 0,
						     &res->b.b, 0, &box);
			flush = true;
			/* Move the new buffer storage to the old pipe_resource. */
			si_replace_buffer_storage(&sctx->b, &res->b.b, newb);
			pipe_resource_reference(&newb, NULL);

			assert(res->b.b.bind & PIPE_BIND_SHARED);
			assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
		}

		/* Buffers */
		slice_size = 0;
	}

	si_texture_get_info(screen, resource, &stride, &offset);

	if (flush)
		sctx->b.flush(&sctx->b, NULL, 0);

	if (res->b.is_shared) {
		/* USAGE_EXPLICIT_FLUSH must be cleared if at least one user
		 * doesn't set it.
		 */
		res->external_usage |= usage & ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			res->external_usage &= ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
	} else {
		res->b.is_shared = true;
		res->external_usage = usage;
	}

	return sscreen->ws->buffer_get_handle(res->buf, stride, offset,
					      slice_size, whandle);
}

static void si_texture_destroy(struct pipe_screen *screen,
			       struct pipe_resource *ptex)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct si_texture *tex = (struct si_texture*)ptex;
	struct si_resource *resource = &tex->buffer;

	if (sscreen->info.chip_class >= GFX9)
		free(tex->surface.u.gfx9.dcc_retile_map);

	si_texture_reference(&tex->flushed_depth_texture, NULL);

	if (tex->cmask_buffer != &tex->buffer) {
	    si_resource_reference(&tex->cmask_buffer, NULL);
	}
	pb_reference(&resource->buf, NULL);
	si_resource_reference(&tex->dcc_separate_buffer, NULL);
	si_resource_reference(&tex->last_dcc_separate_buffer, NULL);
	FREE(tex);
}

static const struct u_resource_vtbl si_texture_vtbl;

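/* Compute the HTILE size and alignment for pre-GFX9 chips, where the winsys
 * doesn't do it for us. The size is derived from the number of 8x8 pixel
 * blocks per slice, with the block dimensions aligned to the per-pipe
 * HTILE cache-line dimensions (cl_width x cl_height) below.
 */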
static void si_texture_get_htile_size(struct si_screen *sscreen,
				      struct si_texture *tex)
{
	unsigned cl_width, cl_height, width, height;
	unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
	unsigned num_pipes = sscreen->info.num_tile_pipes;

	assert(sscreen->info.chip_class <= VI);

	tex->surface.htile_size = 0;

	if (tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
	    !sscreen->info.htile_cmask_support_1d_tiling)
		return;

	/* Overalign HTILE on P2 configs to work around GPU hangs in
	 * piglit/depthstencil-render-miplevels 585.
	 *
	 * This has been confirmed to help Kabini & Stoney, where the hangs
	 * are always reproducible. I think I have seen the test hang
	 * on Carrizo too, though it was very rare there.
	 */
	if (sscreen->info.chip_class >= CIK && num_pipes < 4)
		num_pipes = 4;

	switch (num_pipes) {
	case 1:
		cl_width = 32;
		cl_height = 16;
		break;
	case 2:
		cl_width = 32;
		cl_height = 32;
		break;
	case 4:
		cl_width = 64;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 64;
		break;
	case 16:
		cl_width = 128;
		cl_height = 64;
		break;
	default:
		assert(0);
		return;
	}

	width = align(tex->surface.u.legacy.level[0].nblk_x, cl_width * 8);
	height = align(tex->surface.u.legacy.level[0].nblk_y, cl_height * 8);

	slice_elements = (width * height) / (8 * 8);
	slice_bytes = slice_elements * 4;

	pipe_interleave_bytes = sscreen->info.pipe_interleave_bytes;
	base_align = num_pipes * pipe_interleave_bytes;

	tex->surface.htile_alignment = base_align;
	tex->surface.htile_size =
		util_num_layers(&tex->buffer.b.b, 0) *
		align(slice_bytes, base_align);
}

static void si_texture_allocate_htile(struct si_screen *sscreen,
				      struct si_texture *tex)
{
	if (sscreen->info.chip_class <= VI && !tex->tc_compatible_htile)
		si_texture_get_htile_size(sscreen, tex);

	if (!tex->surface.htile_size)
		return;

	tex->htile_offset = align(tex->size, tex->surface.htile_alignment);
	tex->size = tex->htile_offset + tex->surface.htile_size;
}

void si_print_texture_info(struct si_screen *sscreen,
			   struct si_texture *tex, struct u_log_context *log)
{
	int i;

	/* Common parameters. */
	u_log_printf(log, "  Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
		"blk_h=%u, array_size=%u, last_level=%u, "
		"bpe=%u, nsamples=%u, flags=0x%x, %s\n",
		tex->buffer.b.b.width0, tex->buffer.b.b.height0,
		tex->buffer.b.b.depth0, tex->surface.blk_w,
		tex->surface.blk_h,
		tex->buffer.b.b.array_size, tex->buffer.b.b.last_level,
		tex->surface.bpe, tex->buffer.b.b.nr_samples,
		tex->surface.flags, util_format_short_name(tex->buffer.b.b.format));

	if (sscreen->info.chip_class >= GFX9) {
		u_log_printf(log, "  Surf: size=%"PRIu64", slice_size=%"PRIu64", "
			"alignment=%u, swmode=%u, epitch=%u, pitch=%u\n",
			tex->surface.surf_size,
			tex->surface.u.gfx9.surf_slice_size,
			tex->surface.surf_alignment,
			tex->surface.u.gfx9.surf.swizzle_mode,
			tex->surface.u.gfx9.surf.epitch,
			tex->surface.u.gfx9.surf_pitch);

		if (tex->surface.fmask_size) {
			u_log_printf(log, "  FMASK: offset=%"PRIu64", size=%"PRIu64", "
				"alignment=%u, swmode=%u, epitch=%u\n",
				tex->fmask_offset,
				tex->surface.fmask_size,
				tex->surface.fmask_alignment,
				tex->surface.u.gfx9.fmask.swizzle_mode,
				tex->surface.u.gfx9.fmask.epitch);
		}

		if (tex->cmask_buffer) {
			u_log_printf(log, "  CMask: offset=%"PRIu64", size=%u, "
				"alignment=%u, rb_aligned=%u, pipe_aligned=%u\n",
				tex->cmask_offset,
				tex->surface.cmask_size,
				tex->surface.cmask_alignment,
				tex->surface.u.gfx9.cmask.rb_aligned,
				tex->surface.u.gfx9.cmask.pipe_aligned);
		}

		if (tex->htile_offset) {
			u_log_printf(log, "  HTile: offset=%"PRIu64", size=%u, alignment=%u, "
				"rb_aligned=%u, pipe_aligned=%u\n",
				tex->htile_offset,
				tex->surface.htile_size,
				tex->surface.htile_alignment,
				tex->surface.u.gfx9.htile.rb_aligned,
				tex->surface.u.gfx9.htile.pipe_aligned);
		}

		if (tex->dcc_offset) {
			u_log_printf(log, "  DCC: offset=%"PRIu64", size=%u, "
				"alignment=%u, pitch_max=%u, num_dcc_levels=%u\n",
				tex->dcc_offset, tex->surface.dcc_size,
				tex->surface.dcc_alignment,
				tex->surface.u.gfx9.display_dcc_pitch_max,
				tex->surface.num_dcc_levels);
		}

		if (tex->surface.u.gfx9.stencil_offset) {
			u_log_printf(log, "  Stencil: offset=%"PRIu64", swmode=%u, epitch=%u\n",
				tex->surface.u.gfx9.stencil_offset,
				tex->surface.u.gfx9.stencil.swizzle_mode,
				tex->surface.u.gfx9.stencil.epitch);
		}
		return;
	}

	u_log_printf(log, "  Layout: size=%"PRIu64", alignment=%u, bankw=%u, "
		"bankh=%u, nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
		tex->surface.surf_size, tex->surface.surf_alignment, tex->surface.u.legacy.bankw,
		tex->surface.u.legacy.bankh, tex->surface.u.legacy.num_banks, tex->surface.u.legacy.mtilea,
		tex->surface.u.legacy.tile_split, tex->surface.u.legacy.pipe_config,
		(tex->surface.flags & RADEON_SURF_SCANOUT) != 0);

	if (tex->surface.fmask_size)
		u_log_printf(log, "  FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, "
			"bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n",
			tex->fmask_offset, tex->surface.fmask_size, tex->surface.fmask_alignment,
			tex->surface.u.legacy.fmask.pitch_in_pixels,
			tex->surface.u.legacy.fmask.bankh,
			tex->surface.u.legacy.fmask.slice_tile_max,
			tex->surface.u.legacy.fmask.tiling_index);

	if (tex->cmask_buffer)
		u_log_printf(log, "  CMask: offset=%"PRIu64", size=%u, alignment=%u, "
			"slice_tile_max=%u\n",
			tex->cmask_offset, tex->surface.cmask_size, tex->surface.cmask_alignment,
			tex->surface.u.legacy.cmask_slice_tile_max);

	if (tex->htile_offset)
		u_log_printf(log, "  HTile: offset=%"PRIu64", size=%u, "
			"alignment=%u, TC_compatible = %u\n",
			tex->htile_offset, tex->surface.htile_size,
			tex->surface.htile_alignment,
			tex->tc_compatible_htile);

	if (tex->dcc_offset) {
		u_log_printf(log, "  DCC: offset=%"PRIu64", size=%u, alignment=%u\n",
			tex->dcc_offset, tex->surface.dcc_size,
			tex->surface.dcc_alignment);
		for (i = 0; i <= tex->buffer.b.b.last_level; i++)
			u_log_printf(log, "  DCCLevel[%i]: enabled=%u, offset=%u, "
				"fast_clear_size=%u\n",
				i, i < tex->surface.num_dcc_levels,
				tex->surface.u.legacy.level[i].dcc_offset,
				tex->surface.u.legacy.level[i].dcc_fast_clear_size);
	}

	for (i = 0; i <= tex->buffer.b.b.last_level; i++)
		u_log_printf(log, "  Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
			"npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
			"mode=%u, tiling_index = %u\n",
			i, tex->surface.u.legacy.level[i].offset,
			(uint64_t)tex->surface.u.legacy.level[i].slice_size_dw * 4,
			u_minify(tex->buffer.b.b.width0, i),
			u_minify(tex->buffer.b.b.height0, i),
			u_minify(tex->buffer.b.b.depth0, i),
			tex->surface.u.legacy.level[i].nblk_x,
			tex->surface.u.legacy.level[i].nblk_y,
			tex->surface.u.legacy.level[i].mode,
			tex->surface.u.legacy.tiling_index[i]);

	if (tex->surface.has_stencil) {
		u_log_printf(log, "  StencilLayout: tilesplit=%u\n",
			tex->surface.u.legacy.stencil_tile_split);
		for (i = 0; i <= tex->buffer.b.b.last_level; i++) {
			u_log_printf(log, "  StencilLevel[%i]: offset=%"PRIu64", "
				"slice_size=%"PRIu64", npix_x=%u, "
				"npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
				"mode=%u, tiling_index = %u\n",
				i, tex->surface.u.legacy.stencil_level[i].offset,
				(uint64_t)tex->surface.u.legacy.stencil_level[i].slice_size_dw * 4,
				u_minify(tex->buffer.b.b.width0, i),
				u_minify(tex->buffer.b.b.height0, i),
				u_minify(tex->buffer.b.b.depth0, i),
				tex->surface.u.legacy.stencil_level[i].nblk_x,
				tex->surface.u.legacy.stencil_level[i].nblk_y,
				tex->surface.u.legacy.stencil_level[i].mode,
				tex->surface.u.legacy.stencil_tiling_index[i]);
		}
	}
}

/* Common processing for si_texture_create and si_texture_from_handle */
static struct si_texture *
si_texture_create_object(struct pipe_screen *screen,
			 const struct pipe_resource *base,
			 struct pb_buffer *buf,
			 struct radeon_surf *surface)
{
	struct si_texture *tex;
	struct si_resource *resource;
	struct si_screen *sscreen = (struct si_screen*)screen;

	tex = CALLOC_STRUCT(si_texture);
	if (!tex)
		goto error;

	resource = &tex->buffer;
	resource->b.b = *base;
	resource->b.b.next = NULL;
	resource->b.vtbl = &si_texture_vtbl;
	pipe_reference_init(&resource->b.b.reference, 1);
	resource->b.b.screen = screen;

	/* don't include stencil-only formats which we don't support for rendering */
	tex->is_depth = util_format_has_depth(util_format_description(tex->buffer.b.b.format));

	tex->surface = *surface;
	tex->size = tex->surface.surf_size;

	tex->tc_compatible_htile = tex->surface.htile_size != 0 &&
				   (tex->surface.flags &
				    RADEON_SURF_TC_COMPATIBLE_HTILE);

	/* TC-compatible HTILE:
	 * - VI only supports Z32_FLOAT.
	 * - GFX9 only supports Z32_FLOAT and Z16_UNORM. */
	if (tex->tc_compatible_htile) {
		if (sscreen->info.chip_class >= GFX9 &&
		    base->format == PIPE_FORMAT_Z16_UNORM)
			tex->db_render_format = base->format;
		else {
			tex->db_render_format = PIPE_FORMAT_Z32_FLOAT;
			tex->upgraded_depth = base->format != PIPE_FORMAT_Z32_FLOAT &&
					       base->format != PIPE_FORMAT_Z32_FLOAT_S8X24_UINT;
		}
	} else {
		tex->db_render_format = base->format;
	}

	/* Applies to GCN. */
	tex->last_msaa_resolve_target_micro_mode = tex->surface.micro_tile_mode;

	/* Disable separate DCC at the beginning. DRI2 doesn't reuse buffers
	 * between frames, so the only thing that can enable separate DCC
	 * with DRI2 is multiple slow clears within a frame.
	 */
	tex->ps_draw_ratio = 0;

	if (tex->is_depth) {
		if (sscreen->info.chip_class >= GFX9) {
			tex->can_sample_z = true;
			tex->can_sample_s = true;
		} else {
			tex->can_sample_z = !tex->surface.u.legacy.depth_adjusted;
			tex->can_sample_s = !tex->surface.u.legacy.stencil_adjusted;
		}

		if (!(base->flags & (SI_RESOURCE_FLAG_TRANSFER |
				     SI_RESOURCE_FLAG_FLUSHED_DEPTH))) {
			tex->db_compatible = true;

			if (!(sscreen->debug_flags & DBG(NO_HYPERZ)))
				si_texture_allocate_htile(sscreen, tex);
		}
	} else {
		if (base->nr_samples > 1 &&
		    !buf &&
		    !(sscreen->debug_flags & DBG(NO_FMASK))) {
			/* Allocate FMASK. */
			tex->fmask_offset = align64(tex->size,
						     tex->surface.fmask_alignment);
			tex->size = tex->fmask_offset + tex->surface.fmask_size;

			/* Allocate CMASK. */
			tex->cmask_offset = align64(tex->size, tex->surface.cmask_alignment);
			tex->size = tex->cmask_offset + tex->surface.cmask_size;
			tex->cb_color_info |= S_028C70_FAST_CLEAR(1);
			tex->cmask_buffer = &tex->buffer;

			if (!tex->surface.fmask_size || !tex->surface.cmask_size)
				goto error;
		}

		/* Shared textures must always set up DCC here.
		 * If it's not present, it will be disabled by
		 * apply_opaque_metadata later.
		 */
		if (tex->surface.dcc_size &&
		    (buf || !(sscreen->debug_flags & DBG(NO_DCC))) &&
		    (sscreen->info.use_display_dcc_unaligned ||
		     sscreen->info.use_display_dcc_with_retile_blit ||
		     !(tex->surface.flags & RADEON_SURF_SCANOUT))) {
			/* Add space for the DCC buffer. */
			tex->dcc_offset = align64(tex->size, tex->surface.dcc_alignment);
			tex->size = tex->dcc_offset + tex->surface.dcc_size;

			if (sscreen->info.chip_class >= GFX9 &&
			    tex->surface.u.gfx9.dcc_retile_num_elements) {
				/* Add space for the displayable DCC buffer. */
				tex->display_dcc_offset =
					align64(tex->size, tex->surface.u.gfx9.display_dcc_alignment);
				tex->size = tex->display_dcc_offset +
					    tex->surface.u.gfx9.display_dcc_size;

				/* Add space for the DCC retile buffer. (16-bit or 32-bit elements) */
				tex->dcc_retile_map_offset =
					align64(tex->size, sscreen->info.tcc_cache_line_size);

				if (tex->surface.u.gfx9.dcc_retile_use_uint16) {
					tex->size = tex->dcc_retile_map_offset +
						    tex->surface.u.gfx9.dcc_retile_num_elements * 2;
				} else {
					tex->size = tex->dcc_retile_map_offset +
						    tex->surface.u.gfx9.dcc_retile_num_elements * 4;
				}
			}
		}
	}

	/* Now create the backing buffer. */
	if (!buf) {
		si_init_resource_fields(sscreen, resource, tex->size,
					  tex->surface.surf_alignment);

		if (!si_alloc_resource(sscreen, resource))
			goto error;
	} else {
		resource->buf = buf;
		resource->gpu_address = sscreen->ws->buffer_get_virtual_address(resource->buf);
		resource->bo_size = buf->size;
		resource->bo_alignment = buf->alignment;
		resource->domains = sscreen->ws->buffer_get_initial_domain(resource->buf);
		if (resource->domains & RADEON_DOMAIN_VRAM)
			resource->vram_usage = buf->size;
		else if (resource->domains & RADEON_DOMAIN_GTT)
			resource->gart_usage = buf->size;
	}

	if (tex->cmask_buffer) {
		/* Initialize the cmask to 0xCC (= compressed state). */
		si_screen_clear_buffer(sscreen, &tex->cmask_buffer->b.b,
					 tex->cmask_offset, tex->surface.cmask_size,
					 0xCCCCCCCC);
	}
	if (tex->htile_offset) {
		uint32_t clear_value = 0;

		if (sscreen->info.chip_class >= GFX9 || tex->tc_compatible_htile)
			clear_value = 0x0000030F;

		si_screen_clear_buffer(sscreen, &tex->buffer.b.b,
					 tex->htile_offset,
					 tex->surface.htile_size,
					 clear_value);
	}

	/* Initialize DCC only if the texture is not being imported. */
	if (!buf && tex->dcc_offset) {
		/* Clear DCC to black for all tiles with DCC enabled.
		 *
		 * This fixes corruption in 3DMark Slingshot Extreme, which
		 * uses uninitialized textures.
		 */
		if (tex->surface.num_dcc_levels == tex->buffer.b.b.last_level + 1 &&
		    tex->buffer.b.b.nr_samples <= 2) {
			/* Simple case - all tiles have DCC enabled. */
			si_screen_clear_buffer(sscreen, &tex->buffer.b.b,
					       tex->dcc_offset,
					       tex->surface.dcc_size,
					       DCC_CLEAR_COLOR_0000);
		} else if (sscreen->info.chip_class >= GFX9) {
			/* Clear to uncompressed. Clearing this to black is complicated. */
			si_screen_clear_buffer(sscreen, &tex->buffer.b.b,
					       tex->dcc_offset,
					       tex->surface.dcc_size,
					       DCC_UNCOMPRESSED);
		} else {
			/* GFX8: Initialize mipmap levels and multisamples separately. */
			if (tex->buffer.b.b.nr_samples >= 2) {
				/* Clearing this to black is complicated. */
				si_screen_clear_buffer(sscreen, &tex->buffer.b.b,
						       tex->dcc_offset,
						       tex->surface.dcc_size,
						       DCC_UNCOMPRESSED);
			} else {
				/* Clear the enabled mipmap levels to black. */
				unsigned size = 0;

				for (unsigned i = 0; i < tex->surface.num_dcc_levels; i++) {
					if (!tex->surface.u.legacy.level[i].dcc_fast_clear_size)
						break;

					size = tex->surface.u.legacy.level[i].dcc_offset +
					       tex->surface.u.legacy.level[i].dcc_fast_clear_size;
				}

				/* Mipmap levels with DCC. */
				if (size) {
					si_screen_clear_buffer(sscreen, &tex->buffer.b.b,
							       tex->dcc_offset, size,
							       DCC_CLEAR_COLOR_0000);
				}
				/* Mipmap levels without DCC. */
				if (size != tex->surface.dcc_size) {
					si_screen_clear_buffer(sscreen, &tex->buffer.b.b,
							       tex->dcc_offset + size,
							       tex->surface.dcc_size - size,
							       DCC_UNCOMPRESSED);
				}
			}
		}

		/* Upload the DCC retile map. */
		if (tex->dcc_retile_map_offset) {
			/* Use a staging buffer for the upload, because
			 * the buffer backing the texture is unmappable.
			 */
			bool use_uint16 = tex->surface.u.gfx9.dcc_retile_use_uint16;
			unsigned num_elements = tex->surface.u.gfx9.dcc_retile_num_elements;
			struct si_resource *buf =
				si_aligned_buffer_create(screen, 0, PIPE_USAGE_STREAM,
							 num_elements * (use_uint16 ? 2 : 4),
							 sscreen->info.tcc_cache_line_size);
			uint32_t *ui = (uint32_t*)sscreen->ws->buffer_map(buf->buf, NULL,
									  PIPE_TRANSFER_WRITE);
			uint16_t *us = (uint16_t*)ui;

			/* Upload the retile map into a staging buffer. */
			if (use_uint16) {
				for (unsigned i = 0; i < num_elements; i++)
					us[i] = tex->surface.u.gfx9.dcc_retile_map[i];
			} else {
				for (unsigned i = 0; i < num_elements; i++)
					ui[i] = tex->surface.u.gfx9.dcc_retile_map[i];
			}

			/* Copy the staging buffer to the buffer backing the texture. */
			struct si_context *sctx = (struct si_context*)sscreen->aux_context;
			struct pipe_box box;
			u_box_1d(0, buf->b.b.width0, &box);

			assert(tex->dcc_retile_map_offset <= UINT_MAX);
			mtx_lock(&sscreen->aux_context_lock);
			sctx->dma_copy(&sctx->b, &tex->buffer.b.b, 0,
				       tex->dcc_retile_map_offset, 0, 0,
				       &buf->b.b, 0, &box);
			sscreen->aux_context->flush(sscreen->aux_context, NULL, 0);
			mtx_unlock(&sscreen->aux_context_lock);

			si_resource_reference(&buf, NULL);
		}
	}

	/* Initialize the CMASK base register value. */
	tex->cmask_base_address_reg =
		(tex->buffer.gpu_address + tex->cmask_offset) >> 8;

	if (sscreen->debug_flags & DBG(VM)) {
		fprintf(stderr, "VM start=0x%"PRIX64"  end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
			tex->buffer.gpu_address,
			tex->buffer.gpu_address + tex->buffer.buf->size,
			base->width0, base->height0, util_num_layers(base, 0), base->last_level+1,
			base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
	}

	if (sscreen->debug_flags & DBG(TEX)) {
		puts("Texture:");
		struct u_log_context log;
		u_log_context_init(&log);
		si_print_texture_info(sscreen, tex, &log);
		u_log_new_page_print(&log, stdout);
		fflush(stdout);
		u_log_context_destroy(&log);
	}

	return tex;

error:
	FREE(tex);
	if (sscreen->info.chip_class >= GFX9)
		free(surface->u.gfx9.dcc_retile_map);
	return NULL;
}

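/* Select the surface mode (linear, 1D- or 2D-tiled) for a texture template.
 * Roughly: MSAA, depth-stencil and compressed formats must be tiled,
 * transfers and frequently mapped textures prefer linear, and very small
 * textures fall back to 1D tiling.
 */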
static enum radeon_surf_mode
si_choose_tiling(struct si_screen *sscreen,
		 const struct pipe_resource *templ, bool tc_compatible_htile)
{
	const struct util_format_description *desc = util_format_description(templ->format);
	bool force_tiling = templ->flags & SI_RESOURCE_FLAG_FORCE_MSAA_TILING;
	bool is_depth_stencil = util_format_is_depth_or_stencil(templ->format) &&
				!(templ->flags & SI_RESOURCE_FLAG_FLUSHED_DEPTH);

	/* MSAA resources must be 2D tiled. */
	if (templ->nr_samples > 1)
		return RADEON_SURF_MODE_2D;

	/* Transfer resources should be linear. */
	if (templ->flags & SI_RESOURCE_FLAG_TRANSFER)
		return RADEON_SURF_MODE_LINEAR_ALIGNED;

	/* Avoid Z/S decompress blits by forcing TC-compatible HTILE on VI,
	 * which requires 2D tiling.
	 */
	if (sscreen->info.chip_class == VI && tc_compatible_htile)
		return RADEON_SURF_MODE_2D;

	/* Handle common candidates for the linear mode.
	 * Compressed textures and DB surfaces must always be tiled.
	 */
	if (!force_tiling &&
	    !is_depth_stencil &&
	    !util_format_is_compressed(templ->format)) {
		if (sscreen->debug_flags & DBG(NO_TILING))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Tiling doesn't work with the 422 (SUBSAMPLED) formats. */
		if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Cursors are linear on SI.
		 * (XXX double-check, maybe also use RADEON_SURF_SCANOUT) */
		if (templ->bind & PIPE_BIND_CURSOR)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		if (templ->bind & PIPE_BIND_LINEAR)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures with a very small height are recommended to be linear. */
		if (templ->target == PIPE_TEXTURE_1D ||
		    templ->target == PIPE_TEXTURE_1D_ARRAY ||
		    /* Only very thin and long 2D textures should benefit from
		     * linear_aligned. */
		    (templ->width0 > 8 && templ->height0 <= 2))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures likely to be mapped often. */
		if (templ->usage == PIPE_USAGE_STAGING ||
		    templ->usage == PIPE_USAGE_STREAM)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	/* Make small textures 1D tiled. */
	if (templ->width0 <= 16 || templ->height0 <= 16 ||
	    (sscreen->debug_flags & DBG(NO_2D_TILING)))
		return RADEON_SURF_MODE_1D;

	/* The allocator will switch to 1D if needed. */
	return RADEON_SURF_MODE_2D;
}

1558struct pipe_resource *si_texture_create(struct pipe_screen *screen,
1559					const struct pipe_resource *templ)
1560{
1561	struct si_screen *sscreen = (struct si_screen*)screen;
1562	bool is_zs = util_format_is_depth_or_stencil(templ->format);
1563
1564	if (templ->nr_samples >= 2) {
1565		/* This is hackish (overwriting the const pipe_resource template),
1566		 * but should be harmless and state trackers can also see
1567		 * the overriden number of samples in the created pipe_resource.
1568		 */
1569		if (is_zs && sscreen->eqaa_force_z_samples) {
1570			((struct pipe_resource*)templ)->nr_samples =
1571			((struct pipe_resource*)templ)->nr_storage_samples =
1572				sscreen->eqaa_force_z_samples;
1573		} else if (!is_zs && sscreen->eqaa_force_color_samples) {
1574			((struct pipe_resource*)templ)->nr_samples =
1575				sscreen->eqaa_force_coverage_samples;
1576			((struct pipe_resource*)templ)->nr_storage_samples =
1577				sscreen->eqaa_force_color_samples;
1578		}
1579	}
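
	/* For example (hypothetical driver config): with
	 * eqaa_force_coverage_samples = 8 and eqaa_force_color_samples = 4,
	 * a request for any multisampled color buffer is rewritten to
	 * nr_samples = 8 (coverage) and nr_storage_samples = 4 (stored
	 * color), i.e. 8x EQAA backed by 4 color samples.
	 */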

	struct radeon_surf surface = {0};
	bool is_flushed_depth = templ->flags & SI_RESOURCE_FLAG_FLUSHED_DEPTH;
	bool tc_compatible_htile =
		sscreen->info.chip_class >= VI &&
		/* There are issues with TC-compatible HTILE on Tonga (and
		 * Iceland is the same design), and documented bug workarounds
		 * don't help. For example, this fails:
		 *   piglit/bin/tex-miplevel-selection 'texture()' 2DShadow -auto
		 */
		sscreen->info.family != CHIP_TONGA &&
		sscreen->info.family != CHIP_ICELAND &&
		(templ->flags & PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY) &&
		!(sscreen->debug_flags & DBG(NO_HYPERZ)) &&
		!is_flushed_depth &&
		templ->nr_samples <= 1 && /* TC-compat HTILE is less efficient with MSAA */
		is_zs;
	int r;

	r = si_init_surface(sscreen, &surface, templ,
			    si_choose_tiling(sscreen, templ, tc_compatible_htile),
			    0, 0, false, false, is_flushed_depth,
			    tc_compatible_htile);
	if (r)
		return NULL;

	return (struct pipe_resource *)
	       si_texture_create_object(screen, templ, NULL, &surface);
}

static struct pipe_resource *si_texture_from_winsys_buffer(struct si_screen *sscreen,
							   const struct pipe_resource *templ,
							   struct pb_buffer *buf,
							   unsigned stride,
							   unsigned offset,
							   unsigned usage,
							   bool dedicated)
{
	enum radeon_surf_mode array_mode;
	struct radeon_surf surface = {};
	struct radeon_bo_metadata metadata = {};
	struct si_texture *tex;
	bool is_scanout;
	int r;

	if (dedicated) {
		sscreen->ws->buffer_get_metadata(buf, &metadata);
		si_get_display_metadata(sscreen, &surface, &metadata,
					&array_mode, &is_scanout);
	} else {
		/* The BO metadata is unset for non-dedicated images, so fall
		 * back to linear. See the answer to question 5 of the
		 * VK_KHX_external_memory spec for some details.
		 *
		 * This case may not work if the surface pitch isn't
		 * correctly aligned by default.
		 *
		 * Supporting it correctly would require multi-image metadata
		 * to be synchronized between radv and radeonsi. The semantics
		 * of associating multiple image metadata to a memory object
		 * on the Vulkan export side are not concretely defined
		 * either.
		 *
		 * All the use cases we are aware of at the moment for memory
		 * objects use dedicated allocations, so let's keep the
		 * initial implementation simple.
		 *
		 * A possible alternative is to attempt to reconstruct the
		 * tiling information when the TexParameter TEXTURE_TILING_EXT
		 * is set.
		 */
		array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
		is_scanout = false;
	}

	r = si_init_surface(sscreen, &surface, templ,
			    array_mode, stride, offset, true, is_scanout,
			    false, false);
	if (r)
		return NULL;

	tex = si_texture_create_object(&sscreen->b, templ, buf, &surface);
	if (!tex)
		return NULL;

	tex->buffer.b.is_shared = true;
	tex->buffer.external_usage = usage;

	si_get_opaque_metadata(sscreen, tex, &metadata);

	/* Displayable DCC requires an explicit flush. */
	if (dedicated &&
	    !(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
	    si_has_displayable_dcc(tex)) {
		/* TODO: do we need to decompress DCC? */
		if (si_texture_discard_dcc(sscreen, tex)) {
			/* Update BO metadata after disabling DCC. */
			si_set_tex_bo_metadata(sscreen, tex);
		}
	}

	assert(tex->surface.tile_swizzle == 0);
	return &tex->buffer.b.b;
}

static struct pipe_resource *si_texture_from_handle(struct pipe_screen *screen,
						    const struct pipe_resource *templ,
						    struct winsys_handle *whandle,
						    unsigned usage)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct pb_buffer *buf = NULL;
	unsigned stride = 0, offset = 0;

	/* Support only 2D textures without mipmaps. */
	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
	    templ->depth0 != 1 || templ->last_level != 0)
		return NULL;

	buf = sscreen->ws->buffer_from_handle(sscreen->ws, whandle,
					      sscreen->info.max_alignment,
					      &stride, &offset);
	if (!buf)
		return NULL;

	return si_texture_from_winsys_buffer(sscreen, templ, buf, stride,
					     offset, usage, true);
}

bool si_init_flushed_depth_texture(struct pipe_context *ctx,
				   struct pipe_resource *texture,
				   struct si_texture **staging)
{
	struct si_texture *tex = (struct si_texture*)texture;
	struct pipe_resource resource;
	struct si_texture **flushed_depth_texture = staging ?
			staging : &tex->flushed_depth_texture;
	enum pipe_format pipe_format = texture->format;

	if (!staging) {
		if (tex->flushed_depth_texture)
			return true; /* it's ready */

		if (!tex->can_sample_z && tex->can_sample_s) {
			switch (pipe_format) {
			case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
				/* Save memory by not allocating the S plane. */
				pipe_format = PIPE_FORMAT_Z32_FLOAT;
				break;
			case PIPE_FORMAT_Z24_UNORM_S8_UINT:
			case PIPE_FORMAT_S8_UINT_Z24_UNORM:
				/* Save memory bandwidth by not copying the
				 * stencil part during flush.
				 *
				 * This potentially increases memory bandwidth
				 * if an application uses both Z and S texturing
				 * simultaneously (a flushed Z24S8 texture
				 * would be stored compactly), but how often
				 * does that really happen?
				 */
				pipe_format = PIPE_FORMAT_Z24X8_UNORM;
				break;
			default:;
			}
		} else if (!tex->can_sample_s && tex->can_sample_z) {
			assert(util_format_has_stencil(util_format_description(pipe_format)));

			/* DB->CB copies to an 8bpp surface don't work. */
			pipe_format = PIPE_FORMAT_X24S8_UINT;
		}
	}
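
	/* Example of the format demotion above: a Z32_FLOAT_S8X24_UINT
	 * texture that can only be sampled as Z gets a Z32_FLOAT flushed
	 * copy (the stencil plane is never read), while a Z24S8 texture
	 * that can only be sampled as S gets an X24S8_UINT copy, since
	 * DB->CB copies to an 8bpp surface don't work.
	 */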

	memset(&resource, 0, sizeof(resource));
	resource.target = texture->target;
	resource.format = pipe_format;
	resource.width0 = texture->width0;
	resource.height0 = texture->height0;
	resource.depth0 = texture->depth0;
	resource.array_size = texture->array_size;
	resource.last_level = texture->last_level;
	resource.nr_samples = texture->nr_samples;
	resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
	resource.flags = texture->flags | SI_RESOURCE_FLAG_FLUSHED_DEPTH;

	if (staging)
		resource.flags |= SI_RESOURCE_FLAG_TRANSFER;

	*flushed_depth_texture = (struct si_texture *)ctx->screen->resource_create(ctx->screen, &resource);
	if (*flushed_depth_texture == NULL) {
		PRINT_ERR("failed to create temporary texture to hold flushed depth\n");
		return false;
	}
	return true;
}

/**
 * Initialize the pipe_resource descriptor to be of the same size as the box,
 * which is supposed to hold a subregion of the texture "orig" at the given
 * mipmap level.
 */
static void si_init_temp_resource_from_box(struct pipe_resource *res,
					   struct pipe_resource *orig,
					   const struct pipe_box *box,
					   unsigned level, unsigned flags)
{
	memset(res, 0, sizeof(*res));
	res->format = orig->format;
	res->width0 = box->width;
	res->height0 = box->height;
	res->depth0 = 1;
	res->array_size = 1;
	res->usage = flags & SI_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	res->flags = flags;

	/* We must set the correct texture target and dimensions for a 3D box. */
	if (box->depth > 1 && util_max_layer(orig, level) > 0) {
		res->target = PIPE_TEXTURE_2D_ARRAY;
		res->array_size = box->depth;
	} else {
		res->target = PIPE_TEXTURE_2D;
	}
}
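
/* E.g. a box with depth = 4 into a texture that has multiple layers or
 * slices at the given level yields a PIPE_TEXTURE_2D_ARRAY with
 * array_size = 4; a depth-1 box yields a plain PIPE_TEXTURE_2D of the
 * box's width and height.
 */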

static bool si_can_invalidate_texture(struct si_screen *sscreen,
				      struct si_texture *tex,
				      unsigned transfer_usage,
				      const struct pipe_box *box)
{
	return !tex->buffer.b.is_shared &&
		!(transfer_usage & PIPE_TRANSFER_READ) &&
		tex->buffer.b.b.last_level == 0 &&
		util_texrange_covers_whole_level(&tex->buffer.b.b, 0,
						 box->x, box->y, box->z,
						 box->width, box->height,
						 box->depth);
}

static void si_texture_invalidate_storage(struct si_context *sctx,
					  struct si_texture *tex)
{
	struct si_screen *sscreen = sctx->screen;

	/* There is no point in discarding depth and tiled buffers. */
	assert(!tex->is_depth);
	assert(tex->surface.is_linear);

	/* Reallocate the buffer in the same pipe_resource. */
	si_alloc_resource(sscreen, &tex->buffer);

	/* Initialize the CMASK base address (needed even without CMASK). */
	tex->cmask_base_address_reg =
		(tex->buffer.gpu_address + tex->cmask_offset) >> 8;
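	/* The shift by 8 stores the base in units of 256 bytes, which is
	 * what the hardware register expects. E.g. gpu_address 0x100001000
	 * plus cmask_offset 0x200 gives 0x100001200 >> 8 = 0x1000012.
	 */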

	p_atomic_inc(&sscreen->dirty_tex_counter);

	sctx->num_alloc_tex_transfer_bytes += tex->size;
}

static void *si_texture_transfer_map(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     unsigned level,
				     unsigned usage,
				     const struct pipe_box *box,
				     struct pipe_transfer **ptransfer)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_texture *tex = (struct si_texture*)texture;
	struct si_transfer *trans;
	struct si_resource *buf;
	unsigned offset = 0;
	char *map;
	bool use_staging_texture = false;

	assert(!(texture->flags & SI_RESOURCE_FLAG_TRANSFER));
	assert(box->width && box->height && box->depth);

	/* Depth textures use staging unconditionally. */
	if (!tex->is_depth) {
		/* Degrade the tile mode if we get too many transfers on APUs.
		 * On dGPUs, the staging texture is always faster.
		 * Only count uploads that are at least 4x4 pixels large.
		 */
		if (!sctx->screen->info.has_dedicated_vram &&
		    level == 0 &&
		    box->width >= 4 && box->height >= 4 &&
		    p_atomic_inc_return(&tex->num_level0_transfers) == 10) {
			bool can_invalidate =
				si_can_invalidate_texture(sctx->screen, tex,
							  usage, box);

			si_reallocate_texture_inplace(sctx, tex,
						      PIPE_BIND_LINEAR,
						      can_invalidate);
		}

		/* Tiled textures need to be converted into a linear texture for CPU
		 * access. The staging texture is always linear and is placed in GART.
		 *
		 * Reading from VRAM or GTT WC is slow, always use the staging
		 * texture in this case.
		 *
		 * Use the staging texture for uploads if the underlying BO
		 * is busy.
		 */
		if (!tex->surface.is_linear)
			use_staging_texture = true;
		else if (usage & PIPE_TRANSFER_READ)
			use_staging_texture =
				tex->buffer.domains & RADEON_DOMAIN_VRAM ||
				tex->buffer.flags & RADEON_FLAG_GTT_WC;
		/* Write & linear only: */
		else if (si_rings_is_buffer_referenced(sctx, tex->buffer.buf,
						       RADEON_USAGE_READWRITE) ||
			 !sctx->ws->buffer_wait(tex->buffer.buf, 0,
						RADEON_USAGE_READWRITE)) {
			/* It's busy. */
			if (si_can_invalidate_texture(sctx->screen, tex,
						      usage, box))
				si_texture_invalidate_storage(sctx, tex);
			else
				use_staging_texture = true;
		}
	}
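
	/* In short: tiled -> staging; linear reads from VRAM or GTT-WC ->
	 * staging; linear writes to a busy BO -> invalidate the storage if
	 * the whole texture is overwritten, else staging; everything else
	 * maps the resource directly.
	 */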

	trans = CALLOC_STRUCT(si_transfer);
	if (!trans)
		return NULL;
	pipe_resource_reference(&trans->b.b.resource, texture);
	trans->b.b.level = level;
	trans->b.b.usage = usage;
	trans->b.b.box = *box;

	if (tex->is_depth) {
		struct si_texture *staging_depth;

		if (tex->buffer.b.b.nr_samples > 1) {
			/* MSAA depth buffers need to be converted to single sample buffers.
			 *
			 * Mapping MSAA depth buffers can occur if ReadPixels is called
			 * with a multisample GLX visual.
			 *
			 * First downsample the depth buffer to a temporary texture,
			 * then decompress the temporary one to staging.
			 *
			 * Only the region being mapped is transferred.
			 */
			struct pipe_resource resource;

			si_init_temp_resource_from_box(&resource, texture, box, level, 0);

			if (!si_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
				PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
				goto fail_trans;
			}

			if (usage & PIPE_TRANSFER_READ) {
				struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);
				if (!temp) {
					PRINT_ERR("failed to create a temporary depth texture\n");
					goto fail_trans;
				}

				si_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
				si_blit_decompress_depth(ctx, (struct si_texture*)temp, staging_depth,
							 0, 0, 0, box->depth, 0, 0);
				pipe_resource_reference(&temp, NULL);
			}

			/* Just get the strides. */
			si_texture_get_offset(sctx->screen, staging_depth, level, NULL,
					      &trans->b.b.stride,
					      &trans->b.b.layer_stride);
		} else {
			/* XXX: only readback the rectangle which is being mapped? */
			/* XXX: when discard is true, no need to read back from depth texture */
			if (!si_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
				PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
				goto fail_trans;
			}

			si_blit_decompress_depth(ctx, tex, staging_depth,
						 level, level,
						 box->z, box->z + box->depth - 1,
						 0, 0);

			offset = si_texture_get_offset(sctx->screen, staging_depth,
						       level, box,
						       &trans->b.b.stride,
						       &trans->b.b.layer_stride);
		}

		trans->staging = &staging_depth->buffer;
		buf = trans->staging;
	} else if (use_staging_texture) {
		struct pipe_resource resource;
		struct si_texture *staging;

		si_init_temp_resource_from_box(&resource, texture, box, level,
					       SI_RESOURCE_FLAG_TRANSFER);
		resource.usage = (usage & PIPE_TRANSFER_READ) ?
			PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;

		/* Create the temporary texture. */
		staging = (struct si_texture*)ctx->screen->resource_create(ctx->screen, &resource);
		if (!staging) {
			PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
			goto fail_trans;
		}
		trans->staging = &staging->buffer;

		/* Just get the strides. */
		si_texture_get_offset(sctx->screen, staging, 0, NULL,
				      &trans->b.b.stride,
				      &trans->b.b.layer_stride);

		if (usage & PIPE_TRANSFER_READ)
			si_copy_to_staging_texture(ctx, trans);
		else
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

		buf = trans->staging;
	} else {
		/* The resource is mapped directly. */
		offset = si_texture_get_offset(sctx->screen, tex, level, box,
					       &trans->b.b.stride,
					       &trans->b.b.layer_stride);
		buf = &tex->buffer;
	}

	/* Always unmap texture CPU mappings on 32-bit architectures, so that
	 * we don't run out of the CPU address space.
	 */
	if (sizeof(void*) == 4)
		usage |= RADEON_TRANSFER_TEMPORARY;

	if (!(map = si_buffer_map_sync_with_rings(sctx, buf, usage)))
		goto fail_trans;

	*ptransfer = &trans->b.b;
	return map + offset;

fail_trans:
	si_resource_reference(&trans->staging, NULL);
	pipe_resource_reference(&trans->b.b.resource, NULL);
	FREE(trans);
	return NULL;
}

static void si_texture_transfer_unmap(struct pipe_context *ctx,
				      struct pipe_transfer* transfer)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_transfer *stransfer = (struct si_transfer*)transfer;
	struct pipe_resource *texture = transfer->resource;
	struct si_texture *tex = (struct si_texture*)texture;

	/* Always unmap texture CPU mappings on 32-bit architectures, so that
	 * we don't run out of the CPU address space.
	 */
	if (sizeof(void*) == 4) {
		struct si_resource *buf =
			stransfer->staging ? stransfer->staging : &tex->buffer;

		sctx->ws->buffer_unmap(buf->buf);
	}

	if ((transfer->usage & PIPE_TRANSFER_WRITE) && stransfer->staging) {
		if (tex->is_depth && tex->buffer.b.b.nr_samples <= 1) {
			ctx->resource_copy_region(ctx, texture, transfer->level,
						  transfer->box.x, transfer->box.y, transfer->box.z,
						  &stransfer->staging->b.b, transfer->level,
						  &transfer->box);
		} else {
			si_copy_from_staging_texture(ctx, stransfer);
		}
	}

	if (stransfer->staging) {
		sctx->num_alloc_tex_transfer_bytes += stransfer->staging->buf->size;
		si_resource_reference(&stransfer->staging, NULL);
	}

	/* Heuristic for {upload, draw, upload, draw, ..}:
	 *
	 * Flush the gfx IB if we've allocated too much texture storage.
	 *
	 * The idea is that we don't want to build IBs that use too much
	 * memory and put pressure on the kernel memory manager and we also
	 * want to make temporary and invalidated buffers go idle ASAP to
	 * decrease the total memory usage or make them reusable. The memory
	 * usage will be slightly higher than given here because of the buffer
	 * cache in the winsys.
	 *
	 * The result is that the kernel memory manager is never a bottleneck.
	 */
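	/* E.g. with a 4 GiB GART, the threshold below allows about 1 GiB of
	 * staging and invalidated allocations per IB before an async flush.
	 */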
	if (sctx->num_alloc_tex_transfer_bytes > sctx->screen->info.gart_size / 4) {
		si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
		sctx->num_alloc_tex_transfer_bytes = 0;
	}

	pipe_resource_reference(&transfer->resource, NULL);
	FREE(transfer);
}

static const struct u_resource_vtbl si_texture_vtbl =
{
	NULL,				/* get_handle */
	si_texture_destroy,		/* resource_destroy */
	si_texture_transfer_map,	/* transfer_map */
	u_default_transfer_flush_region, /* transfer_flush_region */
	si_texture_transfer_unmap,	/* transfer_unmap */
};

/* Return whether it's allowed to reinterpret one format as another with DCC
 * enabled.
 */
bool vi_dcc_formats_compatible(enum pipe_format format1,
			       enum pipe_format format2)
{
	const struct util_format_description *desc1, *desc2;

	/* No format change - exit early. */
	if (format1 == format2)
		return true;

	format1 = si_simplify_cb_format(format1);
	format2 = si_simplify_cb_format(format2);

	/* Check again after format adjustments. */
	if (format1 == format2)
		return true;

	desc1 = util_format_description(format1);
	desc2 = util_format_description(format2);

	if (desc1->layout != UTIL_FORMAT_LAYOUT_PLAIN ||
	    desc2->layout != UTIL_FORMAT_LAYOUT_PLAIN)
		return false;

	/* Float and non-float are totally incompatible. */
	if ((desc1->channel[0].type == UTIL_FORMAT_TYPE_FLOAT) !=
	    (desc2->channel[0].type == UTIL_FORMAT_TYPE_FLOAT))
		return false;

	/* Channel sizes must match across DCC formats.
	 * Comparing just the first 2 channels should be enough.
	 */
	if (desc1->channel[0].size != desc2->channel[0].size ||
	    (desc1->nr_channels >= 2 &&
	     desc1->channel[1].size != desc2->channel[1].size))
		return false;

	/* Everything below is not needed if the driver never uses the DCC
	 * clear code with the value of 1.
	 */

	/* If the clear values are all 1 or all 0, this constraint can be
	 * ignored. */
	if (vi_alpha_is_on_msb(format1) != vi_alpha_is_on_msb(format2))
		return false;

	/* Channel types must match if the clear value of 1 is used.
	 * The type categories are only float, signed, unsigned.
	 * NORM and INT are always compatible.
	 */
	if (desc1->channel[0].type != desc2->channel[0].type ||
	    (desc1->nr_channels >= 2 &&
	     desc1->channel[1].type != desc2->channel[1].type))
		return false;

	return true;
}
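
/* A few illustrative cases: R8G8B8A8_SRGB and R8G8B8A8_UNORM simplify to
 * the same CB format and are compatible; R32_FLOAT and R32_UINT are not
 * (float vs. non-float); R8G8B8A8_UNORM and R16G16_UNORM are not (8-bit
 * vs. 16-bit channels), even though both are 32 bpp.
 */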

bool vi_dcc_formats_are_incompatible(struct pipe_resource *tex,
				     unsigned level,
				     enum pipe_format view_format)
{
	struct si_texture *stex = (struct si_texture *)tex;

	return vi_dcc_enabled(stex, level) &&
	       !vi_dcc_formats_compatible(tex->format, view_format);
}

/* This can't be merged with the above function, because
 * vi_dcc_formats_compatible should be called only when DCC is enabled. */
void vi_disable_dcc_if_incompatible_format(struct si_context *sctx,
					   struct pipe_resource *tex,
					   unsigned level,
					   enum pipe_format view_format)
{
	struct si_texture *stex = (struct si_texture *)tex;

	if (vi_dcc_formats_are_incompatible(tex, level, view_format))
		if (!si_texture_disable_dcc(sctx, stex))
			si_decompress_dcc(sctx, stex);
}

struct pipe_surface *si_create_surface_custom(struct pipe_context *pipe,
					      struct pipe_resource *texture,
					      const struct pipe_surface *templ,
					      unsigned width0, unsigned height0,
					      unsigned width, unsigned height)
{
	struct si_surface *surface = CALLOC_STRUCT(si_surface);

	if (!surface)
		return NULL;

	assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
	assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));

	pipe_reference_init(&surface->base.reference, 1);
	pipe_resource_reference(&surface->base.texture, texture);
	surface->base.context = pipe;
	surface->base.format = templ->format;
	surface->base.width = width;
	surface->base.height = height;
	surface->base.u = templ->u;

	surface->width0 = width0;
	surface->height0 = height0;

	surface->dcc_incompatible =
		texture->target != PIPE_BUFFER &&
		vi_dcc_formats_are_incompatible(texture, templ->u.tex.level,
						templ->format);
	return &surface->base;
}

static struct pipe_surface *si_create_surface(struct pipe_context *pipe,
					      struct pipe_resource *tex,
					      const struct pipe_surface *templ)
{
	unsigned level = templ->u.tex.level;
	unsigned width = u_minify(tex->width0, level);
	unsigned height = u_minify(tex->height0, level);
	unsigned width0 = tex->width0;
	unsigned height0 = tex->height0;

	if (tex->target != PIPE_BUFFER && templ->format != tex->format) {
		const struct util_format_description *tex_desc
			= util_format_description(tex->format);
		const struct util_format_description *templ_desc
			= util_format_description(templ->format);

		assert(tex_desc->block.bits == templ_desc->block.bits);

		/* Adjust size of surface if and only if the block width or
		 * height is changed. */
		if (tex_desc->block.width != templ_desc->block.width ||
		    tex_desc->block.height != templ_desc->block.height) {
			unsigned nblks_x = util_format_get_nblocksx(tex->format, width);
			unsigned nblks_y = util_format_get_nblocksy(tex->format, height);

			width = nblks_x * templ_desc->block.width;
			height = nblks_y * templ_desc->block.height;

			width0 = util_format_get_nblocksx(tex->format, width0);
			height0 = util_format_get_nblocksy(tex->format, height0);
		}
	}

	return si_create_surface_custom(pipe, tex, templ,
					width0, height0,
					width, height);
}
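
/* E.g. viewing a 64x64 BC1 texture (4x4 blocks, 64 bits each) as
 * R32G32_UINT (1x1 blocks, also 64 bits) yields a 16x16 surface: 16x16
 * blocks times the 1x1 block size of the view format.
 */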

static void si_surface_destroy(struct pipe_context *pipe,
			       struct pipe_surface *surface)
{
	pipe_resource_reference(&surface->texture, NULL);
	FREE(surface);
}

unsigned si_translate_colorswap(enum pipe_format format, bool do_endian_swap)
{
	const struct util_format_description *desc = util_format_description(format);

#define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == PIPE_SWIZZLE_##swz)

	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_028C70_SWAP_STD;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
		return ~0U;

	switch (desc->nr_channels) {
	case 1:
		if (HAS_SWIZZLE(0,X))
			return V_028C70_SWAP_STD; /* X___ */
		else if (HAS_SWIZZLE(3,X))
			return V_028C70_SWAP_ALT_REV; /* ___X */
		break;
	case 2:
		if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) ||
		    (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) ||
		    (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y)))
			return V_028C70_SWAP_STD; /* XY__ */
		else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
			 (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
			 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
			/* YX__ */
			return (do_endian_swap ? V_028C70_SWAP_STD : V_028C70_SWAP_STD_REV);
		else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
			return V_028C70_SWAP_ALT; /* X__Y */
		else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
			return V_028C70_SWAP_ALT_REV; /* Y__X */
		break;
	case 3:
		if (HAS_SWIZZLE(0,X))
			return (do_endian_swap ? V_028C70_SWAP_STD_REV : V_028C70_SWAP_STD);
		else if (HAS_SWIZZLE(0,Z))
			return V_028C70_SWAP_STD_REV; /* ZYX */
		break;
	case 4:
		/* Check the middle channels; the 1st and 4th channels can be NONE. */
		if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z)) {
			return V_028C70_SWAP_STD; /* XYZW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y)) {
			return V_028C70_SWAP_STD_REV; /* WZYX */
		} else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X)) {
			return V_028C70_SWAP_ALT; /* ZYXW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,W)) {
			/* YZWX */
			if (desc->is_array)
				return V_028C70_SWAP_ALT_REV;
			else
				return (do_endian_swap ? V_028C70_SWAP_ALT : V_028C70_SWAP_ALT_REV);
		}
		break;
	}
	return ~0U;
}
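
/* E.g. with do_endian_swap = false: R8G8B8A8 (swizzle XYZW) maps to
 * V_028C70_SWAP_STD, B8G8R8A8 (swizzle ZYXW) maps to V_028C70_SWAP_ALT,
 * and any layout the switch doesn't recognize returns ~0U.
 */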

/* PIPELINE_STAT-BASED DCC ENABLEMENT FOR DISPLAYABLE SURFACES */

static void vi_dcc_clean_up_context_slot(struct si_context *sctx,
					 int slot)
{
	int i;

	if (sctx->dcc_stats[slot].query_active)
		vi_separate_dcc_stop_query(sctx,
					   sctx->dcc_stats[slot].tex);

	for (i = 0; i < ARRAY_SIZE(sctx->dcc_stats[slot].ps_stats); i++)
		if (sctx->dcc_stats[slot].ps_stats[i]) {
			sctx->b.destroy_query(&sctx->b,
					      sctx->dcc_stats[slot].ps_stats[i]);
			sctx->dcc_stats[slot].ps_stats[i] = NULL;
		}

	si_texture_reference(&sctx->dcc_stats[slot].tex, NULL);
}

/**
 * Return the per-context slot where DCC statistics queries for the texture live.
 */
static unsigned vi_get_context_dcc_stats_index(struct si_context *sctx,
					       struct si_texture *tex)
{
	int i, empty_slot = -1;

	/* Remove zombie textures (textures kept alive by this array only). */
	for (i = 0; i < ARRAY_SIZE(sctx->dcc_stats); i++)
		if (sctx->dcc_stats[i].tex &&
		    sctx->dcc_stats[i].tex->buffer.b.b.reference.count == 1)
			vi_dcc_clean_up_context_slot(sctx, i);

	/* Find the texture. */
	for (i = 0; i < ARRAY_SIZE(sctx->dcc_stats); i++) {
		/* Return if found. */
		if (sctx->dcc_stats[i].tex == tex) {
			sctx->dcc_stats[i].last_use_timestamp = os_time_get();
			return i;
		}

		/* Record the first seen empty slot. */
		if (empty_slot == -1 && !sctx->dcc_stats[i].tex)
			empty_slot = i;
	}

	/* Not found. Remove the oldest member to make space in the array. */
	if (empty_slot == -1) {
		int oldest_slot = 0;

		/* Find the oldest slot. */
		for (i = 1; i < ARRAY_SIZE(sctx->dcc_stats); i++)
			if (sctx->dcc_stats[oldest_slot].last_use_timestamp >
			    sctx->dcc_stats[i].last_use_timestamp)
				oldest_slot = i;

		/* Clean up the oldest slot. */
		vi_dcc_clean_up_context_slot(sctx, oldest_slot);
		empty_slot = oldest_slot;
	}

	/* Add the texture to the new slot. */
	si_texture_reference(&sctx->dcc_stats[empty_slot].tex, tex);
	sctx->dcc_stats[empty_slot].last_use_timestamp = os_time_get();
	return empty_slot;
}

static struct pipe_query *
vi_create_resuming_pipestats_query(struct si_context *sctx)
{
	struct si_query_hw *query = (struct si_query_hw*)
		sctx->b.create_query(&sctx->b, PIPE_QUERY_PIPELINE_STATISTICS, 0);

	query->flags |= SI_QUERY_HW_FLAG_BEGIN_RESUMES;
	return (struct pipe_query*)query;
}

/**
 * Called when binding a color buffer.
 */
void vi_separate_dcc_start_query(struct si_context *sctx,
				 struct si_texture *tex)
{
	unsigned i = vi_get_context_dcc_stats_index(sctx, tex);

	assert(!sctx->dcc_stats[i].query_active);

	if (!sctx->dcc_stats[i].ps_stats[0])
		sctx->dcc_stats[i].ps_stats[0] = vi_create_resuming_pipestats_query(sctx);

	/* begin or resume the query */
	sctx->b.begin_query(&sctx->b, sctx->dcc_stats[i].ps_stats[0]);
	sctx->dcc_stats[i].query_active = true;
}

/**
 * Called when unbinding a color buffer.
 */
void vi_separate_dcc_stop_query(struct si_context *sctx,
				struct si_texture *tex)
{
	unsigned i = vi_get_context_dcc_stats_index(sctx, tex);

	assert(sctx->dcc_stats[i].query_active);
	assert(sctx->dcc_stats[i].ps_stats[0]);

	/* pause or end the query */
	sctx->b.end_query(&sctx->b, sctx->dcc_stats[i].ps_stats[0]);
	sctx->dcc_stats[i].query_active = false;
}

static bool vi_should_enable_separate_dcc(struct si_texture *tex)
{
	/* The minimum number of fullscreen draws per frame that is required
	 * to enable DCC. */
	return tex->ps_draw_ratio + tex->num_slow_clears >= 5;
}
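
/* E.g. for a 1920x1080 surface (~2.07M pixels), roughly 10.4M PS
 * invocations in the measured window give ps_draw_ratio = 5, which is
 * enough to enable DCC even without any slow clears.
 */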

/* Called by fast clear. */
void vi_separate_dcc_try_enable(struct si_context *sctx,
				struct si_texture *tex)
{
	/* The intent is to use this with shared displayable back buffers,
	 * but it's not strictly limited to them.
	 */
	if (!tex->buffer.b.is_shared ||
	    !(tex->buffer.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) ||
	    tex->buffer.b.b.target != PIPE_TEXTURE_2D ||
	    tex->buffer.b.b.last_level > 0 ||
	    !tex->surface.dcc_size ||
	    sctx->screen->debug_flags & DBG(NO_DCC) ||
	    sctx->screen->debug_flags & DBG(NO_DCC_FB))
		return;

	assert(sctx->chip_class >= VI);

	if (tex->dcc_offset)
		return; /* already enabled */

	/* Enable the DCC stat gathering. */
	if (!tex->dcc_gather_statistics) {
		tex->dcc_gather_statistics = true;
		vi_separate_dcc_start_query(sctx, tex);
	}

	if (!vi_should_enable_separate_dcc(tex))
		return; /* stats show that DCC decompression is too expensive */

	assert(tex->surface.num_dcc_levels);
	assert(!tex->dcc_separate_buffer);

	si_texture_discard_cmask(sctx->screen, tex);

	/* Get a DCC buffer. */
	if (tex->last_dcc_separate_buffer) {
		assert(tex->dcc_gather_statistics);
		assert(!tex->dcc_separate_buffer);
		tex->dcc_separate_buffer = tex->last_dcc_separate_buffer;
		tex->last_dcc_separate_buffer = NULL;
	} else {
		tex->dcc_separate_buffer =
			si_aligned_buffer_create(sctx->b.screen,
						 SI_RESOURCE_FLAG_UNMAPPABLE,
						 PIPE_USAGE_DEFAULT,
						 tex->surface.dcc_size,
						 tex->surface.dcc_alignment);
		if (!tex->dcc_separate_buffer)
			return;
	}

	/* dcc_offset is the absolute GPUVM address. */
	tex->dcc_offset = tex->dcc_separate_buffer->gpu_address;

	/* No need to flag anything since this is called by fast clear,
	 * which flags framebuffer state.
	 */
}

/**
 * Called by pipe_context::flush_resource, the place where DCC decompression
 * takes place.
 */
void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
					     struct si_texture *tex)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct pipe_query *tmp;
	unsigned i = vi_get_context_dcc_stats_index(sctx, tex);
	bool query_active = sctx->dcc_stats[i].query_active;
	bool disable = false;

	if (sctx->dcc_stats[i].ps_stats[2]) {
		union pipe_query_result result;

		/* Read the results. */
		struct pipe_query *query = sctx->dcc_stats[i].ps_stats[2];
		ctx->get_query_result(ctx, query,
				      true, &result);
		si_query_buffer_reset(sctx, &((struct si_query_hw*)query)->buffer);

		/* Compute the approximate number of fullscreen draws. */
		tex->ps_draw_ratio =
			result.pipeline_statistics.ps_invocations /
			(tex->buffer.b.b.width0 * tex->buffer.b.b.height0);
		sctx->last_tex_ps_draw_ratio = tex->ps_draw_ratio;

		disable = tex->dcc_separate_buffer &&
			  !vi_should_enable_separate_dcc(tex);
	}

	tex->num_slow_clears = 0;

	/* stop the statistics query for ps_stats[0] */
	if (query_active)
		vi_separate_dcc_stop_query(sctx, tex);

	/* Move the queries in the queue by one. */
	tmp = sctx->dcc_stats[i].ps_stats[2];
	sctx->dcc_stats[i].ps_stats[2] = sctx->dcc_stats[i].ps_stats[1];
	sctx->dcc_stats[i].ps_stats[1] = sctx->dcc_stats[i].ps_stats[0];
	sctx->dcc_stats[i].ps_stats[0] = tmp;
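
	/* ps_stats[0] is the query currently gathering data; after two more
	 * rotations it reaches ps_stats[2] and is read above, so the
	 * heuristic reacts with roughly two frames of latency.
	 */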

	/* create and start a new query as ps_stats[0] */
	if (query_active)
		vi_separate_dcc_start_query(sctx, tex);

	if (disable) {
		assert(!tex->last_dcc_separate_buffer);
		tex->last_dcc_separate_buffer = tex->dcc_separate_buffer;
		tex->dcc_separate_buffer = NULL;
		tex->dcc_offset = 0;
		/* no need to flag anything since this is called after
		 * decompression that re-sets framebuffer state
		 */
	}
}

static struct pipe_memory_object *
si_memobj_from_handle(struct pipe_screen *screen,
		      struct winsys_handle *whandle,
		      bool dedicated)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct si_memory_object *memobj = CALLOC_STRUCT(si_memory_object);
	struct pb_buffer *buf = NULL;
	uint32_t stride, offset;

	if (!memobj)
		return NULL;

	buf = sscreen->ws->buffer_from_handle(sscreen->ws, whandle,
					      sscreen->info.max_alignment,
					      &stride, &offset);
	if (!buf) {
		free(memobj);
		return NULL;
	}

	memobj->b.dedicated = dedicated;
	memobj->buf = buf;
	memobj->stride = stride;

	return (struct pipe_memory_object *)memobj;
}

static void
si_memobj_destroy(struct pipe_screen *screen,
		  struct pipe_memory_object *_memobj)
{
	struct si_memory_object *memobj = (struct si_memory_object *)_memobj;

	pb_reference(&memobj->buf, NULL);
	free(memobj);
}

static struct pipe_resource *
si_texture_from_memobj(struct pipe_screen *screen,
		       const struct pipe_resource *templ,
		       struct pipe_memory_object *_memobj,
		       uint64_t offset)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct si_memory_object *memobj = (struct si_memory_object *)_memobj;
	struct pipe_resource *tex =
		si_texture_from_winsys_buffer(sscreen, templ, memobj->buf,
					      memobj->stride, offset,
					      PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE |
					      PIPE_HANDLE_USAGE_SHADER_WRITE,
					      memobj->b.dedicated);
	if (!tex)
		return NULL;

	/* si_texture_from_winsys_buffer doesn't increment the refcount of
	 * memobj->buf, so increment it here.
	 */
	struct pb_buffer *buf = NULL;
	pb_reference(&buf, memobj->buf);
	return tex;
}

static bool si_check_resource_capability(struct pipe_screen *screen,
					 struct pipe_resource *resource,
					 unsigned bind)
{
	struct si_texture *tex = (struct si_texture*)resource;

	/* Buffers only support the linear flag. */
	if (resource->target == PIPE_BUFFER)
		return (bind & ~PIPE_BIND_LINEAR) == 0;

	if (bind & PIPE_BIND_LINEAR && !tex->surface.is_linear)
		return false;

	if (bind & PIPE_BIND_SCANOUT && !tex->surface.is_displayable)
		return false;

	/* TODO: PIPE_BIND_CURSOR - do we care? */
	return true;
}

void si_init_screen_texture_functions(struct si_screen *sscreen)
{
	sscreen->b.resource_from_handle = si_texture_from_handle;
	sscreen->b.resource_get_handle = si_texture_get_handle;
	sscreen->b.resource_get_info = si_texture_get_info;
	sscreen->b.resource_from_memobj = si_texture_from_memobj;
	sscreen->b.memobj_create_from_handle = si_memobj_from_handle;
	sscreen->b.memobj_destroy = si_memobj_destroy;
	sscreen->b.check_resource_capability = si_check_resource_capability;
}

void si_init_context_texture_functions(struct si_context *sctx)
{
	sctx->b.create_surface = si_create_surface;
	sctx->b.surface_destroy = si_surface_destroy;
}