17e995a2eSmrg/*
27e995a2eSmrg * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
37e995a2eSmrg *
47e995a2eSmrg * Permission is hereby granted, free of charge, to any person obtaining a
57e995a2eSmrg * copy of this software and associated documentation files (the "Software"),
67e995a2eSmrg * to deal in the Software without restriction, including without limitation
77e995a2eSmrg * on the rights to use, copy, modify, merge, publish, distribute, sub
87e995a2eSmrg * license, and/or sell copies of the Software, and to permit persons to whom
97e995a2eSmrg * the Software is furnished to do so, subject to the following conditions:
107e995a2eSmrg *
117e995a2eSmrg * The above copyright notice and this permission notice (including the next
127e995a2eSmrg * paragraph) shall be included in all copies or substantial portions of the
137e995a2eSmrg * Software.
147e995a2eSmrg *
157e995a2eSmrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
167e995a2eSmrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
177e995a2eSmrg * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
187e995a2eSmrg * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
197e995a2eSmrg * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
207e995a2eSmrg * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
217e995a2eSmrg * USE OR OTHER DEALINGS IN THE SOFTWARE.
227e995a2eSmrg *
237e995a2eSmrg * Authors:
247e995a2eSmrg *      Jerome Glisse
257e995a2eSmrg *      Corbin Simpson
267e995a2eSmrg */
277e995a2eSmrg#include "r600_pipe_common.h"
287e995a2eSmrg#include "r600_cs.h"
297e995a2eSmrg#include "r600_query.h"
301463c08dSmrg#include "util/format/u_format.h"
317e995a2eSmrg#include "util/u_log.h"
327e995a2eSmrg#include "util/u_memory.h"
337e995a2eSmrg#include "util/u_pack_color.h"
347e995a2eSmrg#include "util/u_surface.h"
357e995a2eSmrg#include "util/os_time.h"
361463c08dSmrg#include "frontend/winsys_handle.h"
377e995a2eSmrg#include <errno.h>
387e995a2eSmrg#include <inttypes.h>
397e995a2eSmrg
/* Forward declarations for helpers defined later in this file. */
static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
				       struct r600_texture *rtex);
static enum radeon_surf_mode
r600_choose_tiling(struct r600_common_screen *rscreen,
		   const struct pipe_resource *templ);
457e995a2eSmrg
467e995a2eSmrg
/* Decide whether a texture-to-texture copy can be carried out by the
 * async DMA (SDMA) engine instead of the 3D/blit path, and prepare both
 * textures for it.
 *
 * Returns false when any SDMA restriction is violated (no DMA CS,
 * mismatched bytes-per-element, MSAA, depth/stencil, or a partially
 * overwritten fast-cleared destination).  On success the destination's
 * CMASK may have been discarded and the source decompressed, so the
 * caller can issue the SDMA copy directly.
 */
bool r600_prepare_for_dma_blit(struct r600_common_context *rctx,
			       struct r600_texture *rdst,
			       unsigned dst_level, unsigned dstx,
			       unsigned dsty, unsigned dstz,
			       struct r600_texture *rsrc,
			       unsigned src_level,
			       const struct pipe_box *src_box)
{
	/* No SDMA command stream available on this context. */
	if (!rctx->dma.cs.priv)
		return false;

	/* SDMA copies require matching bytes-per-element. */
	if (rdst->surface.bpe != rsrc->surface.bpe)
		return false;

	/* MSAA: Blits don't exist in the real world. */
	if (rsrc->resource.b.b.nr_samples > 1 ||
	    rdst->resource.b.b.nr_samples > 1)
		return false;

	/* Depth-stencil surfaces:
	 *   When dst is linear, the DB->CB copy preserves HTILE.
	 *   When dst is tiled, the 3D path must be used to update HTILE.
	 */
	if (rsrc->is_depth || rdst->is_depth)
		return false;

	/* CMASK as:
	 *   src: Both texture and SDMA paths need decompression. Use SDMA.
	 *   dst: If overwriting the whole texture, discard CMASK and use
	 *        SDMA. Otherwise, use the 3D path.
	 */
	if (rdst->cmask.size && rdst->dirty_level_mask & (1 << dst_level)) {
		/* The CMASK clear is only enabled for the first level. */
		assert(dst_level == 0);
		if (!util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
						      dstx, dsty, dstz, src_box->width,
						      src_box->height, src_box->depth))
			return false;

		r600_texture_discard_cmask(rctx->screen, rdst);
	}

	/* All requirements are met. Prepare textures for SDMA. */
	if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level))
		rctx->b.flush_resource(&rctx->b, &rsrc->resource.b.b);

	/* Both levels must be decompressed at this point. */
	assert(!(rsrc->dirty_level_mask & (1 << src_level)));
	assert(!(rdst->dirty_level_mask & (1 << dst_level)));

	return true;
}
987e995a2eSmrg
997e995a2eSmrg/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
1007e995a2eSmrgstatic void r600_copy_region_with_blit(struct pipe_context *pipe,
1017e995a2eSmrg				       struct pipe_resource *dst,
1027e995a2eSmrg                                       unsigned dst_level,
1037e995a2eSmrg                                       unsigned dstx, unsigned dsty, unsigned dstz,
1047e995a2eSmrg                                       struct pipe_resource *src,
1057e995a2eSmrg                                       unsigned src_level,
1067e995a2eSmrg                                       const struct pipe_box *src_box)
1077e995a2eSmrg{
1087e995a2eSmrg	struct pipe_blit_info blit;
1097e995a2eSmrg
1107e995a2eSmrg	memset(&blit, 0, sizeof(blit));
1117e995a2eSmrg	blit.src.resource = src;
1127e995a2eSmrg	blit.src.format = src->format;
1137e995a2eSmrg	blit.src.level = src_level;
1147e995a2eSmrg	blit.src.box = *src_box;
1157e995a2eSmrg	blit.dst.resource = dst;
1167e995a2eSmrg	blit.dst.format = dst->format;
1177e995a2eSmrg	blit.dst.level = dst_level;
1187e995a2eSmrg	blit.dst.box.x = dstx;
1197e995a2eSmrg	blit.dst.box.y = dsty;
1207e995a2eSmrg	blit.dst.box.z = dstz;
1217e995a2eSmrg	blit.dst.box.width = src_box->width;
1227e995a2eSmrg	blit.dst.box.height = src_box->height;
1237e995a2eSmrg	blit.dst.box.depth = src_box->depth;
1247e995a2eSmrg	blit.mask = util_format_get_mask(src->format) &
1257e995a2eSmrg		    util_format_get_mask(dst->format);
1267e995a2eSmrg	blit.filter = PIPE_TEX_FILTER_NEAREST;
1277e995a2eSmrg
1287e995a2eSmrg	if (blit.mask) {
1297e995a2eSmrg		pipe->blit(pipe, &blit);
1307e995a2eSmrg	}
1317e995a2eSmrg}
1327e995a2eSmrg
1337e995a2eSmrg/* Copy from a full GPU texture to a transfer's staging one. */
1347e995a2eSmrgstatic void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
1357e995a2eSmrg{
1367e995a2eSmrg	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
1377e995a2eSmrg	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
1387e995a2eSmrg	struct pipe_resource *dst = &rtransfer->staging->b.b;
1397e995a2eSmrg	struct pipe_resource *src = transfer->resource;
1407e995a2eSmrg
1417e995a2eSmrg	if (src->nr_samples > 1) {
1427e995a2eSmrg		r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
1437e995a2eSmrg					   src, transfer->level, &transfer->box);
1447e995a2eSmrg		return;
1457e995a2eSmrg	}
1467e995a2eSmrg
1477e995a2eSmrg	rctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
1487e995a2eSmrg		       &transfer->box);
1497e995a2eSmrg}
1507e995a2eSmrg
1517e995a2eSmrg/* Copy from a transfer's staging texture to a full GPU one. */
1527e995a2eSmrgstatic void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
1537e995a2eSmrg{
1547e995a2eSmrg	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
1557e995a2eSmrg	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
1567e995a2eSmrg	struct pipe_resource *dst = transfer->resource;
1577e995a2eSmrg	struct pipe_resource *src = &rtransfer->staging->b.b;
1587e995a2eSmrg	struct pipe_box sbox;
1597e995a2eSmrg
1607e995a2eSmrg	u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);
1617e995a2eSmrg
1627e995a2eSmrg	if (dst->nr_samples > 1) {
1637e995a2eSmrg		r600_copy_region_with_blit(ctx, dst, transfer->level,
1647e995a2eSmrg					   transfer->box.x, transfer->box.y, transfer->box.z,
1657e995a2eSmrg					   src, 0, &sbox);
1667e995a2eSmrg		return;
1677e995a2eSmrg	}
1687e995a2eSmrg
1697e995a2eSmrg	rctx->dma_copy(ctx, dst, transfer->level,
1707e995a2eSmrg		       transfer->box.x, transfer->box.y, transfer->box.z,
1717e995a2eSmrg		       src, 0, &sbox);
1727e995a2eSmrg}
1737e995a2eSmrg
/* Compute the byte offset of (level, box) within the texture's backing
 * buffer, and return the row and layer strides through the out params.
 *
 * When box is NULL, returns the offset of the whole mip level.
 * NOTE(review): the return type is unsigned although the internal math is
 * 64-bit; callers appear to rely on offsets fitting in 32 bits on these
 * legacy chips (the slice-size assert below guards part of that).
 */
static unsigned r600_texture_get_offset(struct r600_common_screen *rscreen,
					struct r600_texture *rtex, unsigned level,
					const struct pipe_box *box,
					unsigned *stride,
					unsigned *layer_stride)
{
	/* Row stride in bytes: blocks per row times bytes per block. */
	*stride = rtex->surface.u.legacy.level[level].nblk_x *
		rtex->surface.bpe;
	/* slice_size is stored in dwords; the byte size must fit in 32 bits. */
	assert((uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 <= UINT_MAX);
	*layer_stride = (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4;

	/* Level offsets are stored in 256-byte units. */
	if (!box)
		return (uint64_t)rtex->surface.u.legacy.level[level].offset_256B * 256;

	/* Each texture is an array of mipmap levels. Each level is
	 * an array of slices. */
	return (uint64_t)rtex->surface.u.legacy.level[level].offset_256B * 256 +
		box->z * (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 +
		(box->y / rtex->surface.blk_h *
		 rtex->surface.u.legacy.level[level].nblk_x +
		 box->x / rtex->surface.blk_w) * rtex->surface.bpe;
}
1967e995a2eSmrg
/* Compute the layout of a texture via the winsys surface allocator and
 * store it in *surface.
 *
 * array_mode selects linear/1D/2D tiling.  pitch_in_bytes_override and
 * offset adjust the layout for BOs imported from other processes (e.g.
 * the DDX); is_scanout and is_flushed_depth further qualify how the
 * surface will be used.  Returns 0 on success, or the winsys error code.
 */
static int r600_init_surface(struct r600_common_screen *rscreen,
			     struct radeon_surf *surface,
			     const struct pipe_resource *ptex,
			     enum radeon_surf_mode array_mode,
			     unsigned pitch_in_bytes_override,
			     unsigned offset,
			     bool is_imported,
			     bool is_scanout,
			     bool is_flushed_depth)
{
	const struct util_format_description *desc =
		util_format_description(ptex->format);
	bool is_depth, is_stencil;
	int r;
	unsigned i, bpe, flags = 0;

	is_depth = util_format_has_depth(desc);
	is_stencil = util_format_has_stencil(desc);

	if (rscreen->chip_class >= EVERGREEN && !is_flushed_depth &&
	    ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
		bpe = 4; /* stencil is allocated separately on evergreen */
	} else {
		bpe = util_format_get_blocksize(ptex->format);
		assert(util_is_power_of_two_or_zero(bpe));
	}

	/* Flushed-depth textures are sampled as color, so no Z/S flags. */
	if (!is_flushed_depth && is_depth) {
		flags |= RADEON_SURF_ZBUFFER;

		if (is_stencil)
			flags |= RADEON_SURF_SBUFFER;
	}

	if (ptex->bind & PIPE_BIND_SCANOUT || is_scanout) {
		/* This should catch bugs in gallium users setting incorrect flags. */
		assert(ptex->nr_samples <= 1 &&
		       ptex->array_size == 1 &&
		       ptex->depth0 == 1 &&
		       ptex->last_level == 0 &&
		       !(flags & RADEON_SURF_Z_OR_SBUFFER));

		flags |= RADEON_SURF_SCANOUT;
	}

	if (ptex->bind & PIPE_BIND_SHARED)
		flags |= RADEON_SURF_SHAREABLE;
	if (is_imported)
		flags |= RADEON_SURF_IMPORTED | RADEON_SURF_SHAREABLE;

	r = rscreen->ws->surface_init(rscreen->ws, ptex,
				      flags, bpe, array_mode, surface);
	if (r) {
		return r;
	}

	if (pitch_in_bytes_override &&
	    pitch_in_bytes_override != surface->u.legacy.level[0].nblk_x * bpe) {
		/* old ddx on evergreen over estimate alignment for 1d, only 1 level
		 * for those
		 */
		surface->u.legacy.level[0].nblk_x = pitch_in_bytes_override / bpe;
		surface->u.legacy.level[0].slice_size_dw =
			((uint64_t)pitch_in_bytes_override * surface->u.legacy.level[0].nblk_y) / 4;
	}

	if (offset) {
		/* Shift every mip level by the import offset; offsets are
		 * stored in 256-byte units. */
		for (i = 0; i < ARRAY_SIZE(surface->u.legacy.level); ++i)
			surface->u.legacy.level[i].offset_256B += offset / 256;
	}

	return 0;
}
2707e995a2eSmrg
2717e995a2eSmrgstatic void r600_texture_init_metadata(struct r600_common_screen *rscreen,
2727e995a2eSmrg				       struct r600_texture *rtex,
2737e995a2eSmrg				       struct radeon_bo_metadata *metadata)
2747e995a2eSmrg{
2757e995a2eSmrg	struct radeon_surf *surface = &rtex->surface;
2767e995a2eSmrg
2777e995a2eSmrg	memset(metadata, 0, sizeof(*metadata));
2787e995a2eSmrg
2797e995a2eSmrg	metadata->u.legacy.microtile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D ?
2807e995a2eSmrg		RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
2817e995a2eSmrg	metadata->u.legacy.macrotile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D ?
2827e995a2eSmrg		RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
2837e995a2eSmrg	metadata->u.legacy.pipe_config = surface->u.legacy.pipe_config;
2847e995a2eSmrg	metadata->u.legacy.bankw = surface->u.legacy.bankw;
2857e995a2eSmrg	metadata->u.legacy.bankh = surface->u.legacy.bankh;
2867e995a2eSmrg	metadata->u.legacy.tile_split = surface->u.legacy.tile_split;
2877e995a2eSmrg	metadata->u.legacy.mtilea = surface->u.legacy.mtilea;
2887e995a2eSmrg	metadata->u.legacy.num_banks = surface->u.legacy.num_banks;
2897e995a2eSmrg	metadata->u.legacy.stride = surface->u.legacy.level[0].nblk_x * surface->bpe;
2907e995a2eSmrg	metadata->u.legacy.scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
2917e995a2eSmrg}
2927e995a2eSmrg
2937e995a2eSmrgstatic void r600_surface_import_metadata(struct r600_common_screen *rscreen,
2947e995a2eSmrg					 struct radeon_surf *surf,
2957e995a2eSmrg					 struct radeon_bo_metadata *metadata,
2967e995a2eSmrg					 enum radeon_surf_mode *array_mode,
2977e995a2eSmrg					 bool *is_scanout)
2987e995a2eSmrg{
2997e995a2eSmrg	surf->u.legacy.pipe_config = metadata->u.legacy.pipe_config;
3007e995a2eSmrg	surf->u.legacy.bankw = metadata->u.legacy.bankw;
3017e995a2eSmrg	surf->u.legacy.bankh = metadata->u.legacy.bankh;
3027e995a2eSmrg	surf->u.legacy.tile_split = metadata->u.legacy.tile_split;
3037e995a2eSmrg	surf->u.legacy.mtilea = metadata->u.legacy.mtilea;
3047e995a2eSmrg	surf->u.legacy.num_banks = metadata->u.legacy.num_banks;
3057e995a2eSmrg
3067e995a2eSmrg	if (metadata->u.legacy.macrotile == RADEON_LAYOUT_TILED)
3077e995a2eSmrg		*array_mode = RADEON_SURF_MODE_2D;
3087e995a2eSmrg	else if (metadata->u.legacy.microtile == RADEON_LAYOUT_TILED)
3097e995a2eSmrg		*array_mode = RADEON_SURF_MODE_1D;
3107e995a2eSmrg	else
3117e995a2eSmrg		*array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
3127e995a2eSmrg
3137e995a2eSmrg	*is_scanout = metadata->u.legacy.scanout;
3147e995a2eSmrg}
3157e995a2eSmrg
3167e995a2eSmrgstatic void r600_eliminate_fast_color_clear(struct r600_common_context *rctx,
3177e995a2eSmrg					    struct r600_texture *rtex)
3187e995a2eSmrg{
3197e995a2eSmrg	struct r600_common_screen *rscreen = rctx->screen;
3207e995a2eSmrg	struct pipe_context *ctx = &rctx->b;
3217e995a2eSmrg
3227e995a2eSmrg	if (ctx == rscreen->aux_context)
3237e995a2eSmrg		mtx_lock(&rscreen->aux_context_lock);
3247e995a2eSmrg
3257e995a2eSmrg	ctx->flush_resource(ctx, &rtex->resource.b.b);
3267e995a2eSmrg	ctx->flush(ctx, NULL, 0);
3277e995a2eSmrg
3287e995a2eSmrg	if (ctx == rscreen->aux_context)
3297e995a2eSmrg		mtx_unlock(&rscreen->aux_context_lock);
3307e995a2eSmrg}
3317e995a2eSmrg
/* Disable the CMASK (fast clear metadata) of a texture and release the
 * separate CMASK buffer if one was allocated.  No-op when the texture
 * has no CMASK. */
static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
				       struct r600_texture *rtex)
{
	if (!rtex->cmask.size)
		return;

	/* Only expected for single-sample textures. */
	assert(rtex->resource.b.b.nr_samples <= 1);

	/* Disable CMASK. */
	memset(&rtex->cmask, 0, sizeof(rtex->cmask));
	/* Point the CMASK base register at the texture itself so the
	 * register state stays valid. */
	rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;
	rtex->dirty_level_mask = 0;

	/* Clear the fast-clear bit in the colorbuffer register state. */
	rtex->cb_color_info &= ~EG_S_028C70_FAST_CLEAR(1);

	if (rtex->cmask_buffer != &rtex->resource)
	    r600_resource_reference(&rtex->cmask_buffer, NULL);

	/* Notify all contexts about the change. */
	p_atomic_inc(&rscreen->dirty_tex_counter);
	p_atomic_inc(&rscreen->compressed_colortex_counter);
}
3547e995a2eSmrg
/* Re-create rtex's storage with new_bind_flag added (PIPE_BIND_SHARED or
 * PIPE_BIND_LINEAR) and switch rtex over to the new buffer in place.
 *
 * The pipe_resource pointer held by callers stays valid; only the backing
 * buffer and the layout-derived fields are replaced.  When
 * invalidate_storage is true the old contents are not copied over.
 * Silently does nothing when reallocation is impossible (shared resource,
 * pre-GFX6 chip, or a layout that cannot be made linear).
 */
static void r600_reallocate_texture_inplace(struct r600_common_context *rctx,
					    struct r600_texture *rtex,
					    unsigned new_bind_flag,
					    bool invalidate_storage)
{
	struct pipe_screen *screen = rctx->b.screen;
	struct r600_texture *new_tex;
	struct pipe_resource templ = rtex->resource.b.b;
	unsigned i;

	templ.bind |= new_bind_flag;

	/* r600g doesn't react to dirty_tex_descriptor_counter */
	if (rctx->chip_class < GFX6)
		return;

	/* Shared resources have an externally visible layout; never move them. */
	if (rtex->resource.b.is_shared)
		return;

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		/* Already linear — nothing to do. */
		if (rtex->surface.is_linear)
			return;

		/* This fails with MSAA, depth, and compressed textures. */
		if (r600_choose_tiling(rctx->screen, &templ) !=
		    RADEON_SURF_MODE_LINEAR_ALIGNED)
			return;
	}

	new_tex = (struct r600_texture*)screen->resource_create(screen, &templ);
	if (!new_tex)
		return;

	/* Copy the pixels to the new texture. */
	if (!invalidate_storage) {
		for (i = 0; i <= templ.last_level; i++) {
			struct pipe_box box;

			u_box_3d(0, 0, 0,
				 u_minify(templ.width0, i), u_minify(templ.height0, i),
				 util_num_layers(&templ, i), &box);

			rctx->dma_copy(&rctx->b, &new_tex->resource.b.b, i, 0, 0, 0,
				       &rtex->resource.b.b, i, &box);
		}
	}

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		/* A linear destination cannot carry fast-clear metadata. */
		r600_texture_discard_cmask(rctx->screen, rtex);
	}

	/* Replace the structure fields of rtex. */
	rtex->resource.b.b.bind = templ.bind;
	pb_reference(&rtex->resource.buf, new_tex->resource.buf);
	rtex->resource.gpu_address = new_tex->resource.gpu_address;
	rtex->resource.vram_usage = new_tex->resource.vram_usage;
	rtex->resource.gart_usage = new_tex->resource.gart_usage;
	rtex->resource.bo_size = new_tex->resource.bo_size;
	rtex->resource.bo_alignment = new_tex->resource.bo_alignment;
	rtex->resource.domains = new_tex->resource.domains;
	rtex->resource.flags = new_tex->resource.flags;
	rtex->size = new_tex->size;
	rtex->db_render_format = new_tex->db_render_format;
	rtex->db_compatible = new_tex->db_compatible;
	rtex->can_sample_z = new_tex->can_sample_z;
	rtex->can_sample_s = new_tex->can_sample_s;
	rtex->surface = new_tex->surface;
	rtex->fmask = new_tex->fmask;
	rtex->cmask = new_tex->cmask;
	rtex->cb_color_info = new_tex->cb_color_info;
	rtex->last_msaa_resolve_target_micro_mode = new_tex->last_msaa_resolve_target_micro_mode;
	rtex->htile_offset = new_tex->htile_offset;
	rtex->depth_cleared = new_tex->depth_cleared;
	rtex->stencil_cleared = new_tex->stencil_cleared;
	rtex->non_disp_tiling = new_tex->non_disp_tiling;
	rtex->framebuffers_bound = new_tex->framebuffers_bound;

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		/* A linear texture must not carry any of this metadata. */
		assert(!rtex->htile_offset);
		assert(!rtex->cmask.size);
		assert(!rtex->fmask.size);
		assert(!rtex->is_depth);
	}

	/* Drop our temporary reference; the buffer now lives on through rtex. */
	r600_texture_reference(&new_tex, NULL);

	p_atomic_inc(&rctx->screen->dirty_tex_counter);
}
4437e995a2eSmrg
444d8407755Smayastatic void r600_texture_get_info(struct pipe_screen* screen,
445d8407755Smaya				  struct pipe_resource *resource,
446d8407755Smaya				  unsigned *pstride,
447d8407755Smaya				  unsigned *poffset)
448d8407755Smaya{
449d8407755Smaya	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
450d8407755Smaya	struct r600_texture *rtex = (struct r600_texture*)resource;
451d8407755Smaya	unsigned stride = 0;
452d8407755Smaya	unsigned offset = 0;
453d8407755Smaya
454d8407755Smaya	if (!rscreen || !rtex)
455d8407755Smaya		return;
456d8407755Smaya
457d8407755Smaya	if (resource->target != PIPE_BUFFER) {
4581463c08dSmrg		offset = (uint64_t)rtex->surface.u.legacy.level[0].offset_256B * 256;
459d8407755Smaya		stride = rtex->surface.u.legacy.level[0].nblk_x *
460d8407755Smaya			 rtex->surface.bpe;
461d8407755Smaya	}
462d8407755Smaya
463d8407755Smaya	if (pstride)
464d8407755Smaya		*pstride = stride;
465d8407755Smaya
466d8407755Smaya	if (poffset)
467d8407755Smaya		*poffset = offset;
468d8407755Smaya}
469d8407755Smaya
/* Export a resource as a winsys handle (DRI / EGL / interop sharing).
 *
 * Textures may first be moved out of suballocated storage, have their
 * fast clear eliminated, and have tiling metadata written to the BO.
 * Buffers only need the suballocation fixup.  On success the resource is
 * marked shared and its external usage flags are recorded.  Returns false
 * on failure (MSAA/depth textures, or allocation failure).
 */
static bool r600_texture_get_handle(struct pipe_screen* screen,
				    struct pipe_context *ctx,
				    struct pipe_resource *resource,
				    struct winsys_handle *whandle,
				    unsigned usage)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_common_context *rctx;
	struct r600_resource *res = (struct r600_resource*)resource;
	struct r600_texture *rtex = (struct r600_texture*)resource;
	struct radeon_bo_metadata metadata;
	/* NOTE(review): never set to true in this function; metadata is only
	 * written for resources that are not yet shared. */
	bool update_metadata = false;
	unsigned stride, offset, slice_size;

	/* Fall back to the aux context when no context was supplied. */
	ctx = threaded_context_unwrap_sync(ctx);
	rctx = (struct r600_common_context*)(ctx ? ctx : rscreen->aux_context);

	if (resource->target != PIPE_BUFFER) {
		/* This is not supported now, but it might be required for OpenCL
		 * interop in the future.
		 */
		if (resource->nr_samples > 1 || rtex->is_depth)
			return false;

		/* Move a suballocated texture into a non-suballocated allocation. */
		if (rscreen->ws->buffer_is_suballocated(res->buf) ||
		    rtex->surface.tile_swizzle) {
			assert(!res->b.is_shared);
			r600_reallocate_texture_inplace(rctx, rtex,
							PIPE_BIND_SHARED, false);
			rctx->b.flush(&rctx->b, NULL, 0);
			assert(res->b.b.bind & PIPE_BIND_SHARED);
			assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
			assert(rtex->surface.tile_swizzle == 0);
		}

		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
		    rtex->cmask.size) {
			/* Eliminate fast clear (CMASK) */
			r600_eliminate_fast_color_clear(rctx, rtex);

			/* Disable CMASK if flush_resource isn't going
			 * to be called.
			 */
			if (rtex->cmask.size)
				r600_texture_discard_cmask(rscreen, rtex);
		}

		/* Set metadata. */
		if (!res->b.is_shared || update_metadata) {
			r600_texture_init_metadata(rscreen, rtex, &metadata);

			rscreen->ws->buffer_set_metadata(rscreen->ws, res->buf, &metadata, NULL);
		}

		/* Used below to address individual array layers. */
		slice_size = (uint64_t)rtex->surface.u.legacy.level[0].slice_size_dw * 4;
	} else {
		/* Move a suballocated buffer into a non-suballocated allocation. */
		if (rscreen->ws->buffer_is_suballocated(res->buf)) {
			assert(!res->b.is_shared);

			/* Allocate a new buffer with PIPE_BIND_SHARED. */
			struct pipe_resource templ = res->b.b;
			templ.bind |= PIPE_BIND_SHARED;

			struct pipe_resource *newb =
				screen->resource_create(screen, &templ);
			if (!newb)
				return false;

			/* Copy the old buffer contents to the new one. */
			struct pipe_box box;
			u_box_1d(0, newb->width0, &box);
			rctx->b.resource_copy_region(&rctx->b, newb, 0, 0, 0, 0,
						     &res->b.b, 0, &box);
			/* Move the new buffer storage to the old pipe_resource. */
			r600_replace_buffer_storage(&rctx->b, &res->b.b, newb);
			pipe_resource_reference(&newb, NULL);

			assert(res->b.b.bind & PIPE_BIND_SHARED);
			assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
		}

		/* Buffers */
		slice_size = 0;
	}

	r600_texture_get_info(screen, resource, &stride, &offset);

	if (res->b.is_shared) {
		/* USAGE_EXPLICIT_FLUSH must be cleared if at least one user
		 * doesn't set it.
		 */
		res->external_usage |= usage & ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			res->external_usage &= ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
	} else {
		res->b.is_shared = true;
		res->external_usage = usage;
	}

	whandle->stride = stride;
	whandle->offset = offset + slice_size * whandle->layer;

	return rscreen->ws->buffer_get_handle(rscreen->ws, res->buf, whandle);
}
5767e995a2eSmrg
5771463c08dSmrgvoid r600_texture_destroy(struct pipe_screen *screen, struct pipe_resource *ptex)
5787e995a2eSmrg{
5797e995a2eSmrg	struct r600_texture *rtex = (struct r600_texture*)ptex;
5807e995a2eSmrg	struct r600_resource *resource = &rtex->resource;
5817e995a2eSmrg
5827e995a2eSmrg	r600_texture_reference(&rtex->flushed_depth_texture, NULL);
5837e995a2eSmrg	pipe_resource_reference((struct pipe_resource**)&resource->immed_buffer, NULL);
5847e995a2eSmrg
5857e995a2eSmrg	if (rtex->cmask_buffer != &rtex->resource) {
5867e995a2eSmrg	    r600_resource_reference(&rtex->cmask_buffer, NULL);
5877e995a2eSmrg	}
5887e995a2eSmrg	pb_reference(&resource->buf, NULL);
5897e995a2eSmrg	FREE(rtex);
5907e995a2eSmrg}
5917e995a2eSmrg
/* The number of samples can be specified independently of the texture. */
/* Compute the layout (size, alignment, pitch, tiling) of the FMASK
 * metadata surface for an MSAA texture.  On error (bad sample count or
 * winsys failure) *out is left zeroed. */
void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 unsigned nr_samples,
				 struct r600_fmask_info *out)
{
	/* FMASK is allocated like an ordinary texture. */
	struct pipe_resource templ = rtex->resource.b.b;
	struct radeon_surf fmask = {};
	unsigned flags, bpe;

	memset(out, 0, sizeof(*out));

	/* The FMASK surface itself is single-sampled. */
	templ.nr_samples = 1;
	flags = rtex->surface.flags | RADEON_SURF_FMASK;

	/* Use the same parameters and tile mode. */
	fmask.u.legacy.bankw = rtex->surface.u.legacy.bankw;
	fmask.u.legacy.bankh = rtex->surface.u.legacy.bankh;
	fmask.u.legacy.mtilea = rtex->surface.u.legacy.mtilea;
	fmask.u.legacy.tile_split = rtex->surface.u.legacy.tile_split;

	if (nr_samples <= 4)
		fmask.u.legacy.bankh = 4;

	/* Bytes per FMASK element depends on the sample count. */
	switch (nr_samples) {
	case 2:
	case 4:
		bpe = 1;
		break;
	case 8:
		bpe = 4;
		break;
	default:
		R600_ERR("Invalid sample count for FMASK allocation.\n");
		return;
	}

	/* Overallocate FMASK on R600-R700 to fix colorbuffer corruption.
	 * This can be fixed by writing a separate FMASK allocator specifically
	 * for R600-R700 asics. */
	if (rscreen->chip_class <= R700) {
		bpe *= 2;
	}

	if (rscreen->ws->surface_init(rscreen->ws, &templ,
				      flags, bpe, RADEON_SURF_MODE_2D, &fmask)) {
		R600_ERR("Got error in surface_init while allocating FMASK.\n");
		return;
	}

	assert(fmask.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);

	/* slice_tile_max is programmed as "number of 8x8 tiles - 1". */
	out->slice_tile_max = (fmask.u.legacy.level[0].nblk_x * fmask.u.legacy.level[0].nblk_y) / 64;
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->tile_mode_index = fmask.u.legacy.tiling_index[0];
	out->pitch_in_pixels = fmask.u.legacy.level[0].nblk_x;
	out->bank_height = fmask.u.legacy.bankh;
	out->tile_swizzle = fmask.tile_swizzle;
	out->alignment = MAX2(256, 1 << fmask.surf_alignment_log2);
	out->size = fmask.surf_size;
}
6567e995a2eSmrg
6577e995a2eSmrgstatic void r600_texture_allocate_fmask(struct r600_common_screen *rscreen,
6587e995a2eSmrg					struct r600_texture *rtex)
6597e995a2eSmrg{
6607e995a2eSmrg	r600_texture_get_fmask_info(rscreen, rtex,
6617e995a2eSmrg				    rtex->resource.b.b.nr_samples, &rtex->fmask);
6627e995a2eSmrg
6637e995a2eSmrg	rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment);
6647e995a2eSmrg	rtex->size = rtex->fmask.offset + rtex->fmask.size;
6657e995a2eSmrg}
6667e995a2eSmrg
/* Compute the size, alignment, and slice_tile_max of the CMASK (fast
 * clear) metadata for rtex.  The layout is derived in software from the
 * chip's tile-pipe configuration rather than queried from the winsys. */
void r600_texture_get_cmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 struct r600_cmask_info *out)
{
	/* One CMASK element (4 bits) covers an 8x8 pixel tile. */
	unsigned cmask_tile_width = 8;
	unsigned cmask_tile_height = 8;
	unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height;
	unsigned element_bits = 4;
	unsigned cmask_cache_bits = 1024;
	unsigned num_pipes = rscreen->info.num_tile_pipes;
	unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;

	/* Derive a roughly square macro tile covering one CMASK cache line
	 * per pipe, with a power-of-two width. */
	unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes;
	unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements;
	unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile);
	unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile);
	unsigned macro_tile_height = pixels_per_macro_tile / macro_tile_width;

	/* Pad the texture dimensions to whole macro tiles. */
	unsigned pitch_elements = align(rtex->resource.b.b.width0, macro_tile_width);
	unsigned height = align(rtex->resource.b.b.height0, macro_tile_height);

	unsigned base_align = num_pipes * pipe_interleave_bytes;
	unsigned slice_bytes =
		((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements;

	assert(macro_tile_width % 128 == 0);
	assert(macro_tile_height % 128 == 0);

	/* slice_tile_max is programmed as "number of 128x128 tiles - 1". */
	out->slice_tile_max = ((pitch_elements * height) / (128*128)) - 1;
	out->alignment = MAX2(256, base_align);
	/* One aligned slice per array layer. */
	out->size = util_num_layers(&rtex->resource.b.b, 0) *
		    align(slice_bytes, base_align);
}
7007e995a2eSmrg
7017e995a2eSmrgstatic void r600_texture_allocate_cmask(struct r600_common_screen *rscreen,
7027e995a2eSmrg					struct r600_texture *rtex)
7037e995a2eSmrg{
7047e995a2eSmrg	r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
7057e995a2eSmrg
7067e995a2eSmrg	rtex->cmask.offset = align64(rtex->size, rtex->cmask.alignment);
7077e995a2eSmrg	rtex->size = rtex->cmask.offset + rtex->cmask.size;
7087e995a2eSmrg
7097e995a2eSmrg	rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
7107e995a2eSmrg}
7117e995a2eSmrg
7127e995a2eSmrgstatic void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen,
7137e995a2eSmrg					      struct r600_texture *rtex)
7147e995a2eSmrg{
7157e995a2eSmrg	if (rtex->cmask_buffer)
7167e995a2eSmrg                return;
7177e995a2eSmrg
7187e995a2eSmrg	assert(rtex->cmask.size == 0);
7197e995a2eSmrg
7207e995a2eSmrg	r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
7217e995a2eSmrg
7227e995a2eSmrg	rtex->cmask_buffer = (struct r600_resource *)
7237e995a2eSmrg		r600_aligned_buffer_create(&rscreen->b,
7247e995a2eSmrg					   R600_RESOURCE_FLAG_UNMAPPABLE,
7257e995a2eSmrg					   PIPE_USAGE_DEFAULT,
7267e995a2eSmrg					   rtex->cmask.size,
7277e995a2eSmrg					   rtex->cmask.alignment);
7287e995a2eSmrg	if (rtex->cmask_buffer == NULL) {
7297e995a2eSmrg		rtex->cmask.size = 0;
7307e995a2eSmrg		return;
7317e995a2eSmrg	}
7327e995a2eSmrg
7337e995a2eSmrg	/* update colorbuffer state bits */
7347e995a2eSmrg	rtex->cmask.base_address_reg = rtex->cmask_buffer->gpu_address >> 8;
7357e995a2eSmrg
7367e995a2eSmrg	rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
7377e995a2eSmrg
7387e995a2eSmrg	p_atomic_inc(&rscreen->compressed_colortex_counter);
7397e995a2eSmrg}
7407e995a2eSmrg
7417e995a2eSmrgvoid eg_resource_alloc_immed(struct r600_common_screen *rscreen,
7427e995a2eSmrg			     struct r600_resource *res,
7437e995a2eSmrg			     unsigned immed_size)
7447e995a2eSmrg{
7457e995a2eSmrg	res->immed_buffer = (struct r600_resource *)
7467e995a2eSmrg		pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
7477e995a2eSmrg				   PIPE_USAGE_DEFAULT, immed_size);
7487e995a2eSmrg}
7497e995a2eSmrg
/* Compute the size and alignment of the HTILE (Hyper-Z depth/stencil
 * metadata) surface for a texture.  Results are written to
 * rtex->surface.meta_size / meta_alignment_log2; meta_size stays 0
 * when HTILE cannot be used. */
static void r600_texture_get_htile_size(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	unsigned cl_width, cl_height, width, height;
	unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
	unsigned num_pipes = rscreen->info.num_tile_pipes;

	rtex->surface.meta_size = 0;

	/* On chips up to Evergreen, HTILE requires a kernel with
	 * DRM minor version >= 26. */
	if (rscreen->chip_class <= EVERGREEN &&
	    rscreen->info.drm_minor < 26)
		return;

	/* HW bug on R6xx. */
	if (rscreen->chip_class == R600 &&
	    (rtex->resource.b.b.width0 > 7680 ||
	     rtex->resource.b.b.height0 > 7680))
		return;

	/* HTILE cacheline coverage depends on the pipe count; values below
	 * are in units later multiplied by 8 blocks. */
	switch (num_pipes) {
	case 1:
		cl_width = 32;
		cl_height = 16;
		break;
	case 2:
		cl_width = 32;
		cl_height = 32;
		break;
	case 4:
		cl_width = 64;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 64;
		break;
	case 16:
		cl_width = 128;
		cl_height = 64;
		break;
	default:
		/* Unexpected pipe count. */
		assert(0);
		return;
	}

	/* Align the level-0 block dimensions to whole cachelines. */
	width = align(rtex->surface.u.legacy.level[0].nblk_x, cl_width * 8);
	height = align(rtex->surface.u.legacy.level[0].nblk_y, cl_height * 8);

	/* One 4-byte HTILE element covers an 8x8 area. */
	slice_elements = (width * height) / (8 * 8);
	slice_bytes = slice_elements * 4;

	pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
	base_align = num_pipes * pipe_interleave_bytes;

	rtex->surface.meta_alignment_log2 = util_logbase2(base_align);
	rtex->surface.meta_size =
		util_num_layers(&rtex->resource.b.b, 0) *
		align(slice_bytes, base_align);
}
8097e995a2eSmrg
8107e995a2eSmrgstatic void r600_texture_allocate_htile(struct r600_common_screen *rscreen,
8117e995a2eSmrg					struct r600_texture *rtex)
8127e995a2eSmrg{
8137e995a2eSmrg	r600_texture_get_htile_size(rscreen, rtex);
8147e995a2eSmrg
8151463c08dSmrg	if (!rtex->surface.meta_size)
8167e995a2eSmrg		return;
8177e995a2eSmrg
8181463c08dSmrg	rtex->htile_offset = align(rtex->size, 1 << rtex->surface.meta_alignment_log2);
8191463c08dSmrg	rtex->size = rtex->htile_offset + rtex->surface.meta_size;
8207e995a2eSmrg}
8217e995a2eSmrg
/* Dump the full layout of a texture — common surface parameters, FMASK,
 * CMASK, HTILE, per-mip-level info and, if present, the stencil plane —
 * into the given log context.  Purely diagnostic; no state is changed. */
void r600_print_texture_info(struct r600_common_screen *rscreen,
			     struct r600_texture *rtex, struct u_log_context *log)
{
	int i;

	/* Common parameters. */
	u_log_printf(log, "  Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
		"blk_h=%u, array_size=%u, last_level=%u, "
		"bpe=%u, nsamples=%u, flags=0x%"PRIx64", %s\n",
		rtex->resource.b.b.width0, rtex->resource.b.b.height0,
		rtex->resource.b.b.depth0, rtex->surface.blk_w,
		rtex->surface.blk_h,
		rtex->resource.b.b.array_size, rtex->resource.b.b.last_level,
		rtex->surface.bpe, rtex->resource.b.b.nr_samples,
		rtex->surface.flags, util_format_short_name(rtex->resource.b.b.format));

	/* Legacy (pre-GFX9) tiling layout parameters. */
	u_log_printf(log, "  Layout: size=%"PRIu64", alignment=%u, bankw=%u, "
		"bankh=%u, nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
		rtex->surface.surf_size, 1 << rtex->surface.surf_alignment_log2, rtex->surface.u.legacy.bankw,
		rtex->surface.u.legacy.bankh, rtex->surface.u.legacy.num_banks, rtex->surface.u.legacy.mtilea,
		rtex->surface.u.legacy.tile_split, rtex->surface.u.legacy.pipe_config,
		(rtex->surface.flags & RADEON_SURF_SCANOUT) != 0);

	/* MSAA metadata, only present when allocated. */
	if (rtex->fmask.size)
		u_log_printf(log, "  FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, "
			"bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n",
			rtex->fmask.offset, rtex->fmask.size, rtex->fmask.alignment,
			rtex->fmask.pitch_in_pixels, rtex->fmask.bank_height,
			rtex->fmask.slice_tile_max, rtex->fmask.tile_mode_index);

	if (rtex->cmask.size)
		u_log_printf(log, "  CMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, "
			"slice_tile_max=%u\n",
			rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment,
			rtex->cmask.slice_tile_max);

	if (rtex->htile_offset)
		u_log_printf(log, "  HTile: offset=%"PRIu64", size=%u "
			"alignment=%u\n",
			     rtex->htile_offset, rtex->surface.meta_size,
			     1 << rtex->surface.meta_alignment_log2);

	/* Per-mip-level layout. */
	for (i = 0; i <= rtex->resource.b.b.last_level; i++)
		u_log_printf(log, "  Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
			"npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
			"mode=%u, tiling_index = %u\n",
			i, (uint64_t)rtex->surface.u.legacy.level[i].offset_256B * 256,
			(uint64_t)rtex->surface.u.legacy.level[i].slice_size_dw * 4,
			u_minify(rtex->resource.b.b.width0, i),
			u_minify(rtex->resource.b.b.height0, i),
			u_minify(rtex->resource.b.b.depth0, i),
			rtex->surface.u.legacy.level[i].nblk_x,
			rtex->surface.u.legacy.level[i].nblk_y,
			rtex->surface.u.legacy.level[i].mode,
			rtex->surface.u.legacy.tiling_index[i]);

	/* Stencil plane, which has its own per-level layout. */
	if (rtex->surface.has_stencil) {
		u_log_printf(log, "  StencilLayout: tilesplit=%u\n",
			rtex->surface.u.legacy.stencil_tile_split);
		for (i = 0; i <= rtex->resource.b.b.last_level; i++) {
			u_log_printf(log, "  StencilLevel[%i]: offset=%"PRIu64", "
				"slice_size=%"PRIu64", npix_x=%u, "
				"npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
				"mode=%u, tiling_index = %u\n",
				i, (uint64_t)rtex->surface.u.legacy.zs.stencil_level[i].offset_256B * 256,
				(uint64_t)rtex->surface.u.legacy.zs.stencil_level[i].slice_size_dw * 4,
				u_minify(rtex->resource.b.b.width0, i),
				u_minify(rtex->resource.b.b.height0, i),
				u_minify(rtex->resource.b.b.depth0, i),
				rtex->surface.u.legacy.zs.stencil_level[i].nblk_x,
				rtex->surface.u.legacy.zs.stencil_level[i].nblk_y,
				rtex->surface.u.legacy.zs.stencil_level[i].mode,
				rtex->surface.u.legacy.zs.stencil_tiling_index[i]);
		}
	}
}
8987e995a2eSmrg
/* Common processing for r600_texture_create and r600_texture_from_handle.
 *
 * "surface" is the precomputed layout.  If "buf" is non-NULL it is an
 * imported BO adopted as the backing storage; otherwise a new buffer is
 * allocated, sized to also hold FMASK/CMASK/HTILE metadata when needed.
 * Returns NULL on failure. */
static struct r600_texture *
r600_texture_create_object(struct pipe_screen *screen,
			   const struct pipe_resource *base,
			   struct pb_buffer *buf,
			   struct radeon_surf *surface)
{
	struct r600_texture *rtex;
	struct r600_resource *resource;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

	rtex = CALLOC_STRUCT(r600_texture);
	if (!rtex)
		return NULL;

	resource = &rtex->resource;
	resource->b.b = *base;
	pipe_reference_init(&resource->b.b.reference, 1);
	resource->b.b.screen = screen;

	/* don't include stencil-only formats which we don't support for rendering */
	rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));

	rtex->surface = *surface;
	rtex->size = rtex->surface.surf_size;
	rtex->db_render_format = base->format;

	/* Tiled depth textures utilize the non-displayable tile order.
	 * This must be done after r600_setup_surface.
	 * Applies to R600-Cayman. */
	rtex->non_disp_tiling = rtex->is_depth && rtex->surface.u.legacy.level[0].mode >= RADEON_SURF_MODE_1D;
	/* Applies to GCN. */
	rtex->last_msaa_resolve_target_micro_mode = rtex->surface.micro_tile_mode;

	if (rtex->is_depth) {
		/* Evergreen+ (and transfer/flushed-depth surfaces) can be
		 * sampled directly unless the layout had to be adjusted. */
		if (base->flags & (R600_RESOURCE_FLAG_TRANSFER |
				   R600_RESOURCE_FLAG_FLUSHED_DEPTH) ||
		    rscreen->chip_class >= EVERGREEN) {
			rtex->can_sample_z = !rtex->surface.u.legacy.depth_adjusted;
			rtex->can_sample_s = !rtex->surface.u.legacy.stencil_adjusted;
		} else {
			/* Older chips: only single-sample Z16/Z32F are
			 * directly samplable. */
			if (rtex->resource.b.b.nr_samples <= 1 &&
			    (rtex->resource.b.b.format == PIPE_FORMAT_Z16_UNORM ||
			     rtex->resource.b.b.format == PIPE_FORMAT_Z32_FLOAT))
				rtex->can_sample_z = true;
		}

		if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
				     R600_RESOURCE_FLAG_FLUSHED_DEPTH))) {
			rtex->db_compatible = true;

			if (!(rscreen->debug_flags & DBG_NO_HYPERZ))
				r600_texture_allocate_htile(rscreen, rtex);
		}
	} else {
		if (base->nr_samples > 1) {
			/* Color MSAA requires FMASK+CMASK; both grow
			 * rtex->size before the buffer is allocated below. */
			if (!buf) {
				r600_texture_allocate_fmask(rscreen, rtex);
				r600_texture_allocate_cmask(rscreen, rtex);
				rtex->cmask_buffer = &rtex->resource;
			}
			if (!rtex->fmask.size || !rtex->cmask.size) {
				FREE(rtex);
				return NULL;
			}
		}
	}

	/* Now create the backing buffer. */
	if (!buf) {
		r600_init_resource_fields(rscreen, resource, rtex->size,
					  1 << rtex->surface.surf_alignment_log2);

		if (!r600_alloc_resource(rscreen, resource)) {
			FREE(rtex);
			return NULL;
		}
	} else {
		/* Imported buffer: adopt it and fill in the bookkeeping that
		 * r600_alloc_resource would otherwise have done. */
		resource->buf = buf;
		resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->buf);
		resource->bo_size = buf->size;
		resource->bo_alignment = 1 << buf->alignment_log2;
		resource->domains = rscreen->ws->buffer_get_initial_domain(resource->buf);
		if (resource->domains & RADEON_DOMAIN_VRAM)
			resource->vram_usage = buf->size;
		else if (resource->domains & RADEON_DOMAIN_GTT)
			resource->gart_usage = buf->size;
	}

	if (rtex->cmask.size) {
		/* Initialize the cmask to 0xCC (= compressed state). */
		r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b,
					 rtex->cmask.offset, rtex->cmask.size,
					 0xCCCCCCCC);
	}
	if (rtex->htile_offset) {
		/* Clear HTILE to its initial state. */
		uint32_t clear_value = 0;

		r600_screen_clear_buffer(rscreen, &rtex->resource.b.b,
					 rtex->htile_offset,
					 rtex->surface.meta_size,
					 clear_value);
	}

	/* Initialize the CMASK base register value. */
	rtex->cmask.base_address_reg =
		(rtex->resource.gpu_address + rtex->cmask.offset) >> 8;

	/* Optional debug output. */
	if (rscreen->debug_flags & DBG_VM) {
		fprintf(stderr, "VM start=0x%"PRIX64"  end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
			rtex->resource.gpu_address,
			rtex->resource.gpu_address + rtex->resource.buf->size,
			base->width0, base->height0, util_num_layers(base, 0), base->last_level+1,
			base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
	}

	if (rscreen->debug_flags & DBG_TEX) {
		puts("Texture:");
		struct u_log_context log;
		u_log_context_init(&log);
		r600_print_texture_info(rscreen, rtex, &log);
		u_log_new_page_print(&log, stdout);
		fflush(stdout);
		u_log_context_destroy(&log);
	}

	return rtex;
}
10277e995a2eSmrg
/* Pick the surface tiling mode (linear aligned / 1D / 2D) for a new
 * texture based on its template (samples, flags, format, target, usage)
 * and the screen's debug options.  The checks are ordered from hard
 * requirements to heuristics; do not reorder them. */
static enum radeon_surf_mode
r600_choose_tiling(struct r600_common_screen *rscreen,
		   const struct pipe_resource *templ)
{
	const struct util_format_description *desc = util_format_description(templ->format);
	bool force_tiling = templ->flags & R600_RESOURCE_FLAG_FORCE_TILING;
	bool is_depth_stencil = util_format_is_depth_or_stencil(templ->format) &&
				!(templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH);

	/* MSAA resources must be 2D tiled. */
	if (templ->nr_samples > 1)
		return RADEON_SURF_MODE_2D;

	/* Transfer resources should be linear. */
	if (templ->flags & R600_RESOURCE_FLAG_TRANSFER)
		return RADEON_SURF_MODE_LINEAR_ALIGNED;

	/* r600g: force tiling on TEXTURE_2D and TEXTURE_3D compute resources. */
	if (rscreen->chip_class >= R600 && rscreen->chip_class <= CAYMAN &&
	    (templ->bind & PIPE_BIND_COMPUTE_RESOURCE) &&
	    (templ->target == PIPE_TEXTURE_2D ||
	     templ->target == PIPE_TEXTURE_3D))
		force_tiling = true;

	/* Handle common candidates for the linear mode.
	 * Compressed textures and DB surfaces must always be tiled.
	 */
	if (!force_tiling &&
	    !is_depth_stencil &&
	    !util_format_is_compressed(templ->format)) {
		if (rscreen->debug_flags & DBG_NO_TILING)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. */
		if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		if (templ->bind & PIPE_BIND_LINEAR)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* 1D textures should be linear - fixes image operations on 1d */
		if (templ->target == PIPE_TEXTURE_1D ||
		    templ->target == PIPE_TEXTURE_1D_ARRAY)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures likely to be mapped often. */
		if (templ->usage == PIPE_USAGE_STAGING ||
		    templ->usage == PIPE_USAGE_STREAM)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	/* Make small textures 1D tiled. */
	if (templ->width0 <= 16 || templ->height0 <= 16 ||
	    (rscreen->debug_flags & DBG_NO_2D_TILING))
		return RADEON_SURF_MODE_1D;

	/* The allocator will switch to 1D if needed. */
	return RADEON_SURF_MODE_2D;
}
10877e995a2eSmrg
10887e995a2eSmrgstruct pipe_resource *r600_texture_create(struct pipe_screen *screen,
10897e995a2eSmrg					  const struct pipe_resource *templ)
10907e995a2eSmrg{
10917e995a2eSmrg	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
10927e995a2eSmrg	struct radeon_surf surface = {0};
10937e995a2eSmrg	bool is_flushed_depth = templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH;
10947e995a2eSmrg	int r;
10957e995a2eSmrg
10967e995a2eSmrg	r = r600_init_surface(rscreen, &surface, templ,
10977e995a2eSmrg			      r600_choose_tiling(rscreen, templ), 0, 0,
10987e995a2eSmrg			      false, false, is_flushed_depth);
10997e995a2eSmrg	if (r) {
11007e995a2eSmrg		return NULL;
11017e995a2eSmrg	}
11027e995a2eSmrg
11037e995a2eSmrg	return (struct pipe_resource *)
11047e995a2eSmrg	       r600_texture_create_object(screen, templ, NULL, &surface);
11057e995a2eSmrg}
11067e995a2eSmrg
/* pipe_screen::resource_from_handle implementation: wrap an externally
 * shared BO in a r600_texture, importing its tiling metadata.
 * Only 2D/RECT textures without mipmaps or depth slices are supported. */
static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
						      const struct pipe_resource *templ,
						      struct winsys_handle *whandle,
                                                      unsigned usage)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct pb_buffer *buf = NULL;
	enum radeon_surf_mode array_mode;
	struct radeon_surf surface = {};
	int r;
	struct radeon_bo_metadata metadata = {};
	struct r600_texture *rtex;
	bool is_scanout;

	/* Support only 2D textures without mipmaps */
	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
	      templ->depth0 != 1 || templ->last_level != 0)
		return NULL;

	buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle,
					      rscreen->info.max_alignment);
	if (!buf)
		return NULL;

	/* Recover the tiling parameters the exporter stored on the BO. */
	rscreen->ws->buffer_get_metadata(rscreen->ws, buf, &metadata, NULL);
	r600_surface_import_metadata(rscreen, &surface, &metadata,
				     &array_mode, &is_scanout);

	r = r600_init_surface(rscreen, &surface, templ, array_mode,
			      whandle->stride, whandle->offset,
			      true, is_scanout, false);
	if (r) {
		/* NOTE(review): "buf" is not released on this error path;
		 * this looks like a reference leak — TODO confirm winsys
		 * ownership semantics before changing. */
		return NULL;
	}

	/* Ownership of "buf" passes to the texture object on success. */
	rtex = r600_texture_create_object(screen, templ, buf, &surface);
	if (!rtex)
		return NULL;

	rtex->resource.b.is_shared = true;
	rtex->resource.external_usage = usage;

	assert(rtex->surface.tile_swizzle == 0);
	return &rtex->resource.b.b;
}
11527e995a2eSmrg
/* Create a "flushed depth" copy of a depth texture that can be sampled
 * or transferred.  With staging == NULL the copy is cached in
 * rtex->flushed_depth_texture (created once, PIPE_USAGE_DEFAULT) and the
 * format may be narrowed to skip planes the texture cannot sample anyway;
 * otherwise a PIPE_USAGE_STAGING texture is returned in *staging.
 * Returns false if the temporary texture cannot be created. */
bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     struct r600_texture **staging)
{
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct pipe_resource resource;
	struct r600_texture **flushed_depth_texture = staging ?
			staging : &rtex->flushed_depth_texture;
	enum pipe_format pipe_format = texture->format;

	if (!staging) {
		if (rtex->flushed_depth_texture)
			return true; /* it's ready */

		/* Narrow the format when only one of Z/S is samplable. */
		if (!rtex->can_sample_z && rtex->can_sample_s) {
			switch (pipe_format) {
			case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
				/* Save memory by not allocating the S plane. */
				pipe_format = PIPE_FORMAT_Z32_FLOAT;
				break;
			case PIPE_FORMAT_Z24_UNORM_S8_UINT:
			case PIPE_FORMAT_S8_UINT_Z24_UNORM:
				/* Save memory bandwidth by not copying the
				 * stencil part during flush.
				 *
				 * This potentially increases memory bandwidth
				 * if an application uses both Z and S texturing
				 * simultaneously (a flushed Z24S8 texture
				 * would be stored compactly), but how often
				 * does that really happen?
				 */
				pipe_format = PIPE_FORMAT_Z24X8_UNORM;
				break;
			default:;
			}
		} else if (!rtex->can_sample_s && rtex->can_sample_z) {
			assert(util_format_has_stencil(util_format_description(pipe_format)));

			/* DB->CB copies to an 8bpp surface don't work. */
			pipe_format = PIPE_FORMAT_X24S8_UINT;
		}
	}

	/* Describe a texture matching the original except for format/usage. */
	memset(&resource, 0, sizeof(resource));
	resource.target = texture->target;
	resource.format = pipe_format;
	resource.width0 = texture->width0;
	resource.height0 = texture->height0;
	resource.depth0 = texture->depth0;
	resource.array_size = texture->array_size;
	resource.last_level = texture->last_level;
	resource.nr_samples = texture->nr_samples;
	resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
	resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH;

	if (staging)
		resource.flags |= R600_RESOURCE_FLAG_TRANSFER;

	*flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
	if (*flushed_depth_texture == NULL) {
		R600_ERR("failed to create temporary texture to hold flushed depth\n");
		return false;
	}

	(*flushed_depth_texture)->non_disp_tiling = false;
	return true;
}
12217e995a2eSmrg
12227e995a2eSmrg/**
12237e995a2eSmrg * Initialize the pipe_resource descriptor to be of the same size as the box,
12247e995a2eSmrg * which is supposed to hold a subregion of the texture "orig" at the given
12257e995a2eSmrg * mipmap level.
12267e995a2eSmrg */
12277e995a2eSmrgstatic void r600_init_temp_resource_from_box(struct pipe_resource *res,
12287e995a2eSmrg					     struct pipe_resource *orig,
12297e995a2eSmrg					     const struct pipe_box *box,
12307e995a2eSmrg					     unsigned level, unsigned flags)
12317e995a2eSmrg{
12327e995a2eSmrg	memset(res, 0, sizeof(*res));
12337e995a2eSmrg	res->format = orig->format;
12347e995a2eSmrg	res->width0 = box->width;
12357e995a2eSmrg	res->height0 = box->height;
12367e995a2eSmrg	res->depth0 = 1;
12377e995a2eSmrg	res->array_size = 1;
12387e995a2eSmrg	res->usage = flags & R600_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
12397e995a2eSmrg	res->flags = flags;
12407e995a2eSmrg
12417e995a2eSmrg	/* We must set the correct texture target and dimensions for a 3D box. */
12427e995a2eSmrg	if (box->depth > 1 && util_max_layer(orig, level) > 0) {
12437e995a2eSmrg		res->target = PIPE_TEXTURE_2D_ARRAY;
12447e995a2eSmrg		res->array_size = box->depth;
12457e995a2eSmrg	} else {
12467e995a2eSmrg		res->target = PIPE_TEXTURE_2D;
12477e995a2eSmrg	}
12487e995a2eSmrg}
12497e995a2eSmrg
12507e995a2eSmrgstatic bool r600_can_invalidate_texture(struct r600_common_screen *rscreen,
12517e995a2eSmrg					struct r600_texture *rtex,
12527e995a2eSmrg					unsigned transfer_usage,
12537e995a2eSmrg					const struct pipe_box *box)
12547e995a2eSmrg{
12557e995a2eSmrg	/* r600g doesn't react to dirty_tex_descriptor_counter */
12561463c08dSmrg	return rscreen->chip_class >= GFX6 &&
12577e995a2eSmrg		!rtex->resource.b.is_shared &&
12581463c08dSmrg		!(transfer_usage & PIPE_MAP_READ) &&
12597e995a2eSmrg		rtex->resource.b.b.last_level == 0 &&
12607e995a2eSmrg		util_texrange_covers_whole_level(&rtex->resource.b.b, 0,
12617e995a2eSmrg						 box->x, box->y, box->z,
12627e995a2eSmrg						 box->width, box->height,
12637e995a2eSmrg						 box->depth);
12647e995a2eSmrg}
12657e995a2eSmrg
12667e995a2eSmrgstatic void r600_texture_invalidate_storage(struct r600_common_context *rctx,
12677e995a2eSmrg					    struct r600_texture *rtex)
12687e995a2eSmrg{
12697e995a2eSmrg	struct r600_common_screen *rscreen = rctx->screen;
12707e995a2eSmrg
12717e995a2eSmrg	/* There is no point in discarding depth and tiled buffers. */
12727e995a2eSmrg	assert(!rtex->is_depth);
12737e995a2eSmrg	assert(rtex->surface.is_linear);
12747e995a2eSmrg
12757e995a2eSmrg	/* Reallocate the buffer in the same pipe_resource. */
12767e995a2eSmrg	r600_alloc_resource(rscreen, &rtex->resource);
12777e995a2eSmrg
12787e995a2eSmrg	/* Initialize the CMASK base address (needed even without CMASK). */
12797e995a2eSmrg	rtex->cmask.base_address_reg =
12807e995a2eSmrg		(rtex->resource.gpu_address + rtex->cmask.offset) >> 8;
12817e995a2eSmrg
12827e995a2eSmrg	p_atomic_inc(&rscreen->dirty_tex_counter);
12837e995a2eSmrg
12847e995a2eSmrg	rctx->num_alloc_tex_transfer_bytes += rtex->size;
12857e995a2eSmrg}
12867e995a2eSmrg
12871463c08dSmrgvoid *r600_texture_transfer_map(struct pipe_context *ctx,
12881463c08dSmrg			       struct pipe_resource *texture,
12891463c08dSmrg			       unsigned level,
12901463c08dSmrg			       unsigned usage,
12911463c08dSmrg			       const struct pipe_box *box,
12921463c08dSmrg			       struct pipe_transfer **ptransfer)
12937e995a2eSmrg{
12947e995a2eSmrg	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
12957e995a2eSmrg	struct r600_texture *rtex = (struct r600_texture*)texture;
12967e995a2eSmrg	struct r600_transfer *trans;
12977e995a2eSmrg	struct r600_resource *buf;
12987e995a2eSmrg	unsigned offset = 0;
12997e995a2eSmrg	char *map;
13007e995a2eSmrg	bool use_staging_texture = false;
13017e995a2eSmrg
13027e995a2eSmrg	assert(!(texture->flags & R600_RESOURCE_FLAG_TRANSFER));
13037e995a2eSmrg	assert(box->width && box->height && box->depth);
13047e995a2eSmrg
13057e995a2eSmrg	/* Depth textures use staging unconditionally. */
13067e995a2eSmrg	if (!rtex->is_depth) {
13077e995a2eSmrg		/* Degrade the tile mode if we get too many transfers on APUs.
13087e995a2eSmrg		 * On dGPUs, the staging texture is always faster.
13097e995a2eSmrg		 * Only count uploads that are at least 4x4 pixels large.
13107e995a2eSmrg		 */
13117e995a2eSmrg		if (!rctx->screen->info.has_dedicated_vram &&
13127e995a2eSmrg		    level == 0 &&
13137e995a2eSmrg		    box->width >= 4 && box->height >= 4 &&
13147e995a2eSmrg		    p_atomic_inc_return(&rtex->num_level0_transfers) == 10) {
13157e995a2eSmrg			bool can_invalidate =
13167e995a2eSmrg				r600_can_invalidate_texture(rctx->screen, rtex,
13177e995a2eSmrg							    usage, box);
13187e995a2eSmrg
13197e995a2eSmrg			r600_reallocate_texture_inplace(rctx, rtex,
13207e995a2eSmrg							PIPE_BIND_LINEAR,
13217e995a2eSmrg							can_invalidate);
13227e995a2eSmrg		}
13237e995a2eSmrg
13247e995a2eSmrg		/* Tiled textures need to be converted into a linear texture for CPU
13257e995a2eSmrg		 * access. The staging texture is always linear and is placed in GART.
13267e995a2eSmrg		 *
13277e995a2eSmrg		 * Reading from VRAM or GTT WC is slow, always use the staging
13287e995a2eSmrg		 * texture in this case.
13297e995a2eSmrg		 *
13307e995a2eSmrg		 * Use the staging texture for uploads if the underlying BO
13317e995a2eSmrg		 * is busy.
13327e995a2eSmrg		 */
13337e995a2eSmrg		if (!rtex->surface.is_linear)
13347e995a2eSmrg			use_staging_texture = true;
13351463c08dSmrg		else if (usage & PIPE_MAP_READ)
13367e995a2eSmrg			use_staging_texture =
13377e995a2eSmrg				rtex->resource.domains & RADEON_DOMAIN_VRAM ||
13387e995a2eSmrg				rtex->resource.flags & RADEON_FLAG_GTT_WC;
13397e995a2eSmrg		/* Write & linear only: */
13407e995a2eSmrg		else if (r600_rings_is_buffer_referenced(rctx, rtex->resource.buf,
13417e995a2eSmrg							 RADEON_USAGE_READWRITE) ||
13421463c08dSmrg			 !rctx->ws->buffer_wait(rctx->ws, rtex->resource.buf, 0,
13437e995a2eSmrg						RADEON_USAGE_READWRITE)) {
13447e995a2eSmrg			/* It's busy. */
13457e995a2eSmrg			if (r600_can_invalidate_texture(rctx->screen, rtex,
13467e995a2eSmrg							usage, box))
13477e995a2eSmrg				r600_texture_invalidate_storage(rctx, rtex);
13487e995a2eSmrg			else
13497e995a2eSmrg				use_staging_texture = true;
13507e995a2eSmrg		}
13517e995a2eSmrg	}
13527e995a2eSmrg
13537e995a2eSmrg	trans = CALLOC_STRUCT(r600_transfer);
13547e995a2eSmrg	if (!trans)
13557e995a2eSmrg		return NULL;
13567e995a2eSmrg	pipe_resource_reference(&trans->b.b.resource, texture);
13577e995a2eSmrg	trans->b.b.level = level;
13587e995a2eSmrg	trans->b.b.usage = usage;
13597e995a2eSmrg	trans->b.b.box = *box;
13607e995a2eSmrg
13617e995a2eSmrg	if (rtex->is_depth) {
13627e995a2eSmrg		struct r600_texture *staging_depth;
13637e995a2eSmrg
13647e995a2eSmrg		if (rtex->resource.b.b.nr_samples > 1) {
13657e995a2eSmrg			/* MSAA depth buffers need to be converted to single sample buffers.
13667e995a2eSmrg			 *
13677e995a2eSmrg			 * Mapping MSAA depth buffers can occur if ReadPixels is called
13687e995a2eSmrg			 * with a multisample GLX visual.
13697e995a2eSmrg			 *
13707e995a2eSmrg			 * First downsample the depth buffer to a temporary texture,
13717e995a2eSmrg			 * then decompress the temporary one to staging.
13727e995a2eSmrg			 *
13737e995a2eSmrg			 * Only the region being mapped is transfered.
13747e995a2eSmrg			 */
13757e995a2eSmrg			struct pipe_resource resource;
13767e995a2eSmrg
13777e995a2eSmrg			r600_init_temp_resource_from_box(&resource, texture, box, level, 0);
13787e995a2eSmrg
13797e995a2eSmrg			if (!r600_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
13807e995a2eSmrg				R600_ERR("failed to create temporary texture to hold untiled copy\n");
13817e995a2eSmrg				FREE(trans);
13827e995a2eSmrg				return NULL;
13837e995a2eSmrg			}
13847e995a2eSmrg
13851463c08dSmrg			if (usage & PIPE_MAP_READ) {
13867e995a2eSmrg				struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);
13877e995a2eSmrg				if (!temp) {
13887e995a2eSmrg					R600_ERR("failed to create a temporary depth texture\n");
13897e995a2eSmrg					FREE(trans);
13907e995a2eSmrg					return NULL;
13917e995a2eSmrg				}
13927e995a2eSmrg
13937e995a2eSmrg				r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
13947e995a2eSmrg				rctx->blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
13957e995a2eSmrg							    0, 0, 0, box->depth, 0, 0);
13967e995a2eSmrg				pipe_resource_reference(&temp, NULL);
13977e995a2eSmrg			}
13987e995a2eSmrg
13997e995a2eSmrg			/* Just get the strides. */
14007e995a2eSmrg			r600_texture_get_offset(rctx->screen, staging_depth, level, NULL,
14017e995a2eSmrg						&trans->b.b.stride,
14027e995a2eSmrg						&trans->b.b.layer_stride);
14037e995a2eSmrg		} else {
14047e995a2eSmrg			/* XXX: only readback the rectangle which is being mapped? */
14057e995a2eSmrg			/* XXX: when discard is true, no need to read back from depth texture */
14067e995a2eSmrg			if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
14077e995a2eSmrg				R600_ERR("failed to create temporary texture to hold untiled copy\n");
14087e995a2eSmrg				FREE(trans);
14097e995a2eSmrg				return NULL;
14107e995a2eSmrg			}
14117e995a2eSmrg
14127e995a2eSmrg			rctx->blit_decompress_depth(ctx, rtex, staging_depth,
14137e995a2eSmrg						    level, level,
14147e995a2eSmrg						    box->z, box->z + box->depth - 1,
14157e995a2eSmrg						    0, 0);
14167e995a2eSmrg
14177e995a2eSmrg			offset = r600_texture_get_offset(rctx->screen, staging_depth,
14187e995a2eSmrg							 level, box,
14197e995a2eSmrg							 &trans->b.b.stride,
14207e995a2eSmrg							 &trans->b.b.layer_stride);
14217e995a2eSmrg		}
14227e995a2eSmrg
14237e995a2eSmrg		trans->staging = (struct r600_resource*)staging_depth;
14247e995a2eSmrg		buf = trans->staging;
14257e995a2eSmrg	} else if (use_staging_texture) {
14267e995a2eSmrg		struct pipe_resource resource;
14277e995a2eSmrg		struct r600_texture *staging;
14287e995a2eSmrg
14297e995a2eSmrg		r600_init_temp_resource_from_box(&resource, texture, box, level,
14307e995a2eSmrg						 R600_RESOURCE_FLAG_TRANSFER);
14311463c08dSmrg		resource.usage = (usage & PIPE_MAP_READ) ?
14327e995a2eSmrg			PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
14337e995a2eSmrg
14347e995a2eSmrg		/* Create the temporary texture. */
14357e995a2eSmrg		staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
14367e995a2eSmrg		if (!staging) {
14377e995a2eSmrg			R600_ERR("failed to create temporary texture to hold untiled copy\n");
14387e995a2eSmrg			FREE(trans);
14397e995a2eSmrg			return NULL;
14407e995a2eSmrg		}
14417e995a2eSmrg		trans->staging = &staging->resource;
14427e995a2eSmrg
14437e995a2eSmrg		/* Just get the strides. */
14447e995a2eSmrg		r600_texture_get_offset(rctx->screen, staging, 0, NULL,
14457e995a2eSmrg					&trans->b.b.stride,
14467e995a2eSmrg					&trans->b.b.layer_stride);
14477e995a2eSmrg
14481463c08dSmrg		if (usage & PIPE_MAP_READ)
14497e995a2eSmrg			r600_copy_to_staging_texture(ctx, trans);
14507e995a2eSmrg		else
14511463c08dSmrg			usage |= PIPE_MAP_UNSYNCHRONIZED;
14527e995a2eSmrg
14537e995a2eSmrg		buf = trans->staging;
14547e995a2eSmrg	} else {
14557e995a2eSmrg		/* the resource is mapped directly */
14567e995a2eSmrg		offset = r600_texture_get_offset(rctx->screen, rtex, level, box,
14577e995a2eSmrg						 &trans->b.b.stride,
14587e995a2eSmrg						 &trans->b.b.layer_stride);
14597e995a2eSmrg		buf = &rtex->resource;
14607e995a2eSmrg	}
14617e995a2eSmrg
14627e995a2eSmrg	if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) {
14637e995a2eSmrg		r600_resource_reference(&trans->staging, NULL);
14647e995a2eSmrg		FREE(trans);
14657e995a2eSmrg		return NULL;
14667e995a2eSmrg	}
14677e995a2eSmrg
14687e995a2eSmrg	*ptransfer = &trans->b.b;
14697e995a2eSmrg	return map + offset;
14707e995a2eSmrg}
14717e995a2eSmrg
/* Unmap a texture transfer created by the corresponding transfer_map.
 *
 * If the mapping was a write through a staging resource, the staging data
 * is copied back into the real texture first: single-sample depth textures
 * go through resource_copy_region (the staging copy is a flushed depth
 * texture of the same level), everything else through the generic
 * staging-texture blit.  The transfer object and its staging reference are
 * released, and the gfx IB may be flushed if too much temporary texture
 * memory has accumulated.
 */
void r600_texture_transfer_unmap(struct pipe_context *ctx,
				struct pipe_transfer* transfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct pipe_resource *texture = transfer->resource;
	struct r600_texture *rtex = (struct r600_texture*)texture;

	/* Write-back is only needed when a staging resource was used for a
	 * write mapping; read-only maps have nothing to flush. */
	if ((transfer->usage & PIPE_MAP_WRITE) && rtransfer->staging) {
		if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
			ctx->resource_copy_region(ctx, texture, transfer->level,
						  transfer->box.x, transfer->box.y, transfer->box.z,
						  &rtransfer->staging->b.b, transfer->level,
						  &transfer->box);
		} else {
			r600_copy_from_staging_texture(ctx, rtransfer);
		}
	}

	if (rtransfer->staging) {
		/* Account the staging allocation for the flush heuristic
		 * below before dropping our reference to it. */
		rctx->num_alloc_tex_transfer_bytes += rtransfer->staging->buf->size;
		r600_resource_reference(&rtransfer->staging, NULL);
	}

	/* Heuristic for {upload, draw, upload, draw, ..}:
	 *
	 * Flush the gfx IB if we've allocated too much texture storage.
	 *
	 * The idea is that we don't want to build IBs that use too much
	 * memory and put pressure on the kernel memory manager and we also
	 * want to make temporary and invalidated buffers go idle ASAP to
	 * decrease the total memory usage or make them reusable. The memory
	 * usage will be slightly higher than given here because of the buffer
	 * cache in the winsys.
	 *
	 * The result is that the kernel memory manager is never a bottleneck.
	 */
	if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
		rctx->gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
		rctx->num_alloc_tex_transfer_bytes = 0;
	}

	pipe_resource_reference(&transfer->resource, NULL);
	FREE(transfer);
}
15177e995a2eSmrg
15187e995a2eSmrgstruct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
15197e995a2eSmrg						struct pipe_resource *texture,
15207e995a2eSmrg						const struct pipe_surface *templ,
15217e995a2eSmrg						unsigned width0, unsigned height0,
15227e995a2eSmrg						unsigned width, unsigned height)
15237e995a2eSmrg{
15247e995a2eSmrg	struct r600_surface *surface = CALLOC_STRUCT(r600_surface);
15257e995a2eSmrg
15267e995a2eSmrg	if (!surface)
15277e995a2eSmrg		return NULL;
15287e995a2eSmrg
15297e995a2eSmrg	assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
15307e995a2eSmrg	assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));
15317e995a2eSmrg
15327e995a2eSmrg	pipe_reference_init(&surface->base.reference, 1);
15337e995a2eSmrg	pipe_resource_reference(&surface->base.texture, texture);
15347e995a2eSmrg	surface->base.context = pipe;
15357e995a2eSmrg	surface->base.format = templ->format;
15367e995a2eSmrg	surface->base.width = width;
15377e995a2eSmrg	surface->base.height = height;
15387e995a2eSmrg	surface->base.u = templ->u;
15397e995a2eSmrg
15407e995a2eSmrg	surface->width0 = width0;
15417e995a2eSmrg	surface->height0 = height0;
15427e995a2eSmrg
15437e995a2eSmrg	return &surface->base;
15447e995a2eSmrg}
15457e995a2eSmrg
15467e995a2eSmrgstatic struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
15477e995a2eSmrg						struct pipe_resource *tex,
15487e995a2eSmrg						const struct pipe_surface *templ)
15497e995a2eSmrg{
15507e995a2eSmrg	unsigned level = templ->u.tex.level;
15517e995a2eSmrg	unsigned width = u_minify(tex->width0, level);
15527e995a2eSmrg	unsigned height = u_minify(tex->height0, level);
15537e995a2eSmrg	unsigned width0 = tex->width0;
15547e995a2eSmrg	unsigned height0 = tex->height0;
15557e995a2eSmrg
15567e995a2eSmrg	if (tex->target != PIPE_BUFFER && templ->format != tex->format) {
15577e995a2eSmrg		const struct util_format_description *tex_desc
15587e995a2eSmrg			= util_format_description(tex->format);
15597e995a2eSmrg		const struct util_format_description *templ_desc
15607e995a2eSmrg			= util_format_description(templ->format);
15617e995a2eSmrg
15627e995a2eSmrg		assert(tex_desc->block.bits == templ_desc->block.bits);
15637e995a2eSmrg
15647e995a2eSmrg		/* Adjust size of surface if and only if the block width or
15657e995a2eSmrg		 * height is changed. */
15667e995a2eSmrg		if (tex_desc->block.width != templ_desc->block.width ||
15677e995a2eSmrg		    tex_desc->block.height != templ_desc->block.height) {
15687e995a2eSmrg			unsigned nblks_x = util_format_get_nblocksx(tex->format, width);
15697e995a2eSmrg			unsigned nblks_y = util_format_get_nblocksy(tex->format, height);
15707e995a2eSmrg
15717e995a2eSmrg			width = nblks_x * templ_desc->block.width;
15727e995a2eSmrg			height = nblks_y * templ_desc->block.height;
15737e995a2eSmrg
15747e995a2eSmrg			width0 = util_format_get_nblocksx(tex->format, width0);
15757e995a2eSmrg			height0 = util_format_get_nblocksy(tex->format, height0);
15767e995a2eSmrg		}
15777e995a2eSmrg	}
15787e995a2eSmrg
15797e995a2eSmrg	return r600_create_surface_custom(pipe, tex, templ,
15807e995a2eSmrg					  width0, height0,
15817e995a2eSmrg					  width, height);
15827e995a2eSmrg}
15837e995a2eSmrg
15847e995a2eSmrgstatic void r600_surface_destroy(struct pipe_context *pipe,
15857e995a2eSmrg				 struct pipe_surface *surface)
15867e995a2eSmrg{
15877e995a2eSmrg	struct r600_surface *surf = (struct r600_surface*)surface;
15887e995a2eSmrg	r600_resource_reference(&surf->cb_buffer_fmask, NULL);
15897e995a2eSmrg	r600_resource_reference(&surf->cb_buffer_cmask, NULL);
15907e995a2eSmrg	pipe_resource_reference(&surface->texture, NULL);
15917e995a2eSmrg	FREE(surface);
15927e995a2eSmrg}
15937e995a2eSmrg
/* pipe_context::clear_texture implementation.
 *
 * Clears the given box of one mip level by wrapping it in a temporary
 * surface and routing the clear through clear_depth_stencil (depth
 * formats) or clear_render_target, with a software fallback for
 * non-renderable color formats.  'data' holds one packed texel in the
 * texture's format.
 */
static void r600_clear_texture(struct pipe_context *pipe,
			       struct pipe_resource *tex,
			       unsigned level,
			       const struct pipe_box *box,
			       const void *data)
{
	struct pipe_screen *screen = pipe->screen;
	struct r600_texture *rtex = (struct r600_texture*)tex;
	struct pipe_surface tmpl = {{0}};
	struct pipe_surface *sf;

	/* Build a surface covering the box's layer range at this level. */
	tmpl.format = tex->format;
	tmpl.u.tex.first_layer = box->z;
	tmpl.u.tex.last_layer = box->z + box->depth - 1;
	tmpl.u.tex.level = level;
	sf = pipe->create_surface(pipe, tex, &tmpl);
	if (!sf)
		return;

	if (rtex->is_depth) {
		unsigned clear;
		float depth;
		uint8_t stencil = 0;

		/* Depth is always present. */
		clear = PIPE_CLEAR_DEPTH;
		util_format_unpack_z_float(tex->format, &depth, data, 1);

		if (rtex->surface.has_stencil) {
			clear |= PIPE_CLEAR_STENCIL;
			util_format_unpack_s_8uint(tex->format, &stencil, data, 1);
		}

		pipe->clear_depth_stencil(pipe, sf, clear, depth, stencil,
					  box->x, box->y,
					  box->width, box->height, false);
	} else {
		union pipe_color_union color;

		util_format_unpack_rgba(tex->format, color.ui, data, 1);

		if (screen->is_format_supported(screen, tex->format,
						tex->target, 0, 0,
						PIPE_BIND_RENDER_TARGET)) {
			pipe->clear_render_target(pipe, sf, &color,
						  box->x, box->y,
						  box->width, box->height, false);
		} else {
			/* Software fallback - just for R9G9B9E5_FLOAT */
			util_clear_render_target(pipe, sf, &color,
						 box->x, box->y,
						 box->width, box->height);
		}
	}
	pipe_surface_reference(&sf, NULL);
}
16507e995a2eSmrg
/* Translate a pipe format's channel ordering into the hardware CB swap
 * mode (V_0280A0_SWAP_*).
 *
 * Only plain-layout formats (plus R11G11B10_FLOAT) are handled; any other
 * layout, or a swizzle combination with no matching swap mode, yields
 * ~0U.  When do_endian_swap is set, some orderings select the alternate
 * swap variant (used on big-endian configurations).
 */
unsigned r600_translate_colorswap(enum pipe_format format, bool do_endian_swap)
{
	const struct util_format_description *desc = util_format_description(format);

/* True when channel 'chan' of the format uses swizzle source 'swz'. */
#define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == PIPE_SWIZZLE_##swz)

	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_0280A0_SWAP_STD;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
		return ~0U;

	switch (desc->nr_channels) {
	case 1:
		if (HAS_SWIZZLE(0,X))
			return V_0280A0_SWAP_STD; /* X___ */
		else if (HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* ___X */
		break;
	case 2:
		if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) ||
		    (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) ||
		    (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y)))
			return V_0280A0_SWAP_STD; /* XY__ */
		else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
			 (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
		         (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
			/* YX__ */
			return (do_endian_swap ? V_0280A0_SWAP_STD : V_0280A0_SWAP_STD_REV);
		else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
			return V_0280A0_SWAP_ALT; /* X__Y */
		else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* Y__X */
		break;
	case 3:
		if (HAS_SWIZZLE(0,X))
			return (do_endian_swap ? V_0280A0_SWAP_STD_REV : V_0280A0_SWAP_STD);
		else if (HAS_SWIZZLE(0,Z))
			return V_0280A0_SWAP_STD_REV; /* ZYX */
		break;
	case 4:
		/* check the middle channels, the 1st and 4th channel can be NONE */
		if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z)) {
			return V_0280A0_SWAP_STD; /* XYZW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y)) {
			return V_0280A0_SWAP_STD_REV; /* WZYX */
		} else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X)) {
			return V_0280A0_SWAP_ALT; /* ZYXW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,W)) {
			/* YZWX */
			if (desc->is_array)
				return V_0280A0_SWAP_ALT_REV;
			else
				return (do_endian_swap ? V_0280A0_SWAP_ALT : V_0280A0_SWAP_ALT_REV);
		}
		break;
	}
	/* No hardware swap mode matches this swizzle combination. */
	return ~0U;
}
17107e995a2eSmrg
17117e995a2eSmrg/* FAST COLOR CLEAR */
17127e995a2eSmrg
17137e995a2eSmrgstatic void evergreen_set_clear_color(struct r600_texture *rtex,
17147e995a2eSmrg				      enum pipe_format surface_format,
17157e995a2eSmrg				      const union pipe_color_union *color)
17167e995a2eSmrg{
17177e995a2eSmrg	union util_color uc;
17187e995a2eSmrg
17197e995a2eSmrg	memset(&uc, 0, sizeof(uc));
17207e995a2eSmrg
17217e995a2eSmrg	if (rtex->surface.bpe == 16) {
17227e995a2eSmrg		/* DCC fast clear only:
17237e995a2eSmrg		 *   CLEAR_WORD0 = R = G = B
17247e995a2eSmrg		 *   CLEAR_WORD1 = A
17257e995a2eSmrg		 */
17267e995a2eSmrg		assert(color->ui[0] == color->ui[1] &&
17277e995a2eSmrg		       color->ui[0] == color->ui[2]);
17287e995a2eSmrg		uc.ui[0] = color->ui[0];
17297e995a2eSmrg		uc.ui[1] = color->ui[3];
17307e995a2eSmrg	} else {
17311463c08dSmrg		util_pack_color_union(surface_format, &uc, color);
17327e995a2eSmrg	}
17337e995a2eSmrg
17347e995a2eSmrg	memcpy(rtex->color_clear_value, &uc, 2 * sizeof(uint32_t));
17357e995a2eSmrg}
17367e995a2eSmrg
/* Attempt a CMASK-based fast clear of the bound colorbuffers.
 *
 * For each colorbuffer selected in *buffers that qualifies (all layers
 * bound, not mipmapped, tiled, not shared without explicit flush, large
 * enough, bpe <= 8), this clears the CMASK metadata, records the clear
 * color on the texture, marks the framebuffer atom dirty, and removes the
 * buffer's bit from *buffers so the caller skips the slow clear for it.
 * dirty_cbufs, if non-NULL, collects one bit per modified colorbuffer.
 */
void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
				   struct pipe_framebuffer_state *fb,
				   struct r600_atom *fb_state,
				   unsigned *buffers, ubyte *dirty_cbufs,
				   const union pipe_color_union *color)
{
	int i;

	/* This function is broken in BE, so just disable this path for now */
#if UTIL_ARCH_BIG_ENDIAN
	return;
#endif

	/* Fast clear would bypass conditional rendering; bail out. */
	if (rctx->render_cond)
		return;

	for (i = 0; i < fb->nr_cbufs; i++) {
		struct r600_texture *tex;
		unsigned clear_bit = PIPE_CLEAR_COLOR0 << i;

		if (!fb->cbufs[i])
			continue;

		/* if this colorbuffer is not being cleared */
		if (!(*buffers & clear_bit))
			continue;

		tex = (struct r600_texture *)fb->cbufs[i]->texture;

		/* the clear is allowed if all layers are bound */
		if (fb->cbufs[i]->u.tex.first_layer != 0 ||
		    fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->resource.b.b, 0)) {
			continue;
		}

		/* cannot clear mipmapped textures */
		if (fb->cbufs[i]->texture->last_level != 0) {
			continue;
		}

		/* only supported on tiled surfaces */
		if (tex->surface.is_linear) {
			continue;
		}

		/* shared textures can't use fast clear without an explicit flush,
		 * because there is no way to communicate the clear color among
		 * all clients
		 */
		if (tex->resource.b.is_shared &&
		    !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			continue;

		/* Use a slow clear for small surfaces where the cost of
		 * the eliminate pass can be higher than the benefit of fast
		 * clear. AMDGPU-pro does this, but the numbers may differ.
		 *
		 * This helps on both dGPUs and APUs, even small ones.
		 */
		if (tex->resource.b.b.nr_samples <= 1 &&
		    tex->resource.b.b.width0 * tex->resource.b.b.height0 <= 300 * 300)
			continue;

		{
			/* 128-bit formats are unsupported */
			if (tex->surface.bpe > 8) {
				continue;
			}

			/* ensure CMASK is enabled */
			r600_texture_alloc_cmask_separate(rctx->screen, tex);
			if (tex->cmask.size == 0) {
				continue;
			}

			/* Do the fast clear. */
			rctx->clear_buffer(&rctx->b, &tex->cmask_buffer->b.b,
					   tex->cmask.offset, tex->cmask.size, 0,
					   R600_COHERENCY_CB_META);

			/* Mark the level as compressed so it gets decompressed
			 * before sampling; bump the screen counter only on the
			 * texture's first dirty level. */
			bool need_compressed_update = !tex->dirty_level_mask;

			tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;

			if (need_compressed_update)
				p_atomic_inc(&rctx->screen->compressed_colortex_counter);
		}

		evergreen_set_clear_color(tex, fb->cbufs[i]->format, color);

		if (dirty_cbufs)
			*dirty_cbufs |= 1 << i;
		rctx->set_atom_dirty(rctx, fb_state, true);
		*buffers &= ~clear_bit;
	}
}
18337e995a2eSmrg
18347e995a2eSmrgstatic struct pipe_memory_object *
18357e995a2eSmrgr600_memobj_from_handle(struct pipe_screen *screen,
18367e995a2eSmrg			struct winsys_handle *whandle,
18377e995a2eSmrg			bool dedicated)
18387e995a2eSmrg{
18397e995a2eSmrg	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
18407e995a2eSmrg	struct r600_memory_object *memobj = CALLOC_STRUCT(r600_memory_object);
18417e995a2eSmrg	struct pb_buffer *buf = NULL;
18427e995a2eSmrg
18437e995a2eSmrg	if (!memobj)
18447e995a2eSmrg		return NULL;
18457e995a2eSmrg
18467e995a2eSmrg	buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle,
18471463c08dSmrg					      rscreen->info.max_alignment);
18487e995a2eSmrg	if (!buf) {
18497e995a2eSmrg		free(memobj);
18507e995a2eSmrg		return NULL;
18517e995a2eSmrg	}
18527e995a2eSmrg
18537e995a2eSmrg	memobj->b.dedicated = dedicated;
18547e995a2eSmrg	memobj->buf = buf;
18551463c08dSmrg	memobj->stride = whandle->stride;
18561463c08dSmrg	memobj->offset = whandle->offset;
18577e995a2eSmrg
18587e995a2eSmrg	return (struct pipe_memory_object *)memobj;
18597e995a2eSmrg
18607e995a2eSmrg}
18617e995a2eSmrg
18627e995a2eSmrgstatic void
18637e995a2eSmrgr600_memobj_destroy(struct pipe_screen *screen,
18647e995a2eSmrg		    struct pipe_memory_object *_memobj)
18657e995a2eSmrg{
18667e995a2eSmrg	struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;
18677e995a2eSmrg
18687e995a2eSmrg	pb_reference(&memobj->buf, NULL);
18697e995a2eSmrg	free(memobj);
18707e995a2eSmrg}
18717e995a2eSmrg
/* pipe_screen::resource_from_memobj: create a texture backed by an
 * imported memory object at the given byte offset.
 *
 * Dedicated allocations carry BO metadata describing the surface layout,
 * which is imported; non-dedicated ones fall back to a linear layout (see
 * the comment below).  The new texture takes its own reference on the
 * memory object's buffer.  Returns NULL on failure.
 */
static struct pipe_resource *
r600_texture_from_memobj(struct pipe_screen *screen,
			 const struct pipe_resource *templ,
			 struct pipe_memory_object *_memobj,
			 uint64_t offset)
{
	int r;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;
	struct r600_texture *rtex;
	struct radeon_surf surface = {};
	struct radeon_bo_metadata metadata = {};
	enum radeon_surf_mode array_mode;
	bool is_scanout;
	struct pb_buffer *buf = NULL;

	if (memobj->b.dedicated) {
		/* Dedicated BOs carry tiling metadata; import it. */
		rscreen->ws->buffer_get_metadata(rscreen->ws, memobj->buf, &metadata, NULL);
		r600_surface_import_metadata(rscreen, &surface, &metadata,
				     &array_mode, &is_scanout);
	} else {
		/**
		 * The bo metadata is unset for un-dedicated images. So we fall
		 * back to linear. See answer to question 5 of the
		 * VK_KHX_external_memory spec for some details.
		 *
		 * It is possible that this case isn't going to work if the
		 * surface pitch isn't correctly aligned by default.
		 *
		 * In order to support it correctly we require multi-image
		 * metadata to be synchronized between radv and radeonsi. The
		 * semantics of associating multiple image metadata to a memory
		 * object on the vulkan export side are not concretely defined
		 * either.
		 *
		 * All the use cases we are aware of at the moment for memory
		 * objects use dedicated allocations. So lets keep the initial
		 * implementation simple.
		 *
		 * A possible alternative is to attempt to reconstruct the
		 * tiling information when the TexParameter TEXTURE_TILING_EXT
		 * is set.
		 */
		array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
		is_scanout = false;

	}

	r = r600_init_surface(rscreen, &surface, templ,
			      array_mode, memobj->stride,
			      offset, true, is_scanout,
			      false);
	if (r)
		return NULL;

	rtex = r600_texture_create_object(screen, templ, memobj->buf, &surface);
	if (!rtex)
		return NULL;

	/* r600_texture_create_object doesn't increment refcount of
	 * memobj->buf, so increment it here.
	 */
	/* NOTE: the extra reference obtained via the local 'buf' is
	 * intentionally left owned by rtex (released when the texture is
	 * destroyed). */
	pb_reference(&buf, memobj->buf);

	rtex->resource.b.is_shared = true;
	rtex->resource.external_usage = PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE;

	return &rtex->resource.b.b;
}
19417e995a2eSmrg
19427e995a2eSmrgvoid r600_init_screen_texture_functions(struct r600_common_screen *rscreen)
19437e995a2eSmrg{
19447e995a2eSmrg	rscreen->b.resource_from_handle = r600_texture_from_handle;
19457e995a2eSmrg	rscreen->b.resource_get_handle = r600_texture_get_handle;
1946d8407755Smaya	rscreen->b.resource_get_info = r600_texture_get_info;
19477e995a2eSmrg	rscreen->b.resource_from_memobj = r600_texture_from_memobj;
19487e995a2eSmrg	rscreen->b.memobj_create_from_handle = r600_memobj_from_handle;
19497e995a2eSmrg	rscreen->b.memobj_destroy = r600_memobj_destroy;
19507e995a2eSmrg}
19517e995a2eSmrg
19527e995a2eSmrgvoid r600_init_context_texture_functions(struct r600_common_context *rctx)
19537e995a2eSmrg{
19547e995a2eSmrg	rctx->b.create_surface = r600_create_surface;
19557e995a2eSmrg	rctx->b.surface_destroy = r600_surface_destroy;
19567e995a2eSmrg	rctx->b.clear_texture = r600_clear_texture;
19577e995a2eSmrg}
1958