/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "radeon_drm_winsys.h"
#include "util/u_format.h"
#include <radeon_surface.h>

/*
 * Translation layer between the winsys surface layout (struct radeon_surf)
 * and the libdrm_radeon one (struct radeon_surface), plus FMASK/CMASK
 * sizing for SI-class chips.
 */

/* Return the CIK macro tile index derived from the surface's tile split:
 * log2(min(tile_split, bytes in an 8x8 tile) / 64). The result must fit
 * in the 16-entry hw macrotile table (asserted below). */
static unsigned cik_get_macro_tile_index(struct radeon_surf *surf)
{
    unsigned index, tileb;

    /* Bytes covered by one 8x8 micro tile, clamped to the tile split. */
    tileb = 8 * 8 * surf->bpe;
    tileb = MIN2(surf->u.legacy.tile_split, tileb);

    /* index = log2(tileb / 64) */
    for (index = 0; tileb > 64; index++)
        tileb >>= 1;

    assert(index < 16);
    return index;
}

/* Micro tile mode bitfield extractors; presumably fields of the tile mode
 * register at offset 0x9910 (per the G_009910_ prefix) — the _NEW layout
 * is the one used on CIK and later (see set_micro_tile_mode). */
#define G_009910_MICRO_TILE_MODE(x) (((x) >> 0) & 0x03)
#define G_009910_MICRO_TILE_MODE_NEW(x) (((x) >> 22) & 0x07)

/* Derive surf->micro_tile_mode from the tile mode array entry selected by
 * the surface's base-level tiling index. Chips older than SI have no
 * si_tile_mode_array; the mode is forced to 0 there. */
static void set_micro_tile_mode(struct radeon_surf *surf,
                                struct radeon_info *info)
{
    uint32_t tile_mode;

    if (info->chip_class < SI) {
        surf->micro_tile_mode = 0;
        return;
    }

    tile_mode = info->si_tile_mode_array[surf->u.legacy.tiling_index[0]];

    /* CIK moved the field to different bits (see the macros above). */
    if (info->chip_class >= CIK)
        surf->micro_tile_mode = G_009910_MICRO_TILE_MODE_NEW(tile_mode);
    else
        surf->micro_tile_mode = G_009910_MICRO_TILE_MODE(tile_mode);
}

/* Copy one mip level winsys -> drm. The winsys keeps the slice size in
 * dwords while the drm struct wants bytes; pitch_bytes is reconstructed
 * from nblk_x and the caller-supplied effective bytes-per-element. */
static void surf_level_winsys_to_drm(struct radeon_surface_level *level_drm,
                                     const struct legacy_surf_level *level_ws,
                                     unsigned bpe)
{
    level_drm->offset = level_ws->offset;
    level_drm->slice_size = (uint64_t)level_ws->slice_size_dw * 4;
    level_drm->nblk_x = level_ws->nblk_x;
    level_drm->nblk_y = level_ws->nblk_y;
    level_drm->pitch_bytes = level_ws->nblk_x * bpe;
    level_drm->mode = level_ws->mode;
}

/* Copy one mip level drm -> winsys (inverse of surf_level_winsys_to_drm). */
static void surf_level_drm_to_winsys(struct legacy_surf_level *level_ws,
                                     const struct radeon_surface_level *level_drm,
                                     unsigned bpe)
{
    level_ws->offset = level_drm->offset;
    level_ws->slice_size_dw = level_drm->slice_size / 4;
    level_ws->nblk_x = level_drm->nblk_x;
    level_ws->nblk_y = level_drm->nblk_y;
    level_ws->mode = level_drm->mode;
    /* The drm pitch must agree with what the winsys->drm direction computes. */
    assert(level_drm->nblk_x * bpe == level_drm->pitch_bytes);
}

/* Fill a libdrm radeon_surface from a pipe_resource template plus the
 * (possibly pre-initialized) winsys surface, ready to be handed to
 * libdrm_radeon's surface allocator. */
static void surf_winsys_to_drm(struct radeon_surface *surf_drm,
                               const struct pipe_resource *tex,
                               unsigned flags, unsigned bpe,
                               enum radeon_surf_mode mode,
                               const struct radeon_surf *surf_ws)
{
    int i;

    memset(surf_drm, 0, sizeof(*surf_drm));

    surf_drm->npix_x = tex->width0;
    surf_drm->npix_y = tex->height0;
    surf_drm->npix_z = tex->depth0;
    surf_drm->blk_w = util_format_get_blockwidth(tex->format);
    surf_drm->blk_h = util_format_get_blockheight(tex->format);
    surf_drm->blk_d = 1;
    surf_drm->array_size = 1;
    surf_drm->last_level = tex->last_level;
    surf_drm->bpe = bpe;
    surf_drm->nsamples = tex->nr_samples ? tex->nr_samples : 1;

    /* Replace the TYPE and MODE fields of the incoming flags; keep the rest. */
    surf_drm->flags = flags;
    surf_drm->flags = RADEON_SURF_CLR(surf_drm->flags, TYPE);
    surf_drm->flags = RADEON_SURF_CLR(surf_drm->flags, MODE);
    surf_drm->flags |= RADEON_SURF_SET(mode, MODE) |
                       RADEON_SURF_HAS_SBUFFER_MIPTREE |
                       RADEON_SURF_HAS_TILE_MODE_INDEX;

    /* Map the gallium texture target onto the libdrm surface TYPE;
     * array targets also carry their layer count. */
    switch (tex->target) {
    case PIPE_TEXTURE_1D:
        surf_drm->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D, TYPE);
        break;
    case PIPE_TEXTURE_RECT:
    case PIPE_TEXTURE_2D:
        surf_drm->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE);
        break;
    case PIPE_TEXTURE_3D:
        surf_drm->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE);
        break;
    case PIPE_TEXTURE_1D_ARRAY:
        surf_drm->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE);
        surf_drm->array_size = tex->array_size;
        break;
    case PIPE_TEXTURE_CUBE_ARRAY: /* cube array layout like 2d array */
        assert(tex->array_size % 6 == 0);
        /* fall through */
    case PIPE_TEXTURE_2D_ARRAY:
        surf_drm->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE);
        surf_drm->array_size = tex->array_size;
        break;
    case PIPE_TEXTURE_CUBE:
        surf_drm->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_CUBEMAP, TYPE);
        break;
    case PIPE_BUFFER:
    default:
        /* Buffers never reach this path. */
        assert(0);
    }

    surf_drm->bo_size = surf_ws->surf_size;
    surf_drm->bo_alignment = surf_ws->surf_alignment;

    surf_drm->bankw = surf_ws->u.legacy.bankw;
    surf_drm->bankh = surf_ws->u.legacy.bankh;
    surf_drm->mtilea = surf_ws->u.legacy.mtilea;
    surf_drm->tile_split = surf_ws->u.legacy.tile_split;

    for (i = 0; i <= surf_drm->last_level; i++) {
        /* Effective per-element size scales with the sample count. */
        surf_level_winsys_to_drm(&surf_drm->level[i], &surf_ws->u.legacy.level[i],
                                 bpe * surf_drm->nsamples);

        surf_drm->tiling_index[i] = surf_ws->u.legacy.tiling_index[i];
    }

    if (flags & RADEON_SURF_SBUFFER) {
        surf_drm->stencil_tile_split = surf_ws->u.legacy.stencil_tile_split;

        for (i = 0; i <= surf_drm->last_level; i++) {
            /* Stencil passes nsamples alone as the effective bpe
             * (presumably 1 byte per stencil sample — this matches the
             * drm->winsys direction below; confirm against libdrm). */
            surf_level_winsys_to_drm(&surf_drm->stencil_level[i],
                                     &surf_ws->u.legacy.stencil_level[i],
                                     surf_drm->nsamples);
            surf_drm->stencil_tiling_index[i] = surf_ws->u.legacy.stencil_tiling_index[i];
        }
    }
}

/* Fill the winsys surface from the libdrm result (inverse of
 * surf_winsys_to_drm), then derive the winsys-only fields: macro tile
 * index, micro tile mode and displayability. */
static void surf_drm_to_winsys(struct radeon_drm_winsys *ws,
                               struct radeon_surf *surf_ws,
                               const struct radeon_surface *surf_drm)
{
    int i;

    memset(surf_ws, 0, sizeof(*surf_ws));

    surf_ws->blk_w = surf_drm->blk_w;
    surf_ws->blk_h = surf_drm->blk_h;
    surf_ws->bpe = surf_drm->bpe;
    surf_ws->is_linear = surf_drm->level[0].mode <= RADEON_SURF_MODE_LINEAR_ALIGNED;
    surf_ws->has_stencil = !!(surf_drm->flags & RADEON_SURF_SBUFFER);
    surf_ws->flags = surf_drm->flags;

    surf_ws->surf_size = surf_drm->bo_size;
    surf_ws->surf_alignment = surf_drm->bo_alignment;

    surf_ws->u.legacy.bankw = surf_drm->bankw;
    surf_ws->u.legacy.bankh = surf_drm->bankh;
    surf_ws->u.legacy.mtilea = surf_drm->mtilea;
    surf_ws->u.legacy.tile_split = surf_drm->tile_split;

    /* NOTE(review): computed unconditionally for every chip, though the
     * helper is CIK-specific by name; presumably harmless elsewhere —
     * confirm before relying on this field pre-CIK. */
    surf_ws->u.legacy.macro_tile_index = cik_get_macro_tile_index(surf_ws);

    for (i = 0; i <= surf_drm->last_level; i++) {
        surf_level_drm_to_winsys(&surf_ws->u.legacy.level[i], &surf_drm->level[i],
                                 surf_drm->bpe * surf_drm->nsamples);
        surf_ws->u.legacy.tiling_index[i] = surf_drm->tiling_index[i];
    }

    if (surf_ws->flags & RADEON_SURF_SBUFFER) {
        surf_ws->u.legacy.stencil_tile_split = surf_drm->stencil_tile_split;

        for (i = 0; i <= surf_drm->last_level; i++) {
            /* Stencil effective bpe is nsamples, mirroring surf_winsys_to_drm. */
            surf_level_drm_to_winsys(&surf_ws->u.legacy.stencil_level[i],
                                     &surf_drm->stencil_level[i],
                                     surf_drm->nsamples);
            surf_ws->u.legacy.stencil_tiling_index[i] = surf_drm->stencil_tiling_index[i];
        }
    }

    set_micro_tile_mode(surf_ws, &ws->info);
    /* A surface can be scanned out if it is linear or its micro tile mode
     * is one of the display-capable ones. */
    surf_ws->is_displayable = surf_ws->is_linear ||
                              surf_ws->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY ||
                              surf_ws->micro_tile_mode == RADEON_MICRO_MODE_ROTATED;
}

/* Compute CMASK size and alignment for a color surface (SI..VI only);
 * no-op for depth/stencil surfaces. Fills surf->cmask_size,
 * surf->cmask_alignment and u.legacy.cmask_slice_tile_max. */
static void si_compute_cmask(const struct radeon_info *info,
                             const struct ac_surf_config *config,
                             struct radeon_surf *surf)
{
    unsigned pipe_interleave_bytes = info->pipe_interleave_bytes;
    unsigned num_pipes = info->num_tile_pipes;
    unsigned cl_width, cl_height;

    if (surf->flags & RADEON_SURF_Z_OR_SBUFFER)
        return;

    assert(info->chip_class <= VI);

    /* CMASK cache-line dimensions depend on the pipe configuration. */
    switch (num_pipes) {
    case 2:
        cl_width = 32;
        cl_height = 16;
        break;
    case 4:
        cl_width = 32;
        cl_height = 32;
        break;
    case 8:
        cl_width = 64;
        cl_height = 32;
        break;
    case 16: /* Hawaii */
        cl_width = 64;
        cl_height = 64;
        break;
    default:
        assert(0);
        return;
    }

    unsigned base_align = num_pipes * pipe_interleave_bytes;

    /* Base-level dimensions in blocks, padded to the cache-line grid. */
    unsigned width = align(surf->u.legacy.level[0].nblk_x, cl_width*8);
    unsigned height = align(surf->u.legacy.level[0].nblk_y, cl_height*8);
    /* One CMASK element covers an 8x8 block. */
    unsigned slice_elements = (width * height) / (8*8);

    /* Each element of CMASK is a nibble. */
    unsigned slice_bytes = slice_elements / 2;

    /* slice_tile_max is in 128x128 tiles, stored minus one (hw convention). */
    surf->u.legacy.cmask_slice_tile_max = (width * height) / (128*128);
    if (surf->u.legacy.cmask_slice_tile_max)
        surf->u.legacy.cmask_slice_tile_max -= 1;

    unsigned num_layers;
    if (config->is_3d)
        num_layers = config->info.depth;
    else if (config->is_cube)
        num_layers = 6;
    else
        num_layers = config->info.array_size;

    surf->cmask_alignment = MAX2(256, base_align);
    surf->cmask_size = align(slice_bytes, base_align) * num_layers;
}

/* winsys->surface_init hook: let libdrm_radeon compute the texture layout,
 * convert the result back into *surf_ws, then (SI only) compute the FMASK
 * layout for MSAA surfaces and the CMASK size.
 *
 * Returns 0 on success, a libdrm error code or -1 on failure. */
static int radeon_winsys_surface_init(struct radeon_winsys *rws,
                                      const struct pipe_resource *tex,
                                      unsigned flags, unsigned bpe,
                                      enum radeon_surf_mode mode,
                                      struct radeon_surf *surf_ws)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
    struct radeon_surface surf_drm;
    int r;

    surf_winsys_to_drm(&surf_drm, tex, flags, bpe, mode, surf_ws);

    /* Imported surfaces keep their existing layout; FMASK layout is fixed
     * by the recursive call below. Everything else may be retuned. */
    if (!(flags & (RADEON_SURF_IMPORTED | RADEON_SURF_FMASK))) {
        r = radeon_surface_best(ws->surf_man, &surf_drm);
        if (r)
            return r;
    }

    r = radeon_surface_init(ws->surf_man, &surf_drm);
    if (r)
        return r;

    surf_drm_to_winsys(ws, surf_ws, &surf_drm);

    /* Compute FMASK. */
    if (ws->gen == DRV_SI &&
        tex->nr_samples >= 2 &&
        !(flags & (RADEON_SURF_Z_OR_SBUFFER | RADEON_SURF_FMASK))) {
        /* FMASK is allocated like an ordinary texture. */
        struct pipe_resource templ = *tex;
        struct radeon_surf fmask = {};
        /* NOTE(review): 'bpe' deliberately shadows the parameter — the
         * FMASK element size below is unrelated to the color bpe. */
        unsigned fmask_flags, bpe;

        templ.nr_samples = 1;
        fmask_flags = flags | RADEON_SURF_FMASK;

        /* FMASK bytes per element by sample count. */
        switch (tex->nr_samples) {
        case 2:
        case 4:
            bpe = 1;
            break;
        case 8:
            bpe = 4;
            break;
        default:
            fprintf(stderr, "radeon: Invalid sample count for FMASK allocation.\n");
            return -1;
        }

        /* Recursive call; RADEON_SURF_FMASK prevents further recursion. */
        if (radeon_winsys_surface_init(rws, &templ, fmask_flags, bpe,
                                       RADEON_SURF_MODE_2D, &fmask)) {
            fprintf(stderr, "Got error in surface_init while allocating FMASK.\n");
            return -1;
        }

        assert(fmask.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);

        surf_ws->fmask_size = fmask.surf_size;
        surf_ws->fmask_alignment = MAX2(256, fmask.surf_alignment);
        surf_ws->fmask_tile_swizzle = fmask.tile_swizzle;

        /* slice_tile_max counts 8x8 tiles, stored minus one (hw convention). */
        surf_ws->u.legacy.fmask.slice_tile_max =
            (fmask.u.legacy.level[0].nblk_x * fmask.u.legacy.level[0].nblk_y) / 64;
        if (surf_ws->u.legacy.fmask.slice_tile_max)
            surf_ws->u.legacy.fmask.slice_tile_max -= 1;

        surf_ws->u.legacy.fmask.tiling_index = fmask.u.legacy.tiling_index[0];
        surf_ws->u.legacy.fmask.bankh = fmask.u.legacy.bankh;
        surf_ws->u.legacy.fmask.pitch_in_pixels = fmask.u.legacy.level[0].nblk_x;
    }

    if (ws->gen == DRV_SI) {
        struct ac_surf_config config;

        /* Only these fields need to be set for the CMASK computation. */
        config.info.width = tex->width0;
        config.info.height = tex->height0;
        config.info.depth = tex->depth0;
        config.info.array_size = tex->array_size;
        config.is_3d = !!(tex->target == PIPE_TEXTURE_3D);
        config.is_cube = !!(tex->target == PIPE_TEXTURE_CUBE);

        si_compute_cmask(&ws->info, &config, surf_ws);
    }
    return 0;
}

/* Plug the surface_init implementation into the winsys vtable. */
void radeon_surface_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.surface_init = radeon_winsys_surface_init;
}