r600_state.c revision 848b8605

/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_formats.h"
#include "r600_shader.h"
#include "r600d.h"

#include "pipe/p_shader_tokens.h"
#include "util/u_pack_color.h"
#include "util/u_memory.h"
#include "util/u_framebuffer.h"
#include "util/u_dual_blend.h"

static uint32_t r600_translate_blend_function(int blend_func)
{
	switch (blend_func) {
	case PIPE_BLEND_ADD:
		return V_028804_COMB_DST_PLUS_SRC;
	case PIPE_BLEND_SUBTRACT:
		return V_028804_COMB_SRC_MINUS_DST;
	case PIPE_BLEND_REVERSE_SUBTRACT:
		return V_028804_COMB_DST_MINUS_SRC;
	case PIPE_BLEND_MIN:
		return V_028804_COMB_MIN_DST_SRC;
	case PIPE_BLEND_MAX:
		return V_028804_COMB_MAX_DST_SRC;
	default:
		R600_ERR("Unknown blend function %d\n", blend_func);
		assert(0);
		break;
	}
	return 0;
}

static uint32_t r600_translate_blend_factor(int blend_fact)
{
	switch (blend_fact) {
	case PIPE_BLENDFACTOR_ONE:
		return V_028804_BLEND_ONE;
	case PIPE_BLENDFACTOR_SRC_COLOR:
		return V_028804_BLEND_SRC_COLOR;
	case PIPE_BLENDFACTOR_SRC_ALPHA:
		return V_028804_BLEND_SRC_ALPHA;
	case PIPE_BLENDFACTOR_DST_ALPHA:
		return V_028804_BLEND_DST_ALPHA;
	case PIPE_BLENDFACTOR_DST_COLOR:
		return V_028804_BLEND_DST_COLOR;
	case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
		return V_028804_BLEND_SRC_ALPHA_SATURATE;
	case PIPE_BLENDFACTOR_CONST_COLOR:
		return V_028804_BLEND_CONST_COLOR;
	case PIPE_BLENDFACTOR_CONST_ALPHA:
		return V_028804_BLEND_CONST_ALPHA;
	case PIPE_BLENDFACTOR_ZERO:
		return V_028804_BLEND_ZERO;
	case PIPE_BLENDFACTOR_INV_SRC_COLOR:
		return V_028804_BLEND_ONE_MINUS_SRC_COLOR;
	case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
		return V_028804_BLEND_ONE_MINUS_SRC_ALPHA;
	case PIPE_BLENDFACTOR_INV_DST_ALPHA:
		return V_028804_BLEND_ONE_MINUS_DST_ALPHA;
	case PIPE_BLENDFACTOR_INV_DST_COLOR:
		return V_028804_BLEND_ONE_MINUS_DST_COLOR;
	case PIPE_BLENDFACTOR_INV_CONST_COLOR:
		return V_028804_BLEND_ONE_MINUS_CONST_COLOR;
	case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
		return V_028804_BLEND_ONE_MINUS_CONST_ALPHA;
	case PIPE_BLENDFACTOR_SRC1_COLOR:
		return V_028804_BLEND_SRC1_COLOR;
	case PIPE_BLENDFACTOR_SRC1_ALPHA:
		return V_028804_BLEND_SRC1_ALPHA;
	case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
		return V_028804_BLEND_INV_SRC1_COLOR;
	case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
		return V_028804_BLEND_INV_SRC1_ALPHA;
	default:
		R600_ERR("Bad blend factor %d not supported!\n", blend_fact);
		assert(0);
		break;
	}
	return 0;
}

static unsigned r600_tex_dim(unsigned dim, unsigned nr_samples)
{
	switch (dim) {
	default:
	case PIPE_TEXTURE_1D:
		return V_038000_SQ_TEX_DIM_1D;
	case PIPE_TEXTURE_1D_ARRAY:
		return V_038000_SQ_TEX_DIM_1D_ARRAY;
	case PIPE_TEXTURE_2D:
	case PIPE_TEXTURE_RECT:
		return nr_samples > 1 ? V_038000_SQ_TEX_DIM_2D_MSAA :
					V_038000_SQ_TEX_DIM_2D;
	case PIPE_TEXTURE_2D_ARRAY:
		return nr_samples > 1 ? V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA :
					V_038000_SQ_TEX_DIM_2D_ARRAY;
	case PIPE_TEXTURE_3D:
		return V_038000_SQ_TEX_DIM_3D;
	case PIPE_TEXTURE_CUBE:
	case PIPE_TEXTURE_CUBE_ARRAY:
		return V_038000_SQ_TEX_DIM_CUBEMAP;
	}
}

static uint32_t r600_translate_dbformat(enum pipe_format format)
{
	switch (format) {
	case PIPE_FORMAT_Z16_UNORM:
		return V_028010_DEPTH_16;
	case PIPE_FORMAT_Z24X8_UNORM:
		return V_028010_DEPTH_X8_24;
	case PIPE_FORMAT_Z24_UNORM_S8_UINT:
		return V_028010_DEPTH_8_24;
	case PIPE_FORMAT_Z32_FLOAT:
		return V_028010_DEPTH_32_FLOAT;
	case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
		return V_028010_DEPTH_X24_8_32_FLOAT;
	default:
		return ~0U;
	}
}

static bool r600_is_sampler_format_supported(struct pipe_screen *screen, enum pipe_format format)
{
	return r600_translate_texformat(screen, format, NULL, NULL, NULL) != ~0U;
}

static bool r600_is_colorbuffer_format_supported(enum chip_class chip, enum pipe_format format)
{
	return r600_translate_colorformat(chip, format) != ~0U &&
	       r600_translate_colorswap(format) != ~0U;
}

static bool r600_is_zs_format_supported(enum pipe_format format)
{
	return r600_translate_dbformat(format) != ~0U;
}

static inline bool r600_is_blending_supported(enum pipe_format format)
{
	return !(util_format_is_pure_integer(format) || util_format_is_depth_or_stencil(format));
}
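
/* r600_is_format_supported is an all-or-nothing query: each requested bind
 * flag that is actually supported is accumulated into retval, and the
 * function returns true only when retval == usage, i.e. every requested
 * binding works for this format/target/sample-count combination. */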
boolean r600_is_format_supported(struct pipe_screen *screen,
				 enum pipe_format format,
				 enum pipe_texture_target target,
				 unsigned sample_count,
				 unsigned usage)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	unsigned retval = 0;

	if (target >= PIPE_MAX_TEXTURE_TYPES) {
		R600_ERR("r600: unsupported texture type %d\n", target);
		return FALSE;
	}

	if (!util_format_is_supported(format, usage))
		return FALSE;

	if (sample_count > 1) {
		if (!rscreen->has_msaa)
			return FALSE;

		/* R11G11B10 is broken on R6xx. */
		if (rscreen->b.chip_class == R600 &&
		    format == PIPE_FORMAT_R11G11B10_FLOAT)
			return FALSE;

		/* MSAA integer colorbuffers hang. */
		if (util_format_is_pure_integer(format) &&
		    !util_format_is_depth_or_stencil(format))
			return FALSE;

		switch (sample_count) {
		case 2:
		case 4:
		case 8:
			break;
		default:
			return FALSE;
		}
	}

	if (usage & PIPE_BIND_SAMPLER_VIEW) {
		if (target == PIPE_BUFFER) {
			if (r600_is_vertex_format_supported(format))
				retval |= PIPE_BIND_SAMPLER_VIEW;
		} else {
			if (r600_is_sampler_format_supported(screen, format))
				retval |= PIPE_BIND_SAMPLER_VIEW;
		}
	}

	if ((usage & (PIPE_BIND_RENDER_TARGET |
		      PIPE_BIND_DISPLAY_TARGET |
		      PIPE_BIND_SCANOUT |
		      PIPE_BIND_SHARED)) &&
	    r600_is_colorbuffer_format_supported(rscreen->b.chip_class, format)) {
		retval |= usage &
			  (PIPE_BIND_RENDER_TARGET |
			   PIPE_BIND_DISPLAY_TARGET |
			   PIPE_BIND_SCANOUT |
			   PIPE_BIND_SHARED);
	}

	if ((usage & PIPE_BIND_DEPTH_STENCIL) &&
	    r600_is_zs_format_supported(format)) {
		retval |= PIPE_BIND_DEPTH_STENCIL;
	}

	if ((usage & PIPE_BIND_VERTEX_BUFFER) &&
	    r600_is_vertex_format_supported(format)) {
		retval |= PIPE_BIND_VERTEX_BUFFER;
	}

	if (usage & PIPE_BIND_TRANSFER_READ)
		retval |= PIPE_BIND_TRANSFER_READ;
	if (usage & PIPE_BIND_TRANSFER_WRITE)
		retval |= PIPE_BIND_TRANSFER_WRITE;

	if ((usage & PIPE_BIND_BLENDABLE) &&
	    r600_is_blending_supported(format))
		retval |= PIPE_BIND_BLENDABLE;

	return retval == usage;
}

static void r600_emit_polygon_offset(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct r600_poly_offset_state *state = (struct r600_poly_offset_state*)a;
	float offset_units = state->offset_units;
	float offset_scale = state->offset_scale;

	switch (state->zs_format) {
	case PIPE_FORMAT_Z24X8_UNORM:
	case PIPE_FORMAT_Z24_UNORM_S8_UINT:
		offset_units *= 2.0f;
		break;
	case PIPE_FORMAT_Z16_UNORM:
		offset_units *= 4.0f;
		break;
	default:;
	}

	r600_write_context_reg_seq(cs, R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
	radeon_emit(cs, fui(offset_scale));
	radeon_emit(cs, fui(offset_units));
	radeon_emit(cs, fui(offset_scale));
	radeon_emit(cs, fui(offset_units));
}
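
/* r600_get_blend_control packs one gallium per-RT blend state into a
 * CB_BLEND_CONTROL dword. As a worked illustration (not extra driver
 * logic): classic alpha blending with rgb_func = PIPE_BLEND_ADD,
 * rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA and rgb_dst_factor =
 * PIPE_BLENDFACTOR_INV_SRC_ALPHA yields
 *   S_028804_COLOR_COMB_FCN(V_028804_COMB_DST_PLUS_SRC) |
 *   S_028804_COLOR_SRCBLEND(V_028804_BLEND_SRC_ALPHA) |
 *   S_028804_COLOR_DESTBLEND(V_028804_BLEND_ONE_MINUS_SRC_ALPHA),
 * with SEPARATE_ALPHA_BLEND added only when the alpha equation differs
 * from the RGB one. */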
static uint32_t r600_get_blend_control(const struct pipe_blend_state *state, unsigned i)
{
	int j = state->independent_blend_enable ? i : 0;

	unsigned eqRGB = state->rt[j].rgb_func;
	unsigned srcRGB = state->rt[j].rgb_src_factor;
	unsigned dstRGB = state->rt[j].rgb_dst_factor;

	unsigned eqA = state->rt[j].alpha_func;
	unsigned srcA = state->rt[j].alpha_src_factor;
	unsigned dstA = state->rt[j].alpha_dst_factor;
	uint32_t bc = 0;

	if (!state->rt[j].blend_enable)
		return 0;

	bc |= S_028804_COLOR_COMB_FCN(r600_translate_blend_function(eqRGB));
	bc |= S_028804_COLOR_SRCBLEND(r600_translate_blend_factor(srcRGB));
	bc |= S_028804_COLOR_DESTBLEND(r600_translate_blend_factor(dstRGB));

	if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
		bc |= S_028804_SEPARATE_ALPHA_BLEND(1);
		bc |= S_028804_ALPHA_COMB_FCN(r600_translate_blend_function(eqA));
		bc |= S_028804_ALPHA_SRCBLEND(r600_translate_blend_factor(srcA));
		bc |= S_028804_ALPHA_DESTBLEND(r600_translate_blend_factor(dstA));
	}
	return bc;
}

static void *r600_create_blend_state_mode(struct pipe_context *ctx,
					  const struct pipe_blend_state *state,
					  int mode)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	uint32_t color_control = 0, target_mask = 0;
	struct r600_blend_state *blend = CALLOC_STRUCT(r600_blend_state);

	if (!blend) {
		return NULL;
	}

	r600_init_command_buffer(&blend->buffer, 20);
	r600_init_command_buffer(&blend->buffer_no_blend, 20);

	/* R600 does not support per-MRT blends */
	if (rctx->b.family > CHIP_R600)
		color_control |= S_028808_PER_MRT_BLEND(1);

	if (state->logicop_enable) {
		color_control |= (state->logicop_func << 16) | (state->logicop_func << 20);
	} else {
		color_control |= (0xcc << 16);
	}
	/* We pretend 8 buffers are used; CB_SHADER_MASK will disable the unused ones. */
	if (state->independent_blend_enable) {
		for (int i = 0; i < 8; i++) {
			if (state->rt[i].blend_enable) {
				color_control |= S_028808_TARGET_BLEND_ENABLE(1 << i);
			}
			target_mask |= (state->rt[i].colormask << (4 * i));
		}
	} else {
		for (int i = 0; i < 8; i++) {
			if (state->rt[0].blend_enable) {
				color_control |= S_028808_TARGET_BLEND_ENABLE(1 << i);
			}
			target_mask |= (state->rt[0].colormask << (4 * i));
		}
	}

	if (target_mask)
		color_control |= S_028808_SPECIAL_OP(mode);
	else
		color_control |= S_028808_SPECIAL_OP(V_028808_DISABLE);

	/* only MRT0 has dual src blend */
	blend->dual_src_blend = util_blend_state_is_dual(state, 0);
	blend->cb_target_mask = target_mask;
	blend->cb_color_control = color_control;
	blend->cb_color_control_no_blend = color_control & C_028808_TARGET_BLEND_ENABLE;
	blend->alpha_to_one = state->alpha_to_one;

	r600_store_context_reg(&blend->buffer, R_028D44_DB_ALPHA_TO_MASK,
			       S_028D44_ALPHA_TO_MASK_ENABLE(state->alpha_to_coverage) |
			       S_028D44_ALPHA_TO_MASK_OFFSET0(2) |
			       S_028D44_ALPHA_TO_MASK_OFFSET1(2) |
			       S_028D44_ALPHA_TO_MASK_OFFSET2(2) |
			       S_028D44_ALPHA_TO_MASK_OFFSET3(2));

	/* Copy over the registers set so far into buffer_no_blend. */
	memcpy(blend->buffer_no_blend.buf, blend->buffer.buf, blend->buffer.num_dw * 4);
	blend->buffer_no_blend.num_dw = blend->buffer.num_dw;
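
	/* From here on, blend->buffer and blend->buffer_no_blend diverge:
	 * both contain the common registers stored above, but only
	 * blend->buffer receives the CB_BLEND* registers below, so the
	 * no-blend variant can be bound when blending is disabled. */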

	/* Only add blend registers if blending is enabled. */
	if (!G_028808_TARGET_BLEND_ENABLE(color_control)) {
		return blend;
	}

	/* The first R600 does not support per-MRT blends */
	r600_store_context_reg(&blend->buffer, R_028804_CB_BLEND_CONTROL,
			       r600_get_blend_control(state, 0));

	if (rctx->b.family > CHIP_R600) {
		r600_store_context_reg_seq(&blend->buffer, R_028780_CB_BLEND0_CONTROL, 8);
		for (int i = 0; i < 8; i++) {
			r600_store_value(&blend->buffer, r600_get_blend_control(state, i));
		}
	}
	return blend;
}

static void *r600_create_blend_state(struct pipe_context *ctx,
				     const struct pipe_blend_state *state)
{
	return r600_create_blend_state_mode(ctx, state, V_028808_SPECIAL_NORMAL);
}

static void *r600_create_dsa_state(struct pipe_context *ctx,
				   const struct pipe_depth_stencil_alpha_state *state)
{
	unsigned db_depth_control, alpha_test_control, alpha_ref;
	struct r600_dsa_state *dsa = CALLOC_STRUCT(r600_dsa_state);

	if (dsa == NULL) {
		return NULL;
	}

	r600_init_command_buffer(&dsa->buffer, 3);

	dsa->valuemask[0] = state->stencil[0].valuemask;
	dsa->valuemask[1] = state->stencil[1].valuemask;
	dsa->writemask[0] = state->stencil[0].writemask;
	dsa->writemask[1] = state->stencil[1].writemask;
	dsa->zwritemask = state->depth.writemask;

	db_depth_control = S_028800_Z_ENABLE(state->depth.enabled) |
			   S_028800_Z_WRITE_ENABLE(state->depth.writemask) |
			   S_028800_ZFUNC(state->depth.func);

	/* stencil */
	if (state->stencil[0].enabled) {
		db_depth_control |= S_028800_STENCIL_ENABLE(1);
		db_depth_control |= S_028800_STENCILFUNC(state->stencil[0].func); /* translates straight */
		db_depth_control |= S_028800_STENCILFAIL(r600_translate_stencil_op(state->stencil[0].fail_op));
		db_depth_control |= S_028800_STENCILZPASS(r600_translate_stencil_op(state->stencil[0].zpass_op));
		db_depth_control |= S_028800_STENCILZFAIL(r600_translate_stencil_op(state->stencil[0].zfail_op));

		if (state->stencil[1].enabled) {
			db_depth_control |= S_028800_BACKFACE_ENABLE(1);
			db_depth_control |= S_028800_STENCILFUNC_BF(state->stencil[1].func); /* translates straight */
			db_depth_control |= S_028800_STENCILFAIL_BF(r600_translate_stencil_op(state->stencil[1].fail_op));
			db_depth_control |= S_028800_STENCILZPASS_BF(r600_translate_stencil_op(state->stencil[1].zpass_op));
			db_depth_control |= S_028800_STENCILZFAIL_BF(r600_translate_stencil_op(state->stencil[1].zfail_op));
		}
	}

	/* alpha */
	alpha_test_control = 0;
	alpha_ref = 0;
	if (state->alpha.enabled) {
		alpha_test_control = S_028410_ALPHA_FUNC(state->alpha.func);
		alpha_test_control |= S_028410_ALPHA_TEST_ENABLE(1);
		alpha_ref = fui(state->alpha.ref_value);
	}
	dsa->sx_alpha_test_control = alpha_test_control & 0xff;
	dsa->alpha_ref = alpha_ref;

	r600_store_context_reg(&dsa->buffer, R_028800_DB_DEPTH_CONTROL, db_depth_control);
	return dsa;
}

static void *r600_create_rs_state(struct pipe_context *ctx,
				  const struct pipe_rasterizer_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	unsigned tmp, sc_mode_cntl, spi_interp;
	float psize_min, psize_max;
	struct r600_rasterizer_state *rs = CALLOC_STRUCT(r600_rasterizer_state);

	if (rs == NULL) {
		return NULL;
	}

	r600_init_command_buffer(&rs->buffer, 30);

	rs->flatshade = state->flatshade;
	rs->sprite_coord_enable = state->sprite_coord_enable;
	rs->two_side = state->light_twoside;
	rs->clip_plane_enable = state->clip_plane_enable;
	rs->pa_sc_line_stipple = state->line_stipple_enable ?
				 S_028A0C_LINE_PATTERN(state->line_stipple_pattern) |
				 S_028A0C_REPEAT_COUNT(state->line_stipple_factor) : 0;
	rs->pa_cl_clip_cntl =
		S_028810_PS_UCP_MODE(3) |
		S_028810_ZCLIP_NEAR_DISABLE(!state->depth_clip) |
		S_028810_ZCLIP_FAR_DISABLE(!state->depth_clip) |
		S_028810_DX_LINEAR_ATTR_CLIP_ENA(1);
	if (rctx->b.chip_class == R700) {
		rs->pa_cl_clip_cntl |=
			S_028810_DX_RASTERIZATION_KILL(state->rasterizer_discard);
	}
	rs->multisample_enable = state->multisample;

	/* offset */
	rs->offset_units = state->offset_units;
	rs->offset_scale = state->offset_scale * 12.0f;
	rs->offset_enable = state->offset_point || state->offset_line || state->offset_tri;

	if (state->point_size_per_vertex) {
		psize_min = util_get_min_point_size(state);
		psize_max = 8192;
	} else {
		/* Force the point size to be as if the vertex output was disabled. */
		psize_min = state->point_size;
		psize_max = state->point_size;
	}

	sc_mode_cntl = S_028A4C_MSAA_ENABLE(state->multisample) |
		       S_028A4C_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
		       S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1);
	if (rctx->b.chip_class >= R700) {
		sc_mode_cntl |= S_028A4C_FORCE_EOV_REZ_ENABLE(1) |
				S_028A4C_R700_ZMM_LINE_OFFSET(1) |
				S_028A4C_R700_VPORT_SCISSOR_ENABLE(state->scissor);
	} else {
		sc_mode_cntl |= S_028A4C_WALK_ALIGN8_PRIM_FITS_ST(1);
		rs->scissor_enable = state->scissor;
	}

	spi_interp = S_0286D4_FLAT_SHADE_ENA(1);
	if (state->sprite_coord_enable) {
		spi_interp |= S_0286D4_PNT_SPRITE_ENA(1) |
			      S_0286D4_PNT_SPRITE_OVRD_X(2) |
			      S_0286D4_PNT_SPRITE_OVRD_Y(3) |
			      S_0286D4_PNT_SPRITE_OVRD_Z(0) |
			      S_0286D4_PNT_SPRITE_OVRD_W(1);
		if (state->sprite_coord_mode != PIPE_SPRITE_COORD_UPPER_LEFT) {
			spi_interp |= S_0286D4_PNT_SPRITE_TOP_1(1);
		}
	}

	r600_store_context_reg_seq(&rs->buffer, R_028A00_PA_SU_POINT_SIZE, 3);
	/* point size 12.4 fixed point (divide by two, because 0.5 = 1 pixel) */
	tmp = r600_pack_float_12p4(state->point_size/2);
	r600_store_value(&rs->buffer, /* R_028A00_PA_SU_POINT_SIZE */
			 S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
	r600_store_value(&rs->buffer, /* R_028A04_PA_SU_POINT_MINMAX */
			 S_028A04_MIN_SIZE(r600_pack_float_12p4(psize_min/2)) |
			 S_028A04_MAX_SIZE(r600_pack_float_12p4(psize_max/2)));
	r600_store_value(&rs->buffer, /* R_028A08_PA_SU_LINE_CNTL */
			 S_028A08_WIDTH(r600_pack_float_12p4(state->line_width/2)));

	r600_store_context_reg(&rs->buffer, R_0286D4_SPI_INTERP_CONTROL_0, spi_interp);
	r600_store_context_reg(&rs->buffer, R_028A4C_PA_SC_MODE_CNTL, sc_mode_cntl);
	r600_store_context_reg(&rs->buffer, R_028C08_PA_SU_VTX_CNTL,
			       S_028C08_PIX_CENTER_HALF(state->half_pixel_center) |
			       S_028C08_QUANT_MODE(V_028C08_X_1_256TH));
	r600_store_context_reg(&rs->buffer, R_028DFC_PA_SU_POLY_OFFSET_CLAMP, fui(state->offset_clamp));

	rs->pa_su_sc_mode_cntl = S_028814_PROVOKING_VTX_LAST(!state->flatshade_first) |
				 S_028814_CULL_FRONT(state->cull_face & PIPE_FACE_FRONT ? 1 : 0) |
				 S_028814_CULL_BACK(state->cull_face & PIPE_FACE_BACK ? 1 : 0) |
				 S_028814_FACE(!state->front_ccw) |
				 S_028814_POLY_OFFSET_FRONT_ENABLE(util_get_offset(state, state->fill_front)) |
				 S_028814_POLY_OFFSET_BACK_ENABLE(util_get_offset(state, state->fill_back)) |
				 S_028814_POLY_OFFSET_PARA_ENABLE(state->offset_point || state->offset_line) |
				 S_028814_POLY_MODE(state->fill_front != PIPE_POLYGON_MODE_FILL ||
						    state->fill_back != PIPE_POLYGON_MODE_FILL) |
				 S_028814_POLYMODE_FRONT_PTYPE(r600_translate_fill(state->fill_front)) |
				 S_028814_POLYMODE_BACK_PTYPE(r600_translate_fill(state->fill_back));
	if (rctx->b.chip_class == R700) {
		r600_store_context_reg(&rs->buffer, R_028814_PA_SU_SC_MODE_CNTL, rs->pa_su_sc_mode_cntl);
	}
	if (rctx->b.chip_class == R600) {
		r600_store_context_reg(&rs->buffer, R_028350_SX_MISC,
				       S_028350_MULTIPASS(state->rasterizer_discard));
	}
	return rs;
}

static void *r600_create_sampler_state(struct pipe_context *ctx,
				       const struct pipe_sampler_state *state)
{
	struct r600_pipe_sampler_state *ss = CALLOC_STRUCT(r600_pipe_sampler_state);
	unsigned aniso_flag_offset = state->max_anisotropy > 1 ? 4 : 0;

	if (ss == NULL) {
		return NULL;
	}

	ss->seamless_cube_map = state->seamless_cube_map;
	ss->border_color_use = sampler_state_needs_border_color(state);

	/* R_03C000_SQ_TEX_SAMPLER_WORD0_0 */
	ss->tex_sampler_words[0] =
		S_03C000_CLAMP_X(r600_tex_wrap(state->wrap_s)) |
		S_03C000_CLAMP_Y(r600_tex_wrap(state->wrap_t)) |
		S_03C000_CLAMP_Z(r600_tex_wrap(state->wrap_r)) |
		S_03C000_XY_MAG_FILTER(r600_tex_filter(state->mag_img_filter) | aniso_flag_offset) |
		S_03C000_XY_MIN_FILTER(r600_tex_filter(state->min_img_filter) | aniso_flag_offset) |
		S_03C000_MIP_FILTER(r600_tex_mipfilter(state->min_mip_filter)) |
		S_03C000_MAX_ANISO(r600_tex_aniso_filter(state->max_anisotropy)) |
		S_03C000_DEPTH_COMPARE_FUNCTION(r600_tex_compare(state->compare_func)) |
		S_03C000_BORDER_COLOR_TYPE(ss->border_color_use ? V_03C000_SQ_TEX_BORDER_COLOR_REGISTER : 0);
	/* R_03C004_SQ_TEX_SAMPLER_WORD1_0 */
	ss->tex_sampler_words[1] =
		S_03C004_MIN_LOD(S_FIXED(CLAMP(state->min_lod, 0, 15), 6)) |
		S_03C004_MAX_LOD(S_FIXED(CLAMP(state->max_lod, 0, 15), 6)) |
		S_03C004_LOD_BIAS(S_FIXED(CLAMP(state->lod_bias, -16, 16), 6));
	/* R_03C008_SQ_TEX_SAMPLER_WORD2_0 */
	ss->tex_sampler_words[2] = S_03C008_TYPE(1);

	if (ss->border_color_use) {
		memcpy(&ss->border_color, &state->border_color, sizeof(state->border_color));
	}
	return ss;
}
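
/* Buffer textures have no mipmaps or tiling, so their sampler views are
 * programmed like vertex-fetch resources: words 0/1 hold the byte offset
 * and size-1, word 2 packs stride, data/number format, component swap and
 * endian swap, and word 6 marks the resource as a VALID_BUFFER type. */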
static struct pipe_sampler_view *
texture_buffer_sampler_view(struct r600_pipe_sampler_view *view,
			    unsigned width0, unsigned height0)
{
	struct r600_texture *tmp = (struct r600_texture*)view->base.texture;
	int stride = util_format_get_blocksize(view->base.format);
	unsigned format, num_format, format_comp, endian;
	uint64_t offset = view->base.u.buf.first_element * stride;
	unsigned size = (view->base.u.buf.last_element - view->base.u.buf.first_element + 1) * stride;

	r600_vertex_data_type(view->base.format,
			      &format, &num_format, &format_comp,
			      &endian);

	view->tex_resource = &tmp->resource;
	view->skip_mip_address_reloc = true;

	view->tex_resource_words[0] = offset;
	view->tex_resource_words[1] = size - 1;
	view->tex_resource_words[2] = S_038008_BASE_ADDRESS_HI(offset >> 32UL) |
				      S_038008_STRIDE(stride) |
				      S_038008_DATA_FORMAT(format) |
				      S_038008_NUM_FORMAT_ALL(num_format) |
				      S_038008_FORMAT_COMP_ALL(format_comp) |
				      S_038008_ENDIAN_SWAP(endian);
	view->tex_resource_words[3] = 0;
	/*
	 * In theory dword 4 holds the number of elements, for use with
	 * resinfo, but it seems to utterly fail to work; the AMD GPU shader
	 * analyser uses a const buffer to store the element sizes for buffer txq.
	 */
	view->tex_resource_words[4] = 0;
	view->tex_resource_words[5] = 0;
	view->tex_resource_words[6] = S_038018_TYPE(V_038010_SQ_TEX_VTX_VALID_BUFFER);
	return &view->base;
}

struct pipe_sampler_view *
r600_create_sampler_view_custom(struct pipe_context *ctx,
				struct pipe_resource *texture,
				const struct pipe_sampler_view *state,
				unsigned width_first_level, unsigned height_first_level)
{
	struct r600_pipe_sampler_view *view = CALLOC_STRUCT(r600_pipe_sampler_view);
	struct r600_texture *tmp = (struct r600_texture*)texture;
	unsigned format, endian;
	uint32_t word4 = 0, yuv_format = 0, pitch = 0;
	unsigned char swizzle[4], array_mode = 0;
	unsigned width, height, depth, offset_level, last_level;

	if (view == NULL)
		return NULL;

	/* initialize base object */
	view->base = *state;
	view->base.texture = NULL;
	pipe_reference(NULL, &texture->reference);
	view->base.texture = texture;
	view->base.reference.count = 1;
	view->base.context = ctx;

	if (texture->target == PIPE_BUFFER)
		return texture_buffer_sampler_view(view, texture->width0, 1);

	swizzle[0] = state->swizzle_r;
	swizzle[1] = state->swizzle_g;
	swizzle[2] = state->swizzle_b;
	swizzle[3] = state->swizzle_a;

	format = r600_translate_texformat(ctx->screen, state->format,
					  swizzle,
					  &word4, &yuv_format);
	assert(format != ~0);
	if (format == ~0) {
		FREE(view);
		return NULL;
	}

	if (tmp->is_depth && !tmp->is_flushing_texture && !r600_can_read_depth(tmp)) {
		if (!r600_init_flushed_depth_texture(ctx, texture, NULL)) {
			FREE(view);
			return NULL;
		}
		tmp = tmp->flushed_depth_texture;
	}

	endian = r600_colorformat_endian_swap(format);

	offset_level = state->u.tex.first_level;
	last_level = state->u.tex.last_level - offset_level;
	width = width_first_level;
	height = height_first_level;
	depth = u_minify(texture->depth0, offset_level);
	pitch = tmp->surface.level[offset_level].nblk_x * util_format_get_blockwidth(state->format);

	if (texture->target == PIPE_TEXTURE_1D_ARRAY) {
		height = 1;
		depth = texture->array_size;
	} else if (texture->target == PIPE_TEXTURE_2D_ARRAY) {
		depth = texture->array_size;
	} else if (texture->target == PIPE_TEXTURE_CUBE_ARRAY)
		depth = texture->array_size / 6;
	switch (tmp->surface.level[offset_level].mode) {
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
		array_mode = V_038000_ARRAY_LINEAR_ALIGNED;
		break;
	case RADEON_SURF_MODE_1D:
		array_mode = V_038000_ARRAY_1D_TILED_THIN1;
		break;
	case RADEON_SURF_MODE_2D:
		array_mode = V_038000_ARRAY_2D_TILED_THIN1;
		break;
	case RADEON_SURF_MODE_LINEAR:
	default:
		array_mode = V_038000_ARRAY_LINEAR_GENERAL;
		break;
	}

	view->tex_resource = &tmp->resource;
	view->tex_resource_words[0] = (S_038000_DIM(r600_tex_dim(texture->target, texture->nr_samples)) |
				       S_038000_TILE_MODE(array_mode) |
				       S_038000_TILE_TYPE(tmp->non_disp_tiling) |
				       S_038000_PITCH((pitch / 8) - 1) |
				       S_038000_TEX_WIDTH(width - 1));
	view->tex_resource_words[1] = (S_038004_TEX_HEIGHT(height - 1) |
				       S_038004_TEX_DEPTH(depth - 1) |
				       S_038004_DATA_FORMAT(format));
	view->tex_resource_words[2] = tmp->surface.level[offset_level].offset >> 8;
	if (offset_level >= tmp->surface.last_level) {
		view->tex_resource_words[3] = tmp->surface.level[offset_level].offset >> 8;
	} else {
		view->tex_resource_words[3] = tmp->surface.level[offset_level + 1].offset >> 8;
	}
	view->tex_resource_words[4] = (word4 |
				       S_038010_REQUEST_SIZE(1) |
				       S_038010_ENDIAN_SWAP(endian) |
				       S_038010_BASE_LEVEL(0));
	view->tex_resource_words[5] = (S_038014_BASE_ARRAY(state->u.tex.first_layer) |
				       S_038014_LAST_ARRAY(state->u.tex.last_layer));
	if (texture->nr_samples > 1) {
		/* LAST_LEVEL holds log2(nr_samples) for multisample textures */
		view->tex_resource_words[5] |= S_038014_LAST_LEVEL(util_logbase2(texture->nr_samples));
	} else {
		view->tex_resource_words[5] |= S_038014_LAST_LEVEL(last_level);
	}
	view->tex_resource_words[6] = (S_038018_TYPE(V_038010_SQ_TEX_VTX_VALID_TEXTURE) |
				       S_038018_MAX_ANISO(4 /* max 16 samples */));
	return &view->base;
}

static struct pipe_sampler_view *
r600_create_sampler_view(struct pipe_context *ctx,
			 struct pipe_resource *tex,
			 const struct pipe_sampler_view *state)
{
	return r600_create_sampler_view_custom(ctx, tex, state,
					       u_minify(tex->width0, state->u.tex.first_level),
					       u_minify(tex->height0, state->u.tex.first_level));
}

static void r600_emit_clip_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct pipe_clip_state *state = &rctx->clip_state.state;

	r600_write_context_reg_seq(cs, R_028E20_PA_CL_UCP0_X, 6*4);
	radeon_emit_array(cs, (unsigned*)state, 6*4);
}

static void r600_set_polygon_stipple(struct pipe_context *ctx,
				     const struct pipe_poly_stipple *state)
{
}
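
/* Note: R700+ has a per-viewport scissor enable in PA_SC_MODE_CNTL (see
 * S_028A4C_R700_VPORT_SCISSOR_ENABLE above), while plain R600 does not, so
 * when scissoring is disabled on R600 a full 8192x8192 rectangle is
 * emitted instead of the application rectangle. */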
static void r600_emit_scissor_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct r600_scissor_state *rstate = (struct r600_scissor_state *)atom;
	struct pipe_scissor_state *state = &rstate->scissor;
	unsigned offset = rstate->idx * 4 * 2;

	if (rctx->b.chip_class != R600 || rctx->scissor[0].enable) {
		r600_write_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + offset, 2);
		radeon_emit(cs, S_028240_TL_X(state->minx) | S_028240_TL_Y(state->miny) |
				S_028240_WINDOW_OFFSET_DISABLE(1));
		radeon_emit(cs, S_028244_BR_X(state->maxx) | S_028244_BR_Y(state->maxy));
	} else {
		r600_write_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2);
		radeon_emit(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) |
				S_028240_WINDOW_OFFSET_DISABLE(1));
		radeon_emit(cs, S_028244_BR_X(8192) | S_028244_BR_Y(8192));
	}
}

static void r600_set_scissor_states(struct pipe_context *ctx,
				    unsigned start_slot,
				    unsigned num_scissors,
				    const struct pipe_scissor_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	int i;

	for (i = start_slot; i < start_slot + num_scissors; i++) {
		rctx->scissor[i].scissor = state[i - start_slot];
	}

	if (rctx->b.chip_class == R600 && !rctx->scissor[0].enable)
		return;

	for (i = start_slot; i < start_slot + num_scissors; i++) {
		rctx->scissor[i].atom.dirty = true;
	}
}

static struct r600_resource *r600_buffer_create_helper(struct r600_screen *rscreen,
						       unsigned size, unsigned alignment)
{
	struct pipe_resource buffer;

	memset(&buffer, 0, sizeof buffer);
	buffer.target = PIPE_BUFFER;
	buffer.format = PIPE_FORMAT_R8_UNORM;
	buffer.bind = PIPE_BIND_CUSTOM;
	buffer.usage = PIPE_USAGE_DEFAULT;
	buffer.flags = 0;
	buffer.width0 = size;
	buffer.height0 = 1;
	buffer.depth0 = 1;
	buffer.array_size = 1;

	return (struct r600_resource*)
		r600_buffer_create(&rscreen->b.b, &buffer, alignment);
}
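
/* CB sizing note: PITCH_TILE_MAX and SLICE_TILE_MAX are expressed in
 * 8x8-pixel tile units, which is why the code below computes
 * pitch = nblk_x / 8 - 1 and slice = (nblk_x * nblk_y) / 64 - 1 from the
 * surface's block dimensions. */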
static void r600_init_color_surface(struct r600_context *rctx,
				    struct r600_surface *surf,
				    bool force_cmask_fmask)
{
	struct r600_screen *rscreen = rctx->screen;
	struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
	unsigned level = surf->base.u.tex.level;
	unsigned pitch, slice;
	unsigned color_info;
	unsigned color_view;
	unsigned format, swap, ntype, endian;
	unsigned offset;
	const struct util_format_description *desc;
	int i;
	bool blend_bypass = 0, blend_clamp = 1;

	if (rtex->is_depth && !rtex->is_flushing_texture && !r600_can_read_depth(rtex)) {
		r600_init_flushed_depth_texture(&rctx->b.b, surf->base.texture, NULL);
		rtex = rtex->flushed_depth_texture;
		assert(rtex);
	}

	offset = rtex->surface.level[level].offset;
	if (rtex->surface.level[level].mode == RADEON_SURF_MODE_LINEAR) {
		assert(surf->base.u.tex.first_layer == surf->base.u.tex.last_layer);
		offset += rtex->surface.level[level].slice_size *
			  surf->base.u.tex.first_layer;
		color_view = 0;
	} else
		color_view = S_028080_SLICE_START(surf->base.u.tex.first_layer) |
			     S_028080_SLICE_MAX(surf->base.u.tex.last_layer);

	pitch = rtex->surface.level[level].nblk_x / 8 - 1;
	slice = (rtex->surface.level[level].nblk_x * rtex->surface.level[level].nblk_y) / 64;
	if (slice) {
		slice = slice - 1;
	}
	color_info = 0;
	switch (rtex->surface.level[level].mode) {
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
		color_info = S_0280A0_ARRAY_MODE(V_038000_ARRAY_LINEAR_ALIGNED);
		break;
	case RADEON_SURF_MODE_1D:
		color_info = S_0280A0_ARRAY_MODE(V_038000_ARRAY_1D_TILED_THIN1);
		break;
	case RADEON_SURF_MODE_2D:
		color_info = S_0280A0_ARRAY_MODE(V_038000_ARRAY_2D_TILED_THIN1);
		break;
	case RADEON_SURF_MODE_LINEAR:
	default:
		color_info = S_0280A0_ARRAY_MODE(V_038000_ARRAY_LINEAR_GENERAL);
		break;
	}

	desc = util_format_description(surf->base.format);

	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}

	ntype = V_0280A0_NUMBER_UNORM;
	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
		ntype = V_0280A0_NUMBER_SRGB;
	else if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
		if (desc->channel[i].normalized)
			ntype = V_0280A0_NUMBER_SNORM;
		else if (desc->channel[i].pure_integer)
			ntype = V_0280A0_NUMBER_SINT;
	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
		if (desc->channel[i].normalized)
			ntype = V_0280A0_NUMBER_UNORM;
		else if (desc->channel[i].pure_integer)
			ntype = V_0280A0_NUMBER_UINT;
	}

	format = r600_translate_colorformat(rctx->b.chip_class, surf->base.format);
	assert(format != ~0);

	swap = r600_translate_colorswap(surf->base.format);
	assert(swap != ~0);

	if (rtex->resource.b.b.usage == PIPE_USAGE_STAGING) {
		endian = ENDIAN_NONE;
	} else {
		endian = r600_colorformat_endian_swap(format);
	}

	/* set blend bypass according to docs if SINT/UINT or
	   8/24 COLOR variants */
	if (ntype == V_0280A0_NUMBER_UINT || ntype == V_0280A0_NUMBER_SINT ||
	    format == V_0280A0_COLOR_8_24 || format == V_0280A0_COLOR_24_8 ||
	    format == V_0280A0_COLOR_X24_8_32_FLOAT) {
		blend_clamp = 0;
		blend_bypass = 1;
	}

	surf->alphatest_bypass = ntype == V_0280A0_NUMBER_UINT || ntype == V_0280A0_NUMBER_SINT;

	color_info |= S_0280A0_FORMAT(format) |
		      S_0280A0_COMP_SWAP(swap) |
		      S_0280A0_BLEND_BYPASS(blend_bypass) |
		      S_0280A0_BLEND_CLAMP(blend_clamp) |
		      S_0280A0_NUMBER_TYPE(ntype) |
		      S_0280A0_ENDIAN(endian);

	/* EXPORT_NORM is an optimization that can be enabled for better
	 * performance in certain cases
	 */
	if (rctx->b.chip_class == R600) {
		/* EXPORT_NORM can be enabled if:
		 * - 11-bit or smaller UNORM/SNORM/SRGB
		 * - BLEND_CLAMP is enabled
		 * - BLEND_FLOAT32 is disabled
		 */
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS &&
		    (desc->channel[i].size < 12 &&
		     desc->channel[i].type != UTIL_FORMAT_TYPE_FLOAT &&
		     ntype != V_0280A0_NUMBER_UINT &&
		     ntype != V_0280A0_NUMBER_SINT) &&
		    G_0280A0_BLEND_CLAMP(color_info) &&
		    !G_0280A0_BLEND_FLOAT32(color_info)) {
			color_info |= S_0280A0_SOURCE_FORMAT(V_0280A0_EXPORT_NORM);
			surf->export_16bpc = true;
		}
	} else {
		/* EXPORT_NORM can be enabled if:
		 * - 11-bit or smaller UNORM/SNORM/SRGB
		 * - 16-bit or smaller FLOAT
		 */
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS &&
		    ((desc->channel[i].size < 12 &&
		      desc->channel[i].type != UTIL_FORMAT_TYPE_FLOAT &&
		      ntype != V_0280A0_NUMBER_UINT && ntype != V_0280A0_NUMBER_SINT) ||
		     (desc->channel[i].size < 17 &&
		      desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT))) {
			color_info |= S_0280A0_SOURCE_FORMAT(V_0280A0_EXPORT_NORM);
			surf->export_16bpc = true;
		}
	}

	/* These might not always be initialized to zero. */
	surf->cb_color_base = offset >> 8;
	surf->cb_color_size = S_028060_PITCH_TILE_MAX(pitch) |
			      S_028060_SLICE_TILE_MAX(slice);
	surf->cb_color_fmask = surf->cb_color_base;
	surf->cb_color_cmask = surf->cb_color_base;
	surf->cb_color_mask = 0;

	pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_cmask,
				&rtex->resource.b.b);
	pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_fmask,
				&rtex->resource.b.b);

	if (rtex->cmask.size) {
		surf->cb_color_cmask = rtex->cmask.offset >> 8;
		surf->cb_color_mask |= S_028100_CMASK_BLOCK_MAX(rtex->cmask.slice_tile_max);

		if (rtex->fmask.size) {
			color_info |= S_0280A0_TILE_MODE(V_0280A0_FRAG_ENABLE);
			surf->cb_color_fmask = rtex->fmask.offset >> 8;
			surf->cb_color_mask |= S_028100_FMASK_TILE_MAX(rtex->fmask.slice_tile_max);
		} else { /* cmask only */
			color_info |= S_0280A0_TILE_MODE(V_0280A0_CLEAR_ENABLE);
		}
	} else if (force_cmask_fmask) {
		/* Allocate dummy FMASK and CMASK if they aren't allocated already.
		 *
		 * R6xx needs FMASK and CMASK for the destination buffer of color resolve,
		 * otherwise it hangs. We don't have FMASK and CMASK pre-allocated,
		 * because it's not an MSAA buffer.
		 */
		struct r600_cmask_info cmask;
		struct r600_fmask_info fmask;

		r600_texture_get_cmask_info(&rscreen->b, rtex, &cmask);
		r600_texture_get_fmask_info(&rscreen->b, rtex, 8, &fmask);

		/* CMASK. */
		if (!rctx->dummy_cmask ||
		    rctx->dummy_cmask->buf->size < cmask.size ||
		    rctx->dummy_cmask->buf->alignment % cmask.alignment != 0) {
			struct pipe_transfer *transfer;
			void *ptr;

			pipe_resource_reference((struct pipe_resource**)&rctx->dummy_cmask, NULL);
			rctx->dummy_cmask = r600_buffer_create_helper(rscreen, cmask.size, cmask.alignment);

			/* Set the contents to 0xCC. */
			ptr = pipe_buffer_map(&rctx->b.b, &rctx->dummy_cmask->b.b, PIPE_TRANSFER_WRITE, &transfer);
			memset(ptr, 0xCC, cmask.size);
			pipe_buffer_unmap(&rctx->b.b, transfer);
		}
		pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_cmask,
					&rctx->dummy_cmask->b.b);

		/* FMASK. */
		if (!rctx->dummy_fmask ||
		    rctx->dummy_fmask->buf->size < fmask.size ||
		    rctx->dummy_fmask->buf->alignment % fmask.alignment != 0) {
			pipe_resource_reference((struct pipe_resource**)&rctx->dummy_fmask, NULL);
			rctx->dummy_fmask = r600_buffer_create_helper(rscreen, fmask.size, fmask.alignment);
		}
		pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_fmask,
					&rctx->dummy_fmask->b.b);

		/* Init the registers. */
		color_info |= S_0280A0_TILE_MODE(V_0280A0_FRAG_ENABLE);
		surf->cb_color_cmask = 0;
		surf->cb_color_fmask = 0;
		surf->cb_color_mask = S_028100_CMASK_BLOCK_MAX(cmask.slice_tile_max) |
				      S_028100_FMASK_TILE_MAX(fmask.slice_tile_max);
	}

	surf->cb_color_info = color_info;
	surf->cb_color_view = color_view;
	surf->color_initialized = true;
}

static void r600_init_depth_surface(struct r600_context *rctx,
				    struct r600_surface *surf)
{
	struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
	unsigned level, pitch, slice, format, offset, array_mode;

	level = surf->base.u.tex.level;
	offset = rtex->surface.level[level].offset;
	pitch = rtex->surface.level[level].nblk_x / 8 - 1;
	slice = (rtex->surface.level[level].nblk_x * rtex->surface.level[level].nblk_y) / 64;
	if (slice) {
		slice = slice - 1;
	}
	switch (rtex->surface.level[level].mode) {
	case RADEON_SURF_MODE_2D:
		array_mode = V_0280A0_ARRAY_2D_TILED_THIN1;
		break;
	case RADEON_SURF_MODE_1D:
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
	case RADEON_SURF_MODE_LINEAR:
	default:
		array_mode = V_0280A0_ARRAY_1D_TILED_THIN1;
		break;
	}

	format = r600_translate_dbformat(surf->base.format);
	assert(format != ~0);

	surf->db_depth_info = S_028010_ARRAY_MODE(array_mode) | S_028010_FORMAT(format);
	surf->db_depth_base = offset >> 8;
	surf->db_depth_view = S_028004_SLICE_START(surf->base.u.tex.first_layer) |
			      S_028004_SLICE_MAX(surf->base.u.tex.last_layer);
	surf->db_depth_size = S_028000_PITCH_TILE_MAX(pitch) | S_028000_SLICE_TILE_MAX(slice);
	surf->db_prefetch_limit = (rtex->surface.level[level].nblk_y / 8) - 1;

	switch (surf->base.format) {
	case PIPE_FORMAT_Z24X8_UNORM:
	case PIPE_FORMAT_Z24_UNORM_S8_UINT:
		surf->pa_su_poly_offset_db_fmt_cntl =
			S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-24);
		break;
	case PIPE_FORMAT_Z32_FLOAT:
	case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
		surf->pa_su_poly_offset_db_fmt_cntl =
			S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-23) |
			S_028DF8_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
		break;
	case PIPE_FORMAT_Z16_UNORM:
		surf->pa_su_poly_offset_db_fmt_cntl =
			S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-16);
		break;
	default:;
	}

	/* use htile only for first level */
	if (rtex->htile_buffer && !level) {
		surf->db_htile_data_base = 0;
		surf->db_htile_surface = S_028D24_HTILE_WIDTH(1) |
					 S_028D24_HTILE_HEIGHT(1) |
					 S_028D24_FULL_CACHE(1) |
					 S_028D24_LINEAR(1);
		/* preload is not working properly on r6xx/r7xx */
		surf->db_depth_info |= S_028010_TILE_SURFACE_ENABLE(1);
	}

	surf->depth_initialized = true;
}

static void r600_set_framebuffer_state(struct pipe_context *ctx,
				       const struct pipe_framebuffer_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_surface *surf;
	struct r600_texture *rtex;
	unsigned i;

	if (rctx->framebuffer.state.nr_cbufs) {
		rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
		rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB |
				 R600_CONTEXT_FLUSH_AND_INV_CB_META;
	}
	if (rctx->framebuffer.state.zsbuf) {
		rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
		rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB;

		rtex = (struct r600_texture*)rctx->framebuffer.state.zsbuf->texture;
		if (rctx->b.chip_class >= R700 && rtex->htile_buffer) {
			rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB_META;
		}
	}

	/* Set the new state. */
	util_copy_framebuffer_state(&rctx->framebuffer.state, state);

	rctx->framebuffer.export_16bpc = state->nr_cbufs != 0;
	rctx->framebuffer.cb0_is_integer = state->nr_cbufs && state->cbufs[0] &&
					   util_format_is_pure_integer(state->cbufs[0]->format);
	rctx->framebuffer.compressed_cb_mask = 0;
	rctx->framebuffer.is_msaa_resolve = state->nr_cbufs == 2 &&
					    state->cbufs[0] && state->cbufs[1] &&
					    state->cbufs[0]->texture->nr_samples > 1 &&
					    state->cbufs[1]->texture->nr_samples <= 1;
	rctx->framebuffer.nr_samples = util_framebuffer_get_num_samples(state);

	/* Colorbuffers. */
	for (i = 0; i < state->nr_cbufs; i++) {
		/* The resolve buffer must have CMASK and FMASK to prevent hardlocks on R6xx. */
		bool force_cmask_fmask = rctx->b.chip_class == R600 &&
					 rctx->framebuffer.is_msaa_resolve &&
					 i == 1;

		surf = (struct r600_surface*)state->cbufs[i];
		if (!surf)
			continue;

		rtex = (struct r600_texture*)surf->base.texture;
		r600_context_add_resource_size(ctx, state->cbufs[i]->texture);

		if (!surf->color_initialized || force_cmask_fmask) {
			r600_init_color_surface(rctx, surf, force_cmask_fmask);
			if (force_cmask_fmask) {
				/* re-initialize later without compression */
				surf->color_initialized = false;
			}
		}

		if (!surf->export_16bpc) {
			rctx->framebuffer.export_16bpc = false;
		}

		if (rtex->fmask.size && rtex->cmask.size) {
			rctx->framebuffer.compressed_cb_mask |= 1 << i;
		}
	}

	/* Update alpha-test state dependencies.
	 * Alpha-test is done on the first colorbuffer only. */
	if (state->nr_cbufs) {
		bool alphatest_bypass = false;

		surf = (struct r600_surface*)state->cbufs[0];
		if (surf) {
			alphatest_bypass = surf->alphatest_bypass;
		}

		if (rctx->alphatest_state.bypass != alphatest_bypass) {
			rctx->alphatest_state.bypass = alphatest_bypass;
			rctx->alphatest_state.atom.dirty = true;
		}
	}

	/* ZS buffer. */
	if (state->zsbuf) {
		surf = (struct r600_surface*)state->zsbuf;

		r600_context_add_resource_size(ctx, state->zsbuf->texture);

		if (!surf->depth_initialized) {
			r600_init_depth_surface(rctx, surf);
		}

		if (state->zsbuf->format != rctx->poly_offset_state.zs_format) {
			rctx->poly_offset_state.zs_format = state->zsbuf->format;
			rctx->poly_offset_state.atom.dirty = true;
		}

		if (rctx->db_state.rsurf != surf) {
			rctx->db_state.rsurf = surf;
			rctx->db_state.atom.dirty = true;
			rctx->db_misc_state.atom.dirty = true;
		}
	} else if (rctx->db_state.rsurf) {
		rctx->db_state.rsurf = NULL;
		rctx->db_state.atom.dirty = true;
		rctx->db_misc_state.atom.dirty = true;
	}

	if (rctx->cb_misc_state.nr_cbufs != state->nr_cbufs) {
		rctx->cb_misc_state.nr_cbufs = state->nr_cbufs;
		rctx->cb_misc_state.atom.dirty = true;
	}

	if (state->nr_cbufs == 0 && rctx->alphatest_state.bypass) {
		rctx->alphatest_state.bypass = false;
		rctx->alphatest_state.atom.dirty = true;
	}
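
	/* num_dw computed below appears to be an upper bound on the dwords
	 * r600_emit_framebuffer_state will write, used by the atom framework
	 * to reserve command-stream space before emission. */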

	/* Calculate the CS size. */
	rctx->framebuffer.atom.num_dw =
		10 /*COLOR_INFO*/ + 4 /*SCISSOR*/ + 3 /*SHADER_CONTROL*/ + 8 /*MSAA*/;

	if (rctx->framebuffer.state.nr_cbufs) {
		rctx->framebuffer.atom.num_dw += 15 * rctx->framebuffer.state.nr_cbufs;
		rctx->framebuffer.atom.num_dw += 3 * (2 + rctx->framebuffer.state.nr_cbufs);
	}
	if (rctx->framebuffer.state.zsbuf) {
		rctx->framebuffer.atom.num_dw += 16;
	} else if (rctx->screen->b.info.drm_minor >= 18) {
		rctx->framebuffer.atom.num_dw += 3;
	}
	if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770) {
		rctx->framebuffer.atom.num_dw += 2;
	}

	rctx->framebuffer.atom.dirty = true;
}

static uint32_t sample_locs_2x[] = {
	FILL_SREG(-4, 4, 4, -4, -4, 4, 4, -4),
	FILL_SREG(-4, 4, 4, -4, -4, 4, 4, -4),
};
static unsigned max_dist_2x = 4;

static uint32_t sample_locs_4x[] = {
	FILL_SREG(-2, -2, 2, 2, -6, 6, 6, -6),
	FILL_SREG(-2, -2, 2, 2, -6, 6, 6, -6),
};
static unsigned max_dist_4x = 6;

static uint32_t sample_locs_8x[] = {
	FILL_SREG(-1, 1, 1, 5, 3, -5, 5, 3),
	FILL_SREG(-7, -1, -3, -7, 7, -3, -5, 7),
};
static unsigned max_dist_8x = 7;
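
/* Each sample position is packed as a pair of signed 4-bit nibbles in
 * units of 1/16th of a pixel; the int idx:4 bitfield below sign-extends a
 * nibble, and (val.idx + 8) / 16.0f maps it into the [0, 1) range that
 * gallium expects from get_sample_position. */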
static void r600_get_sample_position(struct pipe_context *ctx,
				     unsigned sample_count,
				     unsigned sample_index,
				     float *out_value)
{
	int offset, index;
	struct {
		int idx:4;
	} val;
	switch (sample_count) {
	case 1:
	default:
		out_value[0] = out_value[1] = 0.5;
		break;
	case 2:
		offset = 4 * (sample_index * 2);
		val.idx = (sample_locs_2x[0] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (sample_locs_2x[0] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	case 4:
		offset = 4 * (sample_index * 2);
		val.idx = (sample_locs_4x[0] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (sample_locs_4x[0] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	case 8:
		offset = 4 * (sample_index % 4 * 2);
		index = (sample_index / 4);
		val.idx = (sample_locs_8x[index] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (sample_locs_8x[index] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	}
}

static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	unsigned max_dist = 0;

	if (rctx->b.family == CHIP_R600) {
		switch (nr_samples) {
		default:
			nr_samples = 0;
			break;
		case 2:
			r600_write_config_reg(cs, R_008B40_PA_SC_AA_SAMPLE_LOCS_2S, sample_locs_2x[0]);
			max_dist = max_dist_2x;
			break;
		case 4:
			r600_write_config_reg(cs, R_008B44_PA_SC_AA_SAMPLE_LOCS_4S, sample_locs_4x[0]);
			max_dist = max_dist_4x;
			break;
		case 8:
			r600_write_config_reg_seq(cs, R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0, 2);
			radeon_emit(cs, sample_locs_8x[0]); /* R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0 */
			radeon_emit(cs, sample_locs_8x[1]); /* R_008B4C_PA_SC_AA_SAMPLE_LOCS_8S_WD1 */
			max_dist = max_dist_8x;
			break;
		}
	} else {
		switch (nr_samples) {
		default:
			r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
			radeon_emit(cs, 0); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
			radeon_emit(cs, 0); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
			nr_samples = 0;
			break;
		case 2:
			r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
			radeon_emit(cs, sample_locs_2x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
			radeon_emit(cs, sample_locs_2x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
			max_dist = max_dist_2x;
			break;
		case 4:
			r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
			radeon_emit(cs, sample_locs_4x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
			radeon_emit(cs, sample_locs_4x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
			max_dist = max_dist_4x;
			break;
		case 8:
			r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
			radeon_emit(cs, sample_locs_8x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
			radeon_emit(cs, sample_locs_8x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
			max_dist = max_dist_8x;
			break;
		}
	}

	if (nr_samples > 1) {
		r600_write_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
		radeon_emit(cs, S_028C00_LAST_PIXEL(1) |
				S_028C00_EXPAND_LINE_WIDTH(1)); /* R_028C00_PA_SC_LINE_CNTL */
		radeon_emit(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) |
				S_028C04_MAX_SAMPLE_DIST(max_dist)); /* R_028C04_PA_SC_AA_CONFIG */
	} else {
		r600_write_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
		radeon_emit(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */
		radeon_emit(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */
	}
}

static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct pipe_framebuffer_state *state = &rctx->framebuffer.state;
	unsigned nr_cbufs = state->nr_cbufs;
	struct r600_surface **cb = (struct r600_surface**)&state->cbufs[0];
	unsigned i, sbu = 0;

	/* Colorbuffers. */
	r600_write_context_reg_seq(cs, R_0280A0_CB_COLOR0_INFO, 8);
	for (i = 0; i < nr_cbufs; i++) {
		radeon_emit(cs, cb[i] ? cb[i]->cb_color_info : 0);
	}
	/* set CB_COLOR1_INFO for possible dual-src blending */
	if (i == 1 && cb[0]) {
		radeon_emit(cs, cb[0]->cb_color_info);
		i++;
	}
	for (; i < 8; i++) {
		radeon_emit(cs, 0);
	}

	if (nr_cbufs) {
		for (i = 0; i < nr_cbufs; i++) {
			unsigned reloc;

			if (!cb[i])
				continue;

			/* COLOR_BASE */
			r600_write_context_reg(cs, R_028040_CB_COLOR0_BASE + i*4, cb[i]->cb_color_base);

			reloc = r600_context_bo_reloc(&rctx->b,
						      &rctx->b.rings.gfx,
						      (struct r600_resource*)cb[i]->base.texture,
						      RADEON_USAGE_READWRITE,
						      cb[i]->base.texture->nr_samples > 1 ?
							      RADEON_PRIO_COLOR_BUFFER_MSAA :
							      RADEON_PRIO_COLOR_BUFFER);
			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
			radeon_emit(cs, reloc);

			/* FMASK */
			r600_write_context_reg(cs, R_0280E0_CB_COLOR0_FRAG + i*4, cb[i]->cb_color_fmask);

			reloc = r600_context_bo_reloc(&rctx->b,
						      &rctx->b.rings.gfx,
						      cb[i]->cb_buffer_fmask,
						      RADEON_USAGE_READWRITE,
						      cb[i]->base.texture->nr_samples > 1 ?
							      RADEON_PRIO_COLOR_BUFFER_MSAA :
							      RADEON_PRIO_COLOR_BUFFER);
			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
			radeon_emit(cs, reloc);

			/* CMASK */
			r600_write_context_reg(cs, R_0280C0_CB_COLOR0_TILE + i*4, cb[i]->cb_color_cmask);

			reloc = r600_context_bo_reloc(&rctx->b,
						      &rctx->b.rings.gfx,
						      cb[i]->cb_buffer_cmask,
						      RADEON_USAGE_READWRITE,
						      cb[i]->base.texture->nr_samples > 1 ?
							      RADEON_PRIO_COLOR_BUFFER_MSAA :
							      RADEON_PRIO_COLOR_BUFFER);
			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
			radeon_emit(cs, reloc);
		}

		r600_write_context_reg_seq(cs, R_028060_CB_COLOR0_SIZE, nr_cbufs);
		for (i = 0; i < nr_cbufs; i++) {
			radeon_emit(cs, cb[i] ? cb[i]->cb_color_size : 0);
		}

		r600_write_context_reg_seq(cs, R_028080_CB_COLOR0_VIEW, nr_cbufs);
		for (i = 0; i < nr_cbufs; i++) {
			radeon_emit(cs, cb[i] ? cb[i]->cb_color_view : 0);
		}

		r600_write_context_reg_seq(cs, R_028100_CB_COLOR0_MASK, nr_cbufs);
		for (i = 0; i < nr_cbufs; i++) {
			radeon_emit(cs, cb[i] ? cb[i]->cb_color_mask : 0);
		}

		sbu |= SURFACE_BASE_UPDATE_COLOR_NUM(nr_cbufs);
	}

	/* SURFACE_BASE_UPDATE */
	if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770 && sbu) {
		radeon_emit(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0));
		radeon_emit(cs, sbu);
		sbu = 0;
	}

	/* Zbuffer. */
	if (state->zsbuf) {
		struct r600_surface *surf = (struct r600_surface*)state->zsbuf;
		unsigned reloc = r600_context_bo_reloc(&rctx->b,
						       &rctx->b.rings.gfx,
						       (struct r600_resource*)state->zsbuf->texture,
						       RADEON_USAGE_READWRITE,
						       surf->base.texture->nr_samples > 1 ?
							       RADEON_PRIO_DEPTH_BUFFER_MSAA :
							       RADEON_PRIO_DEPTH_BUFFER);

		r600_write_context_reg(cs, R_028DF8_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
				       surf->pa_su_poly_offset_db_fmt_cntl);

		r600_write_context_reg_seq(cs, R_028000_DB_DEPTH_SIZE, 2);
		radeon_emit(cs, surf->db_depth_size); /* R_028000_DB_DEPTH_SIZE */
		radeon_emit(cs, surf->db_depth_view); /* R_028004_DB_DEPTH_VIEW */
		r600_write_context_reg_seq(cs, R_02800C_DB_DEPTH_BASE, 2);
		radeon_emit(cs, surf->db_depth_base); /* R_02800C_DB_DEPTH_BASE */
		radeon_emit(cs, surf->db_depth_info); /* R_028010_DB_DEPTH_INFO */

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, reloc);

		r600_write_context_reg(cs, R_028D34_DB_PREFETCH_LIMIT, surf->db_prefetch_limit);

		sbu |= SURFACE_BASE_UPDATE_DEPTH;
	} else if (rctx->screen->b.info.drm_minor >= 18) {
		/* DRM 2.6.18 allows the INVALID format to disable depth/stencil.
		 * Older kernels are out of luck. */
		r600_write_context_reg(cs, R_028010_DB_DEPTH_INFO, S_028010_FORMAT(V_028010_DEPTH_INVALID));
	}

	/* SURFACE_BASE_UPDATE */
	if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770 && sbu) {
		radeon_emit(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0));
		radeon_emit(cs, sbu);
		sbu = 0;
	}

	/* Framebuffer dimensions. */
	r600_write_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2);
	radeon_emit(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) |
			S_028240_WINDOW_OFFSET_DISABLE(1)); /* R_028204_PA_SC_WINDOW_SCISSOR_TL */
	radeon_emit(cs, S_028244_BR_X(state->width) |
			S_028244_BR_Y(state->height)); /* R_028208_PA_SC_WINDOW_SCISSOR_BR */

	if (rctx->framebuffer.is_msaa_resolve) {
		r600_write_context_reg(cs, R_0287A0_CB_SHADER_CONTROL, 1);
	} else {
		/* Always enable the first colorbuffer in CB_SHADER_CONTROL. This
		 * ensures that the alpha-test works even if there is
		 * no colorbuffer bound. */
		r600_write_context_reg(cs, R_0287A0_CB_SHADER_CONTROL,
				       (1ull << MAX2(nr_cbufs, 1)) - 1);
	}

	r600_emit_msaa_state(rctx, rctx->framebuffer.nr_samples);
}

static void r600_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct r600_cb_misc_state *a = (struct r600_cb_misc_state*)atom;

	if (G_028808_SPECIAL_OP(a->cb_color_control) == V_028808_SPECIAL_RESOLVE_BOX) {
		r600_write_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
		if (rctx->b.chip_class == R600) {
			radeon_emit(cs, 0xff); /* R_028238_CB_TARGET_MASK */
			radeon_emit(cs, 0xff); /* R_02823C_CB_SHADER_MASK */
		} else {
			radeon_emit(cs, 0xf); /* R_028238_CB_TARGET_MASK */
			radeon_emit(cs, 0xf); /* R_02823C_CB_SHADER_MASK */
		}
		r600_write_context_reg(cs, R_028808_CB_COLOR_CONTROL, a->cb_color_control);
	} else {
		unsigned fb_colormask = (1ULL << ((unsigned)a->nr_cbufs * 4)) - 1;
		unsigned ps_colormask = (1ULL << ((unsigned)a->nr_ps_color_outputs * 4)) - 1;
		unsigned multiwrite = a->multiwrite && a->nr_cbufs > 1;

		r600_write_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
		radeon_emit(cs, a->blend_colormask & fb_colormask); /* R_028238_CB_TARGET_MASK */
		/* Always enable the first color output to make sure alpha-test works even without one. */
		radeon_emit(cs, 0xf | (multiwrite ? fb_colormask : ps_colormask)); /* R_02823C_CB_SHADER_MASK */
		r600_write_context_reg(cs, R_028808_CB_COLOR_CONTROL,
				       a->cb_color_control |
				       S_028808_MULTIWRITE_ENABLE(multiwrite));
	}
}
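
/* Emits the HTILE-dependent depth state: when the bound depth surface has
 * an HTILE buffer, the depth clear value, HTILE surface setup and HTILE
 * base address are programmed together with a relocation for the HTILE
 * buffer; otherwise HTILE is simply disabled. */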
static void r600_emit_db_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct r600_db_state *a = (struct r600_db_state*)atom;

	if (a->rsurf && a->rsurf->db_htile_surface) {
		struct r600_texture *rtex = (struct r600_texture *)a->rsurf->base.texture;
		unsigned reloc_idx;

		r600_write_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
		r600_write_context_reg(cs, R_028D24_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
		r600_write_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
		reloc_idx = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rtex->htile_buffer,
						  RADEON_USAGE_READWRITE, RADEON_PRIO_DEPTH_META);
		cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
		cs->buf[cs->cdw++] = reloc_idx;
	} else {
		r600_write_context_reg(cs, R_028D24_DB_HTILE_SURFACE, 0);
	}
}

static void r600_emit_db_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct r600_db_misc_state *a = (struct r600_db_misc_state*)atom;
	unsigned db_render_control = 0;
	unsigned db_render_override =
		S_028D10_FORCE_HIS_ENABLE0(V_028D10_FORCE_DISABLE) |
		S_028D10_FORCE_HIS_ENABLE1(V_028D10_FORCE_DISABLE);

	if (a->occlusion_query_enabled) {
		if (rctx->b.chip_class >= R700) {
			db_render_control |= S_028D0C_R700_PERFECT_ZPASS_COUNTS(1);
		}
		db_render_override |= S_028D10_NOOP_CULL_DISABLE(1);
	}
	if (rctx->db_state.rsurf && rctx->db_state.rsurf->db_htile_surface) {
		/* FORCE_OFF means HiZ/HiS are determined by DB_SHADER_CONTROL */
		db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_OFF);
		/* This is to fix a lockup when HyperZ and alpha test are enabled at
		 * the same time; somehow the GPU gets confused about which order
		 * to use for the Z test.
		 */
		if (rctx->alphatest_state.sx_alpha_test_control) {
			db_render_override |= S_028D10_FORCE_SHADER_Z_ORDER(1);
		}
	} else {
		db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_DISABLE);
	}
	if (a->flush_depthstencil_through_cb) {
		assert(a->copy_depth || a->copy_stencil);

		db_render_control |= S_028D0C_DEPTH_COPY_ENABLE(a->copy_depth) |
				     S_028D0C_STENCIL_COPY_ENABLE(a->copy_stencil) |
				     S_028D0C_COPY_CENTROID(1) |
				     S_028D0C_COPY_SAMPLE(a->copy_sample);
	} else if (a->flush_depthstencil_in_place) {
		db_render_control |= S_028D0C_DEPTH_COMPRESS_DISABLE(1) |
				     S_028D0C_STENCIL_COMPRESS_DISABLE(1);
		db_render_override |= S_028D10_NOOP_CULL_DISABLE(1);
	}
	if (a->htile_clear) {
		db_render_control |= S_028D0C_DEPTH_CLEAR_ENABLE(1);
	}
*/ 1629 if (rctx->b.family == CHIP_RV770 && a->log_samples == 3) { 1630 db_render_override |= S_028D10_MAX_TILES_IN_DTT(6); 1631 } 1632 1633 r600_write_context_reg_seq(cs, R_028D0C_DB_RENDER_CONTROL, 2); 1634 radeon_emit(cs, db_render_control); /* R_028D0C_DB_RENDER_CONTROL */ 1635 radeon_emit(cs, db_render_override); /* R_028D10_DB_RENDER_OVERRIDE */ 1636 r600_write_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control); 1637} 1638 1639static void r600_emit_config_state(struct r600_context *rctx, struct r600_atom *atom) 1640{ 1641 struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; 1642 struct r600_config_state *a = (struct r600_config_state*)atom; 1643 1644 r600_write_config_reg(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, a->sq_gpr_resource_mgmt_1); 1645 r600_write_config_reg(cs, R_008C08_SQ_GPR_RESOURCE_MGMT_2, a->sq_gpr_resource_mgmt_2); 1646} 1647 1648static void r600_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom *atom) 1649{ 1650 struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; 1651 uint32_t dirty_mask = rctx->vertex_buffer_state.dirty_mask; 1652 1653 while (dirty_mask) { 1654 struct pipe_vertex_buffer *vb; 1655 struct r600_resource *rbuffer; 1656 unsigned offset; 1657 unsigned buffer_index = u_bit_scan(&dirty_mask); 1658 1659 vb = &rctx->vertex_buffer_state.vb[buffer_index]; 1660 rbuffer = (struct r600_resource*)vb->buffer; 1661 assert(rbuffer); 1662 1663 offset = vb->buffer_offset; 1664 1665 /* fetch resources start at index 320 */ 1666 radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0)); 1667 radeon_emit(cs, (320 + buffer_index) * 7); 1668 radeon_emit(cs, offset); /* RESOURCEi_WORD0 */ 1669 radeon_emit(cs, rbuffer->buf->size - offset - 1); /* RESOURCEi_WORD1 */ 1670 radeon_emit(cs, /* RESOURCEi_WORD2 */ 1671 S_038008_ENDIAN_SWAP(r600_endian_swap(32)) | 1672 S_038008_STRIDE(vb->stride)); 1673 radeon_emit(cs, 0); /* RESOURCEi_WORD3 */ 1674 radeon_emit(cs, 0); /* RESOURCEi_WORD4 */ 1675 radeon_emit(cs, 0); /* RESOURCEi_WORD5 */ 1676 radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */ 1677 1678 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1679 radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, 1680 RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO)); 1681 } 1682} 1683 1684static void r600_emit_constant_buffers(struct r600_context *rctx, 1685 struct r600_constbuf_state *state, 1686 unsigned buffer_id_base, 1687 unsigned reg_alu_constbuf_size, 1688 unsigned reg_alu_const_cache) 1689{ 1690 struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; 1691 uint32_t dirty_mask = state->dirty_mask; 1692 1693 while (dirty_mask) { 1694 struct pipe_constant_buffer *cb; 1695 struct r600_resource *rbuffer; 1696 unsigned offset; 1697 unsigned buffer_index = ffs(dirty_mask) - 1; 1698 unsigned gs_ring_buffer = (buffer_index == R600_GS_RING_CONST_BUFFER); 1699 cb = &state->cb[buffer_index]; 1700 rbuffer = (struct r600_resource*)cb->buffer; 1701 assert(rbuffer); 1702 1703 offset = cb->buffer_offset; 1704 1705 if (!gs_ring_buffer) { 1706 r600_write_context_reg(cs, reg_alu_constbuf_size + buffer_index * 4, 1707 ALIGN_DIVUP(cb->buffer_size >> 4, 16)); 1708 r600_write_context_reg(cs, reg_alu_const_cache + buffer_index * 4, offset >> 8); 1709 } 1710 1711 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1712 radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, 1713 RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO)); 1714 1715 radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0)); 1716 radeon_emit(cs, (buffer_id_base + buffer_index) * 7); 1717 radeon_emit(cs, offset); /* 
RESOURCEi_WORD0 */ 1718 radeon_emit(cs, rbuffer->buf->size - offset - 1); /* RESOURCEi_WORD1 */ 1719 radeon_emit(cs, /* RESOURCEi_WORD2 */ 1720 S_038008_ENDIAN_SWAP(gs_ring_buffer ? ENDIAN_NONE : r600_endian_swap(32)) | 1721 S_038008_STRIDE(gs_ring_buffer ? 4 : 16)); 1722 radeon_emit(cs, 0); /* RESOURCEi_WORD3 */ 1723 radeon_emit(cs, 0); /* RESOURCEi_WORD4 */ 1724 radeon_emit(cs, 0); /* RESOURCEi_WORD5 */ 1725 radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */ 1726 1727 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1728 radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, 1729 RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO)); 1730 1731 dirty_mask &= ~(1 << buffer_index); 1732 } 1733 state->dirty_mask = 0; 1734} 1735 1736static void r600_emit_vs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom) 1737{ 1738 r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX], 160, 1739 R_028180_ALU_CONST_BUFFER_SIZE_VS_0, 1740 R_028980_ALU_CONST_CACHE_VS_0); 1741} 1742 1743static void r600_emit_gs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom) 1744{ 1745 r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY], 336, 1746 R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0, 1747 R_0289C0_ALU_CONST_CACHE_GS_0); 1748} 1749 1750static void r600_emit_ps_constant_buffers(struct r600_context *rctx, struct r600_atom *atom) 1751{ 1752 r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT], 0, 1753 R_028140_ALU_CONST_BUFFER_SIZE_PS_0, 1754 R_028940_ALU_CONST_CACHE_PS_0); 1755} 1756 1757static void r600_emit_sampler_views(struct r600_context *rctx, 1758 struct r600_samplerview_state *state, 1759 unsigned resource_id_base) 1760{ 1761 struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; 1762 uint32_t dirty_mask = state->dirty_mask; 1763 1764 while (dirty_mask) { 1765 struct r600_pipe_sampler_view *rview; 1766 unsigned resource_index = u_bit_scan(&dirty_mask); 1767 unsigned reloc; 1768 1769 rview = state->views[resource_index]; 1770 assert(rview); 1771 1772 radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0)); 1773 radeon_emit(cs, (resource_id_base + resource_index) * 7); 1774 radeon_emit_array(cs, rview->tex_resource_words, 7); 1775 1776 reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rview->tex_resource, 1777 RADEON_USAGE_READ, 1778 rview->tex_resource->b.b.nr_samples > 1 ? 1779 RADEON_PRIO_SHADER_TEXTURE_MSAA : 1780 RADEON_PRIO_SHADER_TEXTURE_RO); 1781 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1782 radeon_emit(cs, reloc); 1783 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1784 radeon_emit(cs, reloc); 1785 } 1786 state->dirty_mask = 0; 1787} 1788 1789/* Resource IDs: 1790 * PS: 0 .. +160 1791 * VS: 160 .. +160 1792 * FS: 320 .. +16 1793 * GS: 336 .. 
+160 1794 */ 1795 1796static void r600_emit_vs_sampler_views(struct r600_context *rctx, struct r600_atom *atom) 1797{ 1798 r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views, 160 + R600_MAX_CONST_BUFFERS); 1799} 1800 1801static void r600_emit_gs_sampler_views(struct r600_context *rctx, struct r600_atom *atom) 1802{ 1803 r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views, 336 + R600_MAX_CONST_BUFFERS); 1804} 1805 1806static void r600_emit_ps_sampler_views(struct r600_context *rctx, struct r600_atom *atom) 1807{ 1808 r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views, R600_MAX_CONST_BUFFERS); 1809} 1810 1811static void r600_emit_sampler_states(struct r600_context *rctx, 1812 struct r600_textures_info *texinfo, 1813 unsigned resource_id_base, 1814 unsigned border_color_reg) 1815{ 1816 struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; 1817 uint32_t dirty_mask = texinfo->states.dirty_mask; 1818 1819 while (dirty_mask) { 1820 struct r600_pipe_sampler_state *rstate; 1821 struct r600_pipe_sampler_view *rview; 1822 unsigned i = u_bit_scan(&dirty_mask); 1823 1824 rstate = texinfo->states.states[i]; 1825 assert(rstate); 1826 rview = texinfo->views.views[i]; 1827 1828 /* TEX_ARRAY_OVERRIDE must be set for array textures to disable 1829 * filtering between layers. 1830 * Don't update TEX_ARRAY_OVERRIDE if we don't have the sampler view. 1831 */ 1832 if (rview) { 1833 enum pipe_texture_target target = rview->base.texture->target; 1834 if (target == PIPE_TEXTURE_1D_ARRAY || 1835 target == PIPE_TEXTURE_2D_ARRAY) { 1836 rstate->tex_sampler_words[0] |= S_03C000_TEX_ARRAY_OVERRIDE(1); 1837 texinfo->is_array_sampler[i] = true; 1838 } else { 1839 rstate->tex_sampler_words[0] &= C_03C000_TEX_ARRAY_OVERRIDE; 1840 texinfo->is_array_sampler[i] = false; 1841 } 1842 } 1843 1844 radeon_emit(cs, PKT3(PKT3_SET_SAMPLER, 3, 0)); 1845 radeon_emit(cs, (resource_id_base + i) * 3); 1846 radeon_emit_array(cs, rstate->tex_sampler_words, 3); 1847 1848 if (rstate->border_color_use) { 1849 unsigned offset; 1850 1851 offset = border_color_reg; 1852 offset += i * 16; 1853 r600_write_config_reg_seq(cs, offset, 4); 1854 radeon_emit_array(cs, rstate->border_color.ui, 4); 1855 } 1856 } 1857 texinfo->states.dirty_mask = 0; 1858} 1859 1860static void r600_emit_vs_sampler_states(struct r600_context *rctx, struct r600_atom *atom) 1861{ 1862 r600_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_VERTEX], 18, R_00A600_TD_VS_SAMPLER0_BORDER_RED); 1863} 1864 1865static void r600_emit_gs_sampler_states(struct r600_context *rctx, struct r600_atom *atom) 1866{ 1867 r600_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY], 36, R_00A800_TD_GS_SAMPLER0_BORDER_RED); 1868} 1869 1870static void r600_emit_ps_sampler_states(struct r600_context *rctx, struct r600_atom *atom) 1871{ 1872 r600_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT], 0, R_00A400_TD_PS_SAMPLER0_BORDER_RED); 1873} 1874 1875static void r600_emit_seamless_cube_map(struct r600_context *rctx, struct r600_atom *atom) 1876{ 1877 struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; 1878 unsigned tmp; 1879 1880 tmp = S_009508_DISABLE_CUBE_ANISO(1) | 1881 S_009508_SYNC_GRADIENT(1) | 1882 S_009508_SYNC_WALKER(1) | 1883 S_009508_SYNC_ALIGNER(1); 1884 if (!rctx->seamless_cube_map.enabled) { 1885 tmp |= S_009508_DISABLE_CUBE_WRAP(1); 1886 } 1887 r600_write_config_reg(cs, R_009508_TA_CNTL_AUX, tmp); 1888} 1889 1890static void r600_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a) 1891{ 
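/* Broadcast the 8-bit sample mask into all four bytes of PA_SC_AA_MASK; the register holds one coverage mask per pixel of a 2x2 quad. */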
1892 struct r600_sample_mask *s = (struct r600_sample_mask*)a; 1893 uint8_t mask = s->sample_mask; 1894 1895 r600_write_context_reg(rctx->b.rings.gfx.cs, R_028C48_PA_SC_AA_MASK, 1896 mask | (mask << 8) | (mask << 16) | (mask << 24)); 1897} 1898 1899static void r600_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600_atom *a) 1900{ 1901 struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; 1902 struct r600_cso_state *state = (struct r600_cso_state*)a; 1903 struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso; 1904 1905 r600_write_context_reg(cs, R_028894_SQ_PGM_START_FS, shader->offset >> 8); 1906 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1907 radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, shader->buffer, 1908 RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA)); 1909} 1910 1911static void r600_emit_shader_stages(struct r600_context *rctx, struct r600_atom *a) 1912{ 1913 struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; 1914 struct r600_shader_stages_state *state = (struct r600_shader_stages_state*)a; 1915 1916 uint32_t v2 = 0, primid = 0; 1917 1918 if (state->geom_enable) { 1919 uint32_t cut_val; 1920 1921 if (rctx->gs_shader->current->shader.gs_max_out_vertices <= 128) 1922 cut_val = V_028A40_GS_CUT_128; 1923 else if (rctx->gs_shader->current->shader.gs_max_out_vertices <= 256) 1924 cut_val = V_028A40_GS_CUT_256; 1925 else if (rctx->gs_shader->current->shader.gs_max_out_vertices <= 512) 1926 cut_val = V_028A40_GS_CUT_512; 1927 else 1928 cut_val = V_028A40_GS_CUT_1024; 1929 1930 v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_G) | 1931 S_028A40_CUT_MODE(cut_val); 1932 1933 if (rctx->gs_shader->current->shader.gs_prim_id_input) 1934 primid = 1; 1935 } 1936 1937 r600_write_context_reg(cs, R_028A40_VGT_GS_MODE, v2); 1938 r600_write_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid); 1939} 1940 1941static void r600_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a) 1942{ 1943 struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; 1944 struct r600_gs_rings_state *state = (struct r600_gs_rings_state*)a; 1945 struct r600_resource *rbuffer; 1946 1947 r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1)); 1948 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); 1949 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH)); 1950 1951 if (state->enable) { 1952 rbuffer = (struct r600_resource*)state->esgs_ring.buffer; 1953 r600_write_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE, 0); 1954 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1955 radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, 1956 RADEON_USAGE_READWRITE, 1957 RADEON_PRIO_SHADER_RESOURCE_RW)); 1958 r600_write_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 1959 state->esgs_ring.buffer_size >> 8); 1960 1961 rbuffer = (struct r600_resource*)state->gsvs_ring.buffer; 1962 r600_write_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE, 0); 1963 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1964 radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, 1965 RADEON_USAGE_READWRITE, 1966 RADEON_PRIO_SHADER_RESOURCE_RW)); 1967 r600_write_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 1968 state->gsvs_ring.buffer_size >> 8); 1969 } else { 1970 r600_write_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0); 1971 r600_write_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0); 1972 } 1973 1974 r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1)); 1975 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); 1976 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH)); 1977} 1978 1979/* Adjust GPR 
allocation on R6xx/R7xx */ 1980bool r600_adjust_gprs(struct r600_context *rctx) 1981{ 1982 unsigned num_ps_gprs = rctx->ps_shader->current->shader.bc.ngpr; 1983 unsigned num_vs_gprs, num_es_gprs, num_gs_gprs; 1984 unsigned new_num_ps_gprs = num_ps_gprs; 1985 unsigned new_num_vs_gprs, new_num_es_gprs, new_num_gs_gprs; 1986 unsigned cur_num_ps_gprs = G_008C04_NUM_PS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1); 1987 unsigned cur_num_vs_gprs = G_008C04_NUM_VS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1); 1988 unsigned cur_num_gs_gprs = G_008C08_NUM_GS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2); 1989 unsigned cur_num_es_gprs = G_008C08_NUM_ES_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2); 1990 unsigned def_num_ps_gprs = rctx->default_ps_gprs; 1991 unsigned def_num_vs_gprs = rctx->default_vs_gprs; 1992 unsigned def_num_gs_gprs = 0; 1993 unsigned def_num_es_gprs = 0; 1994 unsigned def_num_clause_temp_gprs = rctx->r6xx_num_clause_temp_gprs; 1995 /* the hardware will reserve twice num_clause_temp_gprs */ 1996 unsigned max_gprs = def_num_gs_gprs + def_num_es_gprs + def_num_ps_gprs + def_num_vs_gprs + def_num_clause_temp_gprs * 2; 1997 unsigned tmp, tmp2; 1998 1999 if (rctx->gs_shader) { 2000 num_es_gprs = rctx->vs_shader->current->shader.bc.ngpr; 2001 num_gs_gprs = rctx->gs_shader->current->shader.bc.ngpr; 2002 num_vs_gprs = rctx->gs_shader->current->gs_copy_shader->shader.bc.ngpr; 2003 } else { 2004 num_es_gprs = 0; 2005 num_gs_gprs = 0; 2006 num_vs_gprs = rctx->vs_shader->current->shader.bc.ngpr; 2007 } 2008 new_num_vs_gprs = num_vs_gprs; 2009 new_num_es_gprs = num_es_gprs; 2010 new_num_gs_gprs = num_gs_gprs; 2011 2012 /* the sum of all SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS must be <= max_gprs */ 2013 if (new_num_ps_gprs > cur_num_ps_gprs || new_num_vs_gprs > cur_num_vs_gprs || 2014 new_num_es_gprs > cur_num_es_gprs || new_num_gs_gprs > cur_num_gs_gprs) { 2015 /* try to switch back to the defaults */ 2016 if (new_num_ps_gprs > def_num_ps_gprs || new_num_vs_gprs > def_num_vs_gprs || 2017 new_num_gs_gprs > def_num_gs_gprs || new_num_es_gprs > def_num_es_gprs) { 2018 /* always privilege the vs stage so that at worst we have the 2019 * pixel stage producing wrong output (not the vertex 2020 * stage) */ 2021 new_num_ps_gprs = max_gprs - (new_num_vs_gprs + new_num_es_gprs + new_num_gs_gprs + def_num_clause_temp_gprs * 2); 2022 new_num_vs_gprs = num_vs_gprs; 2023 new_num_gs_gprs = num_gs_gprs; 2024 new_num_es_gprs = num_es_gprs; 2025 } else { 2026 new_num_ps_gprs = def_num_ps_gprs; 2027 new_num_vs_gprs = def_num_vs_gprs; 2028 new_num_es_gprs = def_num_es_gprs; 2029 new_num_gs_gprs = def_num_gs_gprs; 2030 } 2031 } else { 2032 return true; 2033 } 2034 2035 /* SQ_PGM_RESOURCES_*.NUM_GPRS must always be programmed to a value <= 2036 * SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS, otherwise the GPU will lock up. 2037 * Likewise, if a shader uses more GPRs than SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS 2038 * allows, it will lock up. So in this case just discard the draw command 2039 * and don't change the current GPR partitioning. 
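* For example: if SQ_GPR_RESOURCE_MGMT_1 grants the PS pool only 100 GPRs while the current pixel shader was compiled to use 120, the draw below is discarded (return false) rather than risking a hang.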
2040 */ 2041 if (num_ps_gprs > new_num_ps_gprs || num_vs_gprs > new_num_vs_gprs || 2042 num_gs_gprs > new_num_gs_gprs || num_es_gprs > new_num_es_gprs) { 2043 R600_ERR("shaders require too many registers (%d + %d + %d + %d) " 2044 "for a combined maximum of %d\n", 2045 num_ps_gprs, num_vs_gprs, num_es_gprs, num_gs_gprs, max_gprs); 2046 return false; 2047 } 2048 2049 /* in some cases we end up recomputing the current value */ 2050 tmp = S_008C04_NUM_PS_GPRS(new_num_ps_gprs) | 2051 S_008C04_NUM_VS_GPRS(new_num_vs_gprs) | 2052 S_008C04_NUM_CLAUSE_TEMP_GPRS(def_num_clause_temp_gprs); 2053 2054 tmp2 = S_008C08_NUM_ES_GPRS(new_num_es_gprs) | 2055 S_008C08_NUM_GS_GPRS(new_num_gs_gprs); 2056 if (rctx->config_state.sq_gpr_resource_mgmt_1 != tmp || rctx->config_state.sq_gpr_resource_mgmt_2 != tmp2) { 2057 rctx->config_state.sq_gpr_resource_mgmt_1 = tmp; 2058 rctx->config_state.sq_gpr_resource_mgmt_2 = tmp2; 2059 rctx->config_state.atom.dirty = true; 2060 rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE; 2061 } 2062 return true; 2063} 2064 2065void r600_init_atom_start_cs(struct r600_context *rctx) 2066{ 2067 int ps_prio; 2068 int vs_prio; 2069 int gs_prio; 2070 int es_prio; 2071 int num_ps_gprs; 2072 int num_vs_gprs; 2073 int num_gs_gprs; 2074 int num_es_gprs; 2075 int num_temp_gprs; 2076 int num_ps_threads; 2077 int num_vs_threads; 2078 int num_gs_threads; 2079 int num_es_threads; 2080 int num_ps_stack_entries; 2081 int num_vs_stack_entries; 2082 int num_gs_stack_entries; 2083 int num_es_stack_entries; 2084 enum radeon_family family; 2085 struct r600_command_buffer *cb = &rctx->start_cs_cmd; 2086 uint32_t tmp, i; 2087 2088 r600_init_command_buffer(cb, 256); 2089 2090 /* R6xx requires this packet at the start of each command buffer */ 2091 if (rctx->b.chip_class == R600) { 2092 r600_store_value(cb, PKT3(PKT3_START_3D_CMDBUF, 0, 0)); 2093 r600_store_value(cb, 0); 2094 } 2095 /* All ASICs require this one */ 2096 r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0)); 2097 r600_store_value(cb, 0x80000000); 2098 r600_store_value(cb, 0x80000000); 2099 2100 /* We're setting config registers here. 
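* Config registers are global rather than per-context state, hence the PS partial flush below before any of them change.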
*/ 2101 r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0)); 2102 r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4)); 2103 2104 family = rctx->b.family; 2105 ps_prio = 0; 2106 vs_prio = 1; 2107 gs_prio = 2; 2108 es_prio = 3; 2109 switch (family) { 2110 case CHIP_R600: 2111 num_ps_gprs = 192; 2112 num_vs_gprs = 56; 2113 num_temp_gprs = 4; 2114 num_gs_gprs = 0; 2115 num_es_gprs = 0; 2116 num_ps_threads = 136; 2117 num_vs_threads = 48; 2118 num_gs_threads = 4; 2119 num_es_threads = 4; 2120 num_ps_stack_entries = 128; 2121 num_vs_stack_entries = 128; 2122 num_gs_stack_entries = 0; 2123 num_es_stack_entries = 0; 2124 break; 2125 case CHIP_RV630: 2126 case CHIP_RV635: 2127 num_ps_gprs = 84; 2128 num_vs_gprs = 36; 2129 num_temp_gprs = 4; 2130 num_gs_gprs = 0; 2131 num_es_gprs = 0; 2132 num_ps_threads = 144; 2133 num_vs_threads = 40; 2134 num_gs_threads = 4; 2135 num_es_threads = 4; 2136 num_ps_stack_entries = 40; 2137 num_vs_stack_entries = 40; 2138 num_gs_stack_entries = 32; 2139 num_es_stack_entries = 16; 2140 break; 2141 case CHIP_RV610: 2142 case CHIP_RV620: 2143 case CHIP_RS780: 2144 case CHIP_RS880: 2145 default: 2146 num_ps_gprs = 84; 2147 num_vs_gprs = 36; 2148 num_temp_gprs = 4; 2149 num_gs_gprs = 0; 2150 num_es_gprs = 0; 2151 num_ps_threads = 136; 2152 num_vs_threads = 48; 2153 num_gs_threads = 4; 2154 num_es_threads = 4; 2155 num_ps_stack_entries = 40; 2156 num_vs_stack_entries = 40; 2157 num_gs_stack_entries = 32; 2158 num_es_stack_entries = 16; 2159 break; 2160 case CHIP_RV670: 2161 num_ps_gprs = 144; 2162 num_vs_gprs = 40; 2163 num_temp_gprs = 4; 2164 num_gs_gprs = 0; 2165 num_es_gprs = 0; 2166 num_ps_threads = 136; 2167 num_vs_threads = 48; 2168 num_gs_threads = 4; 2169 num_es_threads = 4; 2170 num_ps_stack_entries = 40; 2171 num_vs_stack_entries = 40; 2172 num_gs_stack_entries = 32; 2173 num_es_stack_entries = 16; 2174 break; 2175 case CHIP_RV770: 2176 num_ps_gprs = 130; 2177 num_vs_gprs = 56; 2178 num_temp_gprs = 4; 2179 num_gs_gprs = 31; 2180 num_es_gprs = 31; 2181 num_ps_threads = 180; 2182 num_vs_threads = 60; 2183 num_gs_threads = 4; 2184 num_es_threads = 4; 2185 num_ps_stack_entries = 128; 2186 num_vs_stack_entries = 128; 2187 num_gs_stack_entries = 128; 2188 num_es_stack_entries = 128; 2189 break; 2190 case CHIP_RV730: 2191 case CHIP_RV740: 2192 num_ps_gprs = 84; 2193 num_vs_gprs = 36; 2194 num_temp_gprs = 4; 2195 num_gs_gprs = 0; 2196 num_es_gprs = 0; 2197 num_ps_threads = 180; 2198 num_vs_threads = 60; 2199 num_gs_threads = 4; 2200 num_es_threads = 4; 2201 num_ps_stack_entries = 128; 2202 num_vs_stack_entries = 128; 2203 num_gs_stack_entries = 0; 2204 num_es_stack_entries = 0; 2205 break; 2206 case CHIP_RV710: 2207 num_ps_gprs = 192; 2208 num_vs_gprs = 56; 2209 num_temp_gprs = 4; 2210 num_gs_gprs = 0; 2211 num_es_gprs = 0; 2212 num_ps_threads = 136; 2213 num_vs_threads = 48; 2214 num_gs_threads = 4; 2215 num_es_threads = 4; 2216 num_ps_stack_entries = 128; 2217 num_vs_stack_entries = 128; 2218 num_gs_stack_entries = 0; 2219 num_es_stack_entries = 0; 2220 break; 2221 } 2222 2223 rctx->default_ps_gprs = num_ps_gprs; 2224 rctx->default_vs_gprs = num_vs_gprs; 2225 rctx->r6xx_num_clause_temp_gprs = num_temp_gprs; 2226 2227 /* SQ_CONFIG */ 2228 tmp = 0; 2229 switch (family) { 2230 case CHIP_RV610: 2231 case CHIP_RV620: 2232 case CHIP_RS780: 2233 case CHIP_RS880: 2234 case CHIP_RV710: 2235 break; 2236 default: 2237 tmp |= S_008C00_VC_ENABLE(1); 2238 break; 2239 } 2240 tmp |= S_008C00_DX9_CONSTS(0); 2241 tmp |= S_008C00_ALU_INST_PREFER_VECTOR(1); 2242 
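/* Per-stage SQ thread queue priorities, as chosen above: PS=0, VS=1, GS=2, ES=3. */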
tmp |= S_008C00_PS_PRIO(ps_prio); 2243 tmp |= S_008C00_VS_PRIO(vs_prio); 2244 tmp |= S_008C00_GS_PRIO(gs_prio); 2245 tmp |= S_008C00_ES_PRIO(es_prio); 2246 r600_store_config_reg(cb, R_008C00_SQ_CONFIG, tmp); 2247 2248 /* SQ_GPR_RESOURCE_MGMT_2 */ 2249 tmp = S_008C08_NUM_GS_GPRS(num_gs_gprs); 2250 tmp |= S_008C08_NUM_ES_GPRS(num_es_gprs); 2251 r600_store_config_reg_seq(cb, R_008C08_SQ_GPR_RESOURCE_MGMT_2, 4); 2252 r600_store_value(cb, tmp); 2253 2254 /* SQ_THREAD_RESOURCE_MGMT */ 2255 tmp = S_008C0C_NUM_PS_THREADS(num_ps_threads); 2256 tmp |= S_008C0C_NUM_VS_THREADS(num_vs_threads); 2257 tmp |= S_008C0C_NUM_GS_THREADS(num_gs_threads); 2258 tmp |= S_008C0C_NUM_ES_THREADS(num_es_threads); 2259 r600_store_value(cb, tmp); /* R_008C0C_SQ_THREAD_RESOURCE_MGMT */ 2260 2261 /* SQ_STACK_RESOURCE_MGMT_1 */ 2262 tmp = S_008C10_NUM_PS_STACK_ENTRIES(num_ps_stack_entries); 2263 tmp |= S_008C10_NUM_VS_STACK_ENTRIES(num_vs_stack_entries); 2264 r600_store_value(cb, tmp); /* R_008C10_SQ_STACK_RESOURCE_MGMT_1 */ 2265 2266 /* SQ_STACK_RESOURCE_MGMT_2 */ 2267 tmp = S_008C14_NUM_GS_STACK_ENTRIES(num_gs_stack_entries); 2268 tmp |= S_008C14_NUM_ES_STACK_ENTRIES(num_es_stack_entries); 2269 r600_store_value(cb, tmp); /* R_008C14_SQ_STACK_RESOURCE_MGMT_2 */ 2270 2271 r600_store_config_reg(cb, R_009714_VC_ENHANCE, 0); 2272 2273 if (rctx->b.chip_class >= R700) { 2274 r600_store_config_reg(cb, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0x00004000); 2275 r600_store_config_reg(cb, R_009830_DB_DEBUG, 0); 2276 r600_store_config_reg(cb, R_009838_DB_WATERMARKS, 0x00420204); 2277 r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 0); 2278 } else { 2279 r600_store_config_reg(cb, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0); 2280 r600_store_config_reg(cb, R_009830_DB_DEBUG, 0x82000000); 2281 r600_store_config_reg(cb, R_009838_DB_WATERMARKS, 0x01020204); 2282 r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 1); 2283 } 2284 r600_store_context_reg_seq(cb, R_0288A8_SQ_ESGS_RING_ITEMSIZE, 9); 2285 r600_store_value(cb, 0); /* R_0288A8_SQ_ESGS_RING_ITEMSIZE */ 2286 r600_store_value(cb, 0); /* R_0288AC_SQ_GSVS_RING_ITEMSIZE */ 2287 r600_store_value(cb, 0); /* R_0288B0_SQ_ESTMP_RING_ITEMSIZE */ 2288 r600_store_value(cb, 0); /* R_0288B4_SQ_GSTMP_RING_ITEMSIZE */ 2289 r600_store_value(cb, 0); /* R_0288B8_SQ_VSTMP_RING_ITEMSIZE */ 2290 r600_store_value(cb, 0); /* R_0288BC_SQ_PSTMP_RING_ITEMSIZE */ 2291 r600_store_value(cb, 0); /* R_0288C0_SQ_FBUF_RING_ITEMSIZE */ 2292 r600_store_value(cb, 0); /* R_0288C4_SQ_REDUC_RING_ITEMSIZE */ 2293 r600_store_value(cb, 0); /* R_0288C8_SQ_GS_VERT_ITEMSIZE */ 2294 2295 /* prevent the GPU from preloading constants from random addresses */ 2296 r600_store_context_reg_seq(cb, R_028140_ALU_CONST_BUFFER_SIZE_PS_0, 16); 2297 for (i = 0; i < 16; i++) 2298 r600_store_value(cb, 0); 2299 2300 r600_store_context_reg_seq(cb, R_028180_ALU_CONST_BUFFER_SIZE_VS_0, 16); 2301 for (i = 0; i < 16; i++) 2302 r600_store_value(cb, 0); 2303 2304 r600_store_context_reg_seq(cb, R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0, 16); 2305 for (i = 0; i < 16; i++) 2306 r600_store_value(cb, 0); 2307 2308 r600_store_context_reg_seq(cb, R_028A10_VGT_OUTPUT_PATH_CNTL, 13); 2309 r600_store_value(cb, 0); /* R_028A10_VGT_OUTPUT_PATH_CNTL */ 2310 r600_store_value(cb, 0); /* R_028A14_VGT_HOS_CNTL */ 2311 r600_store_value(cb, 0); /* R_028A18_VGT_HOS_MAX_TESS_LEVEL */ 2312 r600_store_value(cb, 0); /* R_028A1C_VGT_HOS_MIN_TESS_LEVEL */ 2313 r600_store_value(cb, 0); /* R_028A20_VGT_HOS_REUSE_DEPTH */ 2314 r600_store_value(cb, 0); /* 
R_028A24_VGT_GROUP_PRIM_TYPE */ 2315 r600_store_value(cb, 0); /* R_028A28_VGT_GROUP_FIRST_DECR */ 2316 r600_store_value(cb, 0); /* R_028A2C_VGT_GROUP_DECR */ 2317 r600_store_value(cb, 0); /* R_028A30_VGT_GROUP_VECT_0_CNTL */ 2318 r600_store_value(cb, 0); /* R_028A34_VGT_GROUP_VECT_1_CNTL */ 2319 r600_store_value(cb, 0); /* R_028A38_VGT_GROUP_VECT_0_FMT_CNTL */ 2320 r600_store_value(cb, 0); /* R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL */ 2321 r600_store_value(cb, 0); /* R_028A40_VGT_GS_MODE */ 2322 2323 r600_store_context_reg(cb, R_028A84_VGT_PRIMITIVEID_EN, 0); 2324 r600_store_context_reg(cb, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 0); 2325 r600_store_context_reg(cb, R_028AA4_VGT_INSTANCE_STEP_RATE_1, 0); 2326 2327 r600_store_context_reg_seq(cb, R_028AB4_VGT_REUSE_OFF, 2); 2328 r600_store_value(cb, 1); /* R_028AB4_VGT_REUSE_OFF */ 2329 r600_store_value(cb, 0); /* R_028AB8_VGT_VTX_CNT_EN */ 2330 2331 r600_store_context_reg(cb, R_028B20_VGT_STRMOUT_BUFFER_EN, 0); 2332 2333 r600_store_ctl_const(cb, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0); 2334 2335 r600_store_context_reg(cb, R_028028_DB_STENCIL_CLEAR, 0); 2336 2337 r600_store_context_reg_seq(cb, R_0286DC_SPI_FOG_CNTL, 3); 2338 r600_store_value(cb, 0); /* R_0286DC_SPI_FOG_CNTL */ 2339 r600_store_value(cb, 0); /* R_0286E0_SPI_FOG_FUNC_SCALE */ 2340 r600_store_value(cb, 0); /* R_0286E4_SPI_FOG_FUNC_BIAS */ 2341 2342 r600_store_context_reg_seq(cb, R_028D28_DB_SRESULTS_COMPARE_STATE0, 3); 2343 r600_store_value(cb, 0); /* R_028D28_DB_SRESULTS_COMPARE_STATE0 */ 2344 r600_store_value(cb, 0); /* R_028D2C_DB_SRESULTS_COMPARE_STATE1 */ 2345 r600_store_value(cb, 0); /* R_028D30_DB_PRELOAD_CONTROL */ 2346 2347 r600_store_context_reg(cb, R_028820_PA_CL_NANINF_CNTL, 0); 2348 r600_store_context_reg(cb, R_028A48_PA_SC_MPASS_PS_CNTL, 0); 2349 2350 r600_store_context_reg_seq(cb, R_028C0C_PA_CL_GB_VERT_CLIP_ADJ, 4); 2351 r600_store_value(cb, 0x3F800000); /* R_028C0C_PA_CL_GB_VERT_CLIP_ADJ */ 2352 r600_store_value(cb, 0x3F800000); /* R_028C10_PA_CL_GB_VERT_DISC_ADJ */ 2353 r600_store_value(cb, 0x3F800000); /* R_028C14_PA_CL_GB_HORZ_CLIP_ADJ */ 2354 r600_store_value(cb, 0x3F800000); /* R_028C18_PA_CL_GB_HORZ_DISC_ADJ */ 2355 2356 r600_store_context_reg_seq(cb, R_0282D0_PA_SC_VPORT_ZMIN_0, 2 * 16); 2357 for (tmp = 0; tmp < 16; tmp++) { 2358 r600_store_value(cb, 0); /* R_0282D0_PA_SC_VPORT_ZMIN_0 */ 2359 r600_store_value(cb, 0x3F800000); /* R_0282D4_PA_SC_VPORT_ZMAX_0 */ 2360 } 2361 2362 r600_store_context_reg(cb, R_028200_PA_SC_WINDOW_OFFSET, 0); 2363 r600_store_context_reg(cb, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF); 2364 2365 if (rctx->b.chip_class >= R700) { 2366 r600_store_context_reg(cb, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA); 2367 } 2368 2369 r600_store_context_reg_seq(cb, R_028C30_CB_CLRCMP_CONTROL, 4); 2370 r600_store_value(cb, 0x1000000); /* R_028C30_CB_CLRCMP_CONTROL */ 2371 r600_store_value(cb, 0); /* R_028C34_CB_CLRCMP_SRC */ 2372 r600_store_value(cb, 0xFF); /* R_028C38_CB_CLRCMP_DST */ 2373 r600_store_value(cb, 0xFFFFFFFF); /* R_028C3C_CB_CLRCMP_MSK */ 2374 2375 r600_store_context_reg_seq(cb, R_028030_PA_SC_SCREEN_SCISSOR_TL, 2); 2376 r600_store_value(cb, 0); /* R_028030_PA_SC_SCREEN_SCISSOR_TL */ 2377 r600_store_value(cb, S_028034_BR_X(8192) | S_028034_BR_Y(8192)); /* R_028034_PA_SC_SCREEN_SCISSOR_BR */ 2378 2379 r600_store_context_reg_seq(cb, R_028240_PA_SC_GENERIC_SCISSOR_TL, 2); 2380 r600_store_value(cb, 0); /* R_028240_PA_SC_GENERIC_SCISSOR_TL */ 2381 r600_store_value(cb, S_028244_BR_X(8192) | S_028244_BR_Y(8192)); /* R_028244_PA_SC_GENERIC_SCISSOR_BR */ 2382 2383 
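/* Zero the per-stage control-flow instruction offsets; every shader program is expected to start at the beginning of its buffer. */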
r600_store_context_reg_seq(cb, R_0288CC_SQ_PGM_CF_OFFSET_PS, 5); 2384 r600_store_value(cb, 0); /* R_0288CC_SQ_PGM_CF_OFFSET_PS */ 2385 r600_store_value(cb, 0); /* R_0288D0_SQ_PGM_CF_OFFSET_VS */ 2386 r600_store_value(cb, 0); /* R_0288D4_SQ_PGM_CF_OFFSET_GS */ 2387 r600_store_value(cb, 0); /* R_0288D8_SQ_PGM_CF_OFFSET_ES */ 2388 r600_store_value(cb, 0); /* R_0288DC_SQ_PGM_CF_OFFSET_FS */ 2389 2390 r600_store_context_reg(cb, R_0288E0_SQ_VTX_SEMANTIC_CLEAR, ~0); 2391 2392 r600_store_context_reg_seq(cb, R_028400_VGT_MAX_VTX_INDX, 2); 2393 r600_store_value(cb, ~0); /* R_028400_VGT_MAX_VTX_INDX */ 2394 r600_store_value(cb, 0); /* R_028404_VGT_MIN_VTX_INDX */ 2395 2396 r600_store_context_reg(cb, R_0288A4_SQ_PGM_RESOURCES_FS, 0); 2397 2398 if (rctx->b.chip_class == R700) 2399 r600_store_context_reg(cb, R_028350_SX_MISC, 0); 2400 if (rctx->b.chip_class == R700 && rctx->screen->b.has_streamout) 2401 r600_store_context_reg(cb, R_028354_SX_SURFACE_SYNC, S_028354_SURFACE_SYNC_MASK(0xf)); 2402 2403 r600_store_context_reg(cb, R_028800_DB_DEPTH_CONTROL, 0); 2404 if (rctx->screen->b.has_streamout) { 2405 r600_store_context_reg(cb, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0); 2406 } 2407 2408 r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0, 0x1000FFF); 2409 r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0 + (32 * 4), 0x1000FFF); 2410 r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0 + (64 * 4), 0x1000FFF); 2411} 2412 2413void r600_update_ps_state(struct pipe_context *ctx, struct r600_pipe_shader *shader) 2414{ 2415 struct r600_context *rctx = (struct r600_context *)ctx; 2416 struct r600_command_buffer *cb = &shader->command_buffer; 2417 struct r600_shader *rshader = &shader->shader; 2418 unsigned i, exports_ps, num_cout, spi_ps_in_control_0, spi_input_z, spi_ps_in_control_1, db_shader_control; 2419 int pos_index = -1, face_index = -1; 2420 unsigned tmp, sid, ufi = 0; 2421 int need_linear = 0; 2422 unsigned z_export = 0, stencil_export = 0; 2423 unsigned sprite_coord_enable = rctx->rasterizer ? 
rctx->rasterizer->sprite_coord_enable : 0; 2424 2425 if (!cb->buf) { 2426 r600_init_command_buffer(cb, 64); 2427 } else { 2428 cb->num_dw = 0; 2429 } 2430 2431 r600_store_context_reg_seq(cb, R_028644_SPI_PS_INPUT_CNTL_0, rshader->ninput); 2432 for (i = 0; i < rshader->ninput; i++) { 2433 if (rshader->input[i].name == TGSI_SEMANTIC_POSITION) 2434 pos_index = i; 2435 if (rshader->input[i].name == TGSI_SEMANTIC_FACE) 2436 face_index = i; 2437 2438 sid = rshader->input[i].spi_sid; 2439 2440 tmp = S_028644_SEMANTIC(sid); 2441 2442 if (rshader->input[i].name == TGSI_SEMANTIC_POSITION || 2443 rshader->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT || 2444 (rshader->input[i].interpolate == TGSI_INTERPOLATE_COLOR && 2445 rctx->rasterizer && rctx->rasterizer->flatshade)) 2446 tmp |= S_028644_FLAT_SHADE(1); 2447 2448 if (rshader->input[i].name == TGSI_SEMANTIC_GENERIC && 2449 sprite_coord_enable & (1 << rshader->input[i].sid)) { 2450 tmp |= S_028644_PT_SPRITE_TEX(1); 2451 } 2452 2453 if (rshader->input[i].centroid) 2454 tmp |= S_028644_SEL_CENTROID(1); 2455 2456 if (rshader->input[i].interpolate == TGSI_INTERPOLATE_LINEAR) { 2457 need_linear = 1; 2458 tmp |= S_028644_SEL_LINEAR(1); 2459 } 2460 2461 r600_store_value(cb, tmp); 2462 } 2463 2464 db_shader_control = 0; 2465 for (i = 0; i < rshader->noutput; i++) { 2466 if (rshader->output[i].name == TGSI_SEMANTIC_POSITION) 2467 z_export = 1; 2468 if (rshader->output[i].name == TGSI_SEMANTIC_STENCIL) 2469 stencil_export = 1; 2470 } 2471 db_shader_control |= S_02880C_Z_EXPORT_ENABLE(z_export); 2472 db_shader_control |= S_02880C_STENCIL_REF_EXPORT_ENABLE(stencil_export); 2473 if (rshader->uses_kill) 2474 db_shader_control |= S_02880C_KILL_ENABLE(1); 2475 2476 exports_ps = 0; 2477 for (i = 0; i < rshader->noutput; i++) { 2478 if (rshader->output[i].name == TGSI_SEMANTIC_POSITION || 2479 rshader->output[i].name == TGSI_SEMANTIC_STENCIL) { 2480 exports_ps |= 1; 2481 } 2482 } 2483 num_cout = rshader->nr_ps_color_exports; 2484 exports_ps |= S_028854_EXPORT_COLORS(num_cout); 2485 if (!exports_ps) { 2486 /* always at least export 1 component per pixel */ 2487 exports_ps = 2; 2488 } 2489 2490 shader->nr_ps_color_outputs = num_cout; 2491 2492 spi_ps_in_control_0 = S_0286CC_NUM_INTERP(rshader->ninput) | 2493 S_0286CC_PERSP_GRADIENT_ENA(1)| 2494 S_0286CC_LINEAR_GRADIENT_ENA(need_linear); 2495 spi_input_z = 0; 2496 if (pos_index != -1) { 2497 spi_ps_in_control_0 |= (S_0286CC_POSITION_ENA(1) | 2498 S_0286CC_POSITION_CENTROID(rshader->input[pos_index].centroid) | 2499 S_0286CC_POSITION_ADDR(rshader->input[pos_index].gpr) | 2500 S_0286CC_BARYC_SAMPLE_CNTL(1)); 2501 spi_input_z |= S_0286D8_PROVIDE_Z_TO_SPI(1); 2502 } 2503 2504 spi_ps_in_control_1 = 0; 2505 if (face_index != -1) { 2506 spi_ps_in_control_1 |= S_0286D0_FRONT_FACE_ENA(1) | 2507 S_0286D0_FRONT_FACE_ADDR(rshader->input[face_index].gpr); 2508 } 2509 2510 /* HW bug in original R600 */ 2511 if (rctx->b.family == CHIP_R600) 2512 ufi = 1; 2513 2514 r600_store_context_reg_seq(cb, R_0286CC_SPI_PS_IN_CONTROL_0, 2); 2515 r600_store_value(cb, spi_ps_in_control_0); /* R_0286CC_SPI_PS_IN_CONTROL_0 */ 2516 r600_store_value(cb, spi_ps_in_control_1); /* R_0286D0_SPI_PS_IN_CONTROL_1 */ 2517 2518 r600_store_context_reg(cb, R_0286D8_SPI_INPUT_Z, spi_input_z); 2519 2520 r600_store_context_reg_seq(cb, R_028850_SQ_PGM_RESOURCES_PS, 2); 2521 r600_store_value(cb, /* R_028850_SQ_PGM_RESOURCES_PS*/ 2522 S_028850_NUM_GPRS(rshader->bc.ngpr) | 2523 S_028850_STACK_SIZE(rshader->bc.nstack) | 2524 S_028850_UNCACHED_FIRST_INST(ufi)); 2525 
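/* exports_ps: bit 0 flags a depth/stencil export, and EXPORT_COLORS holds the color export count, so the fallback value 2 above corresponds to a single color export. */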
r600_store_value(cb, exports_ps); /* R_028854_SQ_PGM_EXPORTS_PS */ 2526 2527 r600_store_context_reg(cb, R_028840_SQ_PGM_START_PS, 0); 2528 /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */ 2529 2530 /* only set some bits here, the other bits are set in the dsa state */ 2531 shader->db_shader_control = db_shader_control; 2532 shader->ps_depth_export = z_export | stencil_export; 2533 2534 shader->sprite_coord_enable = sprite_coord_enable; 2535 if (rctx->rasterizer) 2536 shader->flatshade = rctx->rasterizer->flatshade; 2537} 2538 2539void r600_update_vs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader) 2540{ 2541 struct r600_command_buffer *cb = &shader->command_buffer; 2542 struct r600_shader *rshader = &shader->shader; 2543 unsigned spi_vs_out_id[10] = {}; 2544 unsigned i, tmp, nparams = 0; 2545 2546 for (i = 0; i < rshader->noutput; i++) { 2547 if (rshader->output[i].spi_sid) { 2548 tmp = rshader->output[i].spi_sid << ((nparams & 3) * 8); 2549 spi_vs_out_id[nparams / 4] |= tmp; 2550 nparams++; 2551 } 2552 } 2553 2554 r600_init_command_buffer(cb, 32); 2555 2556 r600_store_context_reg_seq(cb, R_028614_SPI_VS_OUT_ID_0, 10); 2557 for (i = 0; i < 10; i++) { 2558 r600_store_value(cb, spi_vs_out_id[i]); 2559 } 2560 2561 /* Certain attributes (position, psize, etc.) don't count as params. 2562 * VS is required to export at least one param and r600_shader_from_tgsi() 2563 * takes care of adding a dummy export. 2564 */ 2565 if (nparams < 1) 2566 nparams = 1; 2567 2568 r600_store_context_reg(cb, R_0286C4_SPI_VS_OUT_CONFIG, 2569 S_0286C4_VS_EXPORT_COUNT(nparams - 1)); 2570 r600_store_context_reg(cb, R_028868_SQ_PGM_RESOURCES_VS, 2571 S_028868_NUM_GPRS(rshader->bc.ngpr) | 2572 S_028868_STACK_SIZE(rshader->bc.nstack)); 2573 if (rshader->vs_position_window_space) { 2574 r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL, 2575 S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1)); 2576 } else { 2577 r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL, 2578 S_028818_VTX_W0_FMT(1) | 2579 S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) | 2580 S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) | 2581 S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1)); 2582 2583 } 2584 r600_store_context_reg(cb, R_028858_SQ_PGM_START_VS, 0); 2585 /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). 
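* (In this driver the shader atoms use r600_emit_shader as their emit callback, which is expected to append that relocation.)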
*/ 2586 2587 shader->pa_cl_vs_out_cntl = 2588 S_02881C_VS_OUT_CCDIST0_VEC_ENA((rshader->clip_dist_write & 0x0F) != 0) | 2589 S_02881C_VS_OUT_CCDIST1_VEC_ENA((rshader->clip_dist_write & 0xF0) != 0) | 2590 S_02881C_VS_OUT_MISC_VEC_ENA(rshader->vs_out_misc_write) | 2591 S_02881C_USE_VTX_POINT_SIZE(rshader->vs_out_point_size) | 2592 S_02881C_USE_VTX_EDGE_FLAG(rshader->vs_out_edgeflag) | 2593 S_02881C_USE_VTX_RENDER_TARGET_INDX(rshader->vs_out_layer) | 2594 S_02881C_USE_VTX_VIEWPORT_INDX(rshader->vs_out_viewport); 2595} 2596 2597void r600_update_gs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader) 2598{ 2599 struct r600_context *rctx = (struct r600_context *)ctx; 2600 struct r600_command_buffer *cb = &shader->command_buffer; 2601 struct r600_shader *rshader = &shader->shader; 2602 struct r600_shader *cp_shader = &shader->gs_copy_shader->shader; 2603 unsigned gsvs_itemsize = 2604 (cp_shader->ring_item_size * rshader->gs_max_out_vertices) >> 2; 2605 2606 r600_init_command_buffer(cb, 64); 2607 2608 /* VGT_GS_MODE is written by r600_emit_shader_stages */ 2609 r600_store_context_reg(cb, R_028AB8_VGT_VTX_CNT_EN, 1); 2610 2611 if (rctx->b.chip_class >= R700) { 2612 r600_store_context_reg(cb, R_028B38_VGT_GS_MAX_VERT_OUT, 2613 S_028B38_MAX_VERT_OUT(rshader->gs_max_out_vertices)); 2614 } 2615 r600_store_context_reg(cb, R_028A6C_VGT_GS_OUT_PRIM_TYPE, 2616 r600_conv_prim_to_gs_out(rshader->gs_output_prim)); 2617 2618 r600_store_context_reg_seq(cb, R_0288C8_SQ_GS_VERT_ITEMSIZE, 4); 2619 r600_store_value(cb, cp_shader->ring_item_size >> 2); 2620 r600_store_value(cb, 0); 2621 r600_store_value(cb, 0); 2622 r600_store_value(cb, 0); 2623 2624 r600_store_context_reg(cb, R_0288A8_SQ_ESGS_RING_ITEMSIZE, 2625 (rshader->ring_item_size) >> 2); 2626 2627 r600_store_context_reg(cb, R_0288AC_SQ_GSVS_RING_ITEMSIZE, 2628 gsvs_itemsize); 2629 2630 /* FIXME calculate these values somehow ??? */ 2631 r600_store_config_reg_seq(cb, R_0088C8_VGT_GS_PER_ES, 2); 2632 r600_store_value(cb, 0x80); /* GS_PER_ES */ 2633 r600_store_value(cb, 0x100); /* ES_PER_GS */ 2634 r600_store_config_reg_seq(cb, R_0088E8_VGT_GS_PER_VS, 1); 2635 r600_store_value(cb, 0x2); /* GS_PER_VS */ 2636 2637 r600_store_context_reg(cb, R_02887C_SQ_PGM_RESOURCES_GS, 2638 S_02887C_NUM_GPRS(rshader->bc.ngpr) | 2639 S_02887C_STACK_SIZE(rshader->bc.nstack)); 2640 r600_store_context_reg(cb, R_02886C_SQ_PGM_START_GS, 0); 2641 /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */ 2642} 2643 2644void r600_update_es_state(struct pipe_context *ctx, struct r600_pipe_shader *shader) 2645{ 2646 struct r600_command_buffer *cb = &shader->command_buffer; 2647 struct r600_shader *rshader = &shader->shader; 2648 2649 r600_init_command_buffer(cb, 32); 2650 2651 r600_store_context_reg(cb, R_028890_SQ_PGM_RESOURCES_ES, 2652 S_028890_NUM_GPRS(rshader->bc.ngpr) | 2653 S_028890_STACK_SIZE(rshader->bc.nstack)); 2654 r600_store_context_reg(cb, R_028880_SQ_PGM_START_ES, 0); 2655 /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). 
*/ 2656} 2657 2658 2659void *r600_create_resolve_blend(struct r600_context *rctx) 2660{ 2661 struct pipe_blend_state blend; 2662 unsigned i; 2663 2664 memset(&blend, 0, sizeof(blend)); 2665 blend.independent_blend_enable = true; 2666 for (i = 0; i < 2; i++) { 2667 blend.rt[i].colormask = 0xf; 2668 blend.rt[i].blend_enable = 1; 2669 blend.rt[i].rgb_func = PIPE_BLEND_ADD; 2670 blend.rt[i].alpha_func = PIPE_BLEND_ADD; 2671 blend.rt[i].rgb_src_factor = PIPE_BLENDFACTOR_ZERO; 2672 blend.rt[i].rgb_dst_factor = PIPE_BLENDFACTOR_ZERO; 2673 blend.rt[i].alpha_src_factor = PIPE_BLENDFACTOR_ZERO; 2674 blend.rt[i].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO; 2675 } 2676 return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_RESOLVE_BOX); 2677} 2678 2679void *r700_create_resolve_blend(struct r600_context *rctx) 2680{ 2681 struct pipe_blend_state blend; 2682 2683 memset(&blend, 0, sizeof(blend)); 2684 blend.independent_blend_enable = true; 2685 blend.rt[0].colormask = 0xf; 2686 return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_RESOLVE_BOX); 2687} 2688 2689void *r600_create_decompress_blend(struct r600_context *rctx) 2690{ 2691 struct pipe_blend_state blend; 2692 2693 memset(&blend, 0, sizeof(blend)); 2694 blend.independent_blend_enable = true; 2695 blend.rt[0].colormask = 0xf; 2696 return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_EXPAND_SAMPLES); 2697} 2698 2699void *r600_create_db_flush_dsa(struct r600_context *rctx) 2700{ 2701 struct pipe_depth_stencil_alpha_state dsa; 2702 boolean quirk = false; 2703 2704 if (rctx->b.family == CHIP_RV610 || rctx->b.family == CHIP_RV630 || 2705 rctx->b.family == CHIP_RV620 || rctx->b.family == CHIP_RV635) 2706 quirk = true; 2707 2708 memset(&dsa, 0, sizeof(dsa)); 2709 2710 if (quirk) { 2711 dsa.depth.enabled = 1; 2712 dsa.depth.func = PIPE_FUNC_LEQUAL; 2713 dsa.stencil[0].enabled = 1; 2714 dsa.stencil[0].func = PIPE_FUNC_ALWAYS; 2715 dsa.stencil[0].zpass_op = PIPE_STENCIL_OP_KEEP; 2716 dsa.stencil[0].zfail_op = PIPE_STENCIL_OP_INCR; 2717 dsa.stencil[0].writemask = 0xff; 2718 } 2719 2720 return rctx->b.b.create_depth_stencil_alpha_state(&rctx->b.b, &dsa); 2721} 2722 2723void r600_update_db_shader_control(struct r600_context *rctx) 2724{ 2725 bool dual_export; 2726 unsigned db_shader_control; 2727 2728 if (!rctx->ps_shader) { 2729 return; 2730 } 2731 2732 dual_export = rctx->framebuffer.export_16bpc && 2733 !rctx->ps_shader->current->ps_depth_export; 2734 2735 db_shader_control = rctx->ps_shader->current->db_shader_control | 2736 S_02880C_DUAL_EXPORT_ENABLE(dual_export); 2737 2738 /* When alpha test is enabled we can't trust the hw to make the proper 2739 * decision on the order in which the Z test should run relative to fragment 2740 * shader execution. 2741 * 2742 * If alpha test is enabled, perform the Z test after the fragment shader. 
RE_Z (early 2743 * z test but no write to the zbuffer) seems to cause a lockup on r6xx/r7xx 2744 */ 2745 if (rctx->alphatest_state.sx_alpha_test_control) { 2746 db_shader_control |= S_02880C_Z_ORDER(V_02880C_LATE_Z); 2747 } else { 2748 db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z); 2749 } 2750 2751 if (db_shader_control != rctx->db_misc_state.db_shader_control) { 2752 rctx->db_misc_state.db_shader_control = db_shader_control; 2753 rctx->db_misc_state.atom.dirty = true; 2754 } 2755} 2756 2757static INLINE unsigned r600_array_mode(unsigned mode) 2758{ 2759 switch (mode) { 2760 case RADEON_SURF_MODE_LINEAR_ALIGNED: return V_0280A0_ARRAY_LINEAR_ALIGNED; 2761 break; 2762 case RADEON_SURF_MODE_1D: return V_0280A0_ARRAY_1D_TILED_THIN1; 2763 break; 2764 case RADEON_SURF_MODE_2D: return V_0280A0_ARRAY_2D_TILED_THIN1; 2765 default: 2766 case RADEON_SURF_MODE_LINEAR: return V_0280A0_ARRAY_LINEAR_GENERAL; 2767 } 2768} 2769 2770static boolean r600_dma_copy_tile(struct r600_context *rctx, 2771 struct pipe_resource *dst, 2772 unsigned dst_level, 2773 unsigned dst_x, 2774 unsigned dst_y, 2775 unsigned dst_z, 2776 struct pipe_resource *src, 2777 unsigned src_level, 2778 unsigned src_x, 2779 unsigned src_y, 2780 unsigned src_z, 2781 unsigned copy_height, 2782 unsigned pitch, 2783 unsigned bpp) 2784{ 2785 struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs; 2786 struct r600_texture *rsrc = (struct r600_texture*)src; 2787 struct r600_texture *rdst = (struct r600_texture*)dst; 2788 unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size; 2789 unsigned ncopy, height, cheight, detile, i, x, y, z, src_mode, dst_mode; 2790 uint64_t base, addr; 2791 2792 dst_mode = rdst->surface.level[dst_level].mode; 2793 src_mode = rsrc->surface.level[src_level].mode; 2794 /* downcast linear aligned to linear to simplify testing */ 2795 src_mode = src_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : src_mode; 2796 dst_mode = dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : dst_mode; 2797 assert(dst_mode != src_mode); 2798 2799 y = 0; 2800 lbpp = util_logbase2(bpp); 2801 pitch_tile_max = ((pitch / bpp) / 8) - 1; 2802 2803 if (dst_mode == RADEON_SURF_MODE_LINEAR) { 2804 /* T2L: tiled to linear */ 2805 array_mode = r600_array_mode(src_mode); 2806 slice_tile_max = (rsrc->surface.level[src_level].nblk_x * rsrc->surface.level[src_level].nblk_y) / (8*8); 2807 slice_tile_max = slice_tile_max ? slice_tile_max - 1 : 0; 2808 /* the linear height must match the slice tile max height; this is fine even 2809 * if the linear destination/source has a smaller height, because the size of 2810 * the dma packet uses copy_height, which is always smaller than or equal 2811 * to the linear height 2812 */ 2813 height = rsrc->surface.level[src_level].npix_y; 2814 detile = 1; 2815 x = src_x; 2816 y = src_y; 2817 z = src_z; 2818 base = rsrc->surface.level[src_level].offset; 2819 addr = rdst->surface.level[dst_level].offset; 2820 addr += rdst->surface.level[dst_level].slice_size * dst_z; 2821 addr += dst_y * pitch + dst_x * bpp; 2822 } else { 2823 /* L2T: linear to tiled */ 2824 array_mode = r600_array_mode(dst_mode); 2825 slice_tile_max = (rdst->surface.level[dst_level].nblk_x * rdst->surface.level[dst_level].nblk_y) / (8*8); 2826 slice_tile_max = slice_tile_max ? 
slice_tile_max - 1 : 0; 2827 /* the linear height must match the slice tile max height; this is fine even 2828 * if the linear destination/source has a smaller height, because the size of 2829 * the dma packet uses copy_height, which is always smaller than or equal 2830 * to the linear height 2831 */ 2832 height = rdst->surface.level[dst_level].npix_y; 2833 detile = 0; 2834 x = dst_x; 2835 y = dst_y; 2836 z = dst_z; 2837 base = rdst->surface.level[dst_level].offset; 2838 addr = rsrc->surface.level[src_level].offset; 2839 addr += rsrc->surface.level[src_level].slice_size * src_z; 2840 addr += src_y * pitch + src_x * bpp; 2841 } 2842 /* check that we meet the dword/base alignment constraints */ 2843 if (addr % 4 || base % 256) { 2844 return FALSE; 2845 } 2846 2847 /* r6xx/r7xx limitation: the number of lines in the blit must be a multiple 2848 * of 8. Compute the largest multiple-of-8 line count that fits in the size limit 2849 */ 2850 cheight = ((R600_DMA_COPY_MAX_SIZE_DW * 4) / pitch) & 0xfffffff8; 2851 ncopy = (copy_height / cheight) + !!(copy_height % cheight); 2852 r600_need_dma_space(&rctx->b, ncopy * 7); 2853 2854 for (i = 0; i < ncopy; i++) { 2855 cheight = cheight > copy_height ? copy_height : cheight; 2856 size = (cheight * pitch) / 4; 2857 /* emit the relocs before writing to the cs so that the cs is always in a consistent state */ 2858 r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, &rsrc->resource, RADEON_USAGE_READ, 2859 RADEON_PRIO_MIN); 2860 r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, &rdst->resource, RADEON_USAGE_WRITE, 2861 RADEON_PRIO_MIN); 2862 cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, 1, 0, size); 2863 cs->buf[cs->cdw++] = base >> 8; 2864 cs->buf[cs->cdw++] = (detile << 31) | (array_mode << 27) | 2865 (lbpp << 24) | ((height - 1) << 10) | 2866 pitch_tile_max; 2867 cs->buf[cs->cdw++] = (slice_tile_max << 12) | (z << 0); 2868 cs->buf[cs->cdw++] = (x << 3) | (y << 17); 2869 cs->buf[cs->cdw++] = addr & 0xfffffffc; 2870 cs->buf[cs->cdw++] = (addr >> 32UL) & 0xff; 2871 copy_height -= cheight; 2872 addr += cheight * pitch; 2873 y += cheight; 2874 } 2875 return TRUE; 2876} 2877 2878static void r600_dma_copy(struct pipe_context *ctx, 2879 struct pipe_resource *dst, 2880 unsigned dst_level, 2881 unsigned dstx, unsigned dsty, unsigned dstz, 2882 struct pipe_resource *src, 2883 unsigned src_level, 2884 const struct pipe_box *src_box) 2885{ 2886 struct r600_context *rctx = (struct r600_context *)ctx; 2887 struct r600_texture *rsrc = (struct r600_texture*)src; 2888 struct r600_texture *rdst = (struct r600_texture*)dst; 2889 unsigned dst_pitch, src_pitch, bpp, dst_mode, src_mode, copy_height; 2890 unsigned src_w, dst_w; 2891 unsigned src_x, src_y; 2892 unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz; 2893 2894 if (rctx->b.rings.dma.cs == NULL) { 2895 goto fallback; 2896 } 2897 2898 if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) { 2899 if (dst_x % 4 || src_box->x % 4 || src_box->width % 4) 2900 goto fallback; 2901 2902 r600_dma_copy_buffer(rctx, dst, src, dst_x, src_box->x, src_box->width); 2903 return; 2904 } 2905 2906 if (src->format != dst->format || src_box->depth > 1) { 2907 goto fallback; 2908 } 2909 2910 src_x = util_format_get_nblocksx(src->format, src_box->x); 2911 dst_x = util_format_get_nblocksx(src->format, dst_x); 2912 src_y = util_format_get_nblocksy(src->format, src_box->y); 2913 dst_y = util_format_get_nblocksy(src->format, dst_y); 2914 2915 bpp = rdst->surface.bpe; 2916 dst_pitch = rdst->surface.level[dst_level].pitch_bytes; 2917 src_pitch = 
rsrc->surface.level[src_level].pitch_bytes; 2918 src_w = rsrc->surface.level[src_level].npix_x; 2919 dst_w = rdst->surface.level[dst_level].npix_x; 2920 copy_height = src_box->height / rsrc->surface.blk_h; 2921 2922 dst_mode = rdst->surface.level[dst_level].mode; 2923 src_mode = rsrc->surface.level[src_level].mode; 2924 /* downcast linear aligned to linear to simplify testing */ 2925 src_mode = src_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : src_mode; 2926 dst_mode = dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : dst_mode; 2927 2928 if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w) { 2929 /* strict requirement on r6xx/r7xx */ 2930 goto fallback; 2931 } 2932 /* there are a lot of alignment constraints; this should capture them all */ 2933 if (src_pitch % 8 || src_box->y % 8 || dst_y % 8) { 2934 goto fallback; 2935 } 2936 2937 if (src_mode == dst_mode) { 2938 uint64_t dst_offset, src_offset, size; 2939 2940 /* a simple dma blit will do; NOTE: the code here assumes 2941 * src_box.x/y == 0 2942 * dst_x/y == 0 2943 * dst_pitch == src_pitch 2944 */ 2945 src_offset = rsrc->surface.level[src_level].offset; 2946 src_offset += rsrc->surface.level[src_level].slice_size * src_box->z; 2947 src_offset += src_y * src_pitch + src_x * bpp; 2948 dst_offset = rdst->surface.level[dst_level].offset; 2949 dst_offset += rdst->surface.level[dst_level].slice_size * dst_z; 2950 dst_offset += dst_y * dst_pitch + dst_x * bpp; 2951 size = src_box->height * src_pitch; 2952 /* must be dw aligned */ 2953 if (dst_offset % 4 || src_offset % 4 || size % 4) { 2954 goto fallback; 2955 } 2956 r600_dma_copy_buffer(rctx, dst, src, dst_offset, src_offset, size); 2957 } else { 2958 if (!r600_dma_copy_tile(rctx, dst, dst_level, dst_x, dst_y, dst_z, 2959 src, src_level, src_x, src_y, src_box->z, 2960 copy_height, dst_pitch, bpp)) { 2961 goto fallback; 2962 } 2963 } 2964 return; 2965 2966fallback: 2967 ctx->resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz, 2968 src, src_level, src_box); 2969} 2970 2971void r600_init_state_functions(struct r600_context *rctx) 2972{ 2973 unsigned id = 4; 2974 int i; 2975 2976 /* !!! 2977 * To avoid GPU lockups, registers must be emitted in a specific order 2978 * (no kidding ...). The order below is important and has been 2979 * partially inferred from analyzing the fglrx command stream. 2980 * 2981 * Don't reorder atoms without carefully checking the effect (GPU lockup 2982 * or piglit regression). 2983 * !!! 
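* Note: atoms are emitted in increasing id order, so the sequence of r600_init_atom calls below is also the emission order.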
2984 */ 2985 2986 r600_init_atom(rctx, &rctx->framebuffer.atom, id++, r600_emit_framebuffer_state, 0); 2987 2988 /* shader const */ 2989 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX].atom, id++, r600_emit_vs_constant_buffers, 0); 2990 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY].atom, id++, r600_emit_gs_constant_buffers, 0); 2991 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT].atom, id++, r600_emit_ps_constant_buffers, 0); 2992 2993 /* samplers must be emitted before TA_CNTL_AUX, otherwise a DISABLE_CUBE_WRAP change 2994 * does not take effect (TA_CNTL_AUX is emitted by r600_emit_seamless_cube_map) 2995 */ 2996 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].states.atom, id++, r600_emit_vs_sampler_states, 0); 2997 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].states.atom, id++, r600_emit_gs_sampler_states, 0); 2998 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].states.atom, id++, r600_emit_ps_sampler_states, 0); 2999 /* resource */ 3000 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views.atom, id++, r600_emit_vs_sampler_views, 0); 3001 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views.atom, id++, r600_emit_gs_sampler_views, 0); 3002 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views.atom, id++, r600_emit_ps_sampler_views, 0); 3003 r600_init_atom(rctx, &rctx->vertex_buffer_state.atom, id++, r600_emit_vertex_buffers, 0); 3004 3005 r600_init_atom(rctx, &rctx->vgt_state.atom, id++, r600_emit_vgt_state, 7); 3006 3007 r600_init_atom(rctx, &rctx->seamless_cube_map.atom, id++, r600_emit_seamless_cube_map, 3); 3008 r600_init_atom(rctx, &rctx->sample_mask.atom, id++, r600_emit_sample_mask, 3); 3009 rctx->sample_mask.sample_mask = ~0; 3010 3011 r600_init_atom(rctx, &rctx->alphatest_state.atom, id++, r600_emit_alphatest_state, 6); 3012 r600_init_atom(rctx, &rctx->blend_color.atom, id++, r600_emit_blend_color, 6); 3013 r600_init_atom(rctx, &rctx->blend_state.atom, id++, r600_emit_cso_state, 0); 3014 r600_init_atom(rctx, &rctx->cb_misc_state.atom, id++, r600_emit_cb_misc_state, 7); 3015 r600_init_atom(rctx, &rctx->clip_misc_state.atom, id++, r600_emit_clip_misc_state, 6); 3016 r600_init_atom(rctx, &rctx->clip_state.atom, id++, r600_emit_clip_state, 26); 3017 r600_init_atom(rctx, &rctx->db_misc_state.atom, id++, r600_emit_db_misc_state, 7); 3018 r600_init_atom(rctx, &rctx->db_state.atom, id++, r600_emit_db_state, 11); 3019 r600_init_atom(rctx, &rctx->dsa_state.atom, id++, r600_emit_cso_state, 0); 3020 r600_init_atom(rctx, &rctx->poly_offset_state.atom, id++, r600_emit_polygon_offset, 6); 3021 r600_init_atom(rctx, &rctx->rasterizer_state.atom, id++, r600_emit_cso_state, 0); 3022 for (i = 0; i < 16; i++) { 3023 r600_init_atom(rctx, &rctx->scissor[i].atom, id++, r600_emit_scissor_state, 4); 3024 r600_init_atom(rctx, &rctx->viewport[i].atom, id++, r600_emit_viewport_state, 8); 3025 rctx->scissor[i].idx = i; 3026 rctx->viewport[i].idx = i; 3027 } 3028 r600_init_atom(rctx, &rctx->config_state.atom, id++, r600_emit_config_state, 3); 3029 r600_init_atom(rctx, &rctx->stencil_ref.atom, id++, r600_emit_stencil_ref, 4); 3030 r600_init_atom(rctx, &rctx->vertex_fetch_shader.atom, id++, r600_emit_vertex_fetch_shader, 5); 3031 rctx->atoms[id++] = &rctx->b.streamout.begin_atom; 3032 rctx->atoms[id++] = &rctx->b.streamout.enable_atom; 3033 r600_init_atom(rctx, &rctx->vertex_shader.atom, id++, r600_emit_shader, 23); 3034 r600_init_atom(rctx, &rctx->pixel_shader.atom, id++, r600_emit_shader, 0); 3035 
r600_init_atom(rctx, &rctx->geometry_shader.atom, id++, r600_emit_shader, 0); 3036 r600_init_atom(rctx, &rctx->export_shader.atom, id++, r600_emit_shader, 0); 3037 r600_init_atom(rctx, &rctx->shader_stages.atom, id++, r600_emit_shader_stages, 0); 3038 r600_init_atom(rctx, &rctx->gs_rings.atom, id++, r600_emit_gs_rings, 0); 3039 3040 rctx->b.b.create_blend_state = r600_create_blend_state; 3041 rctx->b.b.create_depth_stencil_alpha_state = r600_create_dsa_state; 3042 rctx->b.b.create_rasterizer_state = r600_create_rs_state; 3043 rctx->b.b.create_sampler_state = r600_create_sampler_state; 3044 rctx->b.b.create_sampler_view = r600_create_sampler_view; 3045 rctx->b.b.set_framebuffer_state = r600_set_framebuffer_state; 3046 rctx->b.b.set_polygon_stipple = r600_set_polygon_stipple; 3047 rctx->b.b.set_scissor_states = r600_set_scissor_states; 3048 rctx->b.b.get_sample_position = r600_get_sample_position; 3049 rctx->b.dma_copy = r600_dma_copy; 3050} 3051/* this function must be last */ 3052