/*
 * Copyright 2010 Red Hat Inc.
 *           2010 Jerome Glisse
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 */
#include "r600_formats.h"
#include "r600_shader.h"
#include "r600d.h"

#include "util/u_format_s3tc.h"
#include "util/u_index_modify.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/u_math.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_ureg.h"

void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw)
{
	assert(!cb->buf);
	cb->buf = CALLOC(1, 4 * num_dw);
	cb->max_num_dw = num_dw;
}

void r600_release_command_buffer(struct r600_command_buffer *cb)
{
	FREE(cb->buf);
}

void r600_add_atom(struct r600_context *rctx,
		   struct r600_atom *atom,
		   unsigned id)
{
	assert(id < R600_NUM_ATOMS);
	assert(rctx->atoms[id] == NULL);
	rctx->atoms[id] = atom;
	atom->id = id;
}

void r600_init_atom(struct r600_context *rctx,
		    struct r600_atom *atom,
		    unsigned id,
		    void (*emit)(struct r600_context *ctx, struct r600_atom *state),
		    unsigned num_dw)
{
	atom->emit = (void*)emit;
	atom->num_dw = num_dw;
	r600_add_atom(rctx, atom, id);
}

void r600_emit_cso_state(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_command_buffer(rctx->b.gfx.cs, ((struct r600_cso_state*)atom)->cb);
}

void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	struct r600_alphatest_state *a = (struct r600_alphatest_state*)atom;
	unsigned alpha_ref = a->sx_alpha_ref;

	if (rctx->b.chip_class >= EVERGREEN && a->cb0_export_16bpc) {
		alpha_ref &= ~0x1FFF;
	}

	radeon_set_context_reg(cs, R_028410_SX_ALPHA_TEST_CONTROL,
			       a->sx_alpha_test_control |
			       S_028410_ALPHA_TEST_BYPASS(a->bypass));
	radeon_set_context_reg(cs, R_028438_SX_ALPHA_REF, alpha_ref);
}
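/* Note on the "atom" pattern used throughout this file (a rough sketch, not
 * an exhaustive description): each piece of derived GPU state is wrapped in
 * a struct r600_atom carrying an emit() callback plus num_dw, an upper bound
 * on the command-stream dwords the callback may write. State-change hooks
 * only update the CPU-side structs and call r600_mark_atom_dirty(); dirty
 * atoms are then re-emitted in one pass at draw time, e.g.:
 *
 *	rctx->sample_mask.sample_mask = sample_mask;
 *	r600_mark_atom_dirty(rctx, &rctx->sample_mask.atom);
 *
 * Atoms whose size depends on how much state is dirty recompute num_dw
 * before marking themselves dirty (see r600_sampler_states_dirty below).
 */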
static void r600_memory_barrier(struct pipe_context *ctx, unsigned flags)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!(flags & ~PIPE_BARRIER_UPDATE))
		return;

	if (flags & PIPE_BARRIER_CONSTANT_BUFFER)
		rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;

	if (flags & (PIPE_BARRIER_VERTEX_BUFFER |
		     PIPE_BARRIER_SHADER_BUFFER |
		     PIPE_BARRIER_TEXTURE |
		     PIPE_BARRIER_IMAGE |
		     PIPE_BARRIER_STREAMOUT_BUFFER |
		     PIPE_BARRIER_GLOBAL_BUFFER)) {
		rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE |
				 R600_CONTEXT_INV_TEX_CACHE;
	}

	if (flags & (PIPE_BARRIER_FRAMEBUFFER |
		     PIPE_BARRIER_IMAGE))
		rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV;

	rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
}

static void r600_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV |
			 R600_CONTEXT_WAIT_3D_IDLE;
	rctx->framebuffer.do_update_surf_dirtiness = true;
}
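/* The two lookup tables below use C99 designated initializers, so any
 * index not listed maps to 0; the asserts only guard against indices past
 * the end of the arrays. E.g. (an illustrative expectation, not code from
 * this driver):
 *
 *	assert(r600_conv_pipe_prim(PIPE_PRIM_TRIANGLES) ==
 *	       V_008958_DI_PT_TRILIST);
 */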
static unsigned r600_conv_pipe_prim(unsigned prim)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS] = V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES] = V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP] = V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP] = V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES] = V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP] = V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN] = V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS] = V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP] = V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON] = V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY] = V_008958_DI_PT_LINELIST_ADJ,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_008958_DI_PT_LINESTRIP_ADJ,
		[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_008958_DI_PT_TRILIST_ADJ,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_008958_DI_PT_TRISTRIP_ADJ,
		[PIPE_PRIM_PATCHES] = V_008958_DI_PT_PATCH,
		[R600_PRIM_RECTANGLE_LIST] = V_008958_DI_PT_RECTLIST
	};
	assert(prim < ARRAY_SIZE(prim_conv));
	return prim_conv[prim];
}

unsigned r600_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_PATCHES] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[R600_PRIM_RECTANGLE_LIST] = V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < ARRAY_SIZE(prim_conv));

	return prim_conv[mode];
}

/* common state between evergreen and r600 */

static void r600_bind_blend_state_internal(struct r600_context *rctx,
		struct r600_blend_state *blend, bool blend_disable)
{
	unsigned color_control;
	bool update_cb = false;

	rctx->alpha_to_one = blend->alpha_to_one;
	rctx->dual_src_blend = blend->dual_src_blend;

	if (!blend_disable) {
		r600_set_cso_state_with_cb(rctx, &rctx->blend_state, blend, &blend->buffer);
		color_control = blend->cb_color_control;
	} else {
		/* Blending is disabled. */
		r600_set_cso_state_with_cb(rctx, &rctx->blend_state, blend, &blend->buffer_no_blend);
		color_control = blend->cb_color_control_no_blend;
	}

	/* Update derived states. */
	if (rctx->cb_misc_state.blend_colormask != blend->cb_target_mask) {
		rctx->cb_misc_state.blend_colormask = blend->cb_target_mask;
		update_cb = true;
	}
	if (rctx->b.chip_class <= R700 &&
	    rctx->cb_misc_state.cb_color_control != color_control) {
		rctx->cb_misc_state.cb_color_control = color_control;
		update_cb = true;
	}
	if (rctx->cb_misc_state.dual_src_blend != blend->dual_src_blend) {
		rctx->cb_misc_state.dual_src_blend = blend->dual_src_blend;
		update_cb = true;
	}
	if (update_cb) {
		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}
	if (rctx->framebuffer.dual_src_blend != blend->dual_src_blend) {
		rctx->framebuffer.dual_src_blend = blend->dual_src_blend;
		r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
	}
}

static void r600_bind_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_blend_state *blend = (struct r600_blend_state *)state;

	if (!blend) {
		r600_set_cso_state_with_cb(rctx, &rctx->blend_state, NULL, NULL);
		return;
	}

	r600_bind_blend_state_internal(rctx, blend, rctx->force_blend_disable);
}

static void r600_set_blend_color(struct pipe_context *ctx,
				 const struct pipe_blend_color *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->blend_color.state = *state;
	r600_mark_atom_dirty(rctx, &rctx->blend_color.atom);
}

void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	struct pipe_blend_color *state = &rctx->blend_color.state;

	radeon_set_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
	radeon_emit(cs, fui(state->color[0])); /* R_028414_CB_BLEND_RED */
	radeon_emit(cs, fui(state->color[1])); /* R_028418_CB_BLEND_GREEN */
	radeon_emit(cs, fui(state->color[2])); /* R_02841C_CB_BLEND_BLUE */
	radeon_emit(cs, fui(state->color[3])); /* R_028420_CB_BLEND_ALPHA */
}

void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	struct r600_vgt_state *a = (struct r600_vgt_state *)atom;

	radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, a->vgt_multi_prim_ib_reset_en);
	radeon_set_context_reg_seq(cs, R_028408_VGT_INDX_OFFSET, 2);
	radeon_emit(cs, a->vgt_indx_offset); /* R_028408_VGT_INDX_OFFSET */
	radeon_emit(cs, a->vgt_multi_prim_ib_reset_indx); /* R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX */
	if (a->last_draw_was_indirect) {
		a->last_draw_was_indirect = false;
		radeon_set_ctl_const(cs, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0);
	}
}

static void r600_set_clip_state(struct pipe_context *ctx,
				const struct pipe_clip_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->clip_state.state = *state;
	r600_mark_atom_dirty(rctx, &rctx->clip_state.atom);
	rctx->driver_consts[PIPE_SHADER_VERTEX].vs_ucp_dirty = true;
}

static void r600_set_stencil_ref(struct pipe_context *ctx,
				 const struct r600_stencil_ref *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->stencil_ref.state = *state;
	r600_mark_atom_dirty(rctx, &rctx->stencil_ref.atom);
}
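/* DB_STENCILREFMASK and DB_STENCILREFMASK_BF pack the reference value,
 * compare mask and write mask for the front (resp. back) face into one
 * register each, which is why the emit below rebuilds packed words from
 * the r600_stencil_ref state. A sketch of the layout (the S_028430_*
 * shifts are the authority):
 *
 *	ref | (valuemask << 8) | (writemask << 16)
 */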
void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	struct r600_stencil_ref_state *a = (struct r600_stencil_ref_state*)atom;

	radeon_set_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2);
	radeon_emit(cs, /* R_028430_DB_STENCILREFMASK */
		    S_028430_STENCILREF(a->state.ref_value[0]) |
		    S_028430_STENCILMASK(a->state.valuemask[0]) |
		    S_028430_STENCILWRITEMASK(a->state.writemask[0]));
	radeon_emit(cs, /* R_028434_DB_STENCILREFMASK_BF */
		    S_028434_STENCILREF_BF(a->state.ref_value[1]) |
		    S_028434_STENCILMASK_BF(a->state.valuemask[1]) |
		    S_028434_STENCILWRITEMASK_BF(a->state.writemask[1]));
}

static void r600_set_pipe_stencil_ref(struct pipe_context *ctx,
				      const struct pipe_stencil_ref *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = (struct r600_dsa_state*)rctx->dsa_state.cso;
	struct r600_stencil_ref ref;

	rctx->stencil_ref.pipe_state = *state;

	if (!dsa)
		return;

	ref.ref_value[0] = state->ref_value[0];
	ref.ref_value[1] = state->ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];

	r600_set_stencil_ref(ctx, &ref);
}
static void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = state;
	struct r600_stencil_ref ref;

	if (!state) {
		r600_set_cso_state_with_cb(rctx, &rctx->dsa_state, NULL, NULL);
		return;
	}

	r600_set_cso_state_with_cb(rctx, &rctx->dsa_state, dsa, &dsa->buffer);

	ref.ref_value[0] = rctx->stencil_ref.pipe_state.ref_value[0];
	ref.ref_value[1] = rctx->stencil_ref.pipe_state.ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];
	if (rctx->zwritemask != dsa->zwritemask) {
		rctx->zwritemask = dsa->zwritemask;
		if (rctx->b.chip_class >= EVERGREEN) {
			/* Work around a lockup seen on Evergreen when not
			 * writing to the zbuffer: do not enable HyperZ
			 * unless the zbuffer is written.
			 */
			r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
		}
	}

	r600_set_stencil_ref(ctx, &ref);

	/* Update alphatest state. */
	if (rctx->alphatest_state.sx_alpha_test_control != dsa->sx_alpha_test_control ||
	    rctx->alphatest_state.sx_alpha_ref != dsa->alpha_ref) {
		rctx->alphatest_state.sx_alpha_test_control = dsa->sx_alpha_test_control;
		rctx->alphatest_state.sx_alpha_ref = dsa->alpha_ref;
		r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
	}
}

static void r600_bind_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_rasterizer_state *rs = (struct r600_rasterizer_state *)state;
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state)
		return;

	rctx->rasterizer = rs;

	r600_set_cso_state_with_cb(rctx, &rctx->rasterizer_state, rs, &rs->buffer);

	if (rs->offset_enable &&
	    (rs->offset_units != rctx->poly_offset_state.offset_units ||
	     rs->offset_scale != rctx->poly_offset_state.offset_scale ||
	     rs->offset_units_unscaled != rctx->poly_offset_state.offset_units_unscaled)) {
		rctx->poly_offset_state.offset_units = rs->offset_units;
		rctx->poly_offset_state.offset_scale = rs->offset_scale;
		rctx->poly_offset_state.offset_units_unscaled = rs->offset_units_unscaled;
		r600_mark_atom_dirty(rctx, &rctx->poly_offset_state.atom);
	}

	/* Update clip_misc_state. */
	if (rctx->clip_misc_state.pa_cl_clip_cntl != rs->pa_cl_clip_cntl ||
	    rctx->clip_misc_state.clip_plane_enable != rs->clip_plane_enable) {
		rctx->clip_misc_state.pa_cl_clip_cntl = rs->pa_cl_clip_cntl;
		rctx->clip_misc_state.clip_plane_enable = rs->clip_plane_enable;
		r600_mark_atom_dirty(rctx, &rctx->clip_misc_state.atom);
	}

	r600_viewport_set_rast_deps(&rctx->b, rs->scissor_enable, rs->clip_halfz);

	/* Re-emit PA_SC_LINE_STIPPLE. */
	rctx->last_primitive_type = -1;
}

static void r600_delete_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_rasterizer_state *rs = (struct r600_rasterizer_state *)state;

	r600_release_command_buffer(&rs->buffer);
	FREE(rs);
}

static void r600_sampler_view_destroy(struct pipe_context *ctx,
				      struct pipe_sampler_view *state)
{
	struct r600_pipe_sampler_view *view = (struct r600_pipe_sampler_view *)state;

	if (view->tex_resource->gpu_address &&
	    view->tex_resource->b.b.target == PIPE_BUFFER)
		LIST_DELINIT(&view->list);

	pipe_resource_reference(&state->texture, NULL);
	FREE(view);
}

void r600_sampler_states_dirty(struct r600_context *rctx,
			       struct r600_sampler_states *state)
{
	if (state->dirty_mask) {
		if (state->dirty_mask & state->has_bordercolor_mask) {
			rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
		}
		state->atom.num_dw =
			util_bitcount(state->dirty_mask & state->has_bordercolor_mask) * 11 +
			util_bitcount(state->dirty_mask & ~state->has_bordercolor_mask) * 5;
		r600_mark_atom_dirty(rctx, &state->atom);
	}
}
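/* The slot-tracking pattern used by the bind function below (and by
 * r600_set_sampler_views / r600_set_vertex_buffers) is, in sketch form:
 *
 *	enabled_mask &= ~disable_mask;	// drop slots being unbound
 *	dirty_mask   &= enabled_mask;	// never re-emit disabled slots
 *	enabled_mask |= new_mask;	// add slots being (re)bound
 *	dirty_mask   |= new_mask;	// and schedule them for emission
 *
 * e.g. binding a new sampler in slot 1 while unbinding slot 0 turns
 * enabled_mask 0b0011 into 0b0010 with dirty_mask 0b0010.
 */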
static void r600_bind_sampler_states(struct pipe_context *pipe,
				     enum pipe_shader_type shader,
				     unsigned start,
				     unsigned count, void **states)
{
	struct r600_context *rctx = (struct r600_context *)pipe;
	struct r600_textures_info *dst = &rctx->samplers[shader];
	struct r600_pipe_sampler_state **rstates = (struct r600_pipe_sampler_state**)states;
	int seamless_cube_map = -1;
	unsigned i;
	/* This sets a 1 bit for every state with index >= count. */
	uint32_t disable_mask = ~((1ull << count) - 1);
	/* These are the new states set by this function. */
	uint32_t new_mask = 0;

	assert(start == 0); /* XXX fix below */

	if (!states) {
		disable_mask = ~0u;
		count = 0;
	}

	for (i = 0; i < count; i++) {
		struct r600_pipe_sampler_state *rstate = rstates[i];

		if (rstate == dst->states.states[i]) {
			continue;
		}

		if (rstate) {
			if (rstate->border_color_use) {
				dst->states.has_bordercolor_mask |= 1 << i;
			} else {
				dst->states.has_bordercolor_mask &= ~(1 << i);
			}
			seamless_cube_map = rstate->seamless_cube_map;

			new_mask |= 1 << i;
		} else {
			disable_mask |= 1 << i;
		}
	}

	memcpy(dst->states.states, rstates, sizeof(void*) * count);
	memset(dst->states.states + count, 0, sizeof(void*) * (NUM_TEX_UNITS - count));

	dst->states.enabled_mask &= ~disable_mask;
	dst->states.dirty_mask &= dst->states.enabled_mask;
	dst->states.enabled_mask |= new_mask;
	dst->states.dirty_mask |= new_mask;
	dst->states.has_bordercolor_mask &= dst->states.enabled_mask;

	r600_sampler_states_dirty(rctx, &dst->states);

	/* Seamless cubemap state. */
	if (rctx->b.chip_class <= R700 &&
	    seamless_cube_map != -1 &&
	    seamless_cube_map != rctx->seamless_cube_map.enabled) {
		/* A change in TA_CNTL_AUX needs a pipeline flush. */
		rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
		rctx->seamless_cube_map.enabled = seamless_cube_map;
		r600_mark_atom_dirty(rctx, &rctx->seamless_cube_map.atom);
	}
}

static void r600_delete_sampler_state(struct pipe_context *ctx, void *state)
{
	free(state);
}

static void r600_delete_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_blend_state *blend = (struct r600_blend_state*)state;

	if (rctx->blend_state.cso == state) {
		ctx->bind_blend_state(ctx, NULL);
	}

	r600_release_command_buffer(&blend->buffer);
	r600_release_command_buffer(&blend->buffer_no_blend);
	FREE(blend);
}

static void r600_delete_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = (struct r600_dsa_state *)state;

	if (rctx->dsa_state.cso == state) {
		ctx->bind_depth_stencil_alpha_state(ctx, NULL);
	}

	r600_release_command_buffer(&dsa->buffer);
	free(dsa);
}

static void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	r600_set_cso_state(rctx, &rctx->vertex_fetch_shader, state);
}

static void r600_delete_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state;
	r600_resource_reference(&shader->buffer, NULL);
	FREE(shader);
}
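/* Variable-size atoms recompute their dword budget from the dirty mask
 * before being scheduled; the per-slot costs below match the packets
 * emitted for one vertex buffer elsewhere in the driver. E.g. with three
 * dirty buffers on Evergreen the atom reserves 3 * 12 = 36 dwords
 * (illustrative arithmetic, the constants below are the authority).
 */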
void r600_vertex_buffers_dirty(struct r600_context *rctx)
{
	if (rctx->vertex_buffer_state.dirty_mask) {
		rctx->vertex_buffer_state.atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 12 : 11) *
			util_bitcount(rctx->vertex_buffer_state.dirty_mask);
		r600_mark_atom_dirty(rctx, &rctx->vertex_buffer_state.atom);
	}
}

static void r600_set_vertex_buffers(struct pipe_context *ctx,
				    unsigned start_slot, unsigned count,
				    const struct pipe_vertex_buffer *input)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_vertexbuf_state *state = &rctx->vertex_buffer_state;
	struct pipe_vertex_buffer *vb = state->vb + start_slot;
	unsigned i;
	uint32_t disable_mask = 0;
	/* These are the new buffers set by this function. */
	uint32_t new_buffer_mask = 0;

	/* Set vertex buffers. */
	if (input) {
		for (i = 0; i < count; i++) {
			if ((input[i].buffer.resource != vb[i].buffer.resource) ||
			    (vb[i].stride != input[i].stride) ||
			    (vb[i].buffer_offset != input[i].buffer_offset) ||
			    (vb[i].is_user_buffer != input[i].is_user_buffer)) {
				if (input[i].buffer.resource) {
					vb[i].stride = input[i].stride;
					vb[i].buffer_offset = input[i].buffer_offset;
					pipe_resource_reference(&vb[i].buffer.resource, input[i].buffer.resource);
					new_buffer_mask |= 1 << i;
					r600_context_add_resource_size(ctx, input[i].buffer.resource);
				} else {
					pipe_resource_reference(&vb[i].buffer.resource, NULL);
					disable_mask |= 1 << i;
				}
			}
		}
	} else {
		for (i = 0; i < count; i++) {
			pipe_resource_reference(&vb[i].buffer.resource, NULL);
		}
		disable_mask = ((1ull << count) - 1);
	}

	disable_mask <<= start_slot;
	new_buffer_mask <<= start_slot;

	rctx->vertex_buffer_state.enabled_mask &= ~disable_mask;
	rctx->vertex_buffer_state.dirty_mask &= rctx->vertex_buffer_state.enabled_mask;
	rctx->vertex_buffer_state.enabled_mask |= new_buffer_mask;
	rctx->vertex_buffer_state.dirty_mask |= new_buffer_mask;

	r600_vertex_buffers_dirty(rctx);
}

void r600_sampler_views_dirty(struct r600_context *rctx,
			      struct r600_samplerview_state *state)
{
	if (state->dirty_mask) {
		state->atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 14 : 13) *
			util_bitcount(state->dirty_mask);
		r600_mark_atom_dirty(rctx, &state->atom);
	}
}
static void r600_set_sampler_views(struct pipe_context *pipe,
				   enum pipe_shader_type shader,
				   unsigned start, unsigned count,
				   struct pipe_sampler_view **views)
{
	struct r600_context *rctx = (struct r600_context *) pipe;
	struct r600_textures_info *dst = &rctx->samplers[shader];
	struct r600_pipe_sampler_view **rviews = (struct r600_pipe_sampler_view **)views;
	uint32_t dirty_sampler_states_mask = 0;
	unsigned i;
	/* This sets a 1 bit for every texture with index >= count. */
	uint32_t disable_mask = ~((1ull << count) - 1);
	/* These are the new textures set by this function. */
	uint32_t new_mask = 0;
	/* Set textures with index >= count to NULL. */
	uint32_t remaining_mask;

	assert(start == 0); /* XXX fix below */

	if (!views) {
		disable_mask = ~0u;
		count = 0;
	}

	remaining_mask = dst->views.enabled_mask & disable_mask;

	while (remaining_mask) {
		i = u_bit_scan(&remaining_mask);
		assert(dst->views.views[i]);

		pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
	}

	for (i = 0; i < count; i++) {
		if (rviews[i] == dst->views.views[i]) {
			continue;
		}

		if (rviews[i]) {
			struct r600_texture *rtex =
				(struct r600_texture*)rviews[i]->base.texture;
			bool is_buffer = rviews[i]->base.texture->target == PIPE_BUFFER;

			if (!is_buffer && rtex->db_compatible) {
				dst->views.compressed_depthtex_mask |= 1 << i;
			} else {
				dst->views.compressed_depthtex_mask &= ~(1 << i);
			}

			/* Track compressed colorbuffers. */
			if (!is_buffer && rtex->cmask.size) {
				dst->views.compressed_colortex_mask |= 1 << i;
			} else {
				dst->views.compressed_colortex_mask &= ~(1 << i);
			}

			/* Changing from array to non-array textures and vice versa requires
			 * updating TEX_ARRAY_OVERRIDE in sampler states on R6xx-R7xx. */
			if (rctx->b.chip_class <= R700 &&
			    (dst->states.enabled_mask & (1 << i)) &&
			    (rviews[i]->base.texture->target == PIPE_TEXTURE_1D_ARRAY ||
			     rviews[i]->base.texture->target == PIPE_TEXTURE_2D_ARRAY) != dst->is_array_sampler[i]) {
				dirty_sampler_states_mask |= 1 << i;
			}

			pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], views[i]);
			new_mask |= 1 << i;
			r600_context_add_resource_size(pipe, views[i]->texture);
		} else {
			pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
			disable_mask |= 1 << i;
		}
	}

	dst->views.enabled_mask &= ~disable_mask;
	dst->views.dirty_mask &= dst->views.enabled_mask;
	dst->views.enabled_mask |= new_mask;
	dst->views.dirty_mask |= new_mask;
	dst->views.compressed_depthtex_mask &= dst->views.enabled_mask;
	dst->views.compressed_colortex_mask &= dst->views.enabled_mask;
	dst->views.dirty_buffer_constants = TRUE;
	r600_sampler_views_dirty(rctx, &dst->views);

	if (dirty_sampler_states_mask) {
		dst->states.dirty_mask |= dirty_sampler_states_mask;
		r600_sampler_states_dirty(rctx, &dst->states);
	}
}

static void r600_update_compressed_colortex_mask(struct r600_samplerview_state *views)
{
	uint32_t mask = views->enabled_mask;

	while (mask) {
		unsigned i = u_bit_scan(&mask);
		struct pipe_resource *res = views->views[i]->base.texture;

		if (res && res->target != PIPE_BUFFER) {
			struct r600_texture *rtex = (struct r600_texture *)res;

			if (rtex->cmask.size) {
				views->compressed_colortex_mask |= 1 << i;
			} else {
				views->compressed_colortex_mask &= ~(1 << i);
			}
		}
	}
}
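/* Each stage's first hw atomic counter index is the sum of the counter
 * counts of the stages placed before it in the fixed allocation order
 * PS, VS, GS, TES (see the cascading sums below). For example, if the PS
 * declares 2 HW atomics and the VS declares 1, the GS variant is keyed
 * with first_atomic_counter = 3 (an illustrative reading of the code,
 * numbers made up).
 */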
static int r600_get_hw_atomic_count(const struct pipe_context *ctx,
				    enum pipe_shader_type shader)
{
	const struct r600_context *rctx = (struct r600_context *)ctx;
	int value = 0;
	switch (shader) {
	case PIPE_SHADER_FRAGMENT:
	case PIPE_SHADER_COMPUTE:
	default:
		break;
	case PIPE_SHADER_VERTEX:
		value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC];
		break;
	case PIPE_SHADER_GEOMETRY:
		value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
			rctx->vs_shader->info.file_count[TGSI_FILE_HW_ATOMIC];
		break;
	case PIPE_SHADER_TESS_EVAL:
		value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
			rctx->vs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
			(rctx->gs_shader ? rctx->gs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] : 0);
		break;
	case PIPE_SHADER_TESS_CTRL:
		value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
			rctx->vs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
			(rctx->gs_shader ? rctx->gs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] : 0) +
			rctx->tes_shader->info.file_count[TGSI_FILE_HW_ATOMIC];
		break;
	}
	return value;
}

static void r600_update_compressed_colortex_mask_images(struct r600_image_state *images)
{
	uint32_t mask = images->enabled_mask;

	while (mask) {
		unsigned i = u_bit_scan(&mask);
		struct pipe_resource *res = images->views[i].base.resource;

		if (res && res->target != PIPE_BUFFER) {
			struct r600_texture *rtex = (struct r600_texture *)res;

			if (rtex->cmask.size) {
				images->compressed_colortex_mask |= 1 << i;
			} else {
				images->compressed_colortex_mask &= ~(1 << i);
			}
		}
	}
}

/* Compute the key for the hw shader variant */
static inline void r600_shader_selector_key(const struct pipe_context *ctx,
					    const struct r600_pipe_shader_selector *sel,
					    union r600_shader_key *key)
{
	const struct r600_context *rctx = (struct r600_context *)ctx;
	memset(key, 0, sizeof(*key));

	switch (sel->type) {
	case PIPE_SHADER_VERTEX: {
		key->vs.as_ls = (rctx->tes_shader != NULL);
		if (!key->vs.as_ls)
			key->vs.as_es = (rctx->gs_shader != NULL);

		if (rctx->ps_shader->current->shader.gs_prim_id_input && !rctx->gs_shader) {
			key->vs.as_gs_a = true;
			key->vs.prim_id_out = rctx->ps_shader->current->shader.input[rctx->ps_shader->current->shader.ps_prim_id_input].spi_sid;
		}
		key->vs.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_VERTEX);
		break;
	}
	case PIPE_SHADER_GEOMETRY:
		key->gs.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_GEOMETRY);
		key->gs.tri_strip_adj_fix = rctx->gs_tri_strip_adj_fix;
		break;
	case PIPE_SHADER_FRAGMENT: {
		if (rctx->ps_shader->info.images_declared)
			key->ps.image_size_const_offset = util_last_bit(rctx->samplers[PIPE_SHADER_FRAGMENT].views.enabled_mask);
		key->ps.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_FRAGMENT);
		key->ps.color_two_side = rctx->rasterizer && rctx->rasterizer->two_side;
		key->ps.alpha_to_one = rctx->alpha_to_one &&
				       rctx->rasterizer && rctx->rasterizer->multisample_enable &&
				       !rctx->framebuffer.cb0_is_integer;
		key->ps.nr_cbufs = rctx->framebuffer.state.nr_cbufs;
		/* Dual-source blending only makes sense with nr_cbufs == 1. */
		if (key->ps.nr_cbufs == 1 && rctx->dual_src_blend)
			key->ps.nr_cbufs = 2;
		break;
	}
	case PIPE_SHADER_TESS_EVAL:
		key->tes.as_es = (rctx->gs_shader != NULL);
		key->tes.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_TESS_EVAL);
		break;
	case PIPE_SHADER_TESS_CTRL:
		key->tcs.prim_mode = rctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
		key->tcs.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_TESS_CTRL);
		break;
	case PIPE_SHADER_COMPUTE:
		break;
	default:
		assert(0);
	}
}
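/* Shader variants hang off the selector as a singly linked list, with
 * sel->current acting as a one-entry MRU cache: the hit path in
 * r600_shader_select below is a single memcmp of the key, and a variant
 * found further down the list is unlinked and moved to the front. So a
 * state change that does not affect the key costs one key build plus one
 * memcmp (a summary of the function below, not extra behavior).
 */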
/* Select the hw shader variant depending on the current state.
 * (*dirty) is set to true if the current variant was changed. */
int r600_shader_select(struct pipe_context *ctx,
		       struct r600_pipe_shader_selector* sel,
		       bool *dirty)
{
	union r600_shader_key key;
	struct r600_pipe_shader *shader = NULL;
	int r;

	r600_shader_selector_key(ctx, sel, &key);

	/* Check if we don't need to change anything.
	 * This path is also used for most shaders that don't need multiple
	 * variants, it will cost just a computation of the key and this
	 * test. */
	if (likely(sel->current && memcmp(&sel->current->key, &key, sizeof(key)) == 0)) {
		return 0;
	}

	/* lookup if we have other variants in the list */
	if (sel->num_shaders > 1) {
		struct r600_pipe_shader *p = sel->current, *c = p->next_variant;

		while (c && memcmp(&c->key, &key, sizeof(key)) != 0) {
			p = c;
			c = c->next_variant;
		}

		if (c) {
			p->next_variant = c->next_variant;
			shader = c;
		}
	}

	if (unlikely(!shader)) {
		shader = CALLOC(1, sizeof(struct r600_pipe_shader));
		shader->selector = sel;

		r = r600_pipe_shader_create(ctx, shader, key);
		if (unlikely(r)) {
			R600_ERR("Failed to build shader variant (type=%u) %d\n",
				 sel->type, r);
			sel->current = NULL;
			FREE(shader);
			return r;
		}

		/* We don't know the value of nr_ps_max_color_exports until we've
		 * built at least one variant, so we may need to recompute the key
		 * after building the first variant. */
		if (sel->type == PIPE_SHADER_FRAGMENT &&
		    sel->num_shaders == 0) {
			sel->nr_ps_max_color_exports = shader->shader.nr_ps_max_color_exports;
			r600_shader_selector_key(ctx, sel, &key);
		}

		memcpy(&shader->key, &key, sizeof(key));
		sel->num_shaders++;
	}

	if (dirty)
		*dirty = true;

	shader->next_variant = sel->current;
	sel->current = shader;

	return 0;
}

struct r600_pipe_shader_selector *r600_create_shader_state_tokens(struct pipe_context *ctx,
								  const struct tgsi_token *tokens,
								  unsigned pipe_shader_type)
{
	struct r600_pipe_shader_selector *sel = CALLOC_STRUCT(r600_pipe_shader_selector);

	sel->type = pipe_shader_type;
	sel->tokens = tgsi_dup_tokens(tokens);
	tgsi_scan_shader(tokens, &sel->info);
	return sel;
}
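/* For VS-as-LS and TCS, outputs live in LDS rather than in rings; the two
 * masks built below record which generic outputs and which per-patch
 * outputs (TESSINNER/TESSOUTER/PATCH) are written, one bit per
 * r600_get_lds_unique_index() slot, so that later stages can compute LDS
 * offsets. This is a summary of the loop below, not additional behavior.
 */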
static void *r600_create_shader_state(struct pipe_context *ctx,
				      const struct pipe_shader_state *state,
				      unsigned pipe_shader_type)
{
	int i;
	struct r600_pipe_shader_selector *sel = r600_create_shader_state_tokens(ctx, state->tokens, pipe_shader_type);

	sel->so = state->stream_output;

	switch (pipe_shader_type) {
	case PIPE_SHADER_GEOMETRY:
		sel->gs_output_prim =
			sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
		sel->gs_max_out_vertices =
			sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
		sel->gs_num_invocations =
			sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];
		break;
	case PIPE_SHADER_VERTEX:
	case PIPE_SHADER_TESS_CTRL:
		sel->lds_patch_outputs_written_mask = 0;
		sel->lds_outputs_written_mask = 0;

		for (i = 0; i < sel->info.num_outputs; i++) {
			unsigned name = sel->info.output_semantic_name[i];
			unsigned index = sel->info.output_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_TESSINNER:
			case TGSI_SEMANTIC_TESSOUTER:
			case TGSI_SEMANTIC_PATCH:
				sel->lds_patch_outputs_written_mask |=
					1ull << r600_get_lds_unique_index(name, index);
				break;
			default:
				sel->lds_outputs_written_mask |=
					1ull << r600_get_lds_unique_index(name, index);
			}
		}
		break;
	default:
		break;
	}

	return sel;
}

static void *r600_create_ps_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_FRAGMENT);
}

static void *r600_create_vs_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_VERTEX);
}

static void *r600_create_gs_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_GEOMETRY);
}

static void *r600_create_tcs_state(struct pipe_context *ctx,
				   const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_TESS_CTRL);
}

static void *r600_create_tes_state(struct pipe_context *ctx,
				   const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_TESS_EVAL);
}

static void r600_bind_ps_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state)
		state = rctx->dummy_pixel_shader;

	rctx->ps_shader = (struct r600_pipe_shader_selector *)state;
}

static struct tgsi_shader_info *r600_get_vs_info(struct r600_context *rctx)
{
	if (rctx->gs_shader)
		return &rctx->gs_shader->info;
	else if (rctx->tes_shader)
		return &rctx->tes_shader->info;
	else if (rctx->vs_shader)
		return &rctx->vs_shader->info;
	else
		return NULL;
}

static void r600_bind_vs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state || rctx->vs_shader == state)
		return;

	rctx->vs_shader = (struct r600_pipe_shader_selector *)state;
	r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));

	if (rctx->vs_shader->so.num_outputs)
		rctx->b.streamout.stride_in_dw = rctx->vs_shader->so.stride;
}

static void r600_bind_gs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (state == rctx->gs_shader)
		return;

	rctx->gs_shader = (struct r600_pipe_shader_selector *)state;
	r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));

	if (!state)
		return;

	if (rctx->gs_shader->so.num_outputs)
		rctx->b.streamout.stride_in_dw = rctx->gs_shader->so.stride;
}

static void r600_bind_tcs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->tcs_shader = (struct r600_pipe_shader_selector *)state;
}

static void r600_bind_tes_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (state == rctx->tes_shader)
		return;

	rctx->tes_shader = (struct r600_pipe_shader_selector *)state;
	r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));

	if (!state)
		return;

	if (rctx->tes_shader->so.num_outputs)
		rctx->b.streamout.stride_in_dw = rctx->tes_shader->so.stride;
}
void r600_delete_shader_selector(struct pipe_context *ctx,
				 struct r600_pipe_shader_selector *sel)
{
	struct r600_pipe_shader *p = sel->current, *c;

	while (p) {
		c = p->next_variant;
		r600_pipe_shader_destroy(ctx, p);
		free(p);
		p = c;
	}

	free(sel->tokens);
	free(sel);
}


static void r600_delete_ps_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->ps_shader == sel) {
		rctx->ps_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_vs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->vs_shader == sel) {
		rctx->vs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}


static void r600_delete_gs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->gs_shader == sel) {
		rctx->gs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_tcs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->tcs_shader == sel) {
		rctx->tcs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_tes_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->tes_shader == sel) {
		rctx->tes_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state)
{
	if (state->dirty_mask) {
		state->atom.num_dw = rctx->b.chip_class >= EVERGREEN ?
			util_bitcount(state->dirty_mask) * 20 :
			util_bitcount(state->dirty_mask) * 19;
		r600_mark_atom_dirty(rctx, &state->atom);
	}
}
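/* Constant buffers arrive either as a hw buffer (resource + offset) or as
 * a user pointer. User buffers are copied into the stream uploader with
 * 256-byte alignment; on big-endian hosts each dword is converted to
 * little endian first (the R600_BIG_ENDIAN path below), since that is the
 * byte order the GPU reads.
 */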
static void r600_set_constant_buffer(struct pipe_context *ctx,
				     enum pipe_shader_type shader, uint index,
				     const struct pipe_constant_buffer *input)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
	struct pipe_constant_buffer *cb;
	const uint8_t *ptr;

	/* Note that the state tracker can unbind constant buffers by
	 * passing NULL here.
	 */
	if (unlikely(!input || (!input->buffer && !input->user_buffer))) {
		state->enabled_mask &= ~(1 << index);
		state->dirty_mask &= ~(1 << index);
		pipe_resource_reference(&state->cb[index].buffer, NULL);
		return;
	}

	cb = &state->cb[index];
	cb->buffer_size = input->buffer_size;

	ptr = input->user_buffer;

	if (ptr) {
		/* Upload the user buffer. */
		if (R600_BIG_ENDIAN) {
			uint32_t *tmpPtr;
			unsigned i, size = input->buffer_size;

			if (!(tmpPtr = malloc(size))) {
				R600_ERR("Failed to allocate BE swap buffer.\n");
				return;
			}

			for (i = 0; i < size / 4; ++i) {
				tmpPtr[i] = util_cpu_to_le32(((uint32_t *)ptr)[i]);
			}

			u_upload_data(ctx->stream_uploader, 0, size, 256,
				      tmpPtr, &cb->buffer_offset, &cb->buffer);
			free(tmpPtr);
		} else {
			u_upload_data(ctx->stream_uploader, 0,
				      input->buffer_size, 256, ptr,
				      &cb->buffer_offset, &cb->buffer);
		}
		/* Account for it in the GTT budget. */
		rctx->b.gtt += input->buffer_size;
	} else {
		/* Set up the hw buffer. */
		cb->buffer_offset = input->buffer_offset;
		pipe_resource_reference(&cb->buffer, input->buffer);
		r600_context_add_resource_size(ctx, input->buffer);
	}

	state->enabled_mask |= 1 << index;
	state->dirty_mask |= 1 << index;
	r600_constant_buffers_dirty(rctx, state);
}

static void r600_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
{
	struct r600_context *rctx = (struct r600_context*)pipe;

	if (rctx->sample_mask.sample_mask == (uint16_t)sample_mask)
		return;

	rctx->sample_mask.sample_mask = sample_mask;
	r600_mark_atom_dirty(rctx, &rctx->sample_mask.atom);
}
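/* The driver-private constant buffer (bound at R600_BUFFER_INFO_CONST_BUFFER)
 * multiplexes several kinds of data, at most one set per shader stage:
 * user clip planes (VS), sample positions (PS), block/grid sizes (CS) and
 * default tessellation levels (TCS), with the texture buffer constants
 * built by r600_setup_buffer_constants() / eg_setup_buffer_constants()
 * appended after them. The function below re-uploads it only for stages
 * whose dirty flags are set.
 */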
void r600_update_driver_const_buffers(struct r600_context *rctx, bool compute_only)
{
	int sh, size;
	void *ptr;
	struct pipe_constant_buffer cb;
	int start, end;

	start = compute_only ? PIPE_SHADER_COMPUTE : 0;
	end = compute_only ? PIPE_SHADER_TYPES : PIPE_SHADER_COMPUTE;

	for (sh = start; sh < end; sh++) {
		struct r600_shader_driver_constants_info *info = &rctx->driver_consts[sh];
		if (!info->vs_ucp_dirty &&
		    !info->texture_const_dirty &&
		    !info->ps_sample_pos_dirty &&
		    !info->tcs_default_levels_dirty &&
		    !info->cs_block_grid_size_dirty)
			continue;

		ptr = info->constants;
		size = info->alloc_size;
		if (info->vs_ucp_dirty) {
			assert(sh == PIPE_SHADER_VERTEX);
			if (!size) {
				ptr = rctx->clip_state.state.ucp;
				size = R600_UCP_SIZE;
			} else {
				memcpy(ptr, rctx->clip_state.state.ucp, R600_UCP_SIZE);
			}
			info->vs_ucp_dirty = false;
		}

		else if (info->ps_sample_pos_dirty) {
			assert(sh == PIPE_SHADER_FRAGMENT);
			if (!size) {
				ptr = rctx->sample_positions;
				size = R600_UCP_SIZE;
			} else {
				memcpy(ptr, rctx->sample_positions, R600_UCP_SIZE);
			}
			info->ps_sample_pos_dirty = false;
		}

		else if (info->cs_block_grid_size_dirty) {
			assert(sh == PIPE_SHADER_COMPUTE);
			if (!size) {
				ptr = rctx->cs_block_grid_sizes;
				size = R600_CS_BLOCK_GRID_SIZE;
			} else {
				memcpy(ptr, rctx->cs_block_grid_sizes, R600_CS_BLOCK_GRID_SIZE);
			}
			info->cs_block_grid_size_dirty = false;
		}

		else if (info->tcs_default_levels_dirty) {
			/* We'd only really need this for the default tcs shader. */
			assert(sh == PIPE_SHADER_TESS_CTRL);
			if (!size) {
				ptr = rctx->tess_state;
				size = R600_TCS_DEFAULT_LEVELS_SIZE;
			} else {
				memcpy(ptr, rctx->tess_state, R600_TCS_DEFAULT_LEVELS_SIZE);
			}
			info->tcs_default_levels_dirty = false;
		}

		if (info->texture_const_dirty) {
			assert(ptr);
			assert(size);
			if (sh == PIPE_SHADER_VERTEX)
				memcpy(ptr, rctx->clip_state.state.ucp, R600_UCP_SIZE);
			if (sh == PIPE_SHADER_FRAGMENT)
				memcpy(ptr, rctx->sample_positions, R600_UCP_SIZE);
			if (sh == PIPE_SHADER_COMPUTE)
				memcpy(ptr, rctx->cs_block_grid_sizes, R600_CS_BLOCK_GRID_SIZE);
			if (sh == PIPE_SHADER_TESS_CTRL)
				memcpy(ptr, rctx->tess_state, R600_TCS_DEFAULT_LEVELS_SIZE);
		}
		info->texture_const_dirty = false;

		cb.buffer = NULL;
		cb.user_buffer = ptr;
		cb.buffer_offset = 0;
		cb.buffer_size = size;
		rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, &cb);
		pipe_resource_reference(&cb.buffer, NULL);
	}
}

static void *r600_alloc_buf_consts(struct r600_context *rctx, int shader_type,
				   unsigned array_size, uint32_t *base_offset)
{
	struct r600_shader_driver_constants_info *info = &rctx->driver_consts[shader_type];
	if (array_size + R600_UCP_SIZE > info->alloc_size) {
		info->constants = realloc(info->constants, array_size + R600_UCP_SIZE);
		info->alloc_size = array_size + R600_UCP_SIZE;
	}
	memset(info->constants + (R600_UCP_SIZE / 4), 0, array_size);
	info->texture_const_dirty = true;
	*base_offset = R600_UCP_SIZE;
	return info->constants;
}

/*
 * On r600/700 hw we don't have vertex fetch swizzle; though a TBO doesn't
 * require full swizzles, it does need masking and setting alpha to one,
 * so we set up a set of 5 constants with the masks + alpha value. In the
 * shader we then AND the 4 components with 0xffffffff or 0, and OR the
 * alpha with the value given here.
 * We use a 6th constant to store the txq buffer size, and the 7th slot
 * for the number of cube layers in a cube map array.
 */
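/* Per-view layout of the constants written below, 8 dwords per enabled
 * view (a sketch of what the loop stores, offsets relative to the view's
 * base):
 *
 *	[0..3]	channel masks: 0xffffffff for present channels, else 0
 *	[4]	alpha fill value: 1 / fui(1.0) for <4-channel formats, else 0
 *	[5]	element count for txq (buffer size / block size)
 *	[6]	texture array_size / 6 (cube map array layer count)
 *	[7]	unused padding
 */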
static void r600_setup_buffer_constants(struct r600_context *rctx, int shader_type)
{
	struct r600_textures_info *samplers = &rctx->samplers[shader_type];
	int bits;
	uint32_t array_size;
	int i, j;
	uint32_t *constants;
	uint32_t base_offset;

	if (!samplers->views.dirty_buffer_constants)
		return;

	samplers->views.dirty_buffer_constants = FALSE;

	bits = util_last_bit(samplers->views.enabled_mask);
	array_size = bits * 8 * sizeof(uint32_t);

	constants = r600_alloc_buf_consts(rctx, shader_type, array_size, &base_offset);

	for (i = 0; i < bits; i++) {
		if (samplers->views.enabled_mask & (1 << i)) {
			int offset = (base_offset / 4) + i * 8;
			const struct util_format_description *desc;
			desc = util_format_description(samplers->views.views[i]->base.format);

			for (j = 0; j < 4; j++)
				if (j < desc->nr_channels)
					constants[offset+j] = 0xffffffff;
				else
					constants[offset+j] = 0x0;
			if (desc->nr_channels < 4) {
				if (desc->channel[0].pure_integer)
					constants[offset+4] = 1;
				else
					constants[offset+4] = fui(1.0);
			} else
				constants[offset + 4] = 0;

			constants[offset + 5] = samplers->views.views[i]->base.u.buf.size /
						util_format_get_blocksize(samplers->views.views[i]->base.format);
			constants[offset + 6] = samplers->views.views[i]->base.texture->array_size / 6;
		}
	}
}

/* On evergreen we only store one value: the number of cube layers in a
 * cube map array.
 */
void eg_setup_buffer_constants(struct r600_context *rctx, int shader_type)
{
	struct r600_textures_info *samplers = &rctx->samplers[shader_type];
	struct r600_image_state *images = NULL;
	int bits, sview_bits, img_bits;
	uint32_t array_size;
	int i;
	uint32_t *constants;
	uint32_t base_offset;

	if (shader_type == PIPE_SHADER_FRAGMENT) {
		images = &rctx->fragment_images;
	} else if (shader_type == PIPE_SHADER_COMPUTE) {
		images = &rctx->compute_images;
	}

	if (!samplers->views.dirty_buffer_constants &&
	    !(images && images->dirty_buffer_constants))
		return;

	if (images)
		images->dirty_buffer_constants = FALSE;
	samplers->views.dirty_buffer_constants = FALSE;

	bits = sview_bits = util_last_bit(samplers->views.enabled_mask);
	if (images)
		bits += util_last_bit(images->enabled_mask);
	img_bits = bits;

	array_size = bits * sizeof(uint32_t);

	constants = r600_alloc_buf_consts(rctx, shader_type, array_size,
					  &base_offset);

	for (i = 0; i < sview_bits; i++) {
		if (samplers->views.enabled_mask & (1 << i)) {
			uint32_t offset = (base_offset / 4) + i;
			constants[offset] = samplers->views.views[i]->base.texture->array_size / 6;
		}
	}
	if (images) {
		for (i = sview_bits; i < img_bits; i++) {
			int idx = i - sview_bits;
			if (images->enabled_mask & (1 << idx)) {
				uint32_t offset = (base_offset / 4) + i;
				constants[offset] = images->views[idx].base.resource->array_size / 6;
			}
		}
	}
}
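/* Layout of rctx->sample_positions as filled below: 4 floats per sample,
 * [0..1] the xy position returned by get_sample_position() and [2..3] the
 * same position re-centered around zero (x - 0.5, y - 0.5), which is what
 * interpolateAtSample needs. E.g. with 4x MSAA, 4 * 4 floats are in use
 * (a description of the loop below, not extra behavior).
 */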
/* set sample xy locations as array of fragment shader constants */
void r600_set_sample_locations_constant_buffer(struct r600_context *rctx)
{
	struct pipe_context *ctx = &rctx->b.b;

	assert(rctx->framebuffer.nr_samples < R600_UCP_SIZE);
	assert(rctx->framebuffer.nr_samples <= ARRAY_SIZE(rctx->sample_positions)/4);

	memset(rctx->sample_positions, 0, 4 * 4 * 16);
	for (unsigned i = 0; i < rctx->framebuffer.nr_samples; i++) {
		ctx->get_sample_position(ctx, rctx->framebuffer.nr_samples, i, &rctx->sample_positions[4*i]);
		/* Also fill in center-zeroed positions used for interpolateAtSample */
		rctx->sample_positions[4*i + 2] = rctx->sample_positions[4*i + 0] - 0.5f;
		rctx->sample_positions[4*i + 3] = rctx->sample_positions[4*i + 1] - 0.5f;
	}

	rctx->driver_consts[PIPE_SHADER_FRAGMENT].ps_sample_pos_dirty = true;
}

static void update_shader_atom(struct pipe_context *ctx,
			       struct r600_shader_state *state,
			       struct r600_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	state->shader = shader;
	if (shader) {
		state->atom.num_dw = shader->command_buffer.num_dw;
		r600_context_add_resource_size(ctx, (struct pipe_resource *)shader->bo);
	} else {
		state->atom.num_dw = 0;
	}
	r600_mark_atom_dirty(rctx, &state->atom);
}

static void update_gs_block_state(struct r600_context *rctx, unsigned enable)
{
	if (rctx->shader_stages.geom_enable != enable) {
		rctx->shader_stages.geom_enable = enable;
		r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
	}

	if (rctx->gs_rings.enable != enable) {
		rctx->gs_rings.enable = enable;
		r600_mark_atom_dirty(rctx, &rctx->gs_rings.atom);

		if (enable && !rctx->gs_rings.esgs_ring.buffer) {
			unsigned size = 0x1C000;
			rctx->gs_rings.esgs_ring.buffer =
					pipe_buffer_create(rctx->b.b.screen, 0,
							   PIPE_USAGE_DEFAULT, size);
			rctx->gs_rings.esgs_ring.buffer_size = size;

			size = 0x4000000;

			rctx->gs_rings.gsvs_ring.buffer =
					pipe_buffer_create(rctx->b.b.screen, 0,
							   PIPE_USAGE_DEFAULT, size);
			rctx->gs_rings.gsvs_ring.buffer_size = size;
		}

		if (enable) {
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_GEOMETRY,
					R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.esgs_ring);
			if (rctx->tes_shader) {
				r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
						R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.gsvs_ring);
			} else {
				r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
						R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.gsvs_ring);
			}
		} else {
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_GEOMETRY,
					R600_GS_RING_CONST_BUFFER, NULL);
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
					R600_GS_RING_CONST_BUFFER, NULL);
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
					R600_GS_RING_CONST_BUFFER, NULL);
		}
	}
}
static void r600_update_clip_state(struct r600_context *rctx,
				   struct r600_pipe_shader *current)
{
	if (current->pa_cl_vs_out_cntl != rctx->clip_misc_state.pa_cl_vs_out_cntl ||
	    current->shader.clip_dist_write != rctx->clip_misc_state.clip_dist_write ||
	    current->shader.cull_dist_write != rctx->clip_misc_state.cull_dist_write ||
	    current->shader.vs_position_window_space != rctx->clip_misc_state.clip_disable ||
	    current->shader.vs_out_viewport != rctx->clip_misc_state.vs_out_viewport) {
		rctx->clip_misc_state.pa_cl_vs_out_cntl = current->pa_cl_vs_out_cntl;
		rctx->clip_misc_state.clip_dist_write = current->shader.clip_dist_write;
		rctx->clip_misc_state.cull_dist_write = current->shader.cull_dist_write;
		rctx->clip_misc_state.clip_disable = current->shader.vs_position_window_space;
		rctx->clip_misc_state.vs_out_viewport = current->shader.vs_out_viewport;
		r600_mark_atom_dirty(rctx, &rctx->clip_misc_state.atom);
	}
}

static void r600_generate_fixed_func_tcs(struct r600_context *rctx)
{
	struct ureg_src const0, const1;
	struct ureg_dst tessouter, tessinner;
	struct ureg_program *ureg = ureg_create(PIPE_SHADER_TESS_CTRL);

	if (!ureg)
		return; /* if we get here, we're screwed */

	assert(!rctx->fixed_func_tcs_shader);

	ureg_DECL_constant2D(ureg, 0, 1, R600_BUFFER_INFO_CONST_BUFFER);
	const0 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 0),
				    R600_BUFFER_INFO_CONST_BUFFER);
	const1 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 1),
				    R600_BUFFER_INFO_CONST_BUFFER);

	tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
	tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);

	ureg_MOV(ureg, tessouter, const0);
	ureg_MOV(ureg, tessinner, const1);
	ureg_END(ureg);

	rctx->fixed_func_tcs_shader =
		ureg_create_shader_and_destroy(ureg, &rctx->b.b);
}
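/* The fixed-function TCS generated above is a pass-through hull shader:
 * it copies the default outer/inner tessellation levels from constants 0
 * and 1 of the R600_BUFFER_INFO_CONST_BUFFER (kept current through the
 * tcs_default_levels path in r600_update_driver_const_buffers) to
 * TESSOUTER/TESSINNER. It is built lazily the first time a TES is bound
 * without a TCS (see r600_update_derived_state below).
 */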
void r600_update_compressed_resource_state(struct r600_context *rctx, bool compute_only)
{
	unsigned i;
	unsigned counter;

	counter = p_atomic_read(&rctx->screen->b.compressed_colortex_counter);
	if (counter != rctx->b.last_compressed_colortex_counter) {
		rctx->b.last_compressed_colortex_counter = counter;

		if (compute_only) {
			r600_update_compressed_colortex_mask(&rctx->samplers[PIPE_SHADER_COMPUTE].views);
		} else {
			for (i = 0; i < PIPE_SHADER_TYPES; ++i) {
				r600_update_compressed_colortex_mask(&rctx->samplers[i].views);
			}
		}
		if (!compute_only)
			r600_update_compressed_colortex_mask_images(&rctx->fragment_images);
		r600_update_compressed_colortex_mask_images(&rctx->compute_images);
	}

	/* Decompress textures if needed. */
	for (i = 0; i < PIPE_SHADER_TYPES; i++) {
		struct r600_samplerview_state *views = &rctx->samplers[i].views;

		if (compute_only)
			if (i != PIPE_SHADER_COMPUTE)
				continue;
		if (views->compressed_depthtex_mask) {
			r600_decompress_depth_textures(rctx, views);
		}
		if (views->compressed_colortex_mask) {
			r600_decompress_color_textures(rctx, views);
		}
	}

	{
		struct r600_image_state *istate;

		if (!compute_only) {
			istate = &rctx->fragment_images;
			if (istate->compressed_depthtex_mask)
				r600_decompress_depth_images(rctx, istate);
			if (istate->compressed_colortex_mask)
				r600_decompress_color_images(rctx, istate);
		}

		istate = &rctx->compute_images;
		if (istate->compressed_depthtex_mask)
			r600_decompress_depth_images(rctx, istate);
		if (istate->compressed_colortex_mask)
			r600_decompress_color_images(rctx, istate);
	}
}
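/* Scratch ring sizing below, worked through once: itemsize converts the
 * per-thread scratch need from dwords to bytes, and the ring must cover
 * nthreads * num_pipes * num_ses threads with a further factor of 4,
 * rounded up to 256 bytes. E.g. scratch_space_needed = 8, 128 threads,
 * 2 pipes, 1 SE gives align(32 * 128 * 2 * 1 * 4, 256) = 32768 bytes
 * (illustrative numbers, not from a real chip table).
 */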
radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1)); 1696 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); 1697 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH)); 1698 } 1699} 1700 1701void r600_setup_scratch_buffers(struct r600_context *rctx) { 1702 static const struct { 1703 unsigned ring_base; 1704 unsigned item_size; 1705 unsigned ring_size; 1706 } regs[R600_NUM_HW_STAGES] = { 1707 [R600_HW_STAGE_PS] = { R_008C68_SQ_PSTMP_RING_BASE, R_0288BC_SQ_PSTMP_RING_ITEMSIZE, R_008C6C_SQ_PSTMP_RING_SIZE }, 1708 [R600_HW_STAGE_VS] = { R_008C60_SQ_VSTMP_RING_BASE, R_0288B8_SQ_VSTMP_RING_ITEMSIZE, R_008C64_SQ_VSTMP_RING_SIZE }, 1709 [R600_HW_STAGE_GS] = { R_008C58_SQ_GSTMP_RING_BASE, R_0288B4_SQ_GSTMP_RING_ITEMSIZE, R_008C5C_SQ_GSTMP_RING_SIZE }, 1710 [R600_HW_STAGE_ES] = { R_008C50_SQ_ESTMP_RING_BASE, R_0288B0_SQ_ESTMP_RING_ITEMSIZE, R_008C54_SQ_ESTMP_RING_SIZE } 1711 }; 1712 1713 for (unsigned i = 0; i < R600_NUM_HW_STAGES; i++) { 1714 struct r600_pipe_shader *stage = rctx->hw_shader_stages[i].shader; 1715 1716 if (stage && unlikely(stage->scratch_space_needed)) { 1717 r600_setup_scratch_area_for_shader(rctx, stage, 1718 &rctx->scratch_buffers[i], regs[i].ring_base, regs[i].item_size, regs[i].ring_size); 1719 } 1720 } 1721} 1722 1723#define SELECT_SHADER_OR_FAIL(x) do { \ 1724 r600_shader_select(ctx, rctx->x##_shader, &x##_dirty); \ 1725 if (unlikely(!rctx->x##_shader->current)) \ 1726 return false; \ 1727 } while(0) 1728 1729#define UPDATE_SHADER(hw, sw) do { \ 1730 if (sw##_dirty || (rctx->hw_shader_stages[(hw)].shader != rctx->sw##_shader->current)) \ 1731 update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], rctx->sw##_shader->current); \ 1732 } while(0) 1733 1734#define UPDATE_SHADER_CLIP(hw, sw) do { \ 1735 if (sw##_dirty || (rctx->hw_shader_stages[(hw)].shader != rctx->sw##_shader->current)) { \ 1736 update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], rctx->sw##_shader->current); \ 1737 clip_so_current = rctx->sw##_shader->current; \ 1738 } \ 1739 } while(0) 1740 1741#define UPDATE_SHADER_GS(hw, hw2, sw) do { \ 1742 if (sw##_dirty || (rctx->hw_shader_stages[(hw)].shader != rctx->sw##_shader->current)) { \ 1743 update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], rctx->sw##_shader->current); \ 1744 update_shader_atom(ctx, &rctx->hw_shader_stages[(hw2)], rctx->sw##_shader->current->gs_copy_shader); \ 1745 clip_so_current = rctx->sw##_shader->current->gs_copy_shader; \ 1746 } \ 1747 } while(0) 1748 1749#define SET_NULL_SHADER(hw) do { \ 1750 if (rctx->hw_shader_stages[(hw)].shader) \ 1751 update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], NULL); \ 1752 } while (0) 1753 1754static bool r600_update_derived_state(struct r600_context *rctx) 1755{ 1756 struct pipe_context * ctx = (struct pipe_context*)rctx; 1757 bool ps_dirty = false, vs_dirty = false, gs_dirty = false; 1758 bool tcs_dirty = false, tes_dirty = false, fixed_func_tcs_dirty = false; 1759 bool blend_disable; 1760 bool need_buf_const; 1761 struct r600_pipe_shader *clip_so_current = NULL; 1762 1763 if (!rctx->blitter->running) 1764 r600_update_compressed_resource_state(rctx, false); 1765 1766 SELECT_SHADER_OR_FAIL(ps); 1767 1768 r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom); 1769 1770 update_gs_block_state(rctx, rctx->gs_shader != NULL); 1771 1772 if (rctx->gs_shader) 1773 SELECT_SHADER_OR_FAIL(gs); 1774 1775 /* Hull Shader */ 1776 if (rctx->tcs_shader) { 1777 SELECT_SHADER_OR_FAIL(tcs); 1778 1779 UPDATE_SHADER(EG_HW_STAGE_HS, tcs); 1780 } else if (rctx->tes_shader) { 1781 if 
(!rctx->fixed_func_tcs_shader) { 1782 r600_generate_fixed_func_tcs(rctx); 1783 if (!rctx->fixed_func_tcs_shader) 1784 return false; 1785 1786 } 1787 SELECT_SHADER_OR_FAIL(fixed_func_tcs); 1788 1789 UPDATE_SHADER(EG_HW_STAGE_HS, fixed_func_tcs); 1790 } else 1791 SET_NULL_SHADER(EG_HW_STAGE_HS); 1792 1793 if (rctx->tes_shader) { 1794 SELECT_SHADER_OR_FAIL(tes); 1795 } 1796 1797 SELECT_SHADER_OR_FAIL(vs); 1798 1799 if (rctx->gs_shader) { 1800 if (!rctx->shader_stages.geom_enable) { 1801 rctx->shader_stages.geom_enable = true; 1802 r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom); 1803 } 1804 1805 /* gs_shader provides GS and VS (copy shader) */ 1806 UPDATE_SHADER_GS(R600_HW_STAGE_GS, R600_HW_STAGE_VS, gs); 1807 1808 /* vs_shader is used as ES */ 1809 1810 if (rctx->tes_shader) { 1811 /* VS goes to LS, TES goes to ES */ 1812 UPDATE_SHADER(R600_HW_STAGE_ES, tes); 1813 UPDATE_SHADER(EG_HW_STAGE_LS, vs); 1814 } else { 1815 /* vs_shader is used as ES */ 1816 UPDATE_SHADER(R600_HW_STAGE_ES, vs); 1817 SET_NULL_SHADER(EG_HW_STAGE_LS); 1818 } 1819 } else { 1820 if (unlikely(rctx->hw_shader_stages[R600_HW_STAGE_GS].shader)) { 1821 SET_NULL_SHADER(R600_HW_STAGE_GS); 1822 SET_NULL_SHADER(R600_HW_STAGE_ES); 1823 rctx->shader_stages.geom_enable = false; 1824 r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom); 1825 } 1826 1827 if (rctx->tes_shader) { 1828 /* if TES is loaded and there is no GS, TES runs on the hw VS and VS runs on the hw LS */ 1829 UPDATE_SHADER_CLIP(R600_HW_STAGE_VS, tes); 1830 UPDATE_SHADER(EG_HW_STAGE_LS, vs); 1831 } else { 1832 SET_NULL_SHADER(EG_HW_STAGE_LS); 1833 UPDATE_SHADER_CLIP(R600_HW_STAGE_VS, vs); 1834 } 1835 } 1836 1837 /* 1838 * XXX: I believe there's some fatal flaw in the dirty state logic when 1839 * enabling/disabling tes. 1840 * VS/ES share all buffer/resource/sampler slots. If TES is enabled, 1841 * it will therefore overwrite the VS slots. If it now gets disabled, 1842 * the VS needs to rebind all buffer/resource/sampler slots - not only 1843 * has TES overwritten the corresponding slots, but while the VS was 1844 * operating as LS, the things with corresponding dirty bits got bound 1845 * to LS slots and won't reflect what is dirty for the VS stage even if 1846 * TES didn't overwrite it. The story for re-enabled TES is similar. 1847 * In any case, we're not allowed to submit any TES state when 1848 * TES is disabled (the state tracker may not do this, but it looks 1849 * like an optimization, not something that can be relied on). 1850 */ 1851 1852 /* Update clip misc state.
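 * For reference: clip_so_current, set by the UPDATE_SHADER_CLIP/_GS
 * macros above, points at whichever shader ends up in the hardware VS
 * slot and therefore owns the clip/cull distance and streamout setup -
 * the GS copy shader when a GS is bound, the TES when tessellating
 * without a GS, and the plain VS otherwise.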
*/ 1853 if (clip_so_current) { 1854 r600_update_clip_state(rctx, clip_so_current); 1855 rctx->b.streamout.enabled_stream_buffers_mask = clip_so_current->enabled_stream_buffers_mask; 1856 } 1857 1858 if (unlikely(ps_dirty || rctx->hw_shader_stages[R600_HW_STAGE_PS].shader != rctx->ps_shader->current || 1859 rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable || 1860 rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade)) { 1861 1862 if (rctx->cb_misc_state.nr_ps_color_outputs != rctx->ps_shader->current->nr_ps_color_outputs || 1863 rctx->cb_misc_state.ps_color_export_mask != rctx->ps_shader->current->ps_color_export_mask) { 1864 rctx->cb_misc_state.nr_ps_color_outputs = rctx->ps_shader->current->nr_ps_color_outputs; 1865 rctx->cb_misc_state.ps_color_export_mask = rctx->ps_shader->current->ps_color_export_mask; 1866 r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom); 1867 } 1868 1869 if (rctx->b.chip_class <= R700) { 1870 bool multiwrite = rctx->ps_shader->current->shader.fs_write_all; 1871 1872 if (rctx->cb_misc_state.multiwrite != multiwrite) { 1873 rctx->cb_misc_state.multiwrite = multiwrite; 1874 r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom); 1875 } 1876 } 1877 1878 if (unlikely(!ps_dirty && rctx->ps_shader && rctx->rasterizer && 1879 ((rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable) || 1880 (rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade)))) { 1881 1882 if (rctx->b.chip_class >= EVERGREEN) 1883 evergreen_update_ps_state(ctx, rctx->ps_shader->current); 1884 else 1885 r600_update_ps_state(ctx, rctx->ps_shader->current); 1886 } 1887 1888 r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom); 1889 } 1890 UPDATE_SHADER(R600_HW_STAGE_PS, ps); 1891 1892 if (rctx->b.chip_class >= EVERGREEN) { 1893 evergreen_update_db_shader_control(rctx); 1894 } else { 1895 r600_update_db_shader_control(rctx); 1896 } 1897 1898 /* For each shader stage that needs to spill, set up buffer for MEM_SCRATCH */ 1899 if (rctx->b.chip_class >= EVERGREEN) { 1900 evergreen_setup_scratch_buffers(rctx); 1901 } else { 1902 r600_setup_scratch_buffers(rctx); 1903 } 1904 1905 /* on R600 we stuff masks + txq info into one constant buffer */ 1906 /* on evergreen we only need a txq info one */ 1907 if (rctx->ps_shader) { 1908 need_buf_const = rctx->ps_shader->current->shader.uses_tex_buffers || rctx->ps_shader->current->shader.has_txq_cube_array_z_comp; 1909 if (need_buf_const) { 1910 if (rctx->b.chip_class < EVERGREEN) 1911 r600_setup_buffer_constants(rctx, PIPE_SHADER_FRAGMENT); 1912 else 1913 eg_setup_buffer_constants(rctx, PIPE_SHADER_FRAGMENT); 1914 } 1915 } 1916 1917 if (rctx->vs_shader) { 1918 need_buf_const = rctx->vs_shader->current->shader.uses_tex_buffers || rctx->vs_shader->current->shader.has_txq_cube_array_z_comp; 1919 if (need_buf_const) { 1920 if (rctx->b.chip_class < EVERGREEN) 1921 r600_setup_buffer_constants(rctx, PIPE_SHADER_VERTEX); 1922 else 1923 eg_setup_buffer_constants(rctx, PIPE_SHADER_VERTEX); 1924 } 1925 } 1926 1927 if (rctx->gs_shader) { 1928 need_buf_const = rctx->gs_shader->current->shader.uses_tex_buffers || rctx->gs_shader->current->shader.has_txq_cube_array_z_comp; 1929 if (need_buf_const) { 1930 if (rctx->b.chip_class < EVERGREEN) 1931 r600_setup_buffer_constants(rctx, PIPE_SHADER_GEOMETRY); 1932 else 1933 eg_setup_buffer_constants(rctx, PIPE_SHADER_GEOMETRY); 1934 } 1935 } 1936 1937 if (rctx->tes_shader) { 1938 assert(rctx->b.chip_class >= EVERGREEN); 1939 need_buf_const = 
rctx->tes_shader->current->shader.uses_tex_buffers || 1940 rctx->tes_shader->current->shader.has_txq_cube_array_z_comp; 1941 if (need_buf_const) { 1942 eg_setup_buffer_constants(rctx, PIPE_SHADER_TESS_EVAL); 1943 } 1944 if (rctx->tcs_shader) { 1945 need_buf_const = rctx->tcs_shader->current->shader.uses_tex_buffers || 1946 rctx->tcs_shader->current->shader.has_txq_cube_array_z_comp; 1947 if (need_buf_const) { 1948 eg_setup_buffer_constants(rctx, PIPE_SHADER_TESS_CTRL); 1949 } 1950 } 1951 } 1952 1953 r600_update_driver_const_buffers(rctx, false); 1954 1955 if (rctx->b.chip_class < EVERGREEN && rctx->ps_shader && rctx->vs_shader) { 1956 if (!r600_adjust_gprs(rctx)) { 1957 /* discard rendering */ 1958 return false; 1959 } 1960 } 1961 1962 if (rctx->b.chip_class == EVERGREEN) { 1963 if (!evergreen_adjust_gprs(rctx)) { 1964 /* discard rendering */ 1965 return false; 1966 } 1967 } 1968 1969 blend_disable = (rctx->dual_src_blend && 1970 rctx->ps_shader->current->nr_ps_color_outputs < 2); 1971 1972 if (blend_disable != rctx->force_blend_disable) { 1973 rctx->force_blend_disable = blend_disable; 1974 r600_bind_blend_state_internal(rctx, 1975 rctx->blend_state.cso, 1976 blend_disable); 1977 } 1978 1979 return true; 1980} 1981 1982void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom) 1983{ 1984 struct radeon_cmdbuf *cs = rctx->b.gfx.cs; 1985 struct r600_clip_misc_state *state = &rctx->clip_misc_state; 1986 1987 radeon_set_context_reg(cs, R_028810_PA_CL_CLIP_CNTL, 1988 state->pa_cl_clip_cntl | 1989 (state->clip_dist_write ? 0 : state->clip_plane_enable & 0x3F) | 1990 S_028810_CLIP_DISABLE(state->clip_disable)); 1991 radeon_set_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL, 1992 state->pa_cl_vs_out_cntl | 1993 (state->clip_plane_enable & state->clip_dist_write) | 1994 (state->cull_dist_write << 8)); 1995 /* reuse needs to be set off if we write oViewport */ 1996 if (rctx->b.chip_class >= EVERGREEN) 1997 radeon_set_context_reg(cs, R_028AB4_VGT_REUSE_OFF, 1998 S_028AB4_REUSE_OFF(state->vs_out_viewport)); 1999} 2000 2001/* rast_prim is the primitive type after GS. */ 2002static inline void r600_emit_rasterizer_prim_state(struct r600_context *rctx) 2003{ 2004 struct radeon_cmdbuf *cs = rctx->b.gfx.cs; 2005 enum pipe_prim_type rast_prim = rctx->current_rast_prim; 2006 2007 /* Skip this if not rendering lines. */ 2008 if (rast_prim != PIPE_PRIM_LINES && 2009 rast_prim != PIPE_PRIM_LINE_LOOP && 2010 rast_prim != PIPE_PRIM_LINE_STRIP && 2011 rast_prim != PIPE_PRIM_LINES_ADJACENCY && 2012 rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY) 2013 return; 2014 2015 if (rast_prim == rctx->last_rast_prim) 2016 return; 2017 2018 /* For lines, reset the stipple pattern at each primitive. Otherwise, 2019 * reset the stipple pattern at each packet (line strips, line loops). 2020 */ 2021 radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE, 2022 S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2) | 2023 (rctx->rasterizer ? rctx->rasterizer->pa_sc_line_stipple : 0)); 2024 rctx->last_rast_prim = rast_prim; 2025} 2026 2027static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info) 2028{ 2029 struct r600_context *rctx = (struct r600_context *)ctx; 2030 struct pipe_resource *indexbuf = info->has_user_indices ? 
NULL : info->index.resource; 2031 struct radeon_cmdbuf *cs = rctx->b.gfx.cs; 2032 bool render_cond_bit = rctx->b.render_cond && !rctx->b.render_cond_force_off; 2033 bool has_user_indices = info->has_user_indices; 2034 uint64_t mask; 2035 unsigned num_patches, dirty_tex_counter, index_offset = 0; 2036 unsigned index_size = info->index_size; 2037 int index_bias; 2038 struct r600_shader_atomic combined_atomics[8]; 2039 uint8_t atomic_used_mask; 2040 2041 if (!info->indirect && !info->count && (index_size || !info->count_from_stream_output)) { 2042 return; 2043 } 2044 2045 if (unlikely(!rctx->vs_shader)) { 2046 assert(0); 2047 return; 2048 } 2049 if (unlikely(!rctx->ps_shader && 2050 (!rctx->rasterizer || !rctx->rasterizer->rasterizer_discard))) { 2051 assert(0); 2052 return; 2053 } 2054 2055 /* make sure the gfx ring is the only one active */ 2056 if (radeon_emitted(rctx->b.dma.cs, 0)) { 2057 rctx->b.dma.flush(rctx, PIPE_FLUSH_ASYNC, NULL); 2058 } 2059 2060 if (rctx->cmd_buf_is_compute) { 2061 rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL); 2062 rctx->cmd_buf_is_compute = false; 2063 } 2064 2065 /* Re-emit the framebuffer state if needed. */ 2066 dirty_tex_counter = p_atomic_read(&rctx->b.screen->dirty_tex_counter); 2067 if (unlikely(dirty_tex_counter != rctx->b.last_dirty_tex_counter)) { 2068 rctx->b.last_dirty_tex_counter = dirty_tex_counter; 2069 r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom); 2070 rctx->framebuffer.do_update_surf_dirtiness = true; 2071 } 2072 2073 if (rctx->gs_shader) { 2074 /* Determine whether the GS triangle strip adjacency fix should 2075 * be applied. Rotate every other triangle if 2076 * - triangle strips with adjacency are fed to the GS and 2077 * - primitive restart is disabled (the rotation doesn't help 2078 * when the restart occurs after an odd number of triangles). 2079 */ 2080 bool gs_tri_strip_adj_fix = 2081 !rctx->tes_shader && 2082 info->mode == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY && 2083 !info->primitive_restart; 2084 if (gs_tri_strip_adj_fix != rctx->gs_tri_strip_adj_fix) 2085 rctx->gs_tri_strip_adj_fix = gs_tri_strip_adj_fix; 2086 } 2087 if (!r600_update_derived_state(rctx)) { 2088 /* pointless to render because the current draw 2089 * command cannot be carried out 2090 */ 2091 return; 2092 } 2093 2094 rctx->current_rast_prim = (rctx->gs_shader)? rctx->gs_shader->gs_output_prim 2095 : (rctx->tes_shader)? rctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] 2096 : info->mode; 2097 2098 if (rctx->b.chip_class >= EVERGREEN) { 2099 evergreen_emit_atomic_buffer_setup_count(rctx, NULL, combined_atomics, &atomic_used_mask); 2100 } 2101 2102 if (index_size) { 2103 index_offset += info->start * index_size; 2104 2105 /* Translate 8-bit indices to 16-bit. */ 2106 if (unlikely(index_size == 1)) { 2107 struct pipe_resource *out_buffer = NULL; 2108 unsigned out_offset; 2109 void *ptr; 2110 unsigned start, count; 2111 2112 if (likely(!info->indirect)) { 2113 start = 0; 2114 count = info->count; 2115 } 2116 else { 2117 /* Have to get start/count from the indirect buffer, slow path ahead...
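 * (Descriptive note.) The mapping below synchronizes with the rings, so
 * it waits until the GPU has finished writing the buffer, then reads the
 * arguments on the CPU. The indexing (count = data[0], first index =
 * data[2]) matches the layout of GL's DrawElementsIndirectCommand, i.e.
 * { count, instance_count, first_index, base_vertex, base_instance },
 * with start pre-scaled to a byte offset via data[2] * index_size.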
*/ 2118 struct r600_resource *indirect_resource = (struct r600_resource *)info->indirect->buffer; 2119 unsigned *data = r600_buffer_map_sync_with_rings(&rctx->b, indirect_resource, 2120 PIPE_TRANSFER_READ); 2121 if (data) { 2122 data += info->indirect->offset / sizeof(unsigned); 2123 start = data[2] * index_size; 2124 count = data[0]; 2125 } 2126 else { 2127 start = 0; 2128 count = 0; 2129 } 2130 } 2131 2132 u_upload_alloc(ctx->stream_uploader, start, count * 2, 2133 256, &out_offset, &out_buffer, &ptr); 2134 if (unlikely(!ptr)) 2135 return; 2136 2137 util_shorten_ubyte_elts_to_userptr( 2138 &rctx->b.b, info, 0, 0, index_offset, count, ptr); 2139 2140 indexbuf = out_buffer; 2141 index_offset = out_offset; 2142 index_size = 2; 2143 has_user_indices = false; 2144 } 2145 2146 /* Upload the index buffer. 2147 * The upload is skipped for small index counts on little-endian machines 2148 * and the indices are emitted via PKT3_DRAW_INDEX_IMMD. 2149 * Indirect draws never use immediate indices. 2150 * Note: Instanced rendering in combination with immediate indices hangs. */ 2151 if (has_user_indices && (R600_BIG_ENDIAN || info->indirect || 2152 info->instance_count > 1 || 2153 info->count*index_size > 20)) { 2154 indexbuf = NULL; 2155 u_upload_data(ctx->stream_uploader, 0, 2156 info->count * index_size, 256, 2157 info->index.user, &index_offset, &indexbuf); 2158 has_user_indices = false; 2159 } 2160 index_bias = info->index_bias; 2161 } else { 2162 index_bias = info->start; 2163 } 2164 2165 /* Set the index offset and primitive restart. */ 2166 if (rctx->vgt_state.vgt_multi_prim_ib_reset_en != info->primitive_restart || 2167 rctx->vgt_state.vgt_multi_prim_ib_reset_indx != info->restart_index || 2168 rctx->vgt_state.vgt_indx_offset != index_bias || 2169 (rctx->vgt_state.last_draw_was_indirect && !info->indirect)) { 2170 rctx->vgt_state.vgt_multi_prim_ib_reset_en = info->primitive_restart; 2171 rctx->vgt_state.vgt_multi_prim_ib_reset_indx = info->restart_index; 2172 rctx->vgt_state.vgt_indx_offset = index_bias; 2173 r600_mark_atom_dirty(rctx, &rctx->vgt_state.atom); 2174 } 2175 2176 /* Workaround for hardware deadlock on certain R600 ASICs: write into a CB register. */ 2177 if (rctx->b.chip_class == R600) { 2178 rctx->b.flags |= R600_CONTEXT_PS_PARTIAL_FLUSH; 2179 r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom); 2180 } 2181 2182 if (rctx->b.chip_class >= EVERGREEN) 2183 evergreen_setup_tess_constants(rctx, info, &num_patches); 2184 2185 /* Emit states. */ 2186 r600_need_cs_space(rctx, has_user_indices ? 5 : 0, TRUE, util_bitcount(atomic_used_mask)); 2187 r600_flush_emit(rctx); 2188 2189 mask = rctx->dirty_atoms; 2190 while (mask != 0) { 2191 r600_emit_atom(rctx, rctx->atoms[u_bit_scan64(&mask)]); 2192 } 2193 2194 if (rctx->b.chip_class >= EVERGREEN) { 2195 evergreen_emit_atomic_buffer_setup(rctx, false, combined_atomics, atomic_used_mask); 2196 } 2197 2198 if (rctx->b.chip_class == CAYMAN) { 2199 /* Copied from radeonsi. 
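 * A hedged reading of these heuristics (inherited from radeonsi):
 * PRIMGROUP_SIZE sets how many primitives one VGT processes before the
 * hardware switches to the other (128 recommended without a GS, 64 with
 * one); SWITCH_ON_EOP forces the switch at end-of-packet, which line
 * stipple appears to require; and PARTIAL_VS_WAVE_ON is needed while
 * streamout is active.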
*/ 2200 unsigned primgroup_size = 128; /* recommended without a GS */ 2201 bool ia_switch_on_eop = false; 2202 bool partial_vs_wave = false; 2203 2204 if (rctx->gs_shader) 2205 primgroup_size = 64; /* recommended with a GS */ 2206 2207 if ((rctx->rasterizer && rctx->rasterizer->pa_sc_line_stipple) || 2208 (rctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) { 2209 ia_switch_on_eop = true; 2210 } 2211 2212 if (r600_get_strmout_en(&rctx->b)) 2213 partial_vs_wave = true; 2214 2215 radeon_set_context_reg(cs, CM_R_028AA8_IA_MULTI_VGT_PARAM, 2216 S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) | 2217 S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) | 2218 S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1)); 2219 } 2220 2221 if (rctx->b.chip_class >= EVERGREEN) { 2222 uint32_t ls_hs_config = evergreen_get_ls_hs_config(rctx, info, 2223 num_patches); 2224 2225 evergreen_set_ls_hs_config(rctx, cs, ls_hs_config); 2226 evergreen_set_lds_alloc(rctx, cs, rctx->lds_alloc); 2227 } 2228 2229 /* On R6xx, CULL_FRONT=1 culls all points, lines, and rectangles, 2230 * even though it should have no effect on those. */ 2231 if (rctx->b.chip_class == R600 && rctx->rasterizer) { 2232 unsigned su_sc_mode_cntl = rctx->rasterizer->pa_su_sc_mode_cntl; 2233 unsigned prim = info->mode; 2234 2235 if (rctx->gs_shader) { 2236 prim = rctx->gs_shader->gs_output_prim; 2237 } 2238 prim = r600_conv_prim_to_gs_out(prim); /* decrease the number of types to 3 */ 2239 2240 if (prim == V_028A6C_OUTPRIM_TYPE_POINTLIST || 2241 prim == V_028A6C_OUTPRIM_TYPE_LINESTRIP || 2242 info->mode == R600_PRIM_RECTANGLE_LIST) { 2243 su_sc_mode_cntl &= C_028814_CULL_FRONT; 2244 } 2245 radeon_set_context_reg(cs, R_028814_PA_SU_SC_MODE_CNTL, su_sc_mode_cntl); 2246 } 2247 2248 /* Update start instance. */ 2249 if (!info->indirect && rctx->last_start_instance != info->start_instance) { 2250 radeon_set_ctl_const(cs, R_03CFF4_SQ_VTX_START_INST_LOC, info->start_instance); 2251 rctx->last_start_instance = info->start_instance; 2252 } 2253 2254 /* Update the primitive type. */ 2255 if (rctx->last_primitive_type != info->mode) { 2256 r600_emit_rasterizer_prim_state(rctx); 2257 radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, 2258 r600_conv_pipe_prim(info->mode)); 2259 2260 rctx->last_primitive_type = info->mode; 2261 } 2262 2263 /* Draw packets. */ 2264 if (likely(!info->indirect)) { 2265 radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0)); 2266 radeon_emit(cs, info->instance_count); 2267 } else { 2268 uint64_t va = r600_resource(info->indirect->buffer)->gpu_address; 2269 assert(rctx->b.chip_class >= EVERGREEN); 2270 2271 // Invalidate so non-indirect draw calls reset this state 2272 rctx->vgt_state.last_draw_was_indirect = true; 2273 rctx->last_start_instance = -1; 2274 2275 radeon_emit(cs, PKT3(EG_PKT3_SET_BASE, 2, 0)); 2276 radeon_emit(cs, EG_DRAW_INDEX_INDIRECT_PATCH_TABLE_BASE); 2277 radeon_emit(cs, va); 2278 radeon_emit(cs, (va >> 32UL) & 0xFF); 2279 2280 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 2281 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, 2282 (struct r600_resource*)info->indirect->buffer, 2283 RADEON_USAGE_READ, 2284 RADEON_PRIO_DRAW_INDIRECT)); 2285 } 2286 2287 if (index_size) { 2288 radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0)); 2289 radeon_emit(cs, index_size == 4 ? 2290 (VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) : 2291 (VGT_INDEX_16 | (R600_BIG_ENDIAN ? 
VGT_DMA_SWAP_16_BIT : 0))); 2292 2293 if (has_user_indices) { 2294 unsigned size_bytes = info->count*index_size; 2295 unsigned size_dw = align(size_bytes, 4) / 4; 2296 radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, render_cond_bit)); 2297 radeon_emit(cs, info->count); 2298 radeon_emit(cs, V_0287F0_DI_SRC_SEL_IMMEDIATE); 2299 radeon_emit_array(cs, info->index.user, size_dw); 2300 } else { 2301 uint64_t va = r600_resource(indexbuf)->gpu_address + index_offset; 2302 2303 if (likely(!info->indirect)) { 2304 radeon_emit(cs, PKT3(PKT3_DRAW_INDEX, 3, render_cond_bit)); 2305 radeon_emit(cs, va); 2306 radeon_emit(cs, (va >> 32UL) & 0xFF); 2307 radeon_emit(cs, info->count); 2308 radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA); 2309 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 2310 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, 2311 (struct r600_resource*)indexbuf, 2312 RADEON_USAGE_READ, 2313 RADEON_PRIO_INDEX_BUFFER)); 2314 } 2315 else { 2316 uint32_t max_size = (indexbuf->width0 - index_offset) / index_size; 2317 2318 radeon_emit(cs, PKT3(EG_PKT3_INDEX_BASE, 1, 0)); 2319 radeon_emit(cs, va); 2320 radeon_emit(cs, (va >> 32UL) & 0xFF); 2321 2322 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 2323 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, 2324 (struct r600_resource*)indexbuf, 2325 RADEON_USAGE_READ, 2326 RADEON_PRIO_INDEX_BUFFER)); 2327 2328 radeon_emit(cs, PKT3(EG_PKT3_INDEX_BUFFER_SIZE, 0, 0)); 2329 radeon_emit(cs, max_size); 2330 2331 radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDEX_INDIRECT, 1, render_cond_bit)); 2332 radeon_emit(cs, info->indirect->offset); 2333 radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA); 2334 } 2335 } 2336 } else { 2337 if (unlikely(info->count_from_stream_output)) { 2338 struct r600_so_target *t = (struct r600_so_target*)info->count_from_stream_output; 2339 uint64_t va = t->buf_filled_size->gpu_address + t->buf_filled_size_offset; 2340 2341 radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw); 2342 2343 radeon_emit(cs, PKT3(PKT3_COPY_DW, 4, 0)); 2344 radeon_emit(cs, COPY_DW_SRC_IS_MEM | COPY_DW_DST_IS_REG); 2345 radeon_emit(cs, va & 0xFFFFFFFFUL); /* src address lo */ 2346 radeon_emit(cs, (va >> 32UL) & 0xFFUL); /* src address hi */ 2347 radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2); /* dst register */ 2348 radeon_emit(cs, 0); /* unused */ 2349 2350 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 2351 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, 2352 t->buf_filled_size, RADEON_USAGE_READ, 2353 RADEON_PRIO_SO_FILLED_SIZE)); 2354 } 2355 2356 if (likely(!info->indirect)) { 2357 radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit)); 2358 radeon_emit(cs, info->count); 2359 } 2360 else { 2361 radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDIRECT, 1, render_cond_bit)); 2362 radeon_emit(cs, info->indirect->offset); 2363 } 2364 radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX | 2365 (info->count_from_stream_output ? 
S_0287F0_USE_OPAQUE(1) : 0)); 2366 } 2367 2368 /* Workaround: SMX returns CONTEXT_DONE too early */ 2369 if (rctx->b.family == CHIP_R600 || 2370 rctx->b.family == CHIP_RV610 || 2371 rctx->b.family == CHIP_RV630 || 2372 rctx->b.family == CHIP_RV635) { 2373 /* if we have a GS shader or streamout enabled, 2374 we need to wait for idle after every draw */ 2375 if (rctx->gs_shader || r600_get_strmout_en(&rctx->b)) { 2376 radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1)); 2377 } 2378 } 2379 2380 /* Workaround: ES ring rolling over at EOP */ 2381 if (rctx->b.chip_class == R600) { 2382 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); 2383 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SQ_NON_EVENT)); 2384 } 2385 2386 2387 if (rctx->b.chip_class >= EVERGREEN) 2388 evergreen_emit_atomic_buffer_save(rctx, false, combined_atomics, &atomic_used_mask); 2389 2390 if (rctx->trace_buf) 2391 eg_trace_emit(rctx); 2392 2393 if (rctx->framebuffer.do_update_surf_dirtiness) { 2394 /* Set the depth buffer as dirty. */ 2395 if (rctx->framebuffer.state.zsbuf) { 2396 struct pipe_surface *surf = rctx->framebuffer.state.zsbuf; 2397 struct r600_texture *rtex = (struct r600_texture *)surf->texture; 2398 2399 rtex->dirty_level_mask |= 1 << surf->u.tex.level; 2400 2401 if (rtex->surface.has_stencil) 2402 rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level; 2403 } 2404 if (rctx->framebuffer.compressed_cb_mask) { 2405 struct pipe_surface *surf; 2406 struct r600_texture *rtex; 2407 unsigned mask = rctx->framebuffer.compressed_cb_mask; 2408 2409 do { 2410 unsigned i = u_bit_scan(&mask); 2411 surf = rctx->framebuffer.state.cbufs[i]; 2412 rtex = (struct r600_texture*)surf->texture; 2413 2414 rtex->dirty_level_mask |= 1 << surf->u.tex.level; 2415 2416 } while (mask); 2417 } 2418 rctx->framebuffer.do_update_surf_dirtiness = false; 2419 } 2420 2421 if (index_size && indexbuf != info->index.resource) 2422 pipe_resource_reference(&indexbuf, NULL); 2423 rctx->b.num_draw_calls++; 2424} 2425 2426uint32_t r600_translate_stencil_op(int s_op) 2427{ 2428 switch (s_op) { 2429 case PIPE_STENCIL_OP_KEEP: 2430 return V_028800_STENCIL_KEEP; 2431 case PIPE_STENCIL_OP_ZERO: 2432 return V_028800_STENCIL_ZERO; 2433 case PIPE_STENCIL_OP_REPLACE: 2434 return V_028800_STENCIL_REPLACE; 2435 case PIPE_STENCIL_OP_INCR: 2436 return V_028800_STENCIL_INCR; 2437 case PIPE_STENCIL_OP_DECR: 2438 return V_028800_STENCIL_DECR; 2439 case PIPE_STENCIL_OP_INCR_WRAP: 2440 return V_028800_STENCIL_INCR_WRAP; 2441 case PIPE_STENCIL_OP_DECR_WRAP: 2442 return V_028800_STENCIL_DECR_WRAP; 2443 case PIPE_STENCIL_OP_INVERT: 2444 return V_028800_STENCIL_INVERT; 2445 default: 2446 R600_ERR("Unknown stencil op %d", s_op); 2447 assert(0); 2448 break; 2449 } 2450 return 0; 2451} 2452 2453uint32_t r600_translate_fill(uint32_t func) 2454{ 2455 switch (func) { 2456 case PIPE_POLYGON_MODE_FILL: 2457 return 2; 2458 case PIPE_POLYGON_MODE_LINE: 2459 return 1; 2460 case PIPE_POLYGON_MODE_POINT: 2461 return 0; 2462 default: 2463 assert(0); 2464 return 0; 2465 } 2466} 2467 2468unsigned r600_tex_wrap(unsigned wrap) 2469{ 2470 switch (wrap) { 2471 default: 2472 case PIPE_TEX_WRAP_REPEAT: 2473 return V_03C000_SQ_TEX_WRAP; 2474 case PIPE_TEX_WRAP_CLAMP: 2475 return V_03C000_SQ_TEX_CLAMP_HALF_BORDER; 2476 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: 2477 return V_03C000_SQ_TEX_CLAMP_LAST_TEXEL; 2478 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: 2479 return V_03C000_SQ_TEX_CLAMP_BORDER; 2480 case PIPE_TEX_WRAP_MIRROR_REPEAT: 2481 return V_03C000_SQ_TEX_MIRROR; 2482 case PIPE_TEX_WRAP_MIRROR_CLAMP: 2483 return
V_03C000_SQ_TEX_MIRROR_ONCE_HALF_BORDER; 2484 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: 2485 return V_03C000_SQ_TEX_MIRROR_ONCE_LAST_TEXEL; 2486 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: 2487 return V_03C000_SQ_TEX_MIRROR_ONCE_BORDER; 2488 } 2489} 2490 2491unsigned r600_tex_mipfilter(unsigned filter) 2492{ 2493 switch (filter) { 2494 case PIPE_TEX_MIPFILTER_NEAREST: 2495 return V_03C000_SQ_TEX_Z_FILTER_POINT; 2496 case PIPE_TEX_MIPFILTER_LINEAR: 2497 return V_03C000_SQ_TEX_Z_FILTER_LINEAR; 2498 default: 2499 case PIPE_TEX_MIPFILTER_NONE: 2500 return V_03C000_SQ_TEX_Z_FILTER_NONE; 2501 } 2502} 2503 2504unsigned r600_tex_compare(unsigned compare) 2505{ 2506 switch (compare) { 2507 default: 2508 case PIPE_FUNC_NEVER: 2509 return V_03C000_SQ_TEX_DEPTH_COMPARE_NEVER; 2510 case PIPE_FUNC_LESS: 2511 return V_03C000_SQ_TEX_DEPTH_COMPARE_LESS; 2512 case PIPE_FUNC_EQUAL: 2513 return V_03C000_SQ_TEX_DEPTH_COMPARE_EQUAL; 2514 case PIPE_FUNC_LEQUAL: 2515 return V_03C000_SQ_TEX_DEPTH_COMPARE_LESSEQUAL; 2516 case PIPE_FUNC_GREATER: 2517 return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATER; 2518 case PIPE_FUNC_NOTEQUAL: 2519 return V_03C000_SQ_TEX_DEPTH_COMPARE_NOTEQUAL; 2520 case PIPE_FUNC_GEQUAL: 2521 return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL; 2522 case PIPE_FUNC_ALWAYS: 2523 return V_03C000_SQ_TEX_DEPTH_COMPARE_ALWAYS; 2524 } 2525} 2526 2527static bool wrap_mode_uses_border_color(unsigned wrap, bool linear_filter) 2528{ 2529 return wrap == PIPE_TEX_WRAP_CLAMP_TO_BORDER || 2530 wrap == PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER || 2531 (linear_filter && 2532 (wrap == PIPE_TEX_WRAP_CLAMP || 2533 wrap == PIPE_TEX_WRAP_MIRROR_CLAMP)); 2534} 2535 2536bool sampler_state_needs_border_color(const struct pipe_sampler_state *state) 2537{ 2538 bool linear_filter = state->min_img_filter != PIPE_TEX_FILTER_NEAREST || 2539 state->mag_img_filter != PIPE_TEX_FILTER_NEAREST; 2540 2541 return (state->border_color.ui[0] || state->border_color.ui[1] || 2542 state->border_color.ui[2] || state->border_color.ui[3]) && 2543 (wrap_mode_uses_border_color(state->wrap_s, linear_filter) || 2544 wrap_mode_uses_border_color(state->wrap_t, linear_filter) || 2545 wrap_mode_uses_border_color(state->wrap_r, linear_filter)); 2546} 2547 2548void r600_emit_shader(struct r600_context *rctx, struct r600_atom *a) 2549{ 2550 2551 struct radeon_cmdbuf *cs = rctx->b.gfx.cs; 2552 struct r600_pipe_shader *shader = ((struct r600_shader_state*)a)->shader; 2553 2554 if (!shader) 2555 return; 2556 2557 r600_emit_command_buffer(cs, &shader->command_buffer); 2558 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 2559 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, shader->bo, 2560 RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY)); 2561} 2562 2563unsigned r600_get_swizzle_combined(const unsigned char *swizzle_format, 2564 const unsigned char *swizzle_view, 2565 boolean vtx) 2566{ 2567 unsigned i; 2568 unsigned char swizzle[4]; 2569 unsigned result = 0; 2570 const uint32_t tex_swizzle_shift[4] = { 2571 16, 19, 22, 25, 2572 }; 2573 const uint32_t vtx_swizzle_shift[4] = { 2574 3, 6, 9, 12, 2575 }; 2576 const uint32_t swizzle_bit[4] = { 2577 0, 1, 2, 3, 2578 }; 2579 const uint32_t *swizzle_shift = tex_swizzle_shift; 2580 2581 if (vtx) 2582 swizzle_shift = vtx_swizzle_shift; 2583 2584 if (swizzle_view) { 2585 util_format_compose_swizzles(swizzle_format, swizzle_view, swizzle); 2586 } else { 2587 memcpy(swizzle, swizzle_format, 4); 2588 } 2589 2590 /* Get swizzle. 
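 * Each component selector is packed at a per-component bit position:
 * bits 16/19/22/25 of the texture resource word, or bits 3/6/9/12 for
 * vertex fetch. An identity XYZW swizzle on a texture thus packs as
 *   (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25)
 * while SQ_SEL_0/SQ_SEL_1 inject constant 0.0/1.0 components.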
*/ 2591 for (i = 0; i < 4; i++) { 2592 switch (swizzle[i]) { 2593 case PIPE_SWIZZLE_Y: 2594 result |= swizzle_bit[1] << swizzle_shift[i]; 2595 break; 2596 case PIPE_SWIZZLE_Z: 2597 result |= swizzle_bit[2] << swizzle_shift[i]; 2598 break; 2599 case PIPE_SWIZZLE_W: 2600 result |= swizzle_bit[3] << swizzle_shift[i]; 2601 break; 2602 case PIPE_SWIZZLE_0: 2603 result |= V_038010_SQ_SEL_0 << swizzle_shift[i]; 2604 break; 2605 case PIPE_SWIZZLE_1: 2606 result |= V_038010_SQ_SEL_1 << swizzle_shift[i]; 2607 break; 2608 default: /* PIPE_SWIZZLE_X */ 2609 result |= swizzle_bit[0] << swizzle_shift[i]; 2610 } 2611 } 2612 return result; 2613} 2614 2615/* texture format translate */ 2616uint32_t r600_translate_texformat(struct pipe_screen *screen, 2617 enum pipe_format format, 2618 const unsigned char *swizzle_view, 2619 uint32_t *word4_p, uint32_t *yuv_format_p, 2620 bool do_endian_swap) 2621{ 2622 struct r600_screen *rscreen = (struct r600_screen *)screen; 2623 uint32_t result = 0, word4 = 0, yuv_format = 0; 2624 const struct util_format_description *desc; 2625 boolean uniform = TRUE; 2626 bool is_srgb_valid = FALSE; 2627 const unsigned char swizzle_xxxx[4] = {0, 0, 0, 0}; 2628 const unsigned char swizzle_yyyy[4] = {1, 1, 1, 1}; 2629 const unsigned char swizzle_xxxy[4] = {0, 0, 0, 1}; 2630 const unsigned char swizzle_zyx1[4] = {2, 1, 0, 5}; 2631 const unsigned char swizzle_zyxw[4] = {2, 1, 0, 3}; 2632 2633 int i; 2634 const uint32_t sign_bit[4] = { 2635 S_038010_FORMAT_COMP_X(V_038010_SQ_FORMAT_COMP_SIGNED), 2636 S_038010_FORMAT_COMP_Y(V_038010_SQ_FORMAT_COMP_SIGNED), 2637 S_038010_FORMAT_COMP_Z(V_038010_SQ_FORMAT_COMP_SIGNED), 2638 S_038010_FORMAT_COMP_W(V_038010_SQ_FORMAT_COMP_SIGNED) 2639 }; 2640 2641 /* Need to replace certain texture formats on big-endian hosts: 2642 * formats whose channels have a number of bits that is not 2643 * divisible by 8. 2644 * Mesa's conversion functions don't swap bits for those formats, and because 2645 * we transmit the data over a serial bus to the GPU (PCIe), the 2646 * bit-endianness matters. 2647 * If we have an "opposite" format, just use that for the swizzling 2648 * information. If we don't have such an "opposite" format, we need 2649 * to use fixed swizzle info instead (see below). 2650 */ 2651 if (format == PIPE_FORMAT_R4A4_UNORM && do_endian_swap) 2652 format = PIPE_FORMAT_A4R4_UNORM; 2653 2654 desc = util_format_description(format); 2655 if (!desc) 2656 goto out_unknown; 2657 2658 /* Depth and stencil swizzling is handled separately. */ 2659 if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS) { 2660 /* Need to check for specific texture formats that don't have 2661 * an "opposite" format we can use. For those formats, we directly 2662 * specify the swizzling, which is the LE swizzling as defined in 2663 * u_format.csv 2664 */ 2665 if (do_endian_swap) { 2666 if (format == PIPE_FORMAT_L4A4_UNORM) 2667 word4 |= r600_get_swizzle_combined(swizzle_xxxy, swizzle_view, FALSE); 2668 else if (format == PIPE_FORMAT_B4G4R4A4_UNORM) 2669 word4 |= r600_get_swizzle_combined(swizzle_zyxw, swizzle_view, FALSE); 2670 else if (format == PIPE_FORMAT_B4G4R4X4_UNORM || format == PIPE_FORMAT_B5G6R5_UNORM) 2671 word4 |= r600_get_swizzle_combined(swizzle_zyx1, swizzle_view, FALSE); 2672 else 2673 word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view, FALSE); 2674 } else { 2675 word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view, FALSE); 2676 } 2677 } 2678 2679 /* Colorspace (return non-RGB formats directly).
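 * Depth/stencil formats map to dedicated FMT_* values with a replicated
 * one-channel swizzle (plus NUM_FORMAT_INT for stencil), YUV is not
 * handled yet, and sRGB only sets FORCE_DEGAMMA before falling through
 * to the ordinary RGB handling below.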
*/ 2680 switch (desc->colorspace) { 2681 /* Depth stencil formats */ 2682 case UTIL_FORMAT_COLORSPACE_ZS: 2683 switch (format) { 2684 /* Depth sampler formats. */ 2685 case PIPE_FORMAT_Z16_UNORM: 2686 word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE); 2687 result = FMT_16; 2688 goto out_word4; 2689 case PIPE_FORMAT_Z24X8_UNORM: 2690 case PIPE_FORMAT_Z24_UNORM_S8_UINT: 2691 word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE); 2692 result = FMT_8_24; 2693 goto out_word4; 2694 case PIPE_FORMAT_X8Z24_UNORM: 2695 case PIPE_FORMAT_S8_UINT_Z24_UNORM: 2696 if (rscreen->b.chip_class < EVERGREEN) 2697 goto out_unknown; 2698 word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE); 2699 result = FMT_24_8; 2700 goto out_word4; 2701 case PIPE_FORMAT_Z32_FLOAT: 2702 word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE); 2703 result = FMT_32_FLOAT; 2704 goto out_word4; 2705 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT: 2706 word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE); 2707 result = FMT_X24_8_32_FLOAT; 2708 goto out_word4; 2709 /* Stencil sampler formats. */ 2710 case PIPE_FORMAT_S8_UINT: 2711 word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT); 2712 word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE); 2713 result = FMT_8; 2714 goto out_word4; 2715 case PIPE_FORMAT_X24S8_UINT: 2716 word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT); 2717 word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE); 2718 result = FMT_8_24; 2719 goto out_word4; 2720 case PIPE_FORMAT_S8X24_UINT: 2721 if (rscreen->b.chip_class < EVERGREEN) 2722 goto out_unknown; 2723 word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT); 2724 word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE); 2725 result = FMT_24_8; 2726 goto out_word4; 2727 case PIPE_FORMAT_X32_S8X24_UINT: 2728 word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT); 2729 word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE); 2730 result = FMT_X24_8_32_FLOAT; 2731 goto out_word4; 2732 default: 2733 goto out_unknown; 2734 } 2735 2736 case UTIL_FORMAT_COLORSPACE_YUV: 2737 yuv_format |= (1 << 30); 2738 switch (format) { 2739 case PIPE_FORMAT_UYVY: 2740 case PIPE_FORMAT_YUYV: 2741 default: 2742 break; 2743 } 2744 goto out_unknown; /* XXX */ 2745 2746 case UTIL_FORMAT_COLORSPACE_SRGB: 2747 word4 |= S_038010_FORCE_DEGAMMA(1); 2748 break; 2749 2750 default: 2751 break; 2752 } 2753 2754 if (desc->layout == UTIL_FORMAT_LAYOUT_RGTC) { 2755 switch (format) { 2756 case PIPE_FORMAT_RGTC1_SNORM: 2757 case PIPE_FORMAT_LATC1_SNORM: 2758 word4 |= sign_bit[0]; 2759 case PIPE_FORMAT_RGTC1_UNORM: 2760 case PIPE_FORMAT_LATC1_UNORM: 2761 result = FMT_BC4; 2762 goto out_word4; 2763 case PIPE_FORMAT_RGTC2_SNORM: 2764 case PIPE_FORMAT_LATC2_SNORM: 2765 word4 |= sign_bit[0] | sign_bit[1]; 2766 case PIPE_FORMAT_RGTC2_UNORM: 2767 case PIPE_FORMAT_LATC2_UNORM: 2768 result = FMT_BC5; 2769 goto out_word4; 2770 default: 2771 goto out_unknown; 2772 } 2773 } 2774 2775 if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC) { 2776 switch (format) { 2777 case PIPE_FORMAT_DXT1_RGB: 2778 case PIPE_FORMAT_DXT1_RGBA: 2779 case PIPE_FORMAT_DXT1_SRGB: 2780 case PIPE_FORMAT_DXT1_SRGBA: 2781 result = FMT_BC1; 2782 is_srgb_valid = TRUE; 2783 goto out_word4; 2784 case PIPE_FORMAT_DXT3_RGBA: 2785 case PIPE_FORMAT_DXT3_SRGBA: 2786 result = FMT_BC2; 2787 is_srgb_valid = TRUE; 2788 goto out_word4; 2789 case PIPE_FORMAT_DXT5_RGBA: 2790 case 
PIPE_FORMAT_DXT5_SRGBA: 2791 result = FMT_BC3; 2792 is_srgb_valid = TRUE; 2793 goto out_word4; 2794 default: 2795 goto out_unknown; 2796 } 2797 } 2798 2799 if (desc->layout == UTIL_FORMAT_LAYOUT_BPTC) { 2800 if (rscreen->b.chip_class < EVERGREEN) 2801 goto out_unknown; 2802 2803 switch (format) { 2804 case PIPE_FORMAT_BPTC_RGBA_UNORM: 2805 case PIPE_FORMAT_BPTC_SRGBA: 2806 result = FMT_BC7; 2807 is_srgb_valid = TRUE; 2808 goto out_word4; 2809 case PIPE_FORMAT_BPTC_RGB_FLOAT: 2810 word4 |= sign_bit[0] | sign_bit[1] | sign_bit[2]; 2811 /* fall through */ 2812 case PIPE_FORMAT_BPTC_RGB_UFLOAT: 2813 result = FMT_BC6; 2814 goto out_word4; 2815 default: 2816 goto out_unknown; 2817 } 2818 } 2819 2820 if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED) { 2821 switch (format) { 2822 case PIPE_FORMAT_R8G8_B8G8_UNORM: 2823 case PIPE_FORMAT_G8R8_B8R8_UNORM: 2824 result = FMT_GB_GR; 2825 goto out_word4; 2826 case PIPE_FORMAT_G8R8_G8B8_UNORM: 2827 case PIPE_FORMAT_R8G8_R8B8_UNORM: 2828 result = FMT_BG_RG; 2829 goto out_word4; 2830 default: 2831 goto out_unknown; 2832 } 2833 } 2834 2835 if (format == PIPE_FORMAT_R9G9B9E5_FLOAT) { 2836 result = FMT_5_9_9_9_SHAREDEXP; 2837 goto out_word4; 2838 } else if (format == PIPE_FORMAT_R11G11B10_FLOAT) { 2839 result = FMT_10_11_11_FLOAT; 2840 goto out_word4; 2841 } 2842 2843 2844 for (i = 0; i < desc->nr_channels; i++) { 2845 if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) { 2846 word4 |= sign_bit[i]; 2847 } 2848 } 2849 2850 /* R8G8Bx_SNORM - XXX CxV8U8 */ 2851 2852 /* See whether the components are of the same size. */ 2853 for (i = 1; i < desc->nr_channels; i++) { 2854 uniform = uniform && desc->channel[0].size == desc->channel[i].size; 2855 } 2856 2857 /* Non-uniform formats. */ 2858 if (!uniform) { 2859 if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB && 2860 desc->channel[0].pure_integer) 2861 word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT); 2862 switch(desc->nr_channels) { 2863 case 3: 2864 if (desc->channel[0].size == 5 && 2865 desc->channel[1].size == 6 && 2866 desc->channel[2].size == 5) { 2867 result = FMT_5_6_5; 2868 goto out_word4; 2869 } 2870 goto out_unknown; 2871 case 4: 2872 if (desc->channel[0].size == 5 && 2873 desc->channel[1].size == 5 && 2874 desc->channel[2].size == 5 && 2875 desc->channel[3].size == 1) { 2876 result = FMT_1_5_5_5; 2877 goto out_word4; 2878 } 2879 if (desc->channel[0].size == 10 && 2880 desc->channel[1].size == 10 && 2881 desc->channel[2].size == 10 && 2882 desc->channel[3].size == 2) { 2883 result = FMT_2_10_10_10; 2884 goto out_word4; 2885 } 2886 goto out_unknown; 2887 } 2888 goto out_unknown; 2889 } 2890 2891 /* Find the first non-VOID channel. 
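 * Past this point the format is plain and uniform, so the hardware
 * format follows directly from that channel's type and size plus the
 * channel count; e.g. two 16-bit float channels select FMT_16_16_FLOAT.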
*/ 2892 for (i = 0; i < 4; i++) { 2893 if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) { 2894 break; 2895 } 2896 } 2897 2898 if (i == 4) 2899 goto out_unknown; 2900 2901 /* uniform formats */ 2902 switch (desc->channel[i].type) { 2903 case UTIL_FORMAT_TYPE_UNSIGNED: 2904 case UTIL_FORMAT_TYPE_SIGNED: 2905#if 0 2906 if (!desc->channel[i].normalized && 2907 desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB) { 2908 goto out_unknown; 2909 } 2910#endif 2911 if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB && 2912 desc->channel[i].pure_integer) 2913 word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT); 2914 2915 switch (desc->channel[i].size) { 2916 case 4: 2917 switch (desc->nr_channels) { 2918 case 2: 2919 result = FMT_4_4; 2920 goto out_word4; 2921 case 4: 2922 result = FMT_4_4_4_4; 2923 goto out_word4; 2924 } 2925 goto out_unknown; 2926 case 8: 2927 switch (desc->nr_channels) { 2928 case 1: 2929 result = FMT_8; 2930 is_srgb_valid = TRUE; 2931 goto out_word4; 2932 case 2: 2933 result = FMT_8_8; 2934 goto out_word4; 2935 case 4: 2936 result = FMT_8_8_8_8; 2937 is_srgb_valid = TRUE; 2938 goto out_word4; 2939 } 2940 goto out_unknown; 2941 case 16: 2942 switch (desc->nr_channels) { 2943 case 1: 2944 result = FMT_16; 2945 goto out_word4; 2946 case 2: 2947 result = FMT_16_16; 2948 goto out_word4; 2949 case 4: 2950 result = FMT_16_16_16_16; 2951 goto out_word4; 2952 } 2953 goto out_unknown; 2954 case 32: 2955 switch (desc->nr_channels) { 2956 case 1: 2957 result = FMT_32; 2958 goto out_word4; 2959 case 2: 2960 result = FMT_32_32; 2961 goto out_word4; 2962 case 4: 2963 result = FMT_32_32_32_32; 2964 goto out_word4; 2965 } 2966 } 2967 goto out_unknown; 2968 2969 case UTIL_FORMAT_TYPE_FLOAT: 2970 switch (desc->channel[i].size) { 2971 case 16: 2972 switch (desc->nr_channels) { 2973 case 1: 2974 result = FMT_16_FLOAT; 2975 goto out_word4; 2976 case 2: 2977 result = FMT_16_16_FLOAT; 2978 goto out_word4; 2979 case 4: 2980 result = FMT_16_16_16_16_FLOAT; 2981 goto out_word4; 2982 } 2983 goto out_unknown; 2984 case 32: 2985 switch (desc->nr_channels) { 2986 case 1: 2987 result = FMT_32_FLOAT; 2988 goto out_word4; 2989 case 2: 2990 result = FMT_32_32_FLOAT; 2991 goto out_word4; 2992 case 4: 2993 result = FMT_32_32_32_32_FLOAT; 2994 goto out_word4; 2995 } 2996 } 2997 goto out_unknown; 2998 } 2999 3000out_word4: 3001 3002 if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB && !is_srgb_valid) 3003 return ~0; 3004 if (word4_p) 3005 *word4_p = word4; 3006 if (yuv_format_p) 3007 *yuv_format_p = yuv_format; 3008 return result; 3009out_unknown: 3010 /* R600_ERR("Unable to handle texformat %d %s\n", format, util_format_name(format)); */ 3011 return ~0; 3012} 3013 3014uint32_t r600_translate_colorformat(enum chip_class chip, enum pipe_format format, 3015 bool do_endian_swap) 3016{ 3017 const struct util_format_description *desc = util_format_description(format); 3018 int channel = util_format_get_first_non_void_channel(format); 3019 bool is_float; 3020 if (!desc) 3021 return ~0U; 3022 3023#define HAS_SIZE(x,y,z,w) \ 3024 (desc->channel[0].size == (x) && desc->channel[1].size == (y) && \ 3025 desc->channel[2].size == (z) && desc->channel[3].size == (w)) 3026 3027 if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */ 3028 return V_0280A0_COLOR_10_11_11_FLOAT; 3029 3030 if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN || 3031 channel == -1) 3032 return ~0U; 3033 3034 is_float = desc->channel[channel].type == UTIL_FORMAT_TYPE_FLOAT; 3035 3036 switch (desc->nr_channels) { 3037 case 1: 3038 switch 
(desc->channel[0].size) { 3039 case 8: 3040 return V_0280A0_COLOR_8; 3041 case 16: 3042 if (is_float) 3043 return V_0280A0_COLOR_16_FLOAT; 3044 else 3045 return V_0280A0_COLOR_16; 3046 case 32: 3047 if (is_float) 3048 return V_0280A0_COLOR_32_FLOAT; 3049 else 3050 return V_0280A0_COLOR_32; 3051 } 3052 break; 3053 case 2: 3054 if (desc->channel[0].size == desc->channel[1].size) { 3055 switch (desc->channel[0].size) { 3056 case 4: 3057 if (chip <= R700) 3058 return V_0280A0_COLOR_4_4; 3059 else 3060 return ~0U; /* removed on Evergreen */ 3061 case 8: 3062 return V_0280A0_COLOR_8_8; 3063 case 16: 3064 if (is_float) 3065 return V_0280A0_COLOR_16_16_FLOAT; 3066 else 3067 return V_0280A0_COLOR_16_16; 3068 case 32: 3069 if (is_float) 3070 return V_0280A0_COLOR_32_32_FLOAT; 3071 else 3072 return V_0280A0_COLOR_32_32; 3073 } 3074 } else if (HAS_SIZE(8,24,0,0)) { 3075 return (do_endian_swap ? V_0280A0_COLOR_8_24 : V_0280A0_COLOR_24_8); 3076 } else if (HAS_SIZE(24,8,0,0)) { 3077 return V_0280A0_COLOR_8_24; 3078 } 3079 break; 3080 case 3: 3081 if (HAS_SIZE(5,6,5,0)) { 3082 return V_0280A0_COLOR_5_6_5; 3083 } else if (HAS_SIZE(32,8,24,0)) { 3084 return V_0280A0_COLOR_X24_8_32_FLOAT; 3085 } 3086 break; 3087 case 4: 3088 if (desc->channel[0].size == desc->channel[1].size && 3089 desc->channel[0].size == desc->channel[2].size && 3090 desc->channel[0].size == desc->channel[3].size) { 3091 switch (desc->channel[0].size) { 3092 case 4: 3093 return V_0280A0_COLOR_4_4_4_4; 3094 case 8: 3095 return V_0280A0_COLOR_8_8_8_8; 3096 case 16: 3097 if (is_float) 3098 return V_0280A0_COLOR_16_16_16_16_FLOAT; 3099 else 3100 return V_0280A0_COLOR_16_16_16_16; 3101 case 32: 3102 if (is_float) 3103 return V_0280A0_COLOR_32_32_32_32_FLOAT; 3104 else 3105 return V_0280A0_COLOR_32_32_32_32; 3106 } 3107 } else if (HAS_SIZE(5,5,5,1)) { 3108 return V_0280A0_COLOR_1_5_5_5; 3109 } else if (HAS_SIZE(10,10,10,2)) { 3110 return V_0280A0_COLOR_2_10_10_10; 3111 } 3112 break; 3113 } 3114 return ~0U; 3115} 3116 3117uint32_t r600_colorformat_endian_swap(uint32_t colorformat, bool do_endian_swap) 3118{ 3119 if (R600_BIG_ENDIAN) { 3120 switch (colorformat) { 3121 /* 8-bit buffers. */ 3122 case V_0280A0_COLOR_4_4: 3123 case V_0280A0_COLOR_8: 3124 return ENDIAN_NONE; 3125 3126 /* 16-bit buffers. */ 3127 case V_0280A0_COLOR_8_8: 3128 /* 3129 * No need to do endian swaps on array formats, 3130 * as the mesa<-->pipe format conversion takes 3131 * endianness into account 3132 */ 3133 return ENDIAN_NONE; 3134 3135 case V_0280A0_COLOR_5_6_5: 3136 case V_0280A0_COLOR_1_5_5_5: 3137 case V_0280A0_COLOR_4_4_4_4: 3138 case V_0280A0_COLOR_16: 3139 return (do_endian_swap ? ENDIAN_8IN16 : ENDIAN_NONE); 3140 3141 /* 32-bit buffers. */ 3142 case V_0280A0_COLOR_8_8_8_8: 3143 /* 3144 * No need to do endian swaps on array formats, 3145 * as the mesa<-->pipe format conversion takes 3146 * endianness into account 3147 */ 3148 return ENDIAN_NONE; 3149 3150 case V_0280A0_COLOR_2_10_10_10: 3151 case V_0280A0_COLOR_8_24: 3152 case V_0280A0_COLOR_24_8: 3153 case V_0280A0_COLOR_32_FLOAT: 3154 return (do_endian_swap ? ENDIAN_8IN32 : ENDIAN_NONE); 3155 3156 case V_0280A0_COLOR_16_16_FLOAT: 3157 case V_0280A0_COLOR_16_16: 3158 return ENDIAN_8IN16; 3159 3160 /* 64-bit buffers. */ 3161 case V_0280A0_COLOR_16_16_16_16: 3162 case V_0280A0_COLOR_16_16_16_16_FLOAT: 3163 return ENDIAN_8IN16; 3164 3165 case V_0280A0_COLOR_32_32_FLOAT: 3166 case V_0280A0_COLOR_32_32: 3167 case V_0280A0_COLOR_X24_8_32_FLOAT: 3168 return ENDIAN_8IN32; 3169 3170 /* 128-bit buffers.
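 * As above: ENDIAN_8IN16 swaps the two bytes inside each 16-bit element
 * and ENDIAN_8IN32 the four bytes inside each 32-bit element, so wider
 * formats simply reuse the per-element swap class.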
*/ 3171 case V_0280A0_COLOR_32_32_32_32_FLOAT: 3172 case V_0280A0_COLOR_32_32_32_32: 3173 return ENDIAN_8IN32; 3174 default: 3175 return ENDIAN_NONE; /* Unsupported. */ 3176 } 3177 } else { 3178 return ENDIAN_NONE; 3179 } 3180} 3181 3182static void r600_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf) 3183{ 3184 struct r600_context *rctx = (struct r600_context*)ctx; 3185 struct r600_resource *rbuffer = r600_resource(buf); 3186 unsigned i, shader, mask; 3187 struct r600_pipe_sampler_view *view; 3188 3189 /* Reallocate the buffer in the same pipe_resource. */ 3190 r600_alloc_resource(&rctx->screen->b, rbuffer); 3191 3192 /* We changed the buffer, now we need to bind it where the old one was bound. */ 3193 /* Vertex buffers. */ 3194 mask = rctx->vertex_buffer_state.enabled_mask; 3195 while (mask) { 3196 i = u_bit_scan(&mask); 3197 if (rctx->vertex_buffer_state.vb[i].buffer.resource == &rbuffer->b.b) { 3198 rctx->vertex_buffer_state.dirty_mask |= 1 << i; 3199 r600_vertex_buffers_dirty(rctx); 3200 } 3201 } 3202 /* Streamout buffers. */ 3203 for (i = 0; i < rctx->b.streamout.num_targets; i++) { 3204 if (rctx->b.streamout.targets[i] && 3205 rctx->b.streamout.targets[i]->b.buffer == &rbuffer->b.b) { 3206 if (rctx->b.streamout.begin_emitted) { 3207 r600_emit_streamout_end(&rctx->b); 3208 } 3209 rctx->b.streamout.append_bitmask = rctx->b.streamout.enabled_mask; 3210 r600_streamout_buffers_dirty(&rctx->b); 3211 } 3212 } 3213 3214 /* Constant buffers. */ 3215 for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) { 3216 struct r600_constbuf_state *state = &rctx->constbuf_state[shader]; 3217 bool found = false; 3218 uint32_t mask = state->enabled_mask; 3219 3220 while (mask) { 3221 unsigned i = u_bit_scan(&mask); 3222 if (state->cb[i].buffer == &rbuffer->b.b) { 3223 found = true; 3224 state->dirty_mask |= 1 << i; 3225 } 3226 } 3227 if (found) { 3228 r600_constant_buffers_dirty(rctx, state); 3229 } 3230 } 3231 3232 /* Texture buffer objects - update the virtual addresses in descriptors. */ 3233 LIST_FOR_EACH_ENTRY(view, &rctx->texture_buffers, list) { 3234 if (view->base.texture == &rbuffer->b.b) { 3235 uint64_t offset = view->base.u.buf.offset; 3236 uint64_t va = rbuffer->gpu_address + offset; 3237 3238 view->tex_resource_words[0] = va; 3239 view->tex_resource_words[2] &= C_038008_BASE_ADDRESS_HI; 3240 view->tex_resource_words[2] |= S_038008_BASE_ADDRESS_HI(va >> 32); 3241 } 3242 } 3243 /* Texture buffer objects - make bindings dirty if needed. 
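 * The descriptor words were already patched with the new GPU address in
 * the loop above; this pass only flags the affected bindings so the
 * updated descriptors are re-emitted on the next draw.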
*/ 3244 for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) { 3245 struct r600_samplerview_state *state = &rctx->samplers[shader].views; 3246 bool found = false; 3247 uint32_t mask = state->enabled_mask; 3248 3249 while (mask) { 3250 unsigned i = u_bit_scan(&mask); 3251 if (state->views[i]->base.texture == &rbuffer->b.b) { 3252 found = true; 3253 state->dirty_mask |= 1 << i; 3254 } 3255 } 3256 if (found) { 3257 r600_sampler_views_dirty(rctx, state); 3258 } 3259 } 3260 3261 /* SSBOs */ 3262 struct r600_image_state *istate = &rctx->fragment_buffers; 3263 { 3264 uint32_t mask = istate->enabled_mask; 3265 bool found = false; 3266 while (mask) { 3267 unsigned i = u_bit_scan(&mask); 3268 if (istate->views[i].base.resource == &rbuffer->b.b) { 3269 found = true; 3270 istate->dirty_mask |= 1 << i; 3271 } 3272 } 3273 if (found) { 3274 r600_mark_atom_dirty(rctx, &istate->atom); 3275 } 3276 } 3277 3278} 3279 3280static void r600_set_active_query_state(struct pipe_context *ctx, boolean enable) 3281{ 3282 struct r600_context *rctx = (struct r600_context*)ctx; 3283 3284 /* Pipeline stat & streamout queries. */ 3285 if (enable) { 3286 rctx->b.flags &= ~R600_CONTEXT_STOP_PIPELINE_STATS; 3287 rctx->b.flags |= R600_CONTEXT_START_PIPELINE_STATS; 3288 } else { 3289 rctx->b.flags &= ~R600_CONTEXT_START_PIPELINE_STATS; 3290 rctx->b.flags |= R600_CONTEXT_STOP_PIPELINE_STATS; 3291 } 3292 3293 /* Occlusion queries. */ 3294 if (rctx->db_misc_state.occlusion_queries_disabled != !enable) { 3295 rctx->db_misc_state.occlusion_queries_disabled = !enable; 3296 r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom); 3297 } 3298} 3299 3300static void r600_need_gfx_cs_space(struct pipe_context *ctx, unsigned num_dw, 3301 bool include_draw_vbo) 3302{ 3303 r600_need_cs_space((struct r600_context*)ctx, num_dw, include_draw_vbo, 0); 3304} 3305 3306/* keep this at the end of this file, please */ 3307void r600_init_common_state_functions(struct r600_context *rctx) 3308{ 3309 rctx->b.b.create_fs_state = r600_create_ps_state; 3310 rctx->b.b.create_vs_state = r600_create_vs_state; 3311 rctx->b.b.create_gs_state = r600_create_gs_state; 3312 rctx->b.b.create_tcs_state = r600_create_tcs_state; 3313 rctx->b.b.create_tes_state = r600_create_tes_state; 3314 rctx->b.b.create_vertex_elements_state = r600_create_vertex_fetch_shader; 3315 rctx->b.b.bind_blend_state = r600_bind_blend_state; 3316 rctx->b.b.bind_depth_stencil_alpha_state = r600_bind_dsa_state; 3317 rctx->b.b.bind_sampler_states = r600_bind_sampler_states; 3318 rctx->b.b.bind_fs_state = r600_bind_ps_state; 3319 rctx->b.b.bind_rasterizer_state = r600_bind_rs_state; 3320 rctx->b.b.bind_vertex_elements_state = r600_bind_vertex_elements; 3321 rctx->b.b.bind_vs_state = r600_bind_vs_state; 3322 rctx->b.b.bind_gs_state = r600_bind_gs_state; 3323 rctx->b.b.bind_tcs_state = r600_bind_tcs_state; 3324 rctx->b.b.bind_tes_state = r600_bind_tes_state; 3325 rctx->b.b.delete_blend_state = r600_delete_blend_state; 3326 rctx->b.b.delete_depth_stencil_alpha_state = r600_delete_dsa_state; 3327 rctx->b.b.delete_fs_state = r600_delete_ps_state; 3328 rctx->b.b.delete_rasterizer_state = r600_delete_rs_state; 3329 rctx->b.b.delete_sampler_state = r600_delete_sampler_state; 3330 rctx->b.b.delete_vertex_elements_state = r600_delete_vertex_elements; 3331 rctx->b.b.delete_vs_state = r600_delete_vs_state; 3332 rctx->b.b.delete_gs_state = r600_delete_gs_state; 3333 rctx->b.b.delete_tcs_state = r600_delete_tcs_state; 3334 rctx->b.b.delete_tes_state = r600_delete_tes_state; 3335 rctx->b.b.set_blend_color = 
r600_set_blend_color; 3336 rctx->b.b.set_clip_state = r600_set_clip_state; 3337 rctx->b.b.set_constant_buffer = r600_set_constant_buffer; 3338 rctx->b.b.set_sample_mask = r600_set_sample_mask; 3339 rctx->b.b.set_stencil_ref = r600_set_pipe_stencil_ref; 3340 rctx->b.b.set_vertex_buffers = r600_set_vertex_buffers; 3341 rctx->b.b.set_sampler_views = r600_set_sampler_views; 3342 rctx->b.b.sampler_view_destroy = r600_sampler_view_destroy; 3343 rctx->b.b.memory_barrier = r600_memory_barrier; 3344 rctx->b.b.texture_barrier = r600_texture_barrier; 3345 rctx->b.b.set_stream_output_targets = r600_set_streamout_targets; 3346 rctx->b.b.set_active_query_state = r600_set_active_query_state; 3347 3348 rctx->b.b.draw_vbo = r600_draw_vbo; 3349 rctx->b.invalidate_buffer = r600_invalidate_buffer; 3350 rctx->b.need_gfx_cs_space = r600_need_gfx_cs_space; 3351} 3352