freedreno_state.c revision 7ec681f3
/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_dual_blend.h"
#include "util/u_helpers.h"
#include "util/u_memory.h"
#include "util/u_string.h"

#include "freedreno_context.h"
#include "freedreno_gmem.h"
#include "freedreno_query_hw.h"
#include "freedreno_resource.h"
#include "freedreno_state.h"
#include "freedreno_texture.h"
#include "freedreno_util.h"

#define get_safe(ptr, field) ((ptr) ? (ptr)->field : 0)

/* All the generic state handling.. In case of CSOs that are specific
 * to the GPU version, when the bind and the delete are common they can
 * go in here.
 */
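
/* Rough cost heuristic for a draw with the current state: one unit per
 * bound color buffer, plus extra units when blending, depth test, or
 * depth write are enabled.  (Presumably consumed by the batch/gmem logic
 * when accounting the cost of queued rendering; the consumers live
 * outside this file.)
 */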

static void
update_draw_cost(struct fd_context *ctx) assert_dt
{
   struct pipe_framebuffer_state *pfb = &ctx->framebuffer;

   ctx->draw_cost = pfb->nr_cbufs;
   for (unsigned i = 0; i < pfb->nr_cbufs; i++)
      if (fd_blend_enabled(ctx, i))
         ctx->draw_cost++;
   if (fd_depth_enabled(ctx))
      ctx->draw_cost++;
   if (fd_depth_write_enabled(ctx))
      ctx->draw_cost++;
}

static void
fd_set_blend_color(struct pipe_context *pctx,
                   const struct pipe_blend_color *blend_color) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->blend_color = *blend_color;
   fd_context_dirty(ctx, FD_DIRTY_BLEND_COLOR);
}

static void
fd_set_stencil_ref(struct pipe_context *pctx,
                   const struct pipe_stencil_ref stencil_ref) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->stencil_ref = stencil_ref;
   fd_context_dirty(ctx, FD_DIRTY_STENCIL_REF);
}

static void
fd_set_clip_state(struct pipe_context *pctx,
                  const struct pipe_clip_state *clip) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->ucp = *clip;
   fd_context_dirty(ctx, FD_DIRTY_UCP);
}

static void
fd_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->sample_mask = (uint16_t)sample_mask;
   fd_context_dirty(ctx, FD_DIRTY_SAMPLE_MASK);
}

static void
fd_set_min_samples(struct pipe_context *pctx, unsigned min_samples) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->min_samples = min_samples;
   fd_context_dirty(ctx, FD_DIRTY_MIN_SAMPLES);
}

/* notes from calim on #dri-devel:
 * index==0 will be non-UBO (ie. glUniformXYZ()) all packed together padded
 * out to vec4's
 * I should be able to consider that I own the user_ptr until the next
 * set_constant_buffer() call, at which point I don't really care about the
 * previous values.
 * index>0 will be UBO's.. well, I'll worry about that later
 */
static void
fd_set_constant_buffer(struct pipe_context *pctx, enum pipe_shader_type shader,
                       uint index, bool take_ownership,
                       const struct pipe_constant_buffer *cb) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_constbuf_stateobj *so = &ctx->constbuf[shader];

   util_copy_constant_buffer(&so->cb[index], cb, take_ownership);

   /* Note that gallium frontends can unbind constant buffers by
    * passing NULL here.
    */
   if (unlikely(!cb)) {
      so->enabled_mask &= ~(1 << index);
      return;
   }

   so->enabled_mask |= 1 << index;

   fd_context_dirty_shader(ctx, shader, FD_DIRTY_SHADER_CONST);
   fd_resource_set_usage(cb->buffer, FD_DIRTY_CONST);

   if (index > 0) {
      assert(!cb->user_buffer);
      ctx->dirty |= FD_DIRTY_RESOURCE;
   }
}
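
/* Example of the mask bookkeeping in fd_set_shader_buffers() below:
 * set_shader_buffers(start=2, count=2) gives modified_bits = 0b1100;
 * those slots are cleared from enabled_mask/writable_mask and re-added
 * only for slots that actually receive a buffer.  For writable buffers
 * the resource's valid range is also grown, so later CPU access knows
 * the GPU may have written that region.
 */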

static void
fd_set_shader_buffers(struct pipe_context *pctx, enum pipe_shader_type shader,
                      unsigned start, unsigned count,
                      const struct pipe_shader_buffer *buffers,
                      unsigned writable_bitmask) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[shader];
   const unsigned modified_bits = u_bit_consecutive(start, count);

   so->enabled_mask &= ~modified_bits;
   so->writable_mask &= ~modified_bits;
   so->writable_mask |= writable_bitmask << start;

   for (unsigned i = 0; i < count; i++) {
      unsigned n = i + start;
      struct pipe_shader_buffer *buf = &so->sb[n];

      if (buffers && buffers[i].buffer) {
         if ((buf->buffer == buffers[i].buffer) &&
             (buf->buffer_offset == buffers[i].buffer_offset) &&
             (buf->buffer_size == buffers[i].buffer_size))
            continue;

         buf->buffer_offset = buffers[i].buffer_offset;
         buf->buffer_size = buffers[i].buffer_size;
         pipe_resource_reference(&buf->buffer, buffers[i].buffer);

         fd_resource_set_usage(buffers[i].buffer, FD_DIRTY_SSBO);

         so->enabled_mask |= BIT(n);

         if (writable_bitmask & BIT(i)) {
            struct fd_resource *rsc = fd_resource(buf->buffer);
            util_range_add(&rsc->b.b, &rsc->valid_buffer_range,
                           buf->buffer_offset,
                           buf->buffer_offset + buf->buffer_size);
         }
      } else {
         pipe_resource_reference(&buf->buffer, NULL);
      }
   }

   fd_context_dirty_shader(ctx, shader, FD_DIRTY_SHADER_SSBO);
}

void
fd_set_shader_images(struct pipe_context *pctx, enum pipe_shader_type shader,
                     unsigned start, unsigned count,
                     unsigned unbind_num_trailing_slots,
                     const struct pipe_image_view *images) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_shaderimg_stateobj *so = &ctx->shaderimg[shader];

   unsigned mask = 0;

   if (images) {
      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + start;
         struct pipe_image_view *buf = &so->si[n];

         if ((buf->resource == images[i].resource) &&
             (buf->format == images[i].format) &&
             (buf->access == images[i].access) &&
             !memcmp(&buf->u, &images[i].u, sizeof(buf->u)))
            continue;

         mask |= BIT(n);
         util_copy_image_view(buf, &images[i]);

         if (buf->resource) {
            fd_resource_set_usage(buf->resource, FD_DIRTY_IMAGE);
            so->enabled_mask |= BIT(n);

            if ((buf->access & PIPE_IMAGE_ACCESS_WRITE) &&
                (buf->resource->target == PIPE_BUFFER)) {

               struct fd_resource *rsc = fd_resource(buf->resource);
               util_range_add(&rsc->b.b, &rsc->valid_buffer_range,
                              buf->u.buf.offset,
                              buf->u.buf.offset + buf->u.buf.size);
            }
         } else {
            so->enabled_mask &= ~BIT(n);
         }
      }
   } else {
      mask = (BIT(count) - 1) << start;

      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + start;
         struct pipe_image_view *img = &so->si[n];

         pipe_resource_reference(&img->resource, NULL);
      }

      so->enabled_mask &= ~mask;
   }

   for (unsigned i = 0; i < unbind_num_trailing_slots; i++)
      pipe_resource_reference(&so->si[i + start + count].resource, NULL);

   so->enabled_mask &=
      ~(BITFIELD_MASK(unbind_num_trailing_slots) << (start + count));

   fd_context_dirty_shader(ctx, shader, FD_DIRTY_SHADER_IMAGE);
}
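
/* A framebuffer change is a natural batch boundary.  With reordering
 * enabled we just drop our reference to the current batch (a new one
 * is created on demand against the new pfb); otherwise the batch is
 * flushed immediately.
 */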

void
fd_set_framebuffer_state(struct pipe_context *pctx,
                         const struct pipe_framebuffer_state *framebuffer)
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_framebuffer_state *cso;

   DBG("%ux%u, %u layers, %u samples", framebuffer->width, framebuffer->height,
       framebuffer->layers, framebuffer->samples);

   cso = &ctx->framebuffer;

   if (util_framebuffer_state_equal(cso, framebuffer))
      return;

   /* Do this *after* checking that the framebuffer state is actually
    * changing.  In the fd_blitter_clear() path, we get a pfb update
    * to restore the current pfb state, which should not trigger us
    * to flush (as that can cause the batch to be freed at a point
    * before fd_clear() returns, but after the point where it expects
    * flushes to potentially happen).
    */
   fd_context_switch_from(ctx);

   util_copy_framebuffer_state(cso, framebuffer);

   cso->samples = util_framebuffer_get_num_samples(cso);

   if (ctx->screen->reorder) {
      struct fd_batch *old_batch = NULL;

      fd_batch_reference(&old_batch, ctx->batch);

      if (likely(old_batch))
         fd_batch_finish_queries(old_batch);

      fd_batch_reference(&ctx->batch, NULL);
      fd_context_all_dirty(ctx);
      ctx->update_active_queries = true;

      fd_batch_reference(&old_batch, NULL);
   } else if (ctx->batch) {
      DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
          framebuffer->cbufs[0], framebuffer->zsbuf);
      fd_batch_flush(ctx->batch);
   }

   fd_context_dirty(ctx, FD_DIRTY_FRAMEBUFFER);

   ctx->disabled_scissor.minx = 0;
   ctx->disabled_scissor.miny = 0;
   ctx->disabled_scissor.maxx = cso->width;
   ctx->disabled_scissor.maxy = cso->height;

   fd_context_dirty(ctx, FD_DIRTY_SCISSOR);
   update_draw_cost(ctx);
}

static void
fd_set_polygon_stipple(struct pipe_context *pctx,
                       const struct pipe_poly_stipple *stipple) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->stipple = *stipple;
   fd_context_dirty(ctx, FD_DIRTY_STIPPLE);
}

static void
fd_set_scissor_states(struct pipe_context *pctx, unsigned start_slot,
                      unsigned num_scissors,
                      const struct pipe_scissor_state *scissor) in_dt
{
   struct fd_context *ctx = fd_context(pctx);

   ctx->scissor = *scissor;
   fd_context_dirty(ctx, FD_DIRTY_SCISSOR);
}
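
/* Worked example for the conversion below: a viewport with
 * scale=(400, 300) and translate=(400, 300) maps clip-space
 * (-1, -1)..(1, 1) to window-space (0, 0)..(800, 600), which after
 * clamping becomes ctx->viewport_scissor.
 */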

static void
fd_set_viewport_states(struct pipe_context *pctx, unsigned start_slot,
                       unsigned num_viewports,
                       const struct pipe_viewport_state *viewport) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_scissor_state *scissor = &ctx->viewport_scissor;
   float minx, miny, maxx, maxy;

   ctx->viewport = *viewport;

   /* see si_get_scissor_from_viewport(): */

   /* Convert (-1, -1) and (1, 1) from clip space into window space. */
   minx = -viewport->scale[0] + viewport->translate[0];
   miny = -viewport->scale[1] + viewport->translate[1];
   maxx = viewport->scale[0] + viewport->translate[0];
   maxy = viewport->scale[1] + viewport->translate[1];

   /* Handle inverted viewports. */
   if (minx > maxx) {
      swap(minx, maxx);
   }
   if (miny > maxy) {
      swap(miny, maxy);
   }

   const float max_dims = ctx->screen->gen >= 4 ? 16384.f : 4096.f;

   /* Clamp, convert to integer and round up the max bounds. */
   scissor->minx = CLAMP(minx, 0.f, max_dims);
   scissor->miny = CLAMP(miny, 0.f, max_dims);
   scissor->maxx = CLAMP(ceilf(maxx), 0.f, max_dims);
   scissor->maxy = CLAMP(ceilf(maxy), 0.f, max_dims);

   fd_context_dirty(ctx, FD_DIRTY_VIEWPORT);
}

static void
fd_set_vertex_buffers(struct pipe_context *pctx, unsigned start_slot,
                      unsigned count, unsigned unbind_num_trailing_slots,
                      bool take_ownership,
                      const struct pipe_vertex_buffer *vb) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_vertexbuf_stateobj *so = &ctx->vtx.vertexbuf;
   int i;

   /* on a2xx, pitch is encoded in the vtx fetch instruction, so
    * we need to mark VTXSTATE as dirty as well to trigger patching
    * and re-emitting the vtx shader:
    */
   if (ctx->screen->gen < 3) {
      for (i = 0; i < count; i++) {
         bool new_enabled = vb && vb[i].buffer.resource;
         bool old_enabled = so->vb[i].buffer.resource != NULL;
         uint32_t new_stride = vb ? vb[i].stride : 0;
         uint32_t old_stride = so->vb[i].stride;
         if ((new_enabled != old_enabled) || (new_stride != old_stride)) {
            fd_context_dirty(ctx, FD_DIRTY_VTXSTATE);
            break;
         }
      }
   }

   util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, start_slot,
                                count, unbind_num_trailing_slots,
                                take_ownership);
   so->count = util_last_bit(so->enabled_mask);

   if (!vb)
      return;

   fd_context_dirty(ctx, FD_DIRTY_VTXBUF);

   for (unsigned i = 0; i < count; i++) {
      assert(!vb[i].is_user_buffer);
      fd_resource_set_usage(vb[i].buffer.resource, FD_DIRTY_VTXBUF);
   }
}
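
/* Dual-source blend gets its own dirty bit since toggling it changes
 * how the fragment shader's color outputs are consumed (two outputs
 * feeding MRT0), which, at least on some gens, means different program
 * state rather than just different blend registers.
 */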

static void
fd_blend_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_blend_state *cso = hwcso;
   bool old_is_dual = ctx->blend ? ctx->blend->rt[0].blend_enable &&
                                      util_blend_state_is_dual(ctx->blend, 0)
                                 : false;
   bool new_is_dual =
      cso ? cso->rt[0].blend_enable && util_blend_state_is_dual(cso, 0) : false;
   ctx->blend = hwcso;
   fd_context_dirty(ctx, FD_DIRTY_BLEND);
   if (old_is_dual != new_is_dual)
      fd_context_dirty(ctx, FD_DIRTY_BLEND_DUAL);
   update_draw_cost(ctx);
}

static void
fd_blend_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

static void
fd_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_scissor_state *old_scissor = fd_context_get_scissor(ctx);
   bool discard = get_safe(ctx->rasterizer, rasterizer_discard);
   unsigned clip_plane_enable = get_safe(ctx->rasterizer, clip_plane_enable);

   ctx->rasterizer = hwcso;
   fd_context_dirty(ctx, FD_DIRTY_RASTERIZER);

   if (ctx->rasterizer && ctx->rasterizer->scissor) {
      ctx->current_scissor = &ctx->scissor;
   } else {
      ctx->current_scissor = &ctx->disabled_scissor;
   }

   /* if scissor enable bit changed we need to mark scissor
    * state as dirty as well:
    * NOTE: we can do a shallow compare, since we only care
    * if it changed to/from &ctx->disabled_scissor
    */
   if (old_scissor != fd_context_get_scissor(ctx))
      fd_context_dirty(ctx, FD_DIRTY_SCISSOR);

   if (discard != get_safe(ctx->rasterizer, rasterizer_discard))
      fd_context_dirty(ctx, FD_DIRTY_RASTERIZER_DISCARD);

   if (clip_plane_enable != get_safe(ctx->rasterizer, clip_plane_enable))
      fd_context_dirty(ctx, FD_DIRTY_RASTERIZER_CLIP_PLANE_ENABLE);
}

static void
fd_rasterizer_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

static void
fd_zsa_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->zsa = hwcso;
   fd_context_dirty(ctx, FD_DIRTY_ZSA);
   update_draw_cost(ctx);
}

static void
fd_zsa_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

static void *
fd_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
                       const struct pipe_vertex_element *elements)
{
   struct fd_vertex_stateobj *so = CALLOC_STRUCT(fd_vertex_stateobj);

   if (!so)
      return NULL;

   memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
   so->num_elements = num_elements;

   return so;
}

static void
fd_vertex_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

static void
fd_vertex_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->vtx.vtx = hwcso;
   fd_context_dirty(ctx, FD_DIRTY_VTXSTATE);
}
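
/* Each stream-output target also gets a small 'offset_buf' BO;
 * presumably this holds the running write offset on the GPU side so it
 * can be saved/restored across batches rather than tracked on the CPU.
 */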

static struct pipe_stream_output_target *
fd_create_stream_output_target(struct pipe_context *pctx,
                               struct pipe_resource *prsc,
                               unsigned buffer_offset, unsigned buffer_size)
{
   struct fd_stream_output_target *target;
   struct fd_resource *rsc = fd_resource(prsc);

   target = CALLOC_STRUCT(fd_stream_output_target);
   if (!target)
      return NULL;

   pipe_reference_init(&target->base.reference, 1);
   pipe_resource_reference(&target->base.buffer, prsc);

   target->base.context = pctx;
   target->base.buffer_offset = buffer_offset;
   target->base.buffer_size = buffer_size;

   target->offset_buf = pipe_buffer_create(
      pctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE, sizeof(uint32_t));

   assert(rsc->b.b.target == PIPE_BUFFER);
   util_range_add(&rsc->b.b, &rsc->valid_buffer_range, buffer_offset,
                  buffer_offset + buffer_size);

   return &target->base;
}

static void
fd_stream_output_target_destroy(struct pipe_context *pctx,
                                struct pipe_stream_output_target *target)
{
   struct fd_stream_output_target *cso = fd_stream_output_target(target);

   pipe_resource_reference(&cso->base.buffer, NULL);
   pipe_resource_reference(&cso->offset_buf, NULL);

   FREE(target);
}

static void
fd_set_stream_output_targets(struct pipe_context *pctx, unsigned num_targets,
                             struct pipe_stream_output_target **targets,
                             const unsigned *offsets) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_streamout_stateobj *so = &ctx->streamout;
   unsigned i;

   debug_assert(num_targets <= ARRAY_SIZE(so->targets));

   /* Older targets need sw stats enabled for streamout emulation in VS: */
   if (ctx->screen->gen < 5) {
      if (num_targets && !so->num_targets) {
         ctx->stats_users++;
      } else if (so->num_targets && !num_targets) {
         ctx->stats_users--;
      }
   }

   for (i = 0; i < num_targets; i++) {
      boolean changed = targets[i] != so->targets[i];
      boolean reset = (offsets[i] != (unsigned)-1);

      so->reset |= (reset << i);

      if (!changed && !reset)
         continue;

      /* Note that all SO targets will be reset at once at a
       * BeginTransformFeedback().
       */
      if (reset) {
         so->offsets[i] = offsets[i];
         ctx->streamout.verts_written = 0;
      }

      pipe_so_target_reference(&so->targets[i], targets[i]);
   }

   for (; i < so->num_targets; i++) {
      pipe_so_target_reference(&so->targets[i], NULL);
   }

   so->num_targets = num_targets;

   fd_context_dirty(ctx, FD_DIRTY_STREAMOUT);
}

static void
fd_bind_compute_state(struct pipe_context *pctx, void *state) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->compute = state;
   /* NOTE: Don't mark FD_DIRTY_PROG for compute specific state */
   ctx->dirty_shader[PIPE_SHADER_COMPUTE] |= FD_DIRTY_SHADER_PROG;
}

static void
fd_set_compute_resources(struct pipe_context *pctx, unsigned start,
                         unsigned count, struct pipe_surface **prscs) in_dt
{
   // TODO
}
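
/* NOTE: the '*handles[i] += iova' in fd_set_global_binding() below
 * treats the incoming handle value as an offset relative to the BO,
 * relocating it by the BO's GPU address; the add implicitly truncates
 * to 32 bits (hence the TODO about >32b iovas).
 */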

/* used by clover to bind global objects, returning the bo address
 * via handles[n]
 */
static void
fd_set_global_binding(struct pipe_context *pctx, unsigned first, unsigned count,
                      struct pipe_resource **prscs, uint32_t **handles) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_global_bindings_stateobj *so = &ctx->global_bindings;
   unsigned mask = 0;

   if (prscs) {
      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + first;

         mask |= BIT(n);

         pipe_resource_reference(&so->buf[n], prscs[i]);

         if (so->buf[n]) {
            struct fd_resource *rsc = fd_resource(so->buf[n]);
            uint64_t iova = fd_bo_get_iova(rsc->bo);
            // TODO need to scream if iova > 32b or fix gallium API..
            *handles[i] += iova;
         }

         if (prscs[i])
            so->enabled_mask |= BIT(n);
         else
            so->enabled_mask &= ~BIT(n);
      }
   } else {
      mask = (BIT(count) - 1) << first;

      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + first;
         pipe_resource_reference(&so->buf[n], NULL);
      }

      so->enabled_mask &= ~mask;
   }
}

void
fd_state_init(struct pipe_context *pctx)
{
   pctx->set_blend_color = fd_set_blend_color;
   pctx->set_stencil_ref = fd_set_stencil_ref;
   pctx->set_clip_state = fd_set_clip_state;
   pctx->set_sample_mask = fd_set_sample_mask;
   pctx->set_min_samples = fd_set_min_samples;
   pctx->set_constant_buffer = fd_set_constant_buffer;
   pctx->set_shader_buffers = fd_set_shader_buffers;
   pctx->set_shader_images = fd_set_shader_images;
   pctx->set_framebuffer_state = fd_set_framebuffer_state;
   pctx->set_polygon_stipple = fd_set_polygon_stipple;
   pctx->set_scissor_states = fd_set_scissor_states;
   pctx->set_viewport_states = fd_set_viewport_states;

   pctx->set_vertex_buffers = fd_set_vertex_buffers;

   pctx->bind_blend_state = fd_blend_state_bind;
   pctx->delete_blend_state = fd_blend_state_delete;

   pctx->bind_rasterizer_state = fd_rasterizer_state_bind;
   pctx->delete_rasterizer_state = fd_rasterizer_state_delete;

   pctx->bind_depth_stencil_alpha_state = fd_zsa_state_bind;
   pctx->delete_depth_stencil_alpha_state = fd_zsa_state_delete;

   if (!pctx->create_vertex_elements_state)
      pctx->create_vertex_elements_state = fd_vertex_state_create;
   pctx->delete_vertex_elements_state = fd_vertex_state_delete;
   pctx->bind_vertex_elements_state = fd_vertex_state_bind;

   pctx->create_stream_output_target = fd_create_stream_output_target;
   pctx->stream_output_target_destroy = fd_stream_output_target_destroy;
   pctx->set_stream_output_targets = fd_set_stream_output_targets;

   if (has_compute(fd_screen(pctx->screen))) {
      pctx->bind_compute_state = fd_bind_compute_state;
      pctx->set_compute_resources = fd_set_compute_resources;
      pctx->set_global_binding = fd_set_global_binding;
   }
}