v3dx_state.c revision 9f464c52
/*
 * Copyright © 2014-2017 Broadcom
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "pipe/p_state.h"
#include "util/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_half.h"
#include "util/u_helpers.h"
#include "util/u_upload_mgr.h"

#include "v3d_context.h"
#include "v3d_tiling.h"
#include "broadcom/common/v3d_macros.h"
#include "broadcom/compiler/v3d_compiler.h"
#include "broadcom/cle/v3dx_pack.h"

static void
v3d_generic_cso_state_delete(struct pipe_context *pctx, void *hwcso)
{
        free(hwcso);
}

static void
v3d_set_blend_color(struct pipe_context *pctx,
                    const struct pipe_blend_color *blend_color)
{
        struct v3d_context *v3d = v3d_context(pctx);
        v3d->blend_color.f = *blend_color;
        for (int i = 0; i < 4; i++) {
                v3d->blend_color.hf[i] =
                        util_float_to_half(blend_color->color[i]);
        }
        v3d->dirty |= VC5_DIRTY_BLEND_COLOR;
}

static void
v3d_set_stencil_ref(struct pipe_context *pctx,
                    const struct pipe_stencil_ref *stencil_ref)
{
        struct v3d_context *v3d = v3d_context(pctx);
        v3d->stencil_ref = *stencil_ref;
        v3d->dirty |= VC5_DIRTY_STENCIL_REF;
}

static void
v3d_set_clip_state(struct pipe_context *pctx,
                   const struct pipe_clip_state *clip)
{
        struct v3d_context *v3d = v3d_context(pctx);
        v3d->clip = *clip;
        v3d->dirty |= VC5_DIRTY_CLIP;
}

static void
v3d_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
        struct v3d_context *v3d = v3d_context(pctx);
        v3d->sample_mask = sample_mask & ((1 << V3D_MAX_SAMPLES) - 1);
        v3d->dirty |= VC5_DIRTY_SAMPLE_STATE;
}
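/* Rasterizer state is mostly consumed from so->base at draw time, but the
 * DEPTH_OFFSET packet (and a Z16-scaled variant of it) only depends on the
 * CSO, so it is pre-packed here.
 */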
static void *
v3d_create_rasterizer_state(struct pipe_context *pctx,
                            const struct pipe_rasterizer_state *cso)
{
        struct v3d_rasterizer_state *so;

        so = CALLOC_STRUCT(v3d_rasterizer_state);
        if (!so)
                return NULL;

        so->base = *cso;

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         * BCM21553).
         */
        so->point_size = MAX2(cso->point_size, .125f);

        STATIC_ASSERT(sizeof(so->depth_offset) >=
                      cl_packet_length(DEPTH_OFFSET));
        v3dx_pack(&so->depth_offset, DEPTH_OFFSET, depth) {
                depth.depth_offset_factor = cso->offset_scale;
                depth.depth_offset_units = cso->offset_units;
        }

        /* The HW treats polygon offset units based on a Z24 buffer, so we
         * need to scale up offset_units if we're only Z16.
         */
        v3dx_pack(&so->depth_offset_z16, DEPTH_OFFSET, depth) {
                depth.depth_offset_factor = cso->offset_scale;
                depth.depth_offset_units = cso->offset_units * 256.0;
        }

        return so;
}

/* Blend state is baked into shaders. */
static void *
v3d_create_blend_state(struct pipe_context *pctx,
                       const struct pipe_blend_state *cso)
{
        struct v3d_blend_state *so;

        so = CALLOC_STRUCT(v3d_blend_state);
        if (!so)
                return NULL;

        so->base = *cso;

        if (cso->independent_blend_enable) {
                for (int i = 0; i < V3D_MAX_DRAW_BUFFERS; i++) {
                        so->blend_enables |= cso->rt[i].blend_enable << i;

                        /* V3D 4.x is when we got independent blend enables. */
                        assert(V3D_VERSION >= 40 ||
                               cso->rt[i].blend_enable == cso->rt[0].blend_enable);
                }
        } else {
                if (cso->rt[0].blend_enable)
                        so->blend_enables = (1 << V3D_MAX_DRAW_BUFFERS) - 1;
        }

        return so;
}

static uint32_t
translate_stencil_op(enum pipe_stencil_op op)
{
        switch (op) {
        case PIPE_STENCIL_OP_KEEP:      return V3D_STENCIL_OP_KEEP;
        case PIPE_STENCIL_OP_ZERO:      return V3D_STENCIL_OP_ZERO;
        case PIPE_STENCIL_OP_REPLACE:   return V3D_STENCIL_OP_REPLACE;
        case PIPE_STENCIL_OP_INCR:      return V3D_STENCIL_OP_INCR;
        case PIPE_STENCIL_OP_DECR:      return V3D_STENCIL_OP_DECR;
        case PIPE_STENCIL_OP_INCR_WRAP: return V3D_STENCIL_OP_INCWRAP;
        case PIPE_STENCIL_OP_DECR_WRAP: return V3D_STENCIL_OP_DECWRAP;
        case PIPE_STENCIL_OP_INVERT:    return V3D_STENCIL_OP_INVERT;
        }
        unreachable("bad stencil op");
}
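/* The depth/stencil/alpha CSO pre-packs the front and back STENCIL_CFG
 * packets and works out whether early-Z testing can stay enabled for the
 * given depth/stencil functions.
 */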
static void *
v3d_create_depth_stencil_alpha_state(struct pipe_context *pctx,
                                     const struct pipe_depth_stencil_alpha_state *cso)
{
        struct v3d_depth_stencil_alpha_state *so;

        so = CALLOC_STRUCT(v3d_depth_stencil_alpha_state);
        if (!so)
                return NULL;

        so->base = *cso;

        if (cso->depth.enabled) {
                switch (cso->depth.func) {
                case PIPE_FUNC_LESS:
                case PIPE_FUNC_LEQUAL:
                        so->ez_state = VC5_EZ_LT_LE;
                        break;
                case PIPE_FUNC_GREATER:
                case PIPE_FUNC_GEQUAL:
                        so->ez_state = VC5_EZ_GT_GE;
                        break;
                case PIPE_FUNC_NEVER:
                case PIPE_FUNC_EQUAL:
                        so->ez_state = VC5_EZ_UNDECIDED;
                        break;
                default:
                        so->ez_state = VC5_EZ_DISABLED;
                        break;
                }

                /* If stencil is enabled and it's not a no-op, then it would
                 * break EZ updates.
                 */
                if (cso->stencil[0].enabled &&
                    (cso->stencil[0].zfail_op != PIPE_STENCIL_OP_KEEP ||
                     cso->stencil[0].func != PIPE_FUNC_ALWAYS ||
                     (cso->stencil[1].enabled &&
                      (cso->stencil[1].zfail_op != PIPE_STENCIL_OP_KEEP &&
                       cso->stencil[1].func != PIPE_FUNC_ALWAYS)))) {
                        so->ez_state = VC5_EZ_DISABLED;
                }
        }

        const struct pipe_stencil_state *front = &cso->stencil[0];
        const struct pipe_stencil_state *back = &cso->stencil[1];

        if (front->enabled) {
                STATIC_ASSERT(sizeof(so->stencil_front) >=
                              cl_packet_length(STENCIL_CFG));
                v3dx_pack(&so->stencil_front, STENCIL_CFG, config) {
                        config.front_config = true;
                        /* If !back->enabled, then the front values should be
                         * used for both front and back-facing primitives.
                         */
                        config.back_config = !back->enabled;

                        config.stencil_write_mask = front->writemask;
                        config.stencil_test_mask = front->valuemask;

                        config.stencil_test_function = front->func;
                        config.stencil_pass_op =
                                translate_stencil_op(front->zpass_op);
                        config.depth_test_fail_op =
                                translate_stencil_op(front->zfail_op);
                        config.stencil_test_fail_op =
                                translate_stencil_op(front->fail_op);
                }
        }
        if (back->enabled) {
                STATIC_ASSERT(sizeof(so->stencil_back) >=
                              cl_packet_length(STENCIL_CFG));
                v3dx_pack(&so->stencil_back, STENCIL_CFG, config) {
                        config.front_config = false;
                        config.back_config = true;

                        config.stencil_write_mask = back->writemask;
                        config.stencil_test_mask = back->valuemask;

                        config.stencil_test_function = back->func;
                        config.stencil_pass_op =
                                translate_stencil_op(back->zpass_op);
                        config.depth_test_fail_op =
                                translate_stencil_op(back->zfail_op);
                        config.stencil_test_fail_op =
                                translate_stencil_op(back->fail_op);
                }
        }

        return so;
}

static void
v3d_set_polygon_stipple(struct pipe_context *pctx,
                        const struct pipe_poly_stipple *stipple)
{
        struct v3d_context *v3d = v3d_context(pctx);
        v3d->stipple = *stipple;
        v3d->dirty |= VC5_DIRTY_STIPPLE;
}

static void
v3d_set_scissor_states(struct pipe_context *pctx,
                       unsigned start_slot,
                       unsigned num_scissors,
                       const struct pipe_scissor_state *scissor)
{
        struct v3d_context *v3d = v3d_context(pctx);

        v3d->scissor = *scissor;
        v3d->dirty |= VC5_DIRTY_SCISSOR;
}

static void
v3d_set_viewport_states(struct pipe_context *pctx,
                        unsigned start_slot,
                        unsigned num_viewports,
                        const struct pipe_viewport_state *viewport)
{
        struct v3d_context *v3d = v3d_context(pctx);
        v3d->viewport = *viewport;
        v3d->dirty |= VC5_DIRTY_VIEWPORT;
}

static void
v3d_set_vertex_buffers(struct pipe_context *pctx,
                       unsigned start_slot, unsigned count,
                       const struct pipe_vertex_buffer *vb)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_vertexbuf_stateobj *so = &v3d->vertexbuf;

        util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb,
                                     start_slot, count);
        so->count = util_last_bit(so->enabled_mask);

        v3d->dirty |= VC5_DIRTY_VTXBUF;
}

static void
v3d_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct v3d_context *v3d = v3d_context(pctx);
        v3d->blend = hwcso;
        v3d->dirty |= VC5_DIRTY_BLEND;
}

static void
v3d_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct v3d_context *v3d = v3d_context(pctx);
        v3d->rasterizer = hwcso;
        v3d->dirty |= VC5_DIRTY_RASTERIZER;
}
static void
v3d_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct v3d_context *v3d = v3d_context(pctx);
        v3d->zsa = hwcso;
        v3d->dirty |= VC5_DIRTY_ZSA;
}
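/* Vertex element state pre-packs one GL_SHADER_STATE_ATTRIBUTE_RECORD per
 * element with the format-derived fields, and uploads a buffer of default
 * attribute values (0, 0, 0, 1) in case any of the vertex elements need
 * them.
 */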
static void *
v3d_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
                        const struct pipe_vertex_element *elements)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_vertex_stateobj *so = CALLOC_STRUCT(v3d_vertex_stateobj);

        if (!so)
                return NULL;

        memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
        so->num_elements = num_elements;

        for (int i = 0; i < so->num_elements; i++) {
                const struct pipe_vertex_element *elem = &elements[i];
                const struct util_format_description *desc =
                        util_format_description(elem->src_format);
                uint32_t r_size = desc->channel[0].size;

                const uint32_t size =
                        cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD);

                v3dx_pack(&so->attrs[i * size],
                          GL_SHADER_STATE_ATTRIBUTE_RECORD, attr) {
                        /* vec_size == 0 means 4 */
                        attr.vec_size = desc->nr_channels & 3;
                        attr.signed_int_type = (desc->channel[0].type ==
                                                UTIL_FORMAT_TYPE_SIGNED);

                        attr.normalized_int_type = desc->channel[0].normalized;
                        attr.read_as_int_uint = desc->channel[0].pure_integer;
                        attr.instance_divisor = MIN2(elem->instance_divisor,
                                                     0xffff);

                        switch (desc->channel[0].type) {
                        case UTIL_FORMAT_TYPE_FLOAT:
                                if (r_size == 32) {
                                        attr.type = ATTRIBUTE_FLOAT;
                                } else {
                                        assert(r_size == 16);
                                        attr.type = ATTRIBUTE_HALF_FLOAT;
                                }
                                break;

                        case UTIL_FORMAT_TYPE_SIGNED:
                        case UTIL_FORMAT_TYPE_UNSIGNED:
                                switch (r_size) {
                                case 32:
                                        attr.type = ATTRIBUTE_INT;
                                        break;
                                case 16:
                                        attr.type = ATTRIBUTE_SHORT;
                                        break;
                                case 10:
                                        attr.type = ATTRIBUTE_INT2_10_10_10;
                                        break;
                                case 8:
                                        attr.type = ATTRIBUTE_BYTE;
                                        break;
                                default:
                                        fprintf(stderr,
                                                "format %s unsupported\n",
                                                desc->name);
                                        attr.type = ATTRIBUTE_BYTE;
                                        abort();
                                }
                                break;

                        default:
                                fprintf(stderr,
                                        "format %s unsupported\n",
                                        desc->name);
                                abort();
                        }
                }
        }

        /* Set up the default attribute values in case any of the vertex
         * elements use them.
         */
        uint32_t *attrs;
        u_upload_alloc(v3d->state_uploader, 0,
                       V3D_MAX_VS_INPUTS * sizeof(float), 16,
                       &so->defaults_offset, &so->defaults, (void **)&attrs);

        for (int i = 0; i < V3D_MAX_VS_INPUTS / 4; i++) {
                attrs[i * 4 + 0] = 0;
                attrs[i * 4 + 1] = 0;
                attrs[i * 4 + 2] = 0;
                if (i < so->num_elements &&
                    util_format_is_pure_integer(so->pipe[i].src_format)) {
                        attrs[i * 4 + 3] = 1;
                } else {
                        attrs[i * 4 + 3] = fui(1.0);
                }
        }

        u_upload_unmap(v3d->state_uploader);
        return so;
}

static void
v3d_vertex_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct v3d_vertex_stateobj *so = hwcso;

        pipe_resource_reference(&so->defaults, NULL);
        free(so);
}

static void
v3d_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct v3d_context *v3d = v3d_context(pctx);
        v3d->vtx = hwcso;
        v3d->dirty |= VC5_DIRTY_VTXSTATE;
}

static void
v3d_set_constant_buffer(struct pipe_context *pctx, uint shader, uint index,
                        const struct pipe_constant_buffer *cb)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_constbuf_stateobj *so = &v3d->constbuf[shader];

        util_copy_constant_buffer(&so->cb[index], cb);

        /* Note that the state tracker can unbind constant buffers by
         * passing NULL here.
         */
        if (unlikely(!cb)) {
                so->enabled_mask &= ~(1 << index);
                so->dirty_mask &= ~(1 << index);
                return;
        }

        so->enabled_mask |= 1 << index;
        so->dirty_mask |= 1 << index;
        v3d->dirty |= VC5_DIRTY_CONSTBUF;
}

static void
v3d_set_framebuffer_state(struct pipe_context *pctx,
                          const struct pipe_framebuffer_state *framebuffer)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct pipe_framebuffer_state *cso = &v3d->framebuffer;

        v3d->job = NULL;

        util_copy_framebuffer_state(cso, framebuffer);

        v3d->swap_color_rb = 0;
        v3d->blend_dst_alpha_one = 0;
        for (int i = 0; i < v3d->framebuffer.nr_cbufs; i++) {
                struct pipe_surface *cbuf = v3d->framebuffer.cbufs[i];
                if (!cbuf)
                        continue;
                struct v3d_surface *v3d_cbuf = v3d_surface(cbuf);

                const struct util_format_description *desc =
                        util_format_description(cbuf->format);

                /* For BGRA8 formats (DRI window system default format), we
                 * need to swap R and B, since the HW's format is RGBA8. On
                 * V3D 4.1+, the RCL can swap R and B on load/store.
                 */
                if (v3d->screen->devinfo.ver < 41 && v3d_cbuf->swap_rb)
                        v3d->swap_color_rb |= 1 << i;

                if (desc->swizzle[3] == PIPE_SWIZZLE_1)
                        v3d->blend_dst_alpha_one |= 1 << i;
        }

        v3d->dirty |= VC5_DIRTY_FRAMEBUFFER;
}

static enum V3DX(Wrap_Mode)
translate_wrap(uint32_t pipe_wrap, bool using_nearest)
{
        switch (pipe_wrap) {
        case PIPE_TEX_WRAP_REPEAT:
                return V3D_WRAP_MODE_REPEAT;
        case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
                return V3D_WRAP_MODE_CLAMP;
        case PIPE_TEX_WRAP_MIRROR_REPEAT:
                return V3D_WRAP_MODE_MIRROR;
        case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
                return V3D_WRAP_MODE_BORDER;
        case PIPE_TEX_WRAP_CLAMP:
                return (using_nearest ?
                        V3D_WRAP_MODE_CLAMP :
                        V3D_WRAP_MODE_BORDER);
        default:
                unreachable("Unknown wrap mode");
        }
}
#if V3D_VERSION >= 40
static void
v3d_upload_sampler_state_variant(void *map,
                                 const struct pipe_sampler_state *cso,
                                 enum v3d_sampler_state_variant variant,
                                 bool either_nearest)
{
        v3dx_pack(map, SAMPLER_STATE, sampler) {
                sampler.wrap_i_border = false;

                sampler.wrap_s = translate_wrap(cso->wrap_s, either_nearest);
                sampler.wrap_t = translate_wrap(cso->wrap_t, either_nearest);
                sampler.wrap_r = translate_wrap(cso->wrap_r, either_nearest);

                sampler.fixed_bias = cso->lod_bias;
                sampler.depth_compare_function = cso->compare_func;

                sampler.min_filter_nearest =
                        cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
                sampler.mag_filter_nearest =
                        cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
                sampler.mip_filter_nearest =
                        cso->min_mip_filter != PIPE_TEX_MIPFILTER_LINEAR;

                sampler.min_level_of_detail = MIN2(MAX2(0, cso->min_lod),
                                                   15);
                sampler.max_level_of_detail = MIN2(cso->max_lod, 15);

                /* If we're not doing inter-miplevel filtering, we need to
                 * clamp the LOD so that we only sample from baselevel.
                 * However, we need to still allow the calculated LOD to be
                 * fractionally over the baselevel, so that the HW can decide
                 * between the min and mag filters.
                 */
                if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) {
                        sampler.min_level_of_detail =
                                MIN2(sampler.min_level_of_detail, 1.0 / 256.0);
                        sampler.max_level_of_detail =
                                MIN2(sampler.max_level_of_detail, 1.0 / 256.0);
                }

                if (cso->max_anisotropy) {
                        sampler.anisotropy_enable = true;

                        if (cso->max_anisotropy > 8)
                                sampler.maximum_anisotropy = 3;
                        else if (cso->max_anisotropy > 4)
                                sampler.maximum_anisotropy = 2;
                        else if (cso->max_anisotropy > 2)
                                sampler.maximum_anisotropy = 1;
                }

                if (variant == V3D_SAMPLER_STATE_BORDER_0) {
                        sampler.border_color_mode = V3D_BORDER_COLOR_0000;
                } else {
                        sampler.border_color_mode = V3D_BORDER_COLOR_FOLLOWS;

                        union pipe_color_union border;

                        /* First, reswizzle the border color for any
                         * mismatching we're doing between the texture's
                         * channel order in hardware (R) versus what it is at
                         * the GL level (ALPHA)
                         */
                        switch (variant) {
                        case V3D_SAMPLER_STATE_F16_BGRA:
                        case V3D_SAMPLER_STATE_F16_BGRA_UNORM:
                        case V3D_SAMPLER_STATE_F16_BGRA_SNORM:
                                border.i[0] = cso->border_color.i[2];
                                border.i[1] = cso->border_color.i[1];
                                border.i[2] = cso->border_color.i[0];
                                border.i[3] = cso->border_color.i[3];
                                break;

                        case V3D_SAMPLER_STATE_F16_A:
                        case V3D_SAMPLER_STATE_F16_A_UNORM:
                        case V3D_SAMPLER_STATE_F16_A_SNORM:
                        case V3D_SAMPLER_STATE_32_A:
                        case V3D_SAMPLER_STATE_32_A_UNORM:
                        case V3D_SAMPLER_STATE_32_A_SNORM:
                                border.i[0] = cso->border_color.i[3];
                                border.i[1] = 0;
                                border.i[2] = 0;
                                border.i[3] = 0;
                                break;

                        case V3D_SAMPLER_STATE_F16_LA:
                        case V3D_SAMPLER_STATE_F16_LA_UNORM:
                        case V3D_SAMPLER_STATE_F16_LA_SNORM:
                                border.i[0] = cso->border_color.i[0];
                                border.i[1] = cso->border_color.i[3];
                                border.i[2] = 0;
                                border.i[3] = 0;
                                break;

                        default:
                                border = cso->border_color;
                        }

                        /* Perform any clamping. */
                        switch (variant) {
                        case V3D_SAMPLER_STATE_F16_UNORM:
                        case V3D_SAMPLER_STATE_F16_BGRA_UNORM:
                        case V3D_SAMPLER_STATE_F16_A_UNORM:
                        case V3D_SAMPLER_STATE_F16_LA_UNORM:
                        case V3D_SAMPLER_STATE_32_UNORM:
                        case V3D_SAMPLER_STATE_32_A_UNORM:
                                for (int i = 0; i < 4; i++)
                                        border.f[i] = CLAMP(border.f[i], 0, 1);
                                break;

                        case V3D_SAMPLER_STATE_F16_SNORM:
                        case V3D_SAMPLER_STATE_F16_BGRA_SNORM:
                        case V3D_SAMPLER_STATE_F16_A_SNORM:
                        case V3D_SAMPLER_STATE_F16_LA_SNORM:
                        case V3D_SAMPLER_STATE_32_SNORM:
                        case V3D_SAMPLER_STATE_32_A_SNORM:
                                for (int i = 0; i < 4; i++)
                                        border.f[i] = CLAMP(border.f[i], -1, 1);
                                break;

                        case V3D_SAMPLER_STATE_1010102U:
                                border.ui[0] = CLAMP(border.ui[0],
                                                     0, (1 << 10) - 1);
                                border.ui[1] = CLAMP(border.ui[1],
                                                     0, (1 << 10) - 1);
                                border.ui[2] = CLAMP(border.ui[2],
                                                     0, (1 << 10) - 1);
                                border.ui[3] = CLAMP(border.ui[3],
                                                     0, 3);
                                break;

                        case V3D_SAMPLER_STATE_16U:
                                for (int i = 0; i < 4; i++)
                                        border.ui[i] = CLAMP(border.ui[i],
                                                             0, 0xffff);
                                break;

                        case V3D_SAMPLER_STATE_16I:
                                for (int i = 0; i < 4; i++)
                                        border.i[i] = CLAMP(border.i[i],
                                                            -32768, 32767);
                                break;

                        case V3D_SAMPLER_STATE_8U:
                                for (int i = 0; i < 4; i++)
                                        border.ui[i] = CLAMP(border.ui[i],
                                                             0, 0xff);
                                break;

                        case V3D_SAMPLER_STATE_8I:
                                for (int i = 0; i < 4; i++)
                                        border.i[i] = CLAMP(border.i[i],
                                                            -128, 127);
                                break;

                        default:
                                break;
                        }

                        if (variant >= V3D_SAMPLER_STATE_32) {
                                sampler.border_color_word_0 = border.ui[0];
                                sampler.border_color_word_1 = border.ui[1];
                                sampler.border_color_word_2 = border.ui[2];
                                sampler.border_color_word_3 = border.ui[3];
                        } else {
                                sampler.border_color_word_0 =
                                        util_float_to_half(border.f[0]);
                                sampler.border_color_word_1 =
                                        util_float_to_half(border.f[1]);
                                sampler.border_color_word_2 =
                                        util_float_to_half(border.f[2]);
                                sampler.border_color_word_3 =
                                        util_float_to_half(border.f[3]);
                        }
                }
        }
}
#endif
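/* Sampler CSO: on V3D 4.x the packed SAMPLER_STATE records are streamed into
 * a BO; when a non-zero border color may be used, one variant is packed per
 * sampler return format so the border color can be swizzled and clamped to
 * match. On 3.x the equivalent fields are packed into the texture uniform
 * parameters and TEXTURE_SHADER_STATE instead.
 */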
static void *
v3d_create_sampler_state(struct pipe_context *pctx,
                         const struct pipe_sampler_state *cso)
{
        MAYBE_UNUSED struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_sampler_state *so = CALLOC_STRUCT(v3d_sampler_state);

        if (!so)
                return NULL;

        memcpy(so, cso, sizeof(*cso));

        bool either_nearest =
                (cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST ||
                 cso->min_img_filter == PIPE_TEX_FILTER_NEAREST);

        enum V3DX(Wrap_Mode) wrap_s = translate_wrap(cso->wrap_s,
                                                     either_nearest);
        enum V3DX(Wrap_Mode) wrap_t = translate_wrap(cso->wrap_t,
                                                     either_nearest);
        enum V3DX(Wrap_Mode) wrap_r = translate_wrap(cso->wrap_r,
                                                     either_nearest);

        bool uses_border_color = (wrap_s == V3D_WRAP_MODE_BORDER ||
                                  wrap_t == V3D_WRAP_MODE_BORDER ||
                                  wrap_r == V3D_WRAP_MODE_BORDER);
        so->border_color_variants = (uses_border_color &&
                                     (cso->border_color.ui[0] != 0 ||
                                      cso->border_color.ui[1] != 0 ||
                                      cso->border_color.ui[2] != 0 ||
                                      cso->border_color.ui[3] != 0));

#if V3D_VERSION >= 40
        void *map;
        int sampler_align = so->border_color_variants ? 32 : 8;
        int sampler_size = align(cl_packet_length(SAMPLER_STATE), sampler_align);
        int num_variants = (so->border_color_variants ?
                            ARRAY_SIZE(so->sampler_state_offset) : 1);
        u_upload_alloc(v3d->state_uploader, 0,
                       sampler_size * num_variants,
                       sampler_align,
                       &so->sampler_state_offset[0],
                       &so->sampler_state,
                       &map);

        for (int i = 0; i < num_variants; i++) {
                so->sampler_state_offset[i] =
                        so->sampler_state_offset[0] + i * sampler_size;
                v3d_upload_sampler_state_variant(map + i * sampler_size,
                                                 cso, i, either_nearest);
        }

#else /* V3D_VERSION < 40 */
        v3dx_pack(&so->p0, TEXTURE_UNIFORM_PARAMETER_0_CFG_MODE1, p0) {
                p0.s_wrap_mode = wrap_s;
                p0.t_wrap_mode = wrap_t;
                p0.r_wrap_mode = wrap_r;
        }

        v3dx_pack(&so->texture_shader_state, TEXTURE_SHADER_STATE, tex) {
                tex.depth_compare_function = cso->compare_func;
                tex.fixed_bias = cso->lod_bias;
        }
#endif /* V3D_VERSION < 40 */
        return so;
}
static void
v3d_sampler_states_bind(struct pipe_context *pctx,
                        enum pipe_shader_type shader, unsigned start,
                        unsigned nr, void **hwcso)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_texture_stateobj *stage_tex = &v3d->tex[shader];

        assert(start == 0);
        unsigned i;
        unsigned new_nr = 0;

        for (i = 0; i < nr; i++) {
                if (hwcso[i])
                        new_nr = i + 1;
                stage_tex->samplers[i] = hwcso[i];
        }

        for (; i < stage_tex->num_samplers; i++) {
                stage_tex->samplers[i] = NULL;
        }

        stage_tex->num_samplers = new_nr;
}

static void
v3d_sampler_state_delete(struct pipe_context *pctx,
                         void *hwcso)
{
        struct pipe_sampler_state *psampler = hwcso;
        struct v3d_sampler_state *sampler = v3d_sampler_state(psampler);

        pipe_resource_reference(&sampler->sampler_state, NULL);
        free(psampler);
}

#if V3D_VERSION >= 40
static uint32_t
translate_swizzle(unsigned char pipe_swizzle)
{
        switch (pipe_swizzle) {
        case PIPE_SWIZZLE_0:
                return 0;
        case PIPE_SWIZZLE_1:
                return 1;
        case PIPE_SWIZZLE_X:
        case PIPE_SWIZZLE_Y:
        case PIPE_SWIZZLE_Z:
        case PIPE_SWIZZLE_W:
                return 2 + pipe_swizzle;
        default:
                unreachable("unknown swizzle");
        }
}
#endif

static void
v3d_setup_texture_shader_state(struct V3DX(TEXTURE_SHADER_STATE) *tex,
                               struct pipe_resource *prsc,
                               int base_level, int last_level,
                               int first_layer, int last_layer)
{
        struct v3d_resource *rsc = v3d_resource(prsc);
        int msaa_scale = prsc->nr_samples > 1 ? 2 : 1;

        tex->image_width = prsc->width0 * msaa_scale;
        tex->image_height = prsc->height0 * msaa_scale;

#if V3D_VERSION >= 40
        /* On 4.x, the height of a 1D texture is redefined to be the
         * upper 14 bits of the width (which is only usable with txf).
         */
        if (prsc->target == PIPE_TEXTURE_1D ||
            prsc->target == PIPE_TEXTURE_1D_ARRAY) {
                tex->image_height = tex->image_width >> 14;
        }

        tex->image_width &= (1 << 14) - 1;
        tex->image_height &= (1 << 14) - 1;
#endif

        if (prsc->target == PIPE_TEXTURE_3D) {
                tex->image_depth = prsc->depth0;
        } else {
                tex->image_depth = (last_layer - first_layer) + 1;
        }

        tex->base_level = base_level;
#if V3D_VERSION >= 40
        tex->max_level = last_level;
        /* Note that we don't have a job to reference the texture's BO
         * at state create time, so any time this sampler view is used
         * we need to add the texture to the job.
         */
        tex->texture_base_pointer =
                cl_address(NULL,
                           rsc->bo->offset +
                           v3d_layer_offset(prsc, 0, first_layer));
#endif
        tex->array_stride_64_byte_aligned = rsc->cube_map_stride / 64;

        /* Since other platform devices may produce UIF images even
         * when they're not big enough for V3D to assume they're UIF,
         * we force images with level 0 as UIF to be always treated
         * that way.
         */
        tex->level_0_is_strictly_uif =
                (rsc->slices[0].tiling == VC5_TILING_UIF_XOR ||
                 rsc->slices[0].tiling == VC5_TILING_UIF_NO_XOR);
        tex->level_0_xor_enable = (rsc->slices[0].tiling == VC5_TILING_UIF_XOR);

        if (tex->level_0_is_strictly_uif)
                tex->level_0_ub_pad = rsc->slices[0].ub_pad;

#if V3D_VERSION >= 40
        if (tex->uif_xor_disable ||
            tex->level_0_is_strictly_uif) {
                tex->extended = true;
        }
#endif /* V3D_VERSION >= 40 */
}
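/* Sampler views pack a TEXTURE_SHADER_STATE record up front. Raster-order
 * (linear) resources can't be sampled directly, so a tiled shadow resource
 * is allocated instead and flagged so its contents get refreshed from the
 * parent before sampling.
 */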
static struct pipe_sampler_view *
v3d_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *prsc,
                        const struct pipe_sampler_view *cso)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_screen *screen = v3d->screen;
        struct v3d_sampler_view *so = CALLOC_STRUCT(v3d_sampler_view);
        struct v3d_resource *rsc = v3d_resource(prsc);

        if (!so)
                return NULL;

        so->base = *cso;

        pipe_reference(NULL, &prsc->reference);

        /* Compute the sampler view's swizzle up front. This will be plugged
         * into either the sampler (for 16-bit returns) or the shader's
         * texture key (for 32-bit returns).
         */
        uint8_t view_swizzle[4] = {
                cso->swizzle_r,
                cso->swizzle_g,
                cso->swizzle_b,
                cso->swizzle_a
        };
        const uint8_t *fmt_swizzle =
                v3d_get_format_swizzle(&screen->devinfo, so->base.format);
        util_format_compose_swizzles(fmt_swizzle, view_swizzle, so->swizzle);

        so->base.texture = prsc;
        so->base.reference.count = 1;
        so->base.context = pctx;

        if (rsc->separate_stencil &&
            cso->format == PIPE_FORMAT_X32_S8X24_UINT) {
                rsc = rsc->separate_stencil;
                prsc = &rsc->base;
        }

        /* If we're sampling depth from depth/stencil, demote the format to
         * just depth. u_format will end up giving the answers for the
         * stencil channel otherwise.
         */
        enum pipe_format sample_format = cso->format;
        if (sample_format == PIPE_FORMAT_S8_UINT_Z24_UNORM)
                sample_format = PIPE_FORMAT_X8Z24_UNORM;

#if V3D_VERSION >= 40
        const struct util_format_description *desc =
                util_format_description(sample_format);

        if (util_format_is_pure_integer(sample_format) &&
            !util_format_has_depth(desc)) {
                int chan = util_format_get_first_non_void_channel(sample_format);
                if (util_format_is_pure_uint(sample_format)) {
                        switch (desc->channel[chan].size) {
                        case 32:
                                so->sampler_variant = V3D_SAMPLER_STATE_32;
                                break;
                        case 16:
                                so->sampler_variant = V3D_SAMPLER_STATE_16U;
                                break;
                        case 10:
                                so->sampler_variant = V3D_SAMPLER_STATE_1010102U;
                                break;
                        case 8:
                                so->sampler_variant = V3D_SAMPLER_STATE_8U;
                                break;
                        }
                } else {
                        switch (desc->channel[chan].size) {
                        case 32:
                                so->sampler_variant = V3D_SAMPLER_STATE_32;
                                break;
                        case 16:
                                so->sampler_variant = V3D_SAMPLER_STATE_16I;
                                break;
                        case 8:
                                so->sampler_variant = V3D_SAMPLER_STATE_8I;
                                break;
                        }
                }
        } else {
                if (v3d_get_tex_return_size(&screen->devinfo, sample_format,
                                            PIPE_TEX_COMPARE_NONE) == 32) {
                        if (util_format_is_alpha(sample_format))
                                so->sampler_variant = V3D_SAMPLER_STATE_32_A;
                        else
                                so->sampler_variant = V3D_SAMPLER_STATE_32;
                } else {
                        if (util_format_is_luminance_alpha(sample_format))
                                so->sampler_variant = V3D_SAMPLER_STATE_F16_LA;
                        else if (util_format_is_alpha(sample_format))
                                so->sampler_variant = V3D_SAMPLER_STATE_F16_A;
                        else if (fmt_swizzle[0] == PIPE_SWIZZLE_Z)
                                so->sampler_variant = V3D_SAMPLER_STATE_F16_BGRA;
                        else
                                so->sampler_variant = V3D_SAMPLER_STATE_F16;
                }

                if (util_format_is_unorm(sample_format)) {
                        so->sampler_variant += (V3D_SAMPLER_STATE_F16_UNORM -
                                                V3D_SAMPLER_STATE_F16);
                } else if (util_format_is_snorm(sample_format)) {
                        so->sampler_variant += (V3D_SAMPLER_STATE_F16_SNORM -
                                                V3D_SAMPLER_STATE_F16);
                }
        }
#endif

        /* V3D still doesn't support sampling from raster textures, so we will
         * have to copy to a temporary tiled texture.
         */
        if (!rsc->tiled && !(prsc->target == PIPE_TEXTURE_1D ||
                             prsc->target == PIPE_TEXTURE_1D_ARRAY)) {
                struct v3d_resource *shadow_parent = rsc;
                struct pipe_resource tmpl = {
                        .target = prsc->target,
                        .format = prsc->format,
                        .width0 = u_minify(prsc->width0,
                                           cso->u.tex.first_level),
                        .height0 = u_minify(prsc->height0,
                                            cso->u.tex.first_level),
                        .depth0 = 1,
                        .array_size = 1,
                        .bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET,
                        .last_level = cso->u.tex.last_level - cso->u.tex.first_level,
                        .nr_samples = prsc->nr_samples,
                };

                /* Create the shadow texture. The rest of the sampler view
                 * setup will use the shadow.
                 */
                prsc = v3d_resource_create(pctx->screen, &tmpl);
                if (!prsc) {
                        free(so);
                        return NULL;
                }
                rsc = v3d_resource(prsc);

                /* Flag it as needing update of the contents from the
                 * parent.
                 */
                rsc->writes = shadow_parent->writes - 1;
                assert(rsc->tiled);

                so->texture = prsc;
        } else {
                pipe_resource_reference(&so->texture, prsc);
        }

        void *map;
#if V3D_VERSION >= 40
        so->bo = v3d_bo_alloc(v3d->screen,
                              cl_packet_length(TEXTURE_SHADER_STATE), "sampler");
        map = v3d_bo_map(so->bo);
#else /* V3D_VERSION < 40 */
        STATIC_ASSERT(sizeof(so->texture_shader_state) >=
                      cl_packet_length(TEXTURE_SHADER_STATE));
        map = &so->texture_shader_state;
#endif

        v3dx_pack(map, TEXTURE_SHADER_STATE, tex) {
                v3d_setup_texture_shader_state(&tex, prsc,
                                               cso->u.tex.first_level,
                                               cso->u.tex.last_level,
                                               cso->u.tex.first_layer,
                                               cso->u.tex.last_layer);

                tex.srgb = util_format_is_srgb(cso->format);

#if V3D_VERSION >= 40
                tex.swizzle_r = translate_swizzle(so->swizzle[0]);
                tex.swizzle_g = translate_swizzle(so->swizzle[1]);
                tex.swizzle_b = translate_swizzle(so->swizzle[2]);
                tex.swizzle_a = translate_swizzle(so->swizzle[3]);
#endif

                if (prsc->nr_samples > 1 && V3D_VERSION < 40) {
                        /* Using texture views to reinterpret formats on our
                         * MSAA textures won't work, because we don't lay out
                         * the bits in memory as it's expected -- for example,
                         * RGBA8 and RGB10_A2 are compatible in the
                         * ARB_texture_view spec, but in HW we lay them out as
                         * 32bpp RGBA8 and 64bpp RGBA16F. Just assert for now
                         * to catch failures.
                         *
                         * We explicitly allow remapping S8Z24 to RGBA8888 for
                         * v3d_blit.c's stencil blits.
                         */
                        assert((util_format_linear(cso->format) ==
                                util_format_linear(prsc->format)) ||
                               (prsc->format == PIPE_FORMAT_S8_UINT_Z24_UNORM &&
                                cso->format == PIPE_FORMAT_R8G8B8A8_UNORM));
                        uint32_t output_image_format =
                                v3d_get_rt_format(&screen->devinfo, cso->format);
                        uint32_t internal_type;
                        uint32_t internal_bpp;
                        v3d_get_internal_type_bpp_for_output_format(&screen->devinfo,
                                                                    output_image_format,
                                                                    &internal_type,
                                                                    &internal_bpp);

                        switch (internal_type) {
                        case V3D_INTERNAL_TYPE_8:
                                tex.texture_type = TEXTURE_DATA_FORMAT_RGBA8;
                                break;
                        case V3D_INTERNAL_TYPE_16F:
                                tex.texture_type = TEXTURE_DATA_FORMAT_RGBA16F;
                                break;
                        default:
                                unreachable("Bad MSAA texture type");
                        }

                        /* sRGB was stored in the tile buffer as linear and
                         * would have been encoded to sRGB on resolved tile
                         * buffer store. Note that this means we would need
                         * shader code if we wanted to read an MSAA sRGB
                         * texture without sRGB decode.
                         */
                        tex.srgb = false;
                } else {
                        tex.texture_type = v3d_get_tex_format(&screen->devinfo,
                                                              cso->format);
                }
        };

        return &so->base;
}

static void
v3d_sampler_view_destroy(struct pipe_context *pctx,
                         struct pipe_sampler_view *psview)
{
        struct v3d_sampler_view *sview = v3d_sampler_view(psview);

        v3d_bo_unreference(&sview->bo);
        pipe_resource_reference(&psview->texture, NULL);
        pipe_resource_reference(&sview->texture, NULL);
        free(psview);
}

static void
v3d_set_sampler_views(struct pipe_context *pctx,
                      enum pipe_shader_type shader,
                      unsigned start, unsigned nr,
                      struct pipe_sampler_view **views)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_texture_stateobj *stage_tex = &v3d->tex[shader];
        unsigned i;
        unsigned new_nr = 0;

        assert(start == 0);

        for (i = 0; i < nr; i++) {
                if (views[i])
                        new_nr = i + 1;
                pipe_sampler_view_reference(&stage_tex->textures[i], views[i]);
        }

        for (; i < stage_tex->num_textures; i++) {
                pipe_sampler_view_reference(&stage_tex->textures[i], NULL);
        }

        stage_tex->num_textures = new_nr;
}

static struct pipe_stream_output_target *
v3d_create_stream_output_target(struct pipe_context *pctx,
                                struct pipe_resource *prsc,
                                unsigned buffer_offset,
                                unsigned buffer_size)
{
        struct pipe_stream_output_target *target;

        target = CALLOC_STRUCT(pipe_stream_output_target);
        if (!target)
                return NULL;

        pipe_reference_init(&target->reference, 1);
        pipe_resource_reference(&target->buffer, prsc);

        target->context = pctx;
        target->buffer_offset = buffer_offset;
        target->buffer_size = buffer_size;

        return target;
}

static void
v3d_stream_output_target_destroy(struct pipe_context *pctx,
                                 struct pipe_stream_output_target *target)
{
        pipe_resource_reference(&target->buffer, NULL);
        free(target);
}

static void
v3d_set_stream_output_targets(struct pipe_context *pctx,
                              unsigned num_targets,
                              struct pipe_stream_output_target **targets,
                              const unsigned *offsets)
{
        struct v3d_context *ctx = v3d_context(pctx);
        struct v3d_streamout_stateobj *so = &ctx->streamout;
        unsigned i;

        assert(num_targets <= ARRAY_SIZE(so->targets));

        for (i = 0; i < num_targets; i++) {
                if (offsets[i] != -1)
                        so->offsets[i] = offsets[i];

                pipe_so_target_reference(&so->targets[i], targets[i]);
        }

        for (; i < so->num_targets; i++)
                pipe_so_target_reference(&so->targets[i], NULL);

        so->num_targets = num_targets;

        ctx->dirty |= VC5_DIRTY_STREAMOUT;
}
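/* Shader buffer (SSBO) bindings are tracked per shader stage; passing NULL
 * releases the given range of slots.
 */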
static void
v3d_set_shader_buffers(struct pipe_context *pctx,
                       enum pipe_shader_type shader,
                       unsigned start, unsigned count,
                       const struct pipe_shader_buffer *buffers,
                       unsigned writable_bitmask)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_ssbo_stateobj *so = &v3d->ssbo[shader];
        unsigned mask = 0;

        if (buffers) {
                for (unsigned i = 0; i < count; i++) {
                        unsigned n = i + start;
                        struct pipe_shader_buffer *buf = &so->sb[n];

                        if ((buf->buffer == buffers[i].buffer) &&
                            (buf->buffer_offset == buffers[i].buffer_offset) &&
                            (buf->buffer_size == buffers[i].buffer_size))
                                continue;

                        mask |= 1 << n;

                        buf->buffer_offset = buffers[i].buffer_offset;
                        buf->buffer_size = buffers[i].buffer_size;
                        pipe_resource_reference(&buf->buffer, buffers[i].buffer);

                        if (buf->buffer)
                                so->enabled_mask |= 1 << n;
                        else
                                so->enabled_mask &= ~(1 << n);
                }
        } else {
                mask = ((1 << count) - 1) << start;

                for (unsigned i = 0; i < count; i++) {
                        unsigned n = i + start;
                        struct pipe_shader_buffer *buf = &so->sb[n];

                        pipe_resource_reference(&buf->buffer, NULL);
                }

                so->enabled_mask &= ~mask;
        }

        v3d->dirty |= VC5_DIRTY_SSBO;
}

static void
v3d_create_image_view_texture_shader_state(struct v3d_context *v3d,
                                           struct v3d_shaderimg_stateobj *so,
                                           int img)
{
#if V3D_VERSION >= 40
        struct v3d_image_view *iview = &so->si[img];

        void *map;
        u_upload_alloc(v3d->uploader, 0, cl_packet_length(TEXTURE_SHADER_STATE),
                       32,
                       &iview->tex_state_offset,
                       &iview->tex_state,
                       &map);

        struct pipe_resource *prsc = iview->base.resource;

        v3dx_pack(map, TEXTURE_SHADER_STATE, tex) {
                v3d_setup_texture_shader_state(&tex, prsc,
                                               iview->base.u.tex.level,
                                               iview->base.u.tex.level,
                                               iview->base.u.tex.first_layer,
                                               iview->base.u.tex.last_layer);

                tex.swizzle_r = translate_swizzle(PIPE_SWIZZLE_X);
                tex.swizzle_g = translate_swizzle(PIPE_SWIZZLE_Y);
                tex.swizzle_b = translate_swizzle(PIPE_SWIZZLE_Z);
                tex.swizzle_a = translate_swizzle(PIPE_SWIZZLE_W);

                tex.texture_type = v3d_get_tex_format(&v3d->screen->devinfo,
                                                      iview->base.format);
        };
#else /* V3D_VERSION < 40 */
        /* V3D 3.x doesn't have hardware support for shader image load/store
         * operations on textures, so they get lowered in the shader to
         * general memory accesses.
         */
#endif
}
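/* Image view (shader image load/store) bindings: mirror the incoming state
 * and, on V3D 4.x, build a TEXTURE_SHADER_STATE record for each bound image.
 */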
static void
v3d_set_shader_images(struct pipe_context *pctx,
                      enum pipe_shader_type shader,
                      unsigned start, unsigned count,
                      const struct pipe_image_view *images)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_shaderimg_stateobj *so = &v3d->shaderimg[shader];

        if (images) {
                for (unsigned i = 0; i < count; i++) {
                        unsigned n = i + start;
                        struct v3d_image_view *iview = &so->si[n];

                        if ((iview->base.resource == images[i].resource) &&
                            (iview->base.format == images[i].format) &&
                            (iview->base.access == images[i].access) &&
                            !memcmp(&iview->base.u, &images[i].u,
                                    sizeof(iview->base.u)))
                                continue;

                        util_copy_image_view(&iview->base, &images[i]);

                        if (iview->base.resource) {
                                so->enabled_mask |= 1 << n;
                                v3d_create_image_view_texture_shader_state(v3d,
                                                                           so,
                                                                           n);
                        } else {
                                so->enabled_mask &= ~(1 << n);
                                pipe_resource_reference(&iview->tex_state, NULL);
                        }
                }
        } else {
                for (unsigned i = 0; i < count; i++) {
                        unsigned n = i + start;
                        struct v3d_image_view *iview = &so->si[n];

                        pipe_resource_reference(&iview->base.resource, NULL);
                        pipe_resource_reference(&iview->tex_state, NULL);
                }

                if (count == 32)
                        so->enabled_mask = 0;
                else
                        so->enabled_mask &= ~(((1 << count) - 1) << start);
        }

        v3d->dirty |= VC5_DIRTY_SHADER_IMAGE;
}

void
v3dX(state_init)(struct pipe_context *pctx)
{
        pctx->set_blend_color = v3d_set_blend_color;
        pctx->set_stencil_ref = v3d_set_stencil_ref;
        pctx->set_clip_state = v3d_set_clip_state;
        pctx->set_sample_mask = v3d_set_sample_mask;
        pctx->set_constant_buffer = v3d_set_constant_buffer;
        pctx->set_framebuffer_state = v3d_set_framebuffer_state;
        pctx->set_polygon_stipple = v3d_set_polygon_stipple;
        pctx->set_scissor_states = v3d_set_scissor_states;
        pctx->set_viewport_states = v3d_set_viewport_states;

        pctx->set_vertex_buffers = v3d_set_vertex_buffers;

        pctx->create_blend_state = v3d_create_blend_state;
        pctx->bind_blend_state = v3d_blend_state_bind;
        pctx->delete_blend_state = v3d_generic_cso_state_delete;

        pctx->create_rasterizer_state = v3d_create_rasterizer_state;
        pctx->bind_rasterizer_state = v3d_rasterizer_state_bind;
        pctx->delete_rasterizer_state = v3d_generic_cso_state_delete;

        pctx->create_depth_stencil_alpha_state = v3d_create_depth_stencil_alpha_state;
        pctx->bind_depth_stencil_alpha_state = v3d_zsa_state_bind;
        pctx->delete_depth_stencil_alpha_state = v3d_generic_cso_state_delete;

        pctx->create_vertex_elements_state = v3d_vertex_state_create;
        pctx->delete_vertex_elements_state = v3d_vertex_state_delete;
        pctx->bind_vertex_elements_state = v3d_vertex_state_bind;

        pctx->create_sampler_state = v3d_create_sampler_state;
        pctx->delete_sampler_state = v3d_sampler_state_delete;
        pctx->bind_sampler_states = v3d_sampler_states_bind;

        pctx->create_sampler_view = v3d_create_sampler_view;
        pctx->sampler_view_destroy = v3d_sampler_view_destroy;
        pctx->set_sampler_views = v3d_set_sampler_views;

        pctx->set_shader_buffers = v3d_set_shader_buffers;
        pctx->set_shader_images = v3d_set_shader_images;

        pctx->create_stream_output_target = v3d_create_stream_output_target;
        pctx->stream_output_target_destroy = v3d_stream_output_target_destroy;
        pctx->set_stream_output_targets = v3d_set_stream_output_targets;
}