1/* 2 * Copyright © 2016 Red Hat 3 * based on intel anv code: 4 * Copyright © 2015 Intel Corporation 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice (including the next 14 * paragraph) shall be included in all copies or substantial portions of the 15 * Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 23 * IN THE SOFTWARE. 24 */ 25 26#include "radv_meta.h" 27 28#include <fcntl.h> 29#include <limits.h> 30#include <pwd.h> 31#include <sys/stat.h> 32 33void 34radv_meta_save(struct radv_meta_saved_state *state, 35 struct radv_cmd_buffer *cmd_buffer, uint32_t flags) 36{ 37 VkPipelineBindPoint bind_point = 38 flags & RADV_META_SAVE_GRAPHICS_PIPELINE ? 39 VK_PIPELINE_BIND_POINT_GRAPHICS : 40 VK_PIPELINE_BIND_POINT_COMPUTE; 41 struct radv_descriptor_state *descriptors_state = 42 radv_get_descriptors_state(cmd_buffer, bind_point); 43 44 assert(flags & (RADV_META_SAVE_GRAPHICS_PIPELINE | 45 RADV_META_SAVE_COMPUTE_PIPELINE)); 46 47 state->flags = flags; 48 49 if (state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE) { 50 assert(!(state->flags & RADV_META_SAVE_COMPUTE_PIPELINE)); 51 52 state->old_pipeline = cmd_buffer->state.pipeline; 53 54 /* Save all viewports. */ 55 state->viewport.count = cmd_buffer->state.dynamic.viewport.count; 56 typed_memcpy(state->viewport.viewports, 57 cmd_buffer->state.dynamic.viewport.viewports, 58 MAX_VIEWPORTS); 59 60 /* Save all scissors. */ 61 state->scissor.count = cmd_buffer->state.dynamic.scissor.count; 62 typed_memcpy(state->scissor.scissors, 63 cmd_buffer->state.dynamic.scissor.scissors, 64 MAX_SCISSORS); 65 66 /* The most common meta operations all want to have the 67 * viewport reset and any scissors disabled. The rest of the 68 * dynamic state should have no effect. 69 */ 70 cmd_buffer->state.dynamic.viewport.count = 0; 71 cmd_buffer->state.dynamic.scissor.count = 0; 72 cmd_buffer->state.dirty |= 1 << VK_DYNAMIC_STATE_VIEWPORT | 73 1 << VK_DYNAMIC_STATE_SCISSOR; 74 } 75 76 if (state->flags & RADV_META_SAVE_COMPUTE_PIPELINE) { 77 assert(!(state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE)); 78 79 state->old_pipeline = cmd_buffer->state.compute_pipeline; 80 } 81 82 if (state->flags & RADV_META_SAVE_DESCRIPTORS) { 83 state->old_descriptor_set0 = descriptors_state->sets[0]; 84 if (!(descriptors_state->valid & 1) || !state->old_descriptor_set0) 85 state->flags &= ~RADV_META_SAVE_DESCRIPTORS; 86 } 87 88 if (state->flags & RADV_META_SAVE_CONSTANTS) { 89 memcpy(state->push_constants, cmd_buffer->push_constants, 90 MAX_PUSH_CONSTANTS_SIZE); 91 } 92 93 if (state->flags & RADV_META_SAVE_PASS) { 94 state->pass = cmd_buffer->state.pass; 95 state->subpass = cmd_buffer->state.subpass; 96 state->framebuffer = cmd_buffer->state.framebuffer; 97 state->attachments = cmd_buffer->state.attachments; 98 state->render_area = cmd_buffer->state.render_area; 99 } 100} 101 102void 103radv_meta_restore(const struct radv_meta_saved_state *state, 104 struct radv_cmd_buffer *cmd_buffer) 105{ 106 VkPipelineBindPoint bind_point = 107 state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE ? 108 VK_PIPELINE_BIND_POINT_GRAPHICS : 109 VK_PIPELINE_BIND_POINT_COMPUTE; 110 111 if (state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE) { 112 radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), 113 VK_PIPELINE_BIND_POINT_GRAPHICS, 114 radv_pipeline_to_handle(state->old_pipeline)); 115 116 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE; 117 118 /* Restore all viewports. */ 119 cmd_buffer->state.dynamic.viewport.count = state->viewport.count; 120 typed_memcpy(cmd_buffer->state.dynamic.viewport.viewports, 121 state->viewport.viewports, 122 MAX_VIEWPORTS); 123 124 /* Restore all scissors. */ 125 cmd_buffer->state.dynamic.scissor.count = state->scissor.count; 126 typed_memcpy(cmd_buffer->state.dynamic.scissor.scissors, 127 state->scissor.scissors, 128 MAX_SCISSORS); 129 130 cmd_buffer->state.dirty |= 1 << VK_DYNAMIC_STATE_VIEWPORT | 131 1 << VK_DYNAMIC_STATE_SCISSOR; 132 } 133 134 if (state->flags & RADV_META_SAVE_COMPUTE_PIPELINE) { 135 radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), 136 VK_PIPELINE_BIND_POINT_COMPUTE, 137 radv_pipeline_to_handle(state->old_pipeline)); 138 } 139 140 if (state->flags & RADV_META_SAVE_DESCRIPTORS) { 141 radv_set_descriptor_set(cmd_buffer, bind_point, 142 state->old_descriptor_set0, 0); 143 } 144 145 if (state->flags & RADV_META_SAVE_CONSTANTS) { 146 memcpy(cmd_buffer->push_constants, state->push_constants, 147 MAX_PUSH_CONSTANTS_SIZE); 148 cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT; 149 150 if (state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE) { 151 cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_ALL_GRAPHICS; 152 } 153 } 154 155 if (state->flags & RADV_META_SAVE_PASS) { 156 cmd_buffer->state.pass = state->pass; 157 cmd_buffer->state.subpass = state->subpass; 158 cmd_buffer->state.framebuffer = state->framebuffer; 159 cmd_buffer->state.attachments = state->attachments; 160 cmd_buffer->state.render_area = state->render_area; 161 if (state->subpass) 162 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER; 163 } 164} 165 166VkImageViewType 167radv_meta_get_view_type(const struct radv_image *image) 168{ 169 switch (image->type) { 170 case VK_IMAGE_TYPE_1D: return VK_IMAGE_VIEW_TYPE_1D; 171 case VK_IMAGE_TYPE_2D: return VK_IMAGE_VIEW_TYPE_2D; 172 case VK_IMAGE_TYPE_3D: return VK_IMAGE_VIEW_TYPE_3D; 173 default: 174 unreachable("bad VkImageViewType"); 175 } 176} 177 178/** 179 * When creating a destination VkImageView, this function provides the needed 180 * VkImageViewCreateInfo::subresourceRange::baseArrayLayer. 181 */ 182uint32_t 183radv_meta_get_iview_layer(const struct radv_image *dest_image, 184 const VkImageSubresourceLayers *dest_subresource, 185 const VkOffset3D *dest_offset) 186{ 187 switch (dest_image->type) { 188 case VK_IMAGE_TYPE_1D: 189 case VK_IMAGE_TYPE_2D: 190 return dest_subresource->baseArrayLayer; 191 case VK_IMAGE_TYPE_3D: 192 /* HACK: Vulkan does not allow attaching a 3D image to a framebuffer, 193 * but meta does it anyway. When doing so, we translate the 194 * destination's z offset into an array offset. 195 */ 196 return dest_offset->z; 197 default: 198 assert(!"bad VkImageType"); 199 return 0; 200 } 201} 202 203static void * 204meta_alloc(void* _device, size_t size, size_t alignment, 205 VkSystemAllocationScope allocationScope) 206{ 207 struct radv_device *device = _device; 208 return device->alloc.pfnAllocation(device->alloc.pUserData, size, alignment, 209 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE); 210} 211 212static void * 213meta_realloc(void* _device, void *original, size_t size, size_t alignment, 214 VkSystemAllocationScope allocationScope) 215{ 216 struct radv_device *device = _device; 217 return device->alloc.pfnReallocation(device->alloc.pUserData, original, 218 size, alignment, 219 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE); 220} 221 222static void 223meta_free(void* _device, void *data) 224{ 225 struct radv_device *device = _device; 226 return device->alloc.pfnFree(device->alloc.pUserData, data); 227} 228 229static bool 230radv_builtin_cache_path(char *path) 231{ 232 char *xdg_cache_home = getenv("XDG_CACHE_HOME"); 233 const char *suffix = "/radv_builtin_shaders"; 234 const char *suffix2 = "/.cache/radv_builtin_shaders"; 235 struct passwd pwd, *result; 236 char path2[PATH_MAX + 1]; /* PATH_MAX is not a real max,but suffices here. */ 237 int ret; 238 239 if (xdg_cache_home) { 240 ret = snprintf(path, PATH_MAX + 1, "%s%s%zd", 241 xdg_cache_home, suffix, sizeof(void *) * 8); 242 return ret > 0 && ret < PATH_MAX + 1; 243 } 244 245 getpwuid_r(getuid(), &pwd, path2, PATH_MAX - strlen(suffix2), &result); 246 if (!result) 247 return false; 248 249 strcpy(path, pwd.pw_dir); 250 strcat(path, "/.cache"); 251 mkdir(path, 0755); 252 253 ret = snprintf(path, PATH_MAX + 1, "%s%s%zd", 254 pwd.pw_dir, suffix2, sizeof(void *) * 8); 255 return ret > 0 && ret < PATH_MAX + 1; 256} 257 258static bool 259radv_load_meta_pipeline(struct radv_device *device) 260{ 261 char path[PATH_MAX + 1]; 262 struct stat st; 263 void *data = NULL; 264 bool ret = false; 265 266 if (!radv_builtin_cache_path(path)) 267 return false; 268 269 int fd = open(path, O_RDONLY); 270 if (fd < 0) 271 return false; 272 if (fstat(fd, &st)) 273 goto fail; 274 data = malloc(st.st_size); 275 if (!data) 276 goto fail; 277 if(read(fd, data, st.st_size) == -1) 278 goto fail; 279 280 ret = radv_pipeline_cache_load(&device->meta_state.cache, data, st.st_size); 281fail: 282 free(data); 283 close(fd); 284 return ret; 285} 286 287static void 288radv_store_meta_pipeline(struct radv_device *device) 289{ 290 char path[PATH_MAX + 1], path2[PATH_MAX + 7]; 291 size_t size; 292 void *data = NULL; 293 294 if (!device->meta_state.cache.modified) 295 return; 296 297 if (radv_GetPipelineCacheData(radv_device_to_handle(device), 298 radv_pipeline_cache_to_handle(&device->meta_state.cache), 299 &size, NULL)) 300 return; 301 302 if (!radv_builtin_cache_path(path)) 303 return; 304 305 strcpy(path2, path); 306 strcat(path2, "XXXXXX"); 307 int fd = mkstemp(path2);//open(path, O_WRONLY | O_CREAT, 0600); 308 if (fd < 0) 309 return; 310 data = malloc(size); 311 if (!data) 312 goto fail; 313 314 if (radv_GetPipelineCacheData(radv_device_to_handle(device), 315 radv_pipeline_cache_to_handle(&device->meta_state.cache), 316 &size, data)) 317 goto fail; 318 if(write(fd, data, size) == -1) 319 goto fail; 320 321 rename(path2, path); 322fail: 323 free(data); 324 close(fd); 325 unlink(path2); 326} 327 328VkResult 329radv_device_init_meta(struct radv_device *device) 330{ 331 VkResult result; 332 333 memset(&device->meta_state, 0, sizeof(device->meta_state)); 334 335 device->meta_state.alloc = (VkAllocationCallbacks) { 336 .pUserData = device, 337 .pfnAllocation = meta_alloc, 338 .pfnReallocation = meta_realloc, 339 .pfnFree = meta_free, 340 }; 341 342 device->meta_state.cache.alloc = device->meta_state.alloc; 343 radv_pipeline_cache_init(&device->meta_state.cache, device); 344 bool loaded_cache = radv_load_meta_pipeline(device); 345 bool on_demand = !loaded_cache; 346 347 mtx_init(&device->meta_state.mtx, mtx_plain); 348 349 result = radv_device_init_meta_clear_state(device, on_demand); 350 if (result != VK_SUCCESS) 351 goto fail_clear; 352 353 result = radv_device_init_meta_resolve_state(device, on_demand); 354 if (result != VK_SUCCESS) 355 goto fail_resolve; 356 357 result = radv_device_init_meta_blit_state(device, on_demand); 358 if (result != VK_SUCCESS) 359 goto fail_blit; 360 361 result = radv_device_init_meta_blit2d_state(device, on_demand); 362 if (result != VK_SUCCESS) 363 goto fail_blit2d; 364 365 result = radv_device_init_meta_bufimage_state(device); 366 if (result != VK_SUCCESS) 367 goto fail_bufimage; 368 369 result = radv_device_init_meta_depth_decomp_state(device, on_demand); 370 if (result != VK_SUCCESS) 371 goto fail_depth_decomp; 372 373 result = radv_device_init_meta_buffer_state(device); 374 if (result != VK_SUCCESS) 375 goto fail_buffer; 376 377 result = radv_device_init_meta_query_state(device, on_demand); 378 if (result != VK_SUCCESS) 379 goto fail_query; 380 381 result = radv_device_init_meta_fast_clear_flush_state(device, on_demand); 382 if (result != VK_SUCCESS) 383 goto fail_fast_clear; 384 385 result = radv_device_init_meta_resolve_compute_state(device, on_demand); 386 if (result != VK_SUCCESS) 387 goto fail_resolve_compute; 388 389 result = radv_device_init_meta_resolve_fragment_state(device, on_demand); 390 if (result != VK_SUCCESS) 391 goto fail_resolve_fragment; 392 393 result = radv_device_init_meta_fmask_expand_state(device); 394 if (result != VK_SUCCESS) 395 goto fail_fmask_expand; 396 397 return VK_SUCCESS; 398 399fail_fmask_expand: 400 radv_device_finish_meta_resolve_fragment_state(device); 401fail_resolve_fragment: 402 radv_device_finish_meta_resolve_compute_state(device); 403fail_resolve_compute: 404 radv_device_finish_meta_fast_clear_flush_state(device); 405fail_fast_clear: 406 radv_device_finish_meta_query_state(device); 407fail_query: 408 radv_device_finish_meta_buffer_state(device); 409fail_buffer: 410 radv_device_finish_meta_depth_decomp_state(device); 411fail_depth_decomp: 412 radv_device_finish_meta_bufimage_state(device); 413fail_bufimage: 414 radv_device_finish_meta_blit2d_state(device); 415fail_blit2d: 416 radv_device_finish_meta_blit_state(device); 417fail_blit: 418 radv_device_finish_meta_resolve_state(device); 419fail_resolve: 420 radv_device_finish_meta_clear_state(device); 421fail_clear: 422 mtx_destroy(&device->meta_state.mtx); 423 radv_pipeline_cache_finish(&device->meta_state.cache); 424 return result; 425} 426 427void 428radv_device_finish_meta(struct radv_device *device) 429{ 430 radv_device_finish_meta_clear_state(device); 431 radv_device_finish_meta_resolve_state(device); 432 radv_device_finish_meta_blit_state(device); 433 radv_device_finish_meta_blit2d_state(device); 434 radv_device_finish_meta_bufimage_state(device); 435 radv_device_finish_meta_depth_decomp_state(device); 436 radv_device_finish_meta_query_state(device); 437 radv_device_finish_meta_buffer_state(device); 438 radv_device_finish_meta_fast_clear_flush_state(device); 439 radv_device_finish_meta_resolve_compute_state(device); 440 radv_device_finish_meta_resolve_fragment_state(device); 441 radv_device_finish_meta_fmask_expand_state(device); 442 443 radv_store_meta_pipeline(device); 444 radv_pipeline_cache_finish(&device->meta_state.cache); 445 mtx_destroy(&device->meta_state.mtx); 446} 447 448nir_ssa_def *radv_meta_gen_rect_vertices_comp2(nir_builder *vs_b, nir_ssa_def *comp2) 449{ 450 451 nir_intrinsic_instr *vertex_id = nir_intrinsic_instr_create(vs_b->shader, nir_intrinsic_load_vertex_id_zero_base); 452 nir_ssa_dest_init(&vertex_id->instr, &vertex_id->dest, 1, 32, "vertexid"); 453 nir_builder_instr_insert(vs_b, &vertex_id->instr); 454 455 /* vertex 0 - -1.0, -1.0 */ 456 /* vertex 1 - -1.0, 1.0 */ 457 /* vertex 2 - 1.0, -1.0 */ 458 /* so channel 0 is vertex_id != 2 ? -1.0 : 1.0 459 channel 1 is vertex id != 1 ? -1.0 : 1.0 */ 460 461 nir_ssa_def *c0cmp = nir_ine(vs_b, &vertex_id->dest.ssa, 462 nir_imm_int(vs_b, 2)); 463 nir_ssa_def *c1cmp = nir_ine(vs_b, &vertex_id->dest.ssa, 464 nir_imm_int(vs_b, 1)); 465 466 nir_ssa_def *comp[4]; 467 comp[0] = nir_bcsel(vs_b, c0cmp, 468 nir_imm_float(vs_b, -1.0), 469 nir_imm_float(vs_b, 1.0)); 470 471 comp[1] = nir_bcsel(vs_b, c1cmp, 472 nir_imm_float(vs_b, -1.0), 473 nir_imm_float(vs_b, 1.0)); 474 comp[2] = comp2; 475 comp[3] = nir_imm_float(vs_b, 1.0); 476 nir_ssa_def *outvec = nir_vec(vs_b, comp, 4); 477 478 return outvec; 479} 480 481nir_ssa_def *radv_meta_gen_rect_vertices(nir_builder *vs_b) 482{ 483 return radv_meta_gen_rect_vertices_comp2(vs_b, nir_imm_float(vs_b, 0.0)); 484} 485 486/* vertex shader that generates vertices */ 487nir_shader * 488radv_meta_build_nir_vs_generate_vertices(void) 489{ 490 const struct glsl_type *vec4 = glsl_vec4_type(); 491 492 nir_builder b; 493 nir_variable *v_position; 494 495 nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL); 496 b.shader->info.name = ralloc_strdup(b.shader, "meta_vs_gen_verts"); 497 498 nir_ssa_def *outvec = radv_meta_gen_rect_vertices(&b); 499 500 v_position = nir_variable_create(b.shader, nir_var_shader_out, vec4, 501 "gl_Position"); 502 v_position->data.location = VARYING_SLOT_POS; 503 504 nir_store_var(&b, v_position, outvec, 0xf); 505 506 return b.shader; 507} 508 509nir_shader * 510radv_meta_build_nir_fs_noop(void) 511{ 512 nir_builder b; 513 514 nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL); 515 b.shader->info.name = ralloc_asprintf(b.shader, 516 "meta_noop_fs"); 517 518 return b.shader; 519} 520 521void radv_meta_build_resolve_shader_core(nir_builder *b, 522 bool is_integer, 523 int samples, 524 nir_variable *input_img, 525 nir_variable *color, 526 nir_ssa_def *img_coord) 527{ 528 /* do a txf_ms on each sample */ 529 nir_ssa_def *tmp; 530 nir_if *outer_if = NULL; 531 532 nir_ssa_def *input_img_deref = &nir_build_deref_var(b, input_img)->dest.ssa; 533 534 nir_tex_instr *tex = nir_tex_instr_create(b->shader, 3); 535 tex->sampler_dim = GLSL_SAMPLER_DIM_MS; 536 tex->op = nir_texop_txf_ms; 537 tex->src[0].src_type = nir_tex_src_coord; 538 tex->src[0].src = nir_src_for_ssa(img_coord); 539 tex->src[1].src_type = nir_tex_src_ms_index; 540 tex->src[1].src = nir_src_for_ssa(nir_imm_int(b, 0)); 541 tex->src[2].src_type = nir_tex_src_texture_deref; 542 tex->src[2].src = nir_src_for_ssa(input_img_deref); 543 tex->dest_type = nir_type_float; 544 tex->is_array = false; 545 tex->coord_components = 2; 546 547 nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex"); 548 nir_builder_instr_insert(b, &tex->instr); 549 550 tmp = &tex->dest.ssa; 551 552 if (!is_integer && samples > 1) { 553 nir_tex_instr *tex_all_same = nir_tex_instr_create(b->shader, 2); 554 tex_all_same->sampler_dim = GLSL_SAMPLER_DIM_MS; 555 tex_all_same->op = nir_texop_samples_identical; 556 tex_all_same->src[0].src_type = nir_tex_src_coord; 557 tex_all_same->src[0].src = nir_src_for_ssa(img_coord); 558 tex_all_same->src[1].src_type = nir_tex_src_texture_deref; 559 tex_all_same->src[1].src = nir_src_for_ssa(input_img_deref); 560 tex_all_same->dest_type = nir_type_float; 561 tex_all_same->is_array = false; 562 tex_all_same->coord_components = 2; 563 564 nir_ssa_dest_init(&tex_all_same->instr, &tex_all_same->dest, 1, 32, "tex"); 565 nir_builder_instr_insert(b, &tex_all_same->instr); 566 567 nir_ssa_def *all_same = nir_ieq(b, &tex_all_same->dest.ssa, nir_imm_int(b, 0)); 568 nir_if *if_stmt = nir_if_create(b->shader); 569 if_stmt->condition = nir_src_for_ssa(all_same); 570 nir_cf_node_insert(b->cursor, &if_stmt->cf_node); 571 572 b->cursor = nir_after_cf_list(&if_stmt->then_list); 573 for (int i = 1; i < samples; i++) { 574 nir_tex_instr *tex_add = nir_tex_instr_create(b->shader, 3); 575 tex_add->sampler_dim = GLSL_SAMPLER_DIM_MS; 576 tex_add->op = nir_texop_txf_ms; 577 tex_add->src[0].src_type = nir_tex_src_coord; 578 tex_add->src[0].src = nir_src_for_ssa(img_coord); 579 tex_add->src[1].src_type = nir_tex_src_ms_index; 580 tex_add->src[1].src = nir_src_for_ssa(nir_imm_int(b, i)); 581 tex_add->src[2].src_type = nir_tex_src_texture_deref; 582 tex_add->src[2].src = nir_src_for_ssa(input_img_deref); 583 tex_add->dest_type = nir_type_float; 584 tex_add->is_array = false; 585 tex_add->coord_components = 2; 586 587 nir_ssa_dest_init(&tex_add->instr, &tex_add->dest, 4, 32, "tex"); 588 nir_builder_instr_insert(b, &tex_add->instr); 589 590 tmp = nir_fadd(b, tmp, &tex_add->dest.ssa); 591 } 592 593 tmp = nir_fdiv(b, tmp, nir_imm_float(b, samples)); 594 nir_store_var(b, color, tmp, 0xf); 595 b->cursor = nir_after_cf_list(&if_stmt->else_list); 596 outer_if = if_stmt; 597 } 598 nir_store_var(b, color, &tex->dest.ssa, 0xf); 599 600 if (outer_if) 601 b->cursor = nir_after_cf_node(&outer_if->cf_node); 602} 603