lp_setup.c revision 01e04c3f
1/************************************************************************** 2 * 3 * Copyright 2007 VMware, Inc. 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR 22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * 26 **************************************************************************/ 27 28/** 29 * Tiling engine. 30 * 31 * Builds per-tile display lists and executes them on calls to 32 * lp_setup_flush(). 33 */ 34 35#include <limits.h> 36 37#include "pipe/p_defines.h" 38#include "util/u_framebuffer.h" 39#include "util/u_inlines.h" 40#include "util/u_memory.h" 41#include "util/u_pack_color.h" 42#include "util/u_viewport.h" 43#include "draw/draw_pipe.h" 44#include "util/os_time.h" 45#include "lp_context.h" 46#include "lp_memory.h" 47#include "lp_scene.h" 48#include "lp_texture.h" 49#include "lp_debug.h" 50#include "lp_fence.h" 51#include "lp_query.h" 52#include "lp_rast.h" 53#include "lp_setup_context.h" 54#include "lp_screen.h" 55#include "lp_state.h" 56#include "state_tracker/sw_winsys.h" 57 58#include "draw/draw_context.h" 59#include "draw/draw_vbuf.h" 60 61 62static boolean set_scene_state( struct lp_setup_context *, enum setup_state, 63 const char *reason); 64static boolean try_update_scene_state( struct lp_setup_context *setup ); 65 66 67static void 68lp_setup_get_empty_scene(struct lp_setup_context *setup) 69{ 70 assert(setup->scene == NULL); 71 72 setup->scene_idx++; 73 setup->scene_idx %= ARRAY_SIZE(setup->scenes); 74 75 setup->scene = setup->scenes[setup->scene_idx]; 76 77 if (setup->scene->fence) { 78 if (LP_DEBUG & DEBUG_SETUP) 79 debug_printf("%s: wait for scene %d\n", 80 __FUNCTION__, setup->scene->fence->id); 81 82 lp_fence_wait(setup->scene->fence); 83 } 84 85 lp_scene_begin_binning(setup->scene, &setup->fb); 86 87} 88 89 90static void 91first_triangle( struct lp_setup_context *setup, 92 const float (*v0)[4], 93 const float (*v1)[4], 94 const float (*v2)[4]) 95{ 96 assert(setup->state == SETUP_ACTIVE); 97 lp_setup_choose_triangle( setup ); 98 setup->triangle( setup, v0, v1, v2 ); 99} 100 101static void 102first_line( struct lp_setup_context *setup, 103 const float (*v0)[4], 104 const float (*v1)[4]) 105{ 106 assert(setup->state == SETUP_ACTIVE); 107 lp_setup_choose_line( setup ); 108 setup->line( setup, v0, v1 ); 109} 110 111static void 112first_point( struct lp_setup_context *setup, 113 const float (*v0)[4]) 114{ 115 assert(setup->state == SETUP_ACTIVE); 116 lp_setup_choose_point( setup ); 117 setup->point( setup, v0 
); 118} 119 120void lp_setup_reset( struct lp_setup_context *setup ) 121{ 122 unsigned i; 123 124 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__); 125 126 /* Reset derived state */ 127 for (i = 0; i < ARRAY_SIZE(setup->constants); ++i) { 128 setup->constants[i].stored_size = 0; 129 setup->constants[i].stored_data = NULL; 130 } 131 setup->fs.stored = NULL; 132 setup->dirty = ~0; 133 134 /* no current bin */ 135 setup->scene = NULL; 136 137 /* Reset some state: 138 */ 139 memset(&setup->clear, 0, sizeof setup->clear); 140 141 /* Have an explicit "start-binning" call and get rid of this 142 * pointer twiddling? 143 */ 144 setup->line = first_line; 145 setup->point = first_point; 146 setup->triangle = first_triangle; 147} 148 149 150/** Rasterize all scene's bins */ 151static void 152lp_setup_rasterize_scene( struct lp_setup_context *setup ) 153{ 154 struct lp_scene *scene = setup->scene; 155 struct llvmpipe_screen *screen = llvmpipe_screen(scene->pipe->screen); 156 157 scene->num_active_queries = setup->active_binned_queries; 158 memcpy(scene->active_queries, setup->active_queries, 159 scene->num_active_queries * sizeof(scene->active_queries[0])); 160 161 lp_scene_end_binning(scene); 162 163 lp_fence_reference(&setup->last_fence, scene->fence); 164 165 if (setup->last_fence) 166 setup->last_fence->issued = TRUE; 167 168 mtx_lock(&screen->rast_mutex); 169 170 /* FIXME: We enqueue the scene then wait on the rasterizer to finish. 171 * This means we never actually run any vertex stuff in parallel to 172 * rasterization (not in the same context at least) which is what the 173 * multiple scenes per setup is about - when we get a new empty scene 174 * any old one is already empty again because we waited here for 175 * raster tasks to be finished. Ideally, we shouldn't need to wait here 176 * and rely on fences elsewhere when waiting is necessary. 177 * Certainly, lp_scene_end_rasterization() would need to be deferred too 178 * and there's probably other bits why this doesn't actually work. 179 */ 180 lp_rast_queue_scene(screen->rast, scene); 181 lp_rast_finish(screen->rast); 182 mtx_unlock(&screen->rast_mutex); 183 184 lp_scene_end_rasterization(setup->scene); 185 lp_setup_reset( setup ); 186 187 LP_DBG(DEBUG_SETUP, "%s done \n", __FUNCTION__); 188} 189 190 191 192static boolean 193begin_binning( struct lp_setup_context *setup ) 194{ 195 struct lp_scene *scene = setup->scene; 196 boolean need_zsload = FALSE; 197 boolean ok; 198 199 assert(scene); 200 assert(scene->fence == NULL); 201 202 /* Always create a fence: 203 */ 204 scene->fence = lp_fence_create(MAX2(1, setup->num_threads)); 205 if (!scene->fence) 206 return FALSE; 207 208 ok = try_update_scene_state(setup); 209 if (!ok) 210 return FALSE; 211 212 if (setup->fb.zsbuf && 213 ((setup->clear.flags & PIPE_CLEAR_DEPTHSTENCIL) != PIPE_CLEAR_DEPTHSTENCIL) && 214 util_format_is_depth_and_stencil(setup->fb.zsbuf->format)) 215 need_zsload = TRUE; 216 217 LP_DBG(DEBUG_SETUP, "%s color clear bufs: %x depth: %s\n", __FUNCTION__, 218 setup->clear.flags >> 2, 219 need_zsload ? 
"clear": "load"); 220 221 if (setup->clear.flags & PIPE_CLEAR_COLOR) { 222 unsigned cbuf; 223 for (cbuf = 0; cbuf < setup->fb.nr_cbufs; cbuf++) { 224 assert(PIPE_CLEAR_COLOR0 == 1 << 2); 225 if (setup->clear.flags & (1 << (2 + cbuf))) { 226 union lp_rast_cmd_arg clearrb_arg; 227 struct lp_rast_clear_rb *cc_scene = 228 (struct lp_rast_clear_rb *) 229 lp_scene_alloc(scene, sizeof(struct lp_rast_clear_rb)); 230 231 if (!cc_scene) { 232 return FALSE; 233 } 234 235 cc_scene->cbuf = cbuf; 236 cc_scene->color_val = setup->clear.color_val[cbuf]; 237 clearrb_arg.clear_rb = cc_scene; 238 239 if (!lp_scene_bin_everywhere(scene, 240 LP_RAST_OP_CLEAR_COLOR, 241 clearrb_arg)) 242 return FALSE; 243 } 244 } 245 } 246 247 if (setup->fb.zsbuf) { 248 if (setup->clear.flags & PIPE_CLEAR_DEPTHSTENCIL) { 249 ok = lp_scene_bin_everywhere( scene, 250 LP_RAST_OP_CLEAR_ZSTENCIL, 251 lp_rast_arg_clearzs( 252 setup->clear.zsvalue, 253 setup->clear.zsmask)); 254 if (!ok) 255 return FALSE; 256 } 257 } 258 259 setup->clear.flags = 0; 260 setup->clear.zsmask = 0; 261 setup->clear.zsvalue = 0; 262 263 scene->had_queries = !!setup->active_binned_queries; 264 265 LP_DBG(DEBUG_SETUP, "%s done\n", __FUNCTION__); 266 return TRUE; 267} 268 269 270/* This basically bins and then flushes any outstanding full-screen 271 * clears. 272 * 273 * TODO: fast path for fullscreen clears and no triangles. 274 */ 275static boolean 276execute_clears( struct lp_setup_context *setup ) 277{ 278 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__); 279 280 return begin_binning( setup ); 281} 282 283const char *states[] = { 284 "FLUSHED", 285 "CLEARED", 286 "ACTIVE " 287}; 288 289 290static boolean 291set_scene_state( struct lp_setup_context *setup, 292 enum setup_state new_state, 293 const char *reason) 294{ 295 unsigned old_state = setup->state; 296 297 if (old_state == new_state) 298 return TRUE; 299 300 if (LP_DEBUG & DEBUG_SCENE) { 301 debug_printf("%s old %s new %s%s%s\n", 302 __FUNCTION__, 303 states[old_state], 304 states[new_state], 305 (new_state == SETUP_FLUSHED) ? ": " : "", 306 (new_state == SETUP_FLUSHED) ? reason : ""); 307 308 if (new_state == SETUP_FLUSHED && setup->scene) 309 lp_debug_draw_bins_by_cmd_length(setup->scene); 310 } 311 312 /* wait for a free/empty scene 313 */ 314 if (old_state == SETUP_FLUSHED) 315 lp_setup_get_empty_scene(setup); 316 317 switch (new_state) { 318 case SETUP_CLEARED: 319 break; 320 321 case SETUP_ACTIVE: 322 if (!begin_binning( setup )) 323 goto fail; 324 break; 325 326 case SETUP_FLUSHED: 327 if (old_state == SETUP_CLEARED) 328 if (!execute_clears( setup )) 329 goto fail; 330 331 lp_setup_rasterize_scene( setup ); 332 assert(setup->scene == NULL); 333 break; 334 335 default: 336 assert(0 && "invalid setup state mode"); 337 goto fail; 338 } 339 340 setup->state = new_state; 341 return TRUE; 342 343fail: 344 if (setup->scene) { 345 lp_scene_end_rasterization(setup->scene); 346 setup->scene = NULL; 347 } 348 349 setup->state = SETUP_FLUSHED; 350 lp_setup_reset( setup ); 351 return FALSE; 352} 353 354 355void 356lp_setup_flush( struct lp_setup_context *setup, 357 struct pipe_fence_handle **fence, 358 const char *reason) 359{ 360 set_scene_state( setup, SETUP_FLUSHED, reason ); 361 362 if (fence) { 363 lp_fence_reference((struct lp_fence **)fence, setup->last_fence); 364 } 365} 366 367 368void 369lp_setup_bind_framebuffer( struct lp_setup_context *setup, 370 const struct pipe_framebuffer_state *fb ) 371{ 372 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__); 373 374 /* Flush any old scene. 
375 */ 376 set_scene_state( setup, SETUP_FLUSHED, __FUNCTION__ ); 377 378 /* 379 * Ensure the old scene is not reused. 380 */ 381 assert(!setup->scene); 382 383 /* Set new state. This will be picked up later when we next need a 384 * scene. 385 */ 386 util_copy_framebuffer_state(&setup->fb, fb); 387 setup->framebuffer.x0 = 0; 388 setup->framebuffer.y0 = 0; 389 setup->framebuffer.x1 = fb->width-1; 390 setup->framebuffer.y1 = fb->height-1; 391 setup->dirty |= LP_SETUP_NEW_SCISSOR; 392} 393 394 395/* 396 * Try to clear one color buffer of the attached fb, either by binning a clear 397 * command or queuing up the clear for later (when binning is started). 398 */ 399static boolean 400lp_setup_try_clear_color_buffer(struct lp_setup_context *setup, 401 const union pipe_color_union *color, 402 unsigned cbuf) 403{ 404 union lp_rast_cmd_arg clearrb_arg; 405 union util_color uc; 406 enum pipe_format format = setup->fb.cbufs[cbuf]->format; 407 408 LP_DBG(DEBUG_SETUP, "%s state %d\n", __FUNCTION__, setup->state); 409 410 if (util_format_is_pure_integer(format)) { 411 /* 412 * We expect int/uint clear values here, though some APIs 413 * might disagree (but in any case util_pack_color() 414 * couldn't handle it)... 415 */ 416 if (util_format_is_pure_sint(format)) { 417 util_format_write_4i(format, color->i, 0, &uc, 0, 0, 0, 1, 1); 418 } 419 else { 420 assert(util_format_is_pure_uint(format)); 421 util_format_write_4ui(format, color->ui, 0, &uc, 0, 0, 0, 1, 1); 422 } 423 } 424 else { 425 util_pack_color(color->f, format, &uc); 426 } 427 428 if (setup->state == SETUP_ACTIVE) { 429 struct lp_scene *scene = setup->scene; 430 431 /* Add the clear to existing scene. In the unusual case where 432 * both color and depth-stencil are being cleared when there's 433 * already been some rendering, we could discard the currently 434 * binned scene and start again, but I don't see that as being 435 * a common usage. 436 */ 437 struct lp_rast_clear_rb *cc_scene = 438 (struct lp_rast_clear_rb *) 439 lp_scene_alloc_aligned(scene, sizeof(struct lp_rast_clear_rb), 8); 440 441 if (!cc_scene) { 442 return FALSE; 443 } 444 445 cc_scene->cbuf = cbuf; 446 cc_scene->color_val = uc; 447 clearrb_arg.clear_rb = cc_scene; 448 449 if (!lp_scene_bin_everywhere(scene, 450 LP_RAST_OP_CLEAR_COLOR, 451 clearrb_arg)) 452 return FALSE; 453 } 454 else { 455 /* Put ourselves into the 'pre-clear' state, specifically to try 456 * and accumulate multiple clears to color and depth_stencil 457 * buffers which the app or state-tracker might issue 458 * separately. 459 */ 460 set_scene_state( setup, SETUP_CLEARED, __FUNCTION__ ); 461 462 assert(PIPE_CLEAR_COLOR0 == (1 << 2)); 463 setup->clear.flags |= 1 << (cbuf + 2); 464 setup->clear.color_val[cbuf] = uc; 465 } 466 467 return TRUE; 468} 469 470static boolean 471lp_setup_try_clear_zs(struct lp_setup_context *setup, 472 double depth, 473 unsigned stencil, 474 unsigned flags) 475{ 476 uint64_t zsmask = 0; 477 uint64_t zsvalue = 0; 478 uint32_t zmask32; 479 uint8_t smask8; 480 enum pipe_format format = setup->fb.zsbuf->format; 481 482 LP_DBG(DEBUG_SETUP, "%s state %d\n", __FUNCTION__, setup->state); 483 484 zmask32 = (flags & PIPE_CLEAR_DEPTH) ? ~0 : 0; 485 smask8 = (flags & PIPE_CLEAR_STENCIL) ? 
~0 : 0; 486 487 zsvalue = util_pack64_z_stencil(format, depth, stencil); 488 489 zsmask = util_pack64_mask_z_stencil(format, zmask32, smask8); 490 491 zsvalue &= zsmask; 492 493 if (format == PIPE_FORMAT_Z24X8_UNORM || 494 format == PIPE_FORMAT_X8Z24_UNORM) { 495 /* 496 * Make full mask if there's "X" bits so we can do full 497 * clear (without rmw). 498 */ 499 uint32_t zsmask_full = 0; 500 zsmask_full = util_pack_mask_z_stencil(format, ~0, ~0); 501 zsmask |= ~zsmask_full; 502 } 503 504 if (setup->state == SETUP_ACTIVE) { 505 struct lp_scene *scene = setup->scene; 506 507 /* Add the clear to existing scene. In the unusual case where 508 * both color and depth-stencil are being cleared when there's 509 * already been some rendering, we could discard the currently 510 * binned scene and start again, but I don't see that as being 511 * a common usage. 512 */ 513 if (!lp_scene_bin_everywhere(scene, 514 LP_RAST_OP_CLEAR_ZSTENCIL, 515 lp_rast_arg_clearzs(zsvalue, zsmask))) 516 return FALSE; 517 } 518 else { 519 /* Put ourselves into the 'pre-clear' state, specifically to try 520 * and accumulate multiple clears to color and depth_stencil 521 * buffers which the app or state-tracker might issue 522 * separately. 523 */ 524 set_scene_state( setup, SETUP_CLEARED, __FUNCTION__ ); 525 526 setup->clear.flags |= flags; 527 528 setup->clear.zsmask |= zsmask; 529 setup->clear.zsvalue = 530 (setup->clear.zsvalue & ~zsmask) | (zsvalue & zsmask); 531 } 532 533 return TRUE; 534} 535 536void 537lp_setup_clear( struct lp_setup_context *setup, 538 const union pipe_color_union *color, 539 double depth, 540 unsigned stencil, 541 unsigned flags ) 542{ 543 unsigned i; 544 545 /* 546 * Note any of these (max 9) clears could fail (but at most there should 547 * be just one failure!). This avoids doing the previous succeeded 548 * clears again (we still clear tiles twice if a clear command succeeded 549 * partially for one buffer). 550 */ 551 if (flags & PIPE_CLEAR_DEPTHSTENCIL) { 552 unsigned flagszs = flags & PIPE_CLEAR_DEPTHSTENCIL; 553 if (!lp_setup_try_clear_zs(setup, depth, stencil, flagszs)) { 554 lp_setup_flush(setup, NULL, __FUNCTION__); 555 556 if (!lp_setup_try_clear_zs(setup, depth, stencil, flagszs)) 557 assert(0); 558 } 559 } 560 561 if (flags & PIPE_CLEAR_COLOR) { 562 assert(PIPE_CLEAR_COLOR0 == (1 << 2)); 563 for (i = 0; i < setup->fb.nr_cbufs; i++) { 564 if ((flags & (1 << (2 + i))) && setup->fb.cbufs[i]) { 565 if (!lp_setup_try_clear_color_buffer(setup, color, i)) { 566 lp_setup_flush(setup, NULL, __FUNCTION__); 567 568 if (!lp_setup_try_clear_color_buffer(setup, color, i)) 569 assert(0); 570 } 571 } 572 } 573 } 574} 575 576 577 578void 579lp_setup_set_triangle_state( struct lp_setup_context *setup, 580 unsigned cull_mode, 581 boolean ccw_is_frontface, 582 boolean scissor, 583 boolean half_pixel_center, 584 boolean bottom_edge_rule) 585{ 586 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__); 587 588 setup->ccw_is_frontface = ccw_is_frontface; 589 setup->cullmode = cull_mode; 590 setup->triangle = first_triangle; 591 setup->pixel_offset = half_pixel_center ? 
0.5f : 0.0f; 592 setup->bottom_edge_rule = bottom_edge_rule; 593 594 if (setup->scissor_test != scissor) { 595 setup->dirty |= LP_SETUP_NEW_SCISSOR; 596 setup->scissor_test = scissor; 597 } 598} 599 600void 601lp_setup_set_line_state( struct lp_setup_context *setup, 602 float line_width) 603{ 604 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__); 605 606 setup->line_width = line_width; 607} 608 609void 610lp_setup_set_point_state( struct lp_setup_context *setup, 611 float point_size, 612 boolean point_size_per_vertex, 613 uint sprite_coord_enable, 614 uint sprite_coord_origin) 615{ 616 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__); 617 618 setup->point_size = point_size; 619 setup->sprite_coord_enable = sprite_coord_enable; 620 setup->sprite_coord_origin = sprite_coord_origin; 621 setup->point_size_per_vertex = point_size_per_vertex; 622} 623 624void 625lp_setup_set_setup_variant( struct lp_setup_context *setup, 626 const struct lp_setup_variant *variant) 627{ 628 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__); 629 630 setup->setup.variant = variant; 631} 632 633void 634lp_setup_set_fs_variant( struct lp_setup_context *setup, 635 struct lp_fragment_shader_variant *variant) 636{ 637 LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, 638 variant); 639 /* FIXME: reference count */ 640 641 setup->fs.current.variant = variant; 642 setup->dirty |= LP_SETUP_NEW_FS; 643} 644 645void 646lp_setup_set_fs_constants(struct lp_setup_context *setup, 647 unsigned num, 648 struct pipe_constant_buffer *buffers) 649{ 650 unsigned i; 651 652 LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) buffers); 653 654 assert(num <= ARRAY_SIZE(setup->constants)); 655 656 for (i = 0; i < num; ++i) { 657 util_copy_constant_buffer(&setup->constants[i].current, &buffers[i]); 658 } 659 for (; i < ARRAY_SIZE(setup->constants); i++) { 660 util_copy_constant_buffer(&setup->constants[i].current, NULL); 661 } 662 setup->dirty |= LP_SETUP_NEW_CONSTANTS; 663} 664 665 666void 667lp_setup_set_alpha_ref_value( struct lp_setup_context *setup, 668 float alpha_ref_value ) 669{ 670 LP_DBG(DEBUG_SETUP, "%s %f\n", __FUNCTION__, alpha_ref_value); 671 672 if(setup->fs.current.jit_context.alpha_ref_value != alpha_ref_value) { 673 setup->fs.current.jit_context.alpha_ref_value = alpha_ref_value; 674 setup->dirty |= LP_SETUP_NEW_FS; 675 } 676} 677 678void 679lp_setup_set_stencil_ref_values( struct lp_setup_context *setup, 680 const ubyte refs[2] ) 681{ 682 LP_DBG(DEBUG_SETUP, "%s %d %d\n", __FUNCTION__, refs[0], refs[1]); 683 684 if (setup->fs.current.jit_context.stencil_ref_front != refs[0] || 685 setup->fs.current.jit_context.stencil_ref_back != refs[1]) { 686 setup->fs.current.jit_context.stencil_ref_front = refs[0]; 687 setup->fs.current.jit_context.stencil_ref_back = refs[1]; 688 setup->dirty |= LP_SETUP_NEW_FS; 689 } 690} 691 692void 693lp_setup_set_blend_color( struct lp_setup_context *setup, 694 const struct pipe_blend_color *blend_color ) 695{ 696 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__); 697 698 assert(blend_color); 699 700 if(memcmp(&setup->blend_color.current, blend_color, sizeof *blend_color) != 0) { 701 memcpy(&setup->blend_color.current, blend_color, sizeof *blend_color); 702 setup->dirty |= LP_SETUP_NEW_BLEND_COLOR; 703 } 704} 705 706 707void 708lp_setup_set_scissors( struct lp_setup_context *setup, 709 const struct pipe_scissor_state *scissors ) 710{ 711 unsigned i; 712 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__); 713 714 assert(scissors); 715 716 for (i = 0; i < PIPE_MAX_VIEWPORTS; ++i) { 717 setup->scissors[i].x0 = scissors[i].minx; 718 
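      /* pipe_scissor_state maxx/maxy are exclusive bounds, while the u_rect
       * used for binning is inclusive (like setup->framebuffer above), hence
       * the -1 on the max coordinates. */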
setup->scissors[i].x1 = scissors[i].maxx-1; 719 setup->scissors[i].y0 = scissors[i].miny; 720 setup->scissors[i].y1 = scissors[i].maxy-1; 721 } 722 setup->dirty |= LP_SETUP_NEW_SCISSOR; 723} 724 725 726void 727lp_setup_set_flatshade_first(struct lp_setup_context *setup, 728 boolean flatshade_first) 729{ 730 setup->flatshade_first = flatshade_first; 731} 732 733void 734lp_setup_set_rasterizer_discard(struct lp_setup_context *setup, 735 boolean rasterizer_discard) 736{ 737 if (setup->rasterizer_discard != rasterizer_discard) { 738 setup->rasterizer_discard = rasterizer_discard; 739 setup->line = first_line; 740 setup->point = first_point; 741 setup->triangle = first_triangle; 742 } 743} 744 745void 746lp_setup_set_vertex_info(struct lp_setup_context *setup, 747 struct vertex_info *vertex_info) 748{ 749 /* XXX: just silently holding onto the pointer: 750 */ 751 setup->vertex_info = vertex_info; 752} 753 754 755/** 756 * Called during state validation when LP_NEW_VIEWPORT is set. 757 */ 758void 759lp_setup_set_viewports(struct lp_setup_context *setup, 760 unsigned num_viewports, 761 const struct pipe_viewport_state *viewports) 762{ 763 struct llvmpipe_context *lp = llvmpipe_context(setup->pipe); 764 unsigned i; 765 766 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__); 767 768 assert(num_viewports <= PIPE_MAX_VIEWPORTS); 769 assert(viewports); 770 771 /* 772 * For use in lp_state_fs.c, propagate the viewport values for all viewports. 773 */ 774 for (i = 0; i < num_viewports; i++) { 775 float min_depth; 776 float max_depth; 777 util_viewport_zmin_zmax(&viewports[i], lp->rasterizer->clip_halfz, 778 &min_depth, &max_depth); 779 780 if (setup->viewports[i].min_depth != min_depth || 781 setup->viewports[i].max_depth != max_depth) { 782 setup->viewports[i].min_depth = min_depth; 783 setup->viewports[i].max_depth = max_depth; 784 setup->dirty |= LP_SETUP_NEW_VIEWPORTS; 785 } 786 } 787} 788 789 790/** 791 * Called during state validation when LP_NEW_SAMPLER_VIEW is set. 792 */ 793void 794lp_setup_set_fragment_sampler_views(struct lp_setup_context *setup, 795 unsigned num, 796 struct pipe_sampler_view **views) 797{ 798 unsigned i, max_tex_num; 799 800 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__); 801 802 assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS); 803 804 max_tex_num = MAX2(num, setup->fs.current_tex_num); 805 806 for (i = 0; i < max_tex_num; i++) { 807 struct pipe_sampler_view *view = i < num ? views[i] : NULL; 808 809 if (view) { 810 struct pipe_resource *res = view->texture; 811 struct llvmpipe_resource *lp_tex = llvmpipe_resource(res); 812 struct lp_jit_texture *jit_tex; 813 jit_tex = &setup->fs.current.jit_context.textures[i]; 814 815 /* We're referencing the texture's internal data, so save a 816 * reference to it. 
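 * The reference is dropped again when this slot is bound to a different
 * view (or to NULL) and, ultimately, in lp_setup_destroy().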
817 */ 818 pipe_resource_reference(&setup->fs.current_tex[i], res); 819 820 if (!lp_tex->dt) { 821 /* regular texture - setup array of mipmap level offsets */ 822 int j; 823 unsigned first_level = 0; 824 unsigned last_level = 0; 825 826 if (llvmpipe_resource_is_texture(res)) { 827 first_level = view->u.tex.first_level; 828 last_level = view->u.tex.last_level; 829 assert(first_level <= last_level); 830 assert(last_level <= res->last_level); 831 jit_tex->base = lp_tex->tex_data; 832 } 833 else { 834 jit_tex->base = lp_tex->data; 835 } 836 837 if (LP_PERF & PERF_TEX_MEM) { 838 /* use dummy tile memory */ 839 jit_tex->base = lp_dummy_tile; 840 jit_tex->width = TILE_SIZE/8; 841 jit_tex->height = TILE_SIZE/8; 842 jit_tex->depth = 1; 843 jit_tex->first_level = 0; 844 jit_tex->last_level = 0; 845 jit_tex->mip_offsets[0] = 0; 846 jit_tex->row_stride[0] = 0; 847 jit_tex->img_stride[0] = 0; 848 } 849 else { 850 jit_tex->width = res->width0; 851 jit_tex->height = res->height0; 852 jit_tex->depth = res->depth0; 853 jit_tex->first_level = first_level; 854 jit_tex->last_level = last_level; 855 856 if (llvmpipe_resource_is_texture(res)) { 857 for (j = first_level; j <= last_level; j++) { 858 jit_tex->mip_offsets[j] = lp_tex->mip_offsets[j]; 859 jit_tex->row_stride[j] = lp_tex->row_stride[j]; 860 jit_tex->img_stride[j] = lp_tex->img_stride[j]; 861 } 862 863 if (res->target == PIPE_TEXTURE_1D_ARRAY || 864 res->target == PIPE_TEXTURE_2D_ARRAY || 865 res->target == PIPE_TEXTURE_CUBE || 866 res->target == PIPE_TEXTURE_CUBE_ARRAY) { 867 /* 868 * For array textures, we don't have first_layer, instead 869 * adjust last_layer (stored as depth) plus the mip level offsets 870 * (as we have mip-first layout can't just adjust base ptr). 871 * XXX For mip levels, could do something similar. 872 */ 873 jit_tex->depth = view->u.tex.last_layer - view->u.tex.first_layer + 1; 874 for (j = first_level; j <= last_level; j++) { 875 jit_tex->mip_offsets[j] += view->u.tex.first_layer * 876 lp_tex->img_stride[j]; 877 } 878 if (view->target == PIPE_TEXTURE_CUBE || 879 view->target == PIPE_TEXTURE_CUBE_ARRAY) { 880 assert(jit_tex->depth % 6 == 0); 881 } 882 assert(view->u.tex.first_layer <= view->u.tex.last_layer); 883 assert(view->u.tex.last_layer < res->array_size); 884 } 885 } 886 else { 887 /* 888 * For buffers, we don't have "offset", instead adjust 889 * the size (stored as width) plus the base pointer. 890 */ 891 unsigned view_blocksize = util_format_get_blocksize(view->format); 892 /* probably don't really need to fill that out */ 893 jit_tex->mip_offsets[0] = 0; 894 jit_tex->row_stride[0] = 0; 895 jit_tex->img_stride[0] = 0; 896 897 /* everything specified in number of elements here. */ 898 jit_tex->width = view->u.buf.size / view_blocksize; 899 jit_tex->base = (uint8_t *)jit_tex->base + view->u.buf.offset; 900 /* XXX Unsure if we need to sanitize parameters? */ 901 assert(view->u.buf.offset + view->u.buf.size <= res->width0); 902 } 903 } 904 } 905 else { 906 /* display target texture/surface */ 907 /* 908 * XXX: Where should this be unmapped? 
909 */ 910 struct llvmpipe_screen *screen = llvmpipe_screen(res->screen); 911 struct sw_winsys *winsys = screen->winsys; 912 jit_tex->base = winsys->displaytarget_map(winsys, lp_tex->dt, 913 PIPE_TRANSFER_READ); 914 jit_tex->row_stride[0] = lp_tex->row_stride[0]; 915 jit_tex->img_stride[0] = lp_tex->img_stride[0]; 916 jit_tex->mip_offsets[0] = 0; 917 jit_tex->width = res->width0; 918 jit_tex->height = res->height0; 919 jit_tex->depth = res->depth0; 920 jit_tex->first_level = jit_tex->last_level = 0; 921 assert(jit_tex->base); 922 } 923 } 924 else { 925 pipe_resource_reference(&setup->fs.current_tex[i], NULL); 926 } 927 } 928 setup->fs.current_tex_num = num; 929 930 setup->dirty |= LP_SETUP_NEW_FS; 931} 932 933 934/** 935 * Called during state validation when LP_NEW_SAMPLER is set. 936 */ 937void 938lp_setup_set_fragment_sampler_state(struct lp_setup_context *setup, 939 unsigned num, 940 struct pipe_sampler_state **samplers) 941{ 942 unsigned i; 943 944 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__); 945 946 assert(num <= PIPE_MAX_SAMPLERS); 947 948 for (i = 0; i < PIPE_MAX_SAMPLERS; i++) { 949 const struct pipe_sampler_state *sampler = i < num ? samplers[i] : NULL; 950 951 if (sampler) { 952 struct lp_jit_sampler *jit_sam; 953 jit_sam = &setup->fs.current.jit_context.samplers[i]; 954 955 jit_sam->min_lod = sampler->min_lod; 956 jit_sam->max_lod = sampler->max_lod; 957 jit_sam->lod_bias = sampler->lod_bias; 958 COPY_4V(jit_sam->border_color, sampler->border_color.f); 959 } 960 } 961 962 setup->dirty |= LP_SETUP_NEW_FS; 963} 964 965 966/** 967 * Is the given texture referenced by any scene? 968 * Note: we have to check all scenes including any scenes currently 969 * being rendered and the current scene being built. 970 */ 971unsigned 972lp_setup_is_resource_referenced( const struct lp_setup_context *setup, 973 const struct pipe_resource *texture ) 974{ 975 unsigned i; 976 977 /* check the render targets */ 978 for (i = 0; i < setup->fb.nr_cbufs; i++) { 979 if (setup->fb.cbufs[i] && setup->fb.cbufs[i]->texture == texture) 980 return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE; 981 } 982 if (setup->fb.zsbuf && setup->fb.zsbuf->texture == texture) { 983 return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE; 984 } 985 986 /* check textures referenced by the scene */ 987 for (i = 0; i < ARRAY_SIZE(setup->scenes); i++) { 988 if (lp_scene_is_resource_referenced(setup->scenes[i], texture)) { 989 return LP_REFERENCED_FOR_READ; 990 } 991 } 992 993 return LP_UNREFERENCED; 994} 995 996 997/** 998 * Called by vbuf code when we're about to draw something. 999 * 1000 * This function stores all dirty state in the current scene's display list 1001 * memory, via lp_scene_alloc(). We can not pass pointers of mutable state to 1002 * the JIT functions, as the JIT functions will be called later on, most likely 1003 * on a different thread. 1004 * 1005 * When processing dirty state it is imperative that we don't refer to any 1006 * pointers previously allocated with lp_scene_alloc() in this function (or any 1007 * function) as they may belong to a scene freed since then. 1008 */ 1009static boolean 1010try_update_scene_state( struct lp_setup_context *setup ) 1011{ 1012 static const float fake_const_buf[4]; 1013 boolean new_scene = (setup->fs.stored == NULL); 1014 struct lp_scene *scene = setup->scene; 1015 unsigned i; 1016 1017 assert(scene); 1018 1019 if (setup->dirty & LP_SETUP_NEW_VIEWPORTS) { 1020 /* 1021 * Record new depth range state for changes due to viewport updates. 
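 * Only min_depth/max_depth are needed by the fragment JIT (for depth
 * clamping); like all other state reachable from the jit_context they are
 * copied into scene memory so they stay valid while the rasterizer threads run.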
1022 * 1023 * TODO: Collapse the existing viewport and depth range information 1024 * into one structure, for access by JIT. 1025 */ 1026 struct lp_jit_viewport *stored; 1027 1028 stored = (struct lp_jit_viewport *) 1029 lp_scene_alloc(scene, sizeof setup->viewports); 1030 1031 if (!stored) { 1032 assert(!new_scene); 1033 return FALSE; 1034 } 1035 1036 memcpy(stored, setup->viewports, sizeof setup->viewports); 1037 1038 setup->fs.current.jit_context.viewports = stored; 1039 setup->dirty |= LP_SETUP_NEW_FS; 1040 } 1041 1042 if(setup->dirty & LP_SETUP_NEW_BLEND_COLOR) { 1043 uint8_t *stored; 1044 float* fstored; 1045 unsigned i, j; 1046 unsigned size; 1047 1048 /* Alloc u8_blend_color (16 x i8) and f_blend_color (4 or 8 x f32) */ 1049 size = 4 * 16 * sizeof(uint8_t); 1050 size += (LP_MAX_VECTOR_LENGTH / 4) * sizeof(float); 1051 stored = lp_scene_alloc_aligned(scene, size, LP_MIN_VECTOR_ALIGN); 1052 1053 if (!stored) { 1054 assert(!new_scene); 1055 return FALSE; 1056 } 1057 1058 /* Store floating point colour */ 1059 fstored = (float*)(stored + 4*16); 1060 for (i = 0; i < (LP_MAX_VECTOR_LENGTH / 4); ++i) { 1061 fstored[i] = setup->blend_color.current.color[i % 4]; 1062 } 1063 1064 /* smear each blend color component across 16 ubyte elements */ 1065 for (i = 0; i < 4; ++i) { 1066 uint8_t c = float_to_ubyte(setup->blend_color.current.color[i]); 1067 for (j = 0; j < 16; ++j) 1068 stored[i*16 + j] = c; 1069 } 1070 1071 setup->blend_color.stored = stored; 1072 setup->fs.current.jit_context.u8_blend_color = stored; 1073 setup->fs.current.jit_context.f_blend_color = fstored; 1074 setup->dirty |= LP_SETUP_NEW_FS; 1075 } 1076 1077 if (setup->dirty & LP_SETUP_NEW_CONSTANTS) { 1078 for (i = 0; i < ARRAY_SIZE(setup->constants); ++i) { 1079 struct pipe_resource *buffer = setup->constants[i].current.buffer; 1080 const unsigned current_size = MIN2(setup->constants[i].current.buffer_size, 1081 LP_MAX_TGSI_CONST_BUFFER_SIZE); 1082 const ubyte *current_data = NULL; 1083 int num_constants; 1084 1085 STATIC_ASSERT(DATA_BLOCK_SIZE >= LP_MAX_TGSI_CONST_BUFFER_SIZE); 1086 1087 if (buffer) { 1088 /* resource buffer */ 1089 current_data = (ubyte *) llvmpipe_resource_data(buffer); 1090 } 1091 else if (setup->constants[i].current.user_buffer) { 1092 /* user-space buffer */ 1093 current_data = (ubyte *) setup->constants[i].current.user_buffer; 1094 } 1095 1096 if (current_data) { 1097 current_data += setup->constants[i].current.buffer_offset; 1098 1099 /* TODO: copy only the actually used constants? 
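 * For now the whole bound range (capped at LP_MAX_TGSI_CONST_BUFFER_SIZE)
 * is copied into scene memory whenever its contents change, since the JIT
 * code may read it long after this call returns, possibly after the
 * application's buffer is gone.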
*/ 1100 1101 if (setup->constants[i].stored_size != current_size || 1102 !setup->constants[i].stored_data || 1103 memcmp(setup->constants[i].stored_data, 1104 current_data, 1105 current_size) != 0) { 1106 void *stored; 1107 1108 stored = lp_scene_alloc(scene, current_size); 1109 if (!stored) { 1110 assert(!new_scene); 1111 return FALSE; 1112 } 1113 1114 memcpy(stored, 1115 current_data, 1116 current_size); 1117 setup->constants[i].stored_size = current_size; 1118 setup->constants[i].stored_data = stored; 1119 } 1120 setup->fs.current.jit_context.constants[i] = 1121 setup->constants[i].stored_data; 1122 } 1123 else { 1124 setup->constants[i].stored_size = 0; 1125 setup->constants[i].stored_data = NULL; 1126 setup->fs.current.jit_context.constants[i] = fake_const_buf; 1127 } 1128 1129 num_constants = 1130 setup->constants[i].stored_size / (sizeof(float) * 4); 1131 setup->fs.current.jit_context.num_constants[i] = num_constants; 1132 setup->dirty |= LP_SETUP_NEW_FS; 1133 } 1134 } 1135 1136 1137 if (setup->dirty & LP_SETUP_NEW_FS) { 1138 if (!setup->fs.stored || 1139 memcmp(setup->fs.stored, 1140 &setup->fs.current, 1141 sizeof setup->fs.current) != 0) 1142 { 1143 struct lp_rast_state *stored; 1144 1145 /* The fs state that's been stored in the scene is different from 1146 * the new, current state. So allocate a new lp_rast_state object 1147 * and append it to the bin's setup data buffer. 1148 */ 1149 stored = (struct lp_rast_state *) lp_scene_alloc(scene, sizeof *stored); 1150 if (!stored) { 1151 assert(!new_scene); 1152 return FALSE; 1153 } 1154 1155 memcpy(stored, 1156 &setup->fs.current, 1157 sizeof setup->fs.current); 1158 setup->fs.stored = stored; 1159 1160 /* The scene now references the textures in the rasterization 1161 * state record. Note that now. 1162 */ 1163 for (i = 0; i < ARRAY_SIZE(setup->fs.current_tex); i++) { 1164 if (setup->fs.current_tex[i]) { 1165 if (!lp_scene_add_resource_reference(scene, 1166 setup->fs.current_tex[i], 1167 new_scene)) { 1168 assert(!new_scene); 1169 return FALSE; 1170 } 1171 } 1172 } 1173 } 1174 } 1175 1176 if (setup->dirty & LP_SETUP_NEW_SCISSOR) { 1177 unsigned i; 1178 for (i = 0; i < PIPE_MAX_VIEWPORTS; ++i) { 1179 setup->draw_regions[i] = setup->framebuffer; 1180 if (setup->scissor_test) { 1181 u_rect_possible_intersection(&setup->scissors[i], 1182 &setup->draw_regions[i]); 1183 } 1184 } 1185 } 1186 1187 setup->dirty = 0; 1188 1189 assert(setup->fs.stored); 1190 return TRUE; 1191} 1192 1193boolean 1194lp_setup_update_state( struct lp_setup_context *setup, 1195 boolean update_scene ) 1196{ 1197 /* Some of the 'draw' pipeline stages may have changed some driver state. 1198 * Make sure we've processed those state changes before anything else. 1199 * 1200 * XXX this is the only place where llvmpipe_context is used in the 1201 * setup code. This may get refactored/changed... 1202 */ 1203 { 1204 struct llvmpipe_context *lp = llvmpipe_context(setup->pipe); 1205 if (lp->dirty) { 1206 llvmpipe_update_derived(lp); 1207 } 1208 1209 if (lp->setup->dirty) { 1210 llvmpipe_update_setup(lp); 1211 } 1212 1213 assert(setup->setup.variant); 1214 1215 /* Will probably need to move this somewhere else, just need 1216 * to know about vertex shader point size attribute. 
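 * These are vertex-output slot indices computed during llvmpipe state
 * validation; caching them here lets the per-primitive setup code fetch
 * point size, viewport index, layer and face straight from the vertex data.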
1217 */ 1218 setup->psize_slot = lp->psize_slot; 1219 setup->viewport_index_slot = lp->viewport_index_slot; 1220 setup->layer_slot = lp->layer_slot; 1221 setup->face_slot = lp->face_slot; 1222 1223 assert(lp->dirty == 0); 1224 1225 assert(lp->setup_variant.key.size == 1226 setup->setup.variant->key.size); 1227 1228 assert(memcmp(&lp->setup_variant.key, 1229 &setup->setup.variant->key, 1230 setup->setup.variant->key.size) == 0); 1231 } 1232 1233 if (update_scene && setup->state != SETUP_ACTIVE) { 1234 if (!set_scene_state( setup, SETUP_ACTIVE, __FUNCTION__ )) 1235 return FALSE; 1236 } 1237 1238 /* Only call into update_scene_state() if we already have a 1239 * scene: 1240 */ 1241 if (update_scene && setup->scene) { 1242 assert(setup->state == SETUP_ACTIVE); 1243 1244 if (try_update_scene_state(setup)) 1245 return TRUE; 1246 1247 /* Update failed, try to restart the scene. 1248 * 1249 * Cannot call lp_setup_flush_and_restart() directly here 1250 * because of potential recursion. 1251 */ 1252 if (!set_scene_state(setup, SETUP_FLUSHED, __FUNCTION__)) 1253 return FALSE; 1254 1255 if (!set_scene_state(setup, SETUP_ACTIVE, __FUNCTION__)) 1256 return FALSE; 1257 1258 if (!setup->scene) 1259 return FALSE; 1260 1261 return try_update_scene_state(setup); 1262 } 1263 1264 return TRUE; 1265} 1266 1267 1268 1269/* Only caller is lp_setup_vbuf_destroy() 1270 */ 1271void 1272lp_setup_destroy( struct lp_setup_context *setup ) 1273{ 1274 uint i; 1275 1276 lp_setup_reset( setup ); 1277 1278 util_unreference_framebuffer_state(&setup->fb); 1279 1280 for (i = 0; i < ARRAY_SIZE(setup->fs.current_tex); i++) { 1281 pipe_resource_reference(&setup->fs.current_tex[i], NULL); 1282 } 1283 1284 for (i = 0; i < ARRAY_SIZE(setup->constants); i++) { 1285 pipe_resource_reference(&setup->constants[i].current.buffer, NULL); 1286 } 1287 1288 /* free the scenes in the 'empty' queue */ 1289 for (i = 0; i < ARRAY_SIZE(setup->scenes); i++) { 1290 struct lp_scene *scene = setup->scenes[i]; 1291 1292 if (scene->fence) 1293 lp_fence_wait(scene->fence); 1294 1295 lp_scene_destroy(scene); 1296 } 1297 1298 lp_fence_reference(&setup->last_fence, NULL); 1299 1300 FREE( setup ); 1301} 1302 1303 1304/** 1305 * Create a new primitive tiling engine. Plug it into the backend of 1306 * the draw module. Currently also creates a rasterizer to use with 1307 * it. 
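 * A small ring of MAX_SCENES scenes is allocated up front so that, in
 * principle, one scene can be binned while another is still being
 * rasterized (see the FIXME in lp_setup_rasterize_scene()).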
1308 */ 1309struct lp_setup_context * 1310lp_setup_create( struct pipe_context *pipe, 1311 struct draw_context *draw ) 1312{ 1313 struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen); 1314 struct lp_setup_context *setup; 1315 unsigned i; 1316 1317 setup = CALLOC_STRUCT(lp_setup_context); 1318 if (!setup) { 1319 goto no_setup; 1320 } 1321 1322 lp_setup_init_vbuf(setup); 1323 1324 /* Used only in update_state(): 1325 */ 1326 setup->pipe = pipe; 1327 1328 1329 setup->num_threads = screen->num_threads; 1330 setup->vbuf = draw_vbuf_stage(draw, &setup->base); 1331 if (!setup->vbuf) { 1332 goto no_vbuf; 1333 } 1334 1335 draw_set_rasterize_stage(draw, setup->vbuf); 1336 draw_set_render(draw, &setup->base); 1337 1338 /* create some empty scenes */ 1339 for (i = 0; i < MAX_SCENES; i++) { 1340 setup->scenes[i] = lp_scene_create( pipe ); 1341 if (!setup->scenes[i]) { 1342 goto no_scenes; 1343 } 1344 } 1345 1346 setup->triangle = first_triangle; 1347 setup->line = first_line; 1348 setup->point = first_point; 1349 1350 setup->dirty = ~0; 1351 1352 /* Initialize empty default fb correctly, so the rect is empty */ 1353 setup->framebuffer.x1 = -1; 1354 setup->framebuffer.y1 = -1; 1355 1356 return setup; 1357 1358no_scenes: 1359 for (i = 0; i < MAX_SCENES; i++) { 1360 if (setup->scenes[i]) { 1361 lp_scene_destroy(setup->scenes[i]); 1362 } 1363 } 1364 1365 setup->vbuf->destroy(setup->vbuf); 1366no_vbuf: 1367 FREE(setup); 1368no_setup: 1369 return NULL; 1370} 1371 1372 1373/** 1374 * Put a BeginQuery command into all bins. 1375 */ 1376void 1377lp_setup_begin_query(struct lp_setup_context *setup, 1378 struct llvmpipe_query *pq) 1379{ 1380 1381 set_scene_state(setup, SETUP_ACTIVE, "begin_query"); 1382 1383 if (!(pq->type == PIPE_QUERY_OCCLUSION_COUNTER || 1384 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE || 1385 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE || 1386 pq->type == PIPE_QUERY_PIPELINE_STATISTICS)) 1387 return; 1388 1389 /* init the query to its beginning state */ 1390 assert(setup->active_binned_queries < LP_MAX_ACTIVE_BINNED_QUERIES); 1391 /* exceeding list size so just ignore the query */ 1392 if (setup->active_binned_queries >= LP_MAX_ACTIVE_BINNED_QUERIES) { 1393 return; 1394 } 1395 assert(setup->active_queries[setup->active_binned_queries] == NULL); 1396 setup->active_queries[setup->active_binned_queries] = pq; 1397 setup->active_binned_queries++; 1398 1399 assert(setup->scene); 1400 if (setup->scene) { 1401 if (!lp_scene_bin_everywhere(setup->scene, 1402 LP_RAST_OP_BEGIN_QUERY, 1403 lp_rast_arg_query(pq))) { 1404 1405 if (!lp_setup_flush_and_restart(setup)) 1406 return; 1407 1408 if (!lp_scene_bin_everywhere(setup->scene, 1409 LP_RAST_OP_BEGIN_QUERY, 1410 lp_rast_arg_query(pq))) { 1411 return; 1412 } 1413 } 1414 setup->scene->had_queries |= TRUE; 1415 } 1416} 1417 1418 1419/** 1420 * Put an EndQuery command into all bins. 1421 */ 1422void 1423lp_setup_end_query(struct lp_setup_context *setup, struct llvmpipe_query *pq) 1424{ 1425 set_scene_state(setup, SETUP_ACTIVE, "end_query"); 1426 1427 assert(setup->scene); 1428 if (setup->scene) { 1429 /* pq->fence should be the fence of the *last* scene which 1430 * contributed to the query result. 
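 * Waiting on that single fence is then sufficient to know that every bin
 * which could contribute to the result has been executed.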
1431 */ 1432 lp_fence_reference(&pq->fence, setup->scene->fence); 1433 1434 if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER || 1435 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE || 1436 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE || 1437 pq->type == PIPE_QUERY_PIPELINE_STATISTICS || 1438 pq->type == PIPE_QUERY_TIMESTAMP) { 1439 if (pq->type == PIPE_QUERY_TIMESTAMP && 1440 !(setup->scene->tiles_x | setup->scene->tiles_y)) { 1441 /* 1442 * If there's a zero width/height framebuffer, there's no bins and 1443 * hence no rast task is ever run. So fill in something here instead. 1444 */ 1445 pq->end[0] = os_time_get_nano(); 1446 } 1447 1448 if (!lp_scene_bin_everywhere(setup->scene, 1449 LP_RAST_OP_END_QUERY, 1450 lp_rast_arg_query(pq))) { 1451 if (!lp_setup_flush_and_restart(setup)) 1452 goto fail; 1453 1454 if (!lp_scene_bin_everywhere(setup->scene, 1455 LP_RAST_OP_END_QUERY, 1456 lp_rast_arg_query(pq))) { 1457 goto fail; 1458 } 1459 } 1460 setup->scene->had_queries |= TRUE; 1461 } 1462 } 1463 else { 1464 lp_fence_reference(&pq->fence, setup->last_fence); 1465 } 1466 1467fail: 1468 /* Need to do this now not earlier since it still needs to be marked as 1469 * active when binning it would cause a flush. 1470 */ 1471 if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER || 1472 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE || 1473 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE || 1474 pq->type == PIPE_QUERY_PIPELINE_STATISTICS) { 1475 unsigned i; 1476 1477 /* remove from active binned query list */ 1478 for (i = 0; i < setup->active_binned_queries; i++) { 1479 if (setup->active_queries[i] == pq) 1480 break; 1481 } 1482 assert(i < setup->active_binned_queries); 1483 if (i == setup->active_binned_queries) 1484 return; 1485 setup->active_binned_queries--; 1486 setup->active_queries[i] = setup->active_queries[setup->active_binned_queries]; 1487 setup->active_queries[setup->active_binned_queries] = NULL; 1488 } 1489} 1490 1491 1492boolean 1493lp_setup_flush_and_restart(struct lp_setup_context *setup) 1494{ 1495 if (0) debug_printf("%s\n", __FUNCTION__); 1496 1497 assert(setup->state == SETUP_ACTIVE); 1498 1499 if (!set_scene_state(setup, SETUP_FLUSHED, __FUNCTION__)) 1500 return FALSE; 1501 1502 if (!lp_setup_update_state(setup, TRUE)) 1503 return FALSE; 1504 1505 return TRUE; 1506} 1507 1508 1509
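/*
 * Rough usage sketch (placeholder names: pipe, draw, fb_state, clear_color,
 * fence; error handling and most state setup omitted).  Clears issued before
 * any geometry are accumulated in the CLEARED state and only become per-bin
 * commands once binning starts:
 *
 *    struct lp_setup_context *setup = lp_setup_create(pipe, draw);
 *    lp_setup_bind_framebuffer(setup, &fb_state);
 *    lp_setup_clear(setup, &clear_color, 1.0, 0,
 *                   PIPE_CLEAR_COLOR0 | PIPE_CLEAR_DEPTHSTENCIL);
 *    ...the draw module then feeds primitives through setup->triangle(),
 *       setup->line() and setup->point()...
 *    lp_setup_flush(setup, &fence, __FUNCTION__);
 */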