1/************************************************************************** 2 * 3 * Copyright 2007 VMware, Inc. 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR 22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * 26 **************************************************************************/ 27 28/** 29 * Tiling engine. 30 * 31 * Builds per-tile display lists and executes them on calls to 32 * lp_setup_flush(). 
 */

#include <limits.h>

#include "pipe/p_defines.h"
#include "util/u_framebuffer.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include "util/u_viewport.h"
#include "draw/draw_pipe.h"
#include "util/os_time.h"
#include "lp_context.h"
#include "lp_memory.h"
#include "lp_scene.h"
#include "lp_texture.h"
#include "lp_debug.h"
#include "lp_fence.h"
#include "lp_query.h"
#include "lp_rast.h"
#include "lp_setup_context.h"
#include "lp_screen.h"
#include "lp_state.h"
#include "state_tracker/sw_winsys.h"

#include "draw/draw_context.h"
#include "draw/draw_vbuf.h"


static boolean set_scene_state( struct lp_setup_context *, enum setup_state,
                            const char *reason);
static boolean try_update_scene_state( struct lp_setup_context *setup );


/**
 * Advance to the next scene in the ring and open it for binning.
 *
 * If that scene's previous rasterization has not completed yet (its fence
 * is still outstanding), block until it has, so the scene's memory can be
 * reused safely.  Must only be called when there is no current scene.
 */
static void
lp_setup_get_empty_scene(struct lp_setup_context *setup)
{
   assert(setup->scene == NULL);

   setup->scene_idx++;
   setup->scene_idx %= ARRAY_SIZE(setup->scenes);

   setup->scene = setup->scenes[setup->scene_idx];

   if (setup->scene->fence) {
      if (LP_DEBUG & DEBUG_SETUP)
         debug_printf("%s: wait for scene %d\n",
                      __FUNCTION__, setup->scene->fence->id);

      /* Scene still owned by the rasterizer - wait for it to drain. */
      lp_fence_wait(setup->scene->fence);
   }

   lp_scene_begin_binning(setup->scene, &setup->fb);

}


/**
 * Stub triangle entry point used until the first triangle of a scene:
 * picks the real triangle rasterization function for the current state,
 * installs it in setup->triangle, then forwards this first primitive to it.
 */
static void
first_triangle( struct lp_setup_context *setup,
                const float (*v0)[4],
                const float (*v1)[4],
                const float (*v2)[4])
{
   assert(setup->state == SETUP_ACTIVE);
   lp_setup_choose_triangle( setup );
   setup->triangle( setup, v0, v1, v2 );
}

/** Same lazy-selection trick as first_triangle(), for lines. */
static void
first_line( struct lp_setup_context *setup,
            const float (*v0)[4],
            const float (*v1)[4])
{
   assert(setup->state == SETUP_ACTIVE);
   lp_setup_choose_line( setup );
   setup->line( setup, v0, v1 );
}

/** Same lazy-selection trick as first_triangle(), for points. */
static void
first_point( struct lp_setup_context *setup,
             const float (*v0)[4])
{
   assert(setup->state == SETUP_ACTIVE);
   lp_setup_choose_point( setup );
   setup->point( setup, v0 );
}

/**
 * Reset all derived/bin-local state after a scene has been handed off
 * to the rasterizer: forget stored (scene-allocated) copies of constants
 * and fs state, drop the current scene pointer, clear pending clears, and
 * re-install the lazy first_* primitive entry points.
 */
void lp_setup_reset( struct lp_setup_context *setup )
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   /* Reset derived state - the stored_* pointers referenced memory owned
    * by the scene that was just flushed, so they must not be reused. */
   for (i = 0; i < ARRAY_SIZE(setup->constants); ++i) {
      setup->constants[i].stored_size = 0;
      setup->constants[i].stored_data = NULL;
   }
   setup->fs.stored = NULL;
   setup->dirty = ~0;

   /* no current bin */
   setup->scene = NULL;

   /* Reset some state:
    */
   memset(&setup->clear, 0, sizeof setup->clear);

   /* Have an explicit "start-binning" call and get rid of this
    * pointer twiddling?
    */
   setup->line = first_line;
   setup->point = first_point;
   setup->triangle = first_triangle;
}


/** Rasterize all scene's bins */
static void
lp_setup_rasterize_scene( struct lp_setup_context *setup )
{
   struct lp_scene *scene = setup->scene;
   struct llvmpipe_screen *screen = llvmpipe_screen(scene->pipe->screen);

   /* Snapshot the currently-active queries into the scene so the rast
    * threads know which queries to accumulate into. */
   scene->num_active_queries = setup->active_binned_queries;
   memcpy(scene->active_queries, setup->active_queries,
          scene->num_active_queries * sizeof(scene->active_queries[0]));

   lp_scene_end_binning(scene);

   lp_fence_reference(&setup->last_fence, scene->fence);

   if (setup->last_fence)
      setup->last_fence->issued = TRUE;

   mtx_lock(&screen->rast_mutex);

   /* FIXME: We enqueue the scene then wait on the rasterizer to finish.
    * This means we never actually run any vertex stuff in parallel to
    * rasterization (not in the same context at least) which is what the
    * multiple scenes per setup is about - when we get a new empty scene
    * any old one is already empty again because we waited here for
    * raster tasks to be finished. Ideally, we shouldn't need to wait here
    * and rely on fences elsewhere when waiting is necessary.
    * Certainly, lp_scene_end_rasterization() would need to be deferred too
    * and there's probably other bits why this doesn't actually work.
    */
   lp_rast_queue_scene(screen->rast, scene);
   lp_rast_finish(screen->rast);
   mtx_unlock(&screen->rast_mutex);

   lp_scene_end_rasterization(setup->scene);
   lp_setup_reset( setup );

   LP_DBG(DEBUG_SETUP, "%s done \n", __FUNCTION__);
}



/**
 * Start binning into the current scene: create the scene's fence, push
 * current derived state into scene memory, and bin any clears that were
 * accumulated while in the SETUP_CLEARED state.
 *
 * Returns FALSE on scene allocation failure (caller restarts the scene).
 */
static boolean
begin_binning( struct lp_setup_context *setup )
{
   struct lp_scene *scene = setup->scene;
   boolean need_zsload = FALSE;
   boolean ok;

   assert(scene);
   assert(scene->fence == NULL);

   /* Always create a fence:
    */
   scene->fence = lp_fence_create(MAX2(1, setup->num_threads));
   if (!scene->fence)
      return FALSE;

   ok = try_update_scene_state(setup);
   if (!ok)
      return FALSE;

   /* A combined depth+stencil buffer that is only partially cleared must
    * be loaded (read-modify-write) rather than overwritten wholesale. */
   if (setup->fb.zsbuf &&
       ((setup->clear.flags & PIPE_CLEAR_DEPTHSTENCIL) != PIPE_CLEAR_DEPTHSTENCIL) &&
       util_format_is_depth_and_stencil(setup->fb.zsbuf->format))
      need_zsload = TRUE;

   /* NOTE(review): the "clear"/"load" labels below look inverted -
    * need_zsload==TRUE means the existing depth/stencil must be LOADED.
    * Debug output only; confirm intended wording before changing. */
   LP_DBG(DEBUG_SETUP, "%s color clear bufs: %x depth: %s\n", __FUNCTION__,
          setup->clear.flags >> 2,
          need_zsload ? "clear": "load");

   if (setup->clear.flags & PIPE_CLEAR_COLOR) {
      unsigned cbuf;
      for (cbuf = 0; cbuf < setup->fb.nr_cbufs; cbuf++) {
         /* Per-cbuf clear bits start at PIPE_CLEAR_COLOR0 == bit 2. */
         assert(PIPE_CLEAR_COLOR0 == 1 << 2);
         if (setup->clear.flags & (1 << (2 + cbuf))) {
            union lp_rast_cmd_arg clearrb_arg;
            struct lp_rast_clear_rb *cc_scene =
               (struct lp_rast_clear_rb *)
                  lp_scene_alloc(scene, sizeof(struct lp_rast_clear_rb));

            if (!cc_scene) {
               return FALSE;
            }

            cc_scene->cbuf = cbuf;
            cc_scene->color_val = setup->clear.color_val[cbuf];
            clearrb_arg.clear_rb = cc_scene;

            /* Bin the deferred color clear into every tile. */
            if (!lp_scene_bin_everywhere(scene,
                                         LP_RAST_OP_CLEAR_COLOR,
                                         clearrb_arg))
               return FALSE;
         }
      }
   }

   if (setup->fb.zsbuf) {
      if (setup->clear.flags & PIPE_CLEAR_DEPTHSTENCIL) {
         /* Bin the deferred depth/stencil clear into every tile. */
         ok = lp_scene_bin_everywhere( scene,
                                       LP_RAST_OP_CLEAR_ZSTENCIL,
                                       lp_rast_arg_clearzs(
                                          setup->clear.zsvalue,
                                          setup->clear.zsmask));
         if (!ok)
            return FALSE;
      }
   }

   /* Pending clears have been converted into binned commands. */
   setup->clear.flags = 0;
   setup->clear.zsmask = 0;
   setup->clear.zsvalue = 0;

   scene->had_queries = !!setup->active_binned_queries;

   LP_DBG(DEBUG_SETUP, "%s done\n", __FUNCTION__);
   return TRUE;
}


/* This basically bins and then flushes any outstanding full-screen
 * clears.
 *
 * TODO: fast path for fullscreen clears and no triangles.
274 */ 275static boolean 276execute_clears( struct lp_setup_context *setup ) 277{ 278 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__); 279 280 return begin_binning( setup ); 281} 282 283const char *states[] = { 284 "FLUSHED", 285 "CLEARED", 286 "ACTIVE " 287}; 288 289 290static boolean 291set_scene_state( struct lp_setup_context *setup, 292 enum setup_state new_state, 293 const char *reason) 294{ 295 unsigned old_state = setup->state; 296 297 if (old_state == new_state) 298 return TRUE; 299 300 if (LP_DEBUG & DEBUG_SCENE) { 301 debug_printf("%s old %s new %s%s%s\n", 302 __FUNCTION__, 303 states[old_state], 304 states[new_state], 305 (new_state == SETUP_FLUSHED) ? ": " : "", 306 (new_state == SETUP_FLUSHED) ? reason : ""); 307 308 if (new_state == SETUP_FLUSHED && setup->scene) 309 lp_debug_draw_bins_by_cmd_length(setup->scene); 310 } 311 312 /* wait for a free/empty scene 313 */ 314 if (old_state == SETUP_FLUSHED) 315 lp_setup_get_empty_scene(setup); 316 317 switch (new_state) { 318 case SETUP_CLEARED: 319 break; 320 321 case SETUP_ACTIVE: 322 if (!begin_binning( setup )) 323 goto fail; 324 break; 325 326 case SETUP_FLUSHED: 327 if (old_state == SETUP_CLEARED) 328 if (!execute_clears( setup )) 329 goto fail; 330 331 lp_setup_rasterize_scene( setup ); 332 assert(setup->scene == NULL); 333 break; 334 335 default: 336 assert(0 && "invalid setup state mode"); 337 goto fail; 338 } 339 340 setup->state = new_state; 341 return TRUE; 342 343fail: 344 if (setup->scene) { 345 lp_scene_end_rasterization(setup->scene); 346 setup->scene = NULL; 347 } 348 349 setup->state = SETUP_FLUSHED; 350 lp_setup_reset( setup ); 351 return FALSE; 352} 353 354 355void 356lp_setup_flush( struct lp_setup_context *setup, 357 struct pipe_fence_handle **fence, 358 const char *reason) 359{ 360 set_scene_state( setup, SETUP_FLUSHED, reason ); 361 362 if (fence) { 363 lp_fence_reference((struct lp_fence **)fence, setup->last_fence); 364 if (!*fence) 365 *fence = (struct pipe_fence_handle 
*)lp_fence_create(0); 366 } 367} 368 369 370void 371lp_setup_bind_framebuffer( struct lp_setup_context *setup, 372 const struct pipe_framebuffer_state *fb ) 373{ 374 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__); 375 376 /* Flush any old scene. 377 */ 378 set_scene_state( setup, SETUP_FLUSHED, __FUNCTION__ ); 379 380 /* 381 * Ensure the old scene is not reused. 382 */ 383 assert(!setup->scene); 384 385 /* Set new state. This will be picked up later when we next need a 386 * scene. 387 */ 388 util_copy_framebuffer_state(&setup->fb, fb); 389 setup->framebuffer.x0 = 0; 390 setup->framebuffer.y0 = 0; 391 setup->framebuffer.x1 = fb->width-1; 392 setup->framebuffer.y1 = fb->height-1; 393 setup->dirty |= LP_SETUP_NEW_SCISSOR; 394} 395 396 397/* 398 * Try to clear one color buffer of the attached fb, either by binning a clear 399 * command or queuing up the clear for later (when binning is started). 400 */ 401static boolean 402lp_setup_try_clear_color_buffer(struct lp_setup_context *setup, 403 const union pipe_color_union *color, 404 unsigned cbuf) 405{ 406 union lp_rast_cmd_arg clearrb_arg; 407 union util_color uc; 408 enum pipe_format format = setup->fb.cbufs[cbuf]->format; 409 410 LP_DBG(DEBUG_SETUP, "%s state %d\n", __FUNCTION__, setup->state); 411 412 if (util_format_is_pure_integer(format)) { 413 /* 414 * We expect int/uint clear values here, though some APIs 415 * might disagree (but in any case util_pack_color() 416 * couldn't handle it)... 417 */ 418 if (util_format_is_pure_sint(format)) { 419 util_format_write_4i(format, color->i, 0, &uc, 0, 0, 0, 1, 1); 420 } 421 else { 422 assert(util_format_is_pure_uint(format)); 423 util_format_write_4ui(format, color->ui, 0, &uc, 0, 0, 0, 1, 1); 424 } 425 } 426 else { 427 util_pack_color(color->f, format, &uc); 428 } 429 430 if (setup->state == SETUP_ACTIVE) { 431 struct lp_scene *scene = setup->scene; 432 433 /* Add the clear to existing scene. 
In the unusual case where
       * both color and depth-stencil are being cleared when there's
       * already been some rendering, we could discard the currently
       * binned scene and start again, but I don't see that as being
       * a common usage.
       */
      struct lp_rast_clear_rb *cc_scene =
         (struct lp_rast_clear_rb *)
            lp_scene_alloc_aligned(scene, sizeof(struct lp_rast_clear_rb), 8);

      if (!cc_scene) {
         return FALSE;
      }

      cc_scene->cbuf = cbuf;
      cc_scene->color_val = uc;
      clearrb_arg.clear_rb = cc_scene;

      /* Bin the clear command into every tile of the scene. */
      if (!lp_scene_bin_everywhere(scene,
                                   LP_RAST_OP_CLEAR_COLOR,
                                   clearrb_arg))
         return FALSE;
   }
   else {
      /* Put ourselves into the 'pre-clear' state, specifically to try
       * and accumulate multiple clears to color and depth_stencil
       * buffers which the app or state-tracker might issue
       * separately.
       */
      set_scene_state( setup, SETUP_CLEARED, __FUNCTION__ );

      assert(PIPE_CLEAR_COLOR0 == (1 << 2));
      setup->clear.flags |= 1 << (cbuf + 2);
      setup->clear.color_val[cbuf] = uc;
   }

   return TRUE;
}

/*
 * Try to clear the depth and/or stencil planes of the bound zsbuf, either
 * by binning a clear command (active scene) or accumulating the packed
 * value/mask pair for later (pre-clear state).  The mask selects which
 * bits of the combined z/s word are written.
 */
static boolean
lp_setup_try_clear_zs(struct lp_setup_context *setup,
                      double depth,
                      unsigned stencil,
                      unsigned flags)
{
   uint64_t zsmask = 0;
   uint64_t zsvalue = 0;
   uint32_t zmask32;
   uint8_t smask8;
   enum pipe_format format = setup->fb.zsbuf->format;

   LP_DBG(DEBUG_SETUP, "%s state %d\n", __FUNCTION__, setup->state);

   /* All-ones plane masks for whichever of depth/stencil is being cleared. */
   zmask32 = (flags & PIPE_CLEAR_DEPTH) ? ~0 : 0;
   smask8 = (flags & PIPE_CLEAR_STENCIL) ? ~0 : 0;

   zsvalue = util_pack64_z_stencil(format, depth, stencil);

   zsmask = util_pack64_mask_z_stencil(format, zmask32, smask8);

   zsvalue &= zsmask;

   if (format == PIPE_FORMAT_Z24X8_UNORM ||
       format == PIPE_FORMAT_X8Z24_UNORM) {
      /*
       * Make full mask if there's "X" bits so we can do full
       * clear (without rmw).
       */
      uint32_t zsmask_full = 0;
      zsmask_full = util_pack_mask_z_stencil(format, ~0, ~0);
      zsmask |= ~zsmask_full;
   }

   if (setup->state == SETUP_ACTIVE) {
      struct lp_scene *scene = setup->scene;

      /* Add the clear to existing scene.  In the unusual case where
       * both color and depth-stencil are being cleared when there's
       * already been some rendering, we could discard the currently
       * binned scene and start again, but I don't see that as being
       * a common usage.
       */
      if (!lp_scene_bin_everywhere(scene,
                                   LP_RAST_OP_CLEAR_ZSTENCIL,
                                   lp_rast_arg_clearzs(zsvalue, zsmask)))
         return FALSE;
   }
   else {
      /* Put ourselves into the 'pre-clear' state, specifically to try
       * and accumulate multiple clears to color and depth_stencil
       * buffers which the app or state-tracker might issue
       * separately.
       */
      set_scene_state( setup, SETUP_CLEARED, __FUNCTION__ );

      setup->clear.flags |= flags;

      /* Merge with any previously accumulated z/s clear: new masked bits
       * override, bits outside the mask keep their earlier value. */
      setup->clear.zsmask |= zsmask;
      setup->clear.zsvalue =
         (setup->clear.zsvalue & ~zsmask) | (zsvalue & zsmask);
   }

   return TRUE;
}

/**
 * pipe_context::clear entry point for llvmpipe's setup module.
 * Dispatches to the z/s and per-colorbuffer try-clear helpers, retrying
 * each once after a flush if the scene ran out of memory.
 */
void
lp_setup_clear( struct lp_setup_context *setup,
                const union pipe_color_union *color,
                double depth,
                unsigned stencil,
                unsigned flags )
{
   unsigned i;

   /*
    * Note any of these (max 9) clears could fail (but at most there should
    * be just one failure!). This avoids doing the previous succeeded
    * clears again (we still clear tiles twice if a clear command succeeded
    * partially for one buffer).
    */
   if (flags & PIPE_CLEAR_DEPTHSTENCIL) {
      unsigned flagszs = flags & PIPE_CLEAR_DEPTHSTENCIL;
      if (!lp_setup_try_clear_zs(setup, depth, stencil, flagszs)) {
         /* Scene out of memory: flush it and retry once on a fresh scene. */
         lp_setup_flush(setup, NULL, __FUNCTION__);

         if (!lp_setup_try_clear_zs(setup, depth, stencil, flagszs))
            assert(0);
      }
   }

   if (flags & PIPE_CLEAR_COLOR) {
      assert(PIPE_CLEAR_COLOR0 == (1 << 2));
      for (i = 0; i < setup->fb.nr_cbufs; i++) {
         if ((flags & (1 << (2 + i))) && setup->fb.cbufs[i]) {
            if (!lp_setup_try_clear_color_buffer(setup, color, i)) {
               /* Same retry-after-flush strategy as the z/s path above. */
               lp_setup_flush(setup, NULL, __FUNCTION__);

               if (!lp_setup_try_clear_color_buffer(setup, color, i))
                  assert(0);
            }
         }
      }
   }
}



/**
 * Record rasterizer triangle state (cull/winding/pixel-center rules).
 * Resets setup->triangle to the lazy chooser so the next triangle picks
 * a rasterization path matching the new state.
 */
void
lp_setup_set_triangle_state( struct lp_setup_context *setup,
                             unsigned cull_mode,
                             boolean ccw_is_frontface,
                             boolean scissor,
                             boolean half_pixel_center,
                             boolean bottom_edge_rule)
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   setup->ccw_is_frontface = ccw_is_frontface;
   setup->cullmode = cull_mode;
   setup->triangle = first_triangle;
   /* GL-style half-pixel sample offset vs. D3D9-style integer centers. */
   setup->pixel_offset = half_pixel_center ? 0.5f : 0.0f;
   setup->bottom_edge_rule = bottom_edge_rule;

   if (setup->scissor_test != scissor) {
      setup->dirty |= LP_SETUP_NEW_SCISSOR;
      setup->scissor_test = scissor;
   }
}

/** Record the current line width for line rasterization. */
void
lp_setup_set_line_state( struct lp_setup_context *setup,
                         float line_width)
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   setup->line_width = line_width;
}

/** Record point size/sprite state for point rasterization. */
void
lp_setup_set_point_state( struct lp_setup_context *setup,
                          float point_size,
                          boolean point_size_per_vertex,
                          uint sprite_coord_enable,
                          uint sprite_coord_origin)
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   setup->point_size = point_size;
   setup->sprite_coord_enable = sprite_coord_enable;
   setup->sprite_coord_origin = sprite_coord_origin;
   setup->point_size_per_vertex = point_size_per_vertex;
}

/** Install the interpolation-setup variant used to build triangle coefs. */
void
lp_setup_set_setup_variant( struct lp_setup_context *setup,
                            const struct lp_setup_variant *variant)
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   setup->setup.variant = variant;
}

/** Install the current fragment shader variant (JIT code run per tile). */
void
lp_setup_set_fs_variant( struct lp_setup_context *setup,
                         struct lp_fragment_shader_variant *variant)
{
   LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__,
          variant);
   /* FIXME: reference count */

   setup->fs.current.variant = variant;
   setup->dirty |= LP_SETUP_NEW_FS;
}

/**
 * Bind fragment shader constant buffers; slots beyond 'num' are unbound.
 * Contents are copied into the scene later, in try_update_scene_state().
 */
void
lp_setup_set_fs_constants(struct lp_setup_context *setup,
                          unsigned num,
                          struct pipe_constant_buffer *buffers)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) buffers);

   assert(num <= ARRAY_SIZE(setup->constants));

   for (i = 0; i < num; ++i) {
      util_copy_constant_buffer(&setup->constants[i].current, &buffers[i]);
   }
   for (; i < ARRAY_SIZE(setup->constants); i++) {
      util_copy_constant_buffer(&setup->constants[i].current, NULL);
   }
   setup->dirty |= LP_SETUP_NEW_CONSTANTS;
}


/** Record the alpha-test reference value in the fs JIT context. */
void
lp_setup_set_alpha_ref_value( struct
                              lp_setup_context *setup,
                              float alpha_ref_value )
{
   LP_DBG(DEBUG_SETUP, "%s %f\n", __FUNCTION__, alpha_ref_value);

   if(setup->fs.current.jit_context.alpha_ref_value != alpha_ref_value) {
      setup->fs.current.jit_context.alpha_ref_value = alpha_ref_value;
      setup->dirty |= LP_SETUP_NEW_FS;
   }
}

/** Record front/back stencil reference values in the fs JIT context. */
void
lp_setup_set_stencil_ref_values( struct lp_setup_context *setup,
                                 const ubyte refs[2] )
{
   LP_DBG(DEBUG_SETUP, "%s %d %d\n", __FUNCTION__, refs[0], refs[1]);

   if (setup->fs.current.jit_context.stencil_ref_front != refs[0] ||
       setup->fs.current.jit_context.stencil_ref_back != refs[1]) {
      setup->fs.current.jit_context.stencil_ref_front = refs[0];
      setup->fs.current.jit_context.stencil_ref_back = refs[1];
      setup->dirty |= LP_SETUP_NEW_FS;
   }
}

/** Record the blend constant color; flagged dirty only when it changes. */
void
lp_setup_set_blend_color( struct lp_setup_context *setup,
                          const struct pipe_blend_color *blend_color )
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(blend_color);

   if(memcmp(&setup->blend_color.current, blend_color, sizeof *blend_color) != 0) {
      memcpy(&setup->blend_color.current, blend_color, sizeof *blend_color);
      setup->dirty |= LP_SETUP_NEW_BLEND_COLOR;
   }
}


/**
 * Record scissor rectangles for all viewports, converted from the
 * half-open pipe_scissor_state (min inclusive, max exclusive) to the
 * inclusive x0/y0..x1/y1 convention used by the binner.
 */
void
lp_setup_set_scissors( struct lp_setup_context *setup,
                       const struct pipe_scissor_state *scissors )
{
   unsigned i;
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(scissors);

   for (i = 0; i < PIPE_MAX_VIEWPORTS; ++i) {
      setup->scissors[i].x0 = scissors[i].minx;
      setup->scissors[i].x1 = scissors[i].maxx-1;
      setup->scissors[i].y0 = scissors[i].miny;
      setup->scissors[i].y1 = scissors[i].maxy-1;
   }
   setup->dirty |= LP_SETUP_NEW_SCISSOR;
}


/** Record which vertex (first/last) provides flat-shaded attributes. */
void
lp_setup_set_flatshade_first(struct lp_setup_context *setup,
                             boolean flatshade_first)
{
   setup->flatshade_first = flatshade_first;
}

/**
 * Toggle rasterizer discard; re-installs the lazy first_* entry points so
 * the primitive paths are re-chosen under the new setting.
 */
void
lp_setup_set_rasterizer_discard(struct lp_setup_context *setup,
                                boolean rasterizer_discard)
{
   if (setup->rasterizer_discard != rasterizer_discard) {
      setup->rasterizer_discard = rasterizer_discard;
      setup->line = first_line;
      setup->point = first_point;
      setup->triangle = first_triangle;
   }
}

/** Record the draw module's vertex layout description. */
void
lp_setup_set_vertex_info(struct lp_setup_context *setup,
                         struct vertex_info *vertex_info)
{
   /* XXX: just silently holding onto the pointer:
    */
   setup->vertex_info = vertex_info;
}


/**
 * Called during state validation when LP_NEW_VIEWPORT is set.
 */
void
lp_setup_set_viewports(struct lp_setup_context *setup,
                       unsigned num_viewports,
                       const struct pipe_viewport_state *viewports)
{
   struct llvmpipe_context *lp = llvmpipe_context(setup->pipe);
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num_viewports <= PIPE_MAX_VIEWPORTS);
   assert(viewports);

   /*
    * For use in lp_state_fs.c, propagate the viewport values for all viewports.
    */
   for (i = 0; i < num_viewports; i++) {
      float min_depth;
      float max_depth;
      /* Derive the depth-clamp range honoring the clip_halfz convention. */
      util_viewport_zmin_zmax(&viewports[i], lp->rasterizer->clip_halfz,
                              &min_depth, &max_depth);

      if (setup->viewports[i].min_depth != min_depth ||
          setup->viewports[i].max_depth != max_depth) {
         setup->viewports[i].min_depth = min_depth;
         setup->viewports[i].max_depth = max_depth;
         setup->dirty |= LP_SETUP_NEW_VIEWPORTS;
      }
   }
}


/**
 * Called during state validation when LP_NEW_SAMPLER_VIEW is set.
 */
void
lp_setup_set_fragment_sampler_views(struct lp_setup_context *setup,
                                    unsigned num,
                                    struct pipe_sampler_view **views)
{
   unsigned i, max_tex_num;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);

   /* Also walk previously-bound slots above 'num' so they get unbound. */
   max_tex_num = MAX2(num, setup->fs.current_tex_num);

   for (i = 0; i < max_tex_num; i++) {
      struct pipe_sampler_view *view = i < num ?
views[i] : NULL;

      if (view) {
         struct pipe_resource *res = view->texture;
         struct llvmpipe_resource *lp_tex = llvmpipe_resource(res);
         struct lp_jit_texture *jit_tex;
         jit_tex = &setup->fs.current.jit_context.textures[i];

         /* We're referencing the texture's internal data, so save a
          * reference to it.
          */
         pipe_resource_reference(&setup->fs.current_tex[i], res);

         if (!lp_tex->dt) {
            /* regular texture - setup array of mipmap level offsets */
            int j;
            unsigned first_level = 0;
            unsigned last_level = 0;

            if (llvmpipe_resource_is_texture(res)) {
               first_level = view->u.tex.first_level;
               last_level = view->u.tex.last_level;
               assert(first_level <= last_level);
               assert(last_level <= res->last_level);
               jit_tex->base = lp_tex->tex_data;
            }
            else {
               jit_tex->base = lp_tex->data;
            }

            if (LP_PERF & PERF_TEX_MEM) {
               /* use dummy tile memory (perf debugging: skips real
                * texture fetches entirely) */
               jit_tex->base = lp_dummy_tile;
               jit_tex->width = TILE_SIZE/8;
               jit_tex->height = TILE_SIZE/8;
               jit_tex->depth = 1;
               jit_tex->first_level = 0;
               jit_tex->last_level = 0;
               jit_tex->mip_offsets[0] = 0;
               jit_tex->row_stride[0] = 0;
               jit_tex->img_stride[0] = 0;
            }
            else {
               jit_tex->width = res->width0;
               jit_tex->height = res->height0;
               jit_tex->depth = res->depth0;
               jit_tex->first_level = first_level;
               jit_tex->last_level = last_level;

               if (llvmpipe_resource_is_texture(res)) {
                  /* Copy per-level layout info into the JIT context. */
                  for (j = first_level; j <= last_level; j++) {
                     jit_tex->mip_offsets[j] = lp_tex->mip_offsets[j];
                     jit_tex->row_stride[j] = lp_tex->row_stride[j];
                     jit_tex->img_stride[j] = lp_tex->img_stride[j];
                  }

                  if (res->target == PIPE_TEXTURE_1D_ARRAY ||
                      res->target == PIPE_TEXTURE_2D_ARRAY ||
                      res->target == PIPE_TEXTURE_CUBE ||
                      res->target == PIPE_TEXTURE_CUBE_ARRAY) {
                     /*
                      * For array textures, we don't have first_layer, instead
                      * adjust last_layer (stored as depth) plus the mip level offsets
                      * (as we have mip-first layout can't just adjust base ptr).
                      * XXX For mip levels, could do something similar.
                      */
                     jit_tex->depth = view->u.tex.last_layer - view->u.tex.first_layer + 1;
                     for (j = first_level; j <= last_level; j++) {
                        jit_tex->mip_offsets[j] += view->u.tex.first_layer *
                                                   lp_tex->img_stride[j];
                     }
                     if (view->target == PIPE_TEXTURE_CUBE ||
                         view->target == PIPE_TEXTURE_CUBE_ARRAY) {
                        assert(jit_tex->depth % 6 == 0);
                     }
                     assert(view->u.tex.first_layer <= view->u.tex.last_layer);
                     assert(view->u.tex.last_layer < res->array_size);
                  }
               }
               else {
                  /*
                   * For buffers, we don't have "offset", instead adjust
                   * the size (stored as width) plus the base pointer.
                   */
                  unsigned view_blocksize = util_format_get_blocksize(view->format);
                  /* probably don't really need to fill that out */
                  jit_tex->mip_offsets[0] = 0;
                  jit_tex->row_stride[0] = 0;
                  jit_tex->img_stride[0] = 0;

                  /* everything specified in number of elements here. */
                  jit_tex->width = view->u.buf.size / view_blocksize;
                  jit_tex->base = (uint8_t *)jit_tex->base + view->u.buf.offset;
                  /* XXX Unsure if we need to sanitize parameters? */
                  assert(view->u.buf.offset + view->u.buf.size <= res->width0);
               }
            }
         }
         else {
            /* display target texture/surface */
            /*
             * XXX: Where should this be unmapped?
             */
            struct llvmpipe_screen *screen = llvmpipe_screen(res->screen);
            struct sw_winsys *winsys = screen->winsys;
            jit_tex->base = winsys->displaytarget_map(winsys, lp_tex->dt,
                                                      PIPE_TRANSFER_READ);
            jit_tex->row_stride[0] = lp_tex->row_stride[0];
            jit_tex->img_stride[0] = lp_tex->img_stride[0];
            jit_tex->mip_offsets[0] = 0;
            jit_tex->width = res->width0;
            jit_tex->height = res->height0;
            jit_tex->depth = res->depth0;
            jit_tex->first_level = jit_tex->last_level = 0;
            assert(jit_tex->base);
         }
      }
      else {
         /* Slot unbound: drop our reference to any previous texture. */
         pipe_resource_reference(&setup->fs.current_tex[i], NULL);
      }
   }
   setup->fs.current_tex_num = num;

   setup->dirty |= LP_SETUP_NEW_FS;
}


/**
 * Called during state validation when LP_NEW_SAMPLER is set.
 */
void
lp_setup_set_fragment_sampler_state(struct lp_setup_context *setup,
                                    unsigned num,
                                    struct pipe_sampler_state **samplers)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num <= PIPE_MAX_SAMPLERS);

   for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
      const struct pipe_sampler_state *sampler = i < num ? samplers[i] : NULL;

      if (sampler) {
         /* Copy the LOD/border parameters the JIT'd shader reads. */
         struct lp_jit_sampler *jit_sam;
         jit_sam = &setup->fs.current.jit_context.samplers[i];

         jit_sam->min_lod = sampler->min_lod;
         jit_sam->max_lod = sampler->max_lod;
         jit_sam->lod_bias = sampler->lod_bias;
         COPY_4V(jit_sam->border_color, sampler->border_color.f);
      }
   }

   setup->dirty |= LP_SETUP_NEW_FS;
}


/**
 * Is the given texture referenced by any scene?
 * Note: we have to check all scenes including any scenes currently
 * being rendered and the current scene being built.
 */
unsigned
lp_setup_is_resource_referenced( const struct lp_setup_context *setup,
                                 const struct pipe_resource *texture )
{
   unsigned i;

   /* check the render targets */
   for (i = 0; i < setup->fb.nr_cbufs; i++) {
      if (setup->fb.cbufs[i] && setup->fb.cbufs[i]->texture == texture)
         return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
   }
   if (setup->fb.zsbuf && setup->fb.zsbuf->texture == texture) {
      return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
   }

   /* check textures referenced by the scene */
   for (i = 0; i < ARRAY_SIZE(setup->scenes); i++) {
      if (lp_scene_is_resource_referenced(setup->scenes[i], texture)) {
         return LP_REFERENCED_FOR_READ;
      }
   }

   return LP_UNREFERENCED;
}


/**
 * Called by vbuf code when we're about to draw something.
 *
 * This function stores all dirty state in the current scene's display list
 * memory, via lp_scene_alloc(). We can not pass pointers of mutable state to
 * the JIT functions, as the JIT functions will be called later on, most likely
 * on a different thread.
 *
 * When processing dirty state it is imperative that we don't refer to any
 * pointers previously allocated with lp_scene_alloc() in this function (or any
 * function) as they may belong to a scene freed since then.
 */
static boolean
try_update_scene_state( struct lp_setup_context *setup )
{
   static const float fake_const_buf[4];
   /* fs.stored == NULL means nothing has been binned into this scene yet,
    * so an allocation failure here would be unrecoverable (asserted below). */
   boolean new_scene = (setup->fs.stored == NULL);
   struct lp_scene *scene = setup->scene;
   unsigned i;

   assert(scene);

   if (setup->dirty & LP_SETUP_NEW_VIEWPORTS) {
      /*
       * Record new depth range state for changes due to viewport updates.
       *
       * TODO: Collapse the existing viewport and depth range information
       * into one structure, for access by JIT.
       */
      struct lp_jit_viewport *stored;

      stored = (struct lp_jit_viewport *)
         lp_scene_alloc(scene, sizeof setup->viewports);

      if (!stored) {
         assert(!new_scene);
         return FALSE;
      }

      memcpy(stored, setup->viewports, sizeof setup->viewports);

      setup->fs.current.jit_context.viewports = stored;
      setup->dirty |= LP_SETUP_NEW_FS;
   }

   if(setup->dirty & LP_SETUP_NEW_BLEND_COLOR) {
      uint8_t *stored;
      float* fstored;
      unsigned i, j;
      unsigned size;

      /* Alloc u8_blend_color (16 x i8) and f_blend_color (4 or 8 x f32) */
      size  = 4 * 16 * sizeof(uint8_t);
      size += (LP_MAX_VECTOR_LENGTH / 4) * sizeof(float);
      stored = lp_scene_alloc_aligned(scene, size, LP_MIN_VECTOR_ALIGN);

      if (!stored) {
         assert(!new_scene);
         return FALSE;
      }

      /* Store floating point colour */
      fstored = (float*)(stored + 4*16);
      for (i = 0; i < (LP_MAX_VECTOR_LENGTH / 4); ++i) {
         fstored[i] = setup->blend_color.current.color[i % 4];
      }

      /* smear each blend color component across 16 ubyte elements */
      for (i = 0; i < 4; ++i) {
         uint8_t c = float_to_ubyte(setup->blend_color.current.color[i]);
         for (j = 0; j < 16; ++j)
            stored[i*16 + j] = c;
      }

      setup->blend_color.stored = stored;
      setup->fs.current.jit_context.u8_blend_color = stored;
      setup->fs.current.jit_context.f_blend_color = fstored;
      setup->dirty |= LP_SETUP_NEW_FS;
   }

   if (setup->dirty & LP_SETUP_NEW_CONSTANTS) {
      for (i = 0; i < ARRAY_SIZE(setup->constants); ++i) {
         struct pipe_resource *buffer = setup->constants[i].current.buffer;
         const unsigned current_size = MIN2(setup->constants[i].current.buffer_size,
                                            LP_MAX_TGSI_CONST_BUFFER_SIZE);
         const ubyte *current_data = NULL;
         int num_constants;

         STATIC_ASSERT(DATA_BLOCK_SIZE >= LP_MAX_TGSI_CONST_BUFFER_SIZE);

         if (buffer) {
            /* resource
buffer */
            current_data = (ubyte *) llvmpipe_resource_data(buffer);
         }
         else if (setup->constants[i].current.user_buffer) {
            /* user-space buffer */
            current_data = (ubyte *) setup->constants[i].current.user_buffer;
         }

         if (current_data) {
            current_data += setup->constants[i].current.buffer_offset;

            /* TODO: copy only the actually used constants? */

            /* Only snapshot into scene memory when the contents actually
             * changed since the last stored copy. */
            if (setup->constants[i].stored_size != current_size ||
                !setup->constants[i].stored_data ||
                memcmp(setup->constants[i].stored_data,
                       current_data,
                       current_size) != 0) {
               void *stored;

               stored = lp_scene_alloc(scene, current_size);
               if (!stored) {
                  assert(!new_scene);
                  return FALSE;
               }

               memcpy(stored,
                      current_data,
                      current_size);
               setup->constants[i].stored_size = current_size;
               setup->constants[i].stored_data = stored;
            }
            setup->fs.current.jit_context.constants[i] =
               setup->constants[i].stored_data;
         }
         else {
            /* Slot unbound - point the JIT at a small dummy buffer so it
             * never dereferences NULL. */
            setup->constants[i].stored_size = 0;
            setup->constants[i].stored_data = NULL;
            setup->fs.current.jit_context.constants[i] = fake_const_buf;
         }

         /* Constants are vec4s as far as the JIT is concerned. */
         num_constants =
            setup->constants[i].stored_size / (sizeof(float) * 4);
         setup->fs.current.jit_context.num_constants[i] = num_constants;
         setup->dirty |= LP_SETUP_NEW_FS;
      }
   }


   if (setup->dirty & LP_SETUP_NEW_FS) {
      if (!setup->fs.stored ||
          memcmp(setup->fs.stored,
                 &setup->fs.current,
                 sizeof setup->fs.current) != 0)
      {
         struct lp_rast_state *stored;

         /* The fs state that's been stored in the scene is different from
          * the new, current state.  So allocate a new lp_rast_state object
          * and append it to the bin's setup data buffer.
          */
         stored = (struct lp_rast_state *) lp_scene_alloc(scene, sizeof *stored);
         if (!stored) {
            assert(!new_scene);
            return FALSE;
         }

         memcpy(stored,
                &setup->fs.current,
                sizeof setup->fs.current);
         setup->fs.stored = stored;

         /* The scene now references the textures in the rasterization
          * state record.  Note that now.
          */
         for (i = 0; i < ARRAY_SIZE(setup->fs.current_tex); i++) {
            if (setup->fs.current_tex[i]) {
               if (!lp_scene_add_resource_reference(scene,
                                                    setup->fs.current_tex[i],
                                                    new_scene)) {
                  assert(!new_scene);
                  return FALSE;
               }
            }
         }
      }
   }

   if (setup->dirty & LP_SETUP_NEW_SCISSOR) {
      unsigned i;
      /* Draw region = framebuffer rect, optionally intersected with the
       * per-viewport scissor rect. */
      for (i = 0; i < PIPE_MAX_VIEWPORTS; ++i) {
         setup->draw_regions[i] = setup->framebuffer;
         if (setup->scissor_test) {
            u_rect_possible_intersection(&setup->scissors[i],
                                         &setup->draw_regions[i]);
         }
      }
   }

   setup->dirty = 0;

   assert(setup->fs.stored);
   return TRUE;
}

/**
 * Validate all derived state before a draw; if update_scene is TRUE,
 * also make a scene active and store the dirty state into it, flushing
 * and restarting the scene once if it runs out of memory.
 *
 * Returns FALSE if a usable scene could not be obtained.
 */
boolean
lp_setup_update_state( struct lp_setup_context *setup,
                       boolean update_scene )
{
   /* Some of the 'draw' pipeline stages may have changed some driver state.
    * Make sure we've processed those state changes before anything else.
    *
    * XXX this is the only place where llvmpipe_context is used in the
    * setup code.  This may get refactored/changed...
    */
   {
      struct llvmpipe_context *lp = llvmpipe_context(setup->pipe);
      if (lp->dirty) {
         llvmpipe_update_derived(lp);
      }

      if (lp->setup->dirty) {
         llvmpipe_update_setup(lp);
      }

      assert(setup->setup.variant);

      /* Will probably need to move this somewhere else, just need
       * to know about vertex shader point size attribute.
       */
      setup->psize_slot = lp->psize_slot;
      setup->viewport_index_slot = lp->viewport_index_slot;
      setup->layer_slot = lp->layer_slot;
      setup->face_slot = lp->face_slot;

      assert(lp->dirty == 0);

      /* The setup variant key must exactly match what the context derived. */
      assert(lp->setup_variant.key.size ==
             setup->setup.variant->key.size);

      assert(memcmp(&lp->setup_variant.key,
                    &setup->setup.variant->key,
                    setup->setup.variant->key.size) == 0);
   }

   if (update_scene && setup->state != SETUP_ACTIVE) {
      if (!set_scene_state( setup, SETUP_ACTIVE, __FUNCTION__ ))
         return FALSE;
   }

   /* Only call into update_scene_state() if we already have a
    * scene:
    */
   if (update_scene && setup->scene) {
      assert(setup->state == SETUP_ACTIVE);

      if (try_update_scene_state(setup))
         return TRUE;

      /* Update failed, try to restart the scene.
       *
       * Cannot call lp_setup_flush_and_restart() directly here
       * because of potential recursion.
       */
      if (!set_scene_state(setup, SETUP_FLUSHED, __FUNCTION__))
         return FALSE;

      if (!set_scene_state(setup, SETUP_ACTIVE, __FUNCTION__))
         return FALSE;

      if (!setup->scene)
         return FALSE;

      /* Second (and last) attempt against the fresh scene. */
      return try_update_scene_state(setup);
   }

   return TRUE;
}



/* Destroy the setup context and everything it owns: texture and constant
 * buffer references, the queued scenes (waiting on their fences first),
 * and the context itself.
 *
 * Only caller is lp_setup_vbuf_destroy()
 */
void
lp_setup_destroy( struct lp_setup_context *setup )
{
   uint i;

   lp_setup_reset( setup );

   util_unreference_framebuffer_state(&setup->fb);

   /* Drop the texture references held by the current fs state. */
   for (i = 0; i < ARRAY_SIZE(setup->fs.current_tex); i++) {
      pipe_resource_reference(&setup->fs.current_tex[i], NULL);
   }

   /* Drop the constant buffer references. */
   for (i = 0; i < ARRAY_SIZE(setup->constants); i++) {
      pipe_resource_reference(&setup->constants[i].current.buffer, NULL);
   }

   /* free the scenes in the 'empty' queue */
   for (i = 0; i < ARRAY_SIZE(setup->scenes); i++) {
      struct lp_scene *scene = setup->scenes[i];

      /* Wait for any rasterization still using the scene to finish
       * before destroying it.
       */
      if (scene->fence)
         lp_fence_wait(scene->fence);

      lp_scene_destroy(scene);
   }

   lp_fence_reference(&setup->last_fence, NULL);

   FREE( setup );
}


/**
 * Create a new primitive tiling engine.  Plug it into the backend of
 * the draw module.  Currently also creates a rasterizer to use with
 * it.
 *
 * \return the new setup context, or NULL on allocation failure.
 */
struct lp_setup_context *
lp_setup_create( struct pipe_context *pipe,
                 struct draw_context *draw )
{
   struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
   struct lp_setup_context *setup;
   unsigned i;

   setup = CALLOC_STRUCT(lp_setup_context);
   if (!setup) {
      goto no_setup;
   }

   lp_setup_init_vbuf(setup);

   /* Used only in update_state():
    */
   setup->pipe = pipe;


   setup->num_threads = screen->num_threads;
   setup->vbuf = draw_vbuf_stage(draw, &setup->base);
   if (!setup->vbuf) {
      goto no_vbuf;
   }

   draw_set_rasterize_stage(draw, setup->vbuf);
   draw_set_render(draw, &setup->base);

   /* create some empty scenes */
   for (i = 0; i < MAX_SCENES; i++) {
      setup->scenes[i] = lp_scene_create( pipe );
      if (!setup->scenes[i]) {
         goto no_scenes;
      }
   }

   /* First-use hooks: these choose the real triangle/line/point
    * rasterization functions on the first primitive.
    */
   setup->triangle = first_triangle;
   setup->line = first_line;
   setup->point = first_point;

   /* Force all state to be re-emitted on first use. */
   setup->dirty = ~0;

   /* Initialize empty default fb correctly, so the rect is empty */
   setup->framebuffer.x1 = -1;
   setup->framebuffer.y1 = -1;

   return setup;

no_scenes:
   /* Unwind the scenes that were successfully created. */
   for (i = 0; i < MAX_SCENES; i++) {
      if (setup->scenes[i]) {
         lp_scene_destroy(setup->scenes[i]);
      }
   }

   setup->vbuf->destroy(setup->vbuf);
no_vbuf:
   FREE(setup);
no_setup:
   return NULL;
}


/**
 * Put a BeginQuery command into all bins.
 */
void
lp_setup_begin_query(struct lp_setup_context *setup,
                     struct llvmpipe_query *pq)
{

   set_scene_state(setup, SETUP_ACTIVE, "begin_query");

   /* Only these query types are tracked per-bin; anything else needs no
    * binned command.
    */
   if (!(pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
         pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
         pq->type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE ||
         pq->type == PIPE_QUERY_PIPELINE_STATISTICS))
      return;

   /* init the query to its beginning state */
   assert(setup->active_binned_queries < LP_MAX_ACTIVE_BINNED_QUERIES);
   /* exceeding list size so just ignore the query */
   if (setup->active_binned_queries >= LP_MAX_ACTIVE_BINNED_QUERIES) {
      return;
   }
   assert(setup->active_queries[setup->active_binned_queries] == NULL);
   setup->active_queries[setup->active_binned_queries] = pq;
   setup->active_binned_queries++;

   assert(setup->scene);
   if (setup->scene) {
      if (!lp_scene_bin_everywhere(setup->scene,
                                   LP_RAST_OP_BEGIN_QUERY,
                                   lp_rast_arg_query(pq))) {

         /* Binning failed (scene full): flush, restart, and retry once
          * against the fresh scene.
          */
         if (!lp_setup_flush_and_restart(setup))
            return;

         if (!lp_scene_bin_everywhere(setup->scene,
                                      LP_RAST_OP_BEGIN_QUERY,
                                      lp_rast_arg_query(pq))) {
            return;
         }
      }
      setup->scene->had_queries |= TRUE;
   }
}


/**
 * Put an EndQuery command into all bins.
 */
void
lp_setup_end_query(struct lp_setup_context *setup, struct llvmpipe_query *pq)
{
   set_scene_state(setup, SETUP_ACTIVE, "end_query");

   assert(setup->scene);
   if (setup->scene) {
      /* pq->fence should be the fence of the *last* scene which
       * contributed to the query result.
       */
      lp_fence_reference(&pq->fence, setup->scene->fence);

      if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
          pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
          pq->type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE ||
          pq->type == PIPE_QUERY_PIPELINE_STATISTICS ||
          pq->type == PIPE_QUERY_TIMESTAMP) {
         if (pq->type == PIPE_QUERY_TIMESTAMP &&
               !(setup->scene->tiles_x | setup->scene->tiles_y)) {
            /*
             * If there's a zero width/height framebuffer, there's no bins and
             * hence no rast task is ever run. So fill in something here instead.
             */
            pq->end[0] = os_time_get_nano();
         }

         if (!lp_scene_bin_everywhere(setup->scene,
                                      LP_RAST_OP_END_QUERY,
                                      lp_rast_arg_query(pq))) {
            /* Scene full: flush, restart, and retry once. */
            if (!lp_setup_flush_and_restart(setup))
               goto fail;

            if (!lp_scene_bin_everywhere(setup->scene,
                                         LP_RAST_OP_END_QUERY,
                                         lp_rast_arg_query(pq))) {
               goto fail;
            }
         }
         setup->scene->had_queries |= TRUE;
      }
   }
   else {
      lp_fence_reference(&pq->fence, setup->last_fence);
   }

fail:
   /* Control falls through here on success as well as via 'goto fail'.
    *
    * Need to do this now not earlier since it still needs to be marked as
    * active when binning it would cause a flush.
    */
   if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
       pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
       pq->type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE ||
       pq->type == PIPE_QUERY_PIPELINE_STATISTICS) {
      unsigned i;

      /* remove from active binned query list */
      for (i = 0; i < setup->active_binned_queries; i++) {
         if (setup->active_queries[i] == pq)
            break;
      }
      assert(i < setup->active_binned_queries);
      if (i == setup->active_binned_queries)
         return;
      /* Swap-remove: move the last entry into the freed slot. */
      setup->active_binned_queries--;
      setup->active_queries[i] = setup->active_queries[setup->active_binned_queries];
      setup->active_queries[setup->active_binned_queries] = NULL;
   }
}


/**
 * Flush the current scene and immediately start a new one, re-binning
 * the current state into it.
 *
 * \return FALSE if flushing or re-activating the scene state failed.
 */
boolean
lp_setup_flush_and_restart(struct lp_setup_context *setup)
{
   if (0) debug_printf("%s\n", __FUNCTION__);

   assert(setup->state == SETUP_ACTIVE);

   if (!set_scene_state(setup, SETUP_FLUSHED, __FUNCTION__))
      return FALSE;

   if (!lp_setup_update_state(setup, TRUE))
      return FALSE;

   return TRUE;
}