1/* 2 * Copyright © 2013 Keith Packard 3 * Copyright © 2015 Boyan Ding 4 * 5 * Permission to use, copy, modify, distribute, and sell this software and its 6 * documentation for any purpose is hereby granted without fee, provided that 7 * the above copyright notice appear in all copies and that both that copyright 8 * notice and this permission notice appear in supporting documentation, and 9 * that the name of the copyright holders not be used in advertising or 10 * publicity pertaining to distribution of the software without specific, 11 * written prior permission. The copyright holders make no representations 12 * about the suitability of this software for any purpose. It is provided "as 13 * is" without express or implied warranty. 14 * 15 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, 16 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO 17 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR 18 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, 19 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 20 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE 21 * OF THIS SOFTWARE. 22 */ 23 24#include <fcntl.h> 25#include <stdlib.h> 26#include <unistd.h> 27#include <string.h> 28 29#include <X11/xshmfence.h> 30#include <xcb/xcb.h> 31#include <xcb/dri3.h> 32#include <xcb/present.h> 33 34#include <X11/Xlib-xcb.h> 35 36#include "loader_dri3_helper.h" 37#include "util/macros.h" 38#include "drm-uapi/drm_fourcc.h" 39 40/* From xmlpool/options.h, user exposed so should be stable */ 41#define DRI_CONF_VBLANK_NEVER 0 42#define DRI_CONF_VBLANK_DEF_INTERVAL_0 1 43#define DRI_CONF_VBLANK_DEF_INTERVAL_1 2 44#define DRI_CONF_VBLANK_ALWAYS_SYNC 3 45 46/** 47 * A cached blit context. 
 */
struct loader_dri3_blit_context {
   mtx_t mtx;                      /* Guards all members below */
   __DRIcontext *ctx;              /* Cached blit context, or NULL */
   __DRIscreen *cur_screen;        /* Screen the cached context belongs to */
   const __DRIcoreExtension *core; /* Core extension used to destroy ctx */
};

/* For simplicity we maintain the cache only for a single screen at a time */
static struct loader_dri3_blit_context blit_context = {
   _MTX_INITIALIZER_NP, NULL
};

static void
dri3_flush_present_events(struct loader_dri3_drawable *draw);

static struct loader_dri3_buffer *
dri3_find_back_alloc(struct loader_dri3_drawable *draw);

/* Find the xcb_screen_t whose root window is 'root', or NULL if the
 * connection's setup lists no such screen.
 */
static xcb_screen_t *
get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
      if (screen_iter.data->root == root)
         return screen_iter.data;
   }

   return NULL;
}

/* Return the first visual the drawable's screen advertises at the given
 * depth, or NULL if the screen is unknown or no depth entry matches.
 */
static xcb_visualtype_t *
get_xcb_visualtype_for_depth(struct loader_dri3_drawable *draw, int depth)
{
   xcb_visualtype_iterator_t visual_iter;
   xcb_screen_t *screen = draw->screen;
   xcb_depth_iterator_t depth_iter;

   if (!screen)
      return NULL;

   depth_iter = xcb_screen_allowed_depths_iterator(screen);
   for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
      if (depth_iter.data->depth != depth)
         continue;

      visual_iter = xcb_depth_visuals_iterator(depth_iter.data);
      if (visual_iter.rem)
         return visual_iter.data;
   }

   return NULL;
}

/* Sets the adaptive sync window property state.
 *
 * A non-zero 'state' sets the _VARIABLE_REFRESH CARDINAL property on the
 * drawable; zero removes the property. Errors are ignored (best effort).
 */
static void
set_adaptive_sync_property(xcb_connection_t *conn, xcb_drawable_t drawable,
                           uint32_t state)
{
   static char const name[] = "_VARIABLE_REFRESH";
   xcb_intern_atom_cookie_t cookie;
   xcb_intern_atom_reply_t* reply;
   xcb_void_cookie_t check;

   cookie = xcb_intern_atom(conn, 0, strlen(name), name);
   reply = xcb_intern_atom_reply(conn, cookie, NULL);
   if (reply == NULL)
      return;

   if (state)
      check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
                                          drawable, reply->atom,
                                          XCB_ATOM_CARDINAL, 32, 1, &state);
   else
      check = xcb_delete_property_checked(conn, drawable, reply->atom);

   /* Fire-and-forget: we don't need the result of the checked request. */
   xcb_discard_reply(conn, check.sequence);
   free(reply);
}

/* Get red channel mask for given drawable at given depth. */
static unsigned int
dri3_get_red_mask_for_depth(struct loader_dri3_drawable *draw, int depth)
{
   xcb_visualtype_t *visual = get_xcb_visualtype_for_depth(draw, depth);

   if (visual)
      return visual->red_mask;

   return 0;
}

/**
 * Do we have blit functionality in the image blit extension?
 *
 * \param draw[in]  The drawable intended to blit from / to.
 * \return  true if we have blit functionality. false otherwise.
 */
static bool loader_dri3_have_image_blit(const struct loader_dri3_drawable *draw)
{
   return draw->ext->image->base.version >= 9 &&
          draw->ext->image->blitImage != NULL;
}

/**
 * Get and lock (for use with the current thread) a dri context associated
 * with the drawable's dri screen. The context is intended to be used with
 * the dri image extension's blitImage method.
 *
 * \param draw[in]  Pointer to the drawable whose dri screen we want a
 * dri context for.
 * \return A dri context or NULL if context creation failed.
 *
 * When the caller is done with the context (even if the context returned was
 * NULL), the caller must call loader_dri3_blit_context_put.
 */
static __DRIcontext *
loader_dri3_blit_context_get(struct loader_dri3_drawable *draw)
{
   mtx_lock(&blit_context.mtx);

   /* The cache holds a context for one screen only; throw away a cached
    * context created for a different screen.
    */
   if (blit_context.ctx && blit_context.cur_screen != draw->dri_screen) {
      blit_context.core->destroyContext(blit_context.ctx);
      blit_context.ctx = NULL;
   }

   if (!blit_context.ctx) {
      /* NOTE(review): createNewContext may return NULL; cur_screen/core are
       * still recorded, which is harmless since ctx stays NULL.
       */
      blit_context.ctx = draw->ext->core->createNewContext(draw->dri_screen,
                                                           NULL, NULL, NULL);
      blit_context.cur_screen = draw->dri_screen;
      blit_context.core = draw->ext->core;
   }

   return blit_context.ctx;
}

/**
 * Release (for use with other threads) a dri context previously obtained using
 * loader_dri3_blit_context_get.
 */
static void
loader_dri3_blit_context_put(void)
{
   mtx_unlock(&blit_context.mtx);
}

/**
 * Blit (parts of) the contents of a DRI image to another dri image
 *
 * \param draw[in]  The drawable which owns the images.
 * \param dst[in]  The destination image.
 * \param src[in]  The source image.
 * \param dstx0[in]  Start destination coordinate.
 * \param dsty0[in]  Start destination coordinate.
 * \param width[in]  Blit width.
 * \param height[in] Blit height.
 * \param srcx0[in]  Start source coordinate.
 * \param srcy0[in]  Start source coordinate.
 * \param flush_flag[in]  Image blit flush flag.
 * \return true iff successful.
 */
static bool
loader_dri3_blit_image(struct loader_dri3_drawable *draw,
                       __DRIimage *dst, __DRIimage *src,
                       int dstx0, int dsty0, int width, int height,
                       int srcx0, int srcy0, int flush_flag)
{
   __DRIcontext *dri_context;
   bool use_blit_context = false;

   if (!loader_dri3_have_image_blit(draw))
      return false;

   dri_context = draw->vtable->get_dri_context(draw);

   /* If no context is current (or there is none), borrow the cached blit
    * context and force a flush so the work isn't left queued on it.
    */
   if (!dri_context || !draw->vtable->in_current_context(draw)) {
      dri_context = loader_dri3_blit_context_get(draw);
      use_blit_context = true;
      flush_flag |= __BLIT_FLAG_FLUSH;
   }

   if (dri_context)
      draw->ext->image->blitImage(dri_context, dst, src, dstx0, dsty0,
                                  width, height, srcx0, srcy0,
                                  width, height, flush_flag);

   if (use_blit_context)
      loader_dri3_blit_context_put();

   return dri_context != NULL;
}

/* Reset the buffer's shared-memory fence to the untriggered state. */
static inline void
dri3_fence_reset(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xshmfence_reset(buffer->shm_fence);
}

/* Trigger the buffer's fence locally (client side). */
static inline void
dri3_fence_set(struct loader_dri3_buffer *buffer)
{
   xshmfence_trigger(buffer->shm_fence);
}

/* Ask the X server to trigger the buffer's SYNC fence. */
static inline void
dri3_fence_trigger(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xcb_sync_trigger_fence(c, buffer->sync_fence);
}

/* Block until the buffer's fence is triggered, then drain any pending
 * Present events (under the drawable mutex) if a drawable was given.
 */
static inline void
dri3_fence_await(xcb_connection_t *c, struct loader_dri3_drawable *draw,
                 struct loader_dri3_buffer *buffer)
{
   xcb_flush(c);
   xshmfence_await(buffer->shm_fence);
   if (draw) {
      mtx_lock(&draw->mtx);
      dri3_flush_present_events(draw);
      mtx_unlock(&draw->mtx);
   }
}

/* Use three back buffers when flipping (the display controller may hold one),
 * two otherwise.
 */
static void
dri3_update_num_back(struct loader_dri3_drawable *draw)
{
   if (draw->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP)
      draw->num_back = 3;
   else
      draw->num_back = 2;
}

/* Record the swap interval used when computing target MSC values. */
void
loader_dri3_set_swap_interval(struct loader_dri3_drawable *draw, int interval)
{
   draw->swap_interval = interval;
}

/** dri3_free_render_buffer
 *
 * Free everything associated with one render buffer including pixmap, fence
 * stuff and the driver image
 */
static void
dri3_free_render_buffer(struct loader_dri3_drawable *draw,
                        struct loader_dri3_buffer *buffer)
{
   if (buffer->own_pixmap)
      xcb_free_pixmap(draw->conn, buffer->pixmap);
   xcb_sync_destroy_fence(draw->conn, buffer->sync_fence);
   xshmfence_unmap_shm(buffer->shm_fence);
   draw->ext->image->destroyImage(buffer->image);
   if (buffer->linear_buffer)
      draw->ext->image->destroyImage(buffer->linear_buffer);
   free(buffer);
}

/* Tear down a drawable: free all render buffers, stop Present event
 * delivery, and destroy the synchronization primitives.
 */
void
loader_dri3_drawable_fini(struct loader_dri3_drawable *draw)
{
   int i;

   draw->ext->core->destroyDrawable(draw->dri_drawable);

   for (i = 0; i < ARRAY_SIZE(draw->buffers); i++) {
      if (draw->buffers[i])
         dri3_free_render_buffer(draw, draw->buffers[i]);
   }

   if (draw->special_event) {
      /* Stop the server from sending further Present events before
       * unregistering the special event queue.
       */
      xcb_void_cookie_t cookie =
         xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
                                          XCB_PRESENT_EVENT_MASK_NO_EVENT);

      xcb_discard_reply(draw->conn, cookie.sequence);
      xcb_unregister_for_special_event(draw->conn, draw->special_event);
   }

   cnd_destroy(&draw->event_cnd);
   mtx_destroy(&draw->mtx);
}

/* Initialize a loader_dri3_drawable: read driconf options, create the DRI
 * drawable, and fetch the X drawable's geometry.
 *
 * \return 0 on success, 1 on failure.
 */
int
loader_dri3_drawable_init(xcb_connection_t *conn,
                          xcb_drawable_t drawable,
                          __DRIscreen *dri_screen,
                          bool is_different_gpu,
                          bool multiplanes_available,
                          const __DRIconfig *dri_config,
                          struct loader_dri3_extensions *ext,
                          const struct loader_dri3_vtable *vtable,
                          struct loader_dri3_drawable *draw)
{
   xcb_get_geometry_cookie_t cookie;
   xcb_get_geometry_reply_t *reply;
   xcb_generic_error_t *error;
   GLint vblank_mode = DRI_CONF_VBLANK_DEF_INTERVAL_1;
   int swap_interval;

   draw->conn = conn;
   draw->ext = ext;
   draw->vtable = vtable;
   draw->drawable = drawable;
   draw->dri_screen = dri_screen;
   draw->is_different_gpu = is_different_gpu;
   draw->multiplanes_available = multiplanes_available;

   draw->have_back = 0;
   draw->have_fake_front = 0;
   draw->first_init = true;
   draw->adaptive_sync = false;
   draw->adaptive_sync_active = false;

   draw->cur_blit_source = -1;
   draw->back_format = __DRI_IMAGE_FORMAT_NONE;
   mtx_init(&draw->mtx, mtx_plain);
   cnd_init(&draw->event_cnd);

   if (draw->ext->config) {
      unsigned char adaptive_sync = 0;

      draw->ext->config->configQueryi(draw->dri_screen,
                                      "vblank_mode", &vblank_mode);

      draw->ext->config->configQueryb(draw->dri_screen,
                                      "adaptive_sync",
                                      &adaptive_sync);

      draw->adaptive_sync = adaptive_sync;
   }

   /* Make sure a stale _VARIABLE_REFRESH property is cleared when the
    * option is disabled.
    */
   if (!draw->adaptive_sync)
      set_adaptive_sync_property(conn, draw->drawable, false);

   switch (vblank_mode) {
   case DRI_CONF_VBLANK_NEVER:
   case DRI_CONF_VBLANK_DEF_INTERVAL_0:
      swap_interval = 0;
      break;
   case DRI_CONF_VBLANK_DEF_INTERVAL_1:
   case DRI_CONF_VBLANK_ALWAYS_SYNC:
   default:
      swap_interval = 1;
      break;
   }
   draw->swap_interval = swap_interval;

   dri3_update_num_back(draw);

   /* Create a new drawable */
   draw->dri_drawable =
      draw->ext->image_driver->createNewDrawable(dri_screen,
                                                 dri_config,
                                                 draw);

   if (!draw->dri_drawable)
      return 1;

   cookie = xcb_get_geometry(draw->conn, draw->drawable);
   reply = xcb_get_geometry_reply(draw->conn, cookie, &error);
   if (reply == NULL || error != NULL) {
      draw->ext->core->destroyDrawable(draw->dri_drawable);
      return 1;
   }

   draw->screen = get_screen_for_root(draw->conn, reply->root);
   draw->width = reply->width;
   draw->height = reply->height;
   draw->depth = reply->depth;
   draw->vtable->set_drawable_size(draw, draw->width, draw->height);
   free(reply);

   draw->swap_method = __DRI_ATTRIB_SWAP_UNDEFINED;
   if (draw->ext->core->base.version >= 2) {
      (void )draw->ext->core->getConfigAttrib(dri_config,
                                              __DRI_ATTRIB_SWAP_METHOD,
                                              &draw->swap_method);
   }

   /*
    * Make sure server has the same swap interval we do for the new
    * drawable.
    */
   loader_dri3_set_swap_interval(draw, swap_interval);

   return 0;
}

/*
 * Process one Present event
 */
static void
dri3_handle_present_event(struct loader_dri3_drawable *draw,
                          xcb_present_generic_event_t *ge)
{
   switch (ge->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *ce = (void *) ge;

      draw->width = ce->width;
      draw->height = ce->height;
      draw->vtable->set_drawable_size(draw, draw->width, draw->height);
      draw->ext->flush->invalidate(draw->dri_drawable);
      break;
   }
   case XCB_PRESENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *ce = (void *) ge;

      /* Compute the processed SBC number from the received 32-bit serial number
       * merged with the upper 32-bits of the sent 64-bit serial number while
       * checking for wrap.
       */
      if (ce->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
         uint64_t recv_sbc = (draw->send_sbc & 0xffffffff00000000LL) | ce->serial;

         /* Only assume wraparound if that results in exactly the previous
          * SBC + 1, otherwise ignore received SBC > sent SBC (those are
          * probably from a previous loader_dri3_drawable instance) to avoid
          * calculating bogus target MSC values in loader_dri3_swap_buffers_msc
          */
         if (recv_sbc <= draw->send_sbc)
            draw->recv_sbc = recv_sbc;
         else if (recv_sbc == (draw->recv_sbc + 0x100000001ULL))
            draw->recv_sbc = recv_sbc - 0x100000000ULL;

         /* When moving from flip to copy, we assume that we can allocate in
          * a more optimal way if we don't need to cater for the display
          * controller.
          */
         if (ce->mode == XCB_PRESENT_COMPLETE_MODE_COPY &&
             draw->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP) {
            for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
               if (draw->buffers[b])
                  draw->buffers[b]->reallocate = true;
            }
         }

         /* If the server tells us that our allocation is suboptimal, we
          * reallocate once.
          */
#ifdef HAVE_DRI3_MODIFIERS
         if (ce->mode == XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY &&
             draw->last_present_mode != ce->mode) {
            for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
               if (draw->buffers[b])
                  draw->buffers[b]->reallocate = true;
            }
         }
#endif
         draw->last_present_mode = ce->mode;

         if (draw->vtable->show_fps)
            draw->vtable->show_fps(draw, ce->ust);

         draw->ust = ce->ust;
         draw->msc = ce->msc;
      } else if (ce->serial == draw->eid) {
         /* Completion of a NotifyMSC request we issued (serial == eid). */
         draw->notify_ust = ce->ust;
         draw->notify_msc = ce->msc;
      }
      break;
   }
   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *ie = (void *) ge;
      int b;

      /* The server is done with this pixmap; mark the matching buffer idle. */
      for (b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
         struct loader_dri3_buffer *buf = draw->buffers[b];

         if (buf && buf->pixmap == ie->pixmap)
            buf->busy = 0;
      }
      break;
   }
   }
   free(ge);
}

/* Wait for one Present event and process it; caller holds draw->mtx.
 *
 * \return false if the special event queue broke (e.g. connection error),
 *         true otherwise (including when another thread did the waiting).
 */
static bool
dri3_wait_for_event_locked(struct loader_dri3_drawable *draw)
{
   xcb_generic_event_t *ev;
   xcb_present_generic_event_t *ge;

   xcb_flush(draw->conn);

   /* Only have one thread waiting for events at a time */
   if (draw->has_event_waiter) {
      cnd_wait(&draw->event_cnd, &draw->mtx);
      /* Another thread has updated the protected info, so retest. */
      return true;
   } else {
      draw->has_event_waiter = true;
      /* Allow other threads access to the drawable while we're waiting.
       */
      mtx_unlock(&draw->mtx);
      ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
      mtx_lock(&draw->mtx);
      draw->has_event_waiter = false;
      cnd_broadcast(&draw->event_cnd);
   }
   if (!ev)
      return false;
   ge = (void *) ev;
   dri3_handle_present_event(draw, ge);
   return true;
}

/** loader_dri3_wait_for_msc
 *
 * Get the X server to send an event when the target msc/divisor/remainder is
 * reached.
 */
bool
loader_dri3_wait_for_msc(struct loader_dri3_drawable *draw,
                         int64_t target_msc,
                         int64_t divisor, int64_t remainder,
                         int64_t *ust, int64_t *msc, int64_t *sbc)
{
   xcb_void_cookie_t cookie = xcb_present_notify_msc(draw->conn,
                                                     draw->drawable,
                                                     draw->eid,
                                                     target_msc,
                                                     divisor,
                                                     remainder);
   xcb_generic_event_t *ev;
   unsigned full_sequence;

   mtx_lock(&draw->mtx);
   xcb_flush(draw->conn);

   /* Wait for the event */
   do {
      ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
      if (!ev) {
         mtx_unlock(&draw->mtx);
         return false;
      }

      full_sequence = ev->full_sequence;
      dri3_handle_present_event(draw, (void *) ev);
   } while (full_sequence != cookie.sequence || draw->notify_msc < target_msc);

   *ust = draw->notify_ust;
   *msc = draw->notify_msc;
   *sbc = draw->recv_sbc;
   mtx_unlock(&draw->mtx);

   return true;
}

/** loader_dri3_wait_for_sbc
 *
 * Wait for the completed swap buffer count to reach the specified
 * target. Presumably the application knows that this will be reached with
 * outstanding complete events, or we're going to be here awhile.
 */
int
loader_dri3_wait_for_sbc(struct loader_dri3_drawable *draw,
                         int64_t target_sbc, int64_t *ust,
                         int64_t *msc, int64_t *sbc)
{
   /* From the GLX_OML_sync_control spec:
    *
    *     "If <target_sbc> = 0, the function will block until all previous
    *      swaps requested with glXSwapBuffersMscOML for that window have
    *      completed."
    */
   mtx_lock(&draw->mtx);
   if (!target_sbc)
      target_sbc = draw->send_sbc;

   while (draw->recv_sbc < target_sbc) {
      if (!dri3_wait_for_event_locked(draw)) {
         mtx_unlock(&draw->mtx);
         return 0;
      }
   }

   *ust = draw->ust;
   *msc = draw->msc;
   *sbc = draw->recv_sbc;
   mtx_unlock(&draw->mtx);
   return 1;
}

/** loader_dri3_find_back
 *
 * Find an idle back buffer. If there isn't one, then
 * wait for a present idle notify event from the X server
 */
static int
dri3_find_back(struct loader_dri3_drawable *draw)
{
   int b;
   int num_to_consider;

   mtx_lock(&draw->mtx);
   /* Increase the likelyhood of reusing current buffer */
   dri3_flush_present_events(draw);

   /* Check whether we need to reuse the current back buffer as new back.
    * In that case, wait until it's not busy anymore.
    */
   num_to_consider = draw->num_back;
   if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1) {
      num_to_consider = 1;
      draw->cur_blit_source = -1;
   }

   for (;;) {
      for (b = 0; b < num_to_consider; b++) {
         int id = LOADER_DRI3_BACK_ID((b + draw->cur_back) % draw->num_back);
         struct loader_dri3_buffer *buffer = draw->buffers[id];

         if (!buffer || !buffer->busy) {
            draw->cur_back = id;
            mtx_unlock(&draw->mtx);
            return id;
         }
      }
      if (!dri3_wait_for_event_locked(draw)) {
         mtx_unlock(&draw->mtx);
         return -1;
      }
   }
}

/* Lazily create (and cache) a GC for copies to the drawable. */
static xcb_gcontext_t
dri3_drawable_gc(struct loader_dri3_drawable *draw)
{
   if (!draw->gc) {
      uint32_t v = 0;
      xcb_create_gc(draw->conn,
                    (draw->gc = xcb_generate_id(draw->conn)),
                    draw->drawable,
                    XCB_GC_GRAPHICS_EXPOSURES,
                    &v);
   }
   return draw->gc;
}


/* Return the current back buffer slot. */
static struct loader_dri3_buffer *
dri3_back_buffer(struct loader_dri3_drawable *draw)
{
   return draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)];
}

/* Return the fake front buffer slot. */
static struct loader_dri3_buffer *
dri3_fake_front_buffer(struct loader_dri3_drawable *draw)
{
   return draw->buffers[LOADER_DRI3_FRONT_ID];
}

/* Issue a CopyArea request and discard the reply (errors ignored). */
static void
dri3_copy_area(xcb_connection_t *c,
               xcb_drawable_t    src_drawable,
               xcb_drawable_t    dst_drawable,
               xcb_gcontext_t    gc,
               int16_t           src_x,
               int16_t           src_y,
               int16_t           dst_x,
               int16_t           dst_y,
               uint16_t          width,
               uint16_t          height)
{
   xcb_void_cookie_t cookie;

   cookie = xcb_copy_area_checked(c,
                                  src_drawable,
                                  dst_drawable,
                                  gc,
                                  src_x,
                                  src_y,
                                  dst_x,
                                  dst_y,
                                  width,
                                  height);
   xcb_discard_reply(c, cookie.sequence);
}

/**
 * Asks the driver to flush any queued work necessary for serializing with the
 * X command stream, and optionally the slightly more strict requirement of
 * glFlush() equivalence (which would require flushing even if nothing had
 * been drawn to a window system framebuffer, for example).
 */
void
loader_dri3_flush(struct loader_dri3_drawable *draw,
                  unsigned flags,
                  enum __DRI2throttleReason throttle_reason)
{
   /* The current context may be NULL (no context bound); nothing to flush
    * in that case.
    */
   __DRIcontext *dri_context = draw->vtable->get_dri_context(draw);

   if (dri_context) {
      draw->ext->flush->flush_with_flags(dri_context, draw->dri_drawable,
                                         flags, throttle_reason);
   }
}

/* Copy a sub-rectangle of the back buffer to the drawable (and to the fake
 * front, if any), flushing first and synchronizing with fences.
 */
void
loader_dri3_copy_sub_buffer(struct loader_dri3_drawable *draw,
                            int x, int y,
                            int width, int height,
                            bool flush)
{
   struct loader_dri3_buffer *back;
   unsigned flags = __DRI2_FLUSH_DRAWABLE;

   /* Check we have the right attachments */
   if (!draw->have_back || draw->is_pixmap)
      return;

   if (flush)
      flags |= __DRI2_FLUSH_CONTEXT;
   loader_dri3_flush(draw, flags, __DRI2_THROTTLE_SWAPBUFFER);

   back = dri3_find_back_alloc(draw);
   if (!back)
      return;

   /* Convert from GL's bottom-left origin to X's top-left origin. */
   y = draw->height - y - height;

   if (draw->is_different_gpu) {
      /* Update the linear buffer part of the back buffer
       * for the dri3_copy_area operation
       */
      (void) loader_dri3_blit_image(draw,
                                    back->linear_buffer,
                                    back->image,
                                    0, 0, back->width, back->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   }

   loader_dri3_swapbuffer_barrier(draw);
   dri3_fence_reset(draw->conn, back);
   dri3_copy_area(draw->conn,
                  back->pixmap,
                  draw->drawable,
                  dri3_drawable_gc(draw),
                  x, y, x, y, width, height);
   dri3_fence_trigger(draw->conn, back);
   /* Refresh the fake front (if present) after we just damaged the real
    * front.
    */
   if (draw->have_fake_front &&
       !loader_dri3_blit_image(draw,
                               dri3_fake_front_buffer(draw)->image,
                               back->image,
                               x, y, width, height,
                               x, y, __BLIT_FLAG_FLUSH) &&
       !draw->is_different_gpu) {
      dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
      dri3_copy_area(draw->conn,
                     back->pixmap,
                     dri3_fake_front_buffer(draw)->pixmap,
                     dri3_drawable_gc(draw),
                     x, y, x, y, width, height);
      dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
      dri3_fence_await(draw->conn, NULL, dri3_fake_front_buffer(draw));
   }
   dri3_fence_await(draw->conn, draw, back);
}

/* Server-side copy between two drawables, fenced via the fake front buffer. */
void
loader_dri3_copy_drawable(struct loader_dri3_drawable *draw,
                          xcb_drawable_t dest,
                          xcb_drawable_t src)
{
   loader_dri3_flush(draw, __DRI2_FLUSH_DRAWABLE, 0);

   dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
   dri3_copy_area(draw->conn,
                  src, dest,
                  dri3_drawable_gc(draw),
                  0, 0, 0, 0, draw->width, draw->height);
   dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
   dri3_fence_await(draw->conn, draw, dri3_fake_front_buffer(draw));
}

/* Update the fake front from the real front (X -> GL direction). */
void
loader_dri3_wait_x(struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *front;

   if (draw == NULL || !draw->have_fake_front)
      return;

   front = dri3_fake_front_buffer(draw);

   loader_dri3_copy_drawable(draw, front->pixmap, draw->drawable);

   /* In the psc->is_different_gpu case, the linear buffer has been updated,
    * but not yet the tiled buffer.
    * Copy back to the tiled buffer we use for rendering.
    * Note that we don't need flushing.
    */
   if (draw->is_different_gpu)
      (void) loader_dri3_blit_image(draw,
                                    front->image,
                                    front->linear_buffer,
                                    0, 0, front->width, front->height,
                                    0, 0, 0);
}

/* Update the real front from the fake front (GL -> X direction). */
void
loader_dri3_wait_gl(struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *front;

   if (draw == NULL || !draw->have_fake_front)
      return;

   front = dri3_fake_front_buffer(draw);

   /* In the psc->is_different_gpu case, we update the linear_buffer
    * before updating the real front.
    */
   if (draw->is_different_gpu)
      (void) loader_dri3_blit_image(draw,
                                    front->linear_buffer,
                                    front->image,
                                    0, 0, front->width, front->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   loader_dri3_swapbuffer_barrier(draw);
   loader_dri3_copy_drawable(draw, draw->drawable, front->pixmap);
}

/** dri3_flush_present_events
 *
 * Process any present events that have been received from the X server
 */
static void
dri3_flush_present_events(struct loader_dri3_drawable *draw)
{
   /* Check to see if any configuration changes have occurred
    * since we were last invoked
    */
   if (draw->has_event_waiter)
      return;

   if (draw->special_event) {
      xcb_generic_event_t *ev;

      while ((ev = xcb_poll_for_special_event(draw->conn,
                                              draw->special_event)) != NULL) {
         xcb_present_generic_event_t *ge = (void *) ev;
         dri3_handle_present_event(draw, ge);
      }
   }
}

/** loader_dri3_swap_buffers_msc
 *
 * Make the current back buffer visible using the present extension
 */
int64_t
loader_dri3_swap_buffers_msc(struct loader_dri3_drawable *draw,
                             int64_t target_msc, int64_t divisor,
                             int64_t remainder, unsigned flush_flags,
                             bool force_copy)
{
   struct loader_dri3_buffer *back;
   int64_t ret = 0;
   uint32_t options = XCB_PRESENT_OPTION_NONE;

   draw->vtable->flush_drawable(draw, flush_flags);

   back = dri3_find_back_alloc(draw);

   mtx_lock(&draw->mtx);
922 923 if (draw->adaptive_sync && !draw->adaptive_sync_active) { 924 set_adaptive_sync_property(draw->conn, draw->drawable, true); 925 draw->adaptive_sync_active = true; 926 } 927 928 if (draw->is_different_gpu && back) { 929 /* Update the linear buffer before presenting the pixmap */ 930 (void) loader_dri3_blit_image(draw, 931 back->linear_buffer, 932 back->image, 933 0, 0, back->width, back->height, 934 0, 0, __BLIT_FLAG_FLUSH); 935 } 936 937 /* If we need to preload the new back buffer, remember the source. 938 * The force_copy parameter is used by EGL to attempt to preserve 939 * the back buffer across a call to this function. 940 */ 941 if (draw->swap_method != __DRI_ATTRIB_SWAP_UNDEFINED || force_copy) 942 draw->cur_blit_source = LOADER_DRI3_BACK_ID(draw->cur_back); 943 944 /* Exchange the back and fake front. Even though the server knows about these 945 * buffers, it has no notion of back and fake front. 946 */ 947 if (back && draw->have_fake_front) { 948 struct loader_dri3_buffer *tmp; 949 950 tmp = dri3_fake_front_buffer(draw); 951 draw->buffers[LOADER_DRI3_FRONT_ID] = back; 952 draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)] = tmp; 953 954 if (draw->swap_method == __DRI_ATTRIB_SWAP_COPY || force_copy) 955 draw->cur_blit_source = LOADER_DRI3_FRONT_ID; 956 } 957 958 dri3_flush_present_events(draw); 959 960 if (back && !draw->is_pixmap) { 961 dri3_fence_reset(draw->conn, back); 962 963 /* Compute when we want the frame shown by taking the last known 964 * successful MSC and adding in a swap interval for each outstanding swap 965 * request. 
target_msc=divisor=remainder=0 means "Use glXSwapBuffers() 966 * semantic" 967 */ 968 ++draw->send_sbc; 969 if (target_msc == 0 && divisor == 0 && remainder == 0) 970 target_msc = draw->msc + draw->swap_interval * 971 (draw->send_sbc - draw->recv_sbc); 972 else if (divisor == 0 && remainder > 0) { 973 /* From the GLX_OML_sync_control spec: 974 * "If <divisor> = 0, the swap will occur when MSC becomes 975 * greater than or equal to <target_msc>." 976 * 977 * Note that there's no mention of the remainder. The Present 978 * extension throws BadValue for remainder != 0 with divisor == 0, so 979 * just drop the passed in value. 980 */ 981 remainder = 0; 982 } 983 984 /* From the GLX_EXT_swap_control spec 985 * and the EGL 1.4 spec (page 53): 986 * 987 * "If <interval> is set to a value of 0, buffer swaps are not 988 * synchronized to a video frame." 989 * 990 * Implementation note: It is possible to enable triple buffering 991 * behaviour by not using XCB_PRESENT_OPTION_ASYNC, but this should not be 992 * the default. 993 */ 994 if (draw->swap_interval == 0) 995 options |= XCB_PRESENT_OPTION_ASYNC; 996 997 /* If we need to populate the new back, but need to reuse the back 998 * buffer slot due to lack of local blit capabilities, make sure 999 * the server doesn't flip and we deadlock. 
1000 */ 1001 if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1) 1002 options |= XCB_PRESENT_OPTION_COPY; 1003#ifdef HAVE_DRI3_MODIFIERS 1004 if (draw->multiplanes_available) 1005 options |= XCB_PRESENT_OPTION_SUBOPTIMAL; 1006#endif 1007 back->busy = 1; 1008 back->last_swap = draw->send_sbc; 1009 xcb_present_pixmap(draw->conn, 1010 draw->drawable, 1011 back->pixmap, 1012 (uint32_t) draw->send_sbc, 1013 0, /* valid */ 1014 0, /* update */ 1015 0, /* x_off */ 1016 0, /* y_off */ 1017 None, /* target_crtc */ 1018 None, 1019 back->sync_fence, 1020 options, 1021 target_msc, 1022 divisor, 1023 remainder, 0, NULL); 1024 ret = (int64_t) draw->send_sbc; 1025 1026 /* Schedule a server-side back-preserving blit if necessary. 1027 * This happens iff all conditions below are satisfied: 1028 * a) We have a fake front, 1029 * b) We need to preserve the back buffer, 1030 * c) We don't have local blit capabilities. 1031 */ 1032 if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1 && 1033 draw->cur_blit_source != LOADER_DRI3_BACK_ID(draw->cur_back)) { 1034 struct loader_dri3_buffer *new_back = dri3_back_buffer(draw); 1035 struct loader_dri3_buffer *src = draw->buffers[draw->cur_blit_source]; 1036 1037 dri3_fence_reset(draw->conn, new_back); 1038 dri3_copy_area(draw->conn, src->pixmap, 1039 new_back->pixmap, 1040 dri3_drawable_gc(draw), 1041 0, 0, 0, 0, draw->width, draw->height); 1042 dri3_fence_trigger(draw->conn, new_back); 1043 new_back->last_swap = src->last_swap; 1044 } 1045 1046 xcb_flush(draw->conn); 1047 if (draw->stamp) 1048 ++(*draw->stamp); 1049 } 1050 mtx_unlock(&draw->mtx); 1051 1052 draw->ext->flush->invalidate(draw->dri_drawable); 1053 1054 return ret; 1055} 1056 1057int 1058loader_dri3_query_buffer_age(struct loader_dri3_drawable *draw) 1059{ 1060 struct loader_dri3_buffer *back = dri3_find_back_alloc(draw); 1061 int ret; 1062 1063 mtx_lock(&draw->mtx); 1064 ret = (!back || back->last_swap == 0) ? 
0 : 1065 draw->send_sbc - back->last_swap + 1; 1066 mtx_unlock(&draw->mtx); 1067 1068 return ret; 1069} 1070 1071/** loader_dri3_open 1072 * 1073 * Wrapper around xcb_dri3_open 1074 */ 1075int 1076loader_dri3_open(xcb_connection_t *conn, 1077 xcb_window_t root, 1078 uint32_t provider) 1079{ 1080 xcb_dri3_open_cookie_t cookie; 1081 xcb_dri3_open_reply_t *reply; 1082 int fd; 1083 1084 cookie = xcb_dri3_open(conn, 1085 root, 1086 provider); 1087 1088 reply = xcb_dri3_open_reply(conn, cookie, NULL); 1089 if (!reply) 1090 return -1; 1091 1092 if (reply->nfd != 1) { 1093 free(reply); 1094 return -1; 1095 } 1096 1097 fd = xcb_dri3_open_reply_fds(conn, reply)[0]; 1098 free(reply); 1099 fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC); 1100 1101 return fd; 1102} 1103 1104static uint32_t 1105dri3_cpp_for_format(uint32_t format) { 1106 switch (format) { 1107 case __DRI_IMAGE_FORMAT_R8: 1108 return 1; 1109 case __DRI_IMAGE_FORMAT_RGB565: 1110 case __DRI_IMAGE_FORMAT_GR88: 1111 return 2; 1112 case __DRI_IMAGE_FORMAT_XRGB8888: 1113 case __DRI_IMAGE_FORMAT_ARGB8888: 1114 case __DRI_IMAGE_FORMAT_ABGR8888: 1115 case __DRI_IMAGE_FORMAT_XBGR8888: 1116 case __DRI_IMAGE_FORMAT_XRGB2101010: 1117 case __DRI_IMAGE_FORMAT_ARGB2101010: 1118 case __DRI_IMAGE_FORMAT_XBGR2101010: 1119 case __DRI_IMAGE_FORMAT_ABGR2101010: 1120 case __DRI_IMAGE_FORMAT_SARGB8: 1121 case __DRI_IMAGE_FORMAT_SABGR8: 1122 return 4; 1123 case __DRI_IMAGE_FORMAT_NONE: 1124 default: 1125 return 0; 1126 } 1127} 1128 1129/* Map format of render buffer to corresponding format for the linear_buffer 1130 * used for sharing with the display gpu of a Prime setup (== is_different_gpu). 1131 * Usually linear_format == format, except for depth >= 30 formats, where 1132 * different gpu vendors have different preferences wrt. color channel ordering. 
 */
static uint32_t
dri3_linear_format_for_format(struct loader_dri3_drawable *draw, uint32_t format)
{
   switch (format) {
   case  __DRI_IMAGE_FORMAT_XRGB2101010:
   case  __DRI_IMAGE_FORMAT_XBGR2101010:
      /* Different preferred formats for different hw */
      if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
         return __DRI_IMAGE_FORMAT_XBGR2101010;
      else
         return __DRI_IMAGE_FORMAT_XRGB2101010;

   case  __DRI_IMAGE_FORMAT_ARGB2101010:
   case  __DRI_IMAGE_FORMAT_ABGR2101010:
      /* Different preferred formats for different hw */
      if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
         return __DRI_IMAGE_FORMAT_ABGR2101010;
      else
         return __DRI_IMAGE_FORMAT_ARGB2101010;

   default:
      return format;
   }
}

/* the DRIimage createImage function takes __DRI_IMAGE_FORMAT codes, while
 * the createImageFromFds call takes __DRI_IMAGE_FOURCC codes. To avoid
 * complete confusion, just deal in __DRI_IMAGE_FORMAT codes for now and
 * translate to __DRI_IMAGE_FOURCC codes in the call to createImageFromFds
 */
static int
image_format_to_fourcc(int format)
{

   /* Convert from __DRI_IMAGE_FORMAT to __DRI_IMAGE_FOURCC (sigh) */
   switch (format) {
   case __DRI_IMAGE_FORMAT_SARGB8: return __DRI_IMAGE_FOURCC_SARGB8888;
   case __DRI_IMAGE_FORMAT_SABGR8: return __DRI_IMAGE_FOURCC_SABGR8888;
   case __DRI_IMAGE_FORMAT_RGB565: return __DRI_IMAGE_FOURCC_RGB565;
   case __DRI_IMAGE_FORMAT_XRGB8888: return __DRI_IMAGE_FOURCC_XRGB8888;
   case __DRI_IMAGE_FORMAT_ARGB8888: return __DRI_IMAGE_FOURCC_ARGB8888;
   case __DRI_IMAGE_FORMAT_ABGR8888: return __DRI_IMAGE_FOURCC_ABGR8888;
   case __DRI_IMAGE_FORMAT_XBGR8888: return __DRI_IMAGE_FOURCC_XBGR8888;
   case __DRI_IMAGE_FORMAT_XRGB2101010: return __DRI_IMAGE_FOURCC_XRGB2101010;
   case __DRI_IMAGE_FORMAT_ARGB2101010: return __DRI_IMAGE_FOURCC_ARGB2101010;
   case __DRI_IMAGE_FORMAT_XBGR2101010:
return __DRI_IMAGE_FOURCC_XBGR2101010;
   case __DRI_IMAGE_FORMAT_ABGR2101010: return __DRI_IMAGE_FOURCC_ABGR2101010;
   }
   /* 0 means "no fourcc" for unknown formats. */
   return 0;
}

#ifdef HAVE_DRI3_MODIFIERS
/* Returns true iff any of the `count` modifiers proposed by the X server
 * is also reported as supported by the driver for `format` (a fourcc).
 */
static bool
has_supported_modifier(struct loader_dri3_drawable *draw, unsigned int format,
                       uint64_t *modifiers, uint32_t count)
{
   uint64_t *supported_modifiers;
   int32_t supported_modifiers_count;
   bool found = false;
   /* NOTE(review): `j < count` compares signed int against uint32_t; fine
    * for realistic modifier counts but flagged for awareness. */
   int i, j;

   /* First query only the count, then fetch the actual list. */
   if (!draw->ext->image->queryDmaBufModifiers(draw->dri_screen,
                                               format, 0, NULL, NULL,
                                               &supported_modifiers_count) ||
       supported_modifiers_count == 0)
      return false;

   supported_modifiers = malloc(supported_modifiers_count * sizeof(uint64_t));
   if (!supported_modifiers)
      return false;

   draw->ext->image->queryDmaBufModifiers(draw->dri_screen, format,
                                          supported_modifiers_count,
                                          supported_modifiers, NULL,
                                          &supported_modifiers_count);

   for (i = 0; !found && i < supported_modifiers_count; i++) {
      for (j = 0; !found && j < count; j++) {
         if (supported_modifiers[i] == modifiers[j])
            found = true;
      }
   }

   free(supported_modifiers);
   return found;
}
#endif

/** loader_dri3_alloc_render_buffer
 *
 * Use the driver createImage function to construct a __DRIimage, then
 * get a file descriptor for that and create an X pixmap from that
 *
 * Allocate an xshmfence for synchronization
 */
static struct loader_dri3_buffer *
dri3_alloc_render_buffer(struct loader_dri3_drawable *draw, unsigned int format,
                         int width, int height, int depth)
{
   struct loader_dri3_buffer *buffer;
   __DRIimage *pixmap_buffer;
   xcb_pixmap_t pixmap;
   xcb_sync_fence_t sync_fence;
   struct xshmfence *shm_fence;
   int buffer_fds[4], fence_fd;
   int num_planes = 0;
   int i, mod;
   int ret;

   /* Create an xshmfence object and
    * prepare to send that to the X server
*/ 1246 1247 fence_fd = xshmfence_alloc_shm(); 1248 if (fence_fd < 0) 1249 return NULL; 1250 1251 shm_fence = xshmfence_map_shm(fence_fd); 1252 if (shm_fence == NULL) 1253 goto no_shm_fence; 1254 1255 /* Allocate the image from the driver 1256 */ 1257 buffer = calloc(1, sizeof *buffer); 1258 if (!buffer) 1259 goto no_buffer; 1260 1261 buffer->cpp = dri3_cpp_for_format(format); 1262 if (!buffer->cpp) 1263 goto no_image; 1264 1265 if (!draw->is_different_gpu) { 1266#ifdef HAVE_DRI3_MODIFIERS 1267 if (draw->multiplanes_available && 1268 draw->ext->image->base.version >= 15 && 1269 draw->ext->image->queryDmaBufModifiers && 1270 draw->ext->image->createImageWithModifiers) { 1271 xcb_dri3_get_supported_modifiers_cookie_t mod_cookie; 1272 xcb_dri3_get_supported_modifiers_reply_t *mod_reply; 1273 xcb_generic_error_t *error = NULL; 1274 uint64_t *modifiers = NULL; 1275 uint32_t count = 0; 1276 1277 mod_cookie = xcb_dri3_get_supported_modifiers(draw->conn, 1278 draw->window, 1279 depth, buffer->cpp * 8); 1280 mod_reply = xcb_dri3_get_supported_modifiers_reply(draw->conn, 1281 mod_cookie, 1282 &error); 1283 if (!mod_reply) 1284 goto no_image; 1285 1286 if (mod_reply->num_window_modifiers) { 1287 count = mod_reply->num_window_modifiers; 1288 modifiers = malloc(count * sizeof(uint64_t)); 1289 if (!modifiers) { 1290 free(mod_reply); 1291 goto no_image; 1292 } 1293 1294 memcpy(modifiers, 1295 xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply), 1296 count * sizeof(uint64_t)); 1297 1298 if (!has_supported_modifier(draw, image_format_to_fourcc(format), 1299 modifiers, count)) { 1300 free(modifiers); 1301 count = 0; 1302 modifiers = NULL; 1303 } 1304 } 1305 1306 if (mod_reply->num_screen_modifiers && modifiers == NULL) { 1307 count = mod_reply->num_screen_modifiers; 1308 modifiers = malloc(count * sizeof(uint64_t)); 1309 if (!modifiers) { 1310 free(modifiers); 1311 free(mod_reply); 1312 goto no_image; 1313 } 1314 1315 memcpy(modifiers, 1316 
xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
                   count * sizeof(uint64_t));
         }

         free(mod_reply);

         /* don't use createImageWithModifiers() if we have no
          * modifiers, other things depend on the use flags when
          * there are no modifiers to know that a buffer can be
          * shared.
          */
         if (modifiers) {
            buffer->image = draw->ext->image->createImageWithModifiers(draw->dri_screen,
                                                                       width, height,
                                                                       format,
                                                                       modifiers,
                                                                       count,
                                                                       buffer);
         }

         free(modifiers);
      }
#endif
      /* Fallback path: no modifier support, allocate a shareable,
       * scanout-capable image. */
      if (!buffer->image)
         buffer->image = draw->ext->image->createImage(draw->dri_screen,
                                                       width, height,
                                                       format,
                                                       __DRI_IMAGE_USE_SHARE |
                                                       __DRI_IMAGE_USE_SCANOUT |
                                                       __DRI_IMAGE_USE_BACKBUFFER,
                                                       buffer);

      pixmap_buffer = buffer->image;

      if (!buffer->image)
         goto no_image;
   } else {
      /* Prime setup: render into a tiled image local to our gpu and keep a
       * separate linear image for sharing with the display gpu. */
      buffer->image = draw->ext->image->createImage(draw->dri_screen,
                                                    width, height,
                                                    format,
                                                    0,
                                                    buffer);

      if (!buffer->image)
         goto no_image;

      buffer->linear_buffer =
         draw->ext->image->createImage(draw->dri_screen,
                                       width, height,
                                       dri3_linear_format_for_format(draw, format),
                                       __DRI_IMAGE_USE_SHARE |
                                       __DRI_IMAGE_USE_LINEAR |
                                       __DRI_IMAGE_USE_BACKBUFFER,
                                       buffer);
      pixmap_buffer = buffer->linear_buffer;

      if (!buffer->linear_buffer)
         goto no_linear_buffer;
   }

   /* X want some information about the planes, so ask the image for it
    */
   if (!draw->ext->image->queryImage(pixmap_buffer, __DRI_IMAGE_ATTRIB_NUM_PLANES,
                                     &num_planes))
      num_planes = 1;

   for (i = 0; i < num_planes; i++) {
      __DRIimage *image = draw->ext->image->fromPlanar(pixmap_buffer, i, NULL);

      if (!image) {
         assert(i == 0);
         image = pixmap_buffer;
      }

      ret = draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_FD,
                                         &buffer_fds[i]);
      ret &=
draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_STRIDE,
                                   &buffer->strides[i]);
      ret &= draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_OFFSET,
                                          &buffer->offsets[i]);
      if (image != pixmap_buffer)
         draw->ext->image->destroyImage(image);

      if (!ret)
         goto no_buffer_attrib;
   }

   /* Assemble the 64-bit modifier from the two 32-bit query halves. */
   ret = draw->ext->image->queryImage(pixmap_buffer,
                                      __DRI_IMAGE_ATTRIB_MODIFIER_UPPER, &mod);
   buffer->modifier = (uint64_t) mod << 32;
   ret &= draw->ext->image->queryImage(pixmap_buffer,
                                       __DRI_IMAGE_ATTRIB_MODIFIER_LOWER, &mod);
   buffer->modifier |= (uint64_t)(mod & 0xffffffff);

   if (!ret)
      buffer->modifier = DRM_FORMAT_MOD_INVALID;

   pixmap = xcb_generate_id(draw->conn);
#ifdef HAVE_DRI3_MODIFIERS
   if (draw->multiplanes_available &&
       buffer->modifier != DRM_FORMAT_MOD_INVALID) {
      xcb_dri3_pixmap_from_buffers(draw->conn,
                                   pixmap,
                                   draw->window,
                                   num_planes,
                                   width, height,
                                   buffer->strides[0], buffer->offsets[0],
                                   buffer->strides[1], buffer->offsets[1],
                                   buffer->strides[2], buffer->offsets[2],
                                   buffer->strides[3], buffer->offsets[3],
                                   depth, buffer->cpp * 8,
                                   buffer->modifier,
                                   buffer_fds);
   } else
#endif
   {
      xcb_dri3_pixmap_from_buffer(draw->conn,
                                  pixmap,
                                  draw->drawable,
                                  buffer->size,
                                  width, height, buffer->strides[0],
                                  depth, buffer->cpp * 8,
                                  buffer_fds[0]);
   }

   xcb_dri3_fence_from_fd(draw->conn,
                          pixmap,
                          (sync_fence = xcb_generate_id(draw->conn)),
                          false,
                          fence_fd);

   buffer->pixmap = pixmap;
   buffer->own_pixmap = true;
   buffer->sync_fence = sync_fence;
   buffer->shm_fence = shm_fence;
   buffer->width = width;
   buffer->height = height;

   /* Mark the buffer as idle
    */
   dri3_fence_set(buffer);

   return buffer;

no_buffer_attrib:
   /* Close the plane fds obtained so far (i is the failing plane index).
    * NOTE(review): if the FD query itself failed for plane i, buffer_fds[i]
    * may be unset here — confirm against the image extension's contract. */
   do {
      close(buffer_fds[i]);
   } while (--i >= 0);
   draw->ext->image->destroyImage(pixmap_buffer);
no_linear_buffer:
   /* On a Prime setup buffer->image and pixmap_buffer are distinct, so the
    * render image needs a separate destroy. */
   if (draw->is_different_gpu)
      draw->ext->image->destroyImage(buffer->image);
no_image:
   free(buffer);
no_buffer:
   xshmfence_unmap_shm(shm_fence);
no_shm_fence:
   close(fence_fd);
   return NULL;
}

/** dri3_update_drawable
 *
 * Called the first time we use the drawable and then
 * after we receive present configure notify events to
 * track the geometry of the drawable
 */
static int
dri3_update_drawable(struct loader_dri3_drawable *draw)
{
   mtx_lock(&draw->mtx);
   if (draw->first_init) {
      xcb_get_geometry_cookie_t geom_cookie;
      xcb_get_geometry_reply_t *geom_reply;
      xcb_void_cookie_t cookie;
      xcb_generic_error_t *error;
      xcb_present_query_capabilities_cookie_t present_capabilities_cookie;
      xcb_present_query_capabilities_reply_t *present_capabilities_reply;
      xcb_window_t root_win;

      draw->first_init = false;

      /* Try to select for input on the window.
       *
       * If the drawable is a window, this will get our events
       * delivered.
       *
       * Otherwise, we'll get a BadWindow error back from this request which
       * will let us know that the drawable is a pixmap instead.
 */

      draw->eid = xcb_generate_id(draw->conn);
      cookie =
         xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
                                          XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                                          XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                                          XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

      present_capabilities_cookie =
         xcb_present_query_capabilities(draw->conn, draw->drawable);

      /* Create an XCB event queue to hold present events outside of the usual
       * application event queue
       */
      draw->special_event = xcb_register_for_special_xge(draw->conn,
                                                         &xcb_present_id,
                                                         draw->eid,
                                                         draw->stamp);
      geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);

      geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);

      if (!geom_reply) {
         mtx_unlock(&draw->mtx);
         return false;
      }
      /* Cache the drawable geometry and tell the owner about the size. */
      draw->width = geom_reply->width;
      draw->height = geom_reply->height;
      draw->depth = geom_reply->depth;
      draw->vtable->set_drawable_size(draw, draw->width, draw->height);
      root_win = geom_reply->root;

      free(geom_reply);

      draw->is_pixmap = false;

      /* Check to see if our select input call failed. If it failed with a
       * BadWindow error, then assume the drawable is a pixmap.
Destroy the
       * special event queue created above and mark the drawable as a pixmap
       */

      error = xcb_request_check(draw->conn, cookie);

      present_capabilities_reply =
         xcb_present_query_capabilities_reply(draw->conn,
                                              present_capabilities_cookie,
                                              NULL);

      if (present_capabilities_reply) {
         draw->present_capabilities = present_capabilities_reply->capabilities;
         free(present_capabilities_reply);
      } else
         draw->present_capabilities = 0;

      if (error) {
         /* Any error other than BadWindow is a real failure. */
         if (error->error_code != BadWindow) {
            free(error);
            mtx_unlock(&draw->mtx);
            return false;
         }
         free(error);
         draw->is_pixmap = true;
         xcb_unregister_for_special_event(draw->conn, draw->special_event);
         draw->special_event = NULL;
      }

      /* Pixmaps get modifier queries etc. routed via the root window. */
      if (draw->is_pixmap)
         draw->window = root_win;
      else
         draw->window = draw->drawable;
   }
   dri3_flush_present_events(draw);
   mtx_unlock(&draw->mtx);
   return true;
}

/** loader_dri3_create_image
 *
 * Wrap the DRM object for a pixmap (one fd, single plane) in a __DRIimage.
 * Ownership of fds[0] stays here: it is closed before returning.
 */
__DRIimage *
loader_dri3_create_image(xcb_connection_t *c,
                         xcb_dri3_buffer_from_pixmap_reply_t *bp_reply,
                         unsigned int format,
                         __DRIscreen *dri_screen,
                         const __DRIimageExtension *image,
                         void *loaderPrivate)
{
   int *fds;
   __DRIimage *image_planar, *ret;
   int stride, offset;

   /* Get an FD for the pixmap object
    */
   fds = xcb_dri3_buffer_from_pixmap_reply_fds(c, bp_reply);

   stride = bp_reply->stride;
   offset = 0;

   /* createImageFromFds creates a wrapper __DRIimage structure which
    * can deal with multiple planes for things like Yuv images. So, once
    * we've gotten the planar wrapper, pull the single plane out of it and
    * discard the wrapper.
 */
   image_planar = image->createImageFromFds(dri_screen,
                                            bp_reply->width,
                                            bp_reply->height,
                                            image_format_to_fourcc(format),
                                            fds, 1,
                                            &stride, &offset, loaderPrivate);
   /* The driver dups the fd; we always close our copy. */
   close(fds[0]);
   if (!image_planar)
      return NULL;

   ret = image->fromPlanar(image_planar, 0, loaderPrivate);

   if (!ret)
      ret = image_planar;
   else
      image->destroyImage(image_planar);

   return ret;
}

#ifdef HAVE_DRI3_MODIFIERS
/** loader_dri3_create_image_from_buffers
 *
 * Multi-plane variant: wraps up to four dma-buf fds plus a format
 * modifier in a single __DRIimage via createImageFromDmaBufs2.
 * All reply fds are closed before returning.
 */
__DRIimage *
loader_dri3_create_image_from_buffers(xcb_connection_t *c,
                                      xcb_dri3_buffers_from_pixmap_reply_t *bp_reply,
                                      unsigned int format,
                                      __DRIscreen *dri_screen,
                                      const __DRIimageExtension *image,
                                      void *loaderPrivate)
{
   __DRIimage *ret;
   int *fds;
   uint32_t *strides_in, *offsets_in;
   int strides[4], offsets[4];
   unsigned error;
   int i;

   /* The local stride/offset arrays hold at most four planes. */
   if (bp_reply->nfd > 4)
      return NULL;

   fds = xcb_dri3_buffers_from_pixmap_reply_fds(c, bp_reply);
   strides_in = xcb_dri3_buffers_from_pixmap_strides(bp_reply);
   offsets_in = xcb_dri3_buffers_from_pixmap_offsets(bp_reply);
   for (i = 0; i < bp_reply->nfd; i++) {
      strides[i] = strides_in[i];
      offsets[i] = offsets_in[i];
   }

   ret = image->createImageFromDmaBufs2(dri_screen,
                                        bp_reply->width,
                                        bp_reply->height,
                                        image_format_to_fourcc(format),
                                        bp_reply->modifier,
                                        fds, bp_reply->nfd,
                                        strides, offsets,
                                        0, 0, 0, 0, /* UNDEFINED */
                                        &error, loaderPrivate);

   for (i = 0; i < bp_reply->nfd; i++)
      close(fds[i]);

   return ret;
}
#endif

/** dri3_get_pixmap_buffer
 *
 * Get the DRM object for a pixmap from the X server and
 * wrap that with a __DRIimage structure using createImageFromFds
 */
static struct loader_dri3_buffer *
dri3_get_pixmap_buffer(__DRIdrawable *driDrawable, unsigned int format,
                       enum loader_dri3_buffer_type buffer_type,
                       struct
loader_dri3_drawable *draw)
{
   int buf_id = loader_dri3_pixmap_buf_id(buffer_type);
   struct loader_dri3_buffer *buffer = draw->buffers[buf_id];
   xcb_drawable_t pixmap;
   xcb_sync_fence_t sync_fence;
   struct xshmfence *shm_fence;
   int width;
   int height;
   int fence_fd;
   __DRIscreen *cur_screen;

   /* Cached from a previous call; nothing to do. */
   if (buffer)
      return buffer;

   pixmap = draw->drawable;

   buffer = calloc(1, sizeof *buffer);
   if (!buffer)
      goto no_buffer;

   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      goto no_fence;
   shm_fence = xshmfence_map_shm(fence_fd);
   if (shm_fence == NULL) {
      close (fence_fd);
      goto no_fence;
   }

   /* Get the currently-bound screen or revert to using the drawable's screen if
    * no contexts are currently bound. The latter case is at least necessary for
    * obs-studio, when using Window Capture (Xcomposite) as a Source.
    */
   cur_screen = draw->vtable->get_dri_screen();
   if (!cur_screen) {
      cur_screen = draw->dri_screen;
   }

   xcb_dri3_fence_from_fd(draw->conn,
                          pixmap,
                          (sync_fence = xcb_generate_id(draw->conn)),
                          false,
                          fence_fd);
#ifdef HAVE_DRI3_MODIFIERS
   if (draw->multiplanes_available &&
       draw->ext->image->base.version >= 15 &&
       draw->ext->image->createImageFromDmaBufs2) {
      xcb_dri3_buffers_from_pixmap_cookie_t bps_cookie;
      xcb_dri3_buffers_from_pixmap_reply_t *bps_reply;

      bps_cookie = xcb_dri3_buffers_from_pixmap(draw->conn, pixmap);
      bps_reply = xcb_dri3_buffers_from_pixmap_reply(draw->conn, bps_cookie,
                                                     NULL);
      if (!bps_reply)
         goto no_image;
      buffer->image =
         loader_dri3_create_image_from_buffers(draw->conn, bps_reply, format,
                                               cur_screen, draw->ext->image,
                                               buffer);
      width = bps_reply->width;
      height = bps_reply->height;
      free(bps_reply);
   } else
#endif
   {
      xcb_dri3_buffer_from_pixmap_cookie_t bp_cookie;
      xcb_dri3_buffer_from_pixmap_reply_t *bp_reply;

      bp_cookie = xcb_dri3_buffer_from_pixmap(draw->conn, pixmap);
      bp_reply = xcb_dri3_buffer_from_pixmap_reply(draw->conn, bp_cookie, NULL);
      if (!bp_reply)
         goto no_image;

      buffer->image = loader_dri3_create_image(draw->conn, bp_reply, format,
                                               cur_screen, draw->ext->image,
                                               buffer);
      width = bp_reply->width;
      height = bp_reply->height;
      free(bp_reply);
   }

   if (!buffer->image)
      goto no_image;

   buffer->pixmap = pixmap;
   /* The pixmap belongs to the application; don't free it on teardown. */
   buffer->own_pixmap = false;
   buffer->width = width;
   buffer->height = height;
   buffer->shm_fence = shm_fence;
   buffer->sync_fence = sync_fence;

   draw->buffers[buf_id] = buffer;

   return buffer;

no_image:
   xcb_sync_destroy_fence(draw->conn, sync_fence);
   xshmfence_unmap_shm(shm_fence);
no_fence:
   free(buffer);
no_buffer:
   return NULL;
}

/** dri3_get_buffer
 *
 * Find a front or back buffer, allocating new ones as necessary
 */
static struct loader_dri3_buffer *
dri3_get_buffer(__DRIdrawable *driDrawable,
                unsigned int format,
                enum loader_dri3_buffer_type buffer_type,
                struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *buffer;
   /* Back buffers may still be busy in the server; fronts usually not. */
   bool fence_await = buffer_type == loader_dri3_buffer_back;
   int buf_id;

   if (buffer_type == loader_dri3_buffer_back) {
      draw->back_format = format;

      buf_id = dri3_find_back(draw);

      if (buf_id < 0)
         return NULL;
   } else {
      buf_id = LOADER_DRI3_FRONT_ID;
   }

   buffer = draw->buffers[buf_id];

   /* Allocate a new buffer if there isn't an old one, if that
    * old one is the wrong size, or if it's suboptimal
    */
   if (!buffer || buffer->width != draw->width ||
       buffer->height != draw->height ||
       buffer->reallocate) {
      struct loader_dri3_buffer *new_buffer;

      /* Allocate the new
buffers
       */
      new_buffer = dri3_alloc_render_buffer(draw,
                                            format,
                                            draw->width,
                                            draw->height,
                                            draw->depth);
      if (!new_buffer)
         return NULL;

      /* When resizing, copy the contents of the old buffer, waiting for that
       * copy to complete using our fences before proceeding
       */
      if ((buffer_type == loader_dri3_buffer_back ||
           (buffer_type == loader_dri3_buffer_front && draw->have_fake_front))
          && buffer) {

         /* Fill the new buffer with data from an old buffer */
         if (!loader_dri3_blit_image(draw,
                                     new_buffer->image,
                                     buffer->image,
                                     0, 0, draw->width, draw->height,
                                     0, 0, 0) &&
             !buffer->linear_buffer) {
            /* Local blit unavailable: fall back to a server-side copy. */
            dri3_fence_reset(draw->conn, new_buffer);
            dri3_copy_area(draw->conn,
                           buffer->pixmap,
                           new_buffer->pixmap,
                           dri3_drawable_gc(draw),
                           0, 0, 0, 0,
                           draw->width, draw->height);
            dri3_fence_trigger(draw->conn, new_buffer);
            fence_await = true;
         }
         dri3_free_render_buffer(draw, buffer);
      } else if (buffer_type == loader_dri3_buffer_front) {
         /* Fill the new fake front with data from a real front */
         loader_dri3_swapbuffer_barrier(draw);
         dri3_fence_reset(draw->conn, new_buffer);
         dri3_copy_area(draw->conn,
                        draw->drawable,
                        new_buffer->pixmap,
                        dri3_drawable_gc(draw),
                        0, 0, 0, 0,
                        draw->width, draw->height);
         dri3_fence_trigger(draw->conn, new_buffer);

         if (new_buffer->linear_buffer) {
            /* Prime: propagate the copied pixels from the shared linear
             * image into the tiled render image. */
            dri3_fence_await(draw->conn, draw, new_buffer);
            (void) loader_dri3_blit_image(draw,
                                          new_buffer->image,
                                          new_buffer->linear_buffer,
                                          0, 0, draw->width, draw->height,
                                          0, 0, 0);
         } else
            fence_await = true;
      }
      buffer = new_buffer;
      draw->buffers[buf_id] = buffer;
   }

   if (fence_await)
      dri3_fence_await(draw->conn, draw, buffer);

   /*
    * Do we need to preserve the content of a previous buffer?
 *
    * Note that this blit is needed only to avoid a wait for a buffer that
    * is currently in the flip chain or being scanned out from. That's really
    * a tradeoff. If we're ok with the wait we can reduce the number of back
    * buffers to 1 for SWAP_EXCHANGE, and 1 for SWAP_COPY,
    * but in the latter case we must disallow page-flipping.
    */
   if (buffer_type == loader_dri3_buffer_back &&
       draw->cur_blit_source != -1 &&
       draw->buffers[draw->cur_blit_source] &&
       buffer != draw->buffers[draw->cur_blit_source]) {

      struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];

      /* Avoid flushing here. Will probably do good for tiling hardware. */
      (void) loader_dri3_blit_image(draw,
                                    buffer->image,
                                    source->image,
                                    0, 0, draw->width, draw->height,
                                    0, 0, 0);
      buffer->last_swap = source->last_swap;
      draw->cur_blit_source = -1;
   }
   /* Return the requested buffer */
   return buffer;
}

/** dri3_free_buffers
 *
 * Free the front buffer or all of the back buffers. Used
 * when the application changes which buffers it needs
 */
static void
dri3_free_buffers(__DRIdrawable *driDrawable,
                  enum loader_dri3_buffer_type buffer_type,
                  struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *buffer;
   int first_id;
   int n_id;
   int buf_id;

   switch (buffer_type) {
   case loader_dri3_buffer_back:
      first_id = LOADER_DRI3_BACK_ID(0);
      n_id = LOADER_DRI3_MAX_BACK;
      draw->cur_blit_source = -1;
      break;
   case loader_dri3_buffer_front:
      first_id = LOADER_DRI3_FRONT_ID;
      /* Don't free a fake front holding new backbuffer content. */
      n_id = (draw->cur_blit_source == LOADER_DRI3_FRONT_ID) ?
0 : 1; 1936 } 1937 1938 for (buf_id = first_id; buf_id < first_id + n_id; buf_id++) { 1939 buffer = draw->buffers[buf_id]; 1940 if (buffer) { 1941 dri3_free_render_buffer(draw, buffer); 1942 draw->buffers[buf_id] = NULL; 1943 } 1944 } 1945} 1946 1947/** loader_dri3_get_buffers 1948 * 1949 * The published buffer allocation API. 1950 * Returns all of the necessary buffers, allocating 1951 * as needed. 1952 */ 1953int 1954loader_dri3_get_buffers(__DRIdrawable *driDrawable, 1955 unsigned int format, 1956 uint32_t *stamp, 1957 void *loaderPrivate, 1958 uint32_t buffer_mask, 1959 struct __DRIimageList *buffers) 1960{ 1961 struct loader_dri3_drawable *draw = loaderPrivate; 1962 struct loader_dri3_buffer *front, *back; 1963 int buf_id; 1964 1965 buffers->image_mask = 0; 1966 buffers->front = NULL; 1967 buffers->back = NULL; 1968 1969 front = NULL; 1970 back = NULL; 1971 1972 if (!dri3_update_drawable(draw)) 1973 return false; 1974 1975 dri3_update_num_back(draw); 1976 1977 /* Free no longer needed back buffers */ 1978 for (buf_id = draw->num_back; buf_id < LOADER_DRI3_MAX_BACK; buf_id++) { 1979 if (draw->cur_blit_source != buf_id && draw->buffers[buf_id]) { 1980 dri3_free_render_buffer(draw, draw->buffers[buf_id]); 1981 draw->buffers[buf_id] = NULL; 1982 } 1983 } 1984 1985 /* pixmaps always have front buffers. 1986 * Exchange swaps also mandate fake front buffers. 1987 */ 1988 if (draw->is_pixmap || draw->swap_method == __DRI_ATTRIB_SWAP_EXCHANGE) 1989 buffer_mask |= __DRI_IMAGE_BUFFER_FRONT; 1990 1991 if (buffer_mask & __DRI_IMAGE_BUFFER_FRONT) { 1992 /* All pixmaps are owned by the server gpu. 1993 * When we use a different gpu, we can't use the pixmap 1994 * as buffer since it is potentially tiled a way 1995 * our device can't understand. In this case, use 1996 * a fake front buffer. Hopefully the pixmap 1997 * content will get synced with the fake front 1998 * buffer. 
 */
      if (draw->is_pixmap && !draw->is_different_gpu)
         front = dri3_get_pixmap_buffer(driDrawable,
                                        format,
                                        loader_dri3_buffer_front,
                                        draw);
      else
         front = dri3_get_buffer(driDrawable,
                                 format,
                                 loader_dri3_buffer_front,
                                 draw);

      if (!front)
         return false;
   } else {
      dri3_free_buffers(driDrawable, loader_dri3_buffer_front, draw);
      draw->have_fake_front = 0;
   }

   if (buffer_mask & __DRI_IMAGE_BUFFER_BACK) {
      back = dri3_get_buffer(driDrawable,
                             format,
                             loader_dri3_buffer_back,
                             draw);
      if (!back)
         return false;
      draw->have_back = 1;
   } else {
      dri3_free_buffers(driDrawable, loader_dri3_buffer_back, draw);
      draw->have_back = 0;
   }

   if (front) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_FRONT;
      buffers->front = front->image;
      draw->have_fake_front = draw->is_different_gpu || !draw->is_pixmap;
   }

   if (back) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_BACK;
      buffers->back = back->image;
   }

   draw->stamp = stamp;

   return true;
}

/** loader_dri3_update_drawable_geometry
 *
 * Get the current drawable geometry.
 */
void
loader_dri3_update_drawable_geometry(struct loader_dri3_drawable *draw)
{
   xcb_get_geometry_cookie_t geom_cookie;
   xcb_get_geometry_reply_t *geom_reply;

   geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);

   geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);

   if (geom_reply) {
      draw->width = geom_reply->width;
      draw->height = geom_reply->height;
      draw->vtable->set_drawable_size(draw, draw->width, draw->height);
      /* Geometry changed: buffers need revalidation on next use. */
      draw->ext->flush->invalidate(draw->dri_drawable);

      free(geom_reply);
   }
}


/**
 * Make sure the server has flushed all pending swap buffers to hardware
 * for this drawable.
Ideally we'd want to send an X protocol request to
 * have the server block our connection until the swaps are complete. That
 * would avoid the potential round-trip here.
 */
void
loader_dri3_swapbuffer_barrier(struct loader_dri3_drawable *draw)
{
   int64_t ust, msc, sbc;

   /* Waiting for SBC 0 blocks until all queued presents have completed. */
   (void) loader_dri3_wait_for_sbc(draw, 0, &ust, &msc, &sbc);
}

/**
 * Perform any cleanup associated with a close screen operation.
 * \param dri_screen[in,out] Pointer to __DRIscreen about to be closed.
 *
 * This function destroys the screen's cached swap context if any.
 */
void
loader_dri3_close_screen(__DRIscreen *dri_screen)
{
   mtx_lock(&blit_context.mtx);
   /* Only tear down the cached blit context if it belongs to this screen. */
   if (blit_context.ctx && blit_context.cur_screen == dri_screen) {
      blit_context.core->destroyContext(blit_context.ctx);
      blit_context.ctx = NULL;
   }
   mtx_unlock(&blit_context.mtx);
}

/**
 * Find a backbuffer slot - potentially allocating a back buffer
 *
 * \param draw[in,out] Pointer to the drawable for which to find back.
 * \return Pointer to a new back buffer or NULL if allocation failed or was
 * not mandated.
 *
 * Find a potentially new back buffer, and if it's not been allocated yet and
 * in addition needs initializing, then try to allocate and initialize it.
2112 */ 2113#include <stdio.h> 2114static struct loader_dri3_buffer * 2115dri3_find_back_alloc(struct loader_dri3_drawable *draw) 2116{ 2117 struct loader_dri3_buffer *back; 2118 int id; 2119 2120 id = dri3_find_back(draw); 2121 if (id < 0) 2122 return NULL; 2123 2124 back = draw->buffers[id]; 2125 /* Allocate a new back if we haven't got one */ 2126 if (!back && draw->back_format != __DRI_IMAGE_FORMAT_NONE && 2127 dri3_update_drawable(draw)) 2128 back = dri3_alloc_render_buffer(draw, draw->back_format, 2129 draw->width, draw->height, draw->depth); 2130 2131 if (!back) 2132 return NULL; 2133 2134 draw->buffers[id] = back; 2135 2136 /* If necessary, prefill the back with data according to swap_method mode. */ 2137 if (draw->cur_blit_source != -1 && 2138 draw->buffers[draw->cur_blit_source] && 2139 back != draw->buffers[draw->cur_blit_source]) { 2140 struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source]; 2141 2142 dri3_fence_await(draw->conn, draw, source); 2143 dri3_fence_await(draw->conn, draw, back); 2144 (void) loader_dri3_blit_image(draw, 2145 back->image, 2146 source->image, 2147 0, 0, draw->width, draw->height, 2148 0, 0, 0); 2149 back->last_swap = source->last_swap; 2150 draw->cur_blit_source = -1; 2151 } 2152 2153 return back; 2154} 2155