/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <X11/Xlib-xcb.h>
#include <X11/xshmfence.h>
#include <xcb/xcb.h>
#include <xcb/dri3.h>
#include <xcb/present.h>

#include "util/macros.h"
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <poll.h>
#include <xf86drm.h>
#include "drm-uapi/drm_fourcc.h"
#include "util/hash_table.h"
#include "util/xmlconfig.h"

#include "vk_util.h"
#include "wsi_common_private.h"
#include "wsi_common_x11.h"
#include "wsi_common_queue.h"

#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})

struct wsi_x11_connection {
   bool has_dri3;
   bool has_dri3_modifiers;
   bool has_present;
   bool is_proprietary_x11;
};

struct wsi_x11 {
   struct wsi_interface base;

   pthread_mutex_t mutex;
   /* Hash table of xcb_connection -> wsi_x11_connection mappings */
   struct hash_table *connections;
};


/** wsi_dri3_open
 *
 * Wrapper around xcb_dri3_open
 */
static int
wsi_dri3_open(xcb_connection_t *conn,
              xcb_window_t root,
              uint32_t provider)
{
   xcb_dri3_open_cookie_t cookie;
   xcb_dri3_open_reply_t *reply;
   int fd;

   cookie = xcb_dri3_open(conn,
                          root,
                          provider);

   reply = xcb_dri3_open_reply(conn, cookie, NULL);
   if (!reply)
      return -1;

   if (reply->nfd != 1) {
      free(reply);
      return -1;
   }

   fd = xcb_dri3_open_reply_fds(conn, reply)[0];
   free(reply);
   fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);

   return fd;
}

static bool
wsi_x11_check_dri3_compatible(const struct wsi_device *wsi_dev,
                              xcb_connection_t *conn)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));
   xcb_screen_t *screen = screen_iter.data;

   int dri3_fd = wsi_dri3_open(conn, screen->root, None);
   if (dri3_fd == -1)
      return true;

   bool match = wsi_device_matches_drm_fd(wsi_dev, dri3_fd);

   close(dri3_fd);

   return match;
}
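
/* Create the per-connection WSI state: query the DRI3 and Present
 * extensions (and, when built with HAVE_DRI3_MODIFIERS, their versions,
 * since format modifiers require DRI3 v1.2 and Present v1.2), and probe
 * for a couple of proprietary extensions so the DRI3 warning can be
 * suppressed on closed-source driver stacks.
 */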

static struct wsi_x11_connection *
wsi_x11_connection_create(struct wsi_device *wsi_dev,
                          xcb_connection_t *conn)
{
   xcb_query_extension_cookie_t dri3_cookie, pres_cookie, amd_cookie, nv_cookie;
   xcb_query_extension_reply_t *dri3_reply, *pres_reply, *amd_reply, *nv_reply;
   bool has_dri3_v1_2 = false;
   bool has_present_v1_2 = false;

   struct wsi_x11_connection *wsi_conn =
      vk_alloc(&wsi_dev->instance_alloc, sizeof(*wsi_conn), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi_conn)
      return NULL;

   dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
   pres_cookie = xcb_query_extension(conn, 7, "Present");

   /* We try to be nice to users and emit a warning if they try to use a
    * Vulkan application on a system without DRI3 enabled. However, this ends
    * up spewing the warning when a user has, for example, both Intel
    * integrated graphics and a discrete card with proprietary drivers and is
    * running on the discrete card with the proprietary DDX. In this case, we
    * really don't want to print the warning because it just confuses users.
    * As a heuristic to detect this case, we check for a couple of proprietary
    * X11 extensions.
    */
   amd_cookie = xcb_query_extension(conn, 11, "ATIFGLRXDRI");
   nv_cookie = xcb_query_extension(conn, 10, "NV-CONTROL");

   dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
   pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
   amd_reply = xcb_query_extension_reply(conn, amd_cookie, NULL);
   nv_reply = xcb_query_extension_reply(conn, nv_cookie, NULL);
   if (!dri3_reply || !pres_reply) {
      free(dri3_reply);
      free(pres_reply);
      free(amd_reply);
      free(nv_reply);
      vk_free(&wsi_dev->instance_alloc, wsi_conn);
      return NULL;
   }

   wsi_conn->has_dri3 = dri3_reply->present != 0;
#ifdef HAVE_DRI3_MODIFIERS
   if (wsi_conn->has_dri3) {
      xcb_dri3_query_version_cookie_t ver_cookie;
      xcb_dri3_query_version_reply_t *ver_reply;

      ver_cookie = xcb_dri3_query_version(conn, 1, 2);
      ver_reply = xcb_dri3_query_version_reply(conn, ver_cookie, NULL);
      /* The reply can be NULL if the server disconnects; don't crash on it. */
      if (ver_reply) {
         has_dri3_v1_2 =
            (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
         free(ver_reply);
      }
   }
#endif

   wsi_conn->has_present = pres_reply->present != 0;
#ifdef HAVE_DRI3_MODIFIERS
   if (wsi_conn->has_present) {
      xcb_present_query_version_cookie_t ver_cookie;
      xcb_present_query_version_reply_t *ver_reply;

      ver_cookie = xcb_present_query_version(conn, 1, 2);
      ver_reply = xcb_present_query_version_reply(conn, ver_cookie, NULL);
      if (ver_reply) {
         has_present_v1_2 =
            (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
         free(ver_reply);
      }
   }
#endif

   wsi_conn->has_dri3_modifiers = has_dri3_v1_2 && has_present_v1_2;
   wsi_conn->is_proprietary_x11 = false;
   if (amd_reply && amd_reply->present)
      wsi_conn->is_proprietary_x11 = true;
   if (nv_reply && nv_reply->present)
      wsi_conn->is_proprietary_x11 = true;

   free(dri3_reply);
   free(pres_reply);
   free(amd_reply);
   free(nv_reply);

   return wsi_conn;
}

static void
wsi_x11_connection_destroy(struct wsi_device *wsi_dev,
                           struct wsi_x11_connection *conn)
{
   vk_free(&wsi_dev->instance_alloc, conn);
}

static bool
wsi_x11_check_for_dri3(struct wsi_x11_connection *wsi_conn)
{
   if (wsi_conn->has_dri3)
      return true;
   if (!wsi_conn->is_proprietary_x11) {
      fprintf(stderr, "vulkan: No DRI3 support detected - required for presentation\n"
                      "Note: you can probably enable DRI3 in your Xorg config\n");
   }
   return false;
}
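
/* Look up (or lazily create) the wsi_x11_connection for an xcb_connection_t.
 * The cache is a hash table keyed on the connection pointer; the mutex is
 * dropped around the blocking extension queries, so a losing racer simply
 * frees its freshly created state.
 */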

static struct wsi_x11_connection *
wsi_x11_get_connection(struct wsi_device *wsi_dev,
                       xcb_connection_t *conn)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
   if (!entry) {
      /* We're about to make a bunch of blocking calls.  Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_x11_connection *wsi_conn =
         wsi_x11_connection_create(wsi_dev, conn);
      if (!wsi_conn)
         return NULL;

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->connections, conn);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_x11_connection_destroy(wsi_dev, wsi_conn);
      } else {
         entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}

static const VkFormat formats[] = {
   VK_FORMAT_B8G8R8A8_SRGB,
   VK_FORMAT_B8G8R8A8_UNORM,
};

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_IMMEDIATE_KHR,
   VK_PRESENT_MODE_MAILBOX_KHR,
   VK_PRESENT_MODE_FIFO_KHR,
};

static xcb_screen_t *
get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      if (screen_iter.data->root == root)
         return screen_iter.data;
   }

   return NULL;
}

static xcb_visualtype_t *
screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
                      unsigned *depth)
{
   xcb_depth_iterator_t depth_iter =
      xcb_screen_allowed_depths_iterator(screen);

   for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
      xcb_visualtype_iterator_t visual_iter =
         xcb_depth_visuals_iterator(depth_iter.data);

      for (; visual_iter.rem; xcb_visualtype_next(&visual_iter)) {
         if (visual_iter.data->visual_id == visual_id) {
            if (depth)
               *depth = depth_iter.data->depth;
            return visual_iter.data;
         }
      }
   }

   return NULL;
}

static xcb_visualtype_t *
connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
                          unsigned *depth)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   /* For this we have to iterate over all of the screens which is rather
    * annoying.  Fortunately, there is probably only 1.
    */
   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
                                                       visual_id, depth);
      if (visual)
         return visual;
   }

   return NULL;
}
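
/* Resolve the visual for a window: issue the query-tree and
 * get-window-attributes requests back to back (so the two round-trips
 * overlap), then search the window's root screen for the matching
 * visualtype.
 */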

static xcb_visualtype_t *
get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
                          unsigned *depth)
{
   xcb_query_tree_cookie_t tree_cookie;
   xcb_get_window_attributes_cookie_t attrib_cookie;
   xcb_query_tree_reply_t *tree;
   xcb_get_window_attributes_reply_t *attrib;

   tree_cookie = xcb_query_tree(conn, window);
   attrib_cookie = xcb_get_window_attributes(conn, window);

   tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
   attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
   if (attrib == NULL || tree == NULL) {
      free(attrib);
      free(tree);
      return NULL;
   }

   xcb_window_t root = tree->root;
   xcb_visualid_t visual_id = attrib->visual;
   free(attrib);
   free(tree);

   xcb_screen_t *screen = get_screen_for_root(conn, root);
   if (screen == NULL)
      return NULL;

   return screen_get_visualtype(screen, visual_id, depth);
}

static bool
visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
{
   uint32_t rgb_mask = visual->red_mask |
                       visual->green_mask |
                       visual->blue_mask;

   uint32_t all_mask = 0xffffffff >> (32 - depth);

   /* Do we have bits left over after RGB? */
   return (all_mask & ~rgb_mask) != 0;
}

VkBool32 wsi_get_physical_device_xcb_presentation_support(
    struct wsi_device *wsi_device,
    uint32_t queueFamilyIndex,
    xcb_connection_t *connection,
    xcb_visualid_t visual_id)
{
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, connection);

   if (!wsi_conn)
      return false;

   if (!wsi_x11_check_for_dri3(wsi_conn))
      return false;

   unsigned visual_depth;
   if (!connection_get_visualtype(connection, visual_id, &visual_depth))
      return false;

   if (visual_depth != 24 && visual_depth != 32)
      return false;

   return true;
}

static xcb_connection_t *
x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->connection;
}

static xcb_window_t
x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return ((VkIcdSurfaceXlib *)icd_surface)->window;
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->window;
}

static VkResult
x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
                        struct wsi_device *wsi_device,
                        uint32_t queueFamilyIndex,
                        VkBool32 *pSupported)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   if (!wsi_x11_check_for_dri3(wsi_conn)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   unsigned visual_depth;
   if (!get_visualtype_for_window(conn, window, &visual_depth)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   if (visual_depth != 24 && visual_depth != 32) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   *pSupported = true;
   return VK_SUCCESS;
}
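
/* Fill VkSurfaceCapabilitiesKHR for the window.  Note the request ordering:
 * the xcb_get_geometry request is issued first, the (round-tripping) visual
 * lookup happens in the middle, and only then is the geometry reply read,
 * so the two server queries overlap.
 */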

static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
                             struct wsi_device *wsi_device,
                             VkSurfaceCapabilitiesKHR *caps)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_cookie_t geom_cookie;
   xcb_generic_error_t *err;
   xcb_get_geometry_reply_t *geom;
   unsigned visual_depth;

   geom_cookie = xcb_get_geometry(conn, window);

   /* This does a round-trip.  This is why we do get_geometry first and
    * wait to read the reply until after we have a visual.
    */
   xcb_visualtype_t *visual =
      get_visualtype_for_window(conn, window, &visual_depth);

   if (!visual)
      return VK_ERROR_SURFACE_LOST_KHR;

   geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
   if (geom) {
      VkExtent2D extent = { geom->width, geom->height };
      caps->currentExtent = extent;
      caps->minImageExtent = extent;
      caps->maxImageExtent = extent;
   } else {
      /* This can happen if the client didn't wait for the configure event
       * to come back from the compositor.  In that case, we don't know the
       * size of the window so we just return valid "I don't know" stuff.
       */
      caps->currentExtent = (VkExtent2D) { -1, -1 };
      caps->minImageExtent = (VkExtent2D) { 1, 1 };
      caps->maxImageExtent = (VkExtent2D) {
         wsi_device->maxImageDimension2D,
         wsi_device->maxImageDimension2D,
      };
   }
   free(err);
   free(geom);

   if (visual_has_alpha(visual, visual_depth)) {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
   } else {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
   }

   /* For true mailbox mode, we need at least 4 images:
    *  1) One to scan out from
    *  2) One to have queued for scan-out
    *  3) One to be currently held by the X server
    *  4) One to render to
    */
   caps->minImageCount = 2;
   /* There is no real maximum */
   caps->maxImageCount = 0;

   if (wsi_device->x11.override_minImageCount)
      caps->minImageCount = wsi_device->x11.override_minImageCount;

   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;
   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_STORAGE_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   return VK_SUCCESS;
}

static VkResult
x11_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
                              struct wsi_device *wsi_device,
                              const void *info_next,
                              VkSurfaceCapabilities2KHR *caps)
{
   assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);

   VkResult result =
      x11_surface_get_capabilities(icd_surface, wsi_device,
                                   &caps->surfaceCapabilities);

   vk_foreach_struct(ext, caps->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
         VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
         protected->supportsProtected = VK_FALSE;
         break;
      }

      default:
         /* Ignored */
         break;
      }
   }

   return result;
}
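
/* The format queries below use mesa's vk_outarray helpers, which implement
 * the standard Vulkan two-call idiom: when the output pointer is NULL only
 * the count is written back, otherwise up to *count elements are filled in
 * and VK_INCOMPLETE is returned if the caller's array was too small.
 */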

static VkResult
x11_surface_get_formats(VkIcdSurfaceBase *surface,
                        struct wsi_device *wsi_device,
                        uint32_t *pSurfaceFormatCount,
                        VkSurfaceFormatKHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);

   for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
      vk_outarray_append(&out, f) {
         f->format = formats[i];
         f->colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

static VkResult
x11_surface_get_formats2(VkIcdSurfaceBase *surface,
                         struct wsi_device *wsi_device,
                         const void *info_next,
                         uint32_t *pSurfaceFormatCount,
                         VkSurfaceFormat2KHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);

   for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
      vk_outarray_append(&out, f) {
         assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
         f->surfaceFormat.format = formats[i];
         f->surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

static VkResult
x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
                              uint32_t *pPresentModeCount,
                              VkPresentModeKHR *pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   return *pPresentModeCount < ARRAY_SIZE(present_modes) ?
      VK_INCOMPLETE : VK_SUCCESS;
}

static bool
x11_surface_is_local_to_gpu(struct wsi_device *wsi_dev,
                            xcb_connection_t *conn)
{
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_dev, conn);

   if (!wsi_conn)
      return false;

   if (!wsi_x11_check_for_dri3(wsi_conn))
      return false;

   if (!wsi_x11_check_dri3_compatible(wsi_dev, conn))
      return false;

   return true;
}
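
/* Present-rectangle query: report a single rectangle covering the whole
 * window, and only when the surface is local to this GPU (DRI3-compatible),
 * so a remote or mismatched display yields zero rectangles.
 */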

static VkResult
x11_surface_get_present_rectangles(VkIcdSurfaceBase *icd_surface,
                                   struct wsi_device *wsi_device,
                                   uint32_t *pRectCount,
                                   VkRect2D *pRects)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   VK_OUTARRAY_MAKE(out, pRects, pRectCount);

   if (x11_surface_is_local_to_gpu(wsi_device, conn)) {
      vk_outarray_append(&out, rect) {
         xcb_generic_error_t *err = NULL;
         xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(conn, window);
         xcb_get_geometry_reply_t *geom =
            xcb_get_geometry_reply(conn, geom_cookie, &err);
         free(err);
         if (geom) {
            *rect = (VkRect2D) {
               .offset = { 0, 0 },
               .extent = { geom->width, geom->height },
            };
         } else {
            /* This can happen if the client didn't wait for the configure event
             * to come back from the compositor.  In that case, we don't know the
             * size of the window so we just return valid "I don't know" stuff.
             */
            *rect = (VkRect2D) {
               .offset = { 0, 0 },
               .extent = { -1, -1 },
            };
         }
         free(geom);
      }
   }

   return vk_outarray_status(&out);
}

VkResult wsi_create_xcb_surface(const VkAllocationCallbacks *pAllocator,
                                const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
                                VkSurfaceKHR *pSurface)
{
   VkIcdSurfaceXcb *surface;

   surface = vk_alloc(pAllocator, sizeof *surface, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
   surface->connection = pCreateInfo->connection;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}

VkResult wsi_create_xlib_surface(const VkAllocationCallbacks *pAllocator,
                                 const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
                                 VkSurfaceKHR *pSurface)
{
   VkIcdSurfaceXlib *surface;

   surface = vk_alloc(pAllocator, sizeof *surface, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XLIB;
   surface->dpy = pCreateInfo->dpy;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}

struct x11_image {
   struct wsi_image                          base;
   xcb_pixmap_t                              pixmap;
   bool                                      busy;
   struct xshmfence *                        shm_fence;
   uint32_t                                  sync_fence;
};

struct x11_swapchain {
   struct wsi_swapchain                      base;

   bool                                      has_dri3_modifiers;

   xcb_connection_t *                        conn;
   xcb_window_t                              window;
   xcb_gc_t                                  gc;
   uint32_t                                  depth;
   VkExtent2D                                extent;

   xcb_present_event_t                       event_id;
   xcb_special_event_t *                     special_event;
   uint64_t                                  send_sbc;
   uint64_t                                  last_present_msc;
   uint32_t                                  stamp;

   bool                                      threaded;
   VkResult                                  status;
   xcb_present_complete_mode_t               last_present_mode;
   struct wsi_queue                          present_queue;
   struct wsi_queue                          acquire_queue;
   pthread_t                                 queue_manager;

   struct x11_image                          images[0];
};
WSI_DEFINE_NONDISP_HANDLE_CASTS(x11_swapchain, VkSwapchainKHR)
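
/* Illustration of the "sticky" status semantics implemented below: on a
 * healthy chain, transient results pass through without being recorded,
 * but a fatal error is recorded and returned forever after:
 *
 *    x11_swapchain_result(chain, VK_TIMEOUT);               -> VK_TIMEOUT
 *    x11_swapchain_result(chain, VK_SUCCESS);               -> VK_SUCCESS
 *    x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR); -> error
 *    x11_swapchain_result(chain, VK_SUCCESS);               -> error (sticks)
 */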

/**
 * Update the swapchain status with the result of an operation, and return
 * the combined status. The chain status will eventually be returned from
 * AcquireNextImage and QueuePresent.
 *
 * We make sure to 'stick' more pessimistic statuses: an out-of-date error
 * is permanent once seen, and every subsequent call will return this. If
 * this has not been seen, success will be returned.
 */
static VkResult
x11_swapchain_result(struct x11_swapchain *chain, VkResult result)
{
   /* Prioritise returning existing errors for consistency. */
   if (chain->status < 0)
      return chain->status;

   /* If we have a new error, mark it as permanent on the chain and return. */
   if (result < 0) {
      chain->status = result;
      return result;
   }

   /* Return temporary errors, but don't persist them. */
   if (result == VK_TIMEOUT || result == VK_NOT_READY)
      return result;

   /* Suboptimal isn't an error, but is a status which sticks to the swapchain
    * and is always returned rather than success.
    */
   if (result == VK_SUBOPTIMAL_KHR) {
      chain->status = result;
      return result;
   }

   /* No changes, so return the last status. */
   return chain->status;
}

static struct wsi_image *
x11_get_wsi_image(struct wsi_swapchain *wsi_chain, uint32_t image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
   return &chain->images[image_index].base;
}

/**
 * Process an X11 Present event. Does not update chain->status.
 */
static VkResult
x11_handle_dri3_present_event(struct x11_swapchain *chain,
                              xcb_present_generic_event_t *event)
{
   switch (event->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *config = (void *) event;

      if (config->width != chain->extent.width ||
          config->height != chain->extent.height)
         return VK_ERROR_OUT_OF_DATE_KHR;

      break;
   }

   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *idle = (void *) event;

      for (unsigned i = 0; i < chain->base.image_count; i++) {
         if (chain->images[i].pixmap == idle->pixmap) {
            chain->images[i].busy = false;
            if (chain->threaded)
               wsi_queue_push(&chain->acquire_queue, i);
            break;
         }
      }

      break;
   }

   case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *complete = (void *) event;
      if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP)
         chain->last_present_msc = complete->msc;

      VkResult result = VK_SUCCESS;

      /* The winsys is now trying to flip directly and cannot due to our
       * configuration. Request the user reallocate.
       */
#ifdef HAVE_DRI3_MODIFIERS
      if (complete->mode == XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY &&
          chain->last_present_mode != XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY)
         result = VK_SUBOPTIMAL_KHR;
#endif

      /* When we go from flipping to copying, the odds are very likely that
       * we could reallocate in a more optimal way if we didn't have to care
       * about scanout, so we always do this.
       */
      if (complete->mode == XCB_PRESENT_COMPLETE_MODE_COPY &&
          chain->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP)
         result = VK_SUBOPTIMAL_KHR;

      chain->last_present_mode = complete->mode;
      return result;
   }

   default:
      break;
   }

   return VK_SUCCESS;
}
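
/* Convert a relative timeout in nanoseconds into an absolute deadline,
 * saturating at UINT64_MAX instead of wrapping around.
 */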

static uint64_t wsi_get_absolute_timeout(uint64_t timeout)
{
   uint64_t current_time = wsi_common_get_current_time();

   timeout = MIN2(UINT64_MAX - current_time, timeout);

   return current_time + timeout;
}

static VkResult
x11_acquire_next_image_poll_x11(struct x11_swapchain *chain,
                                uint32_t *image_index, uint64_t timeout)
{
   xcb_generic_event_t *event;
   struct pollfd pfds;
   uint64_t atimeout;
   while (1) {
      for (uint32_t i = 0; i < chain->base.image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            xshmfence_await(chain->images[i].shm_fence);
            *image_index = i;
            chain->images[i].busy = true;
            return x11_swapchain_result(chain, VK_SUCCESS);
         }
      }

      xcb_flush(chain->conn);

      if (timeout == UINT64_MAX) {
         event = xcb_wait_for_special_event(chain->conn, chain->special_event);
         if (!event)
            return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);
      } else {
         event = xcb_poll_for_special_event(chain->conn, chain->special_event);
         if (!event) {
            int ret;
            if (timeout == 0)
               return x11_swapchain_result(chain, VK_NOT_READY);

            atimeout = wsi_get_absolute_timeout(timeout);

            pfds.fd = xcb_get_file_descriptor(chain->conn);
            pfds.events = POLLIN;
            /* poll() takes milliseconds; timeout is in nanoseconds. */
            ret = poll(&pfds, 1, timeout / 1000 / 1000);
            if (ret == 0)
               return x11_swapchain_result(chain, VK_TIMEOUT);
            if (ret == -1)
               return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);

            /* If a non-special event happens, the fd will still
             * poll. So recalculate the timeout now just in case.
             */
            uint64_t current_time = wsi_common_get_current_time();
            if (atimeout > current_time)
               timeout = atimeout - current_time;
            else
               timeout = 0;
            continue;
         }
      }

      /* Update the swapchain status here. We may catch non-fatal errors here,
       * in which case we need to update the status and continue.
       */
      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      free(event);
      if (result < 0)
         return x11_swapchain_result(chain, result);
   }
}
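
/* Threaded-acquire path (FIFO present mode): idle images are handed back
 * through acquire_queue by the queue-manager thread, so this just pulls the
 * next index and waits on its shm fence.
 */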

static VkResult
x11_acquire_next_image_from_queue(struct x11_swapchain *chain,
                                  uint32_t *image_index_out, uint64_t timeout)
{
   assert(chain->threaded);

   uint32_t image_index;
   VkResult result = wsi_queue_pull(&chain->acquire_queue,
                                    &image_index, timeout);
   if (result < 0 || result == VK_TIMEOUT) {
      /* On error, the thread has shut down, so safe to update chain->status.
       * Calling x11_swapchain_result with VK_TIMEOUT won't modify
       * chain->status so that is also safe.
       */
      return x11_swapchain_result(chain, result);
   } else if (chain->status < 0) {
      return chain->status;
   }

   assert(image_index < chain->base.image_count);
   xshmfence_await(chain->images[image_index].shm_fence);

   *image_index_out = image_index;

   return chain->status;
}

static VkResult
x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
                   uint32_t target_msc)
{
   struct x11_image *image = &chain->images[image_index];

   assert(image_index < chain->base.image_count);

   uint32_t options = XCB_PRESENT_OPTION_NONE;

   int64_t divisor = 0;
   int64_t remainder = 0;

   if (chain->base.present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR)
      options |= XCB_PRESENT_OPTION_ASYNC;

#ifdef HAVE_DRI3_MODIFIERS
   if (chain->has_dri3_modifiers)
      options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
#endif

   /* Poll for any available event and update the swapchain status. This could
    * update the status of the swapchain to SUBOPTIMAL or OUT_OF_DATE if the
    * associated X11 surface has been resized.
    */
   xcb_generic_event_t *event;
   while ((event = xcb_poll_for_special_event(chain->conn, chain->special_event))) {
      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      free(event);
      if (result < 0)
         return x11_swapchain_result(chain, result);
      x11_swapchain_result(chain, result);
   }

   xshmfence_reset(image->shm_fence);

   ++chain->send_sbc;
   xcb_void_cookie_t cookie =
      xcb_present_pixmap(chain->conn,
                         chain->window,
                         image->pixmap,
                         (uint32_t) chain->send_sbc,
                         0,        /* valid */
                         0,        /* update */
                         0,        /* x_off */
                         0,        /* y_off */
                         XCB_NONE, /* target_crtc */
                         XCB_NONE,
                         image->sync_fence,
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
   xcb_discard_reply(chain->conn, cookie.sequence);
   image->busy = true;

   xcb_flush(chain->conn);

   return x11_swapchain_result(chain, VK_SUCCESS);
}

static VkResult
x11_acquire_next_image(struct wsi_swapchain *anv_chain,
                       const VkAcquireNextImageInfoKHR *info,
                       uint32_t *image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   uint64_t timeout = info->timeout;

   /* If the swapchain is in an error state, don't go any further. */
   if (chain->status < 0)
      return chain->status;

   if (chain->threaded) {
      return x11_acquire_next_image_from_queue(chain, image_index, timeout);
   } else {
      return x11_acquire_next_image_poll_x11(chain, image_index, timeout);
   }
}

static VkResult
x11_queue_present(struct wsi_swapchain *anv_chain,
                  uint32_t image_index,
                  const VkPresentRegionKHR *damage)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   /* If the swapchain is in an error state, don't go any further. */
   if (chain->status < 0)
      return chain->status;

   if (chain->threaded) {
      wsi_queue_push(&chain->present_queue, image_index);
      return chain->status;
   } else {
      return x11_present_to_x11(chain, image_index, 0);
   }
}
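
/* Queue-manager thread for FIFO present mode: pulls the image indices that
 * x11_queue_present pushed onto present_queue, presents each one at
 * last_present_msc + 1, then blocks on Present events until that MSC has
 * passed; IDLE_NOTIFY handling pushes idle images back onto acquire_queue.
 */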

static void *
x11_manage_fifo_queues(void *state)
{
   struct x11_swapchain *chain = state;
   VkResult result = VK_SUCCESS;

   assert(chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR);

   while (chain->status >= 0) {
      /* It should be safe to unconditionally block here.  Later in the loop
       * this thread blocks until the previous present has landed on-screen.
       * At that point, we should have received IDLE_NOTIFY on all images
       * presented before that point so the client should be able to acquire
       * any image other than the currently presented one.
       */
      uint32_t image_index = 0;
      result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
      assert(result != VK_TIMEOUT);
      if (result < 0) {
         goto fail;
      } else if (chain->status < 0) {
         /* The status can change underneath us if the swapchain is destroyed
          * from another thread.
          */
         return NULL;
      }

      uint64_t target_msc = chain->last_present_msc + 1;
      result = x11_present_to_x11(chain, image_index, target_msc);
      if (result < 0)
         goto fail;

      while (chain->last_present_msc < target_msc) {
         xcb_generic_event_t *event =
            xcb_wait_for_special_event(chain->conn, chain->special_event);
         if (!event) {
            result = VK_ERROR_OUT_OF_DATE_KHR;
            goto fail;
         }

         result = x11_handle_dri3_present_event(chain, (void *)event);
         free(event);
         if (result < 0)
            goto fail;
      }
   }

fail:
   x11_swapchain_result(chain, result);
   wsi_queue_push(&chain->acquire_queue, UINT32_MAX);

   return NULL;
}
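
/* Create one swapchain image: allocate the buffer (native or prime-blit),
 * wrap it in an X11 pixmap via DRI3 (using the multi-plane, modifier-aware
 * request when available), and attach an xshmfence/SyncFence pair used to
 * throttle reuse of the image.
 */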

static VkResult
x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
               const VkSwapchainCreateInfoKHR *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               const uint64_t *const *modifiers,
               const uint32_t *num_modifiers,
               int num_tranches, struct x11_image *image)
{
   xcb_void_cookie_t cookie;
   VkResult result;
   uint32_t bpp = 32;

   if (chain->base.use_prime_blit) {
      bool use_modifier = num_tranches > 0;
      result = wsi_create_prime_image(&chain->base, pCreateInfo, use_modifier, &image->base);
   } else {
      result = wsi_create_native_image(&chain->base, pCreateInfo,
                                       num_tranches, num_modifiers, modifiers,
                                       &image->base);
   }
   if (result < 0)
      return result;

   image->pixmap = xcb_generate_id(chain->conn);

#ifdef HAVE_DRI3_MODIFIERS
   if (image->base.drm_modifier != DRM_FORMAT_MOD_INVALID) {
      /* If the image has a modifier, we must have DRI3 v1.2. */
      assert(chain->has_dri3_modifiers);

      cookie =
         xcb_dri3_pixmap_from_buffers_checked(chain->conn,
                                              image->pixmap,
                                              chain->window,
                                              image->base.num_planes,
                                              pCreateInfo->imageExtent.width,
                                              pCreateInfo->imageExtent.height,
                                              image->base.row_pitches[0],
                                              image->base.offsets[0],
                                              image->base.row_pitches[1],
                                              image->base.offsets[1],
                                              image->base.row_pitches[2],
                                              image->base.offsets[2],
                                              image->base.row_pitches[3],
                                              image->base.offsets[3],
                                              chain->depth, bpp,
                                              image->base.drm_modifier,
                                              image->base.fds);
   } else
#endif
   {
      /* Without passing modifiers, we can't have multi-plane RGB images. */
      assert(image->base.num_planes == 1);

      cookie =
         xcb_dri3_pixmap_from_buffer_checked(chain->conn,
                                             image->pixmap,
                                             chain->window,
                                             image->base.sizes[0],
                                             pCreateInfo->imageExtent.width,
                                             pCreateInfo->imageExtent.height,
                                             image->base.row_pitches[0],
                                             chain->depth, bpp,
                                             image->base.fds[0]);
   }

   xcb_discard_reply(chain->conn, cookie.sequence);

   /* XCB has now taken ownership of the FDs. */
   for (int i = 0; i < image->base.num_planes; i++)
      image->base.fds[i] = -1;

   int fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0) {
      /* Make sure the failure paths below actually report an error rather
       * than the VK_SUCCESS left over from image creation.
       */
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_pixmap;
   }

   image->shm_fence = xshmfence_map_shm(fence_fd);
   if (image->shm_fence == NULL) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_shmfence_alloc;
   }

   image->sync_fence = xcb_generate_id(chain->conn);
   xcb_dri3_fence_from_fd(chain->conn,
                          image->pixmap,
                          image->sync_fence,
                          false,
                          fence_fd);

   image->busy = false;
   xshmfence_trigger(image->shm_fence);

   return VK_SUCCESS;

fail_shmfence_alloc:
   close(fence_fd);

fail_pixmap:
   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_destroy_image(&chain->base, &image->base);

   return result;
}

static void
x11_image_finish(struct x11_swapchain *chain,
                 const VkAllocationCallbacks *pAllocator,
                 struct x11_image *image)
{
   xcb_void_cookie_t cookie;

   cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
   xcb_discard_reply(chain->conn, cookie.sequence);
   xshmfence_unmap_shm(image->shm_fence);

   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_destroy_image(&chain->base, &image->base);
}
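
/* Query the DRM format modifiers the X server advertises for this window
 * and screen (DRI3 v1.2).  Window modifiers and screen modifiers become two
 * separate "tranches", in that order of preference; on any failure we fall
 * back to *num_tranches_in = 0, i.e. no modifier support.
 */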

static void
wsi_x11_get_dri3_modifiers(struct wsi_x11_connection *wsi_conn,
                           xcb_connection_t *conn, xcb_window_t window,
                           uint8_t depth, uint8_t bpp,
                           VkCompositeAlphaFlagsKHR vk_alpha,
                           uint64_t **modifiers_in, uint32_t *num_modifiers_in,
                           uint32_t *num_tranches_in,
                           const VkAllocationCallbacks *pAllocator)
{
   if (!wsi_conn->has_dri3_modifiers)
      goto out;

#ifdef HAVE_DRI3_MODIFIERS
   xcb_generic_error_t *error = NULL;
   xcb_dri3_get_supported_modifiers_cookie_t mod_cookie =
      xcb_dri3_get_supported_modifiers(conn, window, depth, bpp);
   xcb_dri3_get_supported_modifiers_reply_t *mod_reply =
      xcb_dri3_get_supported_modifiers_reply(conn, mod_cookie, &error);
   free(error);

   if (!mod_reply || (mod_reply->num_window_modifiers == 0 &&
                      mod_reply->num_screen_modifiers == 0)) {
      free(mod_reply);
      goto out;
   }

   uint32_t n = 0;
   uint32_t counts[2];
   uint64_t *modifiers[2];

   if (mod_reply->num_window_modifiers) {
      counts[n] = mod_reply->num_window_modifiers;
      modifiers[n] = vk_alloc(pAllocator,
                              counts[n] * sizeof(uint64_t),
                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!modifiers[n]) {
         free(mod_reply);
         goto out;
      }

      memcpy(modifiers[n],
             xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
             counts[n] * sizeof(uint64_t));
      n++;
   }

   if (mod_reply->num_screen_modifiers) {
      counts[n] = mod_reply->num_screen_modifiers;
      modifiers[n] = vk_alloc(pAllocator,
                              counts[n] * sizeof(uint64_t),
                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!modifiers[n]) {
         if (n > 0)
            vk_free(pAllocator, modifiers[0]);
         free(mod_reply);
         goto out;
      }

      memcpy(modifiers[n],
             xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
             counts[n] * sizeof(uint64_t));
      n++;
   }

   for (int i = 0; i < n; i++) {
      modifiers_in[i] = modifiers[i];
      num_modifiers_in[i] = counts[i];
   }
   *num_tranches_in = n;

   free(mod_reply);
   return;
#endif
out:
   *num_tranches_in = 0;
}

static VkResult
x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
                      const VkAllocationCallbacks *pAllocator)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   xcb_void_cookie_t cookie;

   if (chain->threaded) {
      chain->status = VK_ERROR_OUT_OF_DATE_KHR;
      /* Push a UINT32_MAX to wake up the manager */
      wsi_queue_push(&chain->present_queue, UINT32_MAX);
      pthread_join(chain->queue_manager, NULL);
      wsi_queue_destroy(&chain->acquire_queue);
      wsi_queue_destroy(&chain->present_queue);
   }

   for (uint32_t i = 0; i < chain->base.image_count; i++)
      x11_image_finish(chain, pAllocator, &chain->images[i]);

   xcb_unregister_for_special_event(chain->conn, chain->special_event);
   cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
                                             chain->window,
                                             XCB_PRESENT_EVENT_MASK_NO_EVENT);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_swapchain_finish(&chain->base);

   vk_free(pAllocator, chain);

   return VK_SUCCESS;
}

static void
wsi_x11_set_adaptive_sync_property(xcb_connection_t *conn,
                                   xcb_drawable_t drawable,
                                   uint32_t state)
{
   static char const name[] = "_VARIABLE_REFRESH";
   xcb_intern_atom_cookie_t cookie;
   xcb_intern_atom_reply_t *reply;
   xcb_void_cookie_t check;

   cookie = xcb_intern_atom(conn, 0, strlen(name), name);
   reply = xcb_intern_atom_reply(conn, cookie, NULL);
   if (reply == NULL)
      return;

   if (state)
      check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
                                          drawable, reply->atom,
                                          XCB_ATOM_CARDINAL, 32, 1, &state);
   else
      check = xcb_delete_property_checked(conn, drawable, reply->atom);

   xcb_discard_reply(conn, check.sequence);
   free(reply);
}
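
/* Create the swapchain: allocate chain->images[num_images] in one block,
 * register for Present CONFIGURE/COMPLETE/IDLE events on the window, fetch
 * any supported format modifiers, initialize every image, and spin up the
 * FIFO queue-manager thread when the present mode requires it.
 */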

static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                             VkDevice device,
                             struct wsi_device *wsi_device,
                             const VkSwapchainCreateInfoKHR *pCreateInfo,
                             const VkAllocationCallbacks *pAllocator,
                             struct wsi_swapchain **swapchain_out)
{
   struct x11_swapchain *chain;
   xcb_void_cookie_t cookie;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   const unsigned num_images = pCreateInfo->minImageCount;

   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* Check for whether or not we have a window up-front */
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_reply_t *geometry =
      xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);
   if (geometry == NULL)
      return VK_ERROR_SURFACE_LOST_KHR;
   const uint32_t bit_depth = geometry->depth;
   free(geometry);

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_alloc(pAllocator, size, 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   result = wsi_swapchain_init(wsi_device, &chain->base, device,
                               pCreateInfo, pAllocator);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   chain->base.destroy = x11_swapchain_destroy;
   chain->base.get_wsi_image = x11_get_wsi_image;
   chain->base.acquire_next_image = x11_acquire_next_image;
   chain->base.queue_present = x11_queue_present;
   chain->base.present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
   chain->base.image_count = num_images;
   chain->conn = conn;
   chain->window = window;
   chain->depth = bit_depth;
   chain->extent = pCreateInfo->imageExtent;
   chain->send_sbc = 0;
   chain->last_present_msc = 0;
   chain->threaded = false;
   chain->status = VK_SUCCESS;
   chain->has_dri3_modifiers = wsi_conn->has_dri3_modifiers;

   /* If we are reallocating from an old swapchain, then we inherit its
    * last completion mode, to ensure we don't get into reallocation
    * cycles. If we are starting anew, we set 'COPY', as that is the only
    * mode which provokes reallocation when anything changes, to make
    * sure we have the most optimal allocation.
    */
   WSI_FROM_HANDLE(x11_swapchain, old_chain, pCreateInfo->oldSwapchain);
   if (old_chain)
      chain->last_present_mode = old_chain->last_present_mode;
   else
      chain->last_present_mode = XCB_PRESENT_COMPLETE_MODE_COPY;

   if (!wsi_x11_check_dri3_compatible(wsi_device, conn))
      chain->base.use_prime_blit = true;

   chain->event_id = xcb_generate_id(chain->conn);
   xcb_present_select_input(chain->conn, chain->event_id, chain->window,
                            XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

   /* Create an XCB event queue to hold present events outside of the usual
    * application event queue
    */
   chain->special_event =
      xcb_register_for_special_xge(chain->conn, &xcb_present_id,
                                   chain->event_id, NULL);

   chain->gc = xcb_generate_id(chain->conn);
   if (!chain->gc) {
      /* FINISHME: Choose a better error. */
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_register;
   }

   cookie = xcb_create_gc(chain->conn,
                          chain->gc,
                          chain->window,
                          XCB_GC_GRAPHICS_EXPOSURES,
                          (uint32_t []) { 0 });
   xcb_discard_reply(chain->conn, cookie.sequence);

   uint64_t *modifiers[2] = {NULL, NULL};
   uint32_t num_modifiers[2] = {0, 0};
   uint32_t num_tranches = 0;
   if (wsi_device->supports_modifiers)
      wsi_x11_get_dri3_modifiers(wsi_conn, conn, window, chain->depth, 32,
                                 pCreateInfo->compositeAlpha,
                                 modifiers, num_modifiers, &num_tranches,
                                 pAllocator);

   uint32_t image = 0;
   for (; image < chain->base.image_count; image++) {
      result = x11_image_init(device, chain, pCreateInfo, pAllocator,
                              (const uint64_t *const *)modifiers,
                              num_modifiers, num_tranches,
                              &chain->images[image]);
      if (result != VK_SUCCESS)
         goto fail_init_images;
   }
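
   /* FIFO present mode is implemented with a worker thread: all images are
    * pre-pushed onto acquire_queue, presents are funnelled through
    * present_queue, and x11_manage_fifo_queues paces them one MSC apart.
    */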
   if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
      chain->threaded = true;

      /* Initialize our queues.  We make them base.image_count + 1 because we
       * will occasionally use UINT32_MAX to signal the other thread that an
       * error has occurred and we don't want an overflow.
       */
      int ret;
      ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
      if (ret) {
         goto fail_init_images;
      }

      ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
      if (ret) {
         wsi_queue_destroy(&chain->acquire_queue);
         goto fail_init_images;
      }

      for (unsigned i = 0; i < chain->base.image_count; i++)
         wsi_queue_push(&chain->acquire_queue, i);

      ret = pthread_create(&chain->queue_manager, NULL,
                           x11_manage_fifo_queues, chain);
      if (ret) {
         wsi_queue_destroy(&chain->present_queue);
         wsi_queue_destroy(&chain->acquire_queue);
         goto fail_init_images;
      }
   }

   for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
      vk_free(pAllocator, modifiers[i]);

   /* It is safe to set it here as only one swapchain can be associated with
    * the window, and swapchain creation does the association. At this point
    * we know the creation is going to succeed. */
   wsi_x11_set_adaptive_sync_property(conn, window,
                                      wsi_device->enable_adaptive_sync);

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail_init_images:
   for (uint32_t j = 0; j < image; j++)
      x11_image_finish(chain, pAllocator, &chain->images[j]);

   for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
      vk_free(pAllocator, modifiers[i]);

fail_register:
   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   wsi_swapchain_finish(&chain->base);

fail_alloc:
   vk_free(pAllocator, chain);

   return result;
}
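
/* Driver entry point: plumb the X11 WSI callbacks into wsi_device.  The XCB
 * and Xlib platforms share a single wsi_x11 instance, and dri_options (when
 * provided) lets driconf override minImageCount via
 * vk_x11_override_min_image_count.
 */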

VkResult
wsi_x11_init_wsi(struct wsi_device *wsi_device,
                 const VkAllocationCallbacks *alloc,
                 const struct driOptionCache *dri_options)
{
   struct wsi_x11 *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   int ret = pthread_mutex_init(&wsi->mutex, NULL);
   if (ret != 0) {
      if (ret == ENOMEM) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      } else {
         /* FINISHME: Choose a better error. */
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      }

      goto fail_alloc;
   }

   wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                              _mesa_key_pointer_equal);
   if (!wsi->connections) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_mutex;
   }

   if (dri_options) {
      if (driCheckOption(dri_options, "vk_x11_override_min_image_count", DRI_INT)) {
         wsi_device->x11.override_minImageCount =
            driQueryOptioni(dri_options, "vk_x11_override_min_image_count");
      }
   }

   wsi->base.get_support = x11_surface_get_support;
   wsi->base.get_capabilities2 = x11_surface_get_capabilities2;
   wsi->base.get_formats = x11_surface_get_formats;
   wsi->base.get_formats2 = x11_surface_get_formats2;
   wsi->base.get_present_modes = x11_surface_get_present_modes;
   wsi->base.get_present_rectangles = x11_surface_get_present_rectangles;
   wsi->base.create_swapchain = x11_surface_create_swapchain;

   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   pthread_mutex_destroy(&wsi->mutex);
fail_alloc:
   vk_free(alloc, wsi);
fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;

   return result;
}

void
wsi_x11_finish_wsi(struct wsi_device *wsi_device,
                   const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];

   if (wsi) {
      hash_table_foreach(wsi->connections, entry)
         wsi_x11_connection_destroy(wsi_device, entry->data);

      _mesa_hash_table_destroy(wsi->connections, NULL);

      pthread_mutex_destroy(&wsi->mutex);

      vk_free(alloc, wsi);
   }
}