wsi_common_x11.c revision 01e04c3f
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <X11/Xlib-xcb.h>
#include <X11/xshmfence.h>
#include <xcb/xcb.h>
#include <xcb/dri3.h>
#include <xcb/present.h>

#include "util/macros.h"
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <poll.h>
#include <xf86drm.h>
#include <drm_fourcc.h>
#include "util/hash_table.h"

#include "vk_util.h"
#include "wsi_common_private.h"
#include "wsi_common_x11.h"
#include "wsi_common_queue.h"

#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})

struct wsi_x11_connection {
   bool has_dri3;
   bool has_dri3_modifiers;
   bool has_present;
   bool is_proprietary_x11;
};

struct wsi_x11 {
   struct wsi_interface base;

   pthread_mutex_t mutex;
   /* Hash table of xcb_connection -> wsi_x11_connection mappings */
   struct hash_table *connections;
};


/** wsi_dri3_open
 *
 * Wrapper around xcb_dri3_open
 */
static int
wsi_dri3_open(xcb_connection_t *conn,
              xcb_window_t root,
              uint32_t provider)
{
   xcb_dri3_open_cookie_t cookie;
   xcb_dri3_open_reply_t *reply;
   int fd;

   cookie = xcb_dri3_open(conn,
                          root,
                          provider);

   reply = xcb_dri3_open_reply(conn, cookie, NULL);
   if (!reply)
      return -1;

   if (reply->nfd != 1) {
      free(reply);
      return -1;
   }

   fd = xcb_dri3_open_reply_fds(conn, reply)[0];
   free(reply);
   fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);

   return fd;
}

static bool
wsi_x11_check_dri3_compatible(const struct wsi_device *wsi_dev,
                              xcb_connection_t *conn)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));
   xcb_screen_t *screen = screen_iter.data;

   int dri3_fd = wsi_dri3_open(conn, screen->root, None);
   if (dri3_fd == -1)
      return true;

   bool match = wsi_device_matches_drm_fd(wsi_dev, dri3_fd);

   close(dri3_fd);

   return match;
}
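
/* Illustration (not part of the driver): the typed_memcpy macro defined
 * above is a type-checked memcpy.  The STATIC_ASSERT turns an element-size
 * mismatch into a compile-time error instead of a silent buffer overrun:
 *
 *    VkPresentModeKHR dst[2];
 *    typed_memcpy(dst, present_modes, 2);   // OK: same element type
 *
 *    uint64_t bad[2];
 *    typed_memcpy(bad, present_modes, 2);   // fails to compile: sizes differ
 */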

static struct wsi_x11_connection *
wsi_x11_connection_create(struct wsi_device *wsi_dev,
                          xcb_connection_t *conn)
{
   xcb_query_extension_cookie_t dri3_cookie, pres_cookie, amd_cookie, nv_cookie;
   xcb_query_extension_reply_t *dri3_reply, *pres_reply, *amd_reply, *nv_reply;
   bool has_dri3_v1_2 = false;
   bool has_present_v1_2 = false;

   struct wsi_x11_connection *wsi_conn =
      vk_alloc(&wsi_dev->instance_alloc, sizeof(*wsi_conn), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi_conn)
      return NULL;

   dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
   pres_cookie = xcb_query_extension(conn, 7, "Present");

   /* We try to be nice to users and emit a warning if they try to use a
    * Vulkan application on a system without DRI3 enabled.  However, this
    * ends up spewing the warning when a user has, for example, both Intel
    * integrated graphics and a discrete card with proprietary drivers and
    * is running on the discrete card with the proprietary DDX.  In this
    * case, we really don't want to print the warning because it just
    * confuses users.  As a heuristic to detect this case, we check for a
    * couple of proprietary X11 extensions.
    */
   amd_cookie = xcb_query_extension(conn, 11, "ATIFGLRXDRI");
   nv_cookie = xcb_query_extension(conn, 10, "NV-CONTROL");

   dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
   pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
   amd_reply = xcb_query_extension_reply(conn, amd_cookie, NULL);
   nv_reply = xcb_query_extension_reply(conn, nv_cookie, NULL);
   if (!dri3_reply || !pres_reply) {
      free(dri3_reply);
      free(pres_reply);
      free(amd_reply);
      free(nv_reply);
      vk_free(&wsi_dev->instance_alloc, wsi_conn);
      return NULL;
   }

   wsi_conn->has_dri3 = dri3_reply->present != 0;
#ifdef HAVE_DRI3_MODIFIERS
   if (wsi_conn->has_dri3) {
      xcb_dri3_query_version_cookie_t ver_cookie;
      xcb_dri3_query_version_reply_t *ver_reply;

      ver_cookie = xcb_dri3_query_version(conn, 1, 2);
      ver_reply = xcb_dri3_query_version_reply(conn, ver_cookie, NULL);
      has_dri3_v1_2 =
         (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
      free(ver_reply);
   }
#endif

   wsi_conn->has_present = pres_reply->present != 0;
#ifdef HAVE_DRI3_MODIFIERS
   if (wsi_conn->has_present) {
      xcb_present_query_version_cookie_t ver_cookie;
      xcb_present_query_version_reply_t *ver_reply;

      ver_cookie = xcb_present_query_version(conn, 1, 2);
      ver_reply = xcb_present_query_version_reply(conn, ver_cookie, NULL);
      has_present_v1_2 =
         (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
      free(ver_reply);
   }
#endif

   wsi_conn->has_dri3_modifiers = has_dri3_v1_2 && has_present_v1_2;
   wsi_conn->is_proprietary_x11 = false;
   if (amd_reply && amd_reply->present)
      wsi_conn->is_proprietary_x11 = true;
   if (nv_reply && nv_reply->present)
      wsi_conn->is_proprietary_x11 = true;

   free(dri3_reply);
   free(pres_reply);
   free(amd_reply);
   free(nv_reply);

   return wsi_conn;
}

static void
wsi_x11_connection_destroy(struct wsi_device *wsi_dev,
                           struct wsi_x11_connection *conn)
{
   vk_free(&wsi_dev->instance_alloc, conn);
}

static bool
wsi_x11_check_for_dri3(struct wsi_x11_connection *wsi_conn)
{
   if (wsi_conn->has_dri3)
      return true;
   if (!wsi_conn->is_proprietary_x11) {
      fprintf(stderr, "vulkan: No DRI3 support detected - required for presentation\n"
                      "Note: you can probably enable DRI3 in your Xorg config\n");
   }
   return false;
}
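
/* Illustration (not part of the driver): wsi_x11_connection_create above
 * follows the standard XCB latency-hiding pattern -- issue every request
 * first, then collect the replies -- so the four extension queries cost one
 * round trip to the X server instead of four:
 *
 *    cookie_a = xcb_query_extension(conn, ...);              // send
 *    cookie_b = xcb_query_extension(conn, ...);              // send
 *    reply_a = xcb_query_extension_reply(conn, cookie_a, NULL);  // block once
 *    reply_b = xcb_query_extension_reply(conn, cookie_b, NULL);  // ready
 */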

static struct wsi_x11_connection *
wsi_x11_get_connection(struct wsi_device *wsi_dev,
                       xcb_connection_t *conn)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
   if (!entry) {
      /* We're about to make a bunch of blocking calls.  Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_x11_connection *wsi_conn =
         wsi_x11_connection_create(wsi_dev, conn);
      if (!wsi_conn)
         return NULL;

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->connections, conn);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_x11_connection_destroy(wsi_dev, wsi_conn);
      } else {
         entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}
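
/* Illustration (not part of the driver): the lookup above is the classic
 * "check, drop lock, create, re-check" pattern.  Because the mutex is
 * released around the blocking X round trips, two threads can both miss in
 * the table and both build a wsi_x11_connection; the second search under
 * the re-taken lock detects the race and the loser destroys its copy:
 *
 *    lock; entry = search(table, key);
 *    if (!entry) {
 *       unlock;  obj = create();                  // slow, blocking
 *       lock;    entry = search(table, key);
 *       if (entry) destroy(obj);                  // lost the race
 *       else entry = insert(table, key, obj);
 *    }
 *    unlock;
 */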

static const VkFormat formats[] = {
   VK_FORMAT_B8G8R8A8_SRGB,
   VK_FORMAT_B8G8R8A8_UNORM,
};

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_IMMEDIATE_KHR,
   VK_PRESENT_MODE_MAILBOX_KHR,
   VK_PRESENT_MODE_FIFO_KHR,
};

static xcb_screen_t *
get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      if (screen_iter.data->root == root)
         return screen_iter.data;
   }

   return NULL;
}

static xcb_visualtype_t *
screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
                      unsigned *depth)
{
   xcb_depth_iterator_t depth_iter =
      xcb_screen_allowed_depths_iterator(screen);

   for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
      xcb_visualtype_iterator_t visual_iter =
         xcb_depth_visuals_iterator(depth_iter.data);

      for (; visual_iter.rem; xcb_visualtype_next(&visual_iter)) {
         if (visual_iter.data->visual_id == visual_id) {
            if (depth)
               *depth = depth_iter.data->depth;
            return visual_iter.data;
         }
      }
   }

   return NULL;
}

static xcb_visualtype_t *
connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
                          unsigned *depth)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   /* For this we have to iterate over all of the screens, which is rather
    * annoying.  Fortunately, there is probably only one.
    */
   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
                                                       visual_id, depth);
      if (visual)
         return visual;
   }

   return NULL;
}

static xcb_visualtype_t *
get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
                          unsigned *depth)
{
   xcb_query_tree_cookie_t tree_cookie;
   xcb_get_window_attributes_cookie_t attrib_cookie;
   xcb_query_tree_reply_t *tree;
   xcb_get_window_attributes_reply_t *attrib;

   tree_cookie = xcb_query_tree(conn, window);
   attrib_cookie = xcb_get_window_attributes(conn, window);

   tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
   attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
   if (attrib == NULL || tree == NULL) {
      free(attrib);
      free(tree);
      return NULL;
   }

   xcb_window_t root = tree->root;
   xcb_visualid_t visual_id = attrib->visual;
   free(attrib);
   free(tree);

   xcb_screen_t *screen = get_screen_for_root(conn, root);
   if (screen == NULL)
      return NULL;

   return screen_get_visualtype(screen, visual_id, depth);
}

static bool
visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
{
   uint32_t rgb_mask = visual->red_mask |
                       visual->green_mask |
                       visual->blue_mask;

   uint32_t all_mask = 0xffffffff >> (32 - depth);

   /* Do we have bits left over after RGB? */
   return (all_mask & ~rgb_mask) != 0;
}
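
/* Worked example (not part of the driver) for visual_has_alpha above: a
 * depth-32 visual with red_mask 0x00ff0000, green_mask 0x0000ff00 and
 * blue_mask 0x000000ff gives rgb_mask = 0x00ffffff and
 * all_mask = 0xffffffff, so all_mask & ~rgb_mask = 0xff000000 -- eight
 * alpha bits remain.  With the same masks at depth 24,
 * all_mask = 0x00ffffff and nothing is left over, so no alpha.
 */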

VkBool32 wsi_get_physical_device_xcb_presentation_support(
    struct wsi_device *wsi_device,
    uint32_t queueFamilyIndex,
    xcb_connection_t* connection,
    xcb_visualid_t visual_id)
{
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, connection);

   if (!wsi_conn)
      return false;

   if (!wsi_x11_check_for_dri3(wsi_conn))
      return false;

   unsigned visual_depth;
   if (!connection_get_visualtype(connection, visual_id, &visual_depth))
      return false;

   if (visual_depth != 24 && visual_depth != 32)
      return false;

   return true;
}

static xcb_connection_t*
x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->connection;
}

static xcb_window_t
x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return ((VkIcdSurfaceXlib *)icd_surface)->window;
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->window;
}

static VkResult
x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
                        struct wsi_device *wsi_device,
                        uint32_t queueFamilyIndex,
                        VkBool32* pSupported)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   if (!wsi_x11_check_for_dri3(wsi_conn)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   unsigned visual_depth;
   if (!get_visualtype_for_window(conn, window, &visual_depth)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   if (visual_depth != 24 && visual_depth != 32) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   *pSupported = true;
   return VK_SUCCESS;
}

static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
                             VkSurfaceCapabilitiesKHR *caps)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_cookie_t geom_cookie;
   xcb_generic_error_t *err;
   xcb_get_geometry_reply_t *geom;
   unsigned visual_depth;

   geom_cookie = xcb_get_geometry(conn, window);

   /* This does a round-trip.  This is why we do get_geometry first and
    * wait to read the reply until after we have a visual.
    */
   xcb_visualtype_t *visual =
      get_visualtype_for_window(conn, window, &visual_depth);

   if (!visual)
      return VK_ERROR_SURFACE_LOST_KHR;

   geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
   if (geom) {
      VkExtent2D extent = { geom->width, geom->height };
      caps->currentExtent = extent;
      caps->minImageExtent = extent;
      caps->maxImageExtent = extent;
   } else {
      /* This can happen if the client didn't wait for the configure event
       * to come back from the compositor.  In that case, we don't know the
       * size of the window so we just return valid "I don't know" stuff.
       */
      caps->currentExtent = (VkExtent2D) { -1, -1 };
      caps->minImageExtent = (VkExtent2D) { 1, 1 };
      /* This is the maximum supported size on Intel */
      caps->maxImageExtent = (VkExtent2D) { 1 << 14, 1 << 14 };
   }
   free(err);
   free(geom);

   if (visual_has_alpha(visual, visual_depth)) {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
   } else {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
   }

   /* For true mailbox mode, we need at least 4 images:
    *  1) One to scan out from
    *  2) One to have queued for scan-out
    *  3) One to be currently held by the X server
    *  4) One to render to
    */
   caps->minImageCount = 2;
   /* There is no real maximum */
   caps->maxImageCount = 0;

   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;
   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   return VK_SUCCESS;
}

static VkResult
x11_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
                              const void *info_next,
                              VkSurfaceCapabilities2KHR *caps)
{
   assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);

   return x11_surface_get_capabilities(icd_surface, &caps->surfaceCapabilities);
}

static VkResult
x11_surface_get_formats(VkIcdSurfaceBase *surface,
                        struct wsi_device *wsi_device,
                        uint32_t *pSurfaceFormatCount,
                        VkSurfaceFormatKHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);

   for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
      vk_outarray_append(&out, f) {
         f->format = formats[i];
         f->colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}
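
/* Illustration (not part of the driver): VK_OUTARRAY_MAKE and
 * vk_outarray_append implement the standard Vulkan two-call enumeration
 * protocol -- when the output pointer is NULL only the count is written,
 * and when the caller's array is too small the filled prefix is returned
 * together with VK_INCOMPLETE.  From the application side:
 *
 *    uint32_t count = 0;
 *    vkGetPhysicalDeviceSurfaceFormatsKHR(pdev, surf, &count, NULL);
 *    VkSurfaceFormatKHR *fmts = malloc(count * sizeof(*fmts));
 *    vkGetPhysicalDeviceSurfaceFormatsKHR(pdev, surf, &count, fmts);
 */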

static VkResult
x11_surface_get_formats2(VkIcdSurfaceBase *surface,
                         struct wsi_device *wsi_device,
                         const void *info_next,
                         uint32_t *pSurfaceFormatCount,
                         VkSurfaceFormat2KHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);

   for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
      vk_outarray_append(&out, f) {
         assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
         f->surfaceFormat.format = formats[i];
         f->surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

static VkResult
x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
                              uint32_t *pPresentModeCount,
                              VkPresentModeKHR *pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   return *pPresentModeCount < ARRAY_SIZE(present_modes) ?
      VK_INCOMPLETE : VK_SUCCESS;
}

static bool
x11_surface_is_local_to_gpu(struct wsi_device *wsi_dev,
                            xcb_connection_t *conn)
{
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_dev, conn);

   if (!wsi_conn)
      return false;

   if (!wsi_x11_check_for_dri3(wsi_conn))
      return false;

   if (!wsi_x11_check_dri3_compatible(wsi_dev, conn))
      return false;

   return true;
}
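
/* Illustration (not part of the driver): an application choosing among the
 * modes advertised by x11_surface_get_present_modes above can prefer
 * MAILBOX and fall back to FIFO, which the Vulkan spec requires every
 * presentation-capable implementation to support:
 *
 *    VkPresentModeKHR pick = VK_PRESENT_MODE_FIFO_KHR;
 *    for (uint32_t i = 0; i < count; i++) {
 *       if (modes[i] == VK_PRESENT_MODE_MAILBOX_KHR)
 *          pick = VK_PRESENT_MODE_MAILBOX_KHR;
 *    }
 */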

static VkResult
x11_surface_get_present_rectangles(VkIcdSurfaceBase *icd_surface,
                                   struct wsi_device *wsi_device,
                                   uint32_t* pRectCount,
                                   VkRect2D* pRects)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   VK_OUTARRAY_MAKE(out, pRects, pRectCount);

   if (x11_surface_is_local_to_gpu(wsi_device, conn)) {
      vk_outarray_append(&out, rect) {
         xcb_generic_error_t *err = NULL;
         xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(conn, window);
         xcb_get_geometry_reply_t *geom =
            xcb_get_geometry_reply(conn, geom_cookie, &err);
         free(err);
         if (geom) {
            *rect = (VkRect2D) {
               .offset = { 0, 0 },
               .extent = { geom->width, geom->height },
            };
         } else {
            /* This can happen if the client didn't wait for the configure
             * event to come back from the compositor.  In that case, we
             * don't know the size of the window so we just return valid
             * "I don't know" stuff.
             */
            *rect = (VkRect2D) {
               .offset = { 0, 0 },
               .extent = { -1, -1 },
            };
         }
         free(geom);
      }
   }

   return vk_outarray_status(&out);
}

VkResult wsi_create_xcb_surface(const VkAllocationCallbacks *pAllocator,
                                const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
                                VkSurfaceKHR *pSurface)
{
   VkIcdSurfaceXcb *surface;

   surface = vk_alloc(pAllocator, sizeof *surface, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
   surface->connection = pCreateInfo->connection;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}

VkResult wsi_create_xlib_surface(const VkAllocationCallbacks *pAllocator,
                                 const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
                                 VkSurfaceKHR *pSurface)
{
   VkIcdSurfaceXlib *surface;

   surface = vk_alloc(pAllocator, sizeof *surface, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XLIB;
   surface->dpy = pCreateInfo->dpy;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}

struct x11_image {
   struct wsi_image                          base;
   xcb_pixmap_t                              pixmap;
   bool                                      busy;
   struct xshmfence *                        shm_fence;
   uint32_t                                  sync_fence;
};

struct x11_swapchain {
   struct wsi_swapchain                      base;

   bool                                      has_dri3_modifiers;

   xcb_connection_t *                        conn;
   xcb_window_t                              window;
   xcb_gc_t                                  gc;
   uint32_t                                  depth;
   VkExtent2D                                extent;

   xcb_present_event_t                       event_id;
   xcb_special_event_t *                     special_event;
   uint64_t                                  send_sbc;
   uint64_t                                  last_present_msc;
   uint32_t                                  stamp;

   bool                                      threaded;
   VkResult                                  status;
   xcb_present_complete_mode_t               last_present_mode;
   struct wsi_queue                          present_queue;
   struct wsi_queue                          acquire_queue;
   pthread_t                                 queue_manager;

   struct x11_image                          images[0];
};
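
/* Note (illustration, not part of the driver): images[0] above is the
 * trailing-array idiom; the swapchain header and its image array come from
 * a single allocation, sized in x11_surface_create_swapchain below as
 *
 *    sizeof(struct x11_swapchain) + num_images * sizeof(struct x11_image)
 */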

/**
 * Update the swapchain status with the result of an operation, and return
 * the combined status. The chain status will eventually be returned from
 * AcquireNextImage and QueuePresent.
 *
 * We make sure to 'stick' more pessimistic statuses: an out-of-date error
 * is permanent once seen, and every subsequent call will return this. If
 * this has not been seen, success will be returned.
 */
static VkResult
x11_swapchain_result(struct x11_swapchain *chain, VkResult result)
{
   /* Prioritise returning existing errors for consistency. */
   if (chain->status < 0)
      return chain->status;

   /* If we have a new error, mark it as permanent on the chain and return. */
   if (result < 0) {
      chain->status = result;
      return result;
   }

   /* Return temporary errors, but don't persist them. */
   if (result == VK_TIMEOUT || result == VK_NOT_READY)
      return result;

   /* Suboptimal isn't an error, but is a status which sticks to the
    * swapchain and is always returned rather than success.
    */
   if (result == VK_SUBOPTIMAL_KHR) {
      chain->status = result;
      return result;
   }

   /* No changes, so return the last status. */
   return chain->status;
}

static struct wsi_image *
x11_get_wsi_image(struct wsi_swapchain *wsi_chain, uint32_t image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
   return &chain->images[image_index].base;
}
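
/* Worked examples (not part of the driver) of the sticky-status rules in
 * x11_swapchain_result above:
 *
 *    chain->status          result passed in          returned / new status
 *    VK_SUCCESS             VK_ERROR_OUT_OF_DATE_KHR  OUT_OF_DATE / OUT_OF_DATE
 *    VK_ERROR_OUT_OF_DATE   VK_SUCCESS                OUT_OF_DATE / OUT_OF_DATE
 *    VK_SUCCESS             VK_TIMEOUT                VK_TIMEOUT  / VK_SUCCESS
 *    VK_SUCCESS             VK_SUBOPTIMAL_KHR         SUBOPTIMAL  / SUBOPTIMAL
 */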

/**
 * Process an X11 Present event. Does not update chain->status.
 */
static VkResult
x11_handle_dri3_present_event(struct x11_swapchain *chain,
                              xcb_present_generic_event_t *event)
{
   switch (event->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *config = (void *) event;

      if (config->width != chain->extent.width ||
          config->height != chain->extent.height)
         return VK_ERROR_OUT_OF_DATE_KHR;

      break;
   }

   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *idle = (void *) event;

      for (unsigned i = 0; i < chain->base.image_count; i++) {
         if (chain->images[i].pixmap == idle->pixmap) {
            chain->images[i].busy = false;
            if (chain->threaded)
               wsi_queue_push(&chain->acquire_queue, i);
            break;
         }
      }

      break;
   }

   case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *complete = (void *) event;
      if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP)
         chain->last_present_msc = complete->msc;

      VkResult result = VK_SUCCESS;

      /* The winsys is now trying to flip directly and cannot due to our
       * configuration. Request the user reallocate.
       */
#ifdef HAVE_DRI3_MODIFIERS
      if (complete->mode == XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY &&
          chain->last_present_mode != XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY)
         result = VK_SUBOPTIMAL_KHR;
#endif

      /* When we go from flipping to copying, the odds are very likely that
       * we could reallocate in a more optimal way if we didn't have to care
       * about scanout, so we always do this.
       */
      if (complete->mode == XCB_PRESENT_COMPLETE_MODE_COPY &&
          chain->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP)
         result = VK_SUBOPTIMAL_KHR;

      chain->last_present_mode = complete->mode;
      return result;
   }

   default:
      break;
   }

   return VK_SUCCESS;
}


static uint64_t wsi_get_current_time(void)
{
   uint64_t current_time;
   struct timespec tv;

   clock_gettime(CLOCK_MONOTONIC, &tv);
   current_time = tv.tv_nsec + tv.tv_sec * 1000000000ull;
   return current_time;
}

static uint64_t wsi_get_absolute_timeout(uint64_t timeout)
{
   uint64_t current_time = wsi_get_current_time();

   timeout = MIN2(UINT64_MAX - current_time, timeout);

   return current_time + timeout;
}
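
/* Worked example (not part of the driver): the MIN2 clamp above keeps the
 * deadline from wrapping around.  With current_time = UINT64_MAX - 100 and
 * a caller timeout of 1000 ns, the timeout is clamped to 100, so
 * current_time + timeout saturates at UINT64_MAX instead of overflowing
 * into a deadline in the past.
 */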

static VkResult
x11_acquire_next_image_poll_x11(struct x11_swapchain *chain,
                                uint32_t *image_index, uint64_t timeout)
{
   xcb_generic_event_t *event;
   struct pollfd pfds;
   uint64_t atimeout;
   while (1) {
      for (uint32_t i = 0; i < chain->base.image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            xshmfence_await(chain->images[i].shm_fence);
            *image_index = i;
            chain->images[i].busy = true;
            return x11_swapchain_result(chain, VK_SUCCESS);
         }
      }

      xcb_flush(chain->conn);

      if (timeout == UINT64_MAX) {
         event = xcb_wait_for_special_event(chain->conn, chain->special_event);
         if (!event)
            return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);
      } else {
         event = xcb_poll_for_special_event(chain->conn, chain->special_event);
         if (!event) {
            int ret;
            if (timeout == 0)
               return x11_swapchain_result(chain, VK_NOT_READY);

            atimeout = wsi_get_absolute_timeout(timeout);

            pfds.fd = xcb_get_file_descriptor(chain->conn);
            pfds.events = POLLIN;
            ret = poll(&pfds, 1, timeout / 1000 / 1000);
            if (ret == 0)
               return x11_swapchain_result(chain, VK_TIMEOUT);
            if (ret == -1)
               return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);

            /* If a non-special event arrives, poll() will still report the
             * fd as readable, so recalculate the timeout now just in case.
             */
            uint64_t current_time = wsi_get_current_time();
            if (atimeout > current_time)
               timeout = atimeout - current_time;
            else
               timeout = 0;
            continue;
         }
      }

      /* Update the swapchain status here. We may catch non-fatal errors
       * here, in which case we need to update the status and continue.
       */
      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      free(event);
      if (result < 0)
         return x11_swapchain_result(chain, result);
   }
}

static VkResult
x11_acquire_next_image_from_queue(struct x11_swapchain *chain,
                                  uint32_t *image_index_out, uint64_t timeout)
{
   assert(chain->threaded);

   uint32_t image_index;
   VkResult result = wsi_queue_pull(&chain->acquire_queue,
                                    &image_index, timeout);
   if (result < 0 || result == VK_TIMEOUT) {
      /* On error, the thread has shut down, so it is safe to update
       * chain->status.  Calling x11_swapchain_result with VK_TIMEOUT won't
       * modify chain->status, so that is also safe.
       */
      return x11_swapchain_result(chain, result);
   } else if (chain->status < 0) {
      return chain->status;
   }

   assert(image_index < chain->base.image_count);
   xshmfence_await(chain->images[image_index].shm_fence);

   *image_index_out = image_index;

   return chain->status;
}

static VkResult
x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
                   uint32_t target_msc)
{
   struct x11_image *image = &chain->images[image_index];

   assert(image_index < chain->base.image_count);

   uint32_t options = XCB_PRESENT_OPTION_NONE;

   int64_t divisor = 0;
   int64_t remainder = 0;

   if (chain->base.present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR)
      options |= XCB_PRESENT_OPTION_ASYNC;

#ifdef HAVE_DRI3_MODIFIERS
   if (chain->has_dri3_modifiers)
      options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
#endif

   xshmfence_reset(image->shm_fence);

   ++chain->send_sbc;
   xcb_void_cookie_t cookie =
      xcb_present_pixmap(chain->conn,
                         chain->window,
                         image->pixmap,
                         (uint32_t) chain->send_sbc,
                         0,                            /* valid */
                         0,                            /* update */
                         0,                            /* x_off */
                         0,                            /* y_off */
                         XCB_NONE,                     /* target_crtc */
                         XCB_NONE,
                         image->sync_fence,
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
   xcb_discard_reply(chain->conn, cookie.sequence);
   image->busy = true;

   xcb_flush(chain->conn);

   return x11_swapchain_result(chain, VK_SUCCESS);
}

static VkResult
x11_acquire_next_image(struct wsi_swapchain *anv_chain,
                       const VkAcquireNextImageInfoKHR *info,
                       uint32_t *image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   uint64_t timeout = info->timeout;

   if (chain->threaded) {
      return x11_acquire_next_image_from_queue(chain, image_index, timeout);
   } else {
      return x11_acquire_next_image_poll_x11(chain, image_index, timeout);
   }
}

static VkResult
x11_queue_present(struct wsi_swapchain *anv_chain,
                  uint32_t image_index,
                  const VkPresentRegionKHR *damage)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   if (chain->threaded) {
      wsi_queue_push(&chain->present_queue, image_index);
      return chain->status;
   } else {
      return x11_present_to_x11(chain, image_index, 0);
   }
}

static void *
x11_manage_fifo_queues(void *state)
{
   struct x11_swapchain *chain = state;
   VkResult result = VK_SUCCESS;

   assert(chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR);

   while (chain->status >= 0) {
      /* It should be safe to unconditionally block here.  Later in the loop
       * we block until the previous present has landed on-screen.  At that
       * point, we should have received IDLE_NOTIFY on all images presented
       * before that point so the client should be able to acquire any image
       * other than the currently presented one.
       */
      uint32_t image_index = 0;
      result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
      assert(result != VK_TIMEOUT);
      if (result < 0) {
         goto fail;
      } else if (chain->status < 0) {
         /* The status can change underneath us if the swapchain is destroyed
          * from another thread.
          */
         return NULL;
      }

      uint64_t target_msc = chain->last_present_msc + 1;
      result = x11_present_to_x11(chain, image_index, target_msc);
      if (result < 0)
         goto fail;

      while (chain->last_present_msc < target_msc) {
         xcb_generic_event_t *event =
            xcb_wait_for_special_event(chain->conn, chain->special_event);
         if (!event) {
            result = VK_ERROR_OUT_OF_DATE_KHR;
            goto fail;
         }

         result = x11_handle_dri3_present_event(chain, (void *)event);
         free(event);
         if (result < 0)
            goto fail;
      }
   }

fail:
   x11_swapchain_result(chain, result);
   wsi_queue_push(&chain->acquire_queue, UINT32_MAX);

   return NULL;
}
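
/* Sketch (not part of the driver) of the threaded FIFO protocol above:
 *
 *    vkQueuePresentKHR      -> wsi_queue_push(present_queue, index)
 *    manager thread         -> pulls index, presents at last_present_msc + 1,
 *                              waits for COMPLETE_NOTIFY to advance the msc
 *    IDLE_NOTIFY            -> wsi_queue_push(acquire_queue, index)
 *    vkAcquireNextImageKHR  -> wsi_queue_pull(acquire_queue, ...)
 *
 * On any error the manager pushes the UINT32_MAX sentinel onto the acquire
 * queue so a blocked acquirer wakes up and observes chain->status.
 */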

static VkResult
x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
               const VkSwapchainCreateInfoKHR *pCreateInfo,
               const VkAllocationCallbacks* pAllocator,
               const uint64_t *const *modifiers,
               const uint32_t *num_modifiers,
               int num_tranches, struct x11_image *image)
{
   xcb_void_cookie_t cookie;
   VkResult result;
   uint32_t bpp = 32;

   if (chain->base.use_prime_blit) {
      bool use_modifier = num_tranches > 0;
      result = wsi_create_prime_image(&chain->base, pCreateInfo, use_modifier,
                                      &image->base);
   } else {
      result = wsi_create_native_image(&chain->base, pCreateInfo,
                                       num_tranches, num_modifiers, modifiers,
                                       &image->base);
   }
   if (result < 0)
      return result;

   image->pixmap = xcb_generate_id(chain->conn);

#ifdef HAVE_DRI3_MODIFIERS
   if (image->base.drm_modifier != DRM_FORMAT_MOD_INVALID) {
      /* If the image has a modifier, we must have DRI3 v1.2. */
      assert(chain->has_dri3_modifiers);

      cookie =
         xcb_dri3_pixmap_from_buffers_checked(chain->conn,
                                              image->pixmap,
                                              chain->window,
                                              image->base.num_planes,
                                              pCreateInfo->imageExtent.width,
                                              pCreateInfo->imageExtent.height,
                                              image->base.row_pitches[0],
                                              image->base.offsets[0],
                                              image->base.row_pitches[1],
                                              image->base.offsets[1],
                                              image->base.row_pitches[2],
                                              image->base.offsets[2],
                                              image->base.row_pitches[3],
                                              image->base.offsets[3],
                                              chain->depth, bpp,
                                              image->base.drm_modifier,
                                              image->base.fds);
   } else
#endif
   {
      /* Without passing modifiers, we can't have multi-plane RGB images. */
      assert(image->base.num_planes == 1);

      cookie =
         xcb_dri3_pixmap_from_buffer_checked(chain->conn,
                                             image->pixmap,
                                             chain->window,
                                             image->base.sizes[0],
                                             pCreateInfo->imageExtent.width,
                                             pCreateInfo->imageExtent.height,
                                             image->base.row_pitches[0],
                                             chain->depth, bpp,
                                             image->base.fds[0]);
   }

   xcb_discard_reply(chain->conn, cookie.sequence);

   /* XCB has now taken ownership of the FDs. */
   for (int i = 0; i < image->base.num_planes; i++)
      image->base.fds[i] = -1;

   int fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_pixmap;
   }

   image->shm_fence = xshmfence_map_shm(fence_fd);
   if (image->shm_fence == NULL) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_shmfence_alloc;
   }

   image->sync_fence = xcb_generate_id(chain->conn);
   xcb_dri3_fence_from_fd(chain->conn,
                          image->pixmap,
                          image->sync_fence,
                          false,
                          fence_fd);

   image->busy = false;
   xshmfence_trigger(image->shm_fence);

   return VK_SUCCESS;

fail_shmfence_alloc:
   close(fence_fd);

fail_pixmap:
   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_destroy_image(&chain->base, &image->base);

   return result;
}

static void
x11_image_finish(struct x11_swapchain *chain,
                 const VkAllocationCallbacks* pAllocator,
                 struct x11_image *image)
{
   xcb_void_cookie_t cookie;

   cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
   xcb_discard_reply(chain->conn, cookie.sequence);
   xshmfence_unmap_shm(image->shm_fence);

   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_destroy_image(&chain->base, &image->base);
}
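
/* Note (illustration, not part of the driver): each image carries a shared
 * xshmfence mapped by both the client and the X server.  As used here, it
 * is created untriggered and then triggered once by the client so the very
 * first acquire does not block; x11_present_to_x11 resets it right before
 * xcb_present_pixmap, the acquire paths await it, and the server is
 * expected to trigger it again when it is done reading from the pixmap.
 */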

static void
wsi_x11_get_dri3_modifiers(struct wsi_x11_connection *wsi_conn,
                           xcb_connection_t *conn, xcb_window_t window,
                           uint8_t depth, uint8_t bpp,
                           VkCompositeAlphaFlagsKHR vk_alpha,
                           uint64_t **modifiers_in, uint32_t *num_modifiers_in,
                           uint32_t *num_tranches_in,
                           const VkAllocationCallbacks *pAllocator)
{
   if (!wsi_conn->has_dri3_modifiers)
      goto out;

#ifdef HAVE_DRI3_MODIFIERS
   xcb_generic_error_t *error = NULL;
   xcb_dri3_get_supported_modifiers_cookie_t mod_cookie =
      xcb_dri3_get_supported_modifiers(conn, window, depth, bpp);
   xcb_dri3_get_supported_modifiers_reply_t *mod_reply =
      xcb_dri3_get_supported_modifiers_reply(conn, mod_cookie, &error);
   free(error);

   if (!mod_reply || (mod_reply->num_window_modifiers == 0 &&
                      mod_reply->num_screen_modifiers == 0)) {
      free(mod_reply);
      goto out;
   }

   uint32_t n = 0;
   uint32_t counts[2];
   uint64_t *modifiers[2];

   if (mod_reply->num_window_modifiers) {
      counts[n] = mod_reply->num_window_modifiers;
      modifiers[n] = vk_alloc(pAllocator,
                              counts[n] * sizeof(uint64_t),
                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!modifiers[n]) {
         free(mod_reply);
         goto out;
      }

      memcpy(modifiers[n],
             xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
             counts[n] * sizeof(uint64_t));
      n++;
   }

   if (mod_reply->num_screen_modifiers) {
      counts[n] = mod_reply->num_screen_modifiers;
      modifiers[n] = vk_alloc(pAllocator,
                              counts[n] * sizeof(uint64_t),
                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!modifiers[n]) {
         if (n > 0)
            vk_free(pAllocator, modifiers[0]);
         free(mod_reply);
         goto out;
      }

      memcpy(modifiers[n],
             xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
             counts[n] * sizeof(uint64_t));
      n++;
   }

   for (int i = 0; i < n; i++) {
      modifiers_in[i] = modifiers[i];
      num_modifiers_in[i] = counts[i];
   }
   *num_tranches_in = n;

   free(mod_reply);
   return;
#endif
out:
   *num_tranches_in = 0;
}
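
/* Note (illustration, not part of the driver): the function above returns
 * up to two "tranches" of format modifiers.  When both kinds are reported,
 * tranche 0 holds the window-specific modifiers and tranche 1 the
 * screen-wide ones, so callers try the window list first.  A hypothetical
 * result on an Intel setup might look like:
 *
 *    modifiers[0] = { I915_FORMAT_MOD_X_TILED, ... }   // window tranche
 *    modifiers[1] = { DRM_FORMAT_MOD_LINEAR, ... }     // screen tranche
 *    num_tranches = 2
 */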

static VkResult
x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
                      const VkAllocationCallbacks *pAllocator)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   xcb_void_cookie_t cookie;

   if (chain->threaded) {
      chain->status = VK_ERROR_OUT_OF_DATE_KHR;
      /* Push a UINT32_MAX to wake up the manager */
      wsi_queue_push(&chain->present_queue, UINT32_MAX);
      pthread_join(chain->queue_manager, NULL);
      wsi_queue_destroy(&chain->acquire_queue);
      wsi_queue_destroy(&chain->present_queue);
   }

   for (uint32_t i = 0; i < chain->base.image_count; i++)
      x11_image_finish(chain, pAllocator, &chain->images[i]);

   xcb_unregister_for_special_event(chain->conn, chain->special_event);
   cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
                                             chain->window,
                                             XCB_PRESENT_EVENT_MASK_NO_EVENT);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_swapchain_finish(&chain->base);

   vk_free(pAllocator, chain);

   return VK_SUCCESS;
}

static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                             VkDevice device,
                             struct wsi_device *wsi_device,
                             const VkSwapchainCreateInfoKHR *pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             struct wsi_swapchain **swapchain_out)
{
   struct x11_swapchain *chain;
   xcb_void_cookie_t cookie;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   const unsigned num_images = pCreateInfo->minImageCount;

   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* Check for whether or not we have a window up-front */
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_reply_t *geometry =
      xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);
   if (geometry == NULL)
      return VK_ERROR_SURFACE_LOST_KHR;
   const uint32_t bit_depth = geometry->depth;
   free(geometry);

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_alloc(pAllocator, size, 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   result = wsi_swapchain_init(wsi_device, &chain->base, device,
                               pCreateInfo, pAllocator);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   chain->base.destroy = x11_swapchain_destroy;
   chain->base.get_wsi_image = x11_get_wsi_image;
   chain->base.acquire_next_image = x11_acquire_next_image;
   chain->base.queue_present = x11_queue_present;
   chain->base.present_mode = pCreateInfo->presentMode;
   chain->base.image_count = num_images;
   chain->conn = conn;
   chain->window = window;
   chain->depth = bit_depth;
   chain->extent = pCreateInfo->imageExtent;
   chain->send_sbc = 0;
   chain->last_present_msc = 0;
   chain->threaded = false;
   chain->status = VK_SUCCESS;
   chain->has_dri3_modifiers = wsi_conn->has_dri3_modifiers;

   /* If we are reallocating from an old swapchain, then we inherit its
    * last completion mode, to ensure we don't get into reallocation
    * cycles.  If we are starting anew, we set 'COPY', as that is the only
    * mode which provokes reallocation when anything changes, to make
    * sure we have the most optimal allocation.
    */
   struct x11_swapchain *old_chain = (void *)(intptr_t) pCreateInfo->oldSwapchain;
   if (old_chain)
      chain->last_present_mode = old_chain->last_present_mode;
   else
      chain->last_present_mode = XCB_PRESENT_COMPLETE_MODE_COPY;

   if (!wsi_x11_check_dri3_compatible(wsi_device, conn))
      chain->base.use_prime_blit = true;

   chain->event_id = xcb_generate_id(chain->conn);
   xcb_present_select_input(chain->conn, chain->event_id, chain->window,
                            XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

   /* Create an XCB event queue to hold present events outside of the usual
    * application event queue
    */
   chain->special_event =
      xcb_register_for_special_xge(chain->conn, &xcb_present_id,
                                   chain->event_id, NULL);

   chain->gc = xcb_generate_id(chain->conn);
   if (!chain->gc) {
      /* FINISHME: Choose a better error. */
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_register;
   }

   cookie = xcb_create_gc(chain->conn,
                          chain->gc,
                          chain->window,
                          XCB_GC_GRAPHICS_EXPOSURES,
                          (uint32_t []) { 0 });
   xcb_discard_reply(chain->conn, cookie.sequence);

   uint64_t *modifiers[2] = {NULL, NULL};
   uint32_t num_modifiers[2] = {0, 0};
   uint32_t num_tranches = 0;
   if (wsi_device->supports_modifiers)
      wsi_x11_get_dri3_modifiers(wsi_conn, conn, window, chain->depth, 32,
                                 pCreateInfo->compositeAlpha,
                                 modifiers, num_modifiers, &num_tranches,
                                 pAllocator);

   uint32_t image = 0;
   for (; image < chain->base.image_count; image++) {
      result = x11_image_init(device, chain, pCreateInfo, pAllocator,
                              (const uint64_t *const *)modifiers,
                              num_modifiers, num_tranches,
                              &chain->images[image]);
      if (result != VK_SUCCESS)
         goto fail_init_images;
   }

   if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
      chain->threaded = true;

      /* Initialize our queues.  We make them base.image_count + 1 because
       * we will occasionally use UINT32_MAX to signal the other thread that
       * an error has occurred and we don't want an overflow.
       */
      int ret;
      ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
      if (ret) {
         goto fail_init_images;
      }

      ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
      if (ret) {
         wsi_queue_destroy(&chain->acquire_queue);
         goto fail_init_images;
      }

      for (unsigned i = 0; i < chain->base.image_count; i++)
         wsi_queue_push(&chain->acquire_queue, i);

      ret = pthread_create(&chain->queue_manager, NULL,
                           x11_manage_fifo_queues, chain);
      if (ret) {
         wsi_queue_destroy(&chain->present_queue);
         wsi_queue_destroy(&chain->acquire_queue);
         goto fail_init_images;
      }
   }

   for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
      vk_free(pAllocator, modifiers[i]);
   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail_init_images:
   for (uint32_t j = 0; j < image; j++)
      x11_image_finish(chain, pAllocator, &chain->images[j]);

   for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
      vk_free(pAllocator, modifiers[i]);

fail_register:
   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   wsi_swapchain_finish(&chain->base);

fail_alloc:
   vk_free(pAllocator, chain);

   return result;
}

VkResult
wsi_x11_init_wsi(struct wsi_device *wsi_device,
                 const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   int ret = pthread_mutex_init(&wsi->mutex, NULL);
   if (ret != 0) {
      if (ret == ENOMEM) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      } else {
         /* FINISHME: Choose a better error. */
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      }

      goto fail_alloc;
   }

   wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                              _mesa_key_pointer_equal);
   if (!wsi->connections) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_mutex;
   }

   wsi->base.get_support = x11_surface_get_support;
   wsi->base.get_capabilities2 = x11_surface_get_capabilities2;
   wsi->base.get_formats = x11_surface_get_formats;
   wsi->base.get_formats2 = x11_surface_get_formats2;
   wsi->base.get_present_modes = x11_surface_get_present_modes;
   wsi->base.get_present_rectangles = x11_surface_get_present_rectangles;
   wsi->base.create_swapchain = x11_surface_create_swapchain;

   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   pthread_mutex_destroy(&wsi->mutex);
fail_alloc:
   vk_free(alloc, wsi);
fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;

   return result;
}

void
wsi_x11_finish_wsi(struct wsi_device *wsi_device,
                   const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];

   if (wsi) {
      hash_table_foreach(wsi->connections, entry)
         wsi_x11_connection_destroy(wsi_device, entry->data);

      _mesa_hash_table_destroy(wsi->connections, NULL);

      pthread_mutex_destroy(&wsi->mutex);

      vk_free(alloc, wsi);
   }
}