/*
 * Copyright © 2013 Keith Packard
 * Copyright © 2015 Boyan Ding
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
22b8e80941Smrg */ 23b8e80941Smrg 24b8e80941Smrg#include <fcntl.h> 25b8e80941Smrg#include <stdlib.h> 26b8e80941Smrg#include <unistd.h> 27b8e80941Smrg#include <string.h> 28b8e80941Smrg 29b8e80941Smrg#include <X11/xshmfence.h> 30b8e80941Smrg#include <xcb/xcb.h> 31b8e80941Smrg#include <xcb/dri3.h> 32b8e80941Smrg#include <xcb/present.h> 33b8e80941Smrg 34b8e80941Smrg#include <X11/Xlib-xcb.h> 35b8e80941Smrg 36b8e80941Smrg#include "loader_dri3_helper.h" 37b8e80941Smrg#include "util/macros.h" 38b8e80941Smrg#include "drm-uapi/drm_fourcc.h" 39b8e80941Smrg 40b8e80941Smrg/* From xmlpool/options.h, user exposed so should be stable */ 41b8e80941Smrg#define DRI_CONF_VBLANK_NEVER 0 42b8e80941Smrg#define DRI_CONF_VBLANK_DEF_INTERVAL_0 1 43b8e80941Smrg#define DRI_CONF_VBLANK_DEF_INTERVAL_1 2 44b8e80941Smrg#define DRI_CONF_VBLANK_ALWAYS_SYNC 3 45b8e80941Smrg 46b8e80941Smrg/** 47b8e80941Smrg * A cached blit context. 48b8e80941Smrg */ 49b8e80941Smrgstruct loader_dri3_blit_context { 50b8e80941Smrg mtx_t mtx; 51b8e80941Smrg __DRIcontext *ctx; 52b8e80941Smrg __DRIscreen *cur_screen; 53b8e80941Smrg const __DRIcoreExtension *core; 54b8e80941Smrg}; 55b8e80941Smrg 56b8e80941Smrg/* For simplicity we maintain the cache only for a single screen at a time */ 57b8e80941Smrgstatic struct loader_dri3_blit_context blit_context = { 58b8e80941Smrg _MTX_INITIALIZER_NP, NULL 59b8e80941Smrg}; 60b8e80941Smrg 61b8e80941Smrgstatic void 62b8e80941Smrgdri3_flush_present_events(struct loader_dri3_drawable *draw); 63b8e80941Smrg 64b8e80941Smrgstatic struct loader_dri3_buffer * 65b8e80941Smrgdri3_find_back_alloc(struct loader_dri3_drawable *draw); 66b8e80941Smrg 67b8e80941Smrgstatic xcb_screen_t * 68b8e80941Smrgget_screen_for_root(xcb_connection_t *conn, xcb_window_t root) 69b8e80941Smrg{ 70b8e80941Smrg xcb_screen_iterator_t screen_iter = 71b8e80941Smrg xcb_setup_roots_iterator(xcb_get_setup(conn)); 72b8e80941Smrg 73b8e80941Smrg for (; screen_iter.rem; xcb_screen_next (&screen_iter)) { 74b8e80941Smrg if 
(screen_iter.data->root == root) 75b8e80941Smrg return screen_iter.data; 76b8e80941Smrg } 77b8e80941Smrg 78b8e80941Smrg return NULL; 79b8e80941Smrg} 80b8e80941Smrg 81b8e80941Smrgstatic xcb_visualtype_t * 82b8e80941Smrgget_xcb_visualtype_for_depth(struct loader_dri3_drawable *draw, int depth) 83b8e80941Smrg{ 84b8e80941Smrg xcb_visualtype_iterator_t visual_iter; 85b8e80941Smrg xcb_screen_t *screen = draw->screen; 86b8e80941Smrg xcb_depth_iterator_t depth_iter; 87b8e80941Smrg 88b8e80941Smrg if (!screen) 89b8e80941Smrg return NULL; 90b8e80941Smrg 91b8e80941Smrg depth_iter = xcb_screen_allowed_depths_iterator(screen); 92b8e80941Smrg for (; depth_iter.rem; xcb_depth_next(&depth_iter)) { 93b8e80941Smrg if (depth_iter.data->depth != depth) 94b8e80941Smrg continue; 95b8e80941Smrg 96b8e80941Smrg visual_iter = xcb_depth_visuals_iterator(depth_iter.data); 97b8e80941Smrg if (visual_iter.rem) 98b8e80941Smrg return visual_iter.data; 99b8e80941Smrg } 100b8e80941Smrg 101b8e80941Smrg return NULL; 102b8e80941Smrg} 103b8e80941Smrg 104b8e80941Smrg/* Sets the adaptive sync window property state. 
*/ 105b8e80941Smrgstatic void 106b8e80941Smrgset_adaptive_sync_property(xcb_connection_t *conn, xcb_drawable_t drawable, 107b8e80941Smrg uint32_t state) 108b8e80941Smrg{ 109b8e80941Smrg static char const name[] = "_VARIABLE_REFRESH"; 110b8e80941Smrg xcb_intern_atom_cookie_t cookie; 111b8e80941Smrg xcb_intern_atom_reply_t* reply; 112b8e80941Smrg xcb_void_cookie_t check; 113b8e80941Smrg 114b8e80941Smrg cookie = xcb_intern_atom(conn, 0, strlen(name), name); 115b8e80941Smrg reply = xcb_intern_atom_reply(conn, cookie, NULL); 116b8e80941Smrg if (reply == NULL) 117b8e80941Smrg return; 118b8e80941Smrg 119b8e80941Smrg if (state) 120b8e80941Smrg check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE, 121b8e80941Smrg drawable, reply->atom, 122b8e80941Smrg XCB_ATOM_CARDINAL, 32, 1, &state); 123b8e80941Smrg else 124b8e80941Smrg check = xcb_delete_property_checked(conn, drawable, reply->atom); 125b8e80941Smrg 126b8e80941Smrg xcb_discard_reply(conn, check.sequence); 127b8e80941Smrg free(reply); 128b8e80941Smrg} 129b8e80941Smrg 130b8e80941Smrg/* Get red channel mask for given drawable at given depth. */ 131b8e80941Smrgstatic unsigned int 132b8e80941Smrgdri3_get_red_mask_for_depth(struct loader_dri3_drawable *draw, int depth) 133b8e80941Smrg{ 134b8e80941Smrg xcb_visualtype_t *visual = get_xcb_visualtype_for_depth(draw, depth); 135b8e80941Smrg 136b8e80941Smrg if (visual) 137b8e80941Smrg return visual->red_mask; 138b8e80941Smrg 139b8e80941Smrg return 0; 140b8e80941Smrg} 141b8e80941Smrg 142b8e80941Smrg/** 143b8e80941Smrg * Do we have blit functionality in the image blit extension? 144b8e80941Smrg * 145b8e80941Smrg * \param draw[in] The drawable intended to blit from / to. 146b8e80941Smrg * \return true if we have blit functionality. false otherwise. 
147b8e80941Smrg */ 148b8e80941Smrgstatic bool loader_dri3_have_image_blit(const struct loader_dri3_drawable *draw) 149b8e80941Smrg{ 150b8e80941Smrg return draw->ext->image->base.version >= 9 && 151b8e80941Smrg draw->ext->image->blitImage != NULL; 152b8e80941Smrg} 153b8e80941Smrg 154b8e80941Smrg/** 155b8e80941Smrg * Get and lock (for use with the current thread) a dri context associated 156b8e80941Smrg * with the drawable's dri screen. The context is intended to be used with 157b8e80941Smrg * the dri image extension's blitImage method. 158b8e80941Smrg * 159b8e80941Smrg * \param draw[in] Pointer to the drawable whose dri screen we want a 160b8e80941Smrg * dri context for. 161b8e80941Smrg * \return A dri context or NULL if context creation failed. 162b8e80941Smrg * 163b8e80941Smrg * When the caller is done with the context (even if the context returned was 164b8e80941Smrg * NULL), the caller must call loader_dri3_blit_context_put. 165b8e80941Smrg */ 166b8e80941Smrgstatic __DRIcontext * 167b8e80941Smrgloader_dri3_blit_context_get(struct loader_dri3_drawable *draw) 168b8e80941Smrg{ 169b8e80941Smrg mtx_lock(&blit_context.mtx); 170b8e80941Smrg 171b8e80941Smrg if (blit_context.ctx && blit_context.cur_screen != draw->dri_screen) { 172b8e80941Smrg blit_context.core->destroyContext(blit_context.ctx); 173b8e80941Smrg blit_context.ctx = NULL; 174b8e80941Smrg } 175b8e80941Smrg 176b8e80941Smrg if (!blit_context.ctx) { 177b8e80941Smrg blit_context.ctx = draw->ext->core->createNewContext(draw->dri_screen, 178b8e80941Smrg NULL, NULL, NULL); 179b8e80941Smrg blit_context.cur_screen = draw->dri_screen; 180b8e80941Smrg blit_context.core = draw->ext->core; 181b8e80941Smrg } 182b8e80941Smrg 183b8e80941Smrg return blit_context.ctx; 184b8e80941Smrg} 185b8e80941Smrg 186b8e80941Smrg/** 187b8e80941Smrg * Release (for use with other threads) a dri context previously obtained using 188b8e80941Smrg * loader_dri3_blit_context_get. 
189b8e80941Smrg */ 190b8e80941Smrgstatic void 191b8e80941Smrgloader_dri3_blit_context_put(void) 192b8e80941Smrg{ 193b8e80941Smrg mtx_unlock(&blit_context.mtx); 194b8e80941Smrg} 195b8e80941Smrg 196b8e80941Smrg/** 197b8e80941Smrg * Blit (parts of) the contents of a DRI image to another dri image 198b8e80941Smrg * 199b8e80941Smrg * \param draw[in] The drawable which owns the images. 200b8e80941Smrg * \param dst[in] The destination image. 201b8e80941Smrg * \param src[in] The source image. 202b8e80941Smrg * \param dstx0[in] Start destination coordinate. 203b8e80941Smrg * \param dsty0[in] Start destination coordinate. 204b8e80941Smrg * \param width[in] Blit width. 205b8e80941Smrg * \param height[in] Blit height. 206b8e80941Smrg * \param srcx0[in] Start source coordinate. 207b8e80941Smrg * \param srcy0[in] Start source coordinate. 208b8e80941Smrg * \param flush_flag[in] Image blit flush flag. 209b8e80941Smrg * \return true iff successful. 210b8e80941Smrg */ 211b8e80941Smrgstatic bool 212b8e80941Smrgloader_dri3_blit_image(struct loader_dri3_drawable *draw, 213b8e80941Smrg __DRIimage *dst, __DRIimage *src, 214b8e80941Smrg int dstx0, int dsty0, int width, int height, 215b8e80941Smrg int srcx0, int srcy0, int flush_flag) 216b8e80941Smrg{ 217b8e80941Smrg __DRIcontext *dri_context; 218b8e80941Smrg bool use_blit_context = false; 219b8e80941Smrg 220b8e80941Smrg if (!loader_dri3_have_image_blit(draw)) 221b8e80941Smrg return false; 222b8e80941Smrg 223b8e80941Smrg dri_context = draw->vtable->get_dri_context(draw); 224b8e80941Smrg 225b8e80941Smrg if (!dri_context || !draw->vtable->in_current_context(draw)) { 226b8e80941Smrg dri_context = loader_dri3_blit_context_get(draw); 227b8e80941Smrg use_blit_context = true; 228b8e80941Smrg flush_flag |= __BLIT_FLAG_FLUSH; 229b8e80941Smrg } 230b8e80941Smrg 231b8e80941Smrg if (dri_context) 232b8e80941Smrg draw->ext->image->blitImage(dri_context, dst, src, dstx0, dsty0, 233b8e80941Smrg width, height, srcx0, srcy0, 234b8e80941Smrg width, height, 
flush_flag); 235b8e80941Smrg 236b8e80941Smrg if (use_blit_context) 237b8e80941Smrg loader_dri3_blit_context_put(); 238b8e80941Smrg 239b8e80941Smrg return dri_context != NULL; 240b8e80941Smrg} 241b8e80941Smrg 242b8e80941Smrgstatic inline void 243b8e80941Smrgdri3_fence_reset(xcb_connection_t *c, struct loader_dri3_buffer *buffer) 244b8e80941Smrg{ 245b8e80941Smrg xshmfence_reset(buffer->shm_fence); 246b8e80941Smrg} 247b8e80941Smrg 248b8e80941Smrgstatic inline void 249b8e80941Smrgdri3_fence_set(struct loader_dri3_buffer *buffer) 250b8e80941Smrg{ 251b8e80941Smrg xshmfence_trigger(buffer->shm_fence); 252b8e80941Smrg} 253b8e80941Smrg 254b8e80941Smrgstatic inline void 255b8e80941Smrgdri3_fence_trigger(xcb_connection_t *c, struct loader_dri3_buffer *buffer) 256b8e80941Smrg{ 257b8e80941Smrg xcb_sync_trigger_fence(c, buffer->sync_fence); 258b8e80941Smrg} 259b8e80941Smrg 260b8e80941Smrgstatic inline void 261b8e80941Smrgdri3_fence_await(xcb_connection_t *c, struct loader_dri3_drawable *draw, 262b8e80941Smrg struct loader_dri3_buffer *buffer) 263b8e80941Smrg{ 264b8e80941Smrg xcb_flush(c); 265b8e80941Smrg xshmfence_await(buffer->shm_fence); 266b8e80941Smrg if (draw) { 267b8e80941Smrg mtx_lock(&draw->mtx); 268b8e80941Smrg dri3_flush_present_events(draw); 269b8e80941Smrg mtx_unlock(&draw->mtx); 270b8e80941Smrg } 271b8e80941Smrg} 272b8e80941Smrg 273b8e80941Smrgstatic void 274b8e80941Smrgdri3_update_num_back(struct loader_dri3_drawable *draw) 275b8e80941Smrg{ 276b8e80941Smrg if (draw->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP) 277b8e80941Smrg draw->num_back = 3; 278b8e80941Smrg else 279b8e80941Smrg draw->num_back = 2; 280b8e80941Smrg} 281b8e80941Smrg 282b8e80941Smrgvoid 283b8e80941Smrgloader_dri3_set_swap_interval(struct loader_dri3_drawable *draw, int interval) 284b8e80941Smrg{ 285b8e80941Smrg draw->swap_interval = interval; 286b8e80941Smrg} 287b8e80941Smrg 288b8e80941Smrg/** dri3_free_render_buffer 289b8e80941Smrg * 290b8e80941Smrg * Free everything associated with one 
render buffer including pixmap, fence 291b8e80941Smrg * stuff and the driver image 292b8e80941Smrg */ 293b8e80941Smrgstatic void 294b8e80941Smrgdri3_free_render_buffer(struct loader_dri3_drawable *draw, 295b8e80941Smrg struct loader_dri3_buffer *buffer) 296b8e80941Smrg{ 297b8e80941Smrg if (buffer->own_pixmap) 298b8e80941Smrg xcb_free_pixmap(draw->conn, buffer->pixmap); 299b8e80941Smrg xcb_sync_destroy_fence(draw->conn, buffer->sync_fence); 300b8e80941Smrg xshmfence_unmap_shm(buffer->shm_fence); 301b8e80941Smrg draw->ext->image->destroyImage(buffer->image); 302b8e80941Smrg if (buffer->linear_buffer) 303b8e80941Smrg draw->ext->image->destroyImage(buffer->linear_buffer); 304b8e80941Smrg free(buffer); 305b8e80941Smrg} 306b8e80941Smrg 307b8e80941Smrgvoid 308b8e80941Smrgloader_dri3_drawable_fini(struct loader_dri3_drawable *draw) 309b8e80941Smrg{ 310b8e80941Smrg int i; 311b8e80941Smrg 312b8e80941Smrg draw->ext->core->destroyDrawable(draw->dri_drawable); 313b8e80941Smrg 314b8e80941Smrg for (i = 0; i < ARRAY_SIZE(draw->buffers); i++) { 315b8e80941Smrg if (draw->buffers[i]) 316b8e80941Smrg dri3_free_render_buffer(draw, draw->buffers[i]); 317b8e80941Smrg } 318b8e80941Smrg 319b8e80941Smrg if (draw->special_event) { 320b8e80941Smrg xcb_void_cookie_t cookie = 321b8e80941Smrg xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable, 322b8e80941Smrg XCB_PRESENT_EVENT_MASK_NO_EVENT); 323b8e80941Smrg 324b8e80941Smrg xcb_discard_reply(draw->conn, cookie.sequence); 325b8e80941Smrg xcb_unregister_for_special_event(draw->conn, draw->special_event); 326b8e80941Smrg } 327b8e80941Smrg 328b8e80941Smrg cnd_destroy(&draw->event_cnd); 329b8e80941Smrg mtx_destroy(&draw->mtx); 330b8e80941Smrg} 331b8e80941Smrg 332b8e80941Smrgint 333b8e80941Smrgloader_dri3_drawable_init(xcb_connection_t *conn, 334b8e80941Smrg xcb_drawable_t drawable, 335b8e80941Smrg __DRIscreen *dri_screen, 336b8e80941Smrg bool is_different_gpu, 337b8e80941Smrg bool multiplanes_available, 338b8e80941Smrg const 
__DRIconfig *dri_config, 339b8e80941Smrg struct loader_dri3_extensions *ext, 340b8e80941Smrg const struct loader_dri3_vtable *vtable, 341b8e80941Smrg struct loader_dri3_drawable *draw) 342b8e80941Smrg{ 343b8e80941Smrg xcb_get_geometry_cookie_t cookie; 344b8e80941Smrg xcb_get_geometry_reply_t *reply; 345b8e80941Smrg xcb_generic_error_t *error; 346b8e80941Smrg GLint vblank_mode = DRI_CONF_VBLANK_DEF_INTERVAL_1; 347b8e80941Smrg int swap_interval; 348b8e80941Smrg 349b8e80941Smrg draw->conn = conn; 350b8e80941Smrg draw->ext = ext; 351b8e80941Smrg draw->vtable = vtable; 352b8e80941Smrg draw->drawable = drawable; 353b8e80941Smrg draw->dri_screen = dri_screen; 354b8e80941Smrg draw->is_different_gpu = is_different_gpu; 355b8e80941Smrg draw->multiplanes_available = multiplanes_available; 356b8e80941Smrg 357b8e80941Smrg draw->have_back = 0; 358b8e80941Smrg draw->have_fake_front = 0; 359b8e80941Smrg draw->first_init = true; 360b8e80941Smrg draw->adaptive_sync = false; 361b8e80941Smrg draw->adaptive_sync_active = false; 362b8e80941Smrg 363b8e80941Smrg draw->cur_blit_source = -1; 364b8e80941Smrg draw->back_format = __DRI_IMAGE_FORMAT_NONE; 365b8e80941Smrg mtx_init(&draw->mtx, mtx_plain); 366b8e80941Smrg cnd_init(&draw->event_cnd); 367b8e80941Smrg 368b8e80941Smrg if (draw->ext->config) { 369b8e80941Smrg unsigned char adaptive_sync = 0; 370b8e80941Smrg 371b8e80941Smrg draw->ext->config->configQueryi(draw->dri_screen, 372b8e80941Smrg "vblank_mode", &vblank_mode); 373b8e80941Smrg 374b8e80941Smrg draw->ext->config->configQueryb(draw->dri_screen, 375b8e80941Smrg "adaptive_sync", 376b8e80941Smrg &adaptive_sync); 377b8e80941Smrg 378b8e80941Smrg draw->adaptive_sync = adaptive_sync; 379b8e80941Smrg } 380b8e80941Smrg 381b8e80941Smrg if (!draw->adaptive_sync) 382b8e80941Smrg set_adaptive_sync_property(conn, draw->drawable, false); 383b8e80941Smrg 384b8e80941Smrg switch (vblank_mode) { 385b8e80941Smrg case DRI_CONF_VBLANK_NEVER: 386b8e80941Smrg case DRI_CONF_VBLANK_DEF_INTERVAL_0: 
387b8e80941Smrg swap_interval = 0; 388b8e80941Smrg break; 389b8e80941Smrg case DRI_CONF_VBLANK_DEF_INTERVAL_1: 390b8e80941Smrg case DRI_CONF_VBLANK_ALWAYS_SYNC: 391b8e80941Smrg default: 392b8e80941Smrg swap_interval = 1; 393b8e80941Smrg break; 394b8e80941Smrg } 395b8e80941Smrg draw->swap_interval = swap_interval; 396b8e80941Smrg 397b8e80941Smrg dri3_update_num_back(draw); 398b8e80941Smrg 399b8e80941Smrg /* Create a new drawable */ 400b8e80941Smrg draw->dri_drawable = 401b8e80941Smrg draw->ext->image_driver->createNewDrawable(dri_screen, 402b8e80941Smrg dri_config, 403b8e80941Smrg draw); 404b8e80941Smrg 405b8e80941Smrg if (!draw->dri_drawable) 406b8e80941Smrg return 1; 407b8e80941Smrg 408b8e80941Smrg cookie = xcb_get_geometry(draw->conn, draw->drawable); 409b8e80941Smrg reply = xcb_get_geometry_reply(draw->conn, cookie, &error); 410b8e80941Smrg if (reply == NULL || error != NULL) { 411b8e80941Smrg draw->ext->core->destroyDrawable(draw->dri_drawable); 412b8e80941Smrg return 1; 413b8e80941Smrg } 414b8e80941Smrg 415b8e80941Smrg draw->screen = get_screen_for_root(draw->conn, reply->root); 416b8e80941Smrg draw->width = reply->width; 417b8e80941Smrg draw->height = reply->height; 418b8e80941Smrg draw->depth = reply->depth; 419b8e80941Smrg draw->vtable->set_drawable_size(draw, draw->width, draw->height); 420b8e80941Smrg free(reply); 421b8e80941Smrg 422b8e80941Smrg draw->swap_method = __DRI_ATTRIB_SWAP_UNDEFINED; 423b8e80941Smrg if (draw->ext->core->base.version >= 2) { 424b8e80941Smrg (void )draw->ext->core->getConfigAttrib(dri_config, 425b8e80941Smrg __DRI_ATTRIB_SWAP_METHOD, 426b8e80941Smrg &draw->swap_method); 427b8e80941Smrg } 428b8e80941Smrg 429b8e80941Smrg /* 430b8e80941Smrg * Make sure server has the same swap interval we do for the new 431b8e80941Smrg * drawable. 
432b8e80941Smrg */ 433b8e80941Smrg loader_dri3_set_swap_interval(draw, swap_interval); 434b8e80941Smrg 435b8e80941Smrg return 0; 436b8e80941Smrg} 437b8e80941Smrg 438b8e80941Smrg/* 439b8e80941Smrg * Process one Present event 440b8e80941Smrg */ 441b8e80941Smrgstatic void 442b8e80941Smrgdri3_handle_present_event(struct loader_dri3_drawable *draw, 443b8e80941Smrg xcb_present_generic_event_t *ge) 444b8e80941Smrg{ 445b8e80941Smrg switch (ge->evtype) { 446b8e80941Smrg case XCB_PRESENT_CONFIGURE_NOTIFY: { 447b8e80941Smrg xcb_present_configure_notify_event_t *ce = (void *) ge; 448b8e80941Smrg 449b8e80941Smrg draw->width = ce->width; 450b8e80941Smrg draw->height = ce->height; 451b8e80941Smrg draw->vtable->set_drawable_size(draw, draw->width, draw->height); 452b8e80941Smrg draw->ext->flush->invalidate(draw->dri_drawable); 453b8e80941Smrg break; 454b8e80941Smrg } 455b8e80941Smrg case XCB_PRESENT_COMPLETE_NOTIFY: { 456b8e80941Smrg xcb_present_complete_notify_event_t *ce = (void *) ge; 457b8e80941Smrg 458b8e80941Smrg /* Compute the processed SBC number from the received 32-bit serial number 459b8e80941Smrg * merged with the upper 32-bits of the sent 64-bit serial number while 460b8e80941Smrg * checking for wrap. 
461b8e80941Smrg */ 462b8e80941Smrg if (ce->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) { 463b8e80941Smrg uint64_t recv_sbc = (draw->send_sbc & 0xffffffff00000000LL) | ce->serial; 464b8e80941Smrg 465b8e80941Smrg /* Only assume wraparound if that results in exactly the previous 466b8e80941Smrg * SBC + 1, otherwise ignore received SBC > sent SBC (those are 467b8e80941Smrg * probably from a previous loader_dri3_drawable instance) to avoid 468b8e80941Smrg * calculating bogus target MSC values in loader_dri3_swap_buffers_msc 469b8e80941Smrg */ 470b8e80941Smrg if (recv_sbc <= draw->send_sbc) 471b8e80941Smrg draw->recv_sbc = recv_sbc; 472b8e80941Smrg else if (recv_sbc == (draw->recv_sbc + 0x100000001ULL)) 473b8e80941Smrg draw->recv_sbc = recv_sbc - 0x100000000ULL; 474b8e80941Smrg 475b8e80941Smrg /* When moving from flip to copy, we assume that we can allocate in 476b8e80941Smrg * a more optimal way if we don't need to cater for the display 477b8e80941Smrg * controller. 478b8e80941Smrg */ 479b8e80941Smrg if (ce->mode == XCB_PRESENT_COMPLETE_MODE_COPY && 480b8e80941Smrg draw->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP) { 481b8e80941Smrg for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) { 482b8e80941Smrg if (draw->buffers[b]) 483b8e80941Smrg draw->buffers[b]->reallocate = true; 484b8e80941Smrg } 485b8e80941Smrg } 486b8e80941Smrg 487b8e80941Smrg /* If the server tells us that our allocation is suboptimal, we 488b8e80941Smrg * reallocate once. 
489b8e80941Smrg */ 490b8e80941Smrg#ifdef HAVE_DRI3_MODIFIERS 491b8e80941Smrg if (ce->mode == XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY && 492b8e80941Smrg draw->last_present_mode != ce->mode) { 493b8e80941Smrg for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) { 494b8e80941Smrg if (draw->buffers[b]) 495b8e80941Smrg draw->buffers[b]->reallocate = true; 496b8e80941Smrg } 497b8e80941Smrg } 498b8e80941Smrg#endif 499b8e80941Smrg draw->last_present_mode = ce->mode; 500b8e80941Smrg 501b8e80941Smrg if (draw->vtable->show_fps) 502b8e80941Smrg draw->vtable->show_fps(draw, ce->ust); 503b8e80941Smrg 504b8e80941Smrg draw->ust = ce->ust; 505b8e80941Smrg draw->msc = ce->msc; 506b8e80941Smrg } else if (ce->serial == draw->eid) { 507b8e80941Smrg draw->notify_ust = ce->ust; 508b8e80941Smrg draw->notify_msc = ce->msc; 509b8e80941Smrg } 510b8e80941Smrg break; 511b8e80941Smrg } 512b8e80941Smrg case XCB_PRESENT_EVENT_IDLE_NOTIFY: { 513b8e80941Smrg xcb_present_idle_notify_event_t *ie = (void *) ge; 514b8e80941Smrg int b; 515b8e80941Smrg 516b8e80941Smrg for (b = 0; b < ARRAY_SIZE(draw->buffers); b++) { 517b8e80941Smrg struct loader_dri3_buffer *buf = draw->buffers[b]; 518b8e80941Smrg 519b8e80941Smrg if (buf && buf->pixmap == ie->pixmap) 520b8e80941Smrg buf->busy = 0; 521b8e80941Smrg } 522b8e80941Smrg break; 523b8e80941Smrg } 524b8e80941Smrg } 525b8e80941Smrg free(ge); 526b8e80941Smrg} 527b8e80941Smrg 528b8e80941Smrgstatic bool 529b8e80941Smrgdri3_wait_for_event_locked(struct loader_dri3_drawable *draw) 530b8e80941Smrg{ 531b8e80941Smrg xcb_generic_event_t *ev; 532b8e80941Smrg xcb_present_generic_event_t *ge; 533b8e80941Smrg 534b8e80941Smrg xcb_flush(draw->conn); 535b8e80941Smrg 536b8e80941Smrg /* Only have one thread waiting for events at a time */ 537b8e80941Smrg if (draw->has_event_waiter) { 538b8e80941Smrg cnd_wait(&draw->event_cnd, &draw->mtx); 539b8e80941Smrg /* Another thread has updated the protected info, so retest. 
*/ 540b8e80941Smrg return true; 541b8e80941Smrg } else { 542b8e80941Smrg draw->has_event_waiter = true; 543b8e80941Smrg /* Allow other threads access to the drawable while we're waiting. */ 544b8e80941Smrg mtx_unlock(&draw->mtx); 545b8e80941Smrg ev = xcb_wait_for_special_event(draw->conn, draw->special_event); 546b8e80941Smrg mtx_lock(&draw->mtx); 547b8e80941Smrg draw->has_event_waiter = false; 548b8e80941Smrg cnd_broadcast(&draw->event_cnd); 549b8e80941Smrg } 550b8e80941Smrg if (!ev) 551b8e80941Smrg return false; 552b8e80941Smrg ge = (void *) ev; 553b8e80941Smrg dri3_handle_present_event(draw, ge); 554b8e80941Smrg return true; 555b8e80941Smrg} 556b8e80941Smrg 557b8e80941Smrg/** loader_dri3_wait_for_msc 558b8e80941Smrg * 559b8e80941Smrg * Get the X server to send an event when the target msc/divisor/remainder is 560b8e80941Smrg * reached. 561b8e80941Smrg */ 562b8e80941Smrgbool 563b8e80941Smrgloader_dri3_wait_for_msc(struct loader_dri3_drawable *draw, 564b8e80941Smrg int64_t target_msc, 565b8e80941Smrg int64_t divisor, int64_t remainder, 566b8e80941Smrg int64_t *ust, int64_t *msc, int64_t *sbc) 567b8e80941Smrg{ 568b8e80941Smrg xcb_void_cookie_t cookie = xcb_present_notify_msc(draw->conn, 569b8e80941Smrg draw->drawable, 570b8e80941Smrg draw->eid, 571b8e80941Smrg target_msc, 572b8e80941Smrg divisor, 573b8e80941Smrg remainder); 574b8e80941Smrg xcb_generic_event_t *ev; 575b8e80941Smrg unsigned full_sequence; 576b8e80941Smrg 577b8e80941Smrg mtx_lock(&draw->mtx); 578b8e80941Smrg xcb_flush(draw->conn); 579b8e80941Smrg 580b8e80941Smrg /* Wait for the event */ 581b8e80941Smrg do { 582b8e80941Smrg ev = xcb_wait_for_special_event(draw->conn, draw->special_event); 583b8e80941Smrg if (!ev) { 584b8e80941Smrg mtx_unlock(&draw->mtx); 585b8e80941Smrg return false; 586b8e80941Smrg } 587b8e80941Smrg 588b8e80941Smrg full_sequence = ev->full_sequence; 589b8e80941Smrg dri3_handle_present_event(draw, (void *) ev); 590b8e80941Smrg } while (full_sequence != cookie.sequence || 
draw->notify_msc < target_msc); 591b8e80941Smrg 592b8e80941Smrg *ust = draw->notify_ust; 593b8e80941Smrg *msc = draw->notify_msc; 594b8e80941Smrg *sbc = draw->recv_sbc; 595b8e80941Smrg mtx_unlock(&draw->mtx); 596b8e80941Smrg 597b8e80941Smrg return true; 598b8e80941Smrg} 599b8e80941Smrg 600b8e80941Smrg/** loader_dri3_wait_for_sbc 601b8e80941Smrg * 602b8e80941Smrg * Wait for the completed swap buffer count to reach the specified 603b8e80941Smrg * target. Presumably the application knows that this will be reached with 604b8e80941Smrg * outstanding complete events, or we're going to be here awhile. 605b8e80941Smrg */ 606b8e80941Smrgint 607b8e80941Smrgloader_dri3_wait_for_sbc(struct loader_dri3_drawable *draw, 608b8e80941Smrg int64_t target_sbc, int64_t *ust, 609b8e80941Smrg int64_t *msc, int64_t *sbc) 610b8e80941Smrg{ 611b8e80941Smrg /* From the GLX_OML_sync_control spec: 612b8e80941Smrg * 613b8e80941Smrg * "If <target_sbc> = 0, the function will block until all previous 614b8e80941Smrg * swaps requested with glXSwapBuffersMscOML for that window have 615b8e80941Smrg * completed." 616b8e80941Smrg */ 617b8e80941Smrg mtx_lock(&draw->mtx); 618b8e80941Smrg if (!target_sbc) 619b8e80941Smrg target_sbc = draw->send_sbc; 620b8e80941Smrg 621b8e80941Smrg while (draw->recv_sbc < target_sbc) { 622b8e80941Smrg if (!dri3_wait_for_event_locked(draw)) { 623b8e80941Smrg mtx_unlock(&draw->mtx); 624b8e80941Smrg return 0; 625b8e80941Smrg } 626b8e80941Smrg } 627b8e80941Smrg 628b8e80941Smrg *ust = draw->ust; 629b8e80941Smrg *msc = draw->msc; 630b8e80941Smrg *sbc = draw->recv_sbc; 631b8e80941Smrg mtx_unlock(&draw->mtx); 632b8e80941Smrg return 1; 633b8e80941Smrg} 634b8e80941Smrg 635b8e80941Smrg/** loader_dri3_find_back 636b8e80941Smrg * 637b8e80941Smrg * Find an idle back buffer. 
If there isn't one, then 638b8e80941Smrg * wait for a present idle notify event from the X server 639b8e80941Smrg */ 640b8e80941Smrgstatic int 641b8e80941Smrgdri3_find_back(struct loader_dri3_drawable *draw) 642b8e80941Smrg{ 643b8e80941Smrg int b; 644b8e80941Smrg int num_to_consider; 645b8e80941Smrg 646b8e80941Smrg mtx_lock(&draw->mtx); 647b8e80941Smrg /* Increase the likelyhood of reusing current buffer */ 648b8e80941Smrg dri3_flush_present_events(draw); 649b8e80941Smrg 650b8e80941Smrg /* Check whether we need to reuse the current back buffer as new back. 651b8e80941Smrg * In that case, wait until it's not busy anymore. 652b8e80941Smrg */ 653b8e80941Smrg num_to_consider = draw->num_back; 654b8e80941Smrg if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1) { 655b8e80941Smrg num_to_consider = 1; 656b8e80941Smrg draw->cur_blit_source = -1; 657b8e80941Smrg } 658b8e80941Smrg 659b8e80941Smrg for (;;) { 660b8e80941Smrg for (b = 0; b < num_to_consider; b++) { 661b8e80941Smrg int id = LOADER_DRI3_BACK_ID((b + draw->cur_back) % draw->num_back); 662b8e80941Smrg struct loader_dri3_buffer *buffer = draw->buffers[id]; 663b8e80941Smrg 664b8e80941Smrg if (!buffer || !buffer->busy) { 665b8e80941Smrg draw->cur_back = id; 666b8e80941Smrg mtx_unlock(&draw->mtx); 667b8e80941Smrg return id; 668b8e80941Smrg } 669b8e80941Smrg } 670b8e80941Smrg if (!dri3_wait_for_event_locked(draw)) { 671b8e80941Smrg mtx_unlock(&draw->mtx); 672b8e80941Smrg return -1; 673b8e80941Smrg } 674b8e80941Smrg } 675b8e80941Smrg} 676b8e80941Smrg 677b8e80941Smrgstatic xcb_gcontext_t 678b8e80941Smrgdri3_drawable_gc(struct loader_dri3_drawable *draw) 679b8e80941Smrg{ 680b8e80941Smrg if (!draw->gc) { 681b8e80941Smrg uint32_t v = 0; 682b8e80941Smrg xcb_create_gc(draw->conn, 683b8e80941Smrg (draw->gc = xcb_generate_id(draw->conn)), 684b8e80941Smrg draw->drawable, 685b8e80941Smrg XCB_GC_GRAPHICS_EXPOSURES, 686b8e80941Smrg &v); 687b8e80941Smrg } 688b8e80941Smrg return draw->gc; 689b8e80941Smrg} 
690b8e80941Smrg 691b8e80941Smrg 692b8e80941Smrgstatic struct loader_dri3_buffer * 693b8e80941Smrgdri3_back_buffer(struct loader_dri3_drawable *draw) 694b8e80941Smrg{ 695b8e80941Smrg return draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)]; 696b8e80941Smrg} 697b8e80941Smrg 698b8e80941Smrgstatic struct loader_dri3_buffer * 699b8e80941Smrgdri3_fake_front_buffer(struct loader_dri3_drawable *draw) 700b8e80941Smrg{ 701b8e80941Smrg return draw->buffers[LOADER_DRI3_FRONT_ID]; 702b8e80941Smrg} 703b8e80941Smrg 704b8e80941Smrgstatic void 705b8e80941Smrgdri3_copy_area(xcb_connection_t *c, 706b8e80941Smrg xcb_drawable_t src_drawable, 707b8e80941Smrg xcb_drawable_t dst_drawable, 708b8e80941Smrg xcb_gcontext_t gc, 709b8e80941Smrg int16_t src_x, 710b8e80941Smrg int16_t src_y, 711b8e80941Smrg int16_t dst_x, 712b8e80941Smrg int16_t dst_y, 713b8e80941Smrg uint16_t width, 714b8e80941Smrg uint16_t height) 715b8e80941Smrg{ 716b8e80941Smrg xcb_void_cookie_t cookie; 717b8e80941Smrg 718b8e80941Smrg cookie = xcb_copy_area_checked(c, 719b8e80941Smrg src_drawable, 720b8e80941Smrg dst_drawable, 721b8e80941Smrg gc, 722b8e80941Smrg src_x, 723b8e80941Smrg src_y, 724b8e80941Smrg dst_x, 725b8e80941Smrg dst_y, 726b8e80941Smrg width, 727b8e80941Smrg height); 728b8e80941Smrg xcb_discard_reply(c, cookie.sequence); 729b8e80941Smrg} 730b8e80941Smrg 731b8e80941Smrg/** 732b8e80941Smrg * Asks the driver to flush any queued work necessary for serializing with the 733b8e80941Smrg * X command stream, and optionally the slightly more strict requirement of 734b8e80941Smrg * glFlush() equivalence (which would require flushing even if nothing had 735b8e80941Smrg * been drawn to a window system framebuffer, for example). 
736b8e80941Smrg */ 737b8e80941Smrgvoid 738b8e80941Smrgloader_dri3_flush(struct loader_dri3_drawable *draw, 739b8e80941Smrg unsigned flags, 740b8e80941Smrg enum __DRI2throttleReason throttle_reason) 741b8e80941Smrg{ 742b8e80941Smrg /* NEED TO CHECK WHETHER CONTEXT IS NULL */ 743b8e80941Smrg __DRIcontext *dri_context = draw->vtable->get_dri_context(draw); 744b8e80941Smrg 745b8e80941Smrg if (dri_context) { 746b8e80941Smrg draw->ext->flush->flush_with_flags(dri_context, draw->dri_drawable, 747b8e80941Smrg flags, throttle_reason); 748b8e80941Smrg } 749b8e80941Smrg} 750b8e80941Smrg 751b8e80941Smrgvoid 752b8e80941Smrgloader_dri3_copy_sub_buffer(struct loader_dri3_drawable *draw, 753b8e80941Smrg int x, int y, 754b8e80941Smrg int width, int height, 755b8e80941Smrg bool flush) 756b8e80941Smrg{ 757b8e80941Smrg struct loader_dri3_buffer *back; 758b8e80941Smrg unsigned flags = __DRI2_FLUSH_DRAWABLE; 759b8e80941Smrg 760b8e80941Smrg /* Check we have the right attachments */ 761b8e80941Smrg if (!draw->have_back || draw->is_pixmap) 762b8e80941Smrg return; 763b8e80941Smrg 764b8e80941Smrg if (flush) 765b8e80941Smrg flags |= __DRI2_FLUSH_CONTEXT; 766b8e80941Smrg loader_dri3_flush(draw, flags, __DRI2_THROTTLE_SWAPBUFFER); 767b8e80941Smrg 768b8e80941Smrg back = dri3_find_back_alloc(draw); 769b8e80941Smrg if (!back) 770b8e80941Smrg return; 771b8e80941Smrg 772b8e80941Smrg y = draw->height - y - height; 773b8e80941Smrg 774b8e80941Smrg if (draw->is_different_gpu) { 775b8e80941Smrg /* Update the linear buffer part of the back buffer 776b8e80941Smrg * for the dri3_copy_area operation 777b8e80941Smrg */ 778b8e80941Smrg (void) loader_dri3_blit_image(draw, 779b8e80941Smrg back->linear_buffer, 780b8e80941Smrg back->image, 781b8e80941Smrg 0, 0, back->width, back->height, 782b8e80941Smrg 0, 0, __BLIT_FLAG_FLUSH); 783b8e80941Smrg } 784b8e80941Smrg 785b8e80941Smrg loader_dri3_swapbuffer_barrier(draw); 786b8e80941Smrg dri3_fence_reset(draw->conn, back); 787b8e80941Smrg dri3_copy_area(draw->conn, 
788b8e80941Smrg back->pixmap, 789b8e80941Smrg draw->drawable, 790b8e80941Smrg dri3_drawable_gc(draw), 791b8e80941Smrg x, y, x, y, width, height); 792b8e80941Smrg dri3_fence_trigger(draw->conn, back); 793b8e80941Smrg /* Refresh the fake front (if present) after we just damaged the real 794b8e80941Smrg * front. 795b8e80941Smrg */ 796b8e80941Smrg if (draw->have_fake_front && 797b8e80941Smrg !loader_dri3_blit_image(draw, 798b8e80941Smrg dri3_fake_front_buffer(draw)->image, 799b8e80941Smrg back->image, 800b8e80941Smrg x, y, width, height, 801b8e80941Smrg x, y, __BLIT_FLAG_FLUSH) && 802b8e80941Smrg !draw->is_different_gpu) { 803b8e80941Smrg dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw)); 804b8e80941Smrg dri3_copy_area(draw->conn, 805b8e80941Smrg back->pixmap, 806b8e80941Smrg dri3_fake_front_buffer(draw)->pixmap, 807b8e80941Smrg dri3_drawable_gc(draw), 808b8e80941Smrg x, y, x, y, width, height); 809b8e80941Smrg dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw)); 810b8e80941Smrg dri3_fence_await(draw->conn, NULL, dri3_fake_front_buffer(draw)); 811b8e80941Smrg } 812b8e80941Smrg dri3_fence_await(draw->conn, draw, back); 813b8e80941Smrg} 814b8e80941Smrg 815b8e80941Smrgvoid 816b8e80941Smrgloader_dri3_copy_drawable(struct loader_dri3_drawable *draw, 817b8e80941Smrg xcb_drawable_t dest, 818b8e80941Smrg xcb_drawable_t src) 819b8e80941Smrg{ 820b8e80941Smrg loader_dri3_flush(draw, __DRI2_FLUSH_DRAWABLE, 0); 821b8e80941Smrg 822b8e80941Smrg dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw)); 823b8e80941Smrg dri3_copy_area(draw->conn, 824b8e80941Smrg src, dest, 825b8e80941Smrg dri3_drawable_gc(draw), 826b8e80941Smrg 0, 0, 0, 0, draw->width, draw->height); 827b8e80941Smrg dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw)); 828b8e80941Smrg dri3_fence_await(draw->conn, draw, dri3_fake_front_buffer(draw)); 829b8e80941Smrg} 830b8e80941Smrg 831b8e80941Smrgvoid 832b8e80941Smrgloader_dri3_wait_x(struct loader_dri3_drawable *draw) 833b8e80941Smrg{ 
834b8e80941Smrg struct loader_dri3_buffer *front; 835b8e80941Smrg 836b8e80941Smrg if (draw == NULL || !draw->have_fake_front) 837b8e80941Smrg return; 838b8e80941Smrg 839b8e80941Smrg front = dri3_fake_front_buffer(draw); 840b8e80941Smrg 841b8e80941Smrg loader_dri3_copy_drawable(draw, front->pixmap, draw->drawable); 842b8e80941Smrg 843b8e80941Smrg /* In the psc->is_different_gpu case, the linear buffer has been updated, 844b8e80941Smrg * but not yet the tiled buffer. 845b8e80941Smrg * Copy back to the tiled buffer we use for rendering. 846b8e80941Smrg * Note that we don't need flushing. 847b8e80941Smrg */ 848b8e80941Smrg if (draw->is_different_gpu) 849b8e80941Smrg (void) loader_dri3_blit_image(draw, 850b8e80941Smrg front->image, 851b8e80941Smrg front->linear_buffer, 852b8e80941Smrg 0, 0, front->width, front->height, 853b8e80941Smrg 0, 0, 0); 854b8e80941Smrg} 855b8e80941Smrg 856b8e80941Smrgvoid 857b8e80941Smrgloader_dri3_wait_gl(struct loader_dri3_drawable *draw) 858b8e80941Smrg{ 859b8e80941Smrg struct loader_dri3_buffer *front; 860b8e80941Smrg 861b8e80941Smrg if (draw == NULL || !draw->have_fake_front) 862b8e80941Smrg return; 863b8e80941Smrg 864b8e80941Smrg front = dri3_fake_front_buffer(draw); 865b8e80941Smrg 866b8e80941Smrg /* In the psc->is_different_gpu case, we update the linear_buffer 867b8e80941Smrg * before updating the real front. 
868b8e80941Smrg */ 869b8e80941Smrg if (draw->is_different_gpu) 870b8e80941Smrg (void) loader_dri3_blit_image(draw, 871b8e80941Smrg front->linear_buffer, 872b8e80941Smrg front->image, 873b8e80941Smrg 0, 0, front->width, front->height, 874b8e80941Smrg 0, 0, __BLIT_FLAG_FLUSH); 875b8e80941Smrg loader_dri3_swapbuffer_barrier(draw); 876b8e80941Smrg loader_dri3_copy_drawable(draw, draw->drawable, front->pixmap); 877b8e80941Smrg} 878b8e80941Smrg 879b8e80941Smrg/** dri3_flush_present_events 880b8e80941Smrg * 881b8e80941Smrg * Process any present events that have been received from the X server 882b8e80941Smrg */ 883b8e80941Smrgstatic void 884b8e80941Smrgdri3_flush_present_events(struct loader_dri3_drawable *draw) 885b8e80941Smrg{ 886b8e80941Smrg /* Check to see if any configuration changes have occurred 887b8e80941Smrg * since we were last invoked 888b8e80941Smrg */ 889b8e80941Smrg if (draw->has_event_waiter) 890b8e80941Smrg return; 891b8e80941Smrg 892b8e80941Smrg if (draw->special_event) { 893b8e80941Smrg xcb_generic_event_t *ev; 894b8e80941Smrg 895b8e80941Smrg while ((ev = xcb_poll_for_special_event(draw->conn, 896b8e80941Smrg draw->special_event)) != NULL) { 897b8e80941Smrg xcb_present_generic_event_t *ge = (void *) ev; 898b8e80941Smrg dri3_handle_present_event(draw, ge); 899b8e80941Smrg } 900b8e80941Smrg } 901b8e80941Smrg} 902b8e80941Smrg 903b8e80941Smrg/** loader_dri3_swap_buffers_msc 904b8e80941Smrg * 905b8e80941Smrg * Make the current back buffer visible using the present extension 906b8e80941Smrg */ 907b8e80941Smrgint64_t 908b8e80941Smrgloader_dri3_swap_buffers_msc(struct loader_dri3_drawable *draw, 909b8e80941Smrg int64_t target_msc, int64_t divisor, 910b8e80941Smrg int64_t remainder, unsigned flush_flags, 911b8e80941Smrg bool force_copy) 912b8e80941Smrg{ 913b8e80941Smrg struct loader_dri3_buffer *back; 914b8e80941Smrg int64_t ret = 0; 915b8e80941Smrg uint32_t options = XCB_PRESENT_OPTION_NONE; 916b8e80941Smrg 917b8e80941Smrg draw->vtable->flush_drawable(draw, 
flush_flags); 918b8e80941Smrg 919b8e80941Smrg back = dri3_find_back_alloc(draw); 920b8e80941Smrg 921b8e80941Smrg mtx_lock(&draw->mtx); 922b8e80941Smrg 923b8e80941Smrg if (draw->adaptive_sync && !draw->adaptive_sync_active) { 924b8e80941Smrg set_adaptive_sync_property(draw->conn, draw->drawable, true); 925b8e80941Smrg draw->adaptive_sync_active = true; 926b8e80941Smrg } 927b8e80941Smrg 928b8e80941Smrg if (draw->is_different_gpu && back) { 929b8e80941Smrg /* Update the linear buffer before presenting the pixmap */ 930b8e80941Smrg (void) loader_dri3_blit_image(draw, 931b8e80941Smrg back->linear_buffer, 932b8e80941Smrg back->image, 933b8e80941Smrg 0, 0, back->width, back->height, 934b8e80941Smrg 0, 0, __BLIT_FLAG_FLUSH); 935b8e80941Smrg } 936b8e80941Smrg 937b8e80941Smrg /* If we need to preload the new back buffer, remember the source. 938b8e80941Smrg * The force_copy parameter is used by EGL to attempt to preserve 939b8e80941Smrg * the back buffer across a call to this function. 940b8e80941Smrg */ 941b8e80941Smrg if (draw->swap_method != __DRI_ATTRIB_SWAP_UNDEFINED || force_copy) 942b8e80941Smrg draw->cur_blit_source = LOADER_DRI3_BACK_ID(draw->cur_back); 943b8e80941Smrg 944b8e80941Smrg /* Exchange the back and fake front. Even though the server knows about these 945b8e80941Smrg * buffers, it has no notion of back and fake front. 
946b8e80941Smrg */ 947b8e80941Smrg if (back && draw->have_fake_front) { 948b8e80941Smrg struct loader_dri3_buffer *tmp; 949b8e80941Smrg 950b8e80941Smrg tmp = dri3_fake_front_buffer(draw); 951b8e80941Smrg draw->buffers[LOADER_DRI3_FRONT_ID] = back; 952b8e80941Smrg draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)] = tmp; 953b8e80941Smrg 954b8e80941Smrg if (draw->swap_method == __DRI_ATTRIB_SWAP_COPY || force_copy) 955b8e80941Smrg draw->cur_blit_source = LOADER_DRI3_FRONT_ID; 956b8e80941Smrg } 957b8e80941Smrg 958b8e80941Smrg dri3_flush_present_events(draw); 959b8e80941Smrg 960b8e80941Smrg if (back && !draw->is_pixmap) { 961b8e80941Smrg dri3_fence_reset(draw->conn, back); 962b8e80941Smrg 963b8e80941Smrg /* Compute when we want the frame shown by taking the last known 964b8e80941Smrg * successful MSC and adding in a swap interval for each outstanding swap 965b8e80941Smrg * request. target_msc=divisor=remainder=0 means "Use glXSwapBuffers() 966b8e80941Smrg * semantic" 967b8e80941Smrg */ 968b8e80941Smrg ++draw->send_sbc; 969b8e80941Smrg if (target_msc == 0 && divisor == 0 && remainder == 0) 970b8e80941Smrg target_msc = draw->msc + draw->swap_interval * 971b8e80941Smrg (draw->send_sbc - draw->recv_sbc); 972b8e80941Smrg else if (divisor == 0 && remainder > 0) { 973b8e80941Smrg /* From the GLX_OML_sync_control spec: 974b8e80941Smrg * "If <divisor> = 0, the swap will occur when MSC becomes 975b8e80941Smrg * greater than or equal to <target_msc>." 976b8e80941Smrg * 977b8e80941Smrg * Note that there's no mention of the remainder. The Present 978b8e80941Smrg * extension throws BadValue for remainder != 0 with divisor == 0, so 979b8e80941Smrg * just drop the passed in value. 
980b8e80941Smrg */ 981b8e80941Smrg remainder = 0; 982b8e80941Smrg } 983b8e80941Smrg 984b8e80941Smrg /* From the GLX_EXT_swap_control spec 985b8e80941Smrg * and the EGL 1.4 spec (page 53): 986b8e80941Smrg * 987b8e80941Smrg * "If <interval> is set to a value of 0, buffer swaps are not 988b8e80941Smrg * synchronized to a video frame." 989b8e80941Smrg * 990b8e80941Smrg * Implementation note: It is possible to enable triple buffering 991b8e80941Smrg * behaviour by not using XCB_PRESENT_OPTION_ASYNC, but this should not be 992b8e80941Smrg * the default. 993b8e80941Smrg */ 994b8e80941Smrg if (draw->swap_interval == 0) 995b8e80941Smrg options |= XCB_PRESENT_OPTION_ASYNC; 996b8e80941Smrg 997b8e80941Smrg /* If we need to populate the new back, but need to reuse the back 998b8e80941Smrg * buffer slot due to lack of local blit capabilities, make sure 999b8e80941Smrg * the server doesn't flip and we deadlock. 1000b8e80941Smrg */ 1001b8e80941Smrg if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1) 1002b8e80941Smrg options |= XCB_PRESENT_OPTION_COPY; 1003b8e80941Smrg#ifdef HAVE_DRI3_MODIFIERS 1004b8e80941Smrg if (draw->multiplanes_available) 1005b8e80941Smrg options |= XCB_PRESENT_OPTION_SUBOPTIMAL; 1006b8e80941Smrg#endif 1007b8e80941Smrg back->busy = 1; 1008b8e80941Smrg back->last_swap = draw->send_sbc; 1009b8e80941Smrg xcb_present_pixmap(draw->conn, 1010b8e80941Smrg draw->drawable, 1011b8e80941Smrg back->pixmap, 1012b8e80941Smrg (uint32_t) draw->send_sbc, 1013b8e80941Smrg 0, /* valid */ 1014b8e80941Smrg 0, /* update */ 1015b8e80941Smrg 0, /* x_off */ 1016b8e80941Smrg 0, /* y_off */ 1017b8e80941Smrg None, /* target_crtc */ 1018b8e80941Smrg None, 1019b8e80941Smrg back->sync_fence, 1020b8e80941Smrg options, 1021b8e80941Smrg target_msc, 1022b8e80941Smrg divisor, 1023b8e80941Smrg remainder, 0, NULL); 1024b8e80941Smrg ret = (int64_t) draw->send_sbc; 1025b8e80941Smrg 1026b8e80941Smrg /* Schedule a server-side back-preserving blit if necessary. 
1027b8e80941Smrg * This happens iff all conditions below are satisfied: 1028b8e80941Smrg * a) We have a fake front, 1029b8e80941Smrg * b) We need to preserve the back buffer, 1030b8e80941Smrg * c) We don't have local blit capabilities. 1031b8e80941Smrg */ 1032b8e80941Smrg if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1 && 1033b8e80941Smrg draw->cur_blit_source != LOADER_DRI3_BACK_ID(draw->cur_back)) { 1034b8e80941Smrg struct loader_dri3_buffer *new_back = dri3_back_buffer(draw); 1035b8e80941Smrg struct loader_dri3_buffer *src = draw->buffers[draw->cur_blit_source]; 1036b8e80941Smrg 1037b8e80941Smrg dri3_fence_reset(draw->conn, new_back); 1038b8e80941Smrg dri3_copy_area(draw->conn, src->pixmap, 1039b8e80941Smrg new_back->pixmap, 1040b8e80941Smrg dri3_drawable_gc(draw), 1041b8e80941Smrg 0, 0, 0, 0, draw->width, draw->height); 1042b8e80941Smrg dri3_fence_trigger(draw->conn, new_back); 1043b8e80941Smrg new_back->last_swap = src->last_swap; 1044b8e80941Smrg } 1045b8e80941Smrg 1046b8e80941Smrg xcb_flush(draw->conn); 1047b8e80941Smrg if (draw->stamp) 1048b8e80941Smrg ++(*draw->stamp); 1049b8e80941Smrg } 1050b8e80941Smrg mtx_unlock(&draw->mtx); 1051b8e80941Smrg 1052b8e80941Smrg draw->ext->flush->invalidate(draw->dri_drawable); 1053b8e80941Smrg 1054b8e80941Smrg return ret; 1055b8e80941Smrg} 1056b8e80941Smrg 1057b8e80941Smrgint 1058b8e80941Smrgloader_dri3_query_buffer_age(struct loader_dri3_drawable *draw) 1059b8e80941Smrg{ 1060b8e80941Smrg struct loader_dri3_buffer *back = dri3_find_back_alloc(draw); 1061b8e80941Smrg int ret; 1062b8e80941Smrg 1063b8e80941Smrg mtx_lock(&draw->mtx); 1064b8e80941Smrg ret = (!back || back->last_swap == 0) ? 
0 : 1065b8e80941Smrg draw->send_sbc - back->last_swap + 1; 1066b8e80941Smrg mtx_unlock(&draw->mtx); 1067b8e80941Smrg 1068b8e80941Smrg return ret; 1069b8e80941Smrg} 1070b8e80941Smrg 1071b8e80941Smrg/** loader_dri3_open 1072b8e80941Smrg * 1073b8e80941Smrg * Wrapper around xcb_dri3_open 1074b8e80941Smrg */ 1075b8e80941Smrgint 1076b8e80941Smrgloader_dri3_open(xcb_connection_t *conn, 1077b8e80941Smrg xcb_window_t root, 1078b8e80941Smrg uint32_t provider) 1079b8e80941Smrg{ 1080b8e80941Smrg xcb_dri3_open_cookie_t cookie; 1081b8e80941Smrg xcb_dri3_open_reply_t *reply; 1082b8e80941Smrg int fd; 1083b8e80941Smrg 1084b8e80941Smrg cookie = xcb_dri3_open(conn, 1085b8e80941Smrg root, 1086b8e80941Smrg provider); 1087b8e80941Smrg 1088b8e80941Smrg reply = xcb_dri3_open_reply(conn, cookie, NULL); 1089b8e80941Smrg if (!reply) 1090b8e80941Smrg return -1; 1091b8e80941Smrg 1092b8e80941Smrg if (reply->nfd != 1) { 1093b8e80941Smrg free(reply); 1094b8e80941Smrg return -1; 1095b8e80941Smrg } 1096b8e80941Smrg 1097b8e80941Smrg fd = xcb_dri3_open_reply_fds(conn, reply)[0]; 1098b8e80941Smrg free(reply); 1099b8e80941Smrg fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC); 1100b8e80941Smrg 1101b8e80941Smrg return fd; 1102b8e80941Smrg} 1103b8e80941Smrg 1104b8e80941Smrgstatic uint32_t 1105b8e80941Smrgdri3_cpp_for_format(uint32_t format) { 1106b8e80941Smrg switch (format) { 1107b8e80941Smrg case __DRI_IMAGE_FORMAT_R8: 1108b8e80941Smrg return 1; 1109b8e80941Smrg case __DRI_IMAGE_FORMAT_RGB565: 1110b8e80941Smrg case __DRI_IMAGE_FORMAT_GR88: 1111b8e80941Smrg return 2; 1112b8e80941Smrg case __DRI_IMAGE_FORMAT_XRGB8888: 1113b8e80941Smrg case __DRI_IMAGE_FORMAT_ARGB8888: 1114b8e80941Smrg case __DRI_IMAGE_FORMAT_ABGR8888: 1115b8e80941Smrg case __DRI_IMAGE_FORMAT_XBGR8888: 1116b8e80941Smrg case __DRI_IMAGE_FORMAT_XRGB2101010: 1117b8e80941Smrg case __DRI_IMAGE_FORMAT_ARGB2101010: 1118b8e80941Smrg case __DRI_IMAGE_FORMAT_XBGR2101010: 1119b8e80941Smrg case __DRI_IMAGE_FORMAT_ABGR2101010: 1120b8e80941Smrg case 
__DRI_IMAGE_FORMAT_SARGB8: 1121b8e80941Smrg case __DRI_IMAGE_FORMAT_SABGR8: 1122b8e80941Smrg return 4; 1123b8e80941Smrg case __DRI_IMAGE_FORMAT_NONE: 1124b8e80941Smrg default: 1125b8e80941Smrg return 0; 1126b8e80941Smrg } 1127b8e80941Smrg} 1128b8e80941Smrg 1129b8e80941Smrg/* Map format of render buffer to corresponding format for the linear_buffer 1130b8e80941Smrg * used for sharing with the display gpu of a Prime setup (== is_different_gpu). 1131b8e80941Smrg * Usually linear_format == format, except for depth >= 30 formats, where 1132b8e80941Smrg * different gpu vendors have different preferences wrt. color channel ordering. 1133b8e80941Smrg */ 1134b8e80941Smrgstatic uint32_t 1135b8e80941Smrgdri3_linear_format_for_format(struct loader_dri3_drawable *draw, uint32_t format) 1136b8e80941Smrg{ 1137b8e80941Smrg switch (format) { 1138b8e80941Smrg case __DRI_IMAGE_FORMAT_XRGB2101010: 1139b8e80941Smrg case __DRI_IMAGE_FORMAT_XBGR2101010: 1140b8e80941Smrg /* Different preferred formats for different hw */ 1141b8e80941Smrg if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff) 1142b8e80941Smrg return __DRI_IMAGE_FORMAT_XBGR2101010; 1143b8e80941Smrg else 1144b8e80941Smrg return __DRI_IMAGE_FORMAT_XRGB2101010; 1145b8e80941Smrg 1146b8e80941Smrg case __DRI_IMAGE_FORMAT_ARGB2101010: 1147b8e80941Smrg case __DRI_IMAGE_FORMAT_ABGR2101010: 1148b8e80941Smrg /* Different preferred formats for different hw */ 1149b8e80941Smrg if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff) 1150b8e80941Smrg return __DRI_IMAGE_FORMAT_ABGR2101010; 1151b8e80941Smrg else 1152b8e80941Smrg return __DRI_IMAGE_FORMAT_ARGB2101010; 1153b8e80941Smrg 1154b8e80941Smrg default: 1155b8e80941Smrg return format; 1156b8e80941Smrg } 1157b8e80941Smrg} 1158b8e80941Smrg 1159b8e80941Smrg/* the DRIimage createImage function takes __DRI_IMAGE_FORMAT codes, while 1160b8e80941Smrg * the createImageFromFds call takes __DRI_IMAGE_FOURCC codes. 
To avoid 1161b8e80941Smrg * complete confusion, just deal in __DRI_IMAGE_FORMAT codes for now and 1162b8e80941Smrg * translate to __DRI_IMAGE_FOURCC codes in the call to createImageFromFds 1163b8e80941Smrg */ 1164b8e80941Smrgstatic int 1165b8e80941Smrgimage_format_to_fourcc(int format) 1166b8e80941Smrg{ 1167b8e80941Smrg 1168b8e80941Smrg /* Convert from __DRI_IMAGE_FORMAT to __DRI_IMAGE_FOURCC (sigh) */ 1169b8e80941Smrg switch (format) { 1170b8e80941Smrg case __DRI_IMAGE_FORMAT_SARGB8: return __DRI_IMAGE_FOURCC_SARGB8888; 1171b8e80941Smrg case __DRI_IMAGE_FORMAT_SABGR8: return __DRI_IMAGE_FOURCC_SABGR8888; 1172b8e80941Smrg case __DRI_IMAGE_FORMAT_RGB565: return __DRI_IMAGE_FOURCC_RGB565; 1173b8e80941Smrg case __DRI_IMAGE_FORMAT_XRGB8888: return __DRI_IMAGE_FOURCC_XRGB8888; 1174b8e80941Smrg case __DRI_IMAGE_FORMAT_ARGB8888: return __DRI_IMAGE_FOURCC_ARGB8888; 1175b8e80941Smrg case __DRI_IMAGE_FORMAT_ABGR8888: return __DRI_IMAGE_FOURCC_ABGR8888; 1176b8e80941Smrg case __DRI_IMAGE_FORMAT_XBGR8888: return __DRI_IMAGE_FOURCC_XBGR8888; 1177b8e80941Smrg case __DRI_IMAGE_FORMAT_XRGB2101010: return __DRI_IMAGE_FOURCC_XRGB2101010; 1178b8e80941Smrg case __DRI_IMAGE_FORMAT_ARGB2101010: return __DRI_IMAGE_FOURCC_ARGB2101010; 1179b8e80941Smrg case __DRI_IMAGE_FORMAT_XBGR2101010: return __DRI_IMAGE_FOURCC_XBGR2101010; 1180b8e80941Smrg case __DRI_IMAGE_FORMAT_ABGR2101010: return __DRI_IMAGE_FOURCC_ABGR2101010; 1181b8e80941Smrg } 1182b8e80941Smrg return 0; 1183b8e80941Smrg} 1184b8e80941Smrg 1185b8e80941Smrg#ifdef HAVE_DRI3_MODIFIERS 1186b8e80941Smrgstatic bool 1187b8e80941Smrghas_supported_modifier(struct loader_dri3_drawable *draw, unsigned int format, 1188b8e80941Smrg uint64_t *modifiers, uint32_t count) 1189b8e80941Smrg{ 1190b8e80941Smrg uint64_t *supported_modifiers; 1191b8e80941Smrg int32_t supported_modifiers_count; 1192b8e80941Smrg bool found = false; 1193b8e80941Smrg int i, j; 1194b8e80941Smrg 1195b8e80941Smrg if (!draw->ext->image->queryDmaBufModifiers(draw->dri_screen, 
1196b8e80941Smrg format, 0, NULL, NULL, 1197b8e80941Smrg &supported_modifiers_count) || 1198b8e80941Smrg supported_modifiers_count == 0) 1199b8e80941Smrg return false; 1200b8e80941Smrg 1201b8e80941Smrg supported_modifiers = malloc(supported_modifiers_count * sizeof(uint64_t)); 1202b8e80941Smrg if (!supported_modifiers) 1203b8e80941Smrg return false; 1204b8e80941Smrg 1205b8e80941Smrg draw->ext->image->queryDmaBufModifiers(draw->dri_screen, format, 1206b8e80941Smrg supported_modifiers_count, 1207b8e80941Smrg supported_modifiers, NULL, 1208b8e80941Smrg &supported_modifiers_count); 1209b8e80941Smrg 1210b8e80941Smrg for (i = 0; !found && i < supported_modifiers_count; i++) { 1211b8e80941Smrg for (j = 0; !found && j < count; j++) { 1212b8e80941Smrg if (supported_modifiers[i] == modifiers[j]) 1213b8e80941Smrg found = true; 1214b8e80941Smrg } 1215b8e80941Smrg } 1216b8e80941Smrg 1217b8e80941Smrg free(supported_modifiers); 1218b8e80941Smrg return found; 1219b8e80941Smrg} 1220b8e80941Smrg#endif 1221b8e80941Smrg 1222b8e80941Smrg/** loader_dri3_alloc_render_buffer 1223b8e80941Smrg * 1224b8e80941Smrg * Use the driver createImage function to construct a __DRIimage, then 1225b8e80941Smrg * get a file descriptor for that and create an X pixmap from that 1226b8e80941Smrg * 1227b8e80941Smrg * Allocate an xshmfence for synchronization 1228b8e80941Smrg */ 1229b8e80941Smrgstatic struct loader_dri3_buffer * 1230b8e80941Smrgdri3_alloc_render_buffer(struct loader_dri3_drawable *draw, unsigned int format, 1231b8e80941Smrg int width, int height, int depth) 1232b8e80941Smrg{ 1233b8e80941Smrg struct loader_dri3_buffer *buffer; 1234b8e80941Smrg __DRIimage *pixmap_buffer; 1235b8e80941Smrg xcb_pixmap_t pixmap; 1236b8e80941Smrg xcb_sync_fence_t sync_fence; 1237b8e80941Smrg struct xshmfence *shm_fence; 1238b8e80941Smrg int buffer_fds[4], fence_fd; 1239b8e80941Smrg int num_planes = 0; 1240b8e80941Smrg int i, mod; 1241b8e80941Smrg int ret; 1242b8e80941Smrg 1243b8e80941Smrg /* Create an xshmfence 
object and 1244b8e80941Smrg * prepare to send that to the X server 1245b8e80941Smrg */ 1246b8e80941Smrg 1247b8e80941Smrg fence_fd = xshmfence_alloc_shm(); 1248b8e80941Smrg if (fence_fd < 0) 1249b8e80941Smrg return NULL; 1250b8e80941Smrg 1251b8e80941Smrg shm_fence = xshmfence_map_shm(fence_fd); 1252b8e80941Smrg if (shm_fence == NULL) 1253b8e80941Smrg goto no_shm_fence; 1254b8e80941Smrg 1255b8e80941Smrg /* Allocate the image from the driver 1256b8e80941Smrg */ 1257b8e80941Smrg buffer = calloc(1, sizeof *buffer); 1258b8e80941Smrg if (!buffer) 1259b8e80941Smrg goto no_buffer; 1260b8e80941Smrg 1261b8e80941Smrg buffer->cpp = dri3_cpp_for_format(format); 1262b8e80941Smrg if (!buffer->cpp) 1263b8e80941Smrg goto no_image; 1264b8e80941Smrg 1265b8e80941Smrg if (!draw->is_different_gpu) { 1266b8e80941Smrg#ifdef HAVE_DRI3_MODIFIERS 1267b8e80941Smrg if (draw->multiplanes_available && 1268b8e80941Smrg draw->ext->image->base.version >= 15 && 1269b8e80941Smrg draw->ext->image->queryDmaBufModifiers && 1270b8e80941Smrg draw->ext->image->createImageWithModifiers) { 1271b8e80941Smrg xcb_dri3_get_supported_modifiers_cookie_t mod_cookie; 1272b8e80941Smrg xcb_dri3_get_supported_modifiers_reply_t *mod_reply; 1273b8e80941Smrg xcb_generic_error_t *error = NULL; 1274b8e80941Smrg uint64_t *modifiers = NULL; 1275b8e80941Smrg uint32_t count = 0; 1276b8e80941Smrg 1277b8e80941Smrg mod_cookie = xcb_dri3_get_supported_modifiers(draw->conn, 1278b8e80941Smrg draw->window, 1279b8e80941Smrg depth, buffer->cpp * 8); 1280b8e80941Smrg mod_reply = xcb_dri3_get_supported_modifiers_reply(draw->conn, 1281b8e80941Smrg mod_cookie, 1282b8e80941Smrg &error); 1283b8e80941Smrg if (!mod_reply) 1284b8e80941Smrg goto no_image; 1285b8e80941Smrg 1286b8e80941Smrg if (mod_reply->num_window_modifiers) { 1287b8e80941Smrg count = mod_reply->num_window_modifiers; 1288b8e80941Smrg modifiers = malloc(count * sizeof(uint64_t)); 1289b8e80941Smrg if (!modifiers) { 1290b8e80941Smrg free(mod_reply); 1291b8e80941Smrg goto no_image; 
1292b8e80941Smrg } 1293b8e80941Smrg 1294b8e80941Smrg memcpy(modifiers, 1295b8e80941Smrg xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply), 1296b8e80941Smrg count * sizeof(uint64_t)); 1297b8e80941Smrg 1298b8e80941Smrg if (!has_supported_modifier(draw, image_format_to_fourcc(format), 1299b8e80941Smrg modifiers, count)) { 1300b8e80941Smrg free(modifiers); 1301b8e80941Smrg count = 0; 1302b8e80941Smrg modifiers = NULL; 1303b8e80941Smrg } 1304b8e80941Smrg } 1305b8e80941Smrg 1306b8e80941Smrg if (mod_reply->num_screen_modifiers && modifiers == NULL) { 1307b8e80941Smrg count = mod_reply->num_screen_modifiers; 1308b8e80941Smrg modifiers = malloc(count * sizeof(uint64_t)); 1309b8e80941Smrg if (!modifiers) { 1310b8e80941Smrg free(modifiers); 1311b8e80941Smrg free(mod_reply); 1312b8e80941Smrg goto no_image; 1313b8e80941Smrg } 1314b8e80941Smrg 1315b8e80941Smrg memcpy(modifiers, 1316b8e80941Smrg xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply), 1317b8e80941Smrg count * sizeof(uint64_t)); 1318b8e80941Smrg } 1319b8e80941Smrg 1320b8e80941Smrg free(mod_reply); 1321b8e80941Smrg 1322b8e80941Smrg /* don't use createImageWithModifiers() if we have no 1323b8e80941Smrg * modifiers, other things depend on the use flags when 1324b8e80941Smrg * there are no modifiers to know that a buffer can be 1325b8e80941Smrg * shared. 
1326b8e80941Smrg */ 1327b8e80941Smrg if (modifiers) { 1328b8e80941Smrg buffer->image = draw->ext->image->createImageWithModifiers(draw->dri_screen, 1329b8e80941Smrg width, height, 1330b8e80941Smrg format, 1331b8e80941Smrg modifiers, 1332b8e80941Smrg count, 1333b8e80941Smrg buffer); 1334b8e80941Smrg } 1335b8e80941Smrg 1336b8e80941Smrg free(modifiers); 1337b8e80941Smrg } 1338b8e80941Smrg#endif 1339b8e80941Smrg if (!buffer->image) 1340b8e80941Smrg buffer->image = draw->ext->image->createImage(draw->dri_screen, 1341b8e80941Smrg width, height, 1342b8e80941Smrg format, 1343b8e80941Smrg __DRI_IMAGE_USE_SHARE | 1344b8e80941Smrg __DRI_IMAGE_USE_SCANOUT | 1345b8e80941Smrg __DRI_IMAGE_USE_BACKBUFFER, 1346b8e80941Smrg buffer); 1347b8e80941Smrg 1348b8e80941Smrg pixmap_buffer = buffer->image; 1349b8e80941Smrg 1350b8e80941Smrg if (!buffer->image) 1351b8e80941Smrg goto no_image; 1352b8e80941Smrg } else { 1353b8e80941Smrg buffer->image = draw->ext->image->createImage(draw->dri_screen, 1354b8e80941Smrg width, height, 1355b8e80941Smrg format, 1356b8e80941Smrg 0, 1357b8e80941Smrg buffer); 1358b8e80941Smrg 1359b8e80941Smrg if (!buffer->image) 1360b8e80941Smrg goto no_image; 1361b8e80941Smrg 1362b8e80941Smrg buffer->linear_buffer = 1363b8e80941Smrg draw->ext->image->createImage(draw->dri_screen, 1364b8e80941Smrg width, height, 1365b8e80941Smrg dri3_linear_format_for_format(draw, format), 1366b8e80941Smrg __DRI_IMAGE_USE_SHARE | 1367b8e80941Smrg __DRI_IMAGE_USE_LINEAR | 1368b8e80941Smrg __DRI_IMAGE_USE_BACKBUFFER, 1369b8e80941Smrg buffer); 1370b8e80941Smrg pixmap_buffer = buffer->linear_buffer; 1371b8e80941Smrg 1372b8e80941Smrg if (!buffer->linear_buffer) 1373b8e80941Smrg goto no_linear_buffer; 1374b8e80941Smrg } 1375b8e80941Smrg 1376b8e80941Smrg /* X want some information about the planes, so ask the image for it 1377b8e80941Smrg */ 1378b8e80941Smrg if (!draw->ext->image->queryImage(pixmap_buffer, __DRI_IMAGE_ATTRIB_NUM_PLANES, 1379b8e80941Smrg &num_planes)) 1380b8e80941Smrg num_planes 
= 1; 1381b8e80941Smrg 1382b8e80941Smrg for (i = 0; i < num_planes; i++) { 1383b8e80941Smrg __DRIimage *image = draw->ext->image->fromPlanar(pixmap_buffer, i, NULL); 1384b8e80941Smrg 1385b8e80941Smrg if (!image) { 1386b8e80941Smrg assert(i == 0); 1387b8e80941Smrg image = pixmap_buffer; 1388b8e80941Smrg } 1389b8e80941Smrg 1390b8e80941Smrg ret = draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_FD, 1391b8e80941Smrg &buffer_fds[i]); 1392b8e80941Smrg ret &= draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_STRIDE, 1393b8e80941Smrg &buffer->strides[i]); 1394b8e80941Smrg ret &= draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_OFFSET, 1395b8e80941Smrg &buffer->offsets[i]); 1396b8e80941Smrg if (image != pixmap_buffer) 1397b8e80941Smrg draw->ext->image->destroyImage(image); 1398b8e80941Smrg 1399b8e80941Smrg if (!ret) 1400b8e80941Smrg goto no_buffer_attrib; 1401b8e80941Smrg } 1402b8e80941Smrg 1403b8e80941Smrg ret = draw->ext->image->queryImage(pixmap_buffer, 1404b8e80941Smrg __DRI_IMAGE_ATTRIB_MODIFIER_UPPER, &mod); 1405b8e80941Smrg buffer->modifier = (uint64_t) mod << 32; 1406b8e80941Smrg ret &= draw->ext->image->queryImage(pixmap_buffer, 1407b8e80941Smrg __DRI_IMAGE_ATTRIB_MODIFIER_LOWER, &mod); 1408b8e80941Smrg buffer->modifier |= (uint64_t)(mod & 0xffffffff); 1409b8e80941Smrg 1410b8e80941Smrg if (!ret) 1411b8e80941Smrg buffer->modifier = DRM_FORMAT_MOD_INVALID; 1412b8e80941Smrg 1413b8e80941Smrg pixmap = xcb_generate_id(draw->conn); 1414b8e80941Smrg#ifdef HAVE_DRI3_MODIFIERS 1415b8e80941Smrg if (draw->multiplanes_available && 1416b8e80941Smrg buffer->modifier != DRM_FORMAT_MOD_INVALID) { 1417b8e80941Smrg xcb_dri3_pixmap_from_buffers(draw->conn, 1418b8e80941Smrg pixmap, 1419b8e80941Smrg draw->window, 1420b8e80941Smrg num_planes, 1421b8e80941Smrg width, height, 1422b8e80941Smrg buffer->strides[0], buffer->offsets[0], 1423b8e80941Smrg buffer->strides[1], buffer->offsets[1], 1424b8e80941Smrg buffer->strides[2], buffer->offsets[2], 1425b8e80941Smrg 
buffer->strides[3], buffer->offsets[3], 1426b8e80941Smrg depth, buffer->cpp * 8, 1427b8e80941Smrg buffer->modifier, 1428b8e80941Smrg buffer_fds); 1429b8e80941Smrg } else 1430b8e80941Smrg#endif 1431b8e80941Smrg { 1432b8e80941Smrg xcb_dri3_pixmap_from_buffer(draw->conn, 1433b8e80941Smrg pixmap, 1434b8e80941Smrg draw->drawable, 1435b8e80941Smrg buffer->size, 1436b8e80941Smrg width, height, buffer->strides[0], 1437b8e80941Smrg depth, buffer->cpp * 8, 1438b8e80941Smrg buffer_fds[0]); 1439b8e80941Smrg } 1440b8e80941Smrg 1441b8e80941Smrg xcb_dri3_fence_from_fd(draw->conn, 1442b8e80941Smrg pixmap, 1443b8e80941Smrg (sync_fence = xcb_generate_id(draw->conn)), 1444b8e80941Smrg false, 1445b8e80941Smrg fence_fd); 1446b8e80941Smrg 1447b8e80941Smrg buffer->pixmap = pixmap; 1448b8e80941Smrg buffer->own_pixmap = true; 1449b8e80941Smrg buffer->sync_fence = sync_fence; 1450b8e80941Smrg buffer->shm_fence = shm_fence; 1451b8e80941Smrg buffer->width = width; 1452b8e80941Smrg buffer->height = height; 1453b8e80941Smrg 1454b8e80941Smrg /* Mark the buffer as idle 1455b8e80941Smrg */ 1456b8e80941Smrg dri3_fence_set(buffer); 1457b8e80941Smrg 1458b8e80941Smrg return buffer; 1459b8e80941Smrg 1460b8e80941Smrgno_buffer_attrib: 1461b8e80941Smrg do { 1462b8e80941Smrg close(buffer_fds[i]); 1463b8e80941Smrg } while (--i >= 0); 1464b8e80941Smrg draw->ext->image->destroyImage(pixmap_buffer); 1465b8e80941Smrgno_linear_buffer: 1466b8e80941Smrg if (draw->is_different_gpu) 1467b8e80941Smrg draw->ext->image->destroyImage(buffer->image); 1468b8e80941Smrgno_image: 1469b8e80941Smrg free(buffer); 1470b8e80941Smrgno_buffer: 1471b8e80941Smrg xshmfence_unmap_shm(shm_fence); 1472b8e80941Smrgno_shm_fence: 1473b8e80941Smrg close(fence_fd); 1474b8e80941Smrg return NULL; 1475b8e80941Smrg} 1476b8e80941Smrg 1477b8e80941Smrg/** loader_dri3_update_drawable 1478b8e80941Smrg * 1479b8e80941Smrg * Called the first time we use the drawable and then 1480b8e80941Smrg * after we receive present configure notify events to 
1481b8e80941Smrg * track the geometry of the drawable 1482b8e80941Smrg */ 1483b8e80941Smrgstatic int 1484b8e80941Smrgdri3_update_drawable(struct loader_dri3_drawable *draw) 1485b8e80941Smrg{ 1486b8e80941Smrg mtx_lock(&draw->mtx); 1487b8e80941Smrg if (draw->first_init) { 1488b8e80941Smrg xcb_get_geometry_cookie_t geom_cookie; 1489b8e80941Smrg xcb_get_geometry_reply_t *geom_reply; 1490b8e80941Smrg xcb_void_cookie_t cookie; 1491b8e80941Smrg xcb_generic_error_t *error; 1492b8e80941Smrg xcb_present_query_capabilities_cookie_t present_capabilities_cookie; 1493b8e80941Smrg xcb_present_query_capabilities_reply_t *present_capabilities_reply; 1494b8e80941Smrg xcb_window_t root_win; 1495b8e80941Smrg 1496b8e80941Smrg draw->first_init = false; 1497b8e80941Smrg 1498b8e80941Smrg /* Try to select for input on the window. 1499b8e80941Smrg * 1500b8e80941Smrg * If the drawable is a window, this will get our events 1501b8e80941Smrg * delivered. 1502b8e80941Smrg * 1503b8e80941Smrg * Otherwise, we'll get a BadWindow error back from this request which 1504b8e80941Smrg * will let us know that the drawable is a pixmap instead. 
1505b8e80941Smrg */ 1506b8e80941Smrg 1507b8e80941Smrg draw->eid = xcb_generate_id(draw->conn); 1508b8e80941Smrg cookie = 1509b8e80941Smrg xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable, 1510b8e80941Smrg XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY | 1511b8e80941Smrg XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY | 1512b8e80941Smrg XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY); 1513b8e80941Smrg 1514b8e80941Smrg present_capabilities_cookie = 1515b8e80941Smrg xcb_present_query_capabilities(draw->conn, draw->drawable); 1516b8e80941Smrg 1517b8e80941Smrg /* Create an XCB event queue to hold present events outside of the usual 1518b8e80941Smrg * application event queue 1519b8e80941Smrg */ 1520b8e80941Smrg draw->special_event = xcb_register_for_special_xge(draw->conn, 1521b8e80941Smrg &xcb_present_id, 1522b8e80941Smrg draw->eid, 1523b8e80941Smrg draw->stamp); 1524b8e80941Smrg geom_cookie = xcb_get_geometry(draw->conn, draw->drawable); 1525b8e80941Smrg 1526b8e80941Smrg geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL); 1527b8e80941Smrg 1528b8e80941Smrg if (!geom_reply) { 1529b8e80941Smrg mtx_unlock(&draw->mtx); 1530b8e80941Smrg return false; 1531b8e80941Smrg } 1532b8e80941Smrg draw->width = geom_reply->width; 1533b8e80941Smrg draw->height = geom_reply->height; 1534b8e80941Smrg draw->depth = geom_reply->depth; 1535b8e80941Smrg draw->vtable->set_drawable_size(draw, draw->width, draw->height); 1536b8e80941Smrg root_win = geom_reply->root; 1537b8e80941Smrg 1538b8e80941Smrg free(geom_reply); 1539b8e80941Smrg 1540b8e80941Smrg draw->is_pixmap = false; 1541b8e80941Smrg 1542b8e80941Smrg /* Check to see if our select input call failed. If it failed with a 1543b8e80941Smrg * BadWindow error, then assume the drawable is a pixmap. 
Destroy the 1544b8e80941Smrg * special event queue created above and mark the drawable as a pixmap 1545b8e80941Smrg */ 1546b8e80941Smrg 1547b8e80941Smrg error = xcb_request_check(draw->conn, cookie); 1548b8e80941Smrg 1549b8e80941Smrg present_capabilities_reply = 1550b8e80941Smrg xcb_present_query_capabilities_reply(draw->conn, 1551b8e80941Smrg present_capabilities_cookie, 1552b8e80941Smrg NULL); 1553b8e80941Smrg 1554b8e80941Smrg if (present_capabilities_reply) { 1555b8e80941Smrg draw->present_capabilities = present_capabilities_reply->capabilities; 1556b8e80941Smrg free(present_capabilities_reply); 1557b8e80941Smrg } else 1558b8e80941Smrg draw->present_capabilities = 0; 1559b8e80941Smrg 1560b8e80941Smrg if (error) { 1561b8e80941Smrg if (error->error_code != BadWindow) { 1562b8e80941Smrg free(error); 1563b8e80941Smrg mtx_unlock(&draw->mtx); 1564b8e80941Smrg return false; 1565b8e80941Smrg } 1566b8e80941Smrg free(error); 1567b8e80941Smrg draw->is_pixmap = true; 1568b8e80941Smrg xcb_unregister_for_special_event(draw->conn, draw->special_event); 1569b8e80941Smrg draw->special_event = NULL; 1570b8e80941Smrg } 1571b8e80941Smrg 1572b8e80941Smrg if (draw->is_pixmap) 1573b8e80941Smrg draw->window = root_win; 1574b8e80941Smrg else 1575b8e80941Smrg draw->window = draw->drawable; 1576b8e80941Smrg } 1577b8e80941Smrg dri3_flush_present_events(draw); 1578b8e80941Smrg mtx_unlock(&draw->mtx); 1579b8e80941Smrg return true; 1580b8e80941Smrg} 1581b8e80941Smrg 1582b8e80941Smrg__DRIimage * 1583b8e80941Smrgloader_dri3_create_image(xcb_connection_t *c, 1584b8e80941Smrg xcb_dri3_buffer_from_pixmap_reply_t *bp_reply, 1585b8e80941Smrg unsigned int format, 1586b8e80941Smrg __DRIscreen *dri_screen, 1587b8e80941Smrg const __DRIimageExtension *image, 1588b8e80941Smrg void *loaderPrivate) 1589b8e80941Smrg{ 1590b8e80941Smrg int *fds; 1591b8e80941Smrg __DRIimage *image_planar, *ret; 1592b8e80941Smrg int stride, offset; 1593b8e80941Smrg 1594b8e80941Smrg /* Get an FD for the pixmap object 
1595b8e80941Smrg */ 1596b8e80941Smrg fds = xcb_dri3_buffer_from_pixmap_reply_fds(c, bp_reply); 1597b8e80941Smrg 1598b8e80941Smrg stride = bp_reply->stride; 1599b8e80941Smrg offset = 0; 1600b8e80941Smrg 1601b8e80941Smrg /* createImageFromFds creates a wrapper __DRIimage structure which 1602b8e80941Smrg * can deal with multiple planes for things like Yuv images. So, once 1603b8e80941Smrg * we've gotten the planar wrapper, pull the single plane out of it and 1604b8e80941Smrg * discard the wrapper. 1605b8e80941Smrg */ 1606b8e80941Smrg image_planar = image->createImageFromFds(dri_screen, 1607b8e80941Smrg bp_reply->width, 1608b8e80941Smrg bp_reply->height, 1609b8e80941Smrg image_format_to_fourcc(format), 1610b8e80941Smrg fds, 1, 1611b8e80941Smrg &stride, &offset, loaderPrivate); 1612b8e80941Smrg close(fds[0]); 1613b8e80941Smrg if (!image_planar) 1614b8e80941Smrg return NULL; 1615b8e80941Smrg 1616b8e80941Smrg ret = image->fromPlanar(image_planar, 0, loaderPrivate); 1617b8e80941Smrg 1618b8e80941Smrg if (!ret) 1619b8e80941Smrg ret = image_planar; 1620b8e80941Smrg else 1621b8e80941Smrg image->destroyImage(image_planar); 1622b8e80941Smrg 1623b8e80941Smrg return ret; 1624b8e80941Smrg} 1625b8e80941Smrg 1626b8e80941Smrg#ifdef HAVE_DRI3_MODIFIERS 1627b8e80941Smrg__DRIimage * 1628b8e80941Smrgloader_dri3_create_image_from_buffers(xcb_connection_t *c, 1629b8e80941Smrg xcb_dri3_buffers_from_pixmap_reply_t *bp_reply, 1630b8e80941Smrg unsigned int format, 1631b8e80941Smrg __DRIscreen *dri_screen, 1632b8e80941Smrg const __DRIimageExtension *image, 1633b8e80941Smrg void *loaderPrivate) 1634b8e80941Smrg{ 1635b8e80941Smrg __DRIimage *ret; 1636b8e80941Smrg int *fds; 1637b8e80941Smrg uint32_t *strides_in, *offsets_in; 1638b8e80941Smrg int strides[4], offsets[4]; 1639b8e80941Smrg unsigned error; 1640b8e80941Smrg int i; 1641b8e80941Smrg 1642b8e80941Smrg if (bp_reply->nfd > 4) 1643b8e80941Smrg return NULL; 1644b8e80941Smrg 1645b8e80941Smrg fds = xcb_dri3_buffers_from_pixmap_reply_fds(c, 
bp_reply); 1646b8e80941Smrg strides_in = xcb_dri3_buffers_from_pixmap_strides(bp_reply); 1647b8e80941Smrg offsets_in = xcb_dri3_buffers_from_pixmap_offsets(bp_reply); 1648b8e80941Smrg for (i = 0; i < bp_reply->nfd; i++) { 1649b8e80941Smrg strides[i] = strides_in[i]; 1650b8e80941Smrg offsets[i] = offsets_in[i]; 1651b8e80941Smrg } 1652b8e80941Smrg 1653b8e80941Smrg ret = image->createImageFromDmaBufs2(dri_screen, 1654b8e80941Smrg bp_reply->width, 1655b8e80941Smrg bp_reply->height, 1656b8e80941Smrg image_format_to_fourcc(format), 1657b8e80941Smrg bp_reply->modifier, 1658b8e80941Smrg fds, bp_reply->nfd, 1659b8e80941Smrg strides, offsets, 1660b8e80941Smrg 0, 0, 0, 0, /* UNDEFINED */ 1661b8e80941Smrg &error, loaderPrivate); 1662b8e80941Smrg 1663b8e80941Smrg for (i = 0; i < bp_reply->nfd; i++) 1664b8e80941Smrg close(fds[i]); 1665b8e80941Smrg 1666b8e80941Smrg return ret; 1667b8e80941Smrg} 1668b8e80941Smrg#endif 1669b8e80941Smrg 1670b8e80941Smrg/** dri3_get_pixmap_buffer 1671b8e80941Smrg * 1672b8e80941Smrg * Get the DRM object for a pixmap from the X server and 1673b8e80941Smrg * wrap that with a __DRIimage structure using createImageFromFds 1674b8e80941Smrg */ 1675b8e80941Smrgstatic struct loader_dri3_buffer * 1676b8e80941Smrgdri3_get_pixmap_buffer(__DRIdrawable *driDrawable, unsigned int format, 1677b8e80941Smrg enum loader_dri3_buffer_type buffer_type, 1678b8e80941Smrg struct loader_dri3_drawable *draw) 1679b8e80941Smrg{ 1680b8e80941Smrg int buf_id = loader_dri3_pixmap_buf_id(buffer_type); 1681b8e80941Smrg struct loader_dri3_buffer *buffer = draw->buffers[buf_id]; 1682b8e80941Smrg xcb_drawable_t pixmap; 1683b8e80941Smrg xcb_sync_fence_t sync_fence; 1684b8e80941Smrg struct xshmfence *shm_fence; 1685b8e80941Smrg int width; 1686b8e80941Smrg int height; 1687b8e80941Smrg int fence_fd; 1688b8e80941Smrg __DRIscreen *cur_screen; 1689b8e80941Smrg 1690b8e80941Smrg if (buffer) 1691b8e80941Smrg return buffer; 1692b8e80941Smrg 1693b8e80941Smrg pixmap = draw->drawable; 1694b8e80941Smrg 
1695b8e80941Smrg buffer = calloc(1, sizeof *buffer); 1696b8e80941Smrg if (!buffer) 1697b8e80941Smrg goto no_buffer; 1698b8e80941Smrg 1699b8e80941Smrg fence_fd = xshmfence_alloc_shm(); 1700b8e80941Smrg if (fence_fd < 0) 1701b8e80941Smrg goto no_fence; 1702b8e80941Smrg shm_fence = xshmfence_map_shm(fence_fd); 1703b8e80941Smrg if (shm_fence == NULL) { 1704b8e80941Smrg close (fence_fd); 1705b8e80941Smrg goto no_fence; 1706b8e80941Smrg } 1707b8e80941Smrg 1708b8e80941Smrg /* Get the currently-bound screen or revert to using the drawable's screen if 1709b8e80941Smrg * no contexts are currently bound. The latter case is at least necessary for 1710b8e80941Smrg * obs-studio, when using Window Capture (Xcomposite) as a Source. 1711b8e80941Smrg */ 1712b8e80941Smrg cur_screen = draw->vtable->get_dri_screen(); 1713b8e80941Smrg if (!cur_screen) { 1714b8e80941Smrg cur_screen = draw->dri_screen; 1715b8e80941Smrg } 1716b8e80941Smrg 1717b8e80941Smrg xcb_dri3_fence_from_fd(draw->conn, 1718b8e80941Smrg pixmap, 1719b8e80941Smrg (sync_fence = xcb_generate_id(draw->conn)), 1720b8e80941Smrg false, 1721b8e80941Smrg fence_fd); 1722b8e80941Smrg#ifdef HAVE_DRI3_MODIFIERS 1723b8e80941Smrg if (draw->multiplanes_available && 1724b8e80941Smrg draw->ext->image->base.version >= 15 && 1725b8e80941Smrg draw->ext->image->createImageFromDmaBufs2) { 1726b8e80941Smrg xcb_dri3_buffers_from_pixmap_cookie_t bps_cookie; 1727b8e80941Smrg xcb_dri3_buffers_from_pixmap_reply_t *bps_reply; 1728b8e80941Smrg 1729b8e80941Smrg bps_cookie = xcb_dri3_buffers_from_pixmap(draw->conn, pixmap); 1730b8e80941Smrg bps_reply = xcb_dri3_buffers_from_pixmap_reply(draw->conn, bps_cookie, 1731b8e80941Smrg NULL); 1732b8e80941Smrg if (!bps_reply) 1733b8e80941Smrg goto no_image; 1734b8e80941Smrg buffer->image = 1735b8e80941Smrg loader_dri3_create_image_from_buffers(draw->conn, bps_reply, format, 1736b8e80941Smrg cur_screen, draw->ext->image, 1737b8e80941Smrg buffer); 1738b8e80941Smrg width = bps_reply->width; 1739b8e80941Smrg height = 
bps_reply->height; 1740b8e80941Smrg free(bps_reply); 1741b8e80941Smrg } else 1742b8e80941Smrg#endif 1743b8e80941Smrg { 1744b8e80941Smrg xcb_dri3_buffer_from_pixmap_cookie_t bp_cookie; 1745b8e80941Smrg xcb_dri3_buffer_from_pixmap_reply_t *bp_reply; 1746b8e80941Smrg 1747b8e80941Smrg bp_cookie = xcb_dri3_buffer_from_pixmap(draw->conn, pixmap); 1748b8e80941Smrg bp_reply = xcb_dri3_buffer_from_pixmap_reply(draw->conn, bp_cookie, NULL); 1749b8e80941Smrg if (!bp_reply) 1750b8e80941Smrg goto no_image; 1751b8e80941Smrg 1752b8e80941Smrg buffer->image = loader_dri3_create_image(draw->conn, bp_reply, format, 1753b8e80941Smrg cur_screen, draw->ext->image, 1754b8e80941Smrg buffer); 1755b8e80941Smrg width = bp_reply->width; 1756b8e80941Smrg height = bp_reply->height; 1757b8e80941Smrg free(bp_reply); 1758b8e80941Smrg } 1759b8e80941Smrg 1760b8e80941Smrg if (!buffer->image) 1761b8e80941Smrg goto no_image; 1762b8e80941Smrg 1763b8e80941Smrg buffer->pixmap = pixmap; 1764b8e80941Smrg buffer->own_pixmap = false; 1765b8e80941Smrg buffer->width = width; 1766b8e80941Smrg buffer->height = height; 1767b8e80941Smrg buffer->shm_fence = shm_fence; 1768b8e80941Smrg buffer->sync_fence = sync_fence; 1769b8e80941Smrg 1770b8e80941Smrg draw->buffers[buf_id] = buffer; 1771b8e80941Smrg 1772b8e80941Smrg return buffer; 1773b8e80941Smrg 1774b8e80941Smrgno_image: 1775b8e80941Smrg xcb_sync_destroy_fence(draw->conn, sync_fence); 1776b8e80941Smrg xshmfence_unmap_shm(shm_fence); 1777b8e80941Smrgno_fence: 1778b8e80941Smrg free(buffer); 1779b8e80941Smrgno_buffer: 1780b8e80941Smrg return NULL; 1781b8e80941Smrg} 1782b8e80941Smrg 1783b8e80941Smrg/** dri3_get_buffer 1784b8e80941Smrg * 1785b8e80941Smrg * Find a front or back buffer, allocating new ones as necessary 1786b8e80941Smrg */ 1787b8e80941Smrgstatic struct loader_dri3_buffer * 1788b8e80941Smrgdri3_get_buffer(__DRIdrawable *driDrawable, 1789b8e80941Smrg unsigned int format, 1790b8e80941Smrg enum loader_dri3_buffer_type buffer_type, 1791b8e80941Smrg struct 
loader_dri3_drawable *draw) 1792b8e80941Smrg{ 1793b8e80941Smrg struct loader_dri3_buffer *buffer; 1794b8e80941Smrg bool fence_await = buffer_type == loader_dri3_buffer_back; 1795b8e80941Smrg int buf_id; 1796b8e80941Smrg 1797b8e80941Smrg if (buffer_type == loader_dri3_buffer_back) { 1798b8e80941Smrg draw->back_format = format; 1799b8e80941Smrg 1800b8e80941Smrg buf_id = dri3_find_back(draw); 1801b8e80941Smrg 1802b8e80941Smrg if (buf_id < 0) 1803b8e80941Smrg return NULL; 1804b8e80941Smrg } else { 1805b8e80941Smrg buf_id = LOADER_DRI3_FRONT_ID; 1806b8e80941Smrg } 1807b8e80941Smrg 1808b8e80941Smrg buffer = draw->buffers[buf_id]; 1809b8e80941Smrg 1810b8e80941Smrg /* Allocate a new buffer if there isn't an old one, if that 1811b8e80941Smrg * old one is the wrong size, or if it's suboptimal 1812b8e80941Smrg */ 1813b8e80941Smrg if (!buffer || buffer->width != draw->width || 1814b8e80941Smrg buffer->height != draw->height || 1815b8e80941Smrg buffer->reallocate) { 1816b8e80941Smrg struct loader_dri3_buffer *new_buffer; 1817b8e80941Smrg 1818b8e80941Smrg /* Allocate the new buffers 1819b8e80941Smrg */ 1820b8e80941Smrg new_buffer = dri3_alloc_render_buffer(draw, 1821b8e80941Smrg format, 1822b8e80941Smrg draw->width, 1823b8e80941Smrg draw->height, 1824b8e80941Smrg draw->depth); 1825b8e80941Smrg if (!new_buffer) 1826b8e80941Smrg return NULL; 1827b8e80941Smrg 1828b8e80941Smrg /* When resizing, copy the contents of the old buffer, waiting for that 1829b8e80941Smrg * copy to complete using our fences before proceeding 1830b8e80941Smrg */ 1831b8e80941Smrg if ((buffer_type == loader_dri3_buffer_back || 1832b8e80941Smrg (buffer_type == loader_dri3_buffer_front && draw->have_fake_front)) 1833b8e80941Smrg && buffer) { 1834b8e80941Smrg 1835b8e80941Smrg /* Fill the new buffer with data from an old buffer */ 1836b8e80941Smrg if (!loader_dri3_blit_image(draw, 1837b8e80941Smrg new_buffer->image, 1838b8e80941Smrg buffer->image, 1839b8e80941Smrg 0, 0, draw->width, draw->height, 1840b8e80941Smrg 
0, 0, 0) && 1841b8e80941Smrg !buffer->linear_buffer) { 1842b8e80941Smrg dri3_fence_reset(draw->conn, new_buffer); 1843b8e80941Smrg dri3_copy_area(draw->conn, 1844b8e80941Smrg buffer->pixmap, 1845b8e80941Smrg new_buffer->pixmap, 1846b8e80941Smrg dri3_drawable_gc(draw), 1847b8e80941Smrg 0, 0, 0, 0, 1848b8e80941Smrg draw->width, draw->height); 1849b8e80941Smrg dri3_fence_trigger(draw->conn, new_buffer); 1850b8e80941Smrg fence_await = true; 1851b8e80941Smrg } 1852b8e80941Smrg dri3_free_render_buffer(draw, buffer); 1853b8e80941Smrg } else if (buffer_type == loader_dri3_buffer_front) { 1854b8e80941Smrg /* Fill the new fake front with data from a real front */ 1855b8e80941Smrg loader_dri3_swapbuffer_barrier(draw); 1856b8e80941Smrg dri3_fence_reset(draw->conn, new_buffer); 1857b8e80941Smrg dri3_copy_area(draw->conn, 1858b8e80941Smrg draw->drawable, 1859b8e80941Smrg new_buffer->pixmap, 1860b8e80941Smrg dri3_drawable_gc(draw), 1861b8e80941Smrg 0, 0, 0, 0, 1862b8e80941Smrg draw->width, draw->height); 1863b8e80941Smrg dri3_fence_trigger(draw->conn, new_buffer); 1864b8e80941Smrg 1865b8e80941Smrg if (new_buffer->linear_buffer) { 1866b8e80941Smrg dri3_fence_await(draw->conn, draw, new_buffer); 1867b8e80941Smrg (void) loader_dri3_blit_image(draw, 1868b8e80941Smrg new_buffer->image, 1869b8e80941Smrg new_buffer->linear_buffer, 1870b8e80941Smrg 0, 0, draw->width, draw->height, 1871b8e80941Smrg 0, 0, 0); 1872b8e80941Smrg } else 1873b8e80941Smrg fence_await = true; 1874b8e80941Smrg } 1875b8e80941Smrg buffer = new_buffer; 1876b8e80941Smrg draw->buffers[buf_id] = buffer; 1877b8e80941Smrg } 1878b8e80941Smrg 1879b8e80941Smrg if (fence_await) 1880b8e80941Smrg dri3_fence_await(draw->conn, draw, buffer); 1881b8e80941Smrg 1882b8e80941Smrg /* 1883b8e80941Smrg * Do we need to preserve the content of a previous buffer? 
1884b8e80941Smrg * 1885b8e80941Smrg * Note that this blit is needed only to avoid a wait for a buffer that 1886b8e80941Smrg * is currently in the flip chain or being scanned out from. That's really 1887b8e80941Smrg * a tradeoff. If we're ok with the wait we can reduce the number of back 1888b8e80941Smrg * buffers to 1 for SWAP_EXCHANGE, and 1 for SWAP_COPY, 1889b8e80941Smrg * but in the latter case we must disallow page-flipping. 1890b8e80941Smrg */ 1891b8e80941Smrg if (buffer_type == loader_dri3_buffer_back && 1892b8e80941Smrg draw->cur_blit_source != -1 && 1893b8e80941Smrg draw->buffers[draw->cur_blit_source] && 1894b8e80941Smrg buffer != draw->buffers[draw->cur_blit_source]) { 1895b8e80941Smrg 1896b8e80941Smrg struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source]; 1897b8e80941Smrg 1898b8e80941Smrg /* Avoid flushing here. Will propably do good for tiling hardware. */ 1899b8e80941Smrg (void) loader_dri3_blit_image(draw, 1900b8e80941Smrg buffer->image, 1901b8e80941Smrg source->image, 1902b8e80941Smrg 0, 0, draw->width, draw->height, 1903b8e80941Smrg 0, 0, 0); 1904b8e80941Smrg buffer->last_swap = source->last_swap; 1905b8e80941Smrg draw->cur_blit_source = -1; 1906b8e80941Smrg } 1907b8e80941Smrg /* Return the requested buffer */ 1908b8e80941Smrg return buffer; 1909b8e80941Smrg} 1910b8e80941Smrg 1911b8e80941Smrg/** dri3_free_buffers 1912b8e80941Smrg * 1913b8e80941Smrg * Free the front bufffer or all of the back buffers. 
Used 1914b8e80941Smrg * when the application changes which buffers it needs 1915b8e80941Smrg */ 1916b8e80941Smrgstatic void 1917b8e80941Smrgdri3_free_buffers(__DRIdrawable *driDrawable, 1918b8e80941Smrg enum loader_dri3_buffer_type buffer_type, 1919b8e80941Smrg struct loader_dri3_drawable *draw) 1920b8e80941Smrg{ 1921b8e80941Smrg struct loader_dri3_buffer *buffer; 1922b8e80941Smrg int first_id; 1923b8e80941Smrg int n_id; 1924b8e80941Smrg int buf_id; 1925b8e80941Smrg 1926b8e80941Smrg switch (buffer_type) { 1927b8e80941Smrg case loader_dri3_buffer_back: 1928b8e80941Smrg first_id = LOADER_DRI3_BACK_ID(0); 1929b8e80941Smrg n_id = LOADER_DRI3_MAX_BACK; 1930b8e80941Smrg draw->cur_blit_source = -1; 1931b8e80941Smrg break; 1932b8e80941Smrg case loader_dri3_buffer_front: 1933b8e80941Smrg first_id = LOADER_DRI3_FRONT_ID; 1934b8e80941Smrg /* Don't free a fake front holding new backbuffer content. */ 1935b8e80941Smrg n_id = (draw->cur_blit_source == LOADER_DRI3_FRONT_ID) ? 0 : 1; 1936b8e80941Smrg } 1937b8e80941Smrg 1938b8e80941Smrg for (buf_id = first_id; buf_id < first_id + n_id; buf_id++) { 1939b8e80941Smrg buffer = draw->buffers[buf_id]; 1940b8e80941Smrg if (buffer) { 1941b8e80941Smrg dri3_free_render_buffer(draw, buffer); 1942b8e80941Smrg draw->buffers[buf_id] = NULL; 1943b8e80941Smrg } 1944b8e80941Smrg } 1945b8e80941Smrg} 1946b8e80941Smrg 1947b8e80941Smrg/** loader_dri3_get_buffers 1948b8e80941Smrg * 1949b8e80941Smrg * The published buffer allocation API. 1950b8e80941Smrg * Returns all of the necessary buffers, allocating 1951b8e80941Smrg * as needed. 
1952b8e80941Smrg */ 1953b8e80941Smrgint 1954b8e80941Smrgloader_dri3_get_buffers(__DRIdrawable *driDrawable, 1955b8e80941Smrg unsigned int format, 1956b8e80941Smrg uint32_t *stamp, 1957b8e80941Smrg void *loaderPrivate, 1958b8e80941Smrg uint32_t buffer_mask, 1959b8e80941Smrg struct __DRIimageList *buffers) 1960b8e80941Smrg{ 1961b8e80941Smrg struct loader_dri3_drawable *draw = loaderPrivate; 1962b8e80941Smrg struct loader_dri3_buffer *front, *back; 1963b8e80941Smrg int buf_id; 1964b8e80941Smrg 1965b8e80941Smrg buffers->image_mask = 0; 1966b8e80941Smrg buffers->front = NULL; 1967b8e80941Smrg buffers->back = NULL; 1968b8e80941Smrg 1969b8e80941Smrg front = NULL; 1970b8e80941Smrg back = NULL; 1971b8e80941Smrg 1972b8e80941Smrg if (!dri3_update_drawable(draw)) 1973b8e80941Smrg return false; 1974b8e80941Smrg 1975b8e80941Smrg dri3_update_num_back(draw); 1976b8e80941Smrg 1977b8e80941Smrg /* Free no longer needed back buffers */ 1978b8e80941Smrg for (buf_id = draw->num_back; buf_id < LOADER_DRI3_MAX_BACK; buf_id++) { 1979b8e80941Smrg if (draw->cur_blit_source != buf_id && draw->buffers[buf_id]) { 1980b8e80941Smrg dri3_free_render_buffer(draw, draw->buffers[buf_id]); 1981b8e80941Smrg draw->buffers[buf_id] = NULL; 1982b8e80941Smrg } 1983b8e80941Smrg } 1984b8e80941Smrg 1985b8e80941Smrg /* pixmaps always have front buffers. 1986b8e80941Smrg * Exchange swaps also mandate fake front buffers. 1987b8e80941Smrg */ 1988b8e80941Smrg if (draw->is_pixmap || draw->swap_method == __DRI_ATTRIB_SWAP_EXCHANGE) 1989b8e80941Smrg buffer_mask |= __DRI_IMAGE_BUFFER_FRONT; 1990b8e80941Smrg 1991b8e80941Smrg if (buffer_mask & __DRI_IMAGE_BUFFER_FRONT) { 1992b8e80941Smrg /* All pixmaps are owned by the server gpu. 1993b8e80941Smrg * When we use a different gpu, we can't use the pixmap 1994b8e80941Smrg * as buffer since it is potentially tiled a way 1995b8e80941Smrg * our device can't understand. In this case, use 1996b8e80941Smrg * a fake front buffer. 
Hopefully the pixmap 1997b8e80941Smrg * content will get synced with the fake front 1998b8e80941Smrg * buffer. 1999b8e80941Smrg */ 2000b8e80941Smrg if (draw->is_pixmap && !draw->is_different_gpu) 2001b8e80941Smrg front = dri3_get_pixmap_buffer(driDrawable, 2002b8e80941Smrg format, 2003b8e80941Smrg loader_dri3_buffer_front, 2004b8e80941Smrg draw); 2005b8e80941Smrg else 2006b8e80941Smrg front = dri3_get_buffer(driDrawable, 2007b8e80941Smrg format, 2008b8e80941Smrg loader_dri3_buffer_front, 2009b8e80941Smrg draw); 2010b8e80941Smrg 2011b8e80941Smrg if (!front) 2012b8e80941Smrg return false; 2013b8e80941Smrg } else { 2014b8e80941Smrg dri3_free_buffers(driDrawable, loader_dri3_buffer_front, draw); 2015b8e80941Smrg draw->have_fake_front = 0; 2016b8e80941Smrg } 2017b8e80941Smrg 2018b8e80941Smrg if (buffer_mask & __DRI_IMAGE_BUFFER_BACK) { 2019b8e80941Smrg back = dri3_get_buffer(driDrawable, 2020b8e80941Smrg format, 2021b8e80941Smrg loader_dri3_buffer_back, 2022b8e80941Smrg draw); 2023b8e80941Smrg if (!back) 2024b8e80941Smrg return false; 2025b8e80941Smrg draw->have_back = 1; 2026b8e80941Smrg } else { 2027b8e80941Smrg dri3_free_buffers(driDrawable, loader_dri3_buffer_back, draw); 2028b8e80941Smrg draw->have_back = 0; 2029b8e80941Smrg } 2030b8e80941Smrg 2031b8e80941Smrg if (front) { 2032b8e80941Smrg buffers->image_mask |= __DRI_IMAGE_BUFFER_FRONT; 2033b8e80941Smrg buffers->front = front->image; 2034b8e80941Smrg draw->have_fake_front = draw->is_different_gpu || !draw->is_pixmap; 2035b8e80941Smrg } 2036b8e80941Smrg 2037b8e80941Smrg if (back) { 2038b8e80941Smrg buffers->image_mask |= __DRI_IMAGE_BUFFER_BACK; 2039b8e80941Smrg buffers->back = back->image; 2040b8e80941Smrg } 2041b8e80941Smrg 2042b8e80941Smrg draw->stamp = stamp; 2043b8e80941Smrg 2044b8e80941Smrg return true; 2045b8e80941Smrg} 2046b8e80941Smrg 2047b8e80941Smrg/** loader_dri3_update_drawable_geometry 2048b8e80941Smrg * 2049b8e80941Smrg * Get the current drawable geometry. 
2050b8e80941Smrg */ 2051b8e80941Smrgvoid 2052b8e80941Smrgloader_dri3_update_drawable_geometry(struct loader_dri3_drawable *draw) 2053b8e80941Smrg{ 2054b8e80941Smrg xcb_get_geometry_cookie_t geom_cookie; 2055b8e80941Smrg xcb_get_geometry_reply_t *geom_reply; 2056b8e80941Smrg 2057b8e80941Smrg geom_cookie = xcb_get_geometry(draw->conn, draw->drawable); 2058b8e80941Smrg 2059b8e80941Smrg geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL); 2060b8e80941Smrg 2061b8e80941Smrg if (geom_reply) { 2062b8e80941Smrg draw->width = geom_reply->width; 2063b8e80941Smrg draw->height = geom_reply->height; 2064b8e80941Smrg draw->vtable->set_drawable_size(draw, draw->width, draw->height); 2065b8e80941Smrg draw->ext->flush->invalidate(draw->dri_drawable); 2066b8e80941Smrg 2067b8e80941Smrg free(geom_reply); 2068b8e80941Smrg } 2069b8e80941Smrg} 2070b8e80941Smrg 2071b8e80941Smrg 2072b8e80941Smrg/** 2073b8e80941Smrg * Make sure the server has flushed all pending swap buffers to hardware 2074b8e80941Smrg * for this drawable. Ideally we'd want to send an X protocol request to 2075b8e80941Smrg * have the server block our connection until the swaps are complete. That 2076b8e80941Smrg * would avoid the potential round-trip here. 2077b8e80941Smrg */ 2078b8e80941Smrgvoid 2079b8e80941Smrgloader_dri3_swapbuffer_barrier(struct loader_dri3_drawable *draw) 2080b8e80941Smrg{ 2081b8e80941Smrg int64_t ust, msc, sbc; 2082b8e80941Smrg 2083b8e80941Smrg (void) loader_dri3_wait_for_sbc(draw, 0, &ust, &msc, &sbc); 2084b8e80941Smrg} 2085b8e80941Smrg 2086b8e80941Smrg/** 2087b8e80941Smrg * Perform any cleanup associated with a close screen operation. 2088b8e80941Smrg * \param dri_screen[in,out] Pointer to __DRIscreen about to be closed. 2089b8e80941Smrg * 2090b8e80941Smrg * This function destroys the screen's cached swap context if any. 
2091b8e80941Smrg */ 2092b8e80941Smrgvoid 2093b8e80941Smrgloader_dri3_close_screen(__DRIscreen *dri_screen) 2094b8e80941Smrg{ 2095b8e80941Smrg mtx_lock(&blit_context.mtx); 2096b8e80941Smrg if (blit_context.ctx && blit_context.cur_screen == dri_screen) { 2097b8e80941Smrg blit_context.core->destroyContext(blit_context.ctx); 2098b8e80941Smrg blit_context.ctx = NULL; 2099b8e80941Smrg } 2100b8e80941Smrg mtx_unlock(&blit_context.mtx); 2101b8e80941Smrg} 2102b8e80941Smrg 2103b8e80941Smrg/** 2104b8e80941Smrg * Find a backbuffer slot - potentially allocating a back buffer 2105b8e80941Smrg * 2106b8e80941Smrg * \param draw[in,out] Pointer to the drawable for which to find back. 2107b8e80941Smrg * \return Pointer to a new back buffer or NULL if allocation failed or was 2108b8e80941Smrg * not mandated. 2109b8e80941Smrg * 2110b8e80941Smrg * Find a potentially new back buffer, and if it's not been allocated yet and 2111b8e80941Smrg * in addition needs initializing, then try to allocate and initialize it. 
2112b8e80941Smrg */ 2113b8e80941Smrg#include <stdio.h> 2114b8e80941Smrgstatic struct loader_dri3_buffer * 2115b8e80941Smrgdri3_find_back_alloc(struct loader_dri3_drawable *draw) 2116b8e80941Smrg{ 2117b8e80941Smrg struct loader_dri3_buffer *back; 2118b8e80941Smrg int id; 2119b8e80941Smrg 2120b8e80941Smrg id = dri3_find_back(draw); 2121b8e80941Smrg if (id < 0) 2122b8e80941Smrg return NULL; 2123b8e80941Smrg 2124b8e80941Smrg back = draw->buffers[id]; 2125b8e80941Smrg /* Allocate a new back if we haven't got one */ 2126b8e80941Smrg if (!back && draw->back_format != __DRI_IMAGE_FORMAT_NONE && 2127b8e80941Smrg dri3_update_drawable(draw)) 2128b8e80941Smrg back = dri3_alloc_render_buffer(draw, draw->back_format, 2129b8e80941Smrg draw->width, draw->height, draw->depth); 2130b8e80941Smrg 2131b8e80941Smrg if (!back) 2132b8e80941Smrg return NULL; 2133b8e80941Smrg 2134b8e80941Smrg draw->buffers[id] = back; 2135b8e80941Smrg 2136b8e80941Smrg /* If necessary, prefill the back with data according to swap_method mode. */ 2137b8e80941Smrg if (draw->cur_blit_source != -1 && 2138b8e80941Smrg draw->buffers[draw->cur_blit_source] && 2139b8e80941Smrg back != draw->buffers[draw->cur_blit_source]) { 2140b8e80941Smrg struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source]; 2141b8e80941Smrg 2142b8e80941Smrg dri3_fence_await(draw->conn, draw, source); 2143b8e80941Smrg dri3_fence_await(draw->conn, draw, back); 2144b8e80941Smrg (void) loader_dri3_blit_image(draw, 2145b8e80941Smrg back->image, 2146b8e80941Smrg source->image, 2147b8e80941Smrg 0, 0, draw->width, draw->height, 2148b8e80941Smrg 0, 0, 0); 2149b8e80941Smrg back->last_swap = source->last_swap; 2150b8e80941Smrg draw->cur_blit_source = -1; 2151b8e80941Smrg } 2152b8e80941Smrg 2153b8e80941Smrg return back; 2154b8e80941Smrg} 2155