1/*
2 * Copyright © 2013 Keith Packard
3 * Copyright © 2015 Boyan Ding
4 *
5 * Permission to use, copy, modify, distribute, and sell this software and its
6 * documentation for any purpose is hereby granted without fee, provided that
7 * the above copyright notice appear in all copies and that both that copyright
8 * notice and this permission notice appear in supporting documentation, and
9 * that the name of the copyright holders not be used in advertising or
10 * publicity pertaining to distribution of the software without specific,
11 * written prior permission.  The copyright holders make no representations
12 * about the suitability of this software for any purpose.  It is provided "as
13 * is" without express or implied warranty.
14 *
15 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
16 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
17 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
18 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
19 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
20 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
21 * OF THIS SOFTWARE.
22 */
23
24#include <fcntl.h>
25#include <stdlib.h>
26#include <unistd.h>
27#include <string.h>
28
29#include <X11/xshmfence.h>
30#include <xcb/xcb.h>
31#include <xcb/dri3.h>
32#include <xcb/present.h>
33#include <xcb/xfixes.h>
34
35#include <X11/Xlib-xcb.h>
36
37#include "loader_dri_helper.h"
38#include "loader_dri3_helper.h"
39#include "util/macros.h"
40#include "drm-uapi/drm_fourcc.h"
41
/* From driconf.h, user exposed so should be stable */
#define DRI_CONF_VBLANK_NEVER 0           /* never synchronize to vblank */
#define DRI_CONF_VBLANK_DEF_INTERVAL_0 1  /* app default; initial swap interval 0 */
#define DRI_CONF_VBLANK_DEF_INTERVAL_1 2  /* app default; initial swap interval 1 */
#define DRI_CONF_VBLANK_ALWAYS_SYNC 3     /* force synchronization to vblank */
47
/**
 * A cached blit context.
 *
 * Used to service blitImage calls when the caller has no suitable bound
 * context; see loader_dri3_blit_context_get()/..._put().
 */
struct loader_dri3_blit_context {
   mtx_t mtx;                      /* serializes use of ctx; held while in use */
   __DRIcontext *ctx;              /* lazily created context, or NULL */
   __DRIscreen *cur_screen;        /* screen ctx was created for */
   const __DRIcoreExtension *core; /* core extension used to destroy ctx */
};
57
/* For simplicity we maintain the cache only for a single screen at a time */
static struct loader_dri3_blit_context blit_context = {
   _MTX_INITIALIZER_NP, NULL
};

/* Forward declaration; defined later in this file. */
static void
dri3_flush_present_events(struct loader_dri3_drawable *draw);

/* Forward declaration; defined later in this file. */
static struct loader_dri3_buffer *
dri3_find_back_alloc(struct loader_dri3_drawable *draw);
68
69static xcb_screen_t *
70get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
71{
72   xcb_screen_iterator_t screen_iter =
73   xcb_setup_roots_iterator(xcb_get_setup(conn));
74
75   for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
76      if (screen_iter.data->root == root)
77         return screen_iter.data;
78   }
79
80   return NULL;
81}
82
83static xcb_visualtype_t *
84get_xcb_visualtype_for_depth(struct loader_dri3_drawable *draw, int depth)
85{
86   xcb_visualtype_iterator_t visual_iter;
87   xcb_screen_t *screen = draw->screen;
88   xcb_depth_iterator_t depth_iter;
89
90   if (!screen)
91      return NULL;
92
93   depth_iter = xcb_screen_allowed_depths_iterator(screen);
94   for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
95      if (depth_iter.data->depth != depth)
96         continue;
97
98      visual_iter = xcb_depth_visuals_iterator(depth_iter.data);
99      if (visual_iter.rem)
100         return visual_iter.data;
101   }
102
103   return NULL;
104}
105
106/* Sets the adaptive sync window property state. */
107static void
108set_adaptive_sync_property(xcb_connection_t *conn, xcb_drawable_t drawable,
109                           uint32_t state)
110{
111   static char const name[] = "_VARIABLE_REFRESH";
112   xcb_intern_atom_cookie_t cookie;
113   xcb_intern_atom_reply_t* reply;
114   xcb_void_cookie_t check;
115
116   cookie = xcb_intern_atom(conn, 0, strlen(name), name);
117   reply = xcb_intern_atom_reply(conn, cookie, NULL);
118   if (reply == NULL)
119      return;
120
121   if (state)
122      check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
123                                          drawable, reply->atom,
124                                          XCB_ATOM_CARDINAL, 32, 1, &state);
125   else
126      check = xcb_delete_property_checked(conn, drawable, reply->atom);
127
128   xcb_discard_reply(conn, check.sequence);
129   free(reply);
130}
131
132/* Get red channel mask for given drawable at given depth. */
133static unsigned int
134dri3_get_red_mask_for_depth(struct loader_dri3_drawable *draw, int depth)
135{
136   xcb_visualtype_t *visual = get_xcb_visualtype_for_depth(draw, depth);
137
138   if (visual)
139      return visual->red_mask;
140
141   return 0;
142}
143
144/**
145 * Do we have blit functionality in the image blit extension?
146 *
147 * \param draw[in]  The drawable intended to blit from / to.
148 * \return  true if we have blit functionality. false otherwise.
149 */
150static bool loader_dri3_have_image_blit(const struct loader_dri3_drawable *draw)
151{
152   return draw->ext->image->base.version >= 9 &&
153      draw->ext->image->blitImage != NULL;
154}
155
/**
 * Get and lock (for use with the current thread) a dri context associated
 * with the drawable's dri screen. The context is intended to be used with
 * the dri image extension's blitImage method.
 *
 * \param draw[in]  Pointer to the drawable whose dri screen we want a
 * dri context for.
 * \return A dri context or NULL if context creation failed.
 *
 * When the caller is done with the context (even if the context returned was
 * NULL), the caller must call loader_dri3_blit_context_put.
 *
 * Note: this function returns with blit_context.mtx HELD; it is released
 * by loader_dri3_blit_context_put().
 */
static __DRIcontext *
loader_dri3_blit_context_get(struct loader_dri3_drawable *draw)
{
   mtx_lock(&blit_context.mtx);

   /* The cache holds a context for a single screen only; throw away a
    * context created for a different screen.
    */
   if (blit_context.ctx && blit_context.cur_screen != draw->dri_screen) {
      blit_context.core->destroyContext(blit_context.ctx);
      blit_context.ctx = NULL;
   }

   /* (Re)create lazily. On failure ctx stays NULL and the caller gets NULL,
    * but the mutex is still held and must still be released via _put().
    */
   if (!blit_context.ctx) {
      blit_context.ctx = draw->ext->core->createNewContext(draw->dri_screen,
                                                           NULL, NULL, NULL);
      blit_context.cur_screen = draw->dri_screen;
      blit_context.core = draw->ext->core;
   }

   return blit_context.ctx;
}
187
/**
 * Release (for use with other threads) a dri context previously obtained using
 * loader_dri3_blit_context_get.
 *
 * Simply drops the mutex taken by loader_dri3_blit_context_get(); must be
 * called exactly once per _get(), even when _get() returned NULL.
 */
static void
loader_dri3_blit_context_put(void)
{
   mtx_unlock(&blit_context.mtx);
}
197
198/**
199 * Blit (parts of) the contents of a DRI image to another dri image
200 *
201 * \param draw[in]  The drawable which owns the images.
202 * \param dst[in]  The destination image.
203 * \param src[in]  The source image.
204 * \param dstx0[in]  Start destination coordinate.
205 * \param dsty0[in]  Start destination coordinate.
206 * \param width[in]  Blit width.
207 * \param height[in] Blit height.
208 * \param srcx0[in]  Start source coordinate.
209 * \param srcy0[in]  Start source coordinate.
210 * \param flush_flag[in]  Image blit flush flag.
211 * \return true iff successful.
212 */
213static bool
214loader_dri3_blit_image(struct loader_dri3_drawable *draw,
215                       __DRIimage *dst, __DRIimage *src,
216                       int dstx0, int dsty0, int width, int height,
217                       int srcx0, int srcy0, int flush_flag)
218{
219   __DRIcontext *dri_context;
220   bool use_blit_context = false;
221
222   if (!loader_dri3_have_image_blit(draw))
223      return false;
224
225   dri_context = draw->vtable->get_dri_context(draw);
226
227   if (!dri_context || !draw->vtable->in_current_context(draw)) {
228      dri_context = loader_dri3_blit_context_get(draw);
229      use_blit_context = true;
230      flush_flag |= __BLIT_FLAG_FLUSH;
231   }
232
233   if (dri_context)
234      draw->ext->image->blitImage(dri_context, dst, src, dstx0, dsty0,
235                                  width, height, srcx0, srcy0,
236                                  width, height, flush_flag);
237
238   if (use_blit_context)
239      loader_dri3_blit_context_put();
240
241   return dri_context != NULL;
242}
243
/* Reset the buffer's shared-memory fence to the untriggered state.
 * The connection parameter is unused but kept for API symmetry. */
static inline void
dri3_fence_reset(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xshmfence_reset(buffer->shm_fence);
}

/* Trigger the buffer's fence directly from the client side. */
static inline void
dri3_fence_set(struct loader_dri3_buffer *buffer)
{
   xshmfence_trigger(buffer->shm_fence);
}

/* Ask the X server to trigger the buffer's sync fence. */
static inline void
dri3_fence_trigger(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xcb_sync_trigger_fence(c, buffer->sync_fence);
}

/* Flush the connection and block until the buffer's fence is triggered.
 * If a drawable is given, also drain its pending Present events under
 * its mutex (the server may have sent completions while we waited). */
static inline void
dri3_fence_await(xcb_connection_t *c, struct loader_dri3_drawable *draw,
                 struct loader_dri3_buffer *buffer)
{
   xcb_flush(c);
   xshmfence_await(buffer->shm_fence);
   if (draw) {
      mtx_lock(&draw->mtx);
      dri3_flush_present_events(draw);
      mtx_unlock(&draw->mtx);
   }
}
274
275static void
276dri3_update_max_num_back(struct loader_dri3_drawable *draw)
277{
278   switch (draw->last_present_mode) {
279   case XCB_PRESENT_COMPLETE_MODE_FLIP: {
280      int new_max;
281
282      if (draw->swap_interval == 0)
283         new_max = 4;
284      else
285         new_max = 3;
286
287      assert(new_max <= LOADER_DRI3_MAX_BACK);
288
289      if (new_max != draw->max_num_back) {
290         /* On transition from swap interval == 0 to != 0, start with two
291          * buffers again. Otherwise keep the current number of buffers. Either
292          * way, more will be allocated if needed.
293          */
294         if (new_max < draw->max_num_back)
295            draw->cur_num_back = 2;
296
297         draw->max_num_back = new_max;
298      }
299
300      break;
301   }
302
303   case XCB_PRESENT_COMPLETE_MODE_SKIP:
304      break;
305
306   default:
307      /* On transition from flips to copies, start with a single buffer again,
308       * a second one will be allocated if needed
309       */
310      if (draw->max_num_back != 2)
311         draw->cur_num_back = 1;
312
313      draw->max_num_back = 2;
314   }
315}
316
/* Set the swap interval for the drawable, draining pending swaps first
 * if the interval actually changes. */
void
loader_dri3_set_swap_interval(struct loader_dri3_drawable *draw, int interval)
{
   /* Wait all previous swap done before changing swap interval.
    *
    * This is for preventing swap out of order in the following cases:
    *   1. Change from sync swap mode (>0) to async mode (=0), so async swap occurs
    *      before previous pending sync swap.
    *   2. Change from value A to B and A > B, so the target_msc for the previous
    *      pending swap may be bigger than newer swap.
    *
    * PS. changing from value A to B and A < B won't cause swap out of order but
    * may still gets wrong target_msc value at the beginning.
    */
   if (draw->swap_interval != interval)
      loader_dri3_swapbuffer_barrier(draw);

   draw->swap_interval = interval;
}
336
337/** dri3_free_render_buffer
338 *
339 * Free everything associated with one render buffer including pixmap, fence
340 * stuff and the driver image
341 */
342static void
343dri3_free_render_buffer(struct loader_dri3_drawable *draw,
344                        struct loader_dri3_buffer *buffer)
345{
346   if (buffer->own_pixmap)
347      xcb_free_pixmap(draw->conn, buffer->pixmap);
348   xcb_sync_destroy_fence(draw->conn, buffer->sync_fence);
349   xshmfence_unmap_shm(buffer->shm_fence);
350   draw->ext->image->destroyImage(buffer->image);
351   if (buffer->linear_buffer)
352      draw->ext->image->destroyImage(buffer->linear_buffer);
353   free(buffer);
354}
355
356void
357loader_dri3_drawable_fini(struct loader_dri3_drawable *draw)
358{
359   int i;
360
361   draw->ext->core->destroyDrawable(draw->dri_drawable);
362
363   for (i = 0; i < ARRAY_SIZE(draw->buffers); i++) {
364      if (draw->buffers[i])
365         dri3_free_render_buffer(draw, draw->buffers[i]);
366   }
367
368   if (draw->special_event) {
369      xcb_void_cookie_t cookie =
370         xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
371                                          XCB_PRESENT_EVENT_MASK_NO_EVENT);
372
373      xcb_discard_reply(draw->conn, cookie.sequence);
374      xcb_unregister_for_special_event(draw->conn, draw->special_event);
375   }
376
377   if (draw->region)
378      xcb_xfixes_destroy_region(draw->conn, draw->region);
379
380   cnd_destroy(&draw->event_cnd);
381   mtx_destroy(&draw->mtx);
382}
383
384int
385loader_dri3_drawable_init(xcb_connection_t *conn,
386                          xcb_drawable_t drawable,
387                          __DRIscreen *dri_screen,
388                          bool is_different_gpu,
389                          bool multiplanes_available,
390                          bool prefer_back_buffer_reuse,
391                          const __DRIconfig *dri_config,
392                          struct loader_dri3_extensions *ext,
393                          const struct loader_dri3_vtable *vtable,
394                          struct loader_dri3_drawable *draw)
395{
396   xcb_get_geometry_cookie_t cookie;
397   xcb_get_geometry_reply_t *reply;
398   xcb_generic_error_t *error;
399   GLint vblank_mode = DRI_CONF_VBLANK_DEF_INTERVAL_1;
400   int swap_interval;
401
402   draw->conn = conn;
403   draw->ext = ext;
404   draw->vtable = vtable;
405   draw->drawable = drawable;
406   draw->region = 0;
407   draw->dri_screen = dri_screen;
408   draw->is_different_gpu = is_different_gpu;
409   draw->multiplanes_available = multiplanes_available;
410   draw->prefer_back_buffer_reuse = prefer_back_buffer_reuse;
411
412   draw->have_back = 0;
413   draw->have_fake_front = 0;
414   draw->first_init = true;
415   draw->adaptive_sync = false;
416   draw->adaptive_sync_active = false;
417
418   draw->cur_blit_source = -1;
419   draw->back_format = __DRI_IMAGE_FORMAT_NONE;
420   mtx_init(&draw->mtx, mtx_plain);
421   cnd_init(&draw->event_cnd);
422
423   if (draw->ext->config) {
424      unsigned char adaptive_sync = 0;
425
426      draw->ext->config->configQueryi(draw->dri_screen,
427                                      "vblank_mode", &vblank_mode);
428
429      draw->ext->config->configQueryb(draw->dri_screen,
430                                      "adaptive_sync",
431                                      &adaptive_sync);
432
433      draw->adaptive_sync = adaptive_sync;
434   }
435
436   if (!draw->adaptive_sync)
437      set_adaptive_sync_property(conn, draw->drawable, false);
438
439   switch (vblank_mode) {
440   case DRI_CONF_VBLANK_NEVER:
441   case DRI_CONF_VBLANK_DEF_INTERVAL_0:
442      swap_interval = 0;
443      break;
444   case DRI_CONF_VBLANK_DEF_INTERVAL_1:
445   case DRI_CONF_VBLANK_ALWAYS_SYNC:
446   default:
447      swap_interval = 1;
448      break;
449   }
450   draw->swap_interval = swap_interval;
451
452   dri3_update_max_num_back(draw);
453
454   /* Create a new drawable */
455   draw->dri_drawable =
456      draw->ext->image_driver->createNewDrawable(dri_screen,
457                                                 dri_config,
458                                                 draw);
459
460   if (!draw->dri_drawable)
461      return 1;
462
463   cookie = xcb_get_geometry(draw->conn, draw->drawable);
464   reply = xcb_get_geometry_reply(draw->conn, cookie, &error);
465   if (reply == NULL || error != NULL) {
466      draw->ext->core->destroyDrawable(draw->dri_drawable);
467      return 1;
468   }
469
470   draw->screen = get_screen_for_root(draw->conn, reply->root);
471   draw->width = reply->width;
472   draw->height = reply->height;
473   draw->depth = reply->depth;
474   draw->vtable->set_drawable_size(draw, draw->width, draw->height);
475   free(reply);
476
477   draw->swap_method = __DRI_ATTRIB_SWAP_UNDEFINED;
478   if (draw->ext->core->base.version >= 2) {
479      (void )draw->ext->core->getConfigAttrib(dri_config,
480                                              __DRI_ATTRIB_SWAP_METHOD,
481                                              &draw->swap_method);
482   }
483
484   /*
485    * Make sure server has the same swap interval we do for the new
486    * drawable.
487    */
488   loader_dri3_set_swap_interval(draw, swap_interval);
489
490   return 0;
491}
492
/*
 * Process one Present event
 *
 * Handles configure notifications (resize), completion notifications
 * (SBC/MSC bookkeeping and present-mode transitions) and idle
 * notifications (marking buffers non-busy). Always frees the event.
 */
static void
dri3_handle_present_event(struct loader_dri3_drawable *draw,
                          xcb_present_generic_event_t *ge)
{
   switch (ge->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *ce = (void *) ge;

      /* Window resized: record the new size and invalidate the driver's
       * notion of the drawable so buffers get reallocated.
       */
      draw->width = ce->width;
      draw->height = ce->height;
      draw->vtable->set_drawable_size(draw, draw->width, draw->height);
      draw->ext->flush->invalidate(draw->dri_drawable);
      break;
   }
   case XCB_PRESENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *ce = (void *) ge;

      /* Compute the processed SBC number from the received 32-bit serial number
       * merged with the upper 32-bits of the sent 64-bit serial number while
       * checking for wrap.
       */
      if (ce->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
         uint64_t recv_sbc = (draw->send_sbc & 0xffffffff00000000LL) | ce->serial;

         /* Only assume wraparound if that results in exactly the previous
          * SBC + 1, otherwise ignore received SBC > sent SBC (those are
          * probably from a previous loader_dri3_drawable instance) to avoid
          * calculating bogus target MSC values in loader_dri3_swap_buffers_msc
          */
         if (recv_sbc <= draw->send_sbc)
            draw->recv_sbc = recv_sbc;
         else if (recv_sbc == (draw->recv_sbc + 0x100000001ULL))
            draw->recv_sbc = recv_sbc - 0x100000000ULL;

         /* When moving from flip to copy, we assume that we can allocate in
          * a more optimal way if we don't need to cater for the display
          * controller.
          */
         if (ce->mode == XCB_PRESENT_COMPLETE_MODE_COPY &&
             draw->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP) {
            for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
               if (draw->buffers[b])
                  draw->buffers[b]->reallocate = true;
            }
         }

         /* If the server tells us that our allocation is suboptimal, we
          * reallocate once.
          */
#ifdef HAVE_DRI3_MODIFIERS
         if (ce->mode == XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY &&
             draw->last_present_mode != ce->mode) {
            for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
               if (draw->buffers[b])
                  draw->buffers[b]->reallocate = true;
            }
         }
#endif
         draw->last_present_mode = ce->mode;

         if (draw->vtable->show_fps)
            draw->vtable->show_fps(draw, ce->ust);

         draw->ust = ce->ust;
         draw->msc = ce->msc;
      } else if (ce->serial == draw->eid) {
         /* A NotifyMSC completion for this drawable's event id. */
         draw->notify_ust = ce->ust;
         draw->notify_msc = ce->msc;
      }
      break;
   }
   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *ie = (void *) ge;
      int b;

      /* The server is done with this pixmap; mark the matching buffer idle. */
      for (b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
         struct loader_dri3_buffer *buf = draw->buffers[b];

         if (buf && buf->pixmap == ie->pixmap)
            buf->busy = 0;
      }
      break;
   }
   }
   free(ge);
}
582
/* Wait for (and process) one special Present event.
 *
 * Must be called with draw->mtx held; the mutex is temporarily dropped
 * while blocking in xcb so other threads can use the drawable.
 *
 * \param full_sequence[out]  If non-NULL, receives the sequence number of
 *                            the event that was processed.
 * \return true if an event was processed (possibly by another thread);
 *         false if the wait failed (e.g. connection error).
 */
static bool
dri3_wait_for_event_locked(struct loader_dri3_drawable *draw,
                           unsigned *full_sequence)
{
   xcb_generic_event_t *ev;
   xcb_present_generic_event_t *ge;

   xcb_flush(draw->conn);

   /* Only have one thread waiting for events at a time */
   if (draw->has_event_waiter) {
      cnd_wait(&draw->event_cnd, &draw->mtx);
      if (full_sequence)
         *full_sequence = draw->last_special_event_sequence;
      /* Another thread has updated the protected info, so retest. */
      return true;
   } else {
      draw->has_event_waiter = true;
      /* Allow other threads access to the drawable while we're waiting. */
      mtx_unlock(&draw->mtx);
      ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
      mtx_lock(&draw->mtx);
      draw->has_event_waiter = false;
      cnd_broadcast(&draw->event_cnd);
   }
   if (!ev)
      return false;
   /* Record the sequence so threads woken via the condvar can report it. */
   draw->last_special_event_sequence = ev->full_sequence;
   if (full_sequence)
      *full_sequence = ev->full_sequence;
   ge = (void *) ev;
   dri3_handle_present_event(draw, ge);
   return true;
}
617
/** loader_dri3_wait_for_msc
 *
 * Get the X server to send an event when the target msc/divisor/remainder is
 * reached.
 *
 * Blocks processing Present events until the NotifyMSC for our request has
 * arrived (matched by sequence number) and the notified MSC has reached the
 * target. Returns false if event waiting fails (e.g. connection error).
 */
bool
loader_dri3_wait_for_msc(struct loader_dri3_drawable *draw,
                         int64_t target_msc,
                         int64_t divisor, int64_t remainder,
                         int64_t *ust, int64_t *msc, int64_t *sbc)
{
   xcb_void_cookie_t cookie = xcb_present_notify_msc(draw->conn,
                                                     draw->drawable,
                                                     draw->eid,
                                                     target_msc,
                                                     divisor,
                                                     remainder);
   unsigned full_sequence;

   mtx_lock(&draw->mtx);

   /* Wait for the event */
   do {
      if (!dri3_wait_for_event_locked(draw, &full_sequence)) {
         mtx_unlock(&draw->mtx);
         return false;
      }
   } while (full_sequence != cookie.sequence || draw->notify_msc < target_msc);

   /* Report the values recorded by the event handler. */
   *ust = draw->notify_ust;
   *msc = draw->notify_msc;
   *sbc = draw->recv_sbc;
   mtx_unlock(&draw->mtx);

   return true;
}
654
655/** loader_dri3_wait_for_sbc
656 *
657 * Wait for the completed swap buffer count to reach the specified
658 * target. Presumably the application knows that this will be reached with
659 * outstanding complete events, or we're going to be here awhile.
660 */
661int
662loader_dri3_wait_for_sbc(struct loader_dri3_drawable *draw,
663                         int64_t target_sbc, int64_t *ust,
664                         int64_t *msc, int64_t *sbc)
665{
666   /* From the GLX_OML_sync_control spec:
667    *
668    *     "If <target_sbc> = 0, the function will block until all previous
669    *      swaps requested with glXSwapBuffersMscOML for that window have
670    *      completed."
671    */
672   mtx_lock(&draw->mtx);
673   if (!target_sbc)
674      target_sbc = draw->send_sbc;
675
676   while (draw->recv_sbc < target_sbc) {
677      if (!dri3_wait_for_event_locked(draw, NULL)) {
678         mtx_unlock(&draw->mtx);
679         return 0;
680      }
681   }
682
683   *ust = draw->ust;
684   *msc = draw->msc;
685   *sbc = draw->recv_sbc;
686   mtx_unlock(&draw->mtx);
687   return 1;
688}
689
/** loader_dri3_find_back
 *
 * Find an idle back buffer. If there isn't one, then
 * wait for a present idle notify event from the X server
 *
 * \param prefer_a_different  Prefer an idle buffer other than the current
 *                            back (DRI_PRIME case, see comment below).
 * \return the buffer id, or -1 if waiting for an event failed.
 */
static int
dri3_find_back(struct loader_dri3_drawable *draw, bool prefer_a_different)
{
   int b;
   int num_to_consider;
   int max_num;

   mtx_lock(&draw->mtx);
   /* Increase the likelyhood of reusing current buffer */
   dri3_flush_present_events(draw);

   /* Check whether we need to reuse the current back buffer as new back.
    * In that case, wait until it's not busy anymore.
    */
   if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1) {
      /* Without a blit path only the blit source itself can be reused. */
      num_to_consider = 1;
      max_num = 1;
      draw->cur_blit_source = -1;
   } else {
      num_to_consider = draw->cur_num_back;
      max_num = draw->max_num_back;
   }

   /* In a DRI_PRIME situation, if prefer_a_different is true, we first try
    * to find an idle buffer that is not the last used one.
    * This is useful if we receive a XCB_PRESENT_EVENT_IDLE_NOTIFY event
    * for a pixmap but it's not actually idle (eg: the DRI_PRIME blit is
    * still in progress).
    * Unigine Superposition hits this and this allows to use 2 back buffers
    * instead of reusing the same one all the time, causing the next frame
    * to wait for the copy to finish.
    */
   int current_back_id = draw->cur_back;
   for (;;) {
      /* Scan the candidate buffers; an unallocated slot counts as idle. */
      for (b = 0; b < num_to_consider; b++) {
         int id = LOADER_DRI3_BACK_ID((b + draw->cur_back) % draw->cur_num_back);
         struct loader_dri3_buffer *buffer = draw->buffers[id];

         if (!buffer || (!buffer->busy &&
                         (!prefer_a_different || id != current_back_id))) {
            draw->cur_back = id;
            mtx_unlock(&draw->mtx);
            return id;
         }
      }

      /* No candidate: first grow the working set, then relax the
       * "different buffer" preference, and only then block for an
       * idle-notify event.
       */
      if (num_to_consider < max_num) {
         num_to_consider = ++draw->cur_num_back;
      } else if (prefer_a_different) {
         prefer_a_different = false;
      } else if (!dri3_wait_for_event_locked(draw, NULL)) {
         mtx_unlock(&draw->mtx);
         return -1;
      }
   }
}
751
752static xcb_gcontext_t
753dri3_drawable_gc(struct loader_dri3_drawable *draw)
754{
755   if (!draw->gc) {
756      uint32_t v = 0;
757      xcb_create_gc(draw->conn,
758                    (draw->gc = xcb_generate_id(draw->conn)),
759                    draw->drawable,
760                    XCB_GC_GRAPHICS_EXPOSURES,
761                    &v);
762   }
763   return draw->gc;
764}
765
766
/* Return the current back buffer slot (may be NULL if not allocated). */
static struct loader_dri3_buffer *
dri3_back_buffer(struct loader_dri3_drawable *draw)
{
   return draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)];
}

/* Return the fake front buffer slot (may be NULL if not allocated). */
static struct loader_dri3_buffer *
dri3_fake_front_buffer(struct loader_dri3_drawable *draw)
{
   return draw->buffers[LOADER_DRI3_FRONT_ID];
}
778
779static void
780dri3_copy_area(xcb_connection_t *c,
781               xcb_drawable_t    src_drawable,
782               xcb_drawable_t    dst_drawable,
783               xcb_gcontext_t    gc,
784               int16_t           src_x,
785               int16_t           src_y,
786               int16_t           dst_x,
787               int16_t           dst_y,
788               uint16_t          width,
789               uint16_t          height)
790{
791   xcb_void_cookie_t cookie;
792
793   cookie = xcb_copy_area_checked(c,
794                                  src_drawable,
795                                  dst_drawable,
796                                  gc,
797                                  src_x,
798                                  src_y,
799                                  dst_x,
800                                  dst_y,
801                                  width,
802                                  height);
803   xcb_discard_reply(c, cookie.sequence);
804}
805
/**
 * Asks the driver to flush any queued work necessary for serializing with the
 * X command stream, and optionally the slightly more strict requirement of
 * glFlush() equivalence (which would require flushing even if nothing had
 * been drawn to a window system framebuffer, for example).
 */
void
loader_dri3_flush(struct loader_dri3_drawable *draw,
                  unsigned flags,
                  enum __DRI2throttleReason throttle_reason)
{
   /* The vtable may return NULL when no context is current; flushing is
    * then a no-op.
    */
   __DRIcontext *dri_context = draw->vtable->get_dri_context(draw);

   if (dri_context) {
      draw->ext->flush->flush_with_flags(dri_context, draw->dri_drawable,
                                         flags, throttle_reason);
   }
}
825
/* Copy a sub-rectangle of the back buffer to the window, keeping the fake
 * front (if any) in sync. The y coordinate is given in GL convention
 * (origin bottom-left) and converted to X convention here.
 */
void
loader_dri3_copy_sub_buffer(struct loader_dri3_drawable *draw,
                            int x, int y,
                            int width, int height,
                            bool flush)
{
   struct loader_dri3_buffer *back;
   unsigned flags = __DRI2_FLUSH_DRAWABLE;

   /* Check we have the right attachments */
   if (!draw->have_back || draw->is_pixmap)
      return;

   if (flush)
      flags |= __DRI2_FLUSH_CONTEXT;
   loader_dri3_flush(draw, flags, __DRI2_THROTTLE_COPYSUBBUFFER);

   back = dri3_find_back_alloc(draw);
   if (!back)
      return;

   /* Convert from GL (bottom-left origin) to X (top-left origin). */
   y = draw->height - y - height;

   if (draw->is_different_gpu) {
      /* Update the linear buffer part of the back buffer
       * for the dri3_copy_area operation
       */
      (void) loader_dri3_blit_image(draw,
                                    back->linear_buffer,
                                    back->image,
                                    0, 0, back->width, back->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   }

   /* Reset the fence before the copy, trigger it afterwards so waiters
    * know when the server has consumed the pixmap.
    */
   loader_dri3_swapbuffer_barrier(draw);
   dri3_fence_reset(draw->conn, back);
   dri3_copy_area(draw->conn,
                  back->pixmap,
                  draw->drawable,
                  dri3_drawable_gc(draw),
                  x, y, x, y, width, height);
   dri3_fence_trigger(draw->conn, back);
   /* Refresh the fake front (if present) after we just damaged the real
    * front.
    */
   if (draw->have_fake_front &&
       !loader_dri3_blit_image(draw,
                               dri3_fake_front_buffer(draw)->image,
                               back->image,
                               x, y, width, height,
                               x, y, __BLIT_FLAG_FLUSH) &&
       !draw->is_different_gpu) {
      /* Blit unavailable: fall back to a server-side copy into the fake
       * front, fenced the same way.
       */
      dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
      dri3_copy_area(draw->conn,
                     back->pixmap,
                     dri3_fake_front_buffer(draw)->pixmap,
                     dri3_drawable_gc(draw),
                     x, y, x, y, width, height);
      dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
      dri3_fence_await(draw->conn, NULL, dri3_fake_front_buffer(draw));
   }
   dri3_fence_await(draw->conn, draw, back);
}
889
890void
891loader_dri3_copy_drawable(struct loader_dri3_drawable *draw,
892                          xcb_drawable_t dest,
893                          xcb_drawable_t src)
894{
895   loader_dri3_flush(draw, __DRI2_FLUSH_DRAWABLE, __DRI2_THROTTLE_COPYSUBBUFFER);
896
897   dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
898   dri3_copy_area(draw->conn,
899                  src, dest,
900                  dri3_drawable_gc(draw),
901                  0, 0, 0, 0, draw->width, draw->height);
902   dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
903   dri3_fence_await(draw->conn, draw, dri3_fake_front_buffer(draw));
904}
905
/**
 * Make pending server-side (X) rendering visible to the client by copying
 * the real front buffer into the fake front, and -- on a prime setup --
 * propagating it onward into the tiled rendering buffer.
 */
void
loader_dri3_wait_x(struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *front;

   /* Nothing to do without a fake front buffer. */
   if (draw == NULL || !draw->have_fake_front)
      return;

   front = dri3_fake_front_buffer(draw);

   /* Server-side copy: real front pixmap -> fake front pixmap (blocks). */
   loader_dri3_copy_drawable(draw, front->pixmap, draw->drawable);

   /* In the psc->is_different_gpu case, the linear buffer has been updated,
    * but not yet the tiled buffer.
    * Copy back to the tiled buffer we use for rendering.
    * Note that we don't need flushing.
    */
   if (draw->is_different_gpu)
      (void) loader_dri3_blit_image(draw,
                                    front->image,
                                    front->linear_buffer,
                                    0, 0, front->width, front->height,
                                    0, 0, 0);
}
930
/**
 * Make pending client-side (GL) rendering visible to the server by copying
 * the fake front buffer into the real front. On a prime setup the tiled
 * rendering buffer is first blitted into the shared linear buffer.
 */
void
loader_dri3_wait_gl(struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *front;

   /* Nothing to do without a fake front buffer. */
   if (draw == NULL || !draw->have_fake_front)
      return;

   front = dri3_fake_front_buffer(draw);

   /* In the psc->is_different_gpu case, we update the linear_buffer
    * before updating the real front.
    */
   if (draw->is_different_gpu)
      (void) loader_dri3_blit_image(draw,
                                    front->linear_buffer,
                                    front->image,
                                    0, 0, front->width, front->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   /* Order this copy after any in-flight swap before touching the front. */
   loader_dri3_swapbuffer_barrier(draw);
   /* Server-side copy: fake front pixmap -> real front drawable (blocks). */
   loader_dri3_copy_drawable(draw, draw->drawable, front->pixmap);
}
953
954/** dri3_flush_present_events
955 *
956 * Process any present events that have been received from the X server
957 */
958static void
959dri3_flush_present_events(struct loader_dri3_drawable *draw)
960{
961   /* Check to see if any configuration changes have occurred
962    * since we were last invoked
963    */
964   if (draw->has_event_waiter)
965      return;
966
967   if (draw->special_event) {
968      xcb_generic_event_t    *ev;
969
970      while ((ev = xcb_poll_for_special_event(draw->conn,
971                                              draw->special_event)) != NULL) {
972         xcb_present_generic_event_t *ge = (void *) ev;
973         dri3_handle_present_event(draw, ge);
974      }
975   }
976}
977
978/** loader_dri3_swap_buffers_msc
979 *
980 * Make the current back buffer visible using the present extension
981 */
/**
 * Queue a present of the current back buffer.
 *
 * \return the swap buffer count (sbc) of the queued present, or 0 when no
 *         present was issued (e.g. pixmap drawables or no back buffer).
 */
int64_t
loader_dri3_swap_buffers_msc(struct loader_dri3_drawable *draw,
                             int64_t target_msc, int64_t divisor,
                             int64_t remainder, unsigned flush_flags,
                             const int *rects, int n_rects,
                             bool force_copy)
{
   struct loader_dri3_buffer *back;
   int64_t ret = 0;
   uint32_t options = XCB_PRESENT_OPTION_NONE;

   draw->vtable->flush_drawable(draw, flush_flags);

   back = dri3_find_back_alloc(draw);

   mtx_lock(&draw->mtx);

   /* Turn on the drawable's adaptive-sync property the first time we swap. */
   if (draw->adaptive_sync && !draw->adaptive_sync_active) {
      set_adaptive_sync_property(draw->conn, draw->drawable, true);
      draw->adaptive_sync_active = true;
   }

   if (draw->is_different_gpu && back) {
      /* Update the linear buffer before presenting the pixmap */
      (void) loader_dri3_blit_image(draw,
                                    back->linear_buffer,
                                    back->image,
                                    0, 0, back->width, back->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   }

   /* If we need to preload the new back buffer, remember the source.
    * The force_copy parameter is used by EGL to attempt to preserve
    * the back buffer across a call to this function.
    */
   if (draw->swap_method != __DRI_ATTRIB_SWAP_UNDEFINED || force_copy)
      draw->cur_blit_source = LOADER_DRI3_BACK_ID(draw->cur_back);

   /* Exchange the back and fake front. Even though the server knows about these
    * buffers, it has no notion of back and fake front.
    */
   if (back && draw->have_fake_front) {
      struct loader_dri3_buffer *tmp;

      tmp = dri3_fake_front_buffer(draw);
      draw->buffers[LOADER_DRI3_FRONT_ID] = back;
      draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)] = tmp;

      if (draw->swap_method == __DRI_ATTRIB_SWAP_COPY  || force_copy)
         draw->cur_blit_source = LOADER_DRI3_FRONT_ID;
   }

   /* Process any queued present events before issuing a new present. */
   dri3_flush_present_events(draw);

   if (back && !draw->is_pixmap) {
      dri3_fence_reset(draw->conn, back);

      /* Compute when we want the frame shown by taking the last known
       * successful MSC and adding in a swap interval for each outstanding swap
       * request. target_msc=divisor=remainder=0 means "Use glXSwapBuffers()
       * semantic"
       */
      ++draw->send_sbc;
      if (target_msc == 0 && divisor == 0 && remainder == 0)
         target_msc = draw->msc + abs(draw->swap_interval) *
                      (draw->send_sbc - draw->recv_sbc);
      else if (divisor == 0 && remainder > 0) {
         /* From the GLX_OML_sync_control spec:
          *     "If <divisor> = 0, the swap will occur when MSC becomes
          *      greater than or equal to <target_msc>."
          *
          * Note that there's no mention of the remainder.  The Present
          * extension throws BadValue for remainder != 0 with divisor == 0, so
          * just drop the passed in value.
          */
         remainder = 0;
      }

      /* From the GLX_EXT_swap_control spec
       * and the EGL 1.4 spec (page 53):
       *
       *     "If <interval> is set to a value of 0, buffer swaps are not
       *      synchronized to a video frame."
       *
       * From GLX_EXT_swap_control_tear:
       *
       *     "If <interval> is negative, the minimum number of video frames
       *      between buffer swaps is the absolute value of <interval>. In this
       *      case, if abs(<interval>) video frames have already passed from
       *      the previous swap when the swap is ready to be performed, the
       *      swap will occur without synchronization to a video frame."
       *
       * Implementation note: It is possible to enable triple buffering
       * behaviour by not using XCB_PRESENT_OPTION_ASYNC, but this should not be
       * the default.
       */
      if (draw->swap_interval <= 0)
          options |= XCB_PRESENT_OPTION_ASYNC;

      /* If we need to populate the new back, but need to reuse the back
       * buffer slot due to lack of local blit capabilities, make sure
       * the server doesn't flip and we deadlock.
       */
      if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1)
         options |= XCB_PRESENT_OPTION_COPY;
#ifdef HAVE_DRI3_MODIFIERS
      if (draw->multiplanes_available)
         options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
#endif
      back->busy = 1;
      back->last_swap = draw->send_sbc;

      /* Lazily create a reusable XFixes region for damage rectangles. */
      if (!draw->region) {
         draw->region = xcb_generate_id(draw->conn);
         xcb_xfixes_create_region(draw->conn, draw->region, 0, NULL);
      }

      xcb_xfixes_region_t region = 0;
      xcb_rectangle_t xcb_rects[64];

      /* Convert damage rects (GL bottom-left origin) to X coordinates
       * (top-left origin). More than 64 rects falls back to presenting the
       * whole drawable (region stays 0).
       */
      if (n_rects > 0 && n_rects <= ARRAY_SIZE(xcb_rects)) {
         for (int i = 0; i < n_rects; i++) {
            const int *rect = &rects[i * 4];
            xcb_rects[i].x = rect[0];
            xcb_rects[i].y = draw->height - rect[1] - rect[3];
            xcb_rects[i].width = rect[2];
            xcb_rects[i].height = rect[3];
         }

         region = draw->region;
         xcb_xfixes_set_region(draw->conn, region, n_rects, xcb_rects);
      }

      /* The present serial is the sbc truncated to 32 bits; the event
       * handlers reconstruct the full 64-bit value.
       */
      xcb_present_pixmap(draw->conn,
                         draw->drawable,
                         back->pixmap,
                         (uint32_t) draw->send_sbc,
                         0,                                    /* valid */
                         region,                               /* update */
                         0,                                    /* x_off */
                         0,                                    /* y_off */
                         None,                                 /* target_crtc */
                         None,
                         back->sync_fence,
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
      ret = (int64_t) draw->send_sbc;

      /* Schedule a server-side back-preserving blit if necessary.
       * This happens iff all conditions below are satisfied:
       * a) We have a fake front,
       * b) We need to preserve the back buffer,
       * c) We don't have local blit capabilities.
       */
      if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1 &&
          draw->cur_blit_source != LOADER_DRI3_BACK_ID(draw->cur_back)) {
         struct loader_dri3_buffer *new_back = dri3_back_buffer(draw);
         struct loader_dri3_buffer *src = draw->buffers[draw->cur_blit_source];

         dri3_fence_reset(draw->conn, new_back);
         dri3_copy_area(draw->conn, src->pixmap,
                        new_back->pixmap,
                        dri3_drawable_gc(draw),
                        0, 0, 0, 0, draw->width, draw->height);
         dri3_fence_trigger(draw->conn, new_back);
         new_back->last_swap = src->last_swap;
      }

      xcb_flush(draw->conn);
      if (draw->stamp)
         ++(*draw->stamp);
   }
   mtx_unlock(&draw->mtx);

   /* Tell the driver its cached buffer info is stale. */
   draw->ext->flush->invalidate(draw->dri_drawable);

   return ret;
}
1162
1163int
1164loader_dri3_query_buffer_age(struct loader_dri3_drawable *draw)
1165{
1166   struct loader_dri3_buffer *back = dri3_find_back_alloc(draw);
1167   int ret;
1168
1169   mtx_lock(&draw->mtx);
1170   ret = (!back || back->last_swap == 0) ? 0 :
1171      draw->send_sbc - back->last_swap + 1;
1172   mtx_unlock(&draw->mtx);
1173
1174   return ret;
1175}
1176
1177/** loader_dri3_open
1178 *
1179 * Wrapper around xcb_dri3_open
1180 */
1181int
1182loader_dri3_open(xcb_connection_t *conn,
1183                 xcb_window_t root,
1184                 uint32_t provider)
1185{
1186   xcb_dri3_open_cookie_t       cookie;
1187   xcb_dri3_open_reply_t        *reply;
1188   xcb_xfixes_query_version_cookie_t fixes_cookie;
1189   xcb_xfixes_query_version_reply_t *fixes_reply;
1190   int                          fd;
1191
1192   cookie = xcb_dri3_open(conn,
1193                          root,
1194                          provider);
1195
1196   reply = xcb_dri3_open_reply(conn, cookie, NULL);
1197   if (!reply)
1198      return -1;
1199
1200   if (reply->nfd != 1) {
1201      free(reply);
1202      return -1;
1203   }
1204
1205   fd = xcb_dri3_open_reply_fds(conn, reply)[0];
1206   free(reply);
1207   fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
1208
1209   /* let the server know our xfixes level */
1210   fixes_cookie = xcb_xfixes_query_version(conn,
1211                                           XCB_XFIXES_MAJOR_VERSION,
1212                                           XCB_XFIXES_MINOR_VERSION);
1213   fixes_reply = xcb_xfixes_query_version_reply(conn, fixes_cookie, NULL);
1214   free(fixes_reply);
1215
1216   return fd;
1217}
1218
1219static uint32_t
1220dri3_cpp_for_format(uint32_t format) {
1221   switch (format) {
1222   case  __DRI_IMAGE_FORMAT_R8:
1223      return 1;
1224   case  __DRI_IMAGE_FORMAT_RGB565:
1225   case  __DRI_IMAGE_FORMAT_GR88:
1226      return 2;
1227   case  __DRI_IMAGE_FORMAT_XRGB8888:
1228   case  __DRI_IMAGE_FORMAT_ARGB8888:
1229   case  __DRI_IMAGE_FORMAT_ABGR8888:
1230   case  __DRI_IMAGE_FORMAT_XBGR8888:
1231   case  __DRI_IMAGE_FORMAT_XRGB2101010:
1232   case  __DRI_IMAGE_FORMAT_ARGB2101010:
1233   case  __DRI_IMAGE_FORMAT_XBGR2101010:
1234   case  __DRI_IMAGE_FORMAT_ABGR2101010:
1235   case  __DRI_IMAGE_FORMAT_SARGB8:
1236   case  __DRI_IMAGE_FORMAT_SABGR8:
1237   case  __DRI_IMAGE_FORMAT_SXRGB8:
1238      return 4;
1239   case __DRI_IMAGE_FORMAT_XBGR16161616F:
1240   case __DRI_IMAGE_FORMAT_ABGR16161616F:
1241      return 8;
1242   case  __DRI_IMAGE_FORMAT_NONE:
1243   default:
1244      return 0;
1245   }
1246}
1247
1248/* Map format of render buffer to corresponding format for the linear_buffer
1249 * used for sharing with the display gpu of a Prime setup (== is_different_gpu).
1250 * Usually linear_format == format, except for depth >= 30 formats, where
1251 * different gpu vendors have different preferences wrt. color channel ordering.
1252 */
1253static uint32_t
1254dri3_linear_format_for_format(struct loader_dri3_drawable *draw, uint32_t format)
1255{
1256   switch (format) {
1257      case  __DRI_IMAGE_FORMAT_XRGB2101010:
1258      case  __DRI_IMAGE_FORMAT_XBGR2101010:
1259         /* Different preferred formats for different hw */
1260         if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1261            return __DRI_IMAGE_FORMAT_XBGR2101010;
1262         else
1263            return __DRI_IMAGE_FORMAT_XRGB2101010;
1264
1265      case  __DRI_IMAGE_FORMAT_ARGB2101010:
1266      case  __DRI_IMAGE_FORMAT_ABGR2101010:
1267         /* Different preferred formats for different hw */
1268         if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1269            return __DRI_IMAGE_FORMAT_ABGR2101010;
1270         else
1271            return __DRI_IMAGE_FORMAT_ARGB2101010;
1272
1273      default:
1274         return format;
1275   }
1276}
1277
1278/* the DRIimage createImage function takes __DRI_IMAGE_FORMAT codes, while
1279 * the createImageFromFds call takes DRM_FORMAT codes. To avoid
1280 * complete confusion, just deal in __DRI_IMAGE_FORMAT codes for now and
1281 * translate to DRM_FORMAT codes in the call to createImageFromFds
1282 */
1283static int
1284image_format_to_fourcc(int format)
1285{
1286
1287   /* Convert from __DRI_IMAGE_FORMAT to DRM_FORMAT (sigh) */
1288   switch (format) {
1289   case __DRI_IMAGE_FORMAT_SARGB8: return __DRI_IMAGE_FOURCC_SARGB8888;
1290   case __DRI_IMAGE_FORMAT_SABGR8: return __DRI_IMAGE_FOURCC_SABGR8888;
1291   case __DRI_IMAGE_FORMAT_SXRGB8: return __DRI_IMAGE_FOURCC_SXRGB8888;
1292   case __DRI_IMAGE_FORMAT_RGB565: return DRM_FORMAT_RGB565;
1293   case __DRI_IMAGE_FORMAT_XRGB8888: return DRM_FORMAT_XRGB8888;
1294   case __DRI_IMAGE_FORMAT_ARGB8888: return DRM_FORMAT_ARGB8888;
1295   case __DRI_IMAGE_FORMAT_ABGR8888: return DRM_FORMAT_ABGR8888;
1296   case __DRI_IMAGE_FORMAT_XBGR8888: return DRM_FORMAT_XBGR8888;
1297   case __DRI_IMAGE_FORMAT_XRGB2101010: return DRM_FORMAT_XRGB2101010;
1298   case __DRI_IMAGE_FORMAT_ARGB2101010: return DRM_FORMAT_ARGB2101010;
1299   case __DRI_IMAGE_FORMAT_XBGR2101010: return DRM_FORMAT_XBGR2101010;
1300   case __DRI_IMAGE_FORMAT_ABGR2101010: return DRM_FORMAT_ABGR2101010;
1301   case __DRI_IMAGE_FORMAT_XBGR16161616F: return DRM_FORMAT_XBGR16161616F;
1302   case __DRI_IMAGE_FORMAT_ABGR16161616F: return DRM_FORMAT_ABGR16161616F;
1303   }
1304   return 0;
1305}
1306
1307#ifdef HAVE_DRI3_MODIFIERS
1308static bool
1309has_supported_modifier(struct loader_dri3_drawable *draw, unsigned int format,
1310                       uint64_t *modifiers, uint32_t count)
1311{
1312   uint64_t *supported_modifiers;
1313   int32_t supported_modifiers_count;
1314   bool found = false;
1315   int i, j;
1316
1317   if (!draw->ext->image->queryDmaBufModifiers(draw->dri_screen,
1318                                               format, 0, NULL, NULL,
1319                                               &supported_modifiers_count) ||
1320       supported_modifiers_count == 0)
1321      return false;
1322
1323   supported_modifiers = malloc(supported_modifiers_count * sizeof(uint64_t));
1324   if (!supported_modifiers)
1325      return false;
1326
1327   draw->ext->image->queryDmaBufModifiers(draw->dri_screen, format,
1328                                          supported_modifiers_count,
1329                                          supported_modifiers, NULL,
1330                                          &supported_modifiers_count);
1331
1332   for (i = 0; !found && i < supported_modifiers_count; i++) {
1333      for (j = 0; !found && j < count; j++) {
1334         if (supported_modifiers[i] == modifiers[j])
1335            found = true;
1336      }
1337   }
1338
1339   free(supported_modifiers);
1340   return found;
1341}
1342#endif
1343
/** dri3_alloc_render_buffer
1345 *
1346 * Use the driver createImage function to construct a __DRIimage, then
1347 * get a file descriptor for that and create an X pixmap from that
1348 *
1349 * Allocate an xshmfence for synchronization
1350 */
static struct loader_dri3_buffer *
dri3_alloc_render_buffer(struct loader_dri3_drawable *draw, unsigned int format,
                         int width, int height, int depth)
{
   struct loader_dri3_buffer *buffer;
   __DRIimage *pixmap_buffer = NULL, *linear_buffer_display_gpu = NULL;
   xcb_pixmap_t pixmap;
   xcb_sync_fence_t sync_fence;
   struct xshmfence *shm_fence;
   /* NOTE(review): only the first num_planes entries of buffer_fds are ever
    * written; the no_buffer_attrib cleanup below relies on that.
    */
   int buffer_fds[4], fence_fd;
   int num_planes = 0;
   uint64_t *modifiers = NULL;
   uint32_t count = 0;
   int i, mod;
   int ret;

   /* Create an xshmfence object and
    * prepare to send that to the X server
    */

   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      return NULL;

   shm_fence = xshmfence_map_shm(fence_fd);
   if (shm_fence == NULL)
      goto no_shm_fence;

   /* Allocate the image from the driver
    */
   buffer = calloc(1, sizeof *buffer);
   if (!buffer)
      goto no_buffer;

   buffer->cpp = dri3_cpp_for_format(format);
   if (!buffer->cpp)
      goto no_image;

   if (!draw->is_different_gpu) {
#ifdef HAVE_DRI3_MODIFIERS
      /* Ask the X server which modifiers are acceptable for this window /
       * screen and, when possible, allocate with an explicit modifier.
       */
      if (draw->multiplanes_available &&
          draw->ext->image->base.version >= 15 &&
          draw->ext->image->queryDmaBufModifiers &&
          draw->ext->image->createImageWithModifiers) {
         xcb_dri3_get_supported_modifiers_cookie_t mod_cookie;
         xcb_dri3_get_supported_modifiers_reply_t *mod_reply;
         xcb_generic_error_t *error = NULL;

         mod_cookie = xcb_dri3_get_supported_modifiers(draw->conn,
                                                       draw->window,
                                                       depth, buffer->cpp * 8);
         mod_reply = xcb_dri3_get_supported_modifiers_reply(draw->conn,
                                                            mod_cookie,
                                                            &error);
         if (!mod_reply)
            goto no_image;

         /* Prefer the window-specific modifier list, but only when the
          * driver actually supports at least one of them.
          */
         if (mod_reply->num_window_modifiers) {
            count = mod_reply->num_window_modifiers;
            modifiers = malloc(count * sizeof(uint64_t));
            if (!modifiers) {
               free(mod_reply);
               goto no_image;
            }

            memcpy(modifiers,
                   xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
                   count * sizeof(uint64_t));

            if (!has_supported_modifier(draw, image_format_to_fourcc(format),
                                        modifiers, count)) {
               free(modifiers);
               count = 0;
               modifiers = NULL;
            }
         }

         /* Fall back to the screen-wide modifier list. */
         if (mod_reply->num_screen_modifiers && modifiers == NULL) {
            count = mod_reply->num_screen_modifiers;
            modifiers = malloc(count * sizeof(uint64_t));
            if (!modifiers) {
               /* modifiers is NULL here; this free() is a harmless no-op. */
               free(modifiers);
               free(mod_reply);
               goto no_image;
            }

            memcpy(modifiers,
                   xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
                   count * sizeof(uint64_t));
         }

         free(mod_reply);
      }
#endif
      buffer->image = loader_dri_create_image(draw->dri_screen, draw->ext->image,
                                              width, height, format,
                                              __DRI_IMAGE_USE_SHARE |
                                              __DRI_IMAGE_USE_SCANOUT |
                                              __DRI_IMAGE_USE_BACKBUFFER |
                                              (draw->is_protected_content ?
                                               __DRI_IMAGE_USE_PROTECTED : 0),
                                              modifiers, count, buffer);
      free(modifiers);

      pixmap_buffer = buffer->image;

      if (!buffer->image)
         goto no_image;
   } else {
      /* Prime setup: render into a tiled image on the render gpu and share
       * a separate linear image with the display gpu.
       */
      buffer->image = draw->ext->image->createImage(draw->dri_screen,
                                                    width, height,
                                                    format,
                                                    0,
                                                    buffer);

      if (!buffer->image)
         goto no_image;

      /* if driver name is same only then dri_screen_display_gpu is set.
       * This check is needed because for simplicity render gpu image extension
       * is also used for display gpu.
       */
      if (draw->dri_screen_display_gpu) {
         linear_buffer_display_gpu =
           draw->ext->image->createImage(draw->dri_screen_display_gpu,
                                         width, height,
                                         dri3_linear_format_for_format(draw, format),
                                         __DRI_IMAGE_USE_SHARE |
                                         __DRI_IMAGE_USE_LINEAR |
                                         __DRI_IMAGE_USE_BACKBUFFER |
                                         __DRI_IMAGE_USE_SCANOUT,
                                         buffer);
         pixmap_buffer = linear_buffer_display_gpu;
      }

      /* Fall back to allocating the linear buffer on the render gpu. */
      if (!pixmap_buffer) {
         buffer->linear_buffer =
           draw->ext->image->createImage(draw->dri_screen,
                                         width, height,
                                         dri3_linear_format_for_format(draw, format),
                                         __DRI_IMAGE_USE_SHARE |
                                         __DRI_IMAGE_USE_LINEAR |
                                         __DRI_IMAGE_USE_BACKBUFFER |
                                         __DRI_IMAGE_USE_SCANOUT,
                                         buffer);

         pixmap_buffer = buffer->linear_buffer;
         if (!buffer->linear_buffer) {
            goto no_linear_buffer;
         }
      }
   }

   /* X want some information about the planes, so ask the image for it
    */
   if (!draw->ext->image->queryImage(pixmap_buffer, __DRI_IMAGE_ATTRIB_NUM_PLANES,
                                     &num_planes))
      num_planes = 1;

   /* Export one dma-buf fd plus stride/offset per plane. */
   for (i = 0; i < num_planes; i++) {
      __DRIimage *image = draw->ext->image->fromPlanar(pixmap_buffer, i, NULL);

      if (!image) {
         assert(i == 0);
         image = pixmap_buffer;
      }

      buffer_fds[i] = -1;

      ret = draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_FD,
                                         &buffer_fds[i]);
      ret &= draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_STRIDE,
                                          &buffer->strides[i]);
      ret &= draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_OFFSET,
                                          &buffer->offsets[i]);
      if (image != pixmap_buffer)
         draw->ext->image->destroyImage(image);

      if (!ret)
         goto no_buffer_attrib;
   }

   /* Reassemble the 64-bit modifier from its two 32-bit query halves. */
   ret = draw->ext->image->queryImage(pixmap_buffer,
                                     __DRI_IMAGE_ATTRIB_MODIFIER_UPPER, &mod);
   buffer->modifier = (uint64_t) mod << 32;
   ret &= draw->ext->image->queryImage(pixmap_buffer,
                                       __DRI_IMAGE_ATTRIB_MODIFIER_LOWER, &mod);
   buffer->modifier |= (uint64_t)(mod & 0xffffffff);

   if (!ret)
      buffer->modifier = DRM_FORMAT_MOD_INVALID;

   if (draw->is_different_gpu && draw->dri_screen_display_gpu &&
       linear_buffer_display_gpu) {
      /* The linear buffer was created in the display GPU's vram, so we
       * need to make it visible to render GPU
       */
      buffer->linear_buffer =
         draw->ext->image->createImageFromFds(draw->dri_screen,
                                              width,
                                              height,
                                              image_format_to_fourcc(format),
                                              &buffer_fds[0], num_planes,
                                              &buffer->strides[0],
                                              &buffer->offsets[0],
                                              buffer);
      if (!buffer->linear_buffer)
         goto no_buffer_attrib;

      draw->ext->image->destroyImage(linear_buffer_display_gpu);
   }

   /* Wrap the exported dma-bufs in an X pixmap. */
   pixmap = xcb_generate_id(draw->conn);
#ifdef HAVE_DRI3_MODIFIERS
   if (draw->multiplanes_available &&
       buffer->modifier != DRM_FORMAT_MOD_INVALID) {
      /* NOTE(review): this path attaches to draw->window while the legacy
       * path below uses draw->drawable -- presumably required by the
       * DRI3 1.2 request; confirm against the protocol spec.
       */
      xcb_dri3_pixmap_from_buffers(draw->conn,
                                   pixmap,
                                   draw->window,
                                   num_planes,
                                   width, height,
                                   buffer->strides[0], buffer->offsets[0],
                                   buffer->strides[1], buffer->offsets[1],
                                   buffer->strides[2], buffer->offsets[2],
                                   buffer->strides[3], buffer->offsets[3],
                                   depth, buffer->cpp * 8,
                                   buffer->modifier,
                                   buffer_fds);
   } else
#endif
   {
      xcb_dri3_pixmap_from_buffer(draw->conn,
                                  pixmap,
                                  draw->drawable,
                                  buffer->size,
                                  width, height, buffer->strides[0],
                                  depth, buffer->cpp * 8,
                                  buffer_fds[0]);
   }

   /* Hand the xshmfence fd to the server so it can signal buffer idleness. */
   xcb_dri3_fence_from_fd(draw->conn,
                          pixmap,
                          (sync_fence = xcb_generate_id(draw->conn)),
                          false,
                          fence_fd);

   buffer->pixmap = pixmap;
   buffer->own_pixmap = true;
   buffer->sync_fence = sync_fence;
   buffer->shm_fence = shm_fence;
   buffer->width = width;
   buffer->height = height;

   /* Mark the buffer as idle
    */
   dri3_fence_set(buffer);

   return buffer;

no_buffer_attrib:
   /* NOTE(review): when this label is reached from the createImageFromFds
    * failure above, i == num_planes; if num_planes == 4 the first iteration
    * reads buffer_fds[4], which is out of bounds -- TODO confirm and fix
    * upstream.
    */
   do {
      if (buffer_fds[i] != -1)
         close(buffer_fds[i]);
   } while (--i >= 0);
   draw->ext->image->destroyImage(pixmap_buffer);
no_linear_buffer:
   if (draw->is_different_gpu)
      draw->ext->image->destroyImage(buffer->image);
no_image:
   free(buffer);
no_buffer:
   xshmfence_unmap_shm(shm_fence);
no_shm_fence:
   close(fence_fd);
   return NULL;
}
1627
/** dri3_update_drawable
1629 *
1630 * Called the first time we use the drawable and then
1631 * after we receive present configure notify events to
1632 * track the geometry of the drawable
1633 */
static int
dri3_update_drawable(struct loader_dri3_drawable *draw)
{
   mtx_lock(&draw->mtx);
   /* One-time initialization on first use of the drawable. */
   if (draw->first_init) {
      xcb_get_geometry_cookie_t                 geom_cookie;
      xcb_get_geometry_reply_t                  *geom_reply;
      xcb_void_cookie_t                         cookie;
      xcb_generic_error_t                       *error;
      xcb_present_query_capabilities_cookie_t   present_capabilities_cookie;
      xcb_present_query_capabilities_reply_t    *present_capabilities_reply;
      xcb_window_t                               root_win;

      draw->first_init = false;

      /* Try to select for input on the window.
       *
       * If the drawable is a window, this will get our events
       * delivered.
       *
       * Otherwise, we'll get a BadWindow error back from this request which
       * will let us know that the drawable is a pixmap instead.
       */

      draw->eid = xcb_generate_id(draw->conn);
      cookie =
         xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
                                          XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                                          XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                                          XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

      present_capabilities_cookie =
         xcb_present_query_capabilities(draw->conn, draw->drawable);

      /* Create an XCB event queue to hold present events outside of the usual
       * application event queue
       */
      draw->special_event = xcb_register_for_special_xge(draw->conn,
                                                         &xcb_present_id,
                                                         draw->eid,
                                                         draw->stamp);
      geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);

      geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);

      if (!geom_reply) {
         mtx_unlock(&draw->mtx);
         return false;
      }
      /* Cache the drawable geometry and inform the owner of the size. */
      draw->width = geom_reply->width;
      draw->height = geom_reply->height;
      draw->depth = geom_reply->depth;
      draw->vtable->set_drawable_size(draw, draw->width, draw->height);
      root_win = geom_reply->root;

      free(geom_reply);

      draw->is_pixmap = false;

      /* Check to see if our select input call failed. If it failed with a
       * BadWindow error, then assume the drawable is a pixmap. Destroy the
       * special event queue created above and mark the drawable as a pixmap
       */

      error = xcb_request_check(draw->conn, cookie);

      present_capabilities_reply =
          xcb_present_query_capabilities_reply(draw->conn,
                                               present_capabilities_cookie,
                                               NULL);

      if (present_capabilities_reply) {
         draw->present_capabilities = present_capabilities_reply->capabilities;
         free(present_capabilities_reply);
      } else
         draw->present_capabilities = 0;

      if (error) {
         /* Any error other than BadWindow is fatal. */
         if (error->error_code != BadWindow) {
            free(error);
            mtx_unlock(&draw->mtx);
            return false;
         }
         free(error);
         draw->is_pixmap = true;
         xcb_unregister_for_special_event(draw->conn, draw->special_event);
         draw->special_event = NULL;
      }

      /* Pixmaps have no window of their own; use the screen root for
       * requests that need a window (e.g. modifier queries).
       */
      if (draw->is_pixmap)
         draw->window = root_win;
      else
         draw->window = draw->drawable;
   }
   /* Always drain any queued present events before returning. */
   dri3_flush_present_events(draw);
   mtx_unlock(&draw->mtx);
   return true;
}
1732
1733__DRIimage *
1734loader_dri3_create_image(xcb_connection_t *c,
1735                         xcb_dri3_buffer_from_pixmap_reply_t *bp_reply,
1736                         unsigned int format,
1737                         __DRIscreen *dri_screen,
1738                         const __DRIimageExtension *image,
1739                         void *loaderPrivate)
1740{
1741   int                                  *fds;
1742   __DRIimage                           *image_planar, *ret;
1743   int                                  stride, offset;
1744
1745   /* Get an FD for the pixmap object
1746    */
1747   fds = xcb_dri3_buffer_from_pixmap_reply_fds(c, bp_reply);
1748
1749   stride = bp_reply->stride;
1750   offset = 0;
1751
1752   /* createImageFromFds creates a wrapper __DRIimage structure which
1753    * can deal with multiple planes for things like Yuv images. So, once
1754    * we've gotten the planar wrapper, pull the single plane out of it and
1755    * discard the wrapper.
1756    */
1757   image_planar = image->createImageFromFds(dri_screen,
1758                                            bp_reply->width,
1759                                            bp_reply->height,
1760                                            image_format_to_fourcc(format),
1761                                            fds, 1,
1762                                            &stride, &offset, loaderPrivate);
1763   close(fds[0]);
1764   if (!image_planar)
1765      return NULL;
1766
1767   ret = image->fromPlanar(image_planar, 0, loaderPrivate);
1768
1769   if (!ret)
1770      ret = image_planar;
1771   else
1772      image->destroyImage(image_planar);
1773
1774   return ret;
1775}
1776
1777#ifdef HAVE_DRI3_MODIFIERS
1778__DRIimage *
1779loader_dri3_create_image_from_buffers(xcb_connection_t *c,
1780                                      xcb_dri3_buffers_from_pixmap_reply_t *bp_reply,
1781                                      unsigned int format,
1782                                      __DRIscreen *dri_screen,
1783                                      const __DRIimageExtension *image,
1784                                      void *loaderPrivate)
1785{
1786   __DRIimage                           *ret;
1787   int                                  *fds;
1788   uint32_t                             *strides_in, *offsets_in;
1789   int                                   strides[4], offsets[4];
1790   unsigned                              error;
1791   int                                   i;
1792
1793   if (bp_reply->nfd > 4)
1794      return NULL;
1795
1796   fds = xcb_dri3_buffers_from_pixmap_reply_fds(c, bp_reply);
1797   strides_in = xcb_dri3_buffers_from_pixmap_strides(bp_reply);
1798   offsets_in = xcb_dri3_buffers_from_pixmap_offsets(bp_reply);
1799   for (i = 0; i < bp_reply->nfd; i++) {
1800      strides[i] = strides_in[i];
1801      offsets[i] = offsets_in[i];
1802   }
1803
1804   ret = image->createImageFromDmaBufs2(dri_screen,
1805                                        bp_reply->width,
1806                                        bp_reply->height,
1807                                        image_format_to_fourcc(format),
1808                                        bp_reply->modifier,
1809                                        fds, bp_reply->nfd,
1810                                        strides, offsets,
1811                                        0, 0, 0, 0, /* UNDEFINED */
1812                                        &error, loaderPrivate);
1813
1814   for (i = 0; i < bp_reply->nfd; i++)
1815      close(fds[i]);
1816
1817   return ret;
1818}
1819#endif
1820
/** dri3_get_pixmap_buffer
 *
 * Get the DRM object for a pixmap from the X server and
 * wrap that with a __DRIimage structure using createImageFromFds
 * (or createImageFromDmaBufs2 when modifiers are available).
 * The resulting buffer is cached in draw->buffers[] so subsequent
 * calls for the same buffer_type return the same buffer.
 *
 * Returns NULL on allocation or protocol failure.
 */
static struct loader_dri3_buffer *
dri3_get_pixmap_buffer(__DRIdrawable *driDrawable, unsigned int format,
                       enum loader_dri3_buffer_type buffer_type,
                       struct loader_dri3_drawable *draw)
{
   int                                  buf_id = loader_dri3_pixmap_buf_id(buffer_type);
   struct loader_dri3_buffer            *buffer = draw->buffers[buf_id];
   xcb_drawable_t                       pixmap;
   xcb_sync_fence_t                     sync_fence;
   struct xshmfence                     *shm_fence;
   int                                  width;
   int                                  height;
   int                                  fence_fd;
   __DRIscreen                          *cur_screen;

   /* Already wrapped this pixmap once; reuse the cached buffer. */
   if (buffer)
      return buffer;

   pixmap = draw->drawable;

   buffer = calloc(1, sizeof *buffer);
   if (!buffer)
      goto no_buffer;

   /* Set up a shared-memory fence so CPU and server can synchronize
    * access to the buffer.
    */
   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      goto no_fence;
   shm_fence = xshmfence_map_shm(fence_fd);
   if (shm_fence == NULL) {
      close (fence_fd);
      goto no_fence;
   }

   /* Get the currently-bound screen or revert to using the drawable's screen if
    * no contexts are currently bound. The latter case is at least necessary for
    * obs-studio, when using Window Capture (Xcomposite) as a Source.
    */
   cur_screen = draw->vtable->get_dri_screen();
   if (!cur_screen) {
       cur_screen = draw->dri_screen;
   }

   /* Hand the fence fd to the server; NOTE(review): the fd appears to be
    * consumed by this request (it is not closed locally afterwards) —
    * confirm against the xcb_dri3 API contract.
    */
   xcb_dri3_fence_from_fd(draw->conn,
                          pixmap,
                          (sync_fence = xcb_generate_id(draw->conn)),
                          false,
                          fence_fd);
#ifdef HAVE_DRI3_MODIFIERS
   /* Prefer the multi-plane/modifier-aware import path when both the
    * server (multiplanes_available) and the driver (image ext >= 15 with
    * createImageFromDmaBufs2) support it.
    */
   if (draw->multiplanes_available &&
       draw->ext->image->base.version >= 15 &&
       draw->ext->image->createImageFromDmaBufs2) {
      xcb_dri3_buffers_from_pixmap_cookie_t bps_cookie;
      xcb_dri3_buffers_from_pixmap_reply_t *bps_reply;

      bps_cookie = xcb_dri3_buffers_from_pixmap(draw->conn, pixmap);
      bps_reply = xcb_dri3_buffers_from_pixmap_reply(draw->conn, bps_cookie,
                                                     NULL);
      if (!bps_reply)
         goto no_image;
      buffer->image =
         loader_dri3_create_image_from_buffers(draw->conn, bps_reply, format,
                                               cur_screen, draw->ext->image,
                                               buffer);
      width = bps_reply->width;
      height = bps_reply->height;
      free(bps_reply);
   } else
#endif
   {
      /* Single-plane fallback path. */
      xcb_dri3_buffer_from_pixmap_cookie_t bp_cookie;
      xcb_dri3_buffer_from_pixmap_reply_t *bp_reply;

      bp_cookie = xcb_dri3_buffer_from_pixmap(draw->conn, pixmap);
      bp_reply = xcb_dri3_buffer_from_pixmap_reply(draw->conn, bp_cookie, NULL);
      if (!bp_reply)
         goto no_image;

      buffer->image = loader_dri3_create_image(draw->conn, bp_reply, format,
                                               cur_screen, draw->ext->image,
                                               buffer);
      width = bp_reply->width;
      height = bp_reply->height;
      free(bp_reply);
   }

   if (!buffer->image)
      goto no_image;

   /* own_pixmap = false: the pixmap belongs to the application, so it
    * must not be freed when this buffer is destroyed.
    */
   buffer->pixmap = pixmap;
   buffer->own_pixmap = false;
   buffer->width = width;
   buffer->height = height;
   buffer->shm_fence = shm_fence;
   buffer->sync_fence = sync_fence;

   draw->buffers[buf_id] = buffer;

   return buffer;

no_image:
   xcb_sync_destroy_fence(draw->conn, sync_fence);
   xshmfence_unmap_shm(shm_fence);
no_fence:
   free(buffer);
no_buffer:
   return NULL;
}
1933
/** dri3_get_buffer
 *
 * Find a front or back buffer, allocating new ones as necessary.
 * Reallocates when the cached buffer is missing, the wrong size, or
 * marked for reallocation, preserving old contents across a resize.
 */
static struct loader_dri3_buffer *
dri3_get_buffer(__DRIdrawable *driDrawable,
                unsigned int format,
                enum loader_dri3_buffer_type buffer_type,
                struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *buffer;
   /* Back buffers must be idle (not scanned out / in flight) before the
    * client renders to them, so they await their fence by default.
    */
   bool fence_await = buffer_type == loader_dri3_buffer_back;
   int buf_id;

   if (buffer_type == loader_dri3_buffer_back) {
      draw->back_format = format;

      buf_id = dri3_find_back(draw, !draw->prefer_back_buffer_reuse);

      if (buf_id < 0)
         return NULL;
   } else {
      buf_id = LOADER_DRI3_FRONT_ID;
   }

   buffer = draw->buffers[buf_id];

   /* Allocate a new buffer if there isn't an old one, if that
    * old one is the wrong size, or if it's suboptimal
    */
   if (!buffer || buffer->width != draw->width ||
       buffer->height != draw->height ||
       buffer->reallocate) {
      struct loader_dri3_buffer *new_buffer;

      /* Allocate the new buffers
       */
      new_buffer = dri3_alloc_render_buffer(draw,
                                                   format,
                                                   draw->width,
                                                   draw->height,
                                                   draw->depth);
      if (!new_buffer)
         return NULL;

      /* When resizing, copy the contents of the old buffer, waiting for that
       * copy to complete using our fences before proceeding
       */
      if ((buffer_type == loader_dri3_buffer_back ||
           (buffer_type == loader_dri3_buffer_front && draw->have_fake_front))
          && buffer) {

         /* Fill the new buffer with data from an old buffer */
         if (!loader_dri3_blit_image(draw,
                                     new_buffer->image,
                                     buffer->image,
                                     0, 0,
                                     MIN2(buffer->width, new_buffer->width),
                                     MIN2(buffer->height, new_buffer->height),
                                     0, 0, 0) &&
             !buffer->linear_buffer) {
            /* Driver-side blit failed; fall back to a server-side copy
             * bracketed by fence reset/trigger so we can wait on it.
             */
            dri3_fence_reset(draw->conn, new_buffer);
            dri3_copy_area(draw->conn,
                           buffer->pixmap,
                           new_buffer->pixmap,
                           dri3_drawable_gc(draw),
                           0, 0, 0, 0,
                           draw->width, draw->height);
            dri3_fence_trigger(draw->conn, new_buffer);
            fence_await = true;
         }
         dri3_free_render_buffer(draw, buffer);
      } else if (buffer_type == loader_dri3_buffer_front) {
         /* Fill the new fake front with data from a real front */
         loader_dri3_swapbuffer_barrier(draw);
         dri3_fence_reset(draw->conn, new_buffer);
         dri3_copy_area(draw->conn,
                        draw->drawable,
                        new_buffer->pixmap,
                        dri3_drawable_gc(draw),
                        0, 0, 0, 0,
                        draw->width, draw->height);
         dri3_fence_trigger(draw->conn, new_buffer);

         if (new_buffer->linear_buffer) {
            /* Cross-GPU case: wait for the server copy, then blit the
             * linear staging buffer into the tiled render image.
             */
            dri3_fence_await(draw->conn, draw, new_buffer);
            (void) loader_dri3_blit_image(draw,
                                          new_buffer->image,
                                          new_buffer->linear_buffer,
                                          0, 0, draw->width, draw->height,
                                          0, 0, 0);
         } else
            fence_await = true;
      }
      buffer = new_buffer;
      draw->buffers[buf_id] = buffer;
   }

   if (fence_await)
      dri3_fence_await(draw->conn, draw, buffer);

   /*
    * Do we need to preserve the content of a previous buffer?
    *
    * Note that this blit is needed only to avoid a wait for a buffer that
    * is currently in the flip chain or being scanned out from. That's really
    * a tradeoff. If we're ok with the wait we can reduce the number of back
    * buffers to 1 for SWAP_EXCHANGE, and 1 for SWAP_COPY,
    * but in the latter case we must disallow page-flipping.
    */
   if (buffer_type == loader_dri3_buffer_back &&
       draw->cur_blit_source != -1 &&
       draw->buffers[draw->cur_blit_source] &&
       buffer != draw->buffers[draw->cur_blit_source]) {

      struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];

      /* Avoid flushing here. Will probably do good for tiling hardware. */
      (void) loader_dri3_blit_image(draw,
                                    buffer->image,
                                    source->image,
                                    0, 0, draw->width, draw->height,
                                    0, 0, 0);
      buffer->last_swap = source->last_swap;
      draw->cur_blit_source = -1;
   }
   /* Return the requested buffer */
   return buffer;
}
2063
2064/** dri3_free_buffers
2065 *
2066 * Free the front bufffer or all of the back buffers. Used
2067 * when the application changes which buffers it needs
2068 */
2069static void
2070dri3_free_buffers(__DRIdrawable *driDrawable,
2071                  enum loader_dri3_buffer_type buffer_type,
2072                  struct loader_dri3_drawable *draw)
2073{
2074   struct loader_dri3_buffer *buffer;
2075   int first_id;
2076   int n_id;
2077   int buf_id;
2078
2079   switch (buffer_type) {
2080   case loader_dri3_buffer_back:
2081      first_id = LOADER_DRI3_BACK_ID(0);
2082      n_id = LOADER_DRI3_MAX_BACK;
2083      draw->cur_blit_source = -1;
2084      break;
2085   case loader_dri3_buffer_front:
2086      first_id = LOADER_DRI3_FRONT_ID;
2087      /* Don't free a fake front holding new backbuffer content. */
2088      n_id = (draw->cur_blit_source == LOADER_DRI3_FRONT_ID) ? 0 : 1;
2089      break;
2090   default:
2091      unreachable("unhandled buffer_type");
2092   }
2093
2094   for (buf_id = first_id; buf_id < first_id + n_id; buf_id++) {
2095      buffer = draw->buffers[buf_id];
2096      if (buffer) {
2097         dri3_free_render_buffer(draw, buffer);
2098         draw->buffers[buf_id] = NULL;
2099      }
2100   }
2101}
2102
2103/** loader_dri3_get_buffers
2104 *
2105 * The published buffer allocation API.
2106 * Returns all of the necessary buffers, allocating
2107 * as needed.
2108 */
2109int
2110loader_dri3_get_buffers(__DRIdrawable *driDrawable,
2111                        unsigned int format,
2112                        uint32_t *stamp,
2113                        void *loaderPrivate,
2114                        uint32_t buffer_mask,
2115                        struct __DRIimageList *buffers)
2116{
2117   struct loader_dri3_drawable *draw = loaderPrivate;
2118   struct loader_dri3_buffer   *front, *back;
2119   int buf_id;
2120
2121   buffers->image_mask = 0;
2122   buffers->front = NULL;
2123   buffers->back = NULL;
2124
2125   front = NULL;
2126   back = NULL;
2127
2128   if (!dri3_update_drawable(draw))
2129      return false;
2130
2131   dri3_update_max_num_back(draw);
2132
2133   /* Free no longer needed back buffers */
2134   for (buf_id = draw->cur_num_back; buf_id < LOADER_DRI3_MAX_BACK; buf_id++) {
2135      if (draw->cur_blit_source != buf_id && draw->buffers[buf_id]) {
2136         dri3_free_render_buffer(draw, draw->buffers[buf_id]);
2137         draw->buffers[buf_id] = NULL;
2138      }
2139   }
2140
2141   /* pixmaps always have front buffers.
2142    * Exchange swaps also mandate fake front buffers.
2143    */
2144   if (draw->is_pixmap || draw->swap_method == __DRI_ATTRIB_SWAP_EXCHANGE)
2145      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
2146
2147   if (buffer_mask & __DRI_IMAGE_BUFFER_FRONT) {
2148      /* All pixmaps are owned by the server gpu.
2149       * When we use a different gpu, we can't use the pixmap
2150       * as buffer since it is potentially tiled a way
2151       * our device can't understand. In this case, use
2152       * a fake front buffer. Hopefully the pixmap
2153       * content will get synced with the fake front
2154       * buffer.
2155       */
2156      if (draw->is_pixmap && !draw->is_different_gpu)
2157         front = dri3_get_pixmap_buffer(driDrawable,
2158                                               format,
2159                                               loader_dri3_buffer_front,
2160                                               draw);
2161      else
2162         front = dri3_get_buffer(driDrawable,
2163                                        format,
2164                                        loader_dri3_buffer_front,
2165                                        draw);
2166
2167      if (!front)
2168         return false;
2169   } else {
2170      dri3_free_buffers(driDrawable, loader_dri3_buffer_front, draw);
2171      draw->have_fake_front = 0;
2172   }
2173
2174   if (buffer_mask & __DRI_IMAGE_BUFFER_BACK) {
2175      back = dri3_get_buffer(driDrawable,
2176                                    format,
2177                                    loader_dri3_buffer_back,
2178                                    draw);
2179      if (!back)
2180         return false;
2181      draw->have_back = 1;
2182   } else {
2183      dri3_free_buffers(driDrawable, loader_dri3_buffer_back, draw);
2184      draw->have_back = 0;
2185   }
2186
2187   if (front) {
2188      buffers->image_mask |= __DRI_IMAGE_BUFFER_FRONT;
2189      buffers->front = front->image;
2190      draw->have_fake_front = draw->is_different_gpu || !draw->is_pixmap;
2191   }
2192
2193   if (back) {
2194      buffers->image_mask |= __DRI_IMAGE_BUFFER_BACK;
2195      buffers->back = back->image;
2196   }
2197
2198   draw->stamp = stamp;
2199
2200   return true;
2201}
2202
2203/** loader_dri3_update_drawable_geometry
2204 *
2205 * Get the current drawable geometry.
2206 */
2207void
2208loader_dri3_update_drawable_geometry(struct loader_dri3_drawable *draw)
2209{
2210   xcb_get_geometry_cookie_t geom_cookie;
2211   xcb_get_geometry_reply_t *geom_reply;
2212
2213   geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
2214
2215   geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
2216
2217   if (geom_reply) {
2218      draw->width = geom_reply->width;
2219      draw->height = geom_reply->height;
2220      draw->vtable->set_drawable_size(draw, draw->width, draw->height);
2221      draw->ext->flush->invalidate(draw->dri_drawable);
2222
2223      free(geom_reply);
2224   }
2225}
2226
2227
2228/**
2229 * Make sure the server has flushed all pending swap buffers to hardware
2230 * for this drawable. Ideally we'd want to send an X protocol request to
2231 * have the server block our connection until the swaps are complete. That
2232 * would avoid the potential round-trip here.
2233 */
2234void
2235loader_dri3_swapbuffer_barrier(struct loader_dri3_drawable *draw)
2236{
2237   int64_t ust, msc, sbc;
2238
2239   (void) loader_dri3_wait_for_sbc(draw, 0, &ust, &msc, &sbc);
2240}
2241
2242/**
2243 * Perform any cleanup associated with a close screen operation.
2244 * \param dri_screen[in,out] Pointer to __DRIscreen about to be closed.
2245 *
2246 * This function destroys the screen's cached swap context if any.
2247 */
2248void
2249loader_dri3_close_screen(__DRIscreen *dri_screen)
2250{
2251   mtx_lock(&blit_context.mtx);
2252   if (blit_context.ctx && blit_context.cur_screen == dri_screen) {
2253      blit_context.core->destroyContext(blit_context.ctx);
2254      blit_context.ctx = NULL;
2255   }
2256   mtx_unlock(&blit_context.mtx);
2257}
2258
2259/**
2260 * Find a backbuffer slot - potentially allocating a back buffer
2261 *
2262 * \param draw[in,out]  Pointer to the drawable for which to find back.
2263 * \return Pointer to a new back buffer or NULL if allocation failed or was
2264 * not mandated.
2265 *
2266 * Find a potentially new back buffer, and if it's not been allocated yet and
2267 * in addition needs initializing, then try to allocate and initialize it.
2268 */
2269#include <stdio.h>
2270static struct loader_dri3_buffer *
2271dri3_find_back_alloc(struct loader_dri3_drawable *draw)
2272{
2273   struct loader_dri3_buffer *back;
2274   int id;
2275
2276   id = dri3_find_back(draw, false);
2277   if (id < 0)
2278      return NULL;
2279
2280   back = draw->buffers[id];
2281   /* Allocate a new back if we haven't got one */
2282   if (!back && draw->back_format != __DRI_IMAGE_FORMAT_NONE &&
2283       dri3_update_drawable(draw))
2284      back = dri3_alloc_render_buffer(draw, draw->back_format,
2285                                      draw->width, draw->height, draw->depth);
2286
2287   if (!back)
2288      return NULL;
2289
2290   draw->buffers[id] = back;
2291
2292   /* If necessary, prefill the back with data according to swap_method mode. */
2293   if (draw->cur_blit_source != -1 &&
2294       draw->buffers[draw->cur_blit_source] &&
2295       back != draw->buffers[draw->cur_blit_source]) {
2296      struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];
2297
2298      dri3_fence_await(draw->conn, draw, source);
2299      dri3_fence_await(draw->conn, draw, back);
2300      (void) loader_dri3_blit_image(draw,
2301                                    back->image,
2302                                    source->image,
2303                                    0, 0, draw->width, draw->height,
2304                                    0, 0, 0);
2305      back->last_swap = source->last_swap;
2306      draw->cur_blit_source = -1;
2307   }
2308
2309   return back;
2310}
2311