1/**************************************************************************
2
3Copyright 2001 VA Linux Systems Inc., Fremont, California.
4Copyright © 2002 by David Dawes
5
6All Rights Reserved.
7
8Permission is hereby granted, free of charge, to any person obtaining a
9copy of this software and associated documentation files (the "Software"),
10to deal in the Software without restriction, including without limitation
11on the rights to use, copy, modify, merge, publish, distribute, sub
12license, and/or sell copies of the Software, and to permit persons to whom
13the Software is furnished to do so, subject to the following conditions:
14
15The above copyright notice and this permission notice (including the next
16paragraph) shall be included in all copies or substantial portions of the
17Software.
18
19THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22ATI, VA LINUX SYSTEMS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
23DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27**************************************************************************/
28
29/*
30 * Authors: Jeff Hartmann <jhartmann@valinux.com>
31 *          David Dawes <dawes@xfree86.org>
32 *          Keith Whitwell <keith@tungstengraphics.com>
33 */
34
35#ifdef HAVE_CONFIG_H
36#include "config.h"
37#endif
38
39#include <errno.h>
40#include <time.h>
41#include <string.h>
42#include <unistd.h>
43#include <poll.h>
44
45#include "sna.h"
46#include "intel_options.h"
47
48#include <xf86drm.h>
49#include <i915_drm.h>
50#include <dri2.h>
51#if XORG_VERSION_CURRENT >= XORG_VERSION_NUMERIC(1,12,99,901,0) && defined(COMPOSITE)
52#include <compositeext.h>
53#define CHECK_FOR_COMPOSITOR
54#endif
55
56#define DBG_CAN_FLIP 1
57#define DBG_CAN_XCHG 1
58
59#define DBG_FORCE_COPY -1 /* KGEM_BLT or KGEM_3D */
60
61#if DRI2INFOREC_VERSION < 2
62#error DRI2 version supported by the Xserver is too old
63#endif
64
/* Take an additional reference on a bo; short alias so call sites read
 * as plain reference bumps.
 */
static inline struct kgem_bo *ref(struct kgem_bo *bo)
{
	return kgem_bo_reference(bo);
}
69
/* Driver-private state appended to every DRI2Buffer2Rec we hand out
 * (see get_private(): both live in a single allocation).
 */
struct sna_dri2_private {
	PixmapPtr pixmap;	/* backing pixmap for front buffers, else NULL */
	struct kgem_bo *bo;	/* GPU buffer object backing this attachment */
	DRI2Buffer2Ptr proxy;	/* buffer this one forwards to, if any */
	bool stale;		/* back buffer not redrawn since last swap */
	uint32_t size;		/* drawable height << 16 | width at creation */
	int refcnt;
};
78
/* The driver-private state lives immediately after the DRI2Buffer2Rec
 * in the same allocation (see the single calloc in sna_dri2_create_buffer),
 * so simply step over the public struct.
 */
static inline struct sna_dri2_private *
get_private(void *buffer)
{
	return (struct sna_dri2_private *)((DRI2Buffer2Ptr)buffer+1);
}
84
/* Retrieve the DRI2 front buffer stashed in slot [2] of the pixmap's
 * devPrivate block; NULL if the pixmap has no DRI2 buffer attached.
 */
pure static inline DRI2BufferPtr sna_pixmap_get_buffer(PixmapPtr pixmap)
{
	assert(pixmap->refcnt);
	return ((void **)__get_private(pixmap, sna_pixmap_key))[2];
}
90
/* Attach (or clear, with ptr == NULL) the DRI2 front buffer pointer in
 * slot [2] of the pixmap's devPrivate block.
 */
static inline void sna_pixmap_set_buffer(PixmapPtr pixmap, void *ptr)
{
	assert(pixmap->refcnt);
	((void **)__get_private(pixmap, sna_pixmap_key))[2] = ptr;
}
96
97#if DRI2INFOREC_VERSION >= 4
/* Type tag for a queued sna_dri2_event; values beyond WAITMSC cover the
 * swap/flip state machine handled by sna_dri2_flip_event().
 */
enum event_type {
	WAITMSC = 0,
	SWAP,
	SWAP_COMPLETE,
	FLIP,
	FLIP_THROTTLE,
	FLIP_COMPLETE,
	FLIP_ASYNC,
};
107
/* An idle back-buffer bo parked on a window's reuse cache
 * (see sna_dri2_cache_bo / sna_dri2_get_back).
 */
struct dri_bo {
	struct list link;
	struct kgem_bo *bo;
	uint32_t name;		/* flink name previously handed to the client */
	unsigned flags;
};
114
/* A deferred DRI2 operation (MSC wait, swap or pageflip) attached to a
 * drawable, typically completed from a vblank/flip event handler.
 */
struct sna_dri2_event {
	struct sna *sna;
	DrawablePtr draw;
	ClientPtr client;
	enum event_type type;
	xf86CrtcPtr crtc;
	int pipe;
	bool queued;
	bool sync;
	bool chained;

	/* for swaps & flips only */
	DRI2SwapEventPtr event_complete;
	void *event_data;
	DRI2BufferPtr front;
	DRI2BufferPtr back;
	struct kgem_bo *bo;

	/* copy parameters staged for execution at a later event */
	struct copy {
		struct kgem_bo *bo;
		unsigned flags;
		uint32_t name;
		uint32_t size;
	} pending;

	/* next event queued on the same drawable (see dri2_chain()) */
	struct sna_dri2_event *chain;

	struct list link;

	int flip_continue;
	int keepalive;
	int signal;
};
148
149#if DRI2INFOREC_VERSION < 10
150#undef USE_ASYNC_SWAP
151#endif
152
153#if USE_ASYNC_SWAP
154#define KEEPALIVE 8 /* wait ~100ms before discarding swap caches */
155#define APPLY_DAMAGE 0
156#else
157#define USE_ASYNC_SWAP 0
158#define KEEPALIVE 1
159#define APPLY_DAMAGE 1
160#endif
161
162static void sna_dri2_flip_event(struct sna_dri2_event *flip);
163inline static DRI2BufferPtr dri2_window_get_front(WindowPtr win);
164
165static struct kgem_bo *
166__sna_dri2_copy_region(struct sna *sna, DrawablePtr draw, RegionPtr region,
167		      DRI2BufferPtr src, DRI2BufferPtr dst,
168		      unsigned flags);
169
/* Blit the event's back buffer onto its front buffer, recording the bo
 * returned by the copy in info->bo and propagating the buffer age flags.
 */
inline static void
__sna_dri2_copy_event(struct sna_dri2_event *info, unsigned flags)
{
	DBG(("%s: flags = %x\n", __FUNCTION__, flags));
	assert(info->front != info->back);
	info->bo = __sna_dri2_copy_region(info->sna, info->draw, NULL,
					  info->back, info->front,
					  flags);
	info->front->flags = info->back->flags;
}
180
181static int front_pitch(DrawablePtr draw)
182{
183	DRI2BufferPtr buffer;
184
185	buffer = NULL;
186	if (draw->type != DRAWABLE_PIXMAP)
187		buffer = dri2_window_get_front((WindowPtr)draw);
188	if (buffer == NULL)
189		buffer = sna_pixmap_get_buffer(get_drawable_pixmap(draw));
190
191	return buffer ? buffer->pitch : 0;
192}
193
/* Per-window DRI2 bookkeeping, stored in the window's devPrivate. */
struct dri2_window {
	DRI2BufferPtr front;		/* window-specific front buffer, if any */
	struct sna_dri2_event *chain;	/* pending swap/flip events */
	xf86CrtcPtr crtc;
	int64_t msc_delta;
	struct list cache;		/* idle struct dri_bo back buffers */
	uint32_t cache_size;		/* height << 16 | width the cache was built for */
};
202
/* Fetch the DRI2 state attached to a window (slot [1] of the window
 * private); may be NULL if DRI2 has never been used on this window.
 */
static struct dri2_window *dri2_window(WindowPtr win)
{
	assert(win->drawable.type != DRAWABLE_PIXMAP);
	return ((void **)__get_private(win, sna_window_key))[1];
}
208
209static bool use_scanout(struct sna *sna,
210			DrawablePtr draw,
211			struct dri2_window *priv)
212{
213	if (priv->front)
214		return true;
215
216	return (sna->flags & (SNA_LINEAR_FB | SNA_NO_WAIT | SNA_NO_FLIP)) == 0 &&
217		draw->width  == sna->front->drawable.width &&
218		draw->height == sna->front->drawable.height &&
219		draw->bitsPerPixel == sna->front->drawable.bitsPerPixel;
220}
221
/* Provide an idle bo for the client's back buffer.
 *
 * If the current back bo is not on the scanout it is reused as-is;
 * otherwise an idle bo of matching geometry is pulled from the
 * per-window cache, or freshly allocated.  The displaced bo is cached
 * while the scanout still holds it, so it can be reused after the
 * next flip.
 */
static void
sna_dri2_get_back(struct sna *sna,
		  DrawablePtr draw,
		  DRI2BufferPtr back)
{
	struct dri2_window *priv = dri2_window((WindowPtr)draw);
	uint32_t size;
	struct kgem_bo *bo;
	struct dri_bo *c;
	uint32_t name;
	int flags;
	bool reuse;

	DBG(("%s: draw size=%dx%d, back buffer handle=%d size=%dx%d, is-scanout? %d, active?=%d, pitch=%d, front pitch=%d\n",
	     __FUNCTION__, draw->width, draw->height,
	     get_private(back)->bo->handle,
	     get_private(back)->size & 0xffff, get_private(back)->size >> 16,
	     get_private(back)->bo->scanout,
	     get_private(back)->bo->active_scanout,
	     back->pitch, front_pitch(draw)));
	assert(priv);

	/* Drawable resized: everything in the cache is the wrong size. */
	size = draw->height << 16 | draw->width;
	if (size != priv->cache_size) {
		while (!list_is_empty(&priv->cache)) {
			c = list_first_entry(&priv->cache, struct dri_bo, link);
			list_del(&c->link);

			DBG(("%s: releasing cached handle=%d\n", __FUNCTION__, c->bo ? c->bo->handle : 0));
			assert(c->bo);
			kgem_bo_destroy(&sna->kgem, c->bo);

			free(c);
		}
		priv->cache_size = size;
	}

	/* Reuse the existing back bo only if it still matches the drawable
	 * and its scanout-capability matches current requirements.
	 */
	reuse = size == get_private(back)->size;
	if (reuse)
		reuse = get_private(back)->bo->scanout == use_scanout(sna, draw, priv);
	DBG(("%s: reuse backbuffer? %d\n", __FUNCTION__, reuse));
	if (reuse) {
		bo = get_private(back)->bo;
		assert(bo->refcnt);
		DBG(("%s: back buffer handle=%d, active?=%d, refcnt=%d\n",
		     __FUNCTION__, bo->handle, bo->active_scanout, get_private(back)->refcnt));
		if (bo->active_scanout == 0) {
			DBG(("%s: reuse unattached back\n", __FUNCTION__));
			get_private(back)->stale = false;
			return;
		}
	}

	/* Search the cache for an idle buffer we can hand back. */
	bo = NULL;
	list_for_each_entry(c, &priv->cache, link) {
		DBG(("%s: cache: handle=%d, active=%d\n",
		     __FUNCTION__, c->bo ? c->bo->handle : 0, c->bo ? c->bo->active_scanout : -1));
		assert(c->bo);
		if (c->bo->active_scanout == 0) {
			_list_del(&c->link);
			if (c->bo == NULL) {
				free(c);
				goto out;
			}
			bo = c->bo;
			name = c->name;
			flags = c->flags;
			DBG(("%s: reuse cache handle=%d, name=%d, flags=%d\n", __FUNCTION__, bo->handle, name, flags));
			/* The empty slot is recycled below for the old back bo. */
			c->bo = NULL;
			break;
		}
	}
	if (bo == NULL) {
		DBG(("%s: allocating new backbuffer\n", __FUNCTION__));
		flags = CREATE_EXACT;

		if (get_private(back)->bo->scanout &&
		    use_scanout(sna, draw, priv)) {
			DBG(("%s: requesting scanout compatible back\n", __FUNCTION__));
			flags |= CREATE_SCANOUT;
		}

		bo = kgem_create_2d(&sna->kgem,
				    draw->width, draw->height, draw->bitsPerPixel,
				    get_private(back)->bo->tiling,
				    flags);
		if (bo == NULL)
			return;

		name = kgem_bo_flink(&sna->kgem, bo);
		if (name == 0) {
			kgem_bo_destroy(&sna->kgem, bo);
			return;
		}

		flags = 0;
		if (USE_ASYNC_SWAP && back->flags) {
			BoxRec box;

			box.x1 = 0;
			box.y1 = 0;
			box.x2 = draw->width;
			box.y2 = draw->height;

			DBG(("%s: filling new buffer with old back\n", __FUNCTION__));
			if (sna->render.copy_boxes(sna, GXcopy,
						   draw, get_private(back)->bo, 0, 0,
						   draw, bo, 0, 0,
						   &box, 1, COPY_LAST | COPY_DRI))
				flags = back->flags;
		}
	}
	assert(bo->active_scanout == 0);

	/* Keep a cache reference to the displaced bo while the scanout still
	 * holds it (refcnt == 1 + active_scanout); otherwise just drop it.
	 * Note &c->link == &priv->cache means the loop above completed
	 * without reclaiming an entry, so a fresh slot is needed.
	 */
	if (reuse && get_private(back)->bo->refcnt == 1 + get_private(back)->bo->active_scanout) {
		if (&c->link == &priv->cache)
			c = malloc(sizeof(*c));
		if (c != NULL) {
			c->bo = ref(get_private(back)->bo);
			c->name = back->name;
			c->flags = back->flags;
			list_add(&c->link, &priv->cache);
			DBG(("%s: caching handle=%d (name=%d, flags=%d, active_scanout=%d)\n", __FUNCTION__, c->bo->handle, c->name, c->flags, c->bo->active_scanout));
		}
	} else {
		if (&c->link != &priv->cache)
			free(c);
	}

	assert(bo->active_scanout == 0);
	assert(bo != get_private(back)->bo);
	kgem_bo_destroy(&sna->kgem, get_private(back)->bo);

	get_private(back)->bo = bo;
	get_private(back)->size = draw->height << 16 | draw->width;
	back->pitch = bo->pitch;
	back->name = name;
	back->flags = flags;

	assert(back->pitch);
	assert(back->name);

out:
	get_private(back)->stale = false;
}
367
/* Head of the pending swap/flip event chain for this window, if any. */
static struct sna_dri2_event *
dri2_chain(DrawablePtr d)
{
	struct dri2_window *priv = dri2_window((WindowPtr)d);
	assert(priv != NULL);
	assert(priv->chain == NULL || priv->chain->chained);
	return priv->chain;
}
376inline static DRI2BufferPtr dri2_window_get_front(WindowPtr win)
377{
378	struct dri2_window *priv = dri2_window(win);
379	assert(priv->front == NULL || get_private(priv->front)->bo->active_scanout);
380	return priv ? priv->front : NULL;
381}
382#else
/* Without DRI2 swap support there is never a window-private front buffer. */
inline static void *dri2_window_get_front(WindowPtr win) { return NULL; }
384#define APPLY_DAMAGE 1
385#endif
386
387#if DRI2INFOREC_VERSION < 6
388
389#define xorg_can_triple_buffer() 0
390#define swap_limit(d, l) false
391#define mark_stale(b)
392
393#else
394
395#if XORG_VERSION_CURRENT >= XORG_VERSION_NUMERIC(1,15,99,904,0)
396/* Prime fixed for triple buffer support */
397#define xorg_can_triple_buffer() 1
398#elif XORG_VERSION_CURRENT < XORG_VERSION_NUMERIC(1,12,99,901,0)
399/* Before numGPUScreens was introduced */
400#define xorg_can_triple_buffer() 1
401#else
402/* Subject to crashers when combining triple buffering and Prime */
inline static bool xorg_can_triple_buffer(void)
{
	/* Only safe when no secondary GPU screens (Prime) are configured. */
	return screenInfo.numGPUScreens == 0;
}
407#endif
408
/* Flag the client's current back buffer as out-of-date; the flag is
 * cleared again when a fresh back is provided (sna_dri2_get_back).
 */
static void
mark_stale(DRI2BufferPtr back)
{
	/* If we have reuse notifications, we can track when the
	 * client tries to present an old buffer (one that has not
	 * been updated since the last swap) and avoid showing the
	 * stale frame. (This is mostly useful for tracking down
	 * driver bugs!)
	 */
	DBG(("%s(handle=%d) => %d\n", __FUNCTION__,
	     get_private(back)->bo->handle, xorg_can_triple_buffer()));
	get_private(back)->stale = xorg_can_triple_buffer();
}
422
423static Bool
424sna_dri2_swap_limit_validate(DrawablePtr draw, int swap_limit)
425{
426	DBG(("%s: swap limit set to %d\n", __FUNCTION__, swap_limit));
427	return swap_limit >= 1;
428}
429
/* DRI2 ReuseBufferNotify hook: called when the client re-presents a
 * buffer.  For a window's back-left buffer, swap in a fresh idle bo if
 * the current one is on the scanout; finally kick any pending GPU work
 * on the bo so the client does not render against unsubmitted batches.
 */
static void
sna_dri2_reuse_buffer(DrawablePtr draw, DRI2BufferPtr buffer)
{
	struct sna *sna = to_sna_from_drawable(draw);

	DBG(("%s: reusing buffer pixmap=%ld, attachment=%d, handle=%d, name=%d\n",
	     __FUNCTION__, get_drawable_pixmap(draw)->drawable.serialNumber,
	     buffer->attachment, get_private(buffer)->bo->handle, buffer->name));
	assert(get_private(buffer)->refcnt);
	assert(get_private(buffer)->bo->refcnt >= get_private(buffer)->bo->active_scanout);
	assert(kgem_bo_flink(&sna->kgem, get_private(buffer)->bo) == buffer->name);

	if (buffer->attachment == DRI2BufferBackLeft &&
	    draw->type != DRAWABLE_PIXMAP) {
		DBG(("%s: replacing back buffer on window %ld\n", __FUNCTION__, draw->id));
		sna_dri2_get_back(sna, draw, buffer);

		assert(get_private(buffer)->bo->refcnt);
		assert(get_private(buffer)->bo->active_scanout == 0);
		assert(kgem_bo_flink(&sna->kgem, get_private(buffer)->bo) == buffer->name);
		DBG(("%s: reusing back buffer handle=%d, name=%d, pitch=%d, age=%d\n",
		     __FUNCTION__, get_private(buffer)->bo->handle,
		     buffer->name, buffer->pitch, buffer->flags));
	}

	kgem_bo_submit(&sna->kgem, get_private(buffer)->bo);
}
457
/* Raise the drawable's DRI2 swap limit (for triple buffering); returns
 * false when the server cannot safely triple buffer.
 */
static bool swap_limit(DrawablePtr draw, int limit)
{
	if (!xorg_can_triple_buffer())
		return false;

	DBG(("%s: draw=%ld setting swap limit to %d\n", __FUNCTION__, (long)draw->id, limit));
	DRI2SwapLimit(draw, limit);
	return true;
}
467#endif
468
469#define COLOR_PREFER_TILING_Y 0
470
471/* Prefer to enable TILING_Y if this buffer will never be a
472 * candidate for pageflipping
473 */
474static uint32_t color_tiling(struct sna *sna, DrawablePtr draw)
475{
476	uint32_t tiling;
477
478	if (!sna->kgem.can_fence)
479		return I915_TILING_NONE;
480
481	if (COLOR_PREFER_TILING_Y &&
482	    (draw->width  != sna->front->drawable.width ||
483	     draw->height != sna->front->drawable.height))
484		tiling = I915_TILING_Y;
485	else
486		tiling = I915_TILING_X;
487
488	return kgem_choose_tiling(&sna->kgem, -tiling,
489				  draw->width,
490				  draw->height,
491				  draw->bitsPerPixel);
492}
493
494static uint32_t other_tiling(struct sna *sna, DrawablePtr draw)
495{
496	/* XXX Can mix color X / depth Y? */
497	return kgem_choose_tiling(&sna->kgem,
498				  sna->kgem.gen >= 040 ? -I915_TILING_Y : -I915_TILING_X,
499				  draw->width,
500				  draw->height,
501				  draw->bitsPerPixel);
502}
503
/* Prepare a pixmap for export to a DRI2 client: force it onto the GPU,
 * adjust the bo's tiling where necessary, and account the client's use
 * via active_scanout.  Returns NULL on failure (caller reports BadAlloc).
 */
static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
					  PixmapPtr pixmap)
{
	struct sna_pixmap *priv;

	DBG(("%s: attaching DRI client to pixmap=%ld\n",
	     __FUNCTION__, pixmap->drawable.serialNumber));

	priv = sna_pixmap(pixmap);
	if (priv != NULL && IS_STATIC_PTR(priv->ptr) && priv->cpu_bo) {
		DBG(("%s: SHM or unattached Pixmap, BadAlloc\n", __FUNCTION__));
		return NULL;
	}

	priv = sna_pixmap_move_to_gpu(pixmap,
				      MOVE_READ | __MOVE_FORCE | __MOVE_DRI);
	if (priv == NULL) {
		DBG(("%s: failed to move to GPU, BadAlloc\n", __FUNCTION__));
		return NULL;
	}

	assert(priv->flush == false || priv->pinned & PIN_DRI3);
	assert(priv->gpu_bo->flush == false || priv->pinned & PIN_DRI3);
	assert(priv->cpu_damage == NULL);
	assert(priv->gpu_bo);
	assert(priv->gpu_bo->proxy == NULL);

	if (!kgem_bo_is_fenced(&sna->kgem, priv->gpu_bo)) {
		/* A tiled bo we cannot fence cannot be shared: go linear. */
		if (priv->gpu_bo->tiling &&
		    !sna_pixmap_change_tiling(pixmap, I915_TILING_NONE)) {
			DBG(("%s: failed to discard tiling (%d) for DRI2 protocol\n", __FUNCTION__, priv->gpu_bo->tiling));
			return NULL;
		}
	} else {
		int tiling = color_tiling(sna, &pixmap->drawable);
		if (tiling < 0)
			tiling = -tiling;
		/* Opportunistic tiling upgrade, but never touch a scanout bo. */
		if (priv->gpu_bo->tiling < tiling && !priv->gpu_bo->scanout)
			sna_pixmap_change_tiling(pixmap, tiling);
	}

	priv->gpu_bo->active_scanout++;

	return priv->gpu_bo;
}
549
/* Swap the bo backing an exported front buffer after the pixmap's GPU
 * bo has been replaced, keeping the DRI2 buffer's flink name, pitch,
 * flush hints and active_scanout accounting in sync.
 */
void
sna_dri2_pixmap_update_bo(struct sna *sna, PixmapPtr pixmap, struct kgem_bo *bo)
{
	DRI2BufferPtr buffer;
	struct sna_dri2_private *private;

	buffer = sna_pixmap_get_buffer(pixmap);
	if (buffer == NULL)
		return;

	DBG(("%s: pixmap=%ld, old handle=%d, new handle=%d\n", __FUNCTION__,
	     pixmap->drawable.serialNumber,
	     get_private(buffer)->bo->handle,
	     sna_pixmap(pixmap)->gpu_bo->handle));

	private = get_private(buffer);
	assert(private->pixmap == pixmap);

	/* Callers are not expected to pass the bo we already hold (assert),
	 * but release builds tolerate it as a no-op.
	 */
	assert(bo != private->bo);
	if (private->bo == bo)
		return;

	assert(private->bo->active_scanout > 0);
	private->bo->active_scanout--;

	DBG(("%s: dropping flush hint from handle=%d\n", __FUNCTION__, private->bo->handle));
	private->bo->flush = false;
	kgem_bo_destroy(&sna->kgem, private->bo);


	buffer->name = kgem_bo_flink(&sna->kgem, bo);
	buffer->pitch = bo->pitch;
	private->bo = ref(bo);
	bo->active_scanout++;

	DBG(("%s: adding flush hint to handle=%d\n", __FUNCTION__, bo->handle));
	bo->flush = true;
	if (bo->exec)
		sna->kgem.flush = 1;
	assert(sna_pixmap(pixmap)->flush);

	/* XXX DRI2InvalidateDrawable(&pixmap->drawable); */
}
593
/* DRI2 CreateBuffer hook: allocate (or reuse) the bo for the requested
 * attachment, flink it so the client can open it, and wrap it in a
 * DRI2Buffer2Rec with our private state appended (see get_private()).
 * Front-left buffers additionally pin the pixmap and gain flush hints
 * so client rendering is submitted before replies.
 */
static DRI2Buffer2Ptr
sna_dri2_create_buffer(DrawablePtr draw,
		       unsigned int attachment,
		       unsigned int format)
{
	struct sna *sna = to_sna_from_drawable(draw);
	DRI2Buffer2Ptr buffer;
	struct sna_dri2_private *private;
	PixmapPtr pixmap;
	struct kgem_bo *bo;
	unsigned bpp = format ?: draw->bitsPerPixel;
	unsigned flags = CREATE_EXACT;
	uint32_t size;

	DBG(("%s pixmap=%ld, (attachment=%d, format=%d, drawable=%dx%d), window?=%d\n",
	     __FUNCTION__,
	     get_drawable_pixmap(draw)->drawable.serialNumber,
	     attachment, format, draw->width, draw->height,
	     draw->type != DRAWABLE_PIXMAP));

	pixmap = NULL;
	size = (uint32_t)draw->height << 16 | draw->width;
	switch (attachment) {
	case DRI2BufferFrontLeft:
		sna->needs_dri_flush = true;

		pixmap = get_drawable_pixmap(draw);
		/* Reuse an existing front-buffer export if one is attached,
		 * simply taking another reference on it.
		 */
		buffer = NULL;
		if (draw->type != DRAWABLE_PIXMAP)
			buffer = dri2_window_get_front((WindowPtr)draw);
		if (buffer == NULL)
			buffer = (DRI2Buffer2Ptr)sna_pixmap_get_buffer(pixmap);
		if (buffer) {
			private = get_private(buffer);

			DBG(("%s: reusing front buffer attachment, win=%lu %dx%d, pixmap=%ld [%ld] %dx%d, handle=%d, name=%d, active_scanout=%d\n",
			     __FUNCTION__,
			     draw->type != DRAWABLE_PIXMAP ? (long)draw->id : (long)0,
			     draw->width, draw->height,
			     pixmap->drawable.serialNumber,
			     private->pixmap->drawable.serialNumber,
			     pixmap->drawable.width,
			     pixmap->drawable.height,
			     private->bo->handle, buffer->name,
			     private->bo->active_scanout));

			assert(buffer->attachment == DRI2BufferFrontLeft);
			assert(private->pixmap == pixmap);
			assert(sna_pixmap(pixmap)->flush);
			assert(sna_pixmap(pixmap)->pinned & PIN_DRI2);
			assert(kgem_bo_flink(&sna->kgem, private->bo) == buffer->name);
			assert(private->bo->pitch == buffer->pitch);
			assert(private->bo->active_scanout);

			private->refcnt++;
			return buffer;
		}

		bo = sna_pixmap_set_dri(sna, pixmap);
		if (bo == NULL)
			return NULL;

		assert(sna_pixmap(pixmap) != NULL);

		bo = ref(bo);
		if (pixmap == sna->front && !(sna->flags & SNA_LINEAR_FB))
			flags |= CREATE_SCANOUT;
		DBG(("%s: attaching to front buffer %dx%d [%p:%d], scanout? %d\n",
		     __FUNCTION__,
		     pixmap->drawable.width, pixmap->drawable.height,
		     pixmap, pixmap->refcnt, flags & CREATE_SCANOUT));
		size = (uint32_t)pixmap->drawable.height << 16 | pixmap->drawable.width;
		bpp = pixmap->drawable.bitsPerPixel;
		break;

	case DRI2BufferBackLeft:
		/* A window back buffer that matches the screen front may be
		 * pageflipped, so allocate it scanout-capable.
		 */
		if (draw->type != DRAWABLE_PIXMAP) {
			if (dri2_window_get_front((WindowPtr)draw))
				flags |= CREATE_SCANOUT;
			if (draw->width  == sna->front->drawable.width &&
			    draw->height == sna->front->drawable.height &&
			    draw->bitsPerPixel == bpp &&
			    (sna->flags & (SNA_LINEAR_FB | SNA_NO_WAIT | SNA_NO_FLIP)) == 0)
				flags |= CREATE_SCANOUT;
		}
		/* fallthrough */
	case DRI2BufferBackRight:
	case DRI2BufferFrontRight:
	case DRI2BufferFakeFrontLeft:
	case DRI2BufferFakeFrontRight:
		DBG(("%s: creating back buffer %dx%d, suitable for scanout? %d\n",
		     __FUNCTION__,
		     draw->width, draw->height,
		     flags & CREATE_SCANOUT));

		bo = kgem_create_2d(&sna->kgem,
				    draw->width,
				    draw->height,
				    bpp,
				    color_tiling(sna, draw),
				    flags);
		break;

	case DRI2BufferStencil:
		/*
		 * The stencil buffer has quirky pitch requirements.  From Vol
		 * 2a, 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface
		 * Pitch":
		 *    The pitch must be set to 2x the value computed based on
		 *    width, as the stencil buffer is stored with two rows
		 *    interleaved.
		 * To accomplish this, we resort to the nasty hack of doubling
		 * the drm region's cpp and halving its height.
		 *
		 * If we neglect to double the pitch, then
		 * drm_intel_gem_bo_map_gtt() maps the memory incorrectly.
		 *
		 * The alignment for W-tiling is quite different to the
		 * nominal no-tiling case, so we have to account for
		 * the tiled access pattern explicitly.
		 *
		 * The stencil buffer is W tiled. However, we request from
		 * the kernel a non-tiled buffer because the kernel does
		 * not understand W tiling and the GTT is incapable of
		 * W fencing.
		 */
		bpp *= 2;
		bo = kgem_create_2d(&sna->kgem,
				    ALIGN(draw->width, 64),
				    ALIGN((draw->height + 1) / 2, 64),
				    bpp, I915_TILING_NONE, flags);
		break;

	case DRI2BufferDepth:
	case DRI2BufferDepthStencil:
	case DRI2BufferHiz:
	case DRI2BufferAccum:
		bo = kgem_create_2d(&sna->kgem,
				    draw->width, draw->height, bpp,
				    other_tiling(sna, draw),
				    flags);
		break;

	default:
		return NULL;
	}
	if (bo == NULL)
		return NULL;

	/* Single allocation: DRI2Buffer2Rec followed by our private state,
	 * matching the layout assumed by get_private().
	 */
	buffer = calloc(1, sizeof *buffer + sizeof *private);
	if (buffer == NULL)
		goto err;

	private = get_private(buffer);
	buffer->attachment = attachment;
	buffer->pitch = bo->pitch;
	buffer->cpp = bpp / 8;
	buffer->driverPrivate = private;
	buffer->format = format;
	buffer->flags = 0;
	buffer->name = kgem_bo_flink(&sna->kgem, bo);
	private->refcnt = 1;
	private->bo = bo;
	private->pixmap = pixmap;
	private->size = size;

	if (buffer->name == 0)
		goto err;

	if (pixmap) {
		struct sna_pixmap *priv;

		assert(attachment == DRI2BufferFrontLeft);
		assert(sna_pixmap_get_buffer(pixmap) == NULL);

		sna_pixmap_set_buffer(pixmap, buffer);
		assert(sna_pixmap_get_buffer(pixmap) == buffer);
		pixmap->refcnt++;

		priv = sna_pixmap(pixmap);
		assert(priv->flush == false || priv->pinned & PIN_DRI3);
		assert((priv->pinned & PIN_DRI2) == 0);

		/* Don't allow this named buffer to be replaced */
		priv->pinned |= PIN_DRI2;

		/* We need to submit any modifications to and reads from this
		 * buffer before we send any reply to the Client.
		 *
		 * As we don't track which Client, we flush for all.
		 */
		DBG(("%s: adding flush hint to handle=%d\n", __FUNCTION__, priv->gpu_bo->handle));
		priv->gpu_bo->flush = true;
		if (priv->gpu_bo->exec)
			sna->kgem.flush = 1;

		priv->flush |= FLUSH_READ;
		if (draw->type == DRAWABLE_PIXMAP) {
			/* DRI2 renders directly into GLXPixmaps, treat as hostile */
			kgem_bo_unclean(&sna->kgem, priv->gpu_bo);
			sna_damage_all(&priv->gpu_damage, pixmap);
			priv->clear = false;
			priv->cpu = false;
			priv->flush |= FLUSH_WRITE;
		}

		sna_watch_flush(sna, 1);
	}

	return buffer;

err:
	kgem_bo_destroy(&sna->kgem, bo);
	free(buffer);
	return NULL;
}
809
810static void
811sna_dri2_cache_bo(struct sna *sna,
812		  DrawablePtr draw,
813		  struct kgem_bo *bo,
814		  uint32_t name,
815		  uint32_t size,
816		  uint32_t flags)
817{
818	struct dri_bo *c;
819
820	DBG(("%s(handle=%d, name=%d)\n", __FUNCTION__, bo->handle, name));
821
822	if (draw == NULL) {
823		DBG(("%s: no draw, releasing handle=%d\n",
824		     __FUNCTION__, bo->handle));
825		goto err;
826	}
827
828	if (draw->type == DRAWABLE_PIXMAP) {
829		DBG(("%s: not a window, releasing handle=%d\n",
830		     __FUNCTION__, bo->handle));
831		goto err;
832	}
833
834	if (bo->refcnt > 1 + bo->active_scanout) {
835		DBG(("%s: multiple references [%d], releasing handle\n",
836		     __FUNCTION__, bo->refcnt, bo->handle));
837		goto err;
838	}
839
840	if ((draw->height << 16 | draw->width) != size) {
841		DBG(("%s: wrong size [%dx%d], releasing handle\n",
842		     __FUNCTION__,
843		     size & 0xffff, size >> 16,
844		     bo->handle));
845		goto err;
846	}
847
848	if (bo->scanout && front_pitch(draw) != bo->pitch) {
849		DBG(("%s: scanout with pitch change [%d != %d], releasing handle\n",
850		     __FUNCTION__, bo->pitch, front_pitch(draw), bo->handle));
851		goto err;
852	}
853
854	c = malloc(sizeof(*c));
855	if (!c)
856		goto err;
857
858	DBG(("%s: caching handle=%d (name=%d, flags=%d, active_scanout=%d)\n", __FUNCTION__, bo->handle, name, flags, bo->active_scanout));
859
860	c->bo = bo;
861	c->name = name;
862	c->flags = flags;
863	list_add(&c->link, &dri2_window((WindowPtr)draw)->cache);
864	return;
865
866err:
867	kgem_bo_destroy(&sna->kgem, bo);
868}
869
870static void _sna_dri2_destroy_buffer(struct sna *sna,
871				     DrawablePtr draw,
872				     DRI2Buffer2Ptr buffer)
873{
874	struct sna_dri2_private *private = get_private(buffer);
875
876	if (buffer == NULL)
877		return;
878
879	DBG(("%s: %p [handle=%d] -- refcnt=%d, draw=%ld, pixmap=%ld, proxy?=%d\n",
880	     __FUNCTION__, buffer, private->bo->handle, private->refcnt,
881	     draw ? draw->id : 0,
882	     private->pixmap ? private->pixmap->drawable.serialNumber : 0,
883	     private->proxy != NULL));
884	assert(private->refcnt > 0);
885	if (--private->refcnt)
886		return;
887
888	assert(private->bo);
889
890	if (private->proxy) {
891		DBG(("%s: destroying proxy\n", __FUNCTION__));
892		assert(private->bo->active_scanout > 0);
893		private->bo->active_scanout--;
894
895		_sna_dri2_destroy_buffer(sna, draw, private->proxy);
896		private->pixmap = NULL;
897	}
898
899	if (private->pixmap) {
900		PixmapPtr pixmap = private->pixmap;
901		struct sna_pixmap *priv = sna_pixmap(pixmap);
902
903		assert(sna_pixmap_get_buffer(pixmap) == buffer);
904		assert(priv->gpu_bo == private->bo);
905		assert(priv->gpu_bo->flush);
906		assert(priv->pinned & PIN_DRI2);
907		assert(priv->flush);
908
909		DBG(("%s: removing active_scanout=%d from pixmap handle=%d\n",
910		     __FUNCTION__, priv->gpu_bo->active_scanout, priv->gpu_bo->handle));
911		assert(priv->gpu_bo->active_scanout > 0);
912		priv->gpu_bo->active_scanout--;
913
914		/* Undo the DRI markings on this pixmap */
915		DBG(("%s: releasing last DRI pixmap=%ld, scanout?=%d\n",
916		     __FUNCTION__,
917		     pixmap->drawable.serialNumber,
918		     pixmap == sna->front));
919
920		list_del(&priv->flush_list);
921
922		DBG(("%s: dropping flush hint from handle=%d\n", __FUNCTION__, private->bo->handle));
923		priv->pinned &= ~PIN_DRI2;
924
925		if ((priv->pinned & PIN_DRI3) == 0) {
926			priv->gpu_bo->flush = false;
927			priv->flush = false;
928		}
929		sna_watch_flush(sna, -1);
930
931		sna_pixmap_set_buffer(pixmap, NULL);
932		pixmap->drawable.pScreen->DestroyPixmap(pixmap);
933	}
934
935	sna_dri2_cache_bo(sna, draw,
936			  private->bo,
937			  buffer->name,
938			  private->size,
939			  buffer->flags);
940	free(buffer);
941}
942
/* DRI2 DestroyBuffer hook: thin wrapper resolving the sna instance. */
static void sna_dri2_destroy_buffer(DrawablePtr draw, DRI2Buffer2Ptr buffer)
{
	_sna_dri2_destroy_buffer(to_sna_from_drawable(draw), draw, buffer);
}
947
/* Take an extra reference on a DRI2 buffer, returning it for chaining. */
static DRI2BufferPtr sna_dri2_reference_buffer(DRI2BufferPtr buffer)
{
	assert(get_private(buffer)->refcnt > 0);
	get_private(buffer)->refcnt++;
	return buffer;
}
954
/* Record a DRI2 copy's damage on the pixmap: with region == NULL the
 * whole pixmap becomes GPU damage; otherwise the region is moved from
 * the CPU-damage set to the GPU-damage set.
 */
static inline void damage(PixmapPtr pixmap, struct sna_pixmap *priv, RegionPtr region)
{
	assert(priv->gpu_bo);
	if (DAMAGE_IS_ALL(priv->gpu_damage))
		goto done;

	if (region == NULL) {
damage_all:
		priv->gpu_damage = _sna_damage_all(priv->gpu_damage,
						   pixmap->drawable.width,
						   pixmap->drawable.height);
		sna_damage_destroy(&priv->cpu_damage);
		list_del(&priv->flush_list);
	} else {
		sna_damage_subtract(&priv->cpu_damage, region);
		/* If that consumed the last of the CPU damage, promote to
		 * the cheaper all-GPU representation.
		 */
		if (priv->cpu_damage == NULL)
			goto damage_all;
		sna_damage_add(&priv->gpu_damage, region);
	}
done:
	priv->cpu = false;
	priv->clear = false;
}
978
/* Replace the pixmap's GPU bo with the given bo (e.g. after a buffer
 * exchange), transferring the flush hint and posting full-pixmap damage
 * so downstream listeners observe the new contents.
 */
static void set_bo(PixmapPtr pixmap, struct kgem_bo *bo)
{
	struct sna *sna = to_sna_from_pixmap(pixmap);
	struct sna_pixmap *priv = sna_pixmap(pixmap);

	DBG(("%s: pixmap=%ld, handle=%d (old handle=%d)\n",
	     __FUNCTION__, pixmap->drawable.serialNumber, bo->handle, priv->gpu_bo->handle));

	assert(pixmap->drawable.width * pixmap->drawable.bitsPerPixel <= 8*bo->pitch);
	assert(pixmap->drawable.height * bo->pitch <= kgem_bo_size(bo));
	assert(bo->proxy == NULL);
	assert(priv->pinned & PIN_DRI2);
	assert((priv->pinned & (PIN_PRIME | PIN_DRI3)) == 0);
	assert(priv->flush);

	if (APPLY_DAMAGE) {
		RegionRec region;

		/* Post damage on the new front buffer so that listeners, such
		 * as DisplayLink know take a copy and shove it over the USB,
		 * also for software cursors and the like.
		 */
		region.extents.x1 = region.extents.y1 = 0;
		region.extents.x2 = pixmap->drawable.width;
		region.extents.y2 = pixmap->drawable.height;
		region.data = NULL;

		/*
		 * Eeek, beware the sw cursor copying to the old bo
		 * causing recursion and mayhem.
		 */
		DBG(("%s: marking whole pixmap as damaged\n", __FUNCTION__));
		sna->ignore_copy_area = sna->flags & SNA_TEAR_FREE;
		DamageRegionAppend(&pixmap->drawable, &region);
	}

	damage(pixmap, priv, NULL);

	assert(bo->refcnt);
	if (priv->move_to_gpu) {
		DBG(("%s: applying final/discard move-to-gpu\n", __FUNCTION__));
		priv->move_to_gpu(sna, priv, 0);
	}
	if (priv->gpu_bo != bo) {
		DBG(("%s: dropping flush hint from handle=%d\n", __FUNCTION__, priv->gpu_bo->handle));
		priv->gpu_bo->flush = false;
		if (priv->cow)
			sna_pixmap_undo_cow(sna, priv, 0);
		/* NOTE(review): gpu_bo was already dereferenced just above,
		 * so this NULL test looks redundant — confirm before removing.
		 */
		if (priv->gpu_bo) {
			sna_pixmap_unmap(pixmap, priv);
			kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
		}
		DBG(("%s: adding flush hint to handle=%d\n", __FUNCTION__, bo->handle));
		bo->flush = true;
		if (bo->exec)
			sna->kgem.flush = 1;
		priv->gpu_bo = ref(bo);
	}
	if (bo->domain != DOMAIN_GPU)
		bo->domain = DOMAIN_NONE;
	assert(bo->flush);

	if (APPLY_DAMAGE) {
		sna->ignore_copy_area = false;
		DamageRegionProcessPending(&pixmap->drawable);
	}
}
1046
1047#if defined(__GNUC__)
1048#define popcount(x) __builtin_popcount(x)
1049#else
/* Count the set bits in x (fallback for compilers without the
 * __builtin_popcount intrinsic).  Uses the classic bit-clearing trick:
 * x &= x - 1 removes the lowest set bit, so the loop runs once per bit.
 */
static int popcount(unsigned int x)
{
	int bits;

	for (bits = 0; x != 0; bits++)
		x &= x - 1;

	return bits;
}
1061#endif
1062
/* Select the engine (kgem mode) on which to perform a DRI2 copy.
 *
 * Before gen6 there is only a single ring, so there is no choice to
 * make. For a synchronised (vblank-paced) copy we force the ring that
 * performs the scanline wait. Otherwise, query the kernel for which
 * ring last touched the source (or destination) and keep the copy on
 * that same ring, avoiding an inter-ring synchronisation stall.
 */
static void sna_dri2_select_mode(struct sna *sna, struct kgem_bo *dst, struct kgem_bo *src, bool sync)
{
	struct drm_i915_gem_busy busy;
	int mode;

	/* Single ring before gen6: nothing to select */
	if (sna->kgem.gen < 060)
		return;

	if (sync) {
		/* Force the ring used for the scanline wait */
		DBG(("%s: sync, force %s ring\n", __FUNCTION__,
		     sna->kgem.gen >= 070 ? "BLT" : "RENDER"));
		kgem_set_mode(&sna->kgem,
			      sna->kgem.gen >= 070 ? KGEM_BLT : KGEM_RENDER,
			      dst);
		return;
	}

	if (DBG_FORCE_COPY != -1) {
		/* Debug override: always use the configured engine */
		DBG(("%s: forcing %d\n", __FUNCTION__, DBG_FORCE_COPY));
		kgem_set_mode(&sna->kgem, DBG_FORCE_COPY, dst);
		return;
	}

	if (sna->kgem.mode != KGEM_NONE) {
		/* A batch is already in flight; keep appending to it */
		DBG(("%s: busy, not switching\n", __FUNCTION__));
		return;
	}

	if (sna->render_state.gt < 2 && sna->kgem.has_semaphores) {
		/* Small GT with semaphores switches rings cheaply, so the
		 * busy-ioctl query below is not worth its cost.
		 */
		DBG(("%s: small GT [%d], not forcing selection\n",
		     __FUNCTION__, sna->render_state.gt));
		return;
	}

	/* Ask the kernel which ring, if any, is still using the source */
	VG_CLEAR(busy);
	busy.handle = src->handle;
	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
		return;

	DBG(("%s: src handle=%d busy?=%x\n", __FUNCTION__, busy.handle, busy.busy));
	if (busy.busy == 0) {
		__kgem_bo_clear_busy(src);

		/* Source idle; check the destination instead */
		busy.handle = dst->handle;
		if (drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
			return;

		DBG(("%s: dst handle=%d busy?=%x\n", __FUNCTION__, busy.handle, busy.busy));
		if (busy.busy == 0) {
			__kgem_bo_clear_busy(dst);
			DBG(("%s: src/dst is idle, using defaults\n", __FUNCTION__));
			return;
		}
	}

	/* Sandybridge introduced a separate ring which it uses to
	 * perform blits. Switching rendering between rings incurs
	 * a stall as we wait upon the old ring to finish and
	 * flush its render cache before we can proceed on with
	 * the operation on the new ring.
	 *
	 * As this buffer, we presume, has just been written to by
	 * the DRI client using the RENDER ring, we want to perform
	 * our operation on the same ring, and ideally on the same
	 * ring as we will flip from (which should be the RENDER ring
	 * as well).
	 *
	 * The ultimate question is whether preserving the ring outweighs
	 * the cost of the query.
	 */
	mode = KGEM_RENDER;
	if ((busy.busy & 0xffff) == I915_EXEC_BLT)
		mode = KGEM_BLT;
	kgem_bo_mark_busy(&sna->kgem,
			  busy.handle == src->handle ? src : dst,
			  mode);
	_kgem_set_mode(&sna->kgem, mode);
}
1141
1142static bool is_front(int attachment)
1143{
1144	return attachment == DRI2BufferFrontLeft;
1145}
1146
1147#define DRI2_SYNC 0x1
1148#define DRI2_DAMAGE 0x2
1149#define DRI2_BO 0x4
/* Perform the copy between a pair of DRI2 buffers for a drawable.
 *
 * Exactly one of src/dst is the front-left (window) buffer; the other
 * is an offscreen DRI2 buffer. The copy is clipped to the drawable
 * (and window clip list), optionally paced to the vblank (DRI2_SYNC),
 * and the front-buffer side is kept coherent with the pixmap's damage
 * tracking (DRI2_DAMAGE/APPLY_DAMAGE).
 *
 * Returns a new reference to the bo tracking the copy's batch when the
 * caller requested a completion fence (DRI2_SYNC | DRI2_BO) and one is
 * available, otherwise NULL (also NULL when everything was clipped
 * away). The caller owns the returned reference.
 */
static struct kgem_bo *
__sna_dri2_copy_region(struct sna *sna, DrawablePtr draw, RegionPtr region,
		      DRI2BufferPtr src, DRI2BufferPtr dst,
		      unsigned flags)
{
	PixmapPtr pixmap = get_drawable_pixmap(draw);
	DrawableRec scratch, *src_draw = &pixmap->drawable, *dst_draw = &pixmap->drawable;
	struct sna_dri2_private *src_priv = get_private(src);
	struct sna_dri2_private *dst_priv = get_private(dst);
	pixman_region16_t clip;
	struct kgem_bo *bo = NULL;
	struct kgem_bo *src_bo;
	struct kgem_bo *dst_bo;
	const BoxRec *boxes;
	int16_t dx, dy, sx, sy;
	unsigned hint;
	int n;

	/* To hide a stale DRI2Buffer, one may choose to substitute
	 * pixmap->gpu_bo instead of dst/src->bo, however you then run
	 * the risk of copying around invalid data. So either you may not
	 * see the results of the copy, or you may see the wrong pixels.
	 * Either way you eventually lose.
	 *
	 * We also have to be careful in case that the stale buffers are
	 * now attached to invalid (non-DRI) pixmaps.
	 */

	assert(is_front(dst->attachment) || is_front(src->attachment));
	assert(dst->attachment != src->attachment);

	/* Start with the whole drawable as the clip */
	clip.extents.x1 = draw->x;
	clip.extents.y1 = draw->y;
	clip.extents.x2 = draw->x + draw->width;
	clip.extents.y2 = draw->y + draw->height;
	clip.data = NULL;

	if (region) {
		/* Caller's region is drawable-relative; move it into
		 * pixmap space before intersecting.
		 */
		pixman_region_translate(region, draw->x, draw->y);
		pixman_region_intersect(&clip, &clip, region);
		region = &clip;
	}

	if (clip.extents.x1 >= clip.extents.x2 ||
	    clip.extents.y1 >= clip.extents.y2) {
		DBG(("%s: all clipped\n", __FUNCTION__));
		return NULL;
	}

	/* The offscreen buffer is addressed relative to the drawable
	 * origin, so bias whichever side is not the front buffer.
	 */
	sx = sy = dx = dy = 0;
	if (is_front(dst->attachment)) {
		sx = -draw->x;
		sy = -draw->y;
	} else {
		dx = -draw->x;
		dy = -draw->y;
	}
	if (draw->type == DRAWABLE_WINDOW) {
		WindowPtr win = (WindowPtr)draw;
		int16_t tx, ty;

		if (is_clipped(&win->clipList, draw)) {
			DBG(("%s: draw=(%d, %d), delta=(%d, %d), draw=(%d, %d),(%d, %d), clip.extents=(%d, %d), (%d, %d)\n",
			     __FUNCTION__, draw->x, draw->y,
			     get_drawable_dx(draw), get_drawable_dy(draw),
			     clip.extents.x1, clip.extents.y1,
			     clip.extents.x2, clip.extents.y2,
			     win->clipList.extents.x1, win->clipList.extents.y1,
			     win->clipList.extents.x2, win->clipList.extents.y2));

			/* Restrict the copy to the visible portion of
			 * the window.
			 */
			assert(region == NULL || region == &clip);
			pixman_region_intersect(&clip, &win->clipList, &clip);
			if (!pixman_region_not_empty(&clip)) {
				DBG(("%s: all clipped\n", __FUNCTION__));
				return NULL;
			}

			region = &clip;
		}

		/* Account for a (composite) redirection offset between
		 * window and backing pixmap coordinates.
		 */
		if (get_drawable_deltas(draw, pixmap, &tx, &ty)) {
			if (is_front(dst->attachment)) {
				pixman_region_translate(region ?: &clip, tx, ty);
				sx -= tx;
				sy -= ty;
			} else {
				sx += tx;
				sy += ty;
			}
		}
	} else
		flags &= ~DRI2_SYNC;

	/* A throwaway DrawableRec describing the offscreen buffer for
	 * the copy routines; width/height filled in per side below.
	 */
	scratch.pScreen = draw->pScreen;
	scratch.x = scratch.y = 0;
	scratch.width = scratch.height = 0;
	scratch.depth = draw->depth;
	scratch.bitsPerPixel = draw->bitsPerPixel;

	src_bo = src_priv->bo;
	assert(src_bo->refcnt);
	kgem_bo_unclean(&sna->kgem, src_bo);
	if (is_front(src->attachment)) {
		struct sna_pixmap *priv;

		/* Read from the pixmap's current GPU bo, which may have
		 * been replaced since the DRI2 buffer was created.
		 */
		priv = sna_pixmap_move_to_gpu(pixmap, MOVE_READ);
		if (priv)
			src_bo = priv->gpu_bo;
		DBG(("%s: updated FrontLeft src_bo from handle=%d to handle=%d\n",
		     __FUNCTION__, src_priv->bo->handle, src_bo->handle));
		assert(src_bo->refcnt);
	} else {
		RegionRec source;

		/* size packs width in the low 16 bits, height above */
		scratch.width = src_priv->size & 0xffff;
		scratch.height = src_priv->size >> 16;
		src_draw = &scratch;

		DBG(("%s: source size %dx%d, region size %dx%d, src offset %dx%d\n",
		     __FUNCTION__,
		     scratch.width, scratch.height,
		     clip.extents.x2 - clip.extents.x1,
		     clip.extents.y2 - clip.extents.y1,
		     -sx, -sy));

		/* Clip away any portion that lies outside the source bo */
		source.extents.x1 = -sx;
		source.extents.y1 = -sy;
		source.extents.x2 = source.extents.x1 + scratch.width;
		source.extents.y2 = source.extents.y1 + scratch.height;
		source.data = NULL;

		assert(region == NULL || region == &clip);
		pixman_region_intersect(&clip, &clip, &source);

		if (!pixman_region_not_empty(&clip)) {
			DBG(("%s: region doesn't overlap pixmap\n", __FUNCTION__));
			return NULL;
		}
	}

	dst_bo = dst_priv->bo;
	assert(dst_bo->refcnt);
	if (is_front(dst->attachment)) {
		struct sna_pixmap *priv;
		struct list shadow;

		/* Preserve the CRTC shadow overrides */
		sna_shadow_steal_crtcs(sna, &shadow);

		hint = MOVE_WRITE | __MOVE_FORCE;
		if (clip.data)
			hint |= MOVE_READ;

		/* Write into the pixmap's current GPU bo and record the
		 * copied area as damaged.
		 */
		assert(region == NULL || region == &clip);
		priv = sna_pixmap_move_area_to_gpu(pixmap, &clip.extents, hint);
		if (priv) {
			damage(pixmap, priv, region ?: &clip);
			dst_bo = priv->gpu_bo;
		}
		DBG(("%s: updated FrontLeft dst_bo from handle=%d to handle=%d\n",
		     __FUNCTION__, dst_priv->bo->handle, dst_bo->handle));
		assert(dst_bo->refcnt);

		sna_shadow_unsteal_crtcs(sna, &shadow);
	} else {
		RegionRec target;

		scratch.width = dst_priv->size & 0xffff;
		scratch.height = dst_priv->size >> 16;
		dst_draw = &scratch;

		DBG(("%s: target size %dx%d, region size %dx%d\n",
		     __FUNCTION__,
		     scratch.width, scratch.height,
		     clip.extents.x2 - clip.extents.x1,
		     clip.extents.y2 - clip.extents.y1));

		/* Clip away any portion outside the destination bo */
		target.extents.x1 = -dx;
		target.extents.y1 = -dy;
		target.extents.x2 = target.extents.x1 + scratch.width;
		target.extents.y2 = target.extents.y1 + scratch.height;
		target.data = NULL;

		assert(region == NULL || region == &clip);
		pixman_region_intersect(&clip, &clip, &target);

		/* Only front-buffer copies are vblank-synchronised */
		flags &= ~DRI2_SYNC;
	}

	if (!wedged(sna)) {
		xf86CrtcPtr crtc;

		/* If syncing, find the CRTC covering the copy and stall
		 * for its scanline to pass before emitting the blit.
		 */
		crtc = NULL;
		if (flags & DRI2_SYNC && sna_pixmap_is_scanout(sna, pixmap))
			crtc = sna_covering_crtc(sna, &clip.extents, NULL);
		sna_dri2_select_mode(sna, dst_bo, src_bo, crtc != NULL);

		if (crtc == NULL ||
		    !sna_wait_for_scanline(sna, pixmap, crtc, &clip.extents))
			flags &= ~DRI2_SYNC;
	}

	if (region) {
		boxes = region_rects(region);
		n = region_num_rects(region);
		assert(n);
	} else {
		region = &clip;
		boxes = &clip.extents;
		n = 1;
	}
	if (APPLY_DAMAGE || flags & DRI2_DAMAGE) {
		DBG(("%s: marking region as damaged\n", __FUNCTION__));
		/* Beware the sw cursor copying back into the frontbuffer
		 * and recursing; suppress CopyArea while we damage.
		 */
		sna->ignore_copy_area = sna->flags & SNA_TEAR_FREE;
		DamageRegionAppend(&pixmap->drawable, region);
	}

	DBG(("%s: copying [(%d, %d), (%d, %d)]x%d src=(%d, %d), dst=(%d, %d)\n",
	     __FUNCTION__,
	     boxes[0].x1, boxes[0].y1,
	     boxes[0].x2, boxes[0].y2,
	     n, sx, sy, dx, dy));

	hint = COPY_LAST | COPY_DRI;
	if (flags & DRI2_SYNC)
		hint |= COPY_SYNC;
	/* Fall back to a CPU copy if the GPU path refuses */
	if (!sna->render.copy_boxes(sna, GXcopy,
				    src_draw, src_bo, sx, sy,
				    dst_draw, dst_bo, dx, dy,
				    boxes, n, hint))
		memcpy_copy_boxes(sna, GXcopy,
				  src_draw, src_bo, sx, sy,
				  dst_draw, dst_bo, dx, dy,
				  boxes, n, hint);

	sna->needs_dri_flush = true;
	if (flags & (DRI2_SYNC | DRI2_BO)) { /* STAT! */
		/* Grab a reference to the batch bo so the caller can
		 * wait for (or poll) completion of this copy.
		 */
		struct kgem_request *rq = RQ(dst_bo->rq);
		if (rq && rq != (void *)&sna->kgem) {
			if (rq->bo == NULL)
				kgem_submit(&sna->kgem);
			if (rq->bo) { /* Becareful in case the gpu is wedged */
				bo = ref(rq->bo);
				DBG(("%s: recording sync fence handle=%d\n",
				     __FUNCTION__, bo->handle));
			}
		}
	}

	if (APPLY_DAMAGE || flags & DRI2_DAMAGE) {
		sna->ignore_copy_area = false;
		DamageRegionProcessPending(&pixmap->drawable);
	}

	if (clip.data)
		pixman_region_fini(&clip);

	return bo;
}
1409
/* DRI2 CopyRegion entry point: copy the given region from src to dst
 * for the drawable, applying damage but without vblank synchronisation
 * or a completion fence (fire-and-forget).
 */
static void
sna_dri2_copy_region(DrawablePtr draw,
		     RegionPtr region,
		     DRI2BufferPtr dst,
		     DRI2BufferPtr src)
{
	PixmapPtr pixmap = get_drawable_pixmap(draw);
	struct sna *sna = to_sna_from_pixmap(pixmap);

	DBG(("%s: pixmap=%ld, src=%u (refs=%d/%d, flush=%d, attach=%d) , dst=%u (refs=%d/%d, flush=%d, attach=%d)\n",
	     __FUNCTION__,
	     pixmap->drawable.serialNumber,
	     get_private(src)->bo->handle,
	     get_private(src)->refcnt,
	     get_private(src)->bo->refcnt,
	     get_private(src)->bo->flush,
	     src->attachment,
	     get_private(dst)->bo->handle,
	     get_private(dst)->refcnt,
	     get_private(dst)->bo->refcnt,
	     get_private(dst)->bo->flush,
	     dst->attachment));

	assert(src != dst);

	assert(get_private(src)->refcnt);
	assert(get_private(dst)->refcnt);

	assert(get_private(src)->bo != get_private(dst)->bo);

	assert(get_private(src)->bo->refcnt);
	assert(get_private(dst)->bo->refcnt);

	DBG(("%s: region (%d, %d), (%d, %d) x %d\n",
	     __FUNCTION__,
	     region->extents.x1, region->extents.y1,
	     region->extents.x2, region->extents.y2,
	     region_num_rects(region)));

	/* Note: DRI2 passes (dst, src); __sna_dri2_copy_region takes
	 * (src, dst). The returned fence bo is not needed here.
	 */
	__sna_dri2_copy_region(sna, draw, region, src, dst, DRI2_DAMAGE);
}
1451
1452inline static uint32_t pipe_select(int pipe)
1453{
1454	/* The third pipe was introduced with IvyBridge long after
1455	 * multiple pipe support was added to the kernel, hence
1456	 * we can safely ignore the capability check - if we have more
1457	 * than two pipes, we can assume that they are fully supported.
1458	 */
1459	assert(pipe < _DRM_VBLANK_HIGH_CRTC_MASK);
1460	if (pipe > 1)
1461		return pipe << DRM_VBLANK_HIGH_CRTC_SHIFT;
1462	else if (pipe > 0)
1463		return DRM_VBLANK_SECONDARY;
1464	else
1465		return 0;
1466}
1467
/* Queue a DRM event for the next vblank on the event's pipe.
 *
 * On success the event is marked queued (its completion will arrive
 * via the DRM event fd) and true is returned; false if the ioctl
 * failed and the event remains unqueued.
 */
static inline bool sna_next_vblank(struct sna_dri2_event *info)
{
	union drm_wait_vblank vbl;

	DBG(("%s(pipe=%d, waiting until next vblank)\n",
	     __FUNCTION__, info->pipe));
	assert(info->pipe != -1);

	VG_CLEAR(vbl);
	/* Relative request: fire one vblank from now, delivered as an
	 * event carrying this info pointer as its cookie.
	 */
	vbl.request.type =
		DRM_VBLANK_RELATIVE |
		DRM_VBLANK_EVENT |
		pipe_select(info->pipe);
	vbl.request.sequence = 1;
	vbl.request.signal = (uintptr_t)info;

	assert(!info->queued);
	if (drmIoctl(info->sna->kgem.fd, DRM_IOCTL_WAIT_VBLANK, &vbl))
		return false;

	info->queued = true;
	return true;
}
1491
/* Queue a DRM event for an absolute vblank sequence number on the
 * event's pipe.
 *
 * On success the event is marked queued and true is returned; false
 * if the ioctl failed and the event remains unqueued.
 */
static inline bool sna_wait_vblank(struct sna_dri2_event *info,
				   unsigned seq)
{
	union drm_wait_vblank vbl;

	DBG(("%s(pipe=%d, waiting until vblank %u)\n",
	     __FUNCTION__, info->pipe, seq));
	assert(info->pipe != -1);

	VG_CLEAR(vbl);
	/* Absolute request: fire when the pipe's vblank counter reaches
	 * seq, delivered as an event with this info as its cookie.
	 */
	vbl.request.type =
		DRM_VBLANK_ABSOLUTE |
		DRM_VBLANK_EVENT |
		pipe_select(info->pipe);
	vbl.request.sequence = seq;
	vbl.request.signal = (uintptr_t)info;

	assert(!info->queued);
	if (drmIoctl(info->sna->kgem.fd, DRM_IOCTL_WAIT_VBLANK, &vbl))
		return false;

	info->queued = true;
	return true;
}
1516
1517#if DRI2INFOREC_VERSION >= 4
1518
/* Attach the per-window DRI2 state to a window that has none yet.
 * The state lives in the second slot of the sna window private (the
 * same layout dri2_window() reads back).
 */
static void dri2_window_attach(WindowPtr win, struct dri2_window *priv)
{
	assert(win->drawable.type == DRAWABLE_WINDOW);
	assert(dri2_window(win) == NULL);
	((void **)__get_private(win, sna_window_key))[1] = priv;
	assert(dri2_window(win) == priv);
}
1526
/* Convert a CRTC msc into the drawable's msc space.
 *
 * Each window tracks a delta against the raw CRTC counter so that its
 * reported msc stays monotonic even when the window migrates between
 * CRTCs. Lazily allocates the per-window DRI2 state on first use; on
 * a CRTC change, folds the difference between the two CRTCs' last
 * swap counts into the delta.
 */
static uint64_t
draw_current_msc(DrawablePtr draw, xf86CrtcPtr crtc, uint64_t msc)
{
	struct dri2_window *priv;

	assert(draw);
	/* Pixmaps have no vblank source; the msc passes through */
	if (draw->type != DRAWABLE_WINDOW)
		return msc;

	priv = dri2_window((WindowPtr)draw);
	if (priv == NULL) {
		/* First use: attach fresh state with a zero delta.
		 * On malloc failure we simply return the raw msc.
		 */
		priv = malloc(sizeof(*priv));
		if (priv != NULL) {
			priv->front = NULL;
			priv->crtc = crtc;
			priv->msc_delta = 0;
			priv->chain = NULL;
			priv->cache_size = 0;
			list_init(&priv->cache);
			dri2_window_attach((WindowPtr)draw, priv);
		}
	} else {
		if (priv->crtc != crtc) {
			/* Window moved to a different CRTC: adjust the
			 * delta so the reported msc does not jump.
			 */
			const struct ust_msc *last = sna_crtc_last_swap(priv->crtc);
			const struct ust_msc *this = sna_crtc_last_swap(crtc);
			DBG(("%s: Window transferring from pipe=%d [msc=%llu] to pipe=%d [msc=%llu], delta now %lld\n",
			     __FUNCTION__,
			     sna_crtc_pipe(priv->crtc), (long long)last->msc,
			     sna_crtc_pipe(crtc), (long long)this->msc,
			     (long long)(priv->msc_delta + this->msc - last->msc)));
			priv->msc_delta += this->msc - last->msc;
			priv->crtc = crtc;
		}
		msc -= priv->msc_delta;
	}
	return  msc;
}
1564
/* Convert a drawable-space target msc into the raw 32-bit vblank
 * sequence number used by the kernel for this window's CRTC.
 * Note the deliberate truncation to 32 bits: the hardware counter
 * is 32-bit and wraps.
 */
static uint32_t
draw_target_seq(DrawablePtr draw, uint64_t msc)
{
	struct dri2_window *priv = dri2_window((WindowPtr)draw);
	if (priv == NULL)
		return msc;
	DBG(("%s: converting target_msc=%llu to seq %u\n",
	     __FUNCTION__, (long long)msc, (unsigned)(msc + priv->msc_delta)));
	return msc + priv->msc_delta;
}
1575
/* Find the CRTC covering (most of) the drawable, or NULL for a
 * pixmap (no scanout) or an offscreen window.
 */
static xf86CrtcPtr
sna_dri2_get_crtc(DrawablePtr draw)
{
	if (draw->type == DRAWABLE_PIXMAP)
		return NULL;

	/* Make sure the CRTC is valid and this is the real front buffer */
	return sna_covering_crtc(to_sna_from_drawable(draw),
				 &((WindowPtr)draw)->clipList.extents,
				 NULL);
}
1587
1588static void frame_swap_complete(struct sna_dri2_event *frame, int type)
1589{
1590	const struct ust_msc *swap;
1591
1592	assert(frame->signal);
1593	frame->signal = false;
1594
1595	if (frame->client == NULL) {
1596		DBG(("%s: client already gone\n", __FUNCTION__));
1597		return;
1598	}
1599
1600	assert(frame->draw);
1601
1602	swap = sna_crtc_last_swap(frame->crtc);
1603	DBG(("%s(type=%d): draw=%ld, pipe=%d, frame=%lld [msc=%lld], tv=%d.%06d\n",
1604	     __FUNCTION__, type, (long)frame->draw->id, frame->pipe,
1605	     (long long)swap->msc,
1606	     (long long)draw_current_msc(frame->draw, frame->crtc, swap->msc),
1607	     swap->tv_sec, swap->tv_usec));
1608
1609	DRI2SwapComplete(frame->client, frame->draw,
1610			 draw_current_msc(frame->draw, frame->crtc, swap->msc),
1611			 swap->tv_sec, swap->tv_usec,
1612			 type, frame->event_complete, frame->event_data);
1613}
1614
/* Deliver a DRI2SwapComplete event without a real flip/blit having
 * occurred (e.g. the swap was dropped or degenerate). Falls back to
 * the primary CRTC's last-swap timestamp when no CRTC is given.
 */
static void fake_swap_complete(struct sna *sna, ClientPtr client,
			       DrawablePtr draw, xf86CrtcPtr crtc,
			       int type, DRI2SwapEventPtr func, void *data)
{
	const struct ust_msc *swap;

	assert(draw);

	if (crtc == NULL)
		crtc = sna_primary_crtc(sna);

	swap = sna_crtc_last_swap(crtc);
	DBG(("%s(type=%d): draw=%ld, pipe=%d, frame=%lld [msc %lld], tv=%d.%06d\n",
	     __FUNCTION__, type, (long)draw->id, crtc ? sna_crtc_pipe(crtc) : -1,
	     (long long)swap->msc,
	     (long long)draw_current_msc(draw, crtc, swap->msc),
	     swap->tv_sec, swap->tv_usec));

	DRI2SwapComplete(client, draw,
			 draw_current_msc(draw, crtc, swap->msc),
			 swap->tv_sec, swap->tv_usec,
			 type, func, data);
}
1638
/* Unlink an event from its window's singly-linked swap chain.
 *
 * If the event was the head of the chain and the chain is now empty,
 * trim the window's back-buffer cache down to at most one entry
 * (the list head's first element is kept; the rest are released).
 */
static void
sna_dri2_remove_event(struct sna_dri2_event *info)
{
	WindowPtr win = (WindowPtr)info->draw;
	struct dri2_window *priv;

	assert(win->drawable.type == DRAWABLE_WINDOW);
	DBG(("%s: remove[%p] from window %ld, active? %d\n",
	     __FUNCTION__, info, (long)win->drawable.id, info->draw != NULL));
	assert(!info->signal);

	priv = dri2_window(win);
	assert(priv);
	assert(priv->chain != NULL);
	assert(info->chained);
	info->chained = false;

	if (priv->chain != info) {
		/* Mid-chain removal: walk to the predecessor and splice */
		struct sna_dri2_event *chain = priv->chain;
		while (chain->chain != info) {
			assert(chain->chained);
			chain = chain->chain;
		}
		assert(chain != info);
		assert(info->chain != chain);
		chain->chain = info->chain;
		return;
	}

	priv->chain = info->chain;
	if (priv->chain == NULL) {
		struct dri_bo *c, *tmp;

		/* No more swaps pending: release all cached buffers
		 * except the first (kept for quick reuse).
		 */
		c = list_entry(priv->cache.next->next, struct dri_bo, link);
		list_for_each_entry_safe_from(c, tmp, &priv->cache, link) {
			list_del(&c->link);

			DBG(("%s: releasing cached handle=%d\n", __FUNCTION__, c->bo ? c->bo->handle : 0));
			assert(c->bo);
			kgem_bo_destroy(&info->sna->kgem, c->bo);
			free(c);
		}
	}
}
1683
/* Destroy a swap event and drop every reference it holds: the pending
 * flip marker, its place in the window chain, its front/back DRI2
 * buffers and its fence bo. Must not be called while the event is
 * still queued in the kernel or has a signal outstanding.
 */
static void
sna_dri2_event_free(struct sna_dri2_event *info)
{
	DBG(("%s(draw?=%d)\n", __FUNCTION__, info->draw != NULL));
	assert(!info->queued);
	assert(!info->signal);
	assert(info->pending.bo == NULL);

	if (info->sna->dri2.flip_pending == info)
		info->sna->dri2.flip_pending = NULL;
	assert(info->sna->dri2.flip_pending != info);
	if (info->chained)
		sna_dri2_remove_event(info);

	assert((info->front == NULL && info->back == NULL) || info->front != info->back);
	_sna_dri2_destroy_buffer(info->sna, info->draw, info->front);
	_sna_dri2_destroy_buffer(info->sna, info->draw, info->back);

	if (info->bo) {
		DBG(("%s: releasing batch handle=%d\n", __FUNCTION__, info->bo->handle));
		kgem_bo_destroy(&info->sna->kgem, info->bo);
	}

	/* _list_del tolerates an event never added to a client list */
	_list_del(&info->link);
	free(info);
}
1710
/* ClientStateCallback: when a DRI2 client disconnects, cancel all of
 * its outstanding swap events. Pending signals are dropped (no one is
 * left to receive them), pending scanout references are released, and
 * each event is freed unless it is still queued in the kernel (in
 * which case the vblank/flip handler frees it on completion).
 */
static void
sna_dri2_client_gone(CallbackListPtr *list, void *closure, void *data)
{
	NewClientInfoRec *clientinfo = data;
	ClientPtr client = clientinfo->client;
	struct sna_client *priv = sna_client(client);
	struct sna *sna = closure;

	/* Client never registered any DRI2 events */
	if (priv->events.next == NULL)
		return;

	if (client->clientState != ClientStateGone)
		return;

	DBG(("%s(active?=%d)\n", __FUNCTION__,
	     !list_is_empty(&priv->events)));

	while (!list_is_empty(&priv->events)) {
		struct sna_dri2_event *event;

		event = list_first_entry(&priv->events, struct sna_dri2_event, link);
		assert(event->client == client);
		list_del(&event->link);
		event->signal = false;

		if (event->pending.bo) {
			/* Drop the queued-swap scanout reference */
			assert(event->pending.bo->active_scanout > 0);
			event->pending.bo->active_scanout--;

			kgem_bo_destroy(&sna->kgem, event->pending.bo);
			event->pending.bo = NULL;
		}

		if (event->chained)
			sna_dri2_remove_event(event);

		event->client = NULL;
		event->draw = NULL;
		event->keepalive = 1;
		assert(!event->signal);

		/* Still queued in the kernel: the completion handler
		 * will free it later.
		 */
		if (!event->queued)
			sna_dri2_event_free(event);
	}

	/* Last DRI2 client gone: unhook the callback */
	if (--sna->dri2.client_count == 0)
		DeleteCallback(&ClientStateCallback, sna_dri2_client_gone, sna);
}
1759
/* Register an event with its owning client so it can be cancelled if
 * the client disconnects. The first event of the first client also
 * installs the ClientStateCallback. Returns false if the callback
 * could not be added (the event is left unregistered).
 */
static bool add_event_to_client(struct sna_dri2_event *info, struct sna *sna, ClientPtr client)
{
	struct sna_client *priv = sna_client(client);

	if (priv->events.next == NULL) {
		if (sna->dri2.client_count++ == 0 &&
		    !AddCallback(&ClientStateCallback, sna_dri2_client_gone, sna))
			return false;

		list_init(&priv->events);
	}

	list_add(&info->link, &priv->events);
	info->client = client;
	return true;
}
1776
/* Allocate a new swap event for a window and append it to the tail of
 * the window's swap chain (events complete in FIFO order per window).
 * Returns NULL if the window has no DRI2 state, on allocation failure,
 * or if the event could not be registered with the client.
 */
static struct sna_dri2_event *
sna_dri2_add_event(struct sna *sna,
		   DrawablePtr draw,
		   ClientPtr client,
		   xf86CrtcPtr crtc)
{
	struct dri2_window *priv;
	struct sna_dri2_event *info, *chain;

	assert(draw != NULL);
	assert(draw->type == DRAWABLE_WINDOW);
	DBG(("%s: adding event to window %ld)\n",
	     __FUNCTION__, (long)draw->id));

	priv = dri2_window((WindowPtr)draw);
	if (priv == NULL)
		return NULL;

	info = calloc(1, sizeof(struct sna_dri2_event));
	if (info == NULL)
		return NULL;

	info->sna = sna;
	info->draw = draw;
	info->crtc = crtc;
	info->pipe = sna_crtc_pipe(crtc);
	info->keepalive = 1;

	if (!add_event_to_client(info, sna, client)) {
		free(info);
		return NULL;
	}

	assert(priv->chain != info);
	info->chained = true;

	/* Empty chain: the new event becomes the head */
	if (priv->chain == NULL) {
		priv->chain = info;
		return info;
	}

	/* Otherwise append at the tail to preserve swap ordering */
	chain = priv->chain;
	while (chain->chain != NULL)
		chain = chain->chain;

	assert(chain != info);
	chain->chain = info;
	return info;
}
1826
/* Detach all DRI2 swap state from a window: release the private front
 * buffer (and its CRTC shadow override) and tear down the entire swap
 * chain. When `signal` is set (window still alive, e.g. unredirected),
 * outstanding clients receive a completion event so they do not hang;
 * on window destruction the events are dropped silently.
 */
static void decouple_window(WindowPtr win,
			    struct dri2_window *priv,
			    struct sna *sna,
			    bool signal)
{
	if (priv->front) {
		DBG(("%s: decouple private front\n", __FUNCTION__));
		assert(priv->crtc);
		sna_shadow_unset_crtc(sna, priv->crtc);

		_sna_dri2_destroy_buffer(sna, NULL, priv->front);
		priv->front = NULL;
	}

	if (priv->chain) {
		struct sna_dri2_event *info, *chain;

		DBG(("%s: freeing chain\n", __FUNCTION__));

		chain = priv->chain;
		while ((info = chain)) {
			DBG(("%s: freeing event, pending signal? %d, pending swap? handle=%d\n",
			     __FUNCTION__, info->signal,
			     info->pending.bo ? info->pending.bo->handle : 0));
			assert(info->draw == &win->drawable);

			if (info->pending.bo) {
				if (signal) {
					/* frame_swap_complete requires
					 * info->signal set and clears it;
					 * preserve the original flag for
					 * the check below.
					 */
					bool was_signalling = info->signal;
					info->signal = true;
					frame_swap_complete(info, DRI2_EXCHANGE_COMPLETE);
					info->signal = was_signalling;
				}
				assert(info->pending.bo->active_scanout > 0);
				info->pending.bo->active_scanout--;

				kgem_bo_destroy(&sna->kgem, info->pending.bo);
				info->pending.bo = NULL;
			}

			/* Flush any outstanding completion notification */
			if (info->signal && signal)
				frame_swap_complete(info, DRI2_EXCHANGE_COMPLETE);
			info->signal = false;
			info->draw = NULL;
			info->keepalive = 1;
			assert(!info->signal);
			list_del(&info->link);

			chain = info->chain;
			info->chain = NULL;
			info->chained = false;

			/* Events still queued in the kernel are freed by
			 * their completion handler.
			 */
			if (!info->queued)
				sna_dri2_event_free(info);
		}

		priv->chain = NULL;
	}
}
1886
/* Public hook: detach DRI2 swap state from a still-live window (e.g.
 * when it is unredirected), signalling completion to waiting clients.
 * The per-window private and back-buffer cache are kept for reuse.
 */
void sna_dri2_decouple_window(WindowPtr win)
{
	struct dri2_window *priv;

	priv = dri2_window(win);
	if (priv == NULL)
		return;

	DBG(("%s: window=%ld\n", __FUNCTION__, win->drawable.id));
	decouple_window(win, priv, to_sna_from_drawable(&win->drawable), true);
}
1898
/* Public hook: the window is being destroyed. Tear down its DRI2 swap
 * state without signalling (no clients to notify), drain the whole
 * back-buffer cache, and free the per-window private.
 */
void sna_dri2_destroy_window(WindowPtr win)
{
	struct dri2_window *priv;
	struct sna *sna;

	priv = dri2_window(win);
	if (priv == NULL)
		return;

	DBG(("%s: window=%ld\n", __FUNCTION__, win->drawable.id));
	sna = to_sna_from_drawable(&win->drawable);
	decouple_window(win, priv, sna, false);

	while (!list_is_empty(&priv->cache)) {
		struct dri_bo *c;

		c = list_first_entry(&priv->cache, struct dri_bo, link);
		list_del(&c->link);

		DBG(("%s: releasing cached handle=%d\n", __FUNCTION__, c->bo ? c->bo->handle : 0));
		assert(c->bo);
		kgem_bo_destroy(&sna->kgem, c->bo);
		free(c);
	}

	free(priv);
}
1926
/* Page-flip completion callback invoked from the DRM event loop;
 * `data` is the sna_dri2_event passed to sna_page_flip().
 */
static void
sna_dri2_flip_handler(struct drm_event_vblank *event, void *data)
{
	DBG(("%s: sequence=%d\n", __FUNCTION__, event->sequence));
	sna_dri2_flip_event(data);
}
1933
/* Queue a page flip to the event's back buffer and exchange the
 * front/back DRI2 buffer contents (bo, name, pitch, flags).
 *
 * Returns false without side effects if a flip is already active or
 * the kernel rejects the flip. On success the old front bo becomes the
 * (stale) back buffer, the scanout accounting is transferred, and the
 * event is marked queued awaiting the flip-complete handler (except
 * FLIP_ASYNC, which requests no completion event).
 */
static bool
sna_dri2_flip(struct sna_dri2_event *info)
{
	struct kgem_bo *bo = get_private(info->back)->bo;
	struct kgem_bo *tmp_bo;
	uint32_t tmp_name, tmp_flags;
	int tmp_pitch;

	DBG(("%s(type=%d)\n", __FUNCTION__, info->type));

	assert(sna_pixmap_get_buffer(info->sna->front) == info->front);
	assert(get_drawable_pixmap(info->draw)->drawable.height * bo->pitch <= kgem_bo_size(bo));
	assert(get_private(info->front)->size == get_private(info->back)->size);
	assert(bo->refcnt);

	if (info->sna->mode.flip_active) {
		DBG(("%s: %d flips still active, aborting\n",
		     __FUNCTION__, info->sna->mode.flip_active));
		return false;
	}

	assert(!info->queued);
	/* ASYNC flips request no completion event from the kernel */
	if (!sna_page_flip(info->sna, bo, sna_dri2_flip_handler,
			   info->type == FLIP_ASYNC ? NULL : info))
		return false;

	DBG(("%s: queued flip=%p\n", __FUNCTION__, info->type == FLIP_ASYNC ? NULL : info));
	assert(info->signal || info->type != FLIP_THROTTLE);

	assert(info->sna->dri2.flip_pending == NULL ||
	       info->sna->dri2.flip_pending == info);
	if (info->type != FLIP_ASYNC)
		info->sna->dri2.flip_pending = info;

	DBG(("%s: marked handle=%d as scanout, swap front (handle=%d, name=%d) and back (handle=%d, name=%d)\n",
	     __FUNCTION__, bo->handle,
	     get_private(info->front)->bo->handle, info->front->name,
	     get_private(info->back)->bo->handle, info->back->name));

	/* Exchange the identities of front and back: stash the old
	 * front's attributes before overwriting them.
	 */
	tmp_bo = get_private(info->front)->bo;
	tmp_name = info->front->name;
	tmp_pitch = info->front->pitch;
	tmp_flags = info->front->flags;

	/* Transfer the scanout reference from the old front to the new */
	assert(tmp_bo->active_scanout > 0);
	tmp_bo->active_scanout--;

	set_bo(info->sna->front, bo);

	info->front->flags = info->back->flags;
	info->front->name = info->back->name;
	info->front->pitch = info->back->pitch;
	get_private(info->front)->bo = bo;
	bo->active_scanout++;
	assert(bo->active_scanout <= bo->refcnt);

	/* The old front becomes the back buffer, now stale until the
	 * client renders into it again.
	 */
	info->back->flags = tmp_flags;
	info->back->name = tmp_name;
	info->back->pitch = tmp_pitch;
	get_private(info->back)->bo = tmp_bo;
	mark_stale(info->back);

	assert(get_private(info->front)->bo->refcnt);
	assert(get_private(info->back)->bo->refcnt);
	assert(get_private(info->front)->bo != get_private(info->back)->bo);

	info->keepalive = KEEPALIVE;
	info->queued = true;
	return true;
}
2004
/* Decide whether a DRI2 swap on this drawable may be performed as a
 * page flip rather than a blit.
 *
 * A flip is only possible when the window is the unclipped, full-size
 * front buffer on an active, unshadowed CRTC, and the back buffer is a
 * scanout-capable bo matching the front in format, size, tiling and
 * pitch. Any failed check returns false (fall back to a blit).
 */
static bool
can_flip(struct sna * sna,
	 DrawablePtr draw,
	 DRI2BufferPtr front,
	 DRI2BufferPtr back,
	 xf86CrtcPtr crtc)
{
	WindowPtr win = (WindowPtr)draw;
	PixmapPtr pixmap;

	assert((sna->flags & SNA_NO_WAIT) == 0);

	if (!DBG_CAN_FLIP)
		return false;

	if (draw->type == DRAWABLE_PIXMAP)
		return false;

	if (!sna->mode.front_active) {
		DBG(("%s: no, active CRTC\n", __FUNCTION__));
		return false;
	}

	assert(sna->scrn->vtSema);
	assert(!sna->mode.hidden);

	if ((sna->flags & (SNA_HAS_FLIP | SNA_HAS_ASYNC_FLIP)) == 0) {
		DBG(("%s: no, pageflips disabled\n", __FUNCTION__));
		return false;
	}

	if (front->cpp != back->cpp) {
		DBG(("%s: no, format mismatch, front = %d, back = %d\n",
		     __FUNCTION__, front->cpp, back->cpp));
		return false;
	}

	if (sna->mode.shadow_active) {
		DBG(("%s: no, shadow enabled\n", __FUNCTION__));
		return false;
	}

	if (!sna_crtc_is_on(crtc)) {
		DBG(("%s: ref-pipe=%d is disabled\n", __FUNCTION__, sna_crtc_pipe(crtc)));
		return false;
	}

	/* The window must be backed by the real front buffer... */
	pixmap = get_window_pixmap(win);
	if (pixmap != sna->front) {
		DBG(("%s: no, window (pixmap=%ld) is not attached to the front buffer (pixmap=%ld)\n",
		     __FUNCTION__, pixmap->drawable.serialNumber, sna->front->drawable.serialNumber));
		return false;
	}

	/* ...and this DRI2 buffer must still be its current one */
	if (sna_pixmap_get_buffer(pixmap) != front) {
		DBG(("%s: no, DRI2 drawable is no longer attached (old name=%d, new name=%d) to pixmap=%ld\n",
		     __FUNCTION__, front->name,
		     sna_pixmap_get_buffer(pixmap) ? sna_pixmap_get_buffer(pixmap)->name : 0,
		     pixmap->drawable.serialNumber));
		return false;
	}

	assert(get_private(front)->pixmap == sna->front);
	assert(sna_pixmap(sna->front)->gpu_bo == get_private(front)->bo);

	if (!get_private(back)->bo->scanout) {
		DBG(("%s: no, DRI2 drawable was too small at time of creation)\n",
		     __FUNCTION__));
		return false;
	}

	if (get_private(back)->size != get_private(front)->size) {
		DBG(("%s: no, DRI2 drawable does not fit into scanout\n",
		     __FUNCTION__));
		return false;
	}

	DBG(("%s: window size: %dx%d, clip=(%d, %d), (%d, %d) x %d\n",
	     __FUNCTION__,
	     win->drawable.width, win->drawable.height,
	     win->clipList.extents.x1, win->clipList.extents.y1,
	     win->clipList.extents.x2, win->clipList.extents.y2,
	     region_num_rects(&win->clipList)));
	/* The window must cover the whole screen, unobscured */
	if (!RegionEqual(&win->clipList, &draw->pScreen->root->winSize)) {
		DBG(("%s: no, window is clipped: clip region=(%d, %d), (%d, %d), root size=(%d, %d), (%d, %d)\n",
		     __FUNCTION__,
		     win->clipList.extents.x1,
		     win->clipList.extents.y1,
		     win->clipList.extents.x2,
		     win->clipList.extents.y2,
		     draw->pScreen->root->winSize.extents.x1,
		     draw->pScreen->root->winSize.extents.y1,
		     draw->pScreen->root->winSize.extents.x2,
		     draw->pScreen->root->winSize.extents.y2));
		return false;
	}

	if (draw->x != 0 || draw->y != 0 ||
#ifdef COMPOSITE
	    draw->x != pixmap->screen_x ||
	    draw->y != pixmap->screen_y ||
#endif
	    draw->width != pixmap->drawable.width ||
	    draw->height != pixmap->drawable.height) {
		DBG(("%s: no, window is not full size (%dx%d)!=(%dx%d)\n",
		     __FUNCTION__,
		     draw->width, draw->height,
		     pixmap->drawable.width,
		     pixmap->drawable.height));
		return false;
	}

	/* prevent an implicit tiling mode change */
	if (get_private(back)->bo->tiling > I915_TILING_X) {
		DBG(("%s -- no, tiling mismatch: front %d, back=%d, want-tiled?=%d\n",
		     __FUNCTION__,
		     get_private(front)->bo->tiling,
		     get_private(back)->bo->tiling,
		     !!(sna->flags & SNA_LINEAR_FB)));
		return false;
	}

	if (get_private(front)->bo->pitch != get_private(back)->bo->pitch) {
		DBG(("%s -- no, pitch mismatch: front %d, back=%d\n",
		     __FUNCTION__,
		     get_private(front)->bo->pitch,
		     get_private(back)->bo->pitch));
		return false;
	}

	/* Flipping away a bo pinned for other reasons would break them */
	if (sna_pixmap(pixmap)->pinned & ~(PIN_DRI2 | PIN_SCANOUT)) {
		DBG(("%s -- no, pinned: front %x\n",
		     __FUNCTION__, sna_pixmap(pixmap)->pinned));
		return false;
	}

	DBG(("%s: yes, pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
	return true;
}
2144
2145static bool
2146can_xchg(struct sna *sna,
2147	 DrawablePtr draw,
2148	 DRI2BufferPtr front,
2149	 DRI2BufferPtr back)
2150{
2151	WindowPtr win = (WindowPtr)draw;
2152	PixmapPtr pixmap;
2153
2154	if (!DBG_CAN_XCHG)
2155		return false;
2156
2157	if (draw->type == DRAWABLE_PIXMAP)
2158		return false;
2159
2160	if (front->cpp != back->cpp) {
2161		DBG(("%s: no, format mismatch, front = %d, back = %d\n",
2162		     __FUNCTION__, front->cpp, back->cpp));
2163		return false;
2164	}
2165
2166	pixmap = get_window_pixmap(win);
2167	if (get_private(front)->pixmap != pixmap) {
2168		DBG(("%s: no, DRI2 drawable is no longer attached, old pixmap=%ld, now pixmap=%ld\n",
2169		     __FUNCTION__,
2170		     get_private(front)->pixmap->drawable.serialNumber,
2171		     pixmap->drawable.serialNumber));
2172		return false;
2173	}
2174
2175	DBG(("%s: window size: %dx%d, clip=(%d, %d), (%d, %d) x %d, pixmap size=%dx%d\n",
2176	     __FUNCTION__,
2177	     win->drawable.width, win->drawable.height,
2178	     win->clipList.extents.x1, win->clipList.extents.y1,
2179	     win->clipList.extents.x2, win->clipList.extents.y2,
2180	     region_num_rects(&win->clipList),
2181	     pixmap->drawable.width,
2182	     pixmap->drawable.height));
2183	if (is_clipped(&win->clipList, &pixmap->drawable)) {
2184		DBG(("%s: no, %dx%d window is clipped: clip region=(%d, %d), (%d, %d)\n",
2185		     __FUNCTION__,
2186		     draw->width, draw->height,
2187		     win->clipList.extents.x1,
2188		     win->clipList.extents.y1,
2189		     win->clipList.extents.x2,
2190		     win->clipList.extents.y2));
2191		return false;
2192	}
2193
2194	DBG(("%s: back size=%x, front size=%x\n",
2195	     __FUNCTION__, get_private(back)->size, get_private(front)->size));
2196	if (get_private(back)->size != get_private(front)->size) {
2197		DBG(("%s: no, back buffer %dx%d does not match front buffer %dx%d\n",
2198		     __FUNCTION__,
2199		     get_private(back)->size & 0x7fff, (get_private(back)->size >> 16) & 0x7fff,
2200		     get_private(front)->size & 0x7fff, (get_private(front)->size >> 16) & 0x7fff));
2201		return false;
2202	}
2203
2204	if (pixmap == sna->front && !(sna->flags & SNA_TEAR_FREE) && sna->mode.front_active) {
2205		DBG(("%s: no, front buffer, requires flipping\n",
2206		     __FUNCTION__));
2207		return false;
2208	}
2209
2210	if (sna_pixmap(pixmap)->pinned & ~(PIN_DRI2 | PIN_SCANOUT)) {
2211		DBG(("%s: no, pinned: %x\n",
2212		     __FUNCTION__, sna_pixmap(pixmap)->pinned));
2213		return false;
2214	}
2215
2216	DBG(("%s: yes, pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
2217	return true;
2218}
2219
2220static bool
2221overlaps_other_crtc(struct sna *sna, xf86CrtcPtr desired)
2222{
2223	xf86CrtcConfigPtr config = XF86_CRTC_CONFIG_PTR(sna->scrn);
2224	int c;
2225
2226	for (c = 0; c < sna->mode.num_real_crtc; c++) {
2227		xf86CrtcPtr crtc = config->crtc[c];
2228
2229		if (crtc == desired)
2230			continue;
2231
2232		if (!crtc->enabled)
2233			continue;
2234
2235		if (desired->bounds.x1 < crtc->bounds.x2 &&
2236		    desired->bounds.x2 > crtc->bounds.x1 &&
2237		    desired->bounds.y1 < crtc->bounds.y2 &&
2238		    desired->bounds.y2 > crtc->bounds.y1)
2239			return true;
2240	}
2241
2242	return false;
2243}
2244
/* Decide whether a swap can be satisfied by exchanging the back buffer
 * directly onto this CRTC's TearFree shadow instead of copying: the
 * window must exactly and exclusively cover a single untransformed CRTC.
 */
static bool
can_xchg_crtc(struct sna *sna,
	      DrawablePtr draw,
	      xf86CrtcPtr crtc,
	      DRI2BufferPtr front,
	      DRI2BufferPtr back)
{
	WindowPtr win = (WindowPtr)draw;
	PixmapPtr pixmap;

	if (!DBG_CAN_XCHG)
		return false;

	/* The per-crtc exchange relies on the TearFree shadow machinery. */
	if ((sna->flags & SNA_TEAR_FREE) == 0) {
		DBG(("%s: no, requires TearFree\n",
		     __FUNCTION__));
		return false;
	}

	/* Only windows can exchange; plain pixmaps always copy. */
	if (draw->type == DRAWABLE_PIXMAP)
		return false;

	/* Both buffers must share the same pixel format. */
	if (front->cpp != back->cpp) {
		DBG(("%s: no, format mismatch, front = %d, back = %d\n",
		     __FUNCTION__, front->cpp, back->cpp));
		return false;
	}

	/* The visible extents of the window must equal the CRTC bounds. */
	if (memcmp(&win->clipList.extents, &crtc->bounds, sizeof(crtc->bounds))) {
		DBG(("%s: no, window [(%d, %d), (%d, %d)] does not cover CRTC [(%d, %d), (%d, %d)]\n",
		     __FUNCTION__,
		     win->clipList.extents.x1, win->clipList.extents.y1,
		     win->clipList.extents.x2, win->clipList.extents.y2,
		     crtc->bounds.x1, crtc->bounds.y1,
		     crtc->bounds.x2, crtc->bounds.y2));
		return false;
	}

	/* A transformed/rotated CRTC cannot scan out the buffer directly. */
	if (sna_crtc_is_transformed(crtc)) {
		DBG(("%s: no, CRTC is rotated\n", __FUNCTION__));
		return false;
	}

	pixmap = get_window_pixmap(win);
	if (pixmap != sna->front) {
		DBG(("%s: no, not attached to front buffer\n", __FUNCTION__));
		return false;
	}

	if (get_private(front)->pixmap != pixmap) {
		DBG(("%s: no, DRI2 drawable is no longer attached, old pixmap=%ld, now pixmap=%ld\n",
		     __FUNCTION__,
		     get_private(front)->pixmap->drawable.serialNumber,
		     pixmap->drawable.serialNumber));
		return false;
	}

	DBG(("%s: window size: %dx%d, clip=(%d, %d), (%d, %d) x %d\n",
	     __FUNCTION__,
	     win->drawable.width, win->drawable.height,
	     win->clipList.extents.x1, win->clipList.extents.y1,
	     win->clipList.extents.x2, win->clipList.extents.y2,
	     region_num_rects(&win->clipList)));
	/* A partially obscured window cannot donate its buffer. */
	if (is_clipped(&win->clipList, &win->drawable)) {
		DBG(("%s: no, %dx%d window is clipped: clip region=(%d, %d), (%d, %d)\n",
		     __FUNCTION__,
		     draw->width, draw->height,
		     win->clipList.extents.x1,
		     win->clipList.extents.y1,
		     win->clipList.extents.x2,
		     win->clipList.extents.y2));
		return false;
	}

	/* The exchanged buffer would also be visible on an overlapping CRTC. */
	if (overlaps_other_crtc(sna, crtc)) {
		DBG(("%s: no, overlaps other CRTC\n", __FUNCTION__));
		return false;
	}

	/* Back buffer dimensions (packed height<<16 | width) must match
	 * the window exactly.
	 */
	if (get_private(back)->size != (draw->height << 16 | draw->width)) {
		DBG(("%s: no, DRI2 buffers does not fit window\n",
		     __FUNCTION__));
		return false;
	}

	assert(win != win->drawable.pScreen->root);
	DBG(("%s: yes, pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
	return true;
}
2334
/* Swap the buffer objects backing the DRI2 front and back buffers of a
 * window and rebind the window pixmap to the old back bo. The flink
 * name, pitch and flags travel with the bos so that the client-visible
 * buffer handles remain consistent.
 */
static void
sna_dri2_xchg(DrawablePtr draw, DRI2BufferPtr front, DRI2BufferPtr back)
{
	WindowPtr win = (WindowPtr)draw;
	struct kgem_bo *back_bo, *front_bo;
	PixmapPtr pixmap;
	int tmp;

	assert(draw->type != DRAWABLE_PIXMAP);
	pixmap = get_window_pixmap(win);

	back_bo = get_private(back)->bo;
	front_bo = get_private(front)->bo;

	DBG(("%s: win=%ld, exchange front=%d/%d,ref=%d and back=%d/%d,ref=%d, pixmap=%ld %dx%d\n",
	     __FUNCTION__, win->drawable.id,
	     front_bo->handle, front->name, get_private(front)->refcnt,
	     back_bo->handle, back->name, get_private(back)->refcnt,
	     pixmap->drawable.serialNumber,
	     pixmap->drawable.width,
	     pixmap->drawable.height));

	DBG(("%s: back_bo handle=%d, pitch=%d, size=%d, ref=%d, active_scanout?=%d\n",
	     __FUNCTION__, back_bo->handle, back_bo->pitch, kgem_bo_size(back_bo), back_bo->refcnt, back_bo->active_scanout));
	DBG(("%s: front_bo handle=%d, pitch=%d, size=%d, ref=%d, active_scanout?=%d\n",
	     __FUNCTION__, front_bo->handle, front_bo->pitch, kgem_bo_size(front_bo), front_bo->refcnt, front_bo->active_scanout));

	assert(front_bo != back_bo);
	assert(front_bo->refcnt);
	assert(back_bo->refcnt);

	assert(sna_pixmap_get_buffer(pixmap) == front);

	/* Both bos must be large enough to back the whole pixmap. */
	assert(pixmap->drawable.height * back_bo->pitch <= kgem_bo_size(back_bo));
	assert(pixmap->drawable.height * front_bo->pitch <= kgem_bo_size(front_bo));

	/* The window now presents from what was the back buffer. */
	set_bo(pixmap, back_bo);

	get_private(front)->bo = back_bo;
	get_private(back)->bo = front_bo;
	/* NOTE(review): mark_stale presumably invalidates the client's
	 * cached view of the back buffer -- confirm in its definition.
	 */
	mark_stale(back);

	/* Transfer the scanout reference from the old front to the new. */
	assert(front_bo->active_scanout > 0);
	front_bo->active_scanout--;
	back_bo->active_scanout++;
	assert(back_bo->active_scanout <= back_bo->refcnt);

	/* Exchange the client-visible identity: flink name, pitch, flags. */
	tmp = front->name;
	front->name = back->name;
	back->name = tmp;

	tmp = front->pitch;
	front->pitch = back->pitch;
	back->pitch = tmp;

	tmp = front->flags;
	front->flags = back->flags;
	back->flags = tmp;

	assert(front_bo->refcnt);
	assert(back_bo->refcnt);

	assert(front_bo->pitch == get_private(front)->bo->pitch);
	assert(back_bo->pitch == get_private(back)->bo->pitch);

	assert(get_private(front)->bo == sna_pixmap(pixmap)->gpu_bo);
}
2402
2403static void sna_dri2_xchg_crtc(struct sna *sna, DrawablePtr draw, xf86CrtcPtr crtc, DRI2BufferPtr front, DRI2BufferPtr back)
2404{
2405	WindowPtr win = (WindowPtr)draw;
2406	struct dri2_window *priv = dri2_window(win);
2407
2408	DBG(("%s: exchange front=%d/%d and back=%d/%d, win id=%lu, pixmap=%ld %dx%d\n",
2409	     __FUNCTION__,
2410	     get_private(front)->bo->handle, front->name,
2411	     get_private(back)->bo->handle, back->name,
2412	     win->drawable.id,
2413	     get_window_pixmap(win)->drawable.serialNumber,
2414	     get_window_pixmap(win)->drawable.width,
2415	     get_window_pixmap(win)->drawable.height));
2416	assert(can_xchg_crtc(sna, draw, crtc, front, back));
2417
2418	if (APPLY_DAMAGE) {
2419		DBG(("%s: marking drawable as damaged\n", __FUNCTION__));
2420		sna->ignore_copy_area = sna->flags & SNA_TEAR_FREE;
2421		DamageRegionAppend(&win->drawable, &win->clipList);
2422	}
2423	sna_shadow_set_crtc(sna, crtc, get_private(back)->bo);
2424	if (APPLY_DAMAGE) {
2425		sna->ignore_copy_area = false;
2426		DamageRegionProcessPending(&win->drawable);
2427	}
2428
2429	if (priv->front == NULL) {
2430		DRI2Buffer2Ptr tmp;
2431
2432		tmp = calloc(1, sizeof(*tmp) + sizeof(struct sna_dri2_private));
2433		if (tmp == NULL) {
2434			sna_shadow_unset_crtc(sna, crtc);
2435			return;
2436		}
2437
2438		tmp->attachment = DRI2BufferFrontLeft;
2439		tmp->driverPrivate = tmp + 1;
2440		tmp->cpp = back->cpp;
2441		tmp->format = back->format;
2442
2443		get_private(tmp)->refcnt = 1;
2444		get_private(tmp)->bo = kgem_create_2d(&sna->kgem,
2445						      draw->width, draw->height, draw->bitsPerPixel,
2446						      get_private(back)->bo->tiling,
2447						      CREATE_SCANOUT | CREATE_EXACT);
2448		if (get_private(tmp)->bo != NULL) {
2449			tmp->pitch = get_private(tmp)->bo->pitch;
2450			tmp->name = kgem_bo_flink(&sna->kgem, get_private(tmp)->bo);
2451		}
2452		if (tmp->name == 0) {
2453			if (get_private(tmp)->bo != NULL)
2454				kgem_bo_destroy(&sna->kgem, get_private(tmp)->bo);
2455			sna_shadow_unset_crtc(sna, crtc);
2456			return;
2457		}
2458		get_private(tmp)->size = get_private(back)->size;
2459		get_private(tmp)->pixmap = get_private(front)->pixmap;
2460		get_private(tmp)->proxy = sna_dri2_reference_buffer(front);
2461		get_private(tmp)->bo->active_scanout++;
2462
2463		priv->front = front = tmp;
2464	}
2465	assert(front == priv->front);
2466
2467	{
2468		struct kgem_bo *front_bo = get_private(front)->bo;
2469		struct kgem_bo *back_bo = get_private(back)->bo;
2470		unsigned tmp;
2471
2472		assert(front_bo->refcnt);
2473		assert(back_bo->refcnt);
2474
2475		get_private(back)->bo = front_bo;
2476		get_private(front)->bo = back_bo;
2477		mark_stale(back);
2478
2479		assert(front_bo->active_scanout > 0);
2480		front_bo->active_scanout--;
2481		back_bo->active_scanout++;
2482		assert(back_bo->active_scanout <= back_bo->refcnt);
2483
2484		tmp = front->name;
2485		front->name = back->name;
2486		back->name = tmp;
2487
2488		tmp = front->pitch;
2489		front->pitch = back->pitch;
2490		back->pitch = tmp;
2491
2492		tmp = front->flags;
2493		front->flags = back->flags;
2494		back->flags = tmp;
2495	}
2496}
2497
/* Run the next swap event queued behind a completed one on the same
 * drawable: perform its exchange/blit and requeue it for a vblank, or
 * complete it immediately if no vblank can be obtained.
 */
static void chain_swap(struct sna_dri2_event *chain)
{
	DBG(("%s: draw=%ld, queued?=%d, type=%d\n",
	     __FUNCTION__, (long)chain->draw->id, chain->queued, chain->type));

	if (chain->queued) /* too early! */
		return;

	/* The drawable was destroyed whilst the event was queued. */
	if (chain->draw == NULL) {
		sna_dri2_event_free(chain);
		return;
	}

	assert(chain == dri2_chain(chain->draw));
	assert(chain->signal);

	switch (chain->type) {
	case SWAP_COMPLETE:
		DBG(("%s: emitting chained vsync'ed blit\n", __FUNCTION__));
		/* Prefer a buffer exchange, then a per-crtc exchange, and
		 * fall back to a (possibly vsync'ed) copy.
		 */
		if (can_xchg(chain->sna, chain->draw, chain->front, chain->back)) {
			sna_dri2_xchg(chain->draw, chain->front, chain->back);
		} else if (can_xchg_crtc(chain->sna, chain->draw, chain->crtc,
					 chain->front, chain->back)) {
			sna_dri2_xchg_crtc(chain->sna, chain->draw, chain->crtc,
					   chain->front, chain->back);
		} else {
			__sna_dri2_copy_event(chain, chain->sync | DRI2_BO);
		}
		assert(get_private(chain->back)->bo != get_private(chain->front)->bo);
		/* fall through */
	case SWAP:
		break;
	default:
		return;
	}

	/* An unsynced swap still within the swap limit completes right
	 * away; otherwise wait for the next vblank, completing immediately
	 * if no vblank can be queued.
	 */
	if ((chain->type == SWAP_COMPLETE &&
	     !swap_limit(chain->draw, 2 + !chain->sync) &&
	     !chain->sync) ||
	    !sna_next_vblank(chain)) {
		DBG(("%s: vblank wait failed, unblocking client\n", __FUNCTION__));
		frame_swap_complete(chain, DRI2_BLIT_COMPLETE);
		sna_dri2_event_free(chain);
	}
}
2542
2543static inline bool rq_is_busy(struct kgem *kgem, struct kgem_bo *bo)
2544{
2545	if (bo == NULL)
2546		return false;
2547
2548	return __kgem_bo_is_busy(kgem, bo);
2549}
2550
2551static bool sna_dri2_blit_complete(struct sna_dri2_event *info)
2552{
2553	if (!info->bo)
2554		return true;
2555
2556	if (__kgem_bo_is_busy(&info->sna->kgem, info->bo)) {
2557		DBG(("%s: vsync'ed blit is still busy, postponing\n",
2558		     __FUNCTION__));
2559		if (sna_next_vblank(info))
2560			return false;
2561
2562		kgem_bo_sync__gtt(&info->sna->kgem, info->bo);
2563	}
2564
2565	DBG(("%s: blit finished\n", __FUNCTION__));
2566	kgem_bo_destroy(&info->sna->kgem, info->bo);
2567	info->bo = NULL;
2568
2569	return true;
2570}
2571
/* Dispatch a completed DRM vblank event for DRI2: depending on the event
 * type, retry a pageflip, perform a deferred swap, signal completion to
 * the client (with keepalive-based throttling), or complete a WaitMSC,
 * then continue any swap chained behind this event.
 */
void sna_dri2_vblank_handler(struct drm_event_vblank *event)
{
	struct sna_dri2_event *info = (void *)(uintptr_t)event->user_data;
	struct sna *sna = info->sna;
	DrawablePtr draw;
	uint64_t msc;

	DBG(("%s(type=%d, sequence=%d, draw=%ld)\n", __FUNCTION__, info->type, event->sequence, info->draw ? info->draw->serialNumber : 0));
	assert(info->queued);
	info->queued = false;

	msc = sna_crtc_record_event(info->crtc, event);

	/* The drawable may have been destroyed whilst we waited. */
	draw = info->draw;
	if (draw == NULL) {
		DBG(("%s -- drawable gone\n", __FUNCTION__));
		goto done;
	}

	assert((info->front == NULL && info->back == NULL) || info->front != info->back);
	switch (info->type) {
	case FLIP:
		/* If we can still flip... */
		assert(info->signal);
		if (can_flip(sna, draw, info->front, info->back, info->crtc) &&
		    sna_dri2_flip(info))
			return;

		/* else fall through to blit */
	case SWAP:
		assert(info->signal);
		/* Perform the swap now (exchange if possible, else copy),
		 * then wait one more vblank before signalling the client.
		 */
		if (can_xchg(info->sna, draw, info->front, info->back)) {
			sna_dri2_xchg(draw, info->front, info->back);
			info->type = SWAP_COMPLETE;
		} else if (can_xchg_crtc(sna, draw, info->crtc,
					 info->front, info->back)) {
			sna_dri2_xchg_crtc(sna, draw, info->crtc,
					   info->front, info->back);
			info->type = SWAP_COMPLETE;
		}  else {
			__sna_dri2_copy_event(info, DRI2_BO | DRI2_SYNC);
			info->type = SWAP_COMPLETE;
		}

		if (sna_next_vblank(info))
			return;

		DBG(("%s -- requeue failed, errno=%d\n", __FUNCTION__, errno));
		assert(info->pending.bo == NULL);
		assert(info->keepalive == 1);
		/* fall through to SwapComplete */
	case SWAP_COMPLETE:
		DBG(("%s: %d complete, frame=%d tv=%d.%06d\n",
		     __FUNCTION__, info->type,
		     event->sequence, event->tv_sec, event->tv_usec));

		/* Unblock the client once any outstanding blit has drained. */
		if (info->signal) {
			if (!sna_dri2_blit_complete(info))
				return;

			DBG(("%s: triple buffer swap complete, unblocking client (frame=%d, tv=%d.%06d)\n", __FUNCTION__,
			     event->sequence, event->tv_sec, event->tv_usec));
			frame_swap_complete(info, DRI2_BLIT_COMPLETE);
		}

		/* A second swap was attached to this in-flight event:
		 * temporarily install the pending bo as the back buffer,
		 * perform that swap, then restore the client's current back
		 * buffer and keep the event alive to signal it next vblank.
		 */
		if (info->pending.bo) {
			struct copy current_back;

			DBG(("%s: swapping back handle=%d [name=%d, active=%d] for pending handle=%d [name=%d, active=%d], front handle=%d [name=%d, active=%d]\n",
			     __FUNCTION__,
			     get_private(info->back)->bo->handle, info->back->name, get_private(info->back)->bo->active_scanout,
			     info->pending.bo->handle, info->pending.name, info->pending.bo->active_scanout,
			     get_private(info->front)->bo->handle, info->front->name, get_private(info->front)->bo->active_scanout));

			assert(info->pending.bo->active_scanout > 0);
			info->pending.bo->active_scanout--;

			/* Save the client's current back buffer identity. */
			current_back.bo = get_private(info->back)->bo;
			current_back.size = get_private(info->back)->size;
			current_back.name = info->back->name;
			current_back.flags = info->back->flags;

			get_private(info->back)->bo = info->pending.bo;
			get_private(info->back)->size = info->pending.size;
			info->back->name = info->pending.name;
			info->back->pitch = info->pending.bo->pitch;
			info->back->flags = info->pending.flags;
			info->pending.bo = NULL;

			assert(get_private(info->back)->bo != get_private(info->front)->bo);

			if (can_xchg(info->sna, info->draw, info->front, info->back))
				sna_dri2_xchg(info->draw, info->front, info->back);
			else if (can_xchg_crtc(info->sna, info->draw, info->crtc,
						 info->front, info->back))
				sna_dri2_xchg_crtc(info->sna, info->draw, info->crtc,
						   info->front, info->back);
			else
				__sna_dri2_copy_event(info, info->sync | DRI2_BO);

			/* Return the displaced bo to the per-drawable cache. */
			sna_dri2_cache_bo(info->sna, info->draw,
					  get_private(info->back)->bo,
					  info->back->name,
					  get_private(info->back)->size,
					  info->back->flags);

			get_private(info->back)->bo = current_back.bo;
			get_private(info->back)->size = current_back.size;
			info->back->name = current_back.name;
			info->back->pitch = current_back.bo->pitch;
			info->back->flags = current_back.flags;

			DBG(("%s: restored current back handle=%d [name=%d, active=%d], active=%d], front handle=%d [name=%d, active=%d]\n",
			     __FUNCTION__,
			     get_private(info->back)->bo->handle, info->back->name, get_private(info->back)->bo->active_scanout,
			     get_private(info->front)->bo->handle, info->front->name, get_private(info->front)->bo->active_scanout));

			assert(info->draw);
			assert(!info->signal);
			info->keepalive++;
			info->signal = true;
		}

		/* Keep the event alive to throttle subsequent swaps; fall
		 * back to immediate completion if no vblank can be queued.
		 */
		if (--info->keepalive) {
			if (sna_next_vblank(info))
				return;

			if (info->signal) {
				DBG(("%s: triple buffer swap complete, unblocking client (frame=%d, tv=%d.%06d)\n", __FUNCTION__,
				     event->sequence, event->tv_sec, event->tv_usec));
				frame_swap_complete(info, DRI2_BLIT_COMPLETE);
			}
		}
		break;

	case WAITMSC:
		assert(info->client);
		DRI2WaitMSCComplete(info->client, draw, msc,
				    event->tv_sec, event->tv_usec);
		break;
	default:
		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
			   "%s: unknown vblank event received\n", __func__);
		/* Unknown type */
		break;
	}

	/* Hand off to the next swap queued on this drawable, if any. */
	if (info->chain) {
		DBG(("%s: continuing chain\n", __FUNCTION__));
		assert(info->chain != info);
		assert(info->draw == draw);
		sna_dri2_remove_event(info);
		chain_swap(info->chain);
	}

done:
	sna_dri2_event_free(info);
	DBG(("%s complete\n", __FUNCTION__));
}
2731
/* Handle a swap that should happen without waiting for a target MSC:
 * either start a new swap chain on the drawable, or coalesce the request
 * onto the already in-flight event (swap elision / pending blit).
 */
static void
sna_dri2_immediate_blit(struct sna *sna,
			struct sna_dri2_event *info,
			bool sync)
{
	struct sna_dri2_event *chain = dri2_chain(info->draw);

	if (sna->flags & SNA_NO_WAIT)
		sync = false;

	DBG(("%s: emitting immediate blit, throttling client, synced? %d, chained? %d, pipe %d\n",
	     __FUNCTION__, sync, chain != info, info->pipe));
	assert(chain);

	info->type = SWAP_COMPLETE;
	info->sync = sync;
	info->keepalive = KEEPALIVE;

	/* This event heads the chain: swap now and throttle via a vblank. */
	if (chain == info) {
		DBG(("%s: no pending blit, starting chain\n", __FUNCTION__));

		assert(info->front != info->back);
		if (can_xchg(info->sna, info->draw, info->front, info->back)) {
			sna_dri2_xchg(info->draw, info->front, info->back);
		} else if (can_xchg_crtc(info->sna, info->draw, info->crtc,
					 info->front, info->back)) {
			sna_dri2_xchg_crtc(info->sna, info->draw, info->crtc,
					   info->front, info->back);
		} else
			__sna_dri2_copy_event(info, sync | DRI2_BO);

		assert(info->signal);

		/* Complete immediately for unsynced swaps within the swap
		 * limit, or when no vblank can be queued.
		 */
		if ((!swap_limit(info->draw, 2 + !sync) && !sync) ||
		    !sna_next_vblank(info)) {
			DBG(("%s: fake triple buffering, unblocking client\n", __FUNCTION__));
			frame_swap_complete(info, DRI2_BLIT_COMPLETE);
			sna_dri2_event_free(info);
		}
		return;
	}

	DBG(("%s: current event front=%d [name=%d, active?=%d], back=%d [name=%d, active?=%d]\n", __FUNCTION__,
	     get_private(chain->front)->bo->handle, chain->front->name, get_private(chain->front)->bo->active_scanout,
	     get_private(chain->back)->bo->handle, chain->back->name, get_private(chain->back)->bo->active_scanout));

	if (chain->type == SWAP_COMPLETE && chain->front == info->front) {
		assert(chain->draw == info->draw);
		assert(chain->client == info->client);
		assert(chain->event_complete == info->event_complete);
		assert(chain->event_data == info->event_data);
		assert(chain->queued);

		/* Drop a previously queued pending swap in favour of this
		 * one (swap elision), completing it for the client now.
		 */
		if ((!sync || !chain->sync) && chain->pending.bo) {
			bool signal = chain->signal;

			DBG(("%s: swap elision, unblocking client\n", __FUNCTION__));
			assert(chain->draw);
			chain->signal = true;
			frame_swap_complete(chain, DRI2_EXCHANGE_COMPLETE);
			chain->signal = signal;

			assert(chain->pending.bo->active_scanout > 0);
			chain->pending.bo->active_scanout--;

			sna_dri2_cache_bo(chain->sna, chain->draw,
					  chain->pending.bo,
					  chain->pending.name,
					  chain->pending.size,
					  chain->pending.flags);
			chain->pending.bo = NULL;
		}

		/* Attach this swap's back buffer to the in-flight event so
		 * it is performed when that event completes.
		 */
		if (chain->pending.bo == NULL && swap_limit(info->draw, 2 + !sync)) {
			DBG(("%s: setting handle=%d as pending blit (current event front=%d, back=%d)\n", __FUNCTION__,
			     get_private(info->back)->bo->handle,
			     get_private(chain->front)->bo->handle,
			     get_private(chain->back)->bo->handle));
			chain->pending.bo = ref(get_private(info->back)->bo);
			chain->pending.size = get_private(info->back)->size;
			chain->pending.name = info->back->name;
			chain->pending.flags = info->back->flags;
			chain->sync = sync;
			info->signal = false; /* transfer signal to pending */

			/* Prevent us from handing it back on next GetBuffers */
			chain->pending.bo->active_scanout++;

			sna_dri2_event_free(info);
			return;
		}
	}

	DBG(("%s: pending blit, chained\n", __FUNCTION__));
}
2827
/* Continue a flip chain: convert the event into its follow-on type and,
 * if the front buffer is still current, queue the next pageflip.
 * Returns false when the chain cannot be continued.
 */
static bool
sna_dri2_flip_continue(struct sna_dri2_event *info)
{
	struct kgem_bo *bo = get_private(info->front)->bo;

	DBG(("%s(mode=%d)\n", __FUNCTION__, info->flip_continue));
	assert(info->flip_continue > 0);
	info->type = info->flip_continue;
	info->flip_continue = 0;

	assert(!info->signal);
	/* Only a throttled flip still owes its client a completion event. */
	info->signal = info->type == FLIP_THROTTLE && info->draw;

	/* All outputs are off; nothing to flip onto. */
	if (info->sna->mode.front_active == 0)
		return false;

	/* The front buffer was replaced behind our back; our bo is stale. */
	if (bo != sna_pixmap(info->sna->front)->gpu_bo)
		return false;

	assert(!info->queued);
	if (!sna_page_flip(info->sna, bo, sna_dri2_flip_handler, info))
		return false;

	DBG(("%s: queued flip=%p\n", __FUNCTION__, info));
	assert(info->sna->dri2.flip_pending == NULL ||
	       info->sna->dri2.flip_pending == info);
	info->sna->dri2.flip_pending = info;
	info->queued = true;

	return true;
}
2859
2860static bool
2861sna_dri2_flip_keepalive(struct sna_dri2_event *info)
2862{
2863	DBG(("%s(keepalive?=%d)\n", __FUNCTION__, info->keepalive-1));
2864	assert(info->keepalive > 0);
2865	if (!--info->keepalive)
2866		return false;
2867
2868	if (info->draw == NULL)
2869		return false;
2870
2871	DBG(("%s: marking next flip as complete\n", __FUNCTION__));
2872	info->flip_continue = FLIP_COMPLETE;
2873	return sna_dri2_flip_continue(info);
2874}
2875
/* Start the pageflip that was queued behind a still-pending flip; if the
 * window can no longer flip, fall back to a vsync'ed blit.
 */
static void chain_flip(struct sna *sna)
{
	struct sna_dri2_event *chain = sna->dri2.flip_pending;

	assert(chain->type == FLIP);
	DBG(("%s: chaining type=%d, cancelled?=%d window=%ld\n",
	     __FUNCTION__, chain->type, chain->draw == NULL, chain->draw ? chain->draw->id : 0));

	sna->dri2.flip_pending = NULL;
	/* The drawable was destroyed whilst the flip was queued. */
	if (chain->draw == NULL) {
		sna_dri2_event_free(chain);
		return;
	}

	assert(chain == dri2_chain(chain->draw));
	assert(!chain->queued);

	if (can_flip(sna, chain->draw, chain->front, chain->back, chain->crtc) &&
	    sna_dri2_flip(chain)) {
		DBG(("%s: performing chained flip\n", __FUNCTION__));
	} else {
		DBG(("%s: emitting chained vsync'ed blit\n", __FUNCTION__));
		__sna_dri2_copy_event(chain, DRI2_SYNC);

		/* With server-side triple buffering, defer the completion
		 * event to the next vblank; otherwise complete immediately.
		 */
		if (xorg_can_triple_buffer()) {
			chain->type = SWAP_COMPLETE;
			assert(chain->signal);
			if (sna_next_vblank(chain))
				return;
		}

		DBG(("%s: fake triple buffering (or vblank wait failed), unblocking client\n", __FUNCTION__));
		frame_swap_complete(chain, DRI2_BLIT_COMPLETE);
		sna_dri2_event_free(chain);
	}
}
2912
/* Completion handler for a DRM pageflip: signal the client where
 * required, continue or terminate the flip chain, and kick off any flip
 * that was queued behind this one.
 */
static void sna_dri2_flip_event(struct sna_dri2_event *flip)
{
	struct sna *sna = flip->sna;

	DBG(("%s flip=%p (pipe=%d, event=%d, queued?=%d)\n", __FUNCTION__, flip, flip->pipe, flip->type, flip->queued));
	if (!flip->queued) /* pageflip died whilst being queued */
		return;
	flip->queued = false;

	if (sna->dri2.flip_pending == flip)
		sna->dri2.flip_pending = NULL;

	/* We assume our flips arrive in order, so we don't check the frame */
	switch (flip->type) {
	case FLIP:
		/* A plain flip: complete the swap and fire any queued flip. */
		if (flip->signal) {
			DBG(("%s: swap complete, unblocking client\n", __FUNCTION__));
			frame_swap_complete(flip, DRI2_FLIP_COMPLETE);
		}
		sna_dri2_event_free(flip);

		if (sna->dri2.flip_pending)
			chain_flip(sna);
		break;

	case FLIP_THROTTLE:
		if (flip->signal) {
			DBG(("%s: triple buffer swap complete, unblocking client\n", __FUNCTION__));
			frame_swap_complete(flip, DRI2_FLIP_COMPLETE);
		}
		/* fall through */
	case FLIP_COMPLETE:
		assert(!flip->signal);
		if (sna->dri2.flip_pending) {
			DBG(("%s: pending flip\n", __FUNCTION__));
			sna_dri2_event_free(flip);
			chain_flip(sna);
		} else if (!flip->flip_continue) {
			DBG(("%s: flip chain complete\n", __FUNCTION__));
			/* End of the chain: either keep the event alive for
			 * one more frame, or hand over to a chained swap.
			 */
			if (!sna_dri2_flip_keepalive(flip)) {
				if (flip->chain) {
					sna_dri2_remove_event(flip);
					chain_swap(flip->chain);
				}

				sna_dri2_event_free(flip);
			}
		} else if (!sna_dri2_flip_continue(flip)) {
			DBG(("%s: no longer able to flip\n", __FUNCTION__));
			/* Flipping failed: emit a copy instead and complete. */
			if (flip->draw != NULL)
				__sna_dri2_copy_event(flip, 0);
			if (flip->signal) {
				DBG(("%s: fake triple buffering, unblocking client\n", __FUNCTION__));
				frame_swap_complete(flip, DRI2_BLIT_COMPLETE);
			}
			sna_dri2_event_free(flip);
		}
		break;

	default: /* Unknown type */
		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
			   "%s: unknown vblank event received\n", __func__);
		sna_dri2_event_free(flip);
		if (sna->dri2.flip_pending)
			chain_flip(sna);
		break;
	}
}
2980
2981static int
2982sna_query_vblank(struct sna *sna, xf86CrtcPtr crtc, union drm_wait_vblank *vbl)
2983{
2984	VG_CLEAR(*vbl);
2985	vbl->request.type =
2986		_DRM_VBLANK_RELATIVE | pipe_select(sna_crtc_pipe(crtc));
2987	vbl->request.sequence = 0;
2988
2989	return drmIoctl(sna->kgem.fd, DRM_IOCTL_WAIT_VBLANK, vbl);
2990}
2991
2992static uint64_t
2993get_current_msc(struct sna *sna, DrawablePtr draw, xf86CrtcPtr crtc)
2994{
2995	union drm_wait_vblank vbl;
2996	uint64_t ret;
2997
2998	if (sna_query_vblank(sna, crtc, &vbl) == 0)
2999		ret = sna_crtc_record_vblank(crtc, &vbl);
3000	else
3001		ret = sna_crtc_last_swap(crtc)->msc;
3002
3003	return draw_current_msc(draw, crtc, ret);
3004}
3005
#if defined(CHECK_FOR_COMPOSITOR)
/* Predicate for LookupClientResourceComplex(): report every candidate as
 * a match, so the lookup answers "does the client own any resource of
 * this type at all?".
 */
static Bool find(pointer value, XID id, pointer cdata)
{
	return TRUE;
}
#endif
3012
/* Select the flip mode for a client's swap: plain FLIP when triple
 * buffering is disabled, async flips when requested and supported,
 * FLIP_THROTTLE when the server can triple buffer, and otherwise a
 * compositor heuristic deciding between FLIP and fake triple buffering.
 */
static int use_triple_buffer(struct sna *sna, ClientPtr client, bool async)
{
	if ((sna->flags & SNA_TRIPLE_BUFFER) == 0) {
		DBG(("%s: triple buffer disabled, using FLIP\n", __FUNCTION__));
		return FLIP;
	}

	if (async) {
		DBG(("%s: running async, using %s\n", __FUNCTION__,
		     sna->flags & SNA_HAS_ASYNC_FLIP ? "FLIP_ASYNC" : "FLIP_COMPLETE"));
		return sna->flags & SNA_HAS_ASYNC_FLIP ? FLIP_ASYNC : FLIP_COMPLETE;
	}

	if (xorg_can_triple_buffer()) {
		DBG(("%s: triple buffer enabled, using FLIP_THROTTLE\n", __FUNCTION__));
		return FLIP_THROTTLE;
	}

#if defined(CHECK_FOR_COMPOSITOR)
	/* Hack: Disable triple buffering for compositors */
	{
		struct sna_client *priv = sna_client(client);
		/* Cache the detection on the client: owning any composite
		 * client-window resource marks it as a compositor.
		 */
		if (priv->is_compositor == 0)
			priv->is_compositor =
				LookupClientResourceComplex(client,
							    CompositeClientWindowType+1,
							    find, NULL) ? FLIP : FLIP_COMPLETE;

		DBG(("%s: fake triple buffer enabled?=%d using %s\n", __FUNCTION__,
		     priv->is_compositor != FLIP, priv->is_compositor == FLIP ? "FLIP" : "FLIP_COMPLETE"));
		return priv->is_compositor;
	}
#else
	DBG(("%s: fake triple buffer enabled, using FLIP_COMPLETE\n", __FUNCTION__));
	return FLIP_COMPLETE;
#endif
}
3050
/* Decide whether a scheduled swap should execute immediately rather than
 * wait on a vblank, applying the DRI2 divisor/remainder rules and
 * updating *target_msc and *current_msc for the caller.
 */
static bool immediate_swap(struct sna *sna,
			   DrawablePtr draw,
			   xf86CrtcPtr crtc,
			   uint64_t *target_msc,
			   uint64_t divisor,
			   uint64_t remainder,
			   uint64_t *current_msc)
{
	/*
	 * If divisor is zero, or current_msc is smaller than target_msc
	 * we just need to make sure target_msc passes before initiating
	 * the swap.
	 */
	if (divisor == 0) {
		*current_msc = -1;

		if (sna->flags & SNA_NO_WAIT) {
			DBG(("%s: yes, waits are disabled\n", __FUNCTION__));
			return true;
		}

		/* Only query the hardware when a deadline was specified. */
		if (*target_msc)
			*current_msc = get_current_msc(sna, draw, crtc);

		/* A swap due on the very next vblank counts as immediate. */
		DBG(("%s: current_msc=%ld, target_msc=%ld -- %s\n",
		     __FUNCTION__, (long)*current_msc, (long)*target_msc,
		     (*current_msc >= *target_msc - 1) ? "yes" : "no"));
		return *current_msc >= *target_msc - 1;
	}

	DBG(("%s: explicit waits requests, divisor=%ld\n",
	     __FUNCTION__, (long)divisor));
	*current_msc = get_current_msc(sna, draw, crtc);
	if (*current_msc >= *target_msc) {
		DBG(("%s: missed target, queueing event for next: current=%lld, target=%lld, divisor=%lld, remainder=%lld\n",
		     __FUNCTION__,
		     (long long)*current_msc,
		     (long long)*target_msc,
		     (long long)divisor,
		     (long long)remainder));

		/* Next MSC after current satisfying msc % divisor == remainder. */
		*target_msc = *current_msc + remainder - *current_msc % divisor;
		if (*target_msc <= *current_msc)
			*target_msc += divisor;
	}

	DBG(("%s: target_msc=%lld, current_msc=%lld, immediate?=%d\n",
	     __FUNCTION__, (long long)*target_msc, (long long)*current_msc,
	     *current_msc >= *target_msc - 1));
	return *current_msc >= *target_msc - 1;
}
3102
/* Schedule a pageflip for the swap, either immediately (possibly
 * piggy-backing on a flip already in flight) or from a vblank event.
 * Returns false if no flip could be queued so the caller can fall back
 * to a blit.  On success *target_msc is updated to the frame that will
 * be reported to the client.
 */
static bool
sna_dri2_schedule_flip(ClientPtr client, DrawablePtr draw, xf86CrtcPtr crtc,
		       DRI2BufferPtr front, DRI2BufferPtr back,
		       bool immediate, CARD64 *target_msc, CARD64 current_msc,
		       DRI2SwapEventPtr func, void *data)
{
	struct sna *sna = to_sna_from_drawable(draw);
	struct sna_dri2_event *info;

	if (immediate) {
		bool signal = false;
		info = sna->dri2.flip_pending;
		DBG(("%s: performing immediate swap on pipe %d, pending? %d, mode: %d, continuation? %d\n",
		     __FUNCTION__, sna_crtc_pipe(crtc),
		     info != NULL, info ? info->flip_continue : 0,
		     info && info->draw == draw));

		/* A flip for this drawable is already in flight: reuse its
		 * event, swapping in the new buffers, rather than queueing
		 * a second flip. */
		if (info && info->draw == draw) {
			assert(info->type != FLIP);
			assert(info->queued);
			assert(info->front != info->back);
			if (info->front != front) {
				assert(info->front != NULL);
				_sna_dri2_destroy_buffer(sna, draw, info->front);
				info->front = sna_dri2_reference_buffer(front);
			}
			if (info->back != back) {
				assert(info->back != NULL);
				_sna_dri2_destroy_buffer(sna, draw, info->back);
				info->back = sna_dri2_reference_buffer(back);
			}
			assert(info->front != info->back);
			DBG(("%s: executing xchg of pending flip: flip_continue=%d, keepalive=%d, chain?=%d\n", __FUNCTION__, info->flip_continue, info->keepalive, current_msc < *target_msc));
			sna_dri2_xchg(draw, front, back);
			info->keepalive = KEEPALIVE;
			if (xorg_can_triple_buffer() &&
			    current_msc < *target_msc) {
				/* Real triple buffering: chain the next flip
				 * off the pending completion. */
				DBG(("%s: chaining flip\n", __FUNCTION__));
				info->flip_continue = FLIP_THROTTLE;
				goto out;
			} else {
				/* Fake triple buffering: complete the swap
				 * to the client now and keep flipping. */
				info->flip_continue = FLIP_COMPLETE;
				signal = info->signal;
				assert(info->draw);
				info->signal = true;
				goto new_back;
			}
		}

		info = sna_dri2_add_event(sna, draw, client, crtc);
		if (info == NULL)
			return false;

		assert(info->crtc == crtc);
		info->event_complete = func;
		info->event_data = data;
		assert(info->draw);
		info->signal = true;

		assert(front != back);
		info->front = sna_dri2_reference_buffer(front);
		info->back = sna_dri2_reference_buffer(back);

		if (sna->dri2.flip_pending) {
			/* We need to first wait (one vblank) for the
			 * async flips to complete before this client
			 * can take over.
			 */
			DBG(("%s: queueing flip after pending completion\n",
			     __FUNCTION__));
			info->type = FLIP;
			sna->dri2.flip_pending = info;
			current_msc++;
		} else if (sna->mode.flip_active) {
			DBG(("%s: %d outstanding flips from old client, queueing\n",
			     __FUNCTION__, sna->mode.flip_active));
			goto queue;
		} else {
			info->type = use_triple_buffer(sna, client, *target_msc == 0);
			if (!sna_dri2_flip(info)) {
				DBG(("%s: flip failed, falling back\n", __FUNCTION__));
				info->signal = false;
				sna_dri2_event_free(info);
				return false;
			}
			assert(get_private(info->front)->bo->active_scanout);
		}

		/* Allow one extra outstanding swap when throttling. */
		swap_limit(draw, 1 + (info->type == FLIP_THROTTLE));
		if (info->type >= FLIP_COMPLETE) {
new_back:
			if (!xorg_can_triple_buffer())
				sna_dri2_get_back(sna, draw, back);
			DBG(("%s: fake triple buffering, unblocking client\n", __FUNCTION__));
			frame_swap_complete(info, DRI2_EXCHANGE_COMPLETE);
			assert(info->draw);
			info->signal = signal;
			if (info->type == FLIP_ASYNC)
				sna_dri2_event_free(info);
		}
out:
		DBG(("%s: target_msc=%llu\n", __FUNCTION__, current_msc + 1));
		*target_msc = current_msc + 1;
		return true;
	}

	/* Deferred path: also entered via goto from the immediate path
	 * when flips from a previous client are still outstanding. */
queue:
	if (KEEPALIVE > 1 && sna->dri2.flip_pending) {
		/* Curtail the pending flip chain so ours can start. */
		info = sna->dri2.flip_pending;
		info->keepalive = 1;
	}

	info = sna_dri2_add_event(sna, draw, client, crtc);
	if (info == NULL)
		return false;

	assert(info->crtc == crtc);
	info->event_complete = func;
	info->event_data = data;
	assert(info->draw);
	info->signal = true;
	info->type = FLIP;

	assert(front != back);
	info->front = sna_dri2_reference_buffer(front);
	info->back = sna_dri2_reference_buffer(back);

	if (*target_msc <= current_msc + 1 && sna_dri2_flip(info)) {
		*target_msc = current_msc + 1;
	} else {
		/* Account for 1 frame extra pageflip delay */
		if (!sna_wait_vblank(info,
				     draw_target_seq(draw, *target_msc - 1))) {
			info->signal = false;
			sna_dri2_event_free(info);
			return false;
		}
	}

	DBG(("%s: reported target_msc=%llu\n", __FUNCTION__, *target_msc));
	swap_limit(draw, 1);
	return true;
}
3246
3247static bool has_pending_events(struct sna *sna)
3248{
3249	struct pollfd pfd;
3250	pfd.fd = sna->kgem.fd;
3251	pfd.events = POLLIN;
3252	return poll(&pfd, 1, 0) == 1;
3253}
3254
3255/*
3256 * ScheduleSwap is responsible for requesting a DRM vblank event for the
3257 * appropriate frame.
3258 *
3259 * In the case of a blit (e.g. for a windowed swap) or buffer exchange,
3260 * the vblank requested can simply be the last queued swap frame + the swap
3261 * interval for the drawable.
3262 *
3263 * In the case of a page flip, we request an event for the last queued swap
3264 * frame + swap interval - 1, since we'll need to queue the flip for the frame
3265 * immediately following the received event.
3266 *
3267 * The client will be blocked if it tries to perform further GL commands
3268 * after queueing a swap, though in the Intel case after queueing a flip, the
3269 * client is free to queue more commands; they'll block in the kernel if
3270 * they access buffers busy with the flip.
3271 *
3272 * When the swap is complete, the driver should call into the server so it
3273 * can send any swap complete events that have been requested.
3274 */
static int
sna_dri2_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
		       DRI2BufferPtr back, CARD64 *target_msc, CARD64 divisor,
		       CARD64 remainder, DRI2SwapEventPtr func, void *data)
{
	struct sna *sna = to_sna_from_drawable(draw);
	xf86CrtcPtr crtc = NULL;
	struct sna_dri2_event *info = NULL;
	int type = DRI2_EXCHANGE_COMPLETE;
	CARD64 current_msc;
	bool immediate;

	DBG(("%s: draw=%lu %dx%d, pixmap=%ld %dx%d, back=%u (refs=%d/%d, flush=%d, active=%d) , front=%u (refs=%d/%d, flush=%d, active=%d)\n",
	     __FUNCTION__,
	     (long)draw->id, draw->width, draw->height,
	     get_drawable_pixmap(draw)->drawable.serialNumber,
	     get_drawable_pixmap(draw)->drawable.width,
	     get_drawable_pixmap(draw)->drawable.height,
	     get_private(back)->bo->handle,
	     get_private(back)->refcnt,
	     get_private(back)->bo->refcnt,
	     get_private(back)->bo->flush,
	     get_private(back)->bo->active_scanout,
	     get_private(front)->bo->handle,
	     get_private(front)->refcnt,
	     get_private(front)->bo->refcnt,
	     get_private(front)->bo->flush,
	     get_private(front)->bo->active_scanout));

	DBG(("%s(target_msc=%llu, divisor=%llu, remainder=%llu)\n",
	     __FUNCTION__,
	     (long long)*target_msc,
	     (long long)divisor,
	     (long long)remainder));

	assert(front != back);
	assert(get_private(front) != get_private(back));

	assert(get_private(front)->refcnt);
	assert(get_private(back)->refcnt);

	assert(get_private(back)->bo != get_private(front)->bo);
	assert(get_private(front)->bo->refcnt);
	assert(get_private(back)->bo->refcnt);

	assert(get_private(front)->bo->active_scanout);
	assert(!get_private(back)->bo->active_scanout);

	/* The DRI2 front buffer no longer matches the window's pixmap
	 * (e.g. after the window was redirected); nothing to show. */
	if (get_private(front)->pixmap != get_drawable_pixmap(draw)) {
		DBG(("%s: decoupled DRI2 front pixmap=%ld, actual pixmap=%ld\n",
		     __FUNCTION__,
		     get_private(front)->pixmap->drawable.serialNumber,
		     get_drawable_pixmap(draw)->drawable.serialNumber));
		goto skip;
	}

	if (get_private(back)->stale) {
		DBG(("%s: stale back buffer\n", __FUNCTION__));
		goto skip;
	}

	if (draw->type != DRAWABLE_PIXMAP) {
		WindowPtr win = (WindowPtr)draw;
		struct dri2_window *priv = dri2_window(win);

		/* Prefer the per-window front buffer if one was created. */
		if (priv->front) {
			front = priv->front;
			assert(front->attachment == DRI2BufferFrontLeft);
			assert(get_private(front)->refcnt);
			assert(get_private(front)->pixmap == get_drawable_pixmap(draw));
		}

		/* Entirely clipped away: nothing will be visible. */
		if (win->clipList.extents.x2 <= win->clipList.extents.x1 ||
		    win->clipList.extents.y2 <= win->clipList.extents.y1) {
			DBG(("%s: window clipped (%d, %d), (%d, %d)\n",
			     __FUNCTION__,
			     win->clipList.extents.x1,
			     win->clipList.extents.y1,
			     win->clipList.extents.x2,
			     win->clipList.extents.y2));
			goto skip;
		}
	}

	DBG(("%s: using front handle=%d, active_scanout?=%d, flush?=%d\n", __FUNCTION__, get_private(front)->bo->handle, get_private(front)->bo->active_scanout, sna_pixmap_from_drawable(draw)->flush));
	assert(get_private(front)->bo->active_scanout);
	assert(sna_pixmap_from_drawable(draw)->flush);

	/* Drawable not displayed... just complete the swap */
	if ((sna->flags & SNA_NO_WAIT) == 0)
		crtc = sna_dri2_get_crtc(draw);
	if (crtc == NULL) {
		DBG(("%s: off-screen, immediate update\n", __FUNCTION__));
		goto blit;
	}

	assert(draw->type != DRAWABLE_PIXMAP);

	/* Drain completed flip/vblank events so our view of the swap
	 * chain is current before deciding how to schedule. */
	while (dri2_chain(draw) && has_pending_events(sna)) {
		DBG(("%s: flushing pending events\n", __FUNCTION__));
		sna_mode_wakeup(sna);
	}

	immediate = immediate_swap(sna, draw, crtc,
				   target_msc, divisor, remainder,
				   &current_msc);

	if (can_flip(sna, draw, front, back, crtc) &&
	    sna_dri2_schedule_flip(client, draw, crtc, front, back,
				  immediate, target_msc, current_msc,
				  func, data))
		return TRUE;

	/* Flip not possible: fall back to a copy, synchronised to
	 * vblank where required. */
	info = sna_dri2_add_event(sna, draw, client, crtc);
	if (!info)
		goto blit;

	assert(info->crtc == crtc);
	info->event_complete = func;
	info->event_data = data;
	assert(info->draw);
	info->signal = true;

	assert(front != back);
	info->front = sna_dri2_reference_buffer(front);
	info->back = sna_dri2_reference_buffer(back);

	if (immediate) {
		bool sync = current_msc < *target_msc;
		sna_dri2_immediate_blit(sna, info, sync);
		*target_msc = current_msc + sync;
		DBG(("%s: reported target_msc=%llu\n",
		     __FUNCTION__, *target_msc));
		return TRUE;
	}

	info->type = SWAP;
	if (*target_msc <= current_msc + 1) {
		DBG(("%s: performing blit before queueing\n", __FUNCTION__));
		__sna_dri2_copy_event(info, DRI2_SYNC);
		info->type = SWAP_COMPLETE;
		if (!sna_next_vblank(info))
			goto fake;

		DBG(("%s: reported target_msc=%llu\n",
		     __FUNCTION__, *target_msc));
		*target_msc = current_msc + 1;
		swap_limit(draw, 2);
	} else {
		/* Wait one frame early so the copy lands on target_msc. */
		if (!sna_wait_vblank(info,
				     draw_target_seq(draw, *target_msc - 1)))
			goto blit;

		DBG(("%s: reported target_msc=%llu (in)\n",
		     __FUNCTION__, *target_msc));
		swap_limit(draw, 1);
	}

	return TRUE;

blit:
	/* Unsynchronised fallback: exchange or copy right now. */
	DBG(("%s -- blit\n", __FUNCTION__));
	if (can_xchg(sna, draw, front, back)) {
		sna_dri2_xchg(draw, front, back);
	} else {
		__sna_dri2_copy_region(sna, draw, NULL, back, front, 0);
		front->flags = back->flags;
		type = DRI2_BLIT_COMPLETE;
	}
	if (draw->type == DRAWABLE_PIXMAP)
		goto fake;
skip:
	DBG(("%s: unable to show frame, unblocking client\n", __FUNCTION__));
	if (crtc == NULL && (sna->flags & SNA_NO_WAIT) == 0)
		crtc = sna_primary_crtc(sna);
	if (crtc && sna_crtc_is_on(crtc)) {
		/* Deliver the completion from the next vblank so the
		 * client is still throttled to the refresh rate. */
		if (info == NULL)
			info = sna_dri2_add_event(sna, draw, client, crtc);
		if (info != dri2_chain(draw))
			goto fake;

		assert(info->crtc == crtc);

		info->type = SWAP_COMPLETE;
		info->event_complete = func;
		info->event_data = data;
		assert(info->draw);
		info->signal = true;

		if (info->front == NULL)
			info->front = sna_dri2_reference_buffer(front);
		if (info->back == NULL)
			info->back = sna_dri2_reference_buffer(back);

		if (!sna_next_vblank(info))
			goto fake;

		swap_limit(draw, 1);
	} else {
fake:
		/* XXX Use a Timer to throttle the client? */
		fake_swap_complete(sna, client, draw, crtc, type, func, data);
		if (info) {
			assert(info->draw);
			info->signal = false;
			sna_dri2_event_free(info);
		}
	}
	DBG(("%s: reported target_msc=%llu (in)\n", __FUNCTION__, *target_msc));
	return TRUE;
}
3486
3487/*
3488 * Get current frame count and frame count timestamp, based on drawable's
3489 * crtc.
3490 */
3491static int
3492sna_dri2_get_msc(DrawablePtr draw, CARD64 *ust, CARD64 *msc)
3493{
3494	struct sna *sna = to_sna_from_drawable(draw);
3495	xf86CrtcPtr crtc = sna_dri2_get_crtc(draw);
3496	const struct ust_msc *swap;
3497	union drm_wait_vblank vbl;
3498
3499	DBG(("%s(draw=%ld, pipe=%d)\n", __FUNCTION__, draw->id,
3500	     crtc ? sna_crtc_pipe(crtc) : -1));
3501
3502	/* Drawable not displayed, make up a *monotonic* value */
3503	if (crtc == NULL)
3504		crtc = sna_primary_crtc(sna);
3505	if (crtc == NULL)
3506		return FALSE;
3507
3508	if (sna_query_vblank(sna, crtc, &vbl) == 0)
3509		sna_crtc_record_vblank(crtc, &vbl);
3510
3511	swap = sna_crtc_last_swap(crtc);
3512	*msc = draw_current_msc(draw, crtc, swap->msc);
3513	*ust = ust64(swap->tv_sec, swap->tv_usec);
3514	DBG(("%s: msc=%llu [raw=%llu], ust=%llu\n", __FUNCTION__,
3515	     (long long)*msc, swap->msc, (long long)*ust));
3516	return TRUE;
3517}
3518
3519/*
3520 * Request a DRM event when the requested conditions will be satisfied.
3521 *
3522 * We need to handle the event and ask the server to wake up the client when
3523 * we receive it.
3524 */
static int
sna_dri2_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
			   CARD64 divisor, CARD64 remainder)
{
	struct sna *sna = to_sna_from_drawable(draw);
	struct sna_dri2_event *info = NULL;
	xf86CrtcPtr crtc;
	CARD64 current_msc;
	const struct ust_msc *swap;

	crtc = sna_dri2_get_crtc(draw);
	DBG(("%s(pipe=%d, target_msc=%llu, divisor=%llu, rem=%llu)\n",
	     __FUNCTION__, crtc ? sna_crtc_pipe(crtc) : -1,
	     (long long)target_msc,
	     (long long)divisor,
	     (long long)remainder));

	/* Drawable not visible, return immediately */
	if (crtc == NULL)
		crtc = sna_primary_crtc(sna);
	if (crtc == NULL)
		return FALSE;

	current_msc = get_current_msc(sna, draw, crtc);

	/* If target_msc already reached or passed, set it to
	 * current_msc to ensure we return a reasonable value back
	 * to the caller. This keeps the client from continually
	 * sending us MSC targets from the past by forcibly updating
	 * their count on this call.
	 */
	if (divisor == 0 && current_msc >= target_msc)
		goto out_complete;

	info = sna_dri2_add_event(sna, draw, client, crtc);
	if (!info)
		goto out_complete;

	assert(info->crtc == crtc);
	info->type = WAITMSC;

	/*
	 * If divisor is zero, or current_msc is smaller than target_msc,
	 * we just need to make sure target_msc passes before waking up the
	 * client. Otherwise, compute the next msc to match divisor/remainder.
	 */
	if (divisor && current_msc >= target_msc) {
		DBG(("%s: missed target, queueing event for next: current=%lld, target=%lld, divisor=%lld, remainder=%lld\n",
		     __FUNCTION__,
		     (long long)current_msc,
		     (long long)target_msc,
		     (long long)divisor,
		     (long long)remainder));
		/* Advance to the next frame with msc % divisor == remainder,
		 * strictly in the future. */
		target_msc = current_msc + remainder - current_msc % divisor;
		if (target_msc <= current_msc)
			target_msc += divisor;
	}

	if (!sna_wait_vblank(info, draw_target_seq(draw, target_msc)))
		goto out_free_info;

	/* Suspend the client until the vblank event arrives. */
	DRI2BlockClient(client, draw);
	return TRUE;

out_free_info:
	sna_dri2_event_free(info);
out_complete:
	/* Complete at once using the last recorded swap time. */
	swap = sna_crtc_last_swap(crtc);
	DRI2WaitMSCComplete(client, draw,
			    draw_current_msc(draw, crtc, swap->msc),
			    swap->tv_sec, swap->tv_usec);
	return TRUE;
}
#else
/* No-op stubs when the DRI2 scheduling support above is compiled out
 * (the matching #if begins earlier in the file). */
void sna_dri2_destroy_window(WindowPtr win) { }
void sna_dri2_decouple_window(WindowPtr win) { }
#endif
3602
3603static bool has_i830_dri(void)
3604{
3605	return access(DRI_DRIVER_PATH "/i830_dri.so", R_OK) == 0;
3606}
3607
/* Canonicalize one character for option-name comparison: lower-case it.
 * The cast through unsigned char is required — passing a negative plain
 * char to the <ctype.h> classifiers is undefined behaviour. */
static char
namecmp_canon(char c)
{
	return isupper((unsigned char)c) ? tolower((unsigned char)c) : c;
}

/*
 * Compare two option names, case-insensitively and ignoring any '_',
 * ' ' and '\t' separators.  Returns 0 on a match; a NULL/empty s1
 * against a non-empty s2 yields 1 (note the asymmetry, preserved for
 * compatibility), otherwise the difference of the first mismatching
 * canonicalized characters.
 */
static int
namecmp(const char *s1, const char *s2)
{
	char c1, c2;

	if (!s1 || *s1 == 0) {
		if (!s2 || *s2 == 0)
			return 0;
		else
			return 1;
	}

	while (*s1 == '_' || *s1 == ' ' || *s1 == '\t')
		s1++;

	while (*s2 == '_' || *s2 == ' ' || *s2 == '\t')
		s2++;

	c1 = namecmp_canon(*s1);
	c2 = namecmp_canon(*s2);
	while (c1 == c2) {
		if (c1 == '\0')
			return 0;

		s1++;
		while (*s1 == '_' || *s1 == ' ' || *s1 == '\t')
			s1++;

		s2++;
		while (*s2 == '_' || *s2 == ' ' || *s2 == '\t')
			s2++;

		c1 = namecmp_canon(*s1);
		c2 = namecmp_canon(*s2);
	}

	return c1 - c2;
}
3646
/* Is *str a plain on/off level rather than a named DRI choice?
 * Recognizes empty strings, boolean keywords and bare numbers as
 * levels (returns true); for "N:name" consumes the "N:" prefix and
 * returns false so the caller sees just the name.
 */
static bool is_level(const char **str)
{
	static const char * const booleans[] = {
		"on", "true", "yes", "0", "off", "false", "no",
	};
	const char *s = *str;
	unsigned i, val;
	char *end;

	if (s == NULL || *s == '\0')
		return true;

	for (i = 0; i < sizeof(booleans)/sizeof(booleans[0]); i++)
		if (namecmp(s, booleans[i]) == 0)
			return true;

	/* Note: deliberately truncated to unsigned, matching historic
	 * behaviour for out-of-range values. */
	val = strtoul(s, &end, 0);
	if (val && *end == '\0')
		return true;
	if (val && *end == ':')
		*str = end + 1;
	return false;
}
3679
/* Fetch the user's Option "DRI" string, if the server is new enough
 * to provide xf86GetOptValString; otherwise behave as if unset. */
static const char *options_get_dri(struct sna *sna)
{
#if XORG_VERSION_CURRENT >= XORG_VERSION_NUMERIC(1,7,99,901,0)
	return xf86GetOptValString(sna->Options, OPTION_DRI);
#else
	return NULL;
#endif
}
3688
3689static const char *dri_driver_name(struct sna *sna)
3690{
3691	const char *s = options_get_dri(sna);
3692
3693	if (is_level(&s)) {
3694		if (sna->kgem.gen < 030)
3695			return has_i830_dri() ? "i830" : "i915";
3696		else if (sna->kgem.gen < 040)
3697			return "i915";
3698		else
3699			return "i965";
3700	}
3701
3702	return s;
3703}
3704
/* Register this driver with the server's DRI2 extension, advertising
 * the highest DRI2InfoRec version both sides support.  Returns the
 * result of DRI2ScreenInit (false on failure).
 */
bool sna_dri2_open(struct sna *sna, ScreenPtr screen)
{
	DRI2InfoRec info;
	int major = 1, minor = 0;
#if DRI2INFOREC_VERSION >= 4
	const char *driverNames[2];
#endif

	DBG(("%s()\n", __FUNCTION__));

	if (wedged(sna)) {
		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
			   "loading DRI2 whilst acceleration is disabled.\n");
	}

	if (xf86LoaderCheckSymbol("DRI2Version"))
		DRI2Version(&major, &minor);

	if (minor < 1) {
		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
			   "DRI2 requires DRI2 module version 1.1.0 or later\n");
		return false;
	}

	memset(&info, '\0', sizeof(info));
	info.fd = sna->kgem.fd;
	info.driverName = dri_driver_name(sna);
	info.deviceName = intel_get_master_name(sna->dev);

	DBG(("%s: loading dri driver '%s' [gen=%d] for device '%s'\n",
	     __FUNCTION__, info.driverName, sna->kgem.gen, info.deviceName));

#if DRI2INFOREC_VERSION == 2
	/* The ABI between 2 and 3 was broken so we could get rid of
	 * the multi-buffer alloc functions.  Make sure we indicate the
	 * right version so DRI2 can reject us if it's version 3 or above. */
	info.version = 2;
#else
	info.version = 3;
#endif
	info.CreateBuffer = sna_dri2_create_buffer;
	info.DestroyBuffer = sna_dri2_destroy_buffer;

	info.CopyRegion = sna_dri2_copy_region;
#if DRI2INFOREC_VERSION >= 4
	/* Version 4 adds swap scheduling; also advertise the va_gl
	 * name as a second supported client driver. */
	info.version = 4;
	info.ScheduleSwap = sna_dri2_schedule_swap;
	info.GetMSC = sna_dri2_get_msc;
	info.ScheduleWaitMSC = sna_dri2_schedule_wait_msc;
	info.numDrivers = 2;
	info.driverNames = driverNames;
	driverNames[0] = info.driverName;
	driverNames[1] = "va_gl";
#endif

#if DRI2INFOREC_VERSION >= 6
	/* Version 6 adds swap-limit validation, needed for real
	 * (server-side) triple buffering. */
	if (xorg_can_triple_buffer()) {
		DBG(("%s: enabling Xorg triple buffering\n", __FUNCTION__));
		info.version = 6;
		info.SwapLimitValidate = sna_dri2_swap_limit_validate;
		info.ReuseBufferNotify = sna_dri2_reuse_buffer;
	}
#endif

#if USE_ASYNC_SWAP
	DBG(("%s: enabled async swap and buffer age\n", __FUNCTION__));
	info.version = 10;
	info.scheduleSwap0 = 1;
	info.bufferAge = 1;
#endif

	return DRI2ScreenInit(screen, &info);
}
3778
/* Tear down the DRI2 extension state for this screen. */
void sna_dri2_close(struct sna *sna, ScreenPtr screen)
{
	DBG(("%s()\n", __FUNCTION__));
	DRI2CloseScreen(screen);
}
3784