/* sna_dri2.c revision cbdaa46f */
1/**************************************************************************
2
3Copyright 2001 VA Linux Systems Inc., Fremont, California.
4Copyright © 2002 by David Dawes
5
6All Rights Reserved.
7
8Permission is hereby granted, free of charge, to any person obtaining a
9copy of this software and associated documentation files (the "Software"),
10to deal in the Software without restriction, including without limitation
11on the rights to use, copy, modify, merge, publish, distribute, sub
12license, and/or sell copies of the Software, and to permit persons to whom
13the Software is furnished to do so, subject to the following conditions:
14
15The above copyright notice and this permission notice (including the next
16paragraph) shall be included in all copies or substantial portions of the
17Software.
18
19THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22ATI, VA LINUX SYSTEMS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
23DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27**************************************************************************/
28
29/*
30 * Authors: Jeff Hartmann <jhartmann@valinux.com>
31 *          David Dawes <dawes@xfree86.org>
32 *          Keith Whitwell <keith@tungstengraphics.com>
33 */
34
35#ifdef HAVE_CONFIG_H
36#include "config.h"
37#endif
38
39#include <errno.h>
40#include <time.h>
41#include <string.h>
42#include <unistd.h>
43#include <poll.h>
44
45#include "sna.h"
46#include "intel_options.h"
47
48#include <xf86drm.h>
49#include <i915_drm.h>
50#include <dri2.h>
51#if XORG_VERSION_CURRENT >= XORG_VERSION_NUMERIC(1,12,99,901,0) && defined(COMPOSITE)
52#include <compositeext.h>
53#define CHECK_FOR_COMPOSITOR
54#endif
55
56#define DBG_CAN_FLIP 1
57#define DBG_CAN_XCHG 1
58
59#define DBG_FORCE_COPY -1 /* KGEM_BLT or KGEM_3D */
60
61#if DRI2INFOREC_VERSION < 2
62#error DRI2 version supported by the Xserver is too old
63#endif
64
/* Take an additional reference on a kgem bo; thin alias for
 * kgem_bo_reference() used to keep the call sites terse.
 */
static inline struct kgem_bo *ref(struct kgem_bo *bo)
{
	return kgem_bo_reference(bo);
}
69
/* Driver-private state stored immediately after each DRI2Buffer2Rec
 * (see get_private() and the single calloc in sna_dri2_create_buffer).
 */
struct sna_dri2_private {
	PixmapPtr pixmap;	/* non-NULL only for front-left buffers bound to a pixmap */
	struct kgem_bo *bo;	/* backing storage, exported to the client via flink */
	DRI2Buffer2Ptr proxy;	/* buffer we borrow our bo from, if any */
	bool stale;		/* back buffer not redrawn since the last swap */
	uint32_t size;		/* packed (height << 16 | width) at creation time */
	int refcnt;		/* shared-buffer reference count */
};
78
/* The private record is allocated in the same block, immediately after
 * the DRI2Buffer2Rec itself, so it is located by stepping one record
 * past the buffer pointer.
 */
static inline struct sna_dri2_private *
get_private(void *buffer)
{
	return (struct sna_dri2_private *)((DRI2Buffer2Ptr)buffer+1);
}
84
85#if DRI2INFOREC_VERSION >= 4
/* Kind of in-flight DRI2 request tracked by a sna_dri2_event.
 * NOTE(review): the SWAP_* variants appear to cover blit-based swaps and
 * the FLIP_* variants pageflip-based swaps -- confirm against the event
 * handlers, which are outside this chunk.
 */
enum event_type {
	WAITMSC = 0,
	SWAP,
	SWAP_WAIT,
	SWAP_THROTTLE,
	FLIP,
	FLIP_THROTTLE,
	FLIP_COMPLETE,
	FLIP_ASYNC,
};
96
/* A spare back-buffer kept on an event's cache list for recycling
 * (see sna_dri2_get_back).
 */
struct dri_bo {
	struct list link;	/* entry in sna_dri2_event.cache */
	struct kgem_bo *bo;	/* NULL once the slot has been claimed */
	uint32_t name;		/* flink name matching bo */
};
102
/* A queued DRI2 request (wait-for-msc, swap or flip) attached to a
 * drawable. Events on the same drawable are chained in submission
 * order via ->chain.
 */
struct sna_dri2_event {
	struct sna *sna;
	DrawablePtr draw;
	ClientPtr client;
	enum event_type type;
	xf86CrtcPtr crtc;	/* CRTC the drawable is synchronised against */
	int pipe;
	bool queued;		/* submitted to the kernel, awaiting its vblank/flip event */

	/* for swaps & flips only */
	DRI2SwapEventPtr event_complete;	/* client completion callback */
	void *event_data;
	DRI2BufferPtr front;
	DRI2BufferPtr back;
	struct kgem_bo *bo;	/* fence bo recording the copy, if any */

	struct sna_dri2_event *chain;	/* next pending event on this drawable */

	struct list cache;	/* spare struct dri_bo back buffers for reuse */
	struct list link;

	int mode;	/* NOTE(review): flip-mode selector -- confirm in flip handlers */
};
126
127static void sna_dri2_flip_event(struct sna_dri2_event *flip);
128
129static void
130sna_dri2_get_back(struct sna *sna,
131		  DrawablePtr draw,
132		  DRI2BufferPtr back,
133		  struct sna_dri2_event *info)
134{
135	struct kgem_bo *bo;
136	uint32_t name;
137	bool reuse;
138
139	DBG(("%s: draw size=%dx%d, buffer size=%dx%d\n",
140	     __FUNCTION__, draw->width, draw->height,
141	     get_private(back)->size & 0xffff, get_private(back)->size >> 16));
142	reuse = (draw->height << 16 | draw->width) == get_private(back)->size;
143	if (reuse) {
144		bo = get_private(back)->bo;
145		assert(bo->refcnt);
146		DBG(("%s: back buffer handle=%d, scanout?=%d, refcnt=%d\n",
147					__FUNCTION__, bo->handle, bo->active_scanout, get_private(back)->refcnt));
148		if (bo->active_scanout == 0) {
149			DBG(("%s: reuse unattached back\n", __FUNCTION__));
150			get_private(back)->stale = false;
151			return;
152		}
153	}
154
155	bo = NULL;
156	if (info) {
157		struct dri_bo *c;
158		list_for_each_entry(c, &info->cache, link) {
159			if (c->bo && c->bo->scanout == 0) {
160				bo = c->bo;
161				name = c->name;
162				DBG(("%s: reuse cache handle=%d\n", __FUNCTION__, bo->handle));
163				list_move_tail(&c->link, &info->cache);
164				c->bo = NULL;
165			}
166		}
167	}
168	if (bo == NULL) {
169		DBG(("%s: allocating new backbuffer\n", __FUNCTION__));
170		bo = kgem_create_2d(&sna->kgem,
171				    draw->width, draw->height, draw->bitsPerPixel,
172				    get_private(back)->bo->tiling,
173				    get_private(back)->bo->scanout ? CREATE_SCANOUT : 0);
174		if (bo == NULL)
175			return;
176
177		name = kgem_bo_flink(&sna->kgem, bo);
178		if (name == 0) {
179			kgem_bo_destroy(&sna->kgem, bo);
180			return;
181		}
182	}
183	assert(bo->active_scanout == 0);
184
185	if (info && reuse) {
186		bool found = false;
187		struct dri_bo *c;
188
189		list_for_each_entry_reverse(c, &info->cache, link) {
190			if (c->bo == NULL) {
191				found = true;
192				_list_del(&c->link);
193				break;
194			}
195		}
196		if (!found)
197			c = malloc(sizeof(*c));
198		if (c != NULL) {
199			c->bo = ref(get_private(back)->bo);
200			c->name = back->name;
201			list_add(&c->link, &info->cache);
202			DBG(("%s: cacheing handle=%d (name=%d)\n", __FUNCTION__, c->bo->handle, c->name));
203		}
204	}
205
206	assert(bo != get_private(back)->bo);
207	kgem_bo_destroy(&sna->kgem, get_private(back)->bo);
208
209	get_private(back)->bo = bo;
210	get_private(back)->size = draw->height << 16 | draw->width;
211	back->pitch = bo->pitch;
212	back->name = name;
213
214	get_private(back)->stale = false;
215}
216
/* Per-window DRI2 bookkeeping, stored in slot 1 of the window private
 * (see dri2_window()).
 */
struct dri2_window {
	DRI2BufferPtr front;		/* cached front buffer for this window */
	struct sna_dri2_event *chain;	/* oldest pending swap/flip event */
	xf86CrtcPtr crtc;		/* NOTE(review): CRTC used for MSC translation -- confirm */
	int64_t msc_delta;		/* NOTE(review): offset between hw and client MSC -- confirm */
};
223
224static struct dri2_window *dri2_window(WindowPtr win)
225{
226	assert(win->drawable.type != DRAWABLE_PIXMAP);
227	return ((void **)__get_private(win, sna_window_key))[1];
228}
229
230static struct sna_dri2_event *
231dri2_chain(DrawablePtr d)
232{
233	struct dri2_window *priv = dri2_window((WindowPtr)d);
234	assert(priv != NULL);
235	return priv->chain;
236}
237inline static DRI2BufferPtr dri2_window_get_front(WindowPtr win)
238{
239	struct dri2_window *priv = dri2_window(win);
240	return priv ? priv->front : NULL;
241}
242#else
243inline static void *dri2_window_get_front(WindowPtr win) { return NULL; }
244#endif
245
246#if DRI2INFOREC_VERSION < 6
247
248#define xorg_can_triple_buffer() 0
249#define swap_limit(d, l) false
250#define mark_stale(b)
251
252#else
253
254#if XORG_VERSION_CURRENT >= XORG_VERSION_NUMERIC(1,15,99,904,0)
255/* Prime fixed for triple buffer support */
256#define xorg_can_triple_buffer() 1
257#elif XORG_VERSION_CURRENT < XORG_VERSION_NUMERIC(1,12,99,901,0)
258/* Before numGPUScreens was introduced */
259#define xorg_can_triple_buffer() 1
260#else
261/* Subject to crashers when combining triple buffering and Prime */
262inline static bool xorg_can_triple_buffer(void)
263{
264	return screenInfo.numGPUScreens == 0;
265}
266#endif
267
/* Flag the back buffer as not-yet-redrawn after a swap. */
static void
mark_stale(DRI2BufferPtr back)
{
	/* If we have reuse notifications, we can track when the
	 * client tries to present an old buffer (one that has not
	 * been updated since the last swap) and avoid showing the
	 * stale frame. (This is mostly useful for tracking down
	 * driver bugs!)
	 */
	get_private(back)->stale = xorg_can_triple_buffer();
}
279
280static Bool
281sna_dri2_swap_limit_validate(DrawablePtr draw, int swap_limit)
282{
283	DBG(("%s: swap limit set to %d\n", __FUNCTION__, swap_limit));
284	return swap_limit >= 1;
285}
286
/* DRI2 ReuseBufferNotify hook: invoked when the client re-requests a
 * buffer from a previous frame. For window back buffers this is the
 * point at which we may substitute a fresh, idle bo (see
 * sna_dri2_get_back); all other attachments are returned unchanged.
 */
static void
sna_dri2_reuse_buffer(DrawablePtr draw, DRI2BufferPtr buffer)
{
	DBG(("%s: reusing buffer pixmap=%ld, attachment=%d, handle=%d, name=%d\n",
	     __FUNCTION__, get_drawable_pixmap(draw)->drawable.serialNumber,
	     buffer->attachment, get_private(buffer)->bo->handle, buffer->name));
	assert(get_private(buffer)->refcnt);
	assert(get_private(buffer)->bo->refcnt > get_private(buffer)->bo->active_scanout);

	if (buffer->attachment == DRI2BufferBackLeft &&
	    draw->type != DRAWABLE_PIXMAP) {
		DBG(("%s: replacing back buffer\n", __FUNCTION__));
		sna_dri2_get_back(to_sna_from_drawable(draw), draw, buffer, dri2_chain(draw));

		/* The substituted bo must be idle and its flink name in sync. */
		assert(kgem_bo_flink(&to_sna_from_drawable(draw)->kgem, get_private(buffer)->bo) == buffer->name);
		assert(get_private(buffer)->bo->refcnt);
		assert(get_private(buffer)->bo->active_scanout == 0);
	}
}
306
307static bool swap_limit(DrawablePtr draw, int limit)
308{
309	if (!xorg_can_triple_buffer())
310		return false;
311
312	DBG(("%s: draw=%ld setting swap limit to %d\n", __FUNCTION__, (long)draw->id, limit));
313	DRI2SwapLimit(draw, limit);
314	return true;
315}
316#endif
317
318#if DRI2INFOREC_VERSION < 10
319#undef USE_ASYNC_SWAP
320#define USE_ASYNC_SWAP 0
321#endif
322
323#define COLOR_PREFER_TILING_Y 0
324
325/* Prefer to enable TILING_Y if this buffer will never be a
326 * candidate for pageflipping
327 */
328static uint32_t color_tiling(struct sna *sna, DrawablePtr draw)
329{
330	uint32_t tiling;
331
332	if (COLOR_PREFER_TILING_Y &&
333	    (draw->width  != sna->front->drawable.width ||
334	     draw->height != sna->front->drawable.height))
335		tiling = I915_TILING_Y;
336	else
337		tiling = I915_TILING_X;
338
339	return kgem_choose_tiling(&sna->kgem, -tiling,
340				  draw->width,
341				  draw->height,
342				  draw->bitsPerPixel);
343}
344
345static uint32_t other_tiling(struct sna *sna, DrawablePtr draw)
346{
347	/* XXX Can mix color X / depth Y? */
348	return kgem_choose_tiling(&sna->kgem,
349				  sna->kgem.gen >= 040 ? -I915_TILING_Y : -I915_TILING_X,
350				  draw->width,
351				  draw->height,
352				  draw->bitsPerPixel);
353}
354
/* Prepare a pixmap for export to a DRI client: force it onto the GPU
 * and normalise its tiling so the resulting bo is usable as a DRI2
 * attachment. Returns the pixmap's GPU bo, or NULL on failure (which
 * the caller reports as BadAlloc).
 */
static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
					  PixmapPtr pixmap)
{
	struct sna_pixmap *priv;
	int tiling;

	DBG(("%s: attaching DRI client to pixmap=%ld\n",
	     __FUNCTION__, pixmap->drawable.serialNumber));

	/* SHM pixmaps are backed by client memory and cannot be exported. */
	priv = sna_pixmap(pixmap);
	if (priv != NULL && IS_STATIC_PTR(priv->ptr) && priv->cpu_bo) {
		DBG(("%s: SHM or unattached Pixmap, BadAlloc\n", __FUNCTION__));
		return NULL;
	}

	priv = sna_pixmap_move_to_gpu(pixmap,
				      MOVE_READ | __MOVE_FORCE | __MOVE_DRI);
	if (priv == NULL) {
		DBG(("%s: failed to move to GPU, BadAlloc\n", __FUNCTION__));
		return NULL;
	}

	assert(priv->flush == false);
	assert(priv->cpu_damage == NULL);
	assert(priv->gpu_bo);
	assert(priv->gpu_bo->proxy == NULL);
	assert(priv->gpu_bo->flush == false);

	/* color_tiling() returns a negated "preference" value; take its
	 * magnitude as the required tiling and retile if necessary.
	 */
	tiling = color_tiling(sna, &pixmap->drawable);
	if (tiling < 0)
		tiling = -tiling;
	if (priv->gpu_bo->tiling != tiling)
		sna_pixmap_change_tiling(pixmap, tiling);

	return priv->gpu_bo;
}
391
/* Retrieve the DRI2 buffer cached on a pixmap (slot 2 of the pixmap
 * private), or NULL when the pixmap has no DRI2 attachment.
 */
pure static inline void *sna_pixmap_get_buffer(PixmapPtr pixmap)
{
	assert(pixmap->refcnt);
	return ((void **)__get_private(pixmap, sna_pixmap_key))[2];
}
397
/* Cache (or clear, with ptr == NULL) the DRI2 buffer on a pixmap,
 * mirroring sna_pixmap_get_buffer().
 */
static inline void sna_pixmap_set_buffer(PixmapPtr pixmap, void *ptr)
{
	assert(pixmap->refcnt);
	((void **)__get_private(pixmap, sna_pixmap_key))[2] = ptr;
}
403
/* The pixmap's GPU bo has been replaced (e.g. by a resize or flip);
 * retarget the cached DRI2 front buffer at the new bo, moving the
 * flush hint from the old bo to the new one.
 */
void
sna_dri2_pixmap_update_bo(struct sna *sna, PixmapPtr pixmap, struct kgem_bo *bo)
{
	DRI2BufferPtr buffer;
	struct sna_dri2_private *private;

	buffer = sna_pixmap_get_buffer(pixmap);
	if (buffer == NULL)
		return;

	DBG(("%s: pixmap=%ld, old handle=%d, new handle=%d\n", __FUNCTION__,
	     pixmap->drawable.serialNumber,
	     get_private(buffer)->bo->handle,
	     sna_pixmap(pixmap)->gpu_bo->handle));

	private = get_private(buffer);
	assert(private->pixmap == pixmap);

	/* Debug builds treat a no-op update as a caller error; release
	 * builds simply bail out early.
	 */
	assert(bo != private->bo);
	if (private->bo == bo)
		return;

	DBG(("%s: dropping flush hint from handle=%d\n", __FUNCTION__, private->bo->handle));
	private->bo->flush = false;
	kgem_bo_destroy(&sna->kgem, private->bo);

	/* Re-export the new bo under the buffer's (now changed) name. */
	buffer->name = kgem_bo_flink(&sna->kgem, bo);
	buffer->pitch = bo->pitch;
	private->bo = ref(bo);

	DBG(("%s: adding flush hint to handle=%d\n", __FUNCTION__, bo->handle));
	bo->flush = true;
	if (bo->exec)
		sna->kgem.flush = 1;
	assert(sna_pixmap(pixmap)->flush);

	/* XXX DRI2InvalidateDrawable(&pixmap->drawable); */
}
442
/* DRI2 CreateBuffer hook: allocate (or reuse) the bo backing a given
 * attachment of a drawable and wrap it in a DRI2Buffer2Rec with our
 * private state appended. Front-left buffers alias the pixmap's GPU
 * bo and are cached on the pixmap; all other attachments get fresh
 * allocations sized to the drawable. Returns NULL on allocation or
 * flink failure (reported to the client as BadAlloc).
 */
static DRI2Buffer2Ptr
sna_dri2_create_buffer(DrawablePtr draw,
		       unsigned int attachment,
		       unsigned int format)
{
	struct sna *sna = to_sna_from_drawable(draw);
	DRI2Buffer2Ptr buffer;
	struct sna_dri2_private *private;
	PixmapPtr pixmap;
	struct kgem_bo *bo;
	unsigned flags = 0;
	uint32_t size;
	int bpp;

	DBG(("%s pixmap=%ld, (attachment=%d, format=%d, drawable=%dx%d), window?=%d\n",
	     __FUNCTION__,
	     get_drawable_pixmap(draw)->drawable.serialNumber,
	     attachment, format, draw->width, draw->height,
	     draw->type != DRAWABLE_PIXMAP));

	pixmap = NULL;
	size = (uint32_t)draw->height << 16 | draw->width;
	switch (attachment) {
	case DRI2BufferFrontLeft:
		pixmap = get_drawable_pixmap(draw);
		buffer = NULL;
		/* Reuse an existing front buffer if one is already cached
		 * on the window or the pixmap.
		 */
		if (draw->type != DRAWABLE_PIXMAP)
			buffer = dri2_window_get_front((WindowPtr)draw);
		if (buffer == NULL)
			buffer = sna_pixmap_get_buffer(pixmap);
		if (buffer) {
			private = get_private(buffer);

			DBG(("%s: reusing front buffer attachment, win=%lu %dx%d, pixmap=%ld [%ld] %dx%d, handle=%d, name=%d\n",
			     __FUNCTION__,
			     draw->type != DRAWABLE_PIXMAP ? (long)draw->id : (long)0,
			     draw->width, draw->height,
			     pixmap->drawable.serialNumber,
			     private->pixmap->drawable.serialNumber,
			     pixmap->drawable.width,
			     pixmap->drawable.height,
			     private->bo->handle, buffer->name));

			assert(private->pixmap == pixmap);
			assert(sna_pixmap(pixmap)->flush);
			assert(sna_pixmap(pixmap)->pinned & PIN_DRI2);
			assert(kgem_bo_flink(&sna->kgem, private->bo) == buffer->name);

			private->refcnt++;
			return buffer;
		}

		/* First DRI2 attachment: pin the pixmap to the GPU. */
		bo = sna_pixmap_set_dri(sna, pixmap);
		if (bo == NULL)
			return NULL;

		assert(sna_pixmap(pixmap) != NULL);

		bo = ref(bo);
		bpp = pixmap->drawable.bitsPerPixel;
		if (pixmap == sna->front && !(sna->flags & SNA_LINEAR_FB))
			flags |= CREATE_SCANOUT;
		DBG(("%s: attaching to front buffer %dx%d [%p:%d], scanout? %d\n",
		     __FUNCTION__,
		     pixmap->drawable.width, pixmap->drawable.height,
		     pixmap, pixmap->refcnt, flags & CREATE_SCANOUT));
		size = (uint32_t)pixmap->drawable.height << 16 | pixmap->drawable.width;
		break;

	case DRI2BufferBackLeft:
		/* A window-sized back buffer may later be pageflipped, so
		 * mark it as scanout-capable when that looks possible.
		 */
		if (draw->type != DRAWABLE_PIXMAP) {
			if (dri2_window_get_front((WindowPtr)draw))
				flags |= CREATE_SCANOUT;
			if (draw->width  == sna->front->drawable.width &&
			    draw->height == sna->front->drawable.height &&
			    (sna->flags & (SNA_LINEAR_FB | SNA_NO_WAIT | SNA_NO_FLIP)) == 0)
				flags |= CREATE_SCANOUT;
		}
		/* fallthrough -- shares the plain 2D allocation below */
	case DRI2BufferBackRight:
	case DRI2BufferFrontRight:
	case DRI2BufferFakeFrontLeft:
	case DRI2BufferFakeFrontRight:
		bpp = draw->bitsPerPixel;
		DBG(("%s: creating back buffer %dx%d, suitable for scanout? %d\n",
		     __FUNCTION__,
		     draw->width, draw->height,
		     flags & CREATE_SCANOUT));

		bo = kgem_create_2d(&sna->kgem,
				    draw->width,
				    draw->height,
				    draw->bitsPerPixel,
				    color_tiling(sna, draw),
				    flags);
		break;

	case DRI2BufferStencil:
		/*
		 * The stencil buffer has quirky pitch requirements.  From Vol
		 * 2a, 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface
		 * Pitch":
		 *    The pitch must be set to 2x the value computed based on
		 *    width, as the stencil buffer is stored with two rows
		 *    interleaved.
		 * To accomplish this, we resort to the nasty hack of doubling
		 * the drm region's cpp and halving its height.
		 *
		 * If we neglect to double the pitch, then
		 * drm_intel_gem_bo_map_gtt() maps the memory incorrectly.
		 *
		 * The alignment for W-tiling is quite different to the
		 * nominal no-tiling case, so we have to account for
		 * the tiled access pattern explicitly.
		 *
		 * The stencil buffer is W tiled. However, we request from
		 * the kernel a non-tiled buffer because the kernel does
		 * not understand W tiling and the GTT is incapable of
		 * W fencing.
		 */
		bpp = format ? format : draw->bitsPerPixel;
		bpp *= 2;
		bo = kgem_create_2d(&sna->kgem,
				    ALIGN(draw->width, 64),
				    ALIGN((draw->height + 1) / 2, 64),
				    bpp, I915_TILING_NONE, flags);
		break;

	case DRI2BufferDepth:
	case DRI2BufferDepthStencil:
	case DRI2BufferHiz:
	case DRI2BufferAccum:
		/* NB: comma operator -- both assignments execute. */
		bpp = format ? format : draw->bitsPerPixel,
		bo = kgem_create_2d(&sna->kgem,
				    draw->width, draw->height, bpp,
				    other_tiling(sna, draw),
				    flags);
		break;

	default:
		return NULL;
	}
	if (bo == NULL)
		return NULL;

	/* The private record lives directly after the buffer record;
	 * see get_private().
	 */
	buffer = calloc(1, sizeof *buffer + sizeof *private);
	if (buffer == NULL)
		goto err;

	private = get_private(buffer);
	buffer->attachment = attachment;
	buffer->pitch = bo->pitch;
	buffer->cpp = bpp / 8;
	buffer->driverPrivate = private;
	buffer->format = format;
	buffer->flags = 0;
	buffer->name = kgem_bo_flink(&sna->kgem, bo);
	private->refcnt = 1;
	private->bo = bo;
	private->pixmap = pixmap;
	private->size = size;

	if (buffer->name == 0)
		goto err;

	if (pixmap) {
		struct sna_pixmap *priv;

		/* Only front-left buffers are bound to a pixmap; cache the
		 * buffer on it and pin its bo for the lifetime of the
		 * attachment.
		 */
		assert(attachment == DRI2BufferFrontLeft);
		assert(sna_pixmap_get_buffer(pixmap) == NULL);

		sna_pixmap_set_buffer(pixmap, buffer);
		assert(sna_pixmap_get_buffer(pixmap) == buffer);
		pixmap->refcnt++;

		priv = sna_pixmap(pixmap);
		assert(priv->flush == false);
		assert((priv->pinned & PIN_DRI2) == 0);

		/* Don't allow this named buffer to be replaced */
		priv->pinned |= PIN_DRI2;

		/* We need to submit any modifications to and reads from this
		 * buffer before we send any reply to the Client.
		 *
		 * As we don't track which Client, we flush for all.
		 */
		DBG(("%s: adding flush hint to handle=%d\n", __FUNCTION__, priv->gpu_bo->handle));
		priv->gpu_bo->flush = true;
		if (priv->gpu_bo->exec)
			sna->kgem.flush = 1;

		priv->flush |= 1;
		if (draw->type == DRAWABLE_PIXMAP) {
			/* DRI2 renders directly into GLXPixmaps, treat as hostile */
			kgem_bo_unclean(&sna->kgem, priv->gpu_bo);
			sna_damage_all(&priv->gpu_damage, pixmap);
			priv->clear = false;
			priv->cpu = false;
			priv->flush |= 2;
		}

		sna_accel_watch_flush(sna, 1);
	}

	return buffer;

err:
	kgem_bo_destroy(&sna->kgem, bo);
	free(buffer);
	return NULL;
}
654
655static void _sna_dri2_destroy_buffer(struct sna *sna, DRI2Buffer2Ptr buffer)
656{
657	struct sna_dri2_private *private = get_private(buffer);
658
659	if (buffer == NULL)
660		return;
661
662	DBG(("%s: %p [handle=%d] -- refcnt=%d, pixmap=%ld\n",
663	     __FUNCTION__, buffer, private->bo->handle, private->refcnt,
664	     private->pixmap ? private->pixmap->drawable.serialNumber : 0));
665	assert(private->refcnt > 0);
666	if (--private->refcnt)
667		return;
668
669	assert(private->bo);
670
671	if (private->proxy) {
672		DBG(("%s: destroying proxy\n", __FUNCTION__));
673		_sna_dri2_destroy_buffer(sna, private->proxy);
674		private->pixmap = NULL;
675	}
676
677	if (private->pixmap) {
678		PixmapPtr pixmap = private->pixmap;
679		struct sna_pixmap *priv = sna_pixmap(pixmap);
680
681		assert(sna_pixmap_get_buffer(pixmap) == buffer);
682		assert(priv->gpu_bo == private->bo);
683		assert(priv->gpu_bo->flush);
684		assert(priv->pinned & PIN_DRI2);
685		assert(priv->flush);
686
687		/* Undo the DRI markings on this pixmap */
688		DBG(("%s: releasing last DRI pixmap=%ld, scanout?=%d\n",
689		     __FUNCTION__,
690		     pixmap->drawable.serialNumber,
691		     pixmap == sna->front));
692
693		list_del(&priv->flush_list);
694
695		DBG(("%s: dropping flush hint from handle=%d\n", __FUNCTION__, private->bo->handle));
696		priv->gpu_bo->flush = false;
697		priv->pinned &= ~PIN_DRI2;
698
699		priv->flush = false;
700		sna_accel_watch_flush(sna, -1);
701
702		sna_pixmap_set_buffer(pixmap, NULL);
703		pixmap->drawable.pScreen->DestroyPixmap(pixmap);
704	}
705	assert(private->bo->flush == false);
706
707	kgem_bo_destroy(&sna->kgem, private->bo);
708	free(buffer);
709}
710
711static void sna_dri2_destroy_buffer(DrawablePtr draw, DRI2Buffer2Ptr buffer)
712{
713	_sna_dri2_destroy_buffer(to_sna_from_drawable(draw), buffer);
714}
715
716static DRI2BufferPtr sna_dri2_reference_buffer(DRI2BufferPtr buffer)
717{
718	get_private(buffer)->refcnt++;
719	return buffer;
720}
721
/* Record that the GPU copy of the pixmap is now authoritative for the
 * given region (the whole pixmap when region == NULL), discarding any
 * overlapping CPU damage.
 */
static inline void damage(PixmapPtr pixmap, struct sna_pixmap *priv, RegionPtr region)
{
	assert(priv->gpu_bo);
	if (DAMAGE_IS_ALL(priv->gpu_damage))
		goto done;

	if (region == NULL) {
damage_all:
		/* Everything lives on the GPU; CPU damage is obsolete. */
		priv->gpu_damage = _sna_damage_all(priv->gpu_damage,
						   pixmap->drawable.width,
						   pixmap->drawable.height);
		sna_damage_destroy(&priv->cpu_damage);
		list_del(&priv->flush_list);
	} else {
		sna_damage_subtract(&priv->cpu_damage, region);
		/* No CPU damage left: promote to all-GPU tracking. */
		if (priv->cpu_damage == NULL)
			goto damage_all;
		sna_damage_add(&priv->gpu_damage, region);
	}
done:
	priv->cpu = false;
	priv->clear = false;
}
745
/* Replace the pixmap's GPU bo with the given bo (post-pageflip),
 * posting full-pixmap damage so listeners (software cursor,
 * DisplayLink-style slaves) notice the new content.
 */
static void set_bo(PixmapPtr pixmap, struct kgem_bo *bo)
{
	struct sna *sna = to_sna_from_pixmap(pixmap);
	struct sna_pixmap *priv = sna_pixmap(pixmap);
	RegionRec region;

	DBG(("%s: pixmap=%ld, handle=%d\n",
	     __FUNCTION__, pixmap->drawable.serialNumber, bo->handle));

	/* The bo must be large enough to back the whole pixmap. */
	assert(pixmap->drawable.width * pixmap->drawable.bitsPerPixel <= 8*bo->pitch);
	assert(pixmap->drawable.height * bo->pitch <= kgem_bo_size(bo));
	assert(bo->proxy == NULL);
	assert(priv->pinned & PIN_DRI2);
	assert((priv->pinned & (PIN_PRIME | PIN_DRI3)) == 0);
	assert(priv->flush);

	/* Post damage on the new front buffer so that listeners, such
	 * as DisplayLink know take a copy and shove it over the USB,
	 * also for software cursors and the like.
	 */
	region.extents.x1 = region.extents.y1 = 0;
	region.extents.x2 = pixmap->drawable.width;
	region.extents.y2 = pixmap->drawable.height;
	region.data = NULL;
	DamageRegionAppend(&pixmap->drawable, &region);

	damage(pixmap, priv, NULL);

	assert(bo->refcnt);
	if (priv->move_to_gpu)
		priv->move_to_gpu(sna, priv, 0);
	if (priv->gpu_bo != bo) {
		DBG(("%s: dropping flush hint from handle=%d\n", __FUNCTION__, priv->gpu_bo->handle));
		priv->gpu_bo->flush = false;
		if (priv->cow)
			sna_pixmap_undo_cow(sna, priv, 0);
		/* NOTE(review): the NULL check here suggests undo_cow may
		 * clear priv->gpu_bo -- confirm before reordering anything.
		 */
		if (priv->gpu_bo) {
			sna_pixmap_unmap(pixmap, priv);
			kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
		}
		DBG(("%s: adding flush hint to handle=%d\n", __FUNCTION__, bo->handle));
		bo->flush = true;
		if (bo->exec)
			sna->kgem.flush = 1;
		priv->gpu_bo = ref(bo);
	}
	if (bo->domain != DOMAIN_GPU)
		bo->domain = DOMAIN_NONE;
	assert(bo->flush);

	DamageRegionProcessPending(&pixmap->drawable);
}
798
/* Choose which ring (BLT vs RENDER) to perform the DRI2 copy on,
 * trying to avoid an inter-ring synchronisation stall by following
 * whichever ring the buffers are already busy on.
 */
static void sna_dri2_select_mode(struct sna *sna, struct kgem_bo *dst, struct kgem_bo *src, bool sync)
{
	struct drm_i915_gem_busy busy;
	int mode;

	/* Single-ring hardware (pre-SNB): nothing to choose. */
	if (sna->kgem.gen < 060)
		return;

	if (sync) {
		DBG(("%s: sync, force %s ring\n", __FUNCTION__,
		     sna->kgem.gen >= 070 ? "BLT" : "RENDER"));
		kgem_set_mode(&sna->kgem,
			      sna->kgem.gen >= 070 ? KGEM_BLT : KGEM_RENDER,
			      dst);
		return;
	}

	if (DBG_FORCE_COPY != -1) {
		DBG(("%s: forcing %d\n", __FUNCTION__, DBG_FORCE_COPY));
		kgem_set_mode(&sna->kgem, DBG_FORCE_COPY, dst);
		return;
	}

	/* A batch is already in flight; keep appending to it. */
	if (sna->kgem.mode != KGEM_NONE) {
		DBG(("%s: busy, not switching\n", __FUNCTION__));
		return;
	}

	VG_CLEAR(busy);
	busy.handle = src->handle;
	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
		return;

	DBG(("%s: src handle=%d busy?=%x\n", __FUNCTION__, busy.handle, busy.busy));
	if (busy.busy == 0) {
		__kgem_bo_clear_busy(src);

		busy.handle = dst->handle;
		if (drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
			return;

		DBG(("%s: dst handle=%d busy?=%x\n", __FUNCTION__, busy.handle, busy.busy));
		if (busy.busy == 0) {
			__kgem_bo_clear_busy(dst);
			DBG(("%s: src/dst is idle, using defaults\n", __FUNCTION__));
			return;
		}
	}

	/* Sandybridge introduced a separate ring which it uses to
	 * perform blits. Switching rendering between rings incurs
	 * a stall as we wait upon the old ring to finish and
	 * flush its render cache before we can proceed on with
	 * the operation on the new ring.
	 *
	 * As this buffer, we presume, has just been written to by
	 * the DRI client using the RENDER ring, we want to perform
	 * our operation on the same ring, and ideally on the same
	 * ring as we will flip from (which should be the RENDER ring
	 * as well).
	 *
	 * The ultimate question is whether preserving the ring outweighs
	 * the cost of the query.
	 */
	mode = KGEM_RENDER;
	/* Upper half of busy.busy encodes the ring the bo is busy on. */
	if (busy.busy & (0xfffe << 16))
		mode = KGEM_BLT;
	kgem_bo_mark_busy(&sna->kgem, busy.handle == src->handle ? src : dst, mode);
	_kgem_set_mode(&sna->kgem, mode);
}
869
870static bool is_front(int attachment)
871{
872	return attachment == DRI2BufferFrontLeft;
873}
874
/* Copy between a DRI2 front and back buffer, clipped to the drawable
 * (and optionally a caller-supplied region). When "sync" is set and
 * the destination is a visible scanout, the copy is timed against the
 * scanline to avoid tearing, and the bo of the batch carrying the
 * copy is returned so the caller can fence on it; otherwise returns
 * NULL.
 */
static struct kgem_bo *
__sna_dri2_copy_region(struct sna *sna, DrawablePtr draw, RegionPtr region,
		      DRI2BufferPtr src, DRI2BufferPtr dst,
		      bool sync)
{
	PixmapPtr pixmap = get_drawable_pixmap(draw);
	DrawableRec scratch, *src_draw = &pixmap->drawable, *dst_draw = &pixmap->drawable;
	struct sna_dri2_private *src_priv = get_private(src);
	struct sna_dri2_private *dst_priv = get_private(dst);
	pixman_region16_t clip;
	struct kgem_bo *bo = NULL;
	struct kgem_bo *src_bo;
	struct kgem_bo *dst_bo;
	const BoxRec *boxes;
	int16_t dx, dy, sx, sy;
	unsigned flags;
	int n;

	/* To hide a stale DRI2Buffer, one may choose to substitute
	 * pixmap->gpu_bo instead of dst/src->bo, however you then run
	 * the risk of copying around invalid data. So either you may not
	 * see the results of the copy, or you may see the wrong pixels.
	 * Either way you eventually lose.
	 *
	 * We also have to be careful in case that the stale buffers are
	 * now attached to invalid (non-DRI) pixmaps.
	 */

	assert(is_front(dst->attachment) || is_front(src->attachment));
	assert(dst->attachment != src->attachment);

	/* Start from the drawable's extents in screen coordinates. */
	clip.extents.x1 = draw->x;
	clip.extents.y1 = draw->y;
	clip.extents.x2 = draw->x + draw->width;
	clip.extents.y2 = draw->y + draw->height;
	clip.data = NULL;

	if (region) {
		/* Caller's region is drawable-relative; shift to screen. */
		pixman_region_translate(region, draw->x, draw->y);
		pixman_region_intersect(&clip, &clip, region);
		region = &clip;
	}

	if (clip.extents.x1 >= clip.extents.x2 ||
	    clip.extents.y1 >= clip.extents.y2) {
		DBG(("%s: all clipped\n", __FUNCTION__));
		return NULL;
	}

	/* The non-front buffer is addressed in drawable-relative
	 * coordinates; bias whichever side that is back by the
	 * drawable origin.
	 */
	sx = sy = dx = dy = 0;
	if (is_front(dst->attachment)) {
		sx = -draw->x;
		sy = -draw->y;
	} else {
		dx = -draw->x;
		dy = -draw->y;
	}
	if (draw->type == DRAWABLE_WINDOW) {
		WindowPtr win = (WindowPtr)draw;
		int16_t tx, ty;

		if (is_clipped(&win->clipList, draw)) {
			DBG(("%s: draw=(%d, %d), delta=(%d, %d), draw=(%d, %d),(%d, %d), clip.extents=(%d, %d), (%d, %d)\n",
			     __FUNCTION__, draw->x, draw->y,
			     get_drawable_dx(draw), get_drawable_dy(draw),
			     clip.extents.x1, clip.extents.y1,
			     clip.extents.x2, clip.extents.y2,
			     win->clipList.extents.x1, win->clipList.extents.y1,
			     win->clipList.extents.x2, win->clipList.extents.y2));

			assert(region == NULL || region == &clip);
			pixman_region_intersect(&clip, &win->clipList, &clip);
			if (!pixman_region_not_empty(&clip)) {
				DBG(("%s: all clipped\n", __FUNCTION__));
				return NULL;
			}

			region = &clip;
		}

		/* Account for a (composite) redirection offset between the
		 * window and its backing pixmap.
		 */
		if (get_drawable_deltas(draw, pixmap, &tx, &ty)) {
			if (is_front(dst->attachment)) {
				pixman_region_translate(region ?: &clip, tx, ty);
				sx -= tx;
				sy -= ty;
			} else {
				sx += tx;
				sy += ty;
			}
		}
	} else
		sync = false;	/* pixmaps are never scanned out */

	/* A scratch DrawableRec stands in for the bare back bo, which has
	 * no pixmap of its own; only depth/bpp and size are needed.
	 */
	scratch.x = scratch.y = 0;
	scratch.width = scratch.height = 0;
	scratch.depth = draw->depth;
	scratch.bitsPerPixel = draw->bitsPerPixel;

	src_bo = src_priv->bo;
	assert(src_bo->refcnt);
	if (is_front(src->attachment)) {
		struct sna_pixmap *priv;

		priv = sna_pixmap_move_to_gpu(pixmap, MOVE_READ);
		if (priv)
			src_bo = priv->gpu_bo;
		DBG(("%s: updated FrontLeft src_bo from handle=%d to handle=%d\n",
		     __FUNCTION__, src_priv->bo->handle, src_bo->handle));
		assert(src_bo->refcnt);
	} else {
		RegionRec source;

		scratch.width = src_priv->size & 0xffff;
		scratch.height = src_priv->size >> 16;
		src_draw = &scratch;

		DBG(("%s: source size %dx%d, region size %dx%d\n",
		     __FUNCTION__,
		     scratch.width, scratch.height,
		     clip.extents.x2 - clip.extents.x1,
		     clip.extents.y2 - clip.extents.y1));

		/* Never read outside the bounds of the source bo. */
		source.extents.x1 = -sx;
		source.extents.y1 = -sy;
		source.extents.x2 = source.extents.x1 + scratch.width;
		source.extents.y2 = source.extents.y1 + scratch.height;
		source.data = NULL;

		assert(region == NULL || region == &clip);
		pixman_region_intersect(&clip, &clip, &source);

	}

	dst_bo = dst_priv->bo;
	assert(dst_bo->refcnt);
	if (is_front(dst->attachment)) {
		struct sna_pixmap *priv;
		struct list shadow;

		/* Preserve the CRTC shadow overrides */
		sna_shadow_steal_crtcs(sna, &shadow);

		flags = MOVE_WRITE | __MOVE_FORCE;
		if (clip.data)
			flags |= MOVE_READ;

		assert(region == NULL || region == &clip);
		priv = sna_pixmap_move_area_to_gpu(pixmap, &clip.extents, flags);
		if (priv) {
			damage(pixmap, priv, region);
			dst_bo = priv->gpu_bo;
		}
		DBG(("%s: updated FrontLeft dst_bo from handle=%d to handle=%d\n",
		     __FUNCTION__, dst_priv->bo->handle, dst_bo->handle));
		assert(dst_bo->refcnt);

		sna_shadow_unsteal_crtcs(sna, &shadow);
	} else {
		RegionRec target;

		scratch.width = dst_priv->size & 0xffff;
		scratch.height = dst_priv->size >> 16;
		dst_draw = &scratch;

		DBG(("%s: target size %dx%d, region size %dx%d\n",
		     __FUNCTION__,
		     scratch.width, scratch.height,
		     clip.extents.x2 - clip.extents.x1,
		     clip.extents.y2 - clip.extents.y1));

		/* Never write outside the bounds of the target bo. */
		target.extents.x1 = -dx;
		target.extents.y1 = -dy;
		target.extents.x2 = target.extents.x1 + scratch.width;
		target.extents.y2 = target.extents.y1 + scratch.height;
		target.data = NULL;

		assert(region == NULL || region == &clip);
		pixman_region_intersect(&clip, &clip, &target);

		sync = false;	/* only front-buffer copies are synced */
	}

	if (!wedged(sna)) {
		xf86CrtcPtr crtc;

		/* Only wait on the scanline if the copy lands on a CRTC. */
		crtc = NULL;
		if (sync && sna_pixmap_is_scanout(sna, pixmap))
			crtc = sna_covering_crtc(sna, &clip.extents, NULL);
		sna_dri2_select_mode(sna, dst_bo, src_bo, crtc != NULL);

		sync = (crtc != NULL&&
			sna_wait_for_scanline(sna, pixmap, crtc,
					      &clip.extents));
	}

	if (region) {
		boxes = region_rects(region);
		n = region_num_rects(region);
		assert(n);
	} else {
		region = &clip;
		boxes = &clip.extents;
		n = 1;
	}
	DamageRegionAppend(&pixmap->drawable, region);


	DBG(("%s: copying [(%d, %d), (%d, %d)]x%d src=(%d, %d), dst=(%d, %d)\n",
	     __FUNCTION__,
	     boxes[0].x1, boxes[0].y1,
	     boxes[0].x2, boxes[0].y2,
	     n, sx, sy, dx, dy));

	flags = COPY_LAST;
	if (sync)
		flags |= COPY_SYNC;
	/* Fall back to a CPU copy if the GPU path declines. */
	if (!sna->render.copy_boxes(sna, GXcopy,
				    src_draw, src_bo, sx, sy,
				    dst_draw, dst_bo, dx, dy,
				    boxes, n, flags))
		memcpy_copy_boxes(sna, GXcopy,
				  src_draw, src_bo, sx, sy,
				  dst_draw, dst_bo, dx, dy,
				  boxes, n, flags);

	DBG(("%s: flushing? %d\n", __FUNCTION__, sync));
	if (sync) { /* STAT! */
		/* Submit immediately and hand back the batch bo as a fence. */
		struct kgem_request *rq = sna->kgem.next_request;
		kgem_submit(&sna->kgem);
		if (rq->bo) {
			bo = ref(rq->bo);
			DBG(("%s: recording sync fence handle=%d\n", __FUNCTION__, bo->handle));
		}
	}

	DamageRegionProcessPending(&pixmap->drawable);

	if (clip.data)
		pixman_region_fini(&clip);

	return bo;
}
1117
/* DRI2 CopyRegion entry point: blit the given region between the two
 * DRI2 buffers of a drawable (typically back -> front).  The final
 * 'false' argument requests an unsynchronised copy - see
 * __sna_dri2_copy_region().
 */
static void
sna_dri2_copy_region(DrawablePtr draw,
		     RegionPtr region,
		     DRI2BufferPtr dst,
		     DRI2BufferPtr src)
{
	PixmapPtr pixmap = get_drawable_pixmap(draw);
	struct sna *sna = to_sna_from_pixmap(pixmap);

	DBG(("%s: pixmap=%ld, src=%u (refs=%d/%d, flush=%d, attach=%d) , dst=%u (refs=%d/%d, flush=%d, attach=%d)\n",
	     __FUNCTION__,
	     pixmap->drawable.serialNumber,
	     get_private(src)->bo->handle,
	     get_private(src)->refcnt,
	     get_private(src)->bo->refcnt,
	     get_private(src)->bo->flush,
	     src->attachment,
	     get_private(dst)->bo->handle,
	     get_private(dst)->refcnt,
	     get_private(dst)->bo->refcnt,
	     get_private(dst)->bo->flush,
	     dst->attachment));

	assert(src != dst);

	assert(get_private(src)->refcnt);
	assert(get_private(dst)->refcnt);

	assert(get_private(src)->bo->refcnt);
	assert(get_private(dst)->bo->refcnt);

	DBG(("%s: region (%d, %d), (%d, %d) x %d\n",
	     __FUNCTION__,
	     region->extents.x1, region->extents.y1,
	     region->extents.x2, region->extents.y2,
	     region_num_rects(region)));

	__sna_dri2_copy_region(sna, draw, region, src, dst, false);
}
1157
1158inline static uint32_t pipe_select(int pipe)
1159{
1160	/* The third pipe was introduced with IvyBridge long after
1161	 * multiple pipe support was added to the kernel, hence
1162	 * we can safely ignore the capability check - if we have more
1163	 * than two pipes, we can assume that they are fully supported.
1164	 */
1165	if (pipe > 1)
1166		return pipe << DRM_VBLANK_HIGH_CRTC_SHIFT;
1167	else if (pipe > 0)
1168		return DRM_VBLANK_SECONDARY;
1169	else
1170		return 0;
1171}
1172
/* Issue DRM_IOCTL_WAIT_VBLANK on the given pipe.
 *
 * The pipe selection bits are OR'ed into vbl->request.type; the caller
 * fills in the rest of the request.  Returns the result of drmIoctl().
 */
static inline int sna_wait_vblank(struct sna *sna, union drm_wait_vblank *vbl, int pipe)
{
	DBG(("%s(pipe=%d, waiting until seq=%u%s)\n",
	     __FUNCTION__, pipe, vbl->request.sequence,
	     vbl->request.type & DRM_VBLANK_RELATIVE ? " [relative]" : ""));
	assert(pipe != -1);

	vbl->request.type |= pipe_select(pipe);
	return drmIoctl(sna->kgem.fd, DRM_IOCTL_WAIT_VBLANK, vbl);
}
1183
1184#if DRI2INFOREC_VERSION >= 4
1185
/* Attach the DRI2 per-window state to a window.
 *
 * The private is stashed in slot [1] of the window's sna_window_key
 * storage; dri2_window() reads it back from the same place (checked by
 * the surrounding asserts).
 */
static void dri2_window_attach(WindowPtr win, struct dri2_window *priv)
{
	assert(win->drawable.type == DRAWABLE_WINDOW);
	assert(dri2_window(win) == NULL);
	((void **)__get_private(win, sna_window_key))[1] = priv;
	assert(dri2_window(win) == priv);
}
1193
/* Translate a hardware msc into the drawable's local msc space.
 *
 * Each window accumulates a delta between the vblank counters of the
 * CRTCs it has lived on, so the msc reported to the client does not jump
 * when the window migrates between CRTCs.  The tracking state is lazily
 * allocated here; on allocation failure the msc is returned untranslated.
 */
static uint64_t
draw_current_msc(DrawablePtr draw, xf86CrtcPtr crtc, uint64_t msc)
{
	struct dri2_window *priv;

	/* Non-window drawables carry no CRTC history. */
	if (draw->type != DRAWABLE_WINDOW)
		return msc;

	priv = dri2_window((WindowPtr)draw);
	if (priv == NULL) {
		priv = malloc(sizeof(*priv));
		if (priv != NULL) {
			priv->front = NULL;
			priv->crtc = crtc;
			priv->msc_delta = 0;
			priv->chain = NULL;
			dri2_window_attach((WindowPtr)draw, priv);
		}
	} else {
		if (priv->crtc != crtc) {
			/* Window moved to another CRTC: fold the difference
			 * between the two counters into the running delta.
			 */
			const struct ust_msc *last = sna_crtc_last_swap(priv->crtc);
			const struct ust_msc *this = sna_crtc_last_swap(crtc);
			DBG(("%s: Window transferring from pipe=%d [msc=%llu] to pipe=%d [msc=%llu], delta now %lld\n",
			     __FUNCTION__,
			     sna_crtc_to_pipe(priv->crtc), (long long)last->msc,
			     sna_crtc_to_pipe(crtc), (long long)this->msc,
			     (long long)(priv->msc_delta + this->msc - last->msc)));
			priv->msc_delta += this->msc - last->msc;
			priv->crtc = crtc;
		}
		msc -= priv->msc_delta;
	}
	return  msc;
}
1228
1229static uint32_t
1230draw_target_seq(DrawablePtr draw, uint64_t msc)
1231{
1232	struct dri2_window *priv = dri2_window((WindowPtr)draw);
1233	if (priv == NULL)
1234		return msc;
1235	DBG(("%s: converting target_msc=%llu to seq %u\n",
1236	     __FUNCTION__, (long long)msc, (unsigned)(msc + priv->msc_delta)));
1237	return msc + priv->msc_delta;
1238}
1239
1240static xf86CrtcPtr
1241sna_dri2_get_crtc(DrawablePtr draw)
1242{
1243	if (draw->type == DRAWABLE_PIXMAP)
1244		return NULL;
1245
1246	/* Make sure the CRTC is valid and this is the real front buffer */
1247	return sna_covering_crtc(to_sna_from_drawable(draw),
1248				 &((WindowPtr)draw)->clipList.extents,
1249				 NULL);
1250}
1251
1252static void
1253sna_dri2_remove_event(WindowPtr win, struct sna_dri2_event *info)
1254{
1255	struct dri2_window *priv;
1256	struct sna_dri2_event *chain;
1257
1258	assert(win->drawable.type == DRAWABLE_WINDOW);
1259	DBG(("%s: remove[%p] from window %ld, active? %d\n",
1260	     __FUNCTION__, info, (long)win->drawable.id, info->draw != NULL));
1261
1262	priv = dri2_window(win);
1263	assert(priv);
1264	assert(priv->chain != NULL);
1265
1266	if (priv->chain == info) {
1267		priv->chain = info->chain;
1268		return;
1269	}
1270
1271	chain = priv->chain;
1272	while (chain->chain != info)
1273		chain = chain->chain;
1274	assert(chain != info);
1275	assert(info->chain != chain);
1276	chain->chain = info->chain;
1277}
1278
/* Release all resources held by a swap event: unlink it from its
 * window's chain (if still attached to a window), drop the references on
 * the front/back DRI2 buffers, free any cached bo entries and the blit
 * bo, then free the event itself.
 */
static void
sna_dri2_event_free(struct sna_dri2_event *info)
{
	DrawablePtr draw = info->draw;

	DBG(("%s(draw?=%d)\n", __FUNCTION__, draw != NULL));
	if (draw && draw->type == DRAWABLE_WINDOW)
		sna_dri2_remove_event((WindowPtr)draw, info);

	_sna_dri2_destroy_buffer(info->sna, info->front);
	_sna_dri2_destroy_buffer(info->sna, info->back);

	/* Drain the cache of spare buffer objects. */
	while (!list_is_empty(&info->cache)) {
		struct dri_bo *c;

		c = list_first_entry(&info->cache, struct dri_bo, link);
		list_del(&c->link);

		DBG(("%s: releasing cached handle=%d\n", __FUNCTION__, c->bo ? c->bo->handle : 0));
		if (c->bo)
			kgem_bo_destroy(&info->sna->kgem, c->bo);

		free(c);
	}

	if (info->bo) {
		DBG(("%s: releasing batch handle=%d\n", __FUNCTION__, info->bo->handle));
		kgem_bo_destroy(&info->sna->kgem, info->bo);
	}

	_list_del(&info->link);
	free(info);
}
1312
/* ClientStateCallback: tidy up outstanding swap events when a client
 * disconnects.
 *
 * Events already queued with the kernel cannot be freed yet (their
 * vblank/flip event will still arrive), so they are merely detached from
 * the client and window; unqueued events are freed immediately.  The
 * callback deregisters itself once the last client with DRI2 events is
 * gone.
 */
static void
sna_dri2_client_gone(CallbackListPtr *list, void *closure, void *data)
{
	NewClientInfoRec *clientinfo = data;
	ClientPtr client = clientinfo->client;
	struct sna_client *priv = sna_client(client);
	struct sna *sna = closure;

	/* This client never had any DRI2 events. */
	if (priv->events.next == NULL)
		return;

	if (client->clientState != ClientStateGone)
		return;

	DBG(("%s(active?=%d)\n", __FUNCTION__,
	     !list_is_empty(&priv->events)));

	while (!list_is_empty(&priv->events)) {
		struct sna_dri2_event *event;

		event = list_first_entry(&priv->events, struct sna_dri2_event, link);
		assert(event->client == client);

		if (event->queued) {
			/* Kernel still owns this event; orphan it. */
			if (event->draw)
				sna_dri2_remove_event((WindowPtr)event->draw,
						      event);
			event->client = NULL;
			event->draw = NULL;
			list_del(&event->link);
		} else
			sna_dri2_event_free(event);
	}

	if (--sna->dri2.client_count == 0)
		DeleteCallback(&ClientStateCallback, sna_dri2_client_gone, sna);
}
1350
1351static bool add_event_to_client(struct sna_dri2_event *info, struct sna *sna, ClientPtr client)
1352{
1353	struct sna_client *priv = sna_client(client);
1354
1355	if (priv->events.next == NULL) {
1356		if (sna->dri2.client_count++ == 0 &&
1357		    !AddCallback(&ClientStateCallback, sna_dri2_client_gone, sna))
1358			return false;
1359
1360		list_init(&priv->events);
1361	}
1362
1363	list_add(&info->link, &priv->events);
1364	info->client = client;
1365	return true;
1366}
1367
1368static struct sna_dri2_event *
1369sna_dri2_add_event(struct sna *sna, DrawablePtr draw, ClientPtr client)
1370{
1371	struct dri2_window *priv;
1372	struct sna_dri2_event *info, *chain;
1373
1374	assert(draw->type == DRAWABLE_WINDOW);
1375	DBG(("%s: adding event to window %ld)\n",
1376	     __FUNCTION__, (long)draw->id));
1377
1378	priv = dri2_window((WindowPtr)draw);
1379	if (priv == NULL)
1380		return NULL;
1381
1382	info = calloc(1, sizeof(struct sna_dri2_event));
1383	if (info == NULL)
1384		return NULL;
1385
1386	list_init(&info->cache);
1387	info->sna = sna;
1388	info->draw = draw;
1389	info->crtc = priv->crtc;
1390	info->pipe = sna_crtc_to_pipe(priv->crtc);
1391
1392	if (!add_event_to_client(info, sna, client)) {
1393		free(info);
1394		return NULL;
1395	}
1396
1397	assert(priv->chain != info);
1398
1399	if (priv->chain == NULL) {
1400		priv->chain = info;
1401		return info;
1402	}
1403
1404	chain = priv->chain;
1405	while (chain->chain != NULL)
1406		chain = chain->chain;
1407
1408	assert(chain != info);
1409	chain->chain = info;
1410	return info;
1411}
1412
1413void sna_dri2_decouple_window(WindowPtr win)
1414{
1415	struct dri2_window *priv;
1416
1417	priv = dri2_window(win);
1418	if (priv == NULL)
1419		return;
1420
1421	DBG(("%s: window=%ld\n", __FUNCTION__, win->drawable.id));
1422
1423	if (priv->front) {
1424		struct sna *sna = to_sna_from_drawable(&win->drawable);
1425		assert(priv->crtc);
1426		sna_shadow_unset_crtc(sna, priv->crtc);
1427		_sna_dri2_destroy_buffer(sna, priv->front);
1428		priv->front = NULL;
1429	}
1430}
1431
/* Window teardown: release the TearFree fake front (if any) and orphan
 * every pending swap event.
 *
 * Events still queued with the kernel cannot be freed here - they are
 * detached (draw/client cleared, unlinked) and will be reaped when their
 * vblank or flip event finally fires.
 */
void sna_dri2_destroy_window(WindowPtr win)
{
	struct dri2_window *priv;

	priv = dri2_window(win);
	if (priv == NULL)
		return;

	DBG(("%s: window=%ld\n", __FUNCTION__, win->drawable.id));

	if (priv->front) {
		struct sna *sna = to_sna_from_drawable(&win->drawable);
		assert(priv->crtc);
		sna_shadow_unset_crtc(sna, priv->crtc);
		_sna_dri2_destroy_buffer(sna, priv->front);
	}

	if (priv->chain) {
		struct sna_dri2_event *info, *chain;

		DBG(("%s: freeing chain\n", __FUNCTION__));

		chain = priv->chain;
		while ((info = chain)) {
			info->draw = NULL;
			info->client = NULL;
			list_del(&info->link);

			chain = info->chain;
			info->chain = NULL;

			/* Queued events are freed by their completion handler. */
			if (!info->queued)
				sna_dri2_event_free(info);
		}
	}

	free(priv);
}
1470
/* Pageflip completion callback invoked from the DRM event queue;
 * forwards to the generic flip event handler.
 */
static void
sna_dri2_flip_handler(struct drm_event_vblank *event, void *data)
{
	DBG(("%s: sequence=%d\n", __FUNCTION__, event->sequence));
	sna_dri2_flip_event(data);
}
1477
/* Queue a pageflip to the back buffer and swap the buffer bookkeeping.
 *
 * On success the front and back DRI2 buffers exchange their bo, flink
 * name and pitch, the screen pixmap is redirected to the new scanout bo,
 * and the back buffer is marked stale (see mark_stale()).  Returns false
 * if the kernel rejected the flip.
 */
static bool
sna_dri2_flip(struct sna_dri2_event *info)
{
	struct kgem_bo *bo = get_private(info->back)->bo;
	struct kgem_bo *tmp_bo;
	uint32_t tmp_name;
	int tmp_pitch;

	DBG(("%s(type=%d)\n", __FUNCTION__, info->type));

	assert(sna_pixmap_get_buffer(info->sna->front) == info->front);
	assert(get_drawable_pixmap(info->draw)->drawable.height * bo->pitch <= kgem_bo_size(bo));
	assert(bo->refcnt);

	if (!sna_page_flip(info->sna, bo, sna_dri2_flip_handler,
			   info->type == FLIP_ASYNC ? NULL : info))
		return false;

	/* Only one synchronous flip may be pending at any time. */
	assert(info->sna->dri2.flip_pending == NULL ||
	       info->sna->dri2.flip_pending == info);
	if (info->type != FLIP_ASYNC)
		info->sna->dri2.flip_pending = info;

	DBG(("%s: marked handle=%d as scanout, swap front (handle=%d, name=%d) and back (handle=%d, name=%d)\n",
	     __FUNCTION__, bo->handle,
	     get_private(info->front)->bo->handle, info->front->name,
	     get_private(info->back)->bo->handle, info->back->name));

	/* Exchange front <-> back: bo, flink name and pitch. */
	tmp_bo = get_private(info->front)->bo;
	tmp_name = info->front->name;
	tmp_pitch = info->front->pitch;

	set_bo(info->sna->front, bo);

	info->front->name = info->back->name;
	info->front->pitch = info->back->pitch;
	get_private(info->front)->bo = bo;

	info->back->name = tmp_name;
	info->back->pitch = tmp_pitch;
	get_private(info->back)->bo = tmp_bo;
	mark_stale(info->back);

	assert(get_private(info->front)->bo->refcnt);
	assert(get_private(info->back)->bo->refcnt);
	assert(get_private(info->front)->bo != get_private(info->back)->bo);

	info->queued = true;
	return true;
}
1528
/* Decide whether a swap on this drawable may be performed as a pageflip.
 *
 * Requires an unclipped full-screen window attached to the current front
 * buffer, matching format/size/pitch between the buffers, a back buffer
 * allocated for scanout with flip-compatible tiling, and an active,
 * unshadowed CRTC.  Any failed check falls back to a blit/exchange path.
 */
static bool
can_flip(struct sna * sna,
	 DrawablePtr draw,
	 DRI2BufferPtr front,
	 DRI2BufferPtr back,
	 xf86CrtcPtr crtc)
{
	WindowPtr win = (WindowPtr)draw;
	PixmapPtr pixmap;

	assert((sna->flags & SNA_NO_WAIT) == 0);

	if (!DBG_CAN_FLIP)
		return false;

	if (draw->type == DRAWABLE_PIXMAP)
		return false;

	if (!sna->mode.front_active) {
		DBG(("%s: no, active CRTC\n", __FUNCTION__));
		return false;
	}

	assert(sna->scrn->vtSema);

	if ((sna->flags & (SNA_HAS_FLIP | SNA_HAS_ASYNC_FLIP)) == 0) {
		DBG(("%s: no, pageflips disabled\n", __FUNCTION__));
		return false;
	}

	if (front->format != back->format) {
		DBG(("%s: no, format mismatch, front = %d, back = %d\n",
		     __FUNCTION__, front->format, back->format));
		return false;
	}

	if (sna->mode.shadow_active) {
		DBG(("%s: no, shadow enabled\n", __FUNCTION__));
		return false;
	}

	if (!sna_crtc_is_on(crtc)) {
		DBG(("%s: ref-pipe=%d is disabled\n", __FUNCTION__, sna_crtc_to_pipe(crtc)));
		return false;
	}

	pixmap = get_window_pixmap(win);
	if (pixmap != sna->front) {
		DBG(("%s: no, window (pixmap=%ld) is not attached to the front buffer (pixmap=%ld)\n",
		     __FUNCTION__, pixmap->drawable.serialNumber, sna->front->drawable.serialNumber));
		return false;
	}

	if (sna_pixmap_get_buffer(pixmap) != front) {
		DBG(("%s: no, DRI2 drawable is no longer attached (old name=%d, new name=%d) to pixmap=%ld\n",
		     __FUNCTION__, front->name,
		     sna_pixmap_get_buffer(pixmap) ? ((DRI2BufferPtr)sna_pixmap_get_buffer(pixmap))->name : 0,
		     pixmap->drawable.serialNumber));
		return false;
	}

	assert(get_private(front)->pixmap == sna->front);
	assert(sna_pixmap(sna->front)->gpu_bo == get_private(front)->bo);

	/* Back buffer must have been allocated with CREATE_SCANOUT. */
	if (!get_private(back)->bo->scanout) {
		DBG(("%s: no, DRI2 drawable was too small at time of creation)\n",
		     __FUNCTION__));
		return false;
	}

	if (get_private(back)->size != get_private(front)->size) {
		DBG(("%s: no, DRI2 drawable does not fit into scanout\n",
		     __FUNCTION__));
		return false;
	}

	DBG(("%s: window size: %dx%d, clip=(%d, %d), (%d, %d) x %d\n",
	     __FUNCTION__,
	     win->drawable.width, win->drawable.height,
	     win->clipList.extents.x1, win->clipList.extents.y1,
	     win->clipList.extents.x2, win->clipList.extents.y2,
	     region_num_rects(&win->clipList)));
	if (!RegionEqual(&win->clipList, &draw->pScreen->root->winSize)) {
		DBG(("%s: no, window is clipped: clip region=(%d, %d), (%d, %d), root size=(%d, %d), (%d, %d)\n",
		     __FUNCTION__,
		     win->clipList.extents.x1,
		     win->clipList.extents.y1,
		     win->clipList.extents.x2,
		     win->clipList.extents.y2,
		     draw->pScreen->root->winSize.extents.x1,
		     draw->pScreen->root->winSize.extents.y1,
		     draw->pScreen->root->winSize.extents.x2,
		     draw->pScreen->root->winSize.extents.y2));
		return false;
	}

	if (draw->x != 0 || draw->y != 0 ||
#ifdef COMPOSITE
	    draw->x != pixmap->screen_x ||
	    draw->y != pixmap->screen_y ||
#endif
	    draw->width != pixmap->drawable.width ||
	    draw->height != pixmap->drawable.height) {
		DBG(("%s: no, window is not full size (%dx%d)!=(%dx%d)\n",
		     __FUNCTION__,
		     draw->width, draw->height,
		     pixmap->drawable.width,
		     pixmap->drawable.height));
		return false;
	}

	/* prevent an implicit tiling mode change */
	if (get_private(back)->bo->tiling > I915_TILING_X) {
		DBG(("%s -- no, tiling mismatch: front %d, back=%d, want-tiled?=%d\n",
		     __FUNCTION__,
		     get_private(front)->bo->tiling,
		     get_private(back)->bo->tiling,
		     !!(sna->flags & SNA_LINEAR_FB)));
		return false;
	}

	if (get_private(front)->bo->pitch != get_private(back)->bo->pitch) {
		DBG(("%s -- no, pitch mismatch: front %d, back=%d\n",
		     __FUNCTION__,
		     get_private(front)->bo->pitch,
		     get_private(back)->bo->pitch));
		return false;
	}

	if (sna_pixmap(pixmap)->pinned & ~(PIN_DRI2 | PIN_SCANOUT)) {
		DBG(("%s -- no, pinned: front %x\n",
		     __FUNCTION__, sna_pixmap(pixmap)->pinned));
		return false;
	}

	DBG(("%s: yes, pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
	assert(dri2_window(win)->front == NULL);
	return true;
}
1668
/* Decide whether a swap can be performed by simply exchanging the bo
 * behind the drawable's pixmap with the back buffer (no copy, no flip).
 *
 * Requires an unclipped window whose DRI2 front buffer is still
 * attached and whose back buffer matches the front in format and size.
 * The real front buffer is excluded while a CRTC is active (that path
 * must flip) unless TearFree is enabled.
 */
static bool
can_xchg(struct sna *sna,
	 DrawablePtr draw,
	 DRI2BufferPtr front,
	 DRI2BufferPtr back)
{
	WindowPtr win = (WindowPtr)draw;
	PixmapPtr pixmap;

	if (!DBG_CAN_XCHG)
		return false;

	if (draw->type == DRAWABLE_PIXMAP)
		return false;

	if (front->format != back->format) {
		DBG(("%s: no, format mismatch, front = %d, back = %d\n",
		     __FUNCTION__, front->format, back->format));
		return false;
	}

	pixmap = get_window_pixmap(win);
	if (get_private(front)->pixmap != pixmap) {
		DBG(("%s: no, DRI2 drawable is no longer attached, old pixmap=%ld, now pixmap=%ld\n",
		     __FUNCTION__,
		     get_private(front)->pixmap->drawable.serialNumber,
		     pixmap->drawable.serialNumber));
		return false;
	}

	DBG(("%s: window size: %dx%d, clip=(%d, %d), (%d, %d) x %d, pixmap size=%dx%d\n",
	     __FUNCTION__,
	     win->drawable.width, win->drawable.height,
	     win->clipList.extents.x1, win->clipList.extents.y1,
	     win->clipList.extents.x2, win->clipList.extents.y2,
	     region_num_rects(&win->clipList),
	     pixmap->drawable.width,
	     pixmap->drawable.height));
	if (is_clipped(&win->clipList, &pixmap->drawable)) {
		DBG(("%s: no, %dx%d window is clipped: clip region=(%d, %d), (%d, %d)\n",
		     __FUNCTION__,
		     draw->width, draw->height,
		     win->clipList.extents.x1,
		     win->clipList.extents.y1,
		     win->clipList.extents.x2,
		     win->clipList.extents.y2));
		return false;
	}

	if (get_private(back)->size != get_private(front)->size) {
		DBG(("%s: no, back buffer %dx%d does not match front buffer %dx%d\n",
		     __FUNCTION__,
		     get_private(back)->size & 0x7fff, (get_private(back)->size >> 16) & 0x7fff,
		     get_private(front)->size & 0x7fff, (get_private(front)->size >> 16) & 0x7fff));
		return false;
	}

	if (pixmap == sna->front && !(sna->flags & SNA_TEAR_FREE) && sna->mode.front_active) {
		DBG(("%s: no, front buffer, requires flipping\n",
		     __FUNCTION__));
		return false;
	}

	if (sna_pixmap(pixmap)->pinned & ~(PIN_DRI2 | PIN_SCANOUT)) {
		DBG(("%s: no, pinned: %x\n",
		     __FUNCTION__, sna_pixmap(pixmap)->pinned));
		return false;
	}

	DBG(("%s: yes, pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
	return true;
}
1741
1742static bool
1743overlaps_other_crtc(struct sna *sna, xf86CrtcPtr desired)
1744{
1745	xf86CrtcConfigPtr config = XF86_CRTC_CONFIG_PTR(sna->scrn);
1746	int c;
1747
1748	for (c = 0; c < sna->mode.num_real_crtc; c++) {
1749		xf86CrtcPtr crtc = config->crtc[c];
1750
1751		if (crtc == desired)
1752			continue;
1753
1754		if (!crtc->enabled)
1755			continue;
1756
1757		if (desired->bounds.x1 < crtc->bounds.x2 &&
1758		    desired->bounds.x2 > crtc->bounds.x1 &&
1759		    desired->bounds.y1 < crtc->bounds.y2 &&
1760		    desired->bounds.y2 > crtc->bounds.y1)
1761			return true;
1762	}
1763
1764	return false;
1765}
1766
/* Decide whether a swap can be performed by scanning the back buffer
 * out directly on the given CRTC as a per-CRTC shadow (TearFree only).
 *
 * The window must exactly cover a single untransformed CRTC, be
 * unclipped, not overlap any other CRTC, and the back buffer must match
 * the window's dimensions.
 */
static bool
can_xchg_crtc(struct sna *sna,
	      DrawablePtr draw,
	      DRI2BufferPtr front,
	      DRI2BufferPtr back,
	      xf86CrtcPtr crtc)
{
	WindowPtr win = (WindowPtr)draw;
	PixmapPtr pixmap;

	if (!DBG_CAN_XCHG)
		return false;

	if ((sna->flags & SNA_TEAR_FREE) == 0) {
		DBG(("%s: no, requires TearFree\n",
		     __FUNCTION__));
		return false;
	}

	if (draw->type == DRAWABLE_PIXMAP)
		return false;

	if (front->format != back->format) {
		DBG(("%s: no, format mismatch, front = %d, back = %d\n",
		     __FUNCTION__, front->format, back->format));
		return false;
	}

	/* Window must exactly cover the CRTC's bounds. */
	if (memcmp(&win->clipList.extents, &crtc->bounds, sizeof(crtc->bounds))) {
		DBG(("%s: no, window [(%d, %d), (%d, %d)] does not cover CRTC [(%d, %d), (%d, %d)]\n",
		     __FUNCTION__,
		     win->clipList.extents.x1, win->clipList.extents.y1,
		     win->clipList.extents.x2, win->clipList.extents.y2,
		     crtc->bounds.x1, crtc->bounds.y1,
		     crtc->bounds.x2, crtc->bounds.y2));
		return false;
	}

	if (sna_crtc_is_transformed(crtc)) {
		DBG(("%s: no, CRTC is rotated\n", __FUNCTION__));
		return false;
	}

	pixmap = get_window_pixmap(win);
	if (pixmap != sna->front) {
		DBG(("%s: no, not attached to front buffer\n", __FUNCTION__));
		return false;
	}

	if (get_private(front)->pixmap != pixmap) {
		DBG(("%s: no, DRI2 drawable is no longer attached, old pixmap=%ld, now pixmap=%ld\n",
		     __FUNCTION__,
		     get_private(front)->pixmap->drawable.serialNumber,
		     pixmap->drawable.serialNumber));
		return false;
	}

	DBG(("%s: window size: %dx%d, clip=(%d, %d), (%d, %d) x %d\n",
	     __FUNCTION__,
	     win->drawable.width, win->drawable.height,
	     win->clipList.extents.x1, win->clipList.extents.y1,
	     win->clipList.extents.x2, win->clipList.extents.y2,
	     region_num_rects(&win->clipList)));
	if (is_clipped(&win->clipList, &win->drawable)) {
		DBG(("%s: no, %dx%d window is clipped: clip region=(%d, %d), (%d, %d)\n",
		     __FUNCTION__,
		     draw->width, draw->height,
		     win->clipList.extents.x1,
		     win->clipList.extents.y1,
		     win->clipList.extents.x2,
		     win->clipList.extents.y2));
		return false;
	}

	if (overlaps_other_crtc(sna, crtc)) {
		DBG(("%s: no, overlaps other CRTC\n", __FUNCTION__));
		return false;
	}

	if (get_private(back)->size != (draw->height << 16 | draw->width)) {
		DBG(("%s: no, DRI2 buffers does not fit window\n",
		     __FUNCTION__));
		return false;
	}

	assert(win != win->drawable.pScreen->root);
	DBG(("%s: yes, pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
	return true;
}
1856
/* Execute a buffer exchange: swap the bo, flink name and pitch between
 * the front and back DRI2 buffers, and point the window's pixmap at the
 * former back bo.  The back buffer is marked stale so it is refreshed
 * before its next use.
 */
static void
sna_dri2_xchg(DrawablePtr draw, DRI2BufferPtr front, DRI2BufferPtr back)
{
	WindowPtr win = (WindowPtr)draw;
	struct kgem_bo *back_bo, *front_bo;
	PixmapPtr pixmap;
	int tmp;

	assert(draw->type != DRAWABLE_PIXMAP);
	pixmap = get_window_pixmap(win);

	back_bo = get_private(back)->bo;
	front_bo = get_private(front)->bo;
	assert(front_bo != back_bo);

	DBG(("%s: win=%ld, exchange front=%d/%d and back=%d/%d, pixmap=%ld %dx%d\n",
	     __FUNCTION__, win->drawable.id,
	     front_bo->handle, front->name,
	     back_bo->handle, back->name,
	     pixmap->drawable.serialNumber,
	     pixmap->drawable.width,
	     pixmap->drawable.height));

	DBG(("%s: back_bo pitch=%d, size=%d, ref=%d, active_scanout?=%d\n",
	     __FUNCTION__, back_bo->pitch, kgem_bo_size(back_bo), back_bo->refcnt, back_bo->active_scanout));
	DBG(("%s: front_bo pitch=%d, size=%d, ref=%d, active_scanout?=%d\n",
	     __FUNCTION__, front_bo->pitch, kgem_bo_size(front_bo), front_bo->refcnt, front_bo->active_scanout));
	assert(front_bo->refcnt);
	assert(back_bo->refcnt);

	assert(sna_pixmap_get_buffer(pixmap) == front);

	assert(pixmap->drawable.height * back_bo->pitch <= kgem_bo_size(back_bo));
	assert(pixmap->drawable.height * front_bo->pitch <= kgem_bo_size(front_bo));

	/* Redirect the pixmap to the (former) back bo... */
	set_bo(pixmap, back_bo);

	/* ...and swap the DRI2 buffer bookkeeping to match. */
	get_private(front)->bo = back_bo;
	get_private(back)->bo = front_bo;
	mark_stale(back);

	tmp = front->name;
	front->name = back->name;
	back->name = tmp;

	tmp = front->pitch;
	front->pitch = back->pitch;
	back->pitch = tmp;

	assert(front_bo->refcnt);
	assert(back_bo->refcnt);

	assert(get_private(front)->bo == sna_pixmap(pixmap)->gpu_bo);
}
1911
/* TearFree exchange: scan the back buffer out on the given CRTC.
 *
 * The back bo becomes the CRTC's shadow override, a proxy front buffer
 * wrapping it is recorded on the window, and a fresh scanout bo is
 * allocated for the client's next back buffer.  On allocation failure
 * the back buffer is invalidated (attachment = -1).
 */
static void sna_dri2_xchg_crtc(struct sna *sna, DrawablePtr draw, xf86CrtcPtr crtc, DRI2BufferPtr front, DRI2BufferPtr back)
{
	WindowPtr win = (WindowPtr)draw;
	DRI2Buffer2Ptr tmp;
	struct kgem_bo *bo;

	DBG(("%s: exchange front=%d/%d and back=%d/%d, win id=%lu, pixmap=%ld %dx%d\n",
	     __FUNCTION__,
	     get_private(front)->bo->handle, front->name,
	     get_private(back)->bo->handle, back->name,
	     win->drawable.id,
	     get_window_pixmap(win)->drawable.serialNumber,
	     get_window_pixmap(win)->drawable.width,
	     get_window_pixmap(win)->drawable.height));

	DamageRegionAppend(&win->drawable, &win->clipList);
	sna_shadow_set_crtc(sna, crtc, get_private(back)->bo);
	DamageRegionProcessPending(&win->drawable);

	assert(dri2_window(win)->front == NULL);

	tmp = calloc(1, sizeof(*tmp) + sizeof(struct sna_dri2_private));
	if (tmp == NULL) {
		/* Allocation failed: reuse the back buffer itself as the
		 * window's proxy front and invalidate it for the client.
		 */
		back->attachment = -1;
		if (get_private(back)->proxy == NULL) {
			get_private(back)->pixmap = get_window_pixmap(win);
			get_private(back)->proxy = sna_dri2_reference_buffer(sna_pixmap_get_buffer(get_private(back)->pixmap));
		}
		dri2_window(win)->front = sna_dri2_reference_buffer(back);
		return;
	}

	/* Wrap the back bo in a new FrontLeft proxy buffer for the window. */
	*tmp = *back;
	tmp->attachment = DRI2BufferFrontLeft;
	tmp->driverPrivate = tmp + 1;
	get_private(tmp)->refcnt = 1;
	get_private(tmp)->bo = get_private(back)->bo;
	get_private(tmp)->size = get_private(back)->size;
	get_private(tmp)->pixmap = get_window_pixmap(win);
	get_private(tmp)->proxy = sna_dri2_reference_buffer(sna_pixmap_get_buffer(get_private(tmp)->pixmap));
	dri2_window(win)->front = tmp;

	DBG(("%s: allocating new backbuffer\n", __FUNCTION__));
	back->name = 0;
	bo = kgem_create_2d(&sna->kgem,
			    draw->width, draw->height, draw->bitsPerPixel,
			    get_private(back)->bo->tiling,
			    CREATE_SCANOUT);
	if (bo != NULL) {
		get_private(back)->bo = bo;
		back->pitch = bo->pitch;
		back->name = kgem_bo_flink(&sna->kgem, bo);
	}
	if (back->name == 0) {
		if (bo != NULL)
			kgem_bo_destroy(&sna->kgem, bo);
		get_private(back)->bo = NULL;
		back->attachment = -1;
	}
}
1972
/* Deliver DRI2SwapComplete for a finished swap event, stamped with the
 * time and msc of the most recent vblank on the event's CRTC.  Nothing
 * is sent if the drawable has already been destroyed.
 */
static void frame_swap_complete(struct sna_dri2_event *frame, int type)
{
	const struct ust_msc *swap;

	if (frame->draw == NULL)
		return;

	assert(frame->client);

	swap = sna_crtc_last_swap(frame->crtc);
	DBG(("%s(type=%d): draw=%ld, pipe=%d, frame=%lld [msc=%lld], tv=%d.%06d\n",
	     __FUNCTION__, type, (long)frame->draw, frame->pipe,
	     (long long)swap->msc,
	     (long long)draw_current_msc(frame->draw, frame->crtc, swap->msc),
	     swap->tv_sec, swap->tv_usec));

	DRI2SwapComplete(frame->client, frame->draw,
			 draw_current_msc(frame->draw, frame->crtc, swap->msc),
			 swap->tv_sec, swap->tv_usec,
			 type, frame->event_complete, frame->event_data);
}
1994
/* Send an immediate DRI2SwapComplete without any queued kernel event,
 * using the last recorded vblank for the crtc (crtc may be NULL, as the
 * DBG pipe ternary below anticipates).
 */
static void fake_swap_complete(struct sna *sna, ClientPtr client,
			       DrawablePtr draw, xf86CrtcPtr crtc,
			       int type, DRI2SwapEventPtr func, void *data)
{
	const struct ust_msc *swap;

	swap = sna_crtc_last_swap(crtc);
	DBG(("%s(type=%d): draw=%ld, pipe=%d, frame=%lld [msc %lld], tv=%d.%06d\n",
	     __FUNCTION__, type, (long)draw->id, crtc ? sna_crtc_to_pipe(crtc) : -1,
	     (long long)swap->msc,
	     (long long)draw_current_msc(draw, crtc, swap->msc),
	     swap->tv_sec, swap->tv_usec));

	DRI2SwapComplete(client, draw,
			 draw_current_msc(draw, crtc, swap->msc),
			 swap->tv_sec, swap->tv_usec,
			 type, func, data);
}
2013
/* Kick off the next swap in a window's event chain once its predecessor
 * has completed.
 *
 * A throttled swap performs its exchange/blit here and then queues a
 * vblank event to signal completion; if the vblank request fails the
 * client is unblocked immediately.
 */
static void chain_swap(struct sna_dri2_event *chain)
{
	union drm_wait_vblank vbl;

	if (chain->draw == NULL) {
		sna_dri2_event_free(chain);
		return;
	}

	if (chain->queued) /* too early! */
		return;

	assert(chain == dri2_chain(chain->draw));
	DBG(("%s: chaining draw=%ld, type=%d\n",
	     __FUNCTION__, (long)chain->draw->id, chain->type));
	chain->queued = true;

	switch (chain->type) {
	case SWAP_THROTTLE:
		DBG(("%s: emitting chained vsync'ed blit\n", __FUNCTION__));
		if (chain->sna->mode.shadow &&
		    !chain->sna->mode.shadow_damage) {
			/* recursed from wait_for_shadow(), simply requeue */
			DBG(("%s -- recursed from wait_for_shadow(), requeuing\n", __FUNCTION__));
			VG_CLEAR(vbl);
			vbl.request.type =
				DRM_VBLANK_RELATIVE |
				DRM_VBLANK_EVENT;
			vbl.request.sequence = 1;
			vbl.request.signal = (uintptr_t)chain;

			if (!sna_wait_vblank(chain->sna, &vbl, chain->pipe))
				return;

			DBG(("%s -- requeue failed, errno=%d\n", __FUNCTION__, errno));
		}

		if (can_xchg(chain->sna, chain->draw, chain->front, chain->back)) {
			sna_dri2_xchg(chain->draw, chain->front, chain->back);
		} else if (can_xchg_crtc(chain->sna, chain->draw, chain->front, chain->back, chain->crtc)) {
			sna_dri2_xchg_crtc(chain->sna, chain->draw, chain->crtc, chain->front, chain->back);
		} else {
			assert(chain->queued);
			chain->bo = __sna_dri2_copy_region(chain->sna, chain->draw, NULL,
							   chain->back, chain->front,
							   true);
		}
		/* fallthrough - both SWAP_THROTTLE and SWAP wait for vblank */
	case SWAP:
		break;
	default:
		return;
	}

	VG_CLEAR(vbl);
	vbl.request.type =
		DRM_VBLANK_RELATIVE |
		DRM_VBLANK_EVENT;
	vbl.request.sequence = 1;
	vbl.request.signal = (uintptr_t)chain;
	if (sna_wait_vblank(chain->sna, &vbl, chain->pipe)) {
		DBG(("%s: vblank wait failed, unblocking client\n", __FUNCTION__));
		frame_swap_complete(chain, DRI2_BLIT_COMPLETE);
		sna_dri2_event_free(chain);
	} else {
		if (chain->type == SWAP_THROTTLE && !swap_limit(chain->draw, 2)) {
			DBG(("%s: fake triple buffering, unblocking client\n", __FUNCTION__));
			frame_swap_complete(chain, DRI2_BLIT_COMPLETE);
		}
	}
}
2084
2085static inline bool rq_is_busy(struct kgem *kgem, struct kgem_bo *bo)
2086{
2087	if (bo == NULL)
2088		return false;
2089
2090	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
2091	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
2092	assert(bo->refcnt);
2093
2094	if (bo->exec)
2095		return true;
2096
2097	if (bo->rq == NULL)
2098		return false;
2099
2100	return __kgem_busy(kgem, bo->handle);
2101}
2102
2103static bool sna_dri2_blit_complete(struct sna *sna,
2104				   struct sna_dri2_event *info)
2105{
2106	if (rq_is_busy(&sna->kgem, info->bo)) {
2107		union drm_wait_vblank vbl;
2108
2109		DBG(("%s: vsync'ed blit is still busy, postponing\n",
2110		     __FUNCTION__));
2111
2112		VG_CLEAR(vbl);
2113		vbl.request.type =
2114			DRM_VBLANK_RELATIVE |
2115			DRM_VBLANK_EVENT;
2116		vbl.request.sequence = 1;
2117		vbl.request.signal = (uintptr_t)info;
2118		assert(info->queued);
2119		if (!sna_wait_vblank(sna, &vbl, info->pipe))
2120			return false;
2121	}
2122
2123	DBG(("%s: blit finished\n", __FUNCTION__));
2124	return true;
2125}
2126
/* Dispatch a DRM vblank event for one of our queued DRI2 events.
 *
 * Depending on the event type this performs the deferred flip, executes
 * the swap (exchange or blit) and requeues for completion, reports a
 * finished blit, or wakes a client blocked in WaitMSC.  Any swap
 * chained behind this event is kicked off before the event is freed.
 * Note the deliberate case fallthroughs: FLIP degrades to SWAP when the
 * flip is no longer possible, and SWAP degrades to SWAP_WAIT when the
 * completion vblank cannot be requeued.
 */
void sna_dri2_vblank_handler(struct drm_event_vblank *event)
{
	struct sna_dri2_event *info = (void *)(uintptr_t)event->user_data;
	struct sna *sna = info->sna;
	DrawablePtr draw;
	union drm_wait_vblank vbl;
	uint64_t msc;

	DBG(("%s(type=%d, sequence=%d)\n", __FUNCTION__, info->type, event->sequence));
	assert(info->queued);
	msc = sna_crtc_record_event(info->crtc, event);

	draw = info->draw;
	if (draw == NULL) {
		DBG(("%s -- drawable gone\n", __FUNCTION__));
		goto done;
	}

	switch (info->type) {
	case FLIP:
		/* If we can still flip... */
		if (can_flip(sna, draw, info->front, info->back, info->crtc) &&
		    sna_dri2_flip(info))
			return;

		/* else fall through to blit */
	case SWAP:
		assert(info->queued);
		if (sna->mode.shadow && !sna->mode.shadow_damage) {
			/* recursed from wait_for_shadow(), simply requeue */
			DBG(("%s -- recursed from wait_for_shadow(), requeuing\n", __FUNCTION__));

		} else if (can_xchg(info->sna, draw, info->front, info->back)) {
			sna_dri2_xchg(draw, info->front, info->back);
			info->type = SWAP_WAIT;
		} else if (can_xchg_crtc(sna, draw, info->front, info->back, info->crtc)) {
			sna_dri2_xchg_crtc(sna, draw, info->crtc, info->front, info->back);
			info->type = SWAP_WAIT;
		}  else {
			assert(info->queued);
			info->bo = __sna_dri2_copy_region(sna, draw, NULL,
							  info->back, info->front, true);
			info->type = SWAP_WAIT;
		}

		/* Wait one more vblank before reporting completion. */
		VG_CLEAR(vbl);
		vbl.request.type =
			DRM_VBLANK_RELATIVE |
			DRM_VBLANK_EVENT;
		vbl.request.sequence = 1;
		vbl.request.signal = (uintptr_t)info;

		assert(info->queued);
		if (!sna_wait_vblank(sna, &vbl, info->pipe))
			return;

		DBG(("%s -- requeue failed, errno=%d\n", __FUNCTION__, errno));
		/* fall through to SwapComplete */
	case SWAP_WAIT:
		if (!sna_dri2_blit_complete(sna, info))
			return;

		DBG(("%s: swap complete, unblocking client (frame=%d, tv=%d.%06d)\n", __FUNCTION__,
		     event->sequence, event->tv_sec, event->tv_usec));
		frame_swap_complete(info, DRI2_BLIT_COMPLETE);
		break;

	case SWAP_THROTTLE:
		DBG(("%s: %d complete, frame=%d tv=%d.%06d\n",
		     __FUNCTION__, info->type,
		     event->sequence, event->tv_sec, event->tv_usec));

		if (xorg_can_triple_buffer()) {
			/* With real triple buffering the client was not
			 * yet told the swap finished; do so now. */
			if (!sna_dri2_blit_complete(sna, info))
				return;

			DBG(("%s: triple buffer swap complete, unblocking client (frame=%d, tv=%d.%06d)\n", __FUNCTION__,
			     event->sequence, event->tv_sec, event->tv_usec));
			frame_swap_complete(info, DRI2_BLIT_COMPLETE);
		}
		break;

	case WAITMSC:
		assert(info->client);
		DRI2WaitMSCComplete(info->client, draw, msc,
				    event->tv_sec, event->tv_usec);
		break;
	default:
		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
			   "%s: unknown vblank event received\n", __func__);
		/* Unknown type */
		break;
	}

	if (info->chain) {
		assert(info->chain != info);
		assert(info->draw == draw);
		/* Detach ourselves first so the chained event becomes the
		 * head of the drawable's swap chain. */
		sna_dri2_remove_event((WindowPtr)draw, info);
		chain_swap(info->chain);
		info->draw = NULL;
	}

done:
	sna_dri2_event_free(info);
	DBG(("%s complete\n", __FUNCTION__));
}
2233
/* Perform a swap as an immediate blit, throttling the client with a
 * vblank event where possible (or faking completion otherwise).
 *
 * If no blit is already pending on the drawable, the copy is emitted
 * right away; otherwise the event stays chained behind the pending one.
 * Returns true when the caller must keep @info alive (queued/chained),
 * false when it has already served its purpose and may be freed.
 */
static bool
sna_dri2_immediate_blit(struct sna *sna,
			struct sna_dri2_event *info,
			bool sync, bool event)
{
	DrawablePtr draw = info->draw;
	bool ret = false;

	if (sna->flags & SNA_NO_WAIT)
		sync = false;

	DBG(("%s: emitting immediate blit, throttling client, synced? %d, chained? %d, send-event? %d\n",
	     __FUNCTION__, sync, dri2_chain(draw) != info,
	     event));

	info->type = SWAP_THROTTLE;
	if (!sync || dri2_chain(draw) == info) {
		DBG(("%s: no pending blit, starting chain\n",
		     __FUNCTION__));

		info->queued = true;
		info->bo = __sna_dri2_copy_region(sna, draw, NULL,
						  info->back,
						  info->front,
						  sync);
		if (event) {
			if (sync) {
				union drm_wait_vblank vbl;

				VG_CLEAR(vbl);
				vbl.request.type =
					DRM_VBLANK_RELATIVE |
					DRM_VBLANK_EVENT;
				vbl.request.sequence = 1;
				vbl.request.signal = (uintptr_t)info;
				ret = !sna_wait_vblank(sna, &vbl, info->pipe);
				if (ret)
					/* Only send the fake completion now if
					 * the swap limit leaves the client
					 * unthrottled. */
					event = !swap_limit(draw, 2);
			}
			if (event) {
				DBG(("%s: fake triple buffering, unblocking client\n", __FUNCTION__));
				frame_swap_complete(info, DRI2_BLIT_COMPLETE);
			}
		}
	} else {
		DBG(("%s: pending blit, chained\n", __FUNCTION__));
		ret = true;
	}

	DBG(("%s: continue? %d\n", __FUNCTION__, ret));
	return ret;
}
2286
/* Continue a flip chain after a pageflip completed.
 *
 * info->mode encodes the continuation: a positive mode flips the
 * current front buffer again (throttle continuation), a negative mode
 * re-validates the drawable and performs a fresh front/back flip.
 * Returns false when flipping is no longer possible and the caller
 * must fall back to a blit/exchange.
 */
static bool
sna_dri2_flip_continue(struct sna_dri2_event *info)
{
	DBG(("%s(mode=%d)\n", __FUNCTION__, info->mode));

	if (info->mode > 0){
		struct kgem_bo *bo = get_private(info->front)->bo;

		info->type = info->mode;

		/* The scanout must still match our front buffer. */
		if (bo != sna_pixmap(info->sna->front)->gpu_bo)
			return false;

		if (!sna_page_flip(info->sna, bo, sna_dri2_flip_handler, info))
			return false;

		assert(info->sna->dri2.flip_pending == NULL ||
		       info->sna->dri2.flip_pending == info);
		info->sna->dri2.flip_pending = info;
		assert(info->queued);
	} else {
		info->type = -info->mode;

		if (!info->draw)
			return false;

		if (!can_flip(info->sna, info->draw, info->front, info->back, info->crtc))
			return false;

		assert(sna_pixmap_get_buffer(get_drawable_pixmap(info->draw)) == info->front);
		if (!sna_dri2_flip(info))
			return false;

		if (!xorg_can_triple_buffer()) {
			/* Without server support, hand the client a fresh
			 * back buffer and report completion immediately. */
			sna_dri2_get_back(info->sna, info->draw, info->back, info);
			DBG(("%s: fake triple buffering, unblocking client\n", __FUNCTION__));
			frame_swap_complete(info, DRI2_FLIP_COMPLETE);
		}
	}

	info->mode = 0;
	return true;
}
2330
/* Start the flip that was queued behind the one that just finished.
 * If flipping is no longer possible, fall back to a vsync'ed blit and
 * either wait for the blit via a vblank event (triple-buffer capable
 * servers) or complete the swap immediately. */
static void chain_flip(struct sna *sna)
{
	struct sna_dri2_event *chain = sna->dri2.flip_pending;

	assert(chain->type == FLIP);
	DBG(("%s: chaining type=%d, cancelled?=%d\n",
	     __FUNCTION__, chain->type, chain->draw == NULL));

	sna->dri2.flip_pending = NULL;
	if (chain->draw == NULL) {
		/* Drawable destroyed whilst the flip was pending. */
		sna_dri2_event_free(chain);
		return;
	}

	assert(chain == dri2_chain(chain->draw));
	assert(!chain->queued);
	chain->queued = true;

	if (can_flip(sna, chain->draw, chain->front, chain->back, chain->crtc) &&
	    sna_dri2_flip(chain)) {
		DBG(("%s: performing chained flip\n", __FUNCTION__));
	} else {
		DBG(("%s: emitting chained vsync'ed blit\n", __FUNCTION__));
		chain->bo = __sna_dri2_copy_region(sna, chain->draw, NULL,
						  chain->back, chain->front,
						  true);

		if (xorg_can_triple_buffer()) {
			union drm_wait_vblank vbl;

			VG_CLEAR(vbl);

			chain->type = SWAP_WAIT;
			vbl.request.type =
				DRM_VBLANK_RELATIVE |
				DRM_VBLANK_EVENT;
			vbl.request.sequence = 1;
			vbl.request.signal = (uintptr_t)chain;

			assert(chain->queued);
			if (!sna_wait_vblank(sna, &vbl, chain->pipe))
				return;
		}

		DBG(("%s: fake triple buffering (or vblank wait failed), unblocking client\n", __FUNCTION__));
		frame_swap_complete(chain, DRI2_BLIT_COMPLETE);
		sna_dri2_event_free(chain);
	}
}
2380
/* Handle completion of a pageflip for one of our DRI2 events: report
 * the swap to the client, continue any flip chain, and start a flip
 * that was queued behind this one. */
static void sna_dri2_flip_event(struct sna_dri2_event *flip)
{
	struct sna *sna = flip->sna;

	DBG(("%s(pipe=%d, event=%d)\n", __FUNCTION__, flip->pipe, flip->type));
	assert(flip->queued);

	if (sna->dri2.flip_pending == flip)
		sna->dri2.flip_pending = NULL;

	/* We assume our flips arrive in order, so we don't check the frame */
	switch (flip->type) {
	case FLIP:
		DBG(("%s: swap complete, unblocking client\n", __FUNCTION__));
		frame_swap_complete(flip, DRI2_FLIP_COMPLETE);
		sna_dri2_event_free(flip);

		if (sna->dri2.flip_pending)
			chain_flip(sna);
		break;

	case FLIP_THROTTLE:
		DBG(("%s: triple buffer swap complete, unblocking client\n", __FUNCTION__));
		frame_swap_complete(flip, DRI2_FLIP_COMPLETE);
		/* fall through -- continuation handling is shared */
	case FLIP_COMPLETE:
		if (sna->dri2.flip_pending) {
			sna_dri2_event_free(flip);
			chain_flip(sna);
		} else if (!flip->mode) {
			DBG(("%s: flip chain complete\n", __FUNCTION__));

			if (flip->chain) {
				/* Detach before starting the chained swap so
				 * it becomes the head of the event chain. */
				sna_dri2_remove_event((WindowPtr)flip->draw,
						      flip);
				chain_swap(flip->chain);
				flip->draw = NULL;
			}

			sna_dri2_event_free(flip);
		} else if (!sna_dri2_flip_continue(flip)) {
			DBG(("%s: no longer able to flip\n", __FUNCTION__));
			if (flip->draw == NULL || !sna_dri2_immediate_blit(sna, flip, false, flip->mode < 0))
				sna_dri2_event_free(flip);
		}
		break;

	default: /* Unknown type */
		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
			   "%s: unknown vblank event received\n", __func__);
		sna_dri2_event_free(flip);
		if (sna->dri2.flip_pending)
			chain_flip(sna);
		break;
	}
}
2436
2437static uint64_t
2438get_current_msc(struct sna *sna, DrawablePtr draw, xf86CrtcPtr crtc)
2439{
2440	union drm_wait_vblank vbl;
2441	uint64_t ret = -1;
2442
2443	VG_CLEAR(vbl);
2444	vbl.request.type = _DRM_VBLANK_RELATIVE;
2445	vbl.request.sequence = 0;
2446	if (sna_wait_vblank(sna, &vbl, sna_crtc_to_pipe(crtc)) == 0)
2447		ret = sna_crtc_record_vblank(crtc, &vbl);
2448
2449	return draw_current_msc(draw, crtc, ret);
2450}
2451
#if defined(CHECK_FOR_COMPOSITOR)
/* Resource-lookup callback: we only care whether any matching resource
 * exists, so report a match unconditionally. */
static Bool find(pointer value, XID id, pointer cdata)
{
	return TRUE;
}
#endif
2458
/* Choose the flip mode for an immediate swap: plain FLIP when triple
 * buffering is disabled, async flips when requested and supported,
 * real triple buffering (FLIP_THROTTLE) when the server allows it, and
 * otherwise fake triple buffering (FLIP_COMPLETE) -- except for
 * clients that look like compositors, which get plain FLIP. */
static int use_triple_buffer(struct sna *sna, ClientPtr client, bool async)
{
	if ((sna->flags & SNA_TRIPLE_BUFFER) == 0) {
		DBG(("%s: triple buffer disabled, using FLIP\n", __FUNCTION__));
		return FLIP;
	}

	if (async) {
		DBG(("%s: running async, using %s\n", __FUNCTION__,
		     sna->flags & SNA_HAS_ASYNC_FLIP ? "FLIP_ASYNC" : "FLIP_COMPLETE"));
		return sna->flags & SNA_HAS_ASYNC_FLIP ? FLIP_ASYNC : FLIP_COMPLETE;
	}

	if (xorg_can_triple_buffer()) {
		DBG(("%s: triple buffer enabled, using FLIP_THROTTLE\n", __FUNCTION__));
		return FLIP_THROTTLE;
	}

#if defined(CHECK_FOR_COMPOSITOR)
	/* Hack: Disable triple buffering for compositors */
	{
		struct sna_client *priv = sna_client(client);
		/* Cache the verdict: owning any composite-client-window
		 * resource marks the client as a compositor. */
		if (priv->is_compositor == 0)
			priv->is_compositor =
				LookupClientResourceComplex(client,
							    CompositeClientWindowType+1,
							    find, NULL) ? FLIP : FLIP_COMPLETE;

		DBG(("%s: fake triple buffer enabled?=%d using %s\n", __FUNCTION__,
		     priv->is_compositor != FLIP, priv->is_compositor == FLIP ? "FLIP" : "FLIP_COMPLETE"));
		return priv->is_compositor;
	}
#else
	DBG(("%s: fake triple buffer enabled, using FLIP_COMPLETE\n", __FUNCTION__));
	return FLIP_COMPLETE;
#endif
}
2496
2497static bool immediate_swap(struct sna *sna,
2498			   uint64_t target_msc,
2499			   uint64_t divisor,
2500			   DrawablePtr draw,
2501			   xf86CrtcPtr crtc,
2502			   uint64_t *current_msc)
2503{
2504	if (divisor == 0) {
2505		*current_msc = -1;
2506
2507		if (sna->flags & SNA_NO_WAIT) {
2508			DBG(("%s: yes, waits are disabled\n", __FUNCTION__));
2509			return true;
2510		}
2511
2512		if (target_msc)
2513			*current_msc = get_current_msc(sna, draw, crtc);
2514
2515		DBG(("%s: current_msc=%ld, target_msc=%ld -- %s\n",
2516		     __FUNCTION__, (long)*current_msc, (long)target_msc,
2517		     (*current_msc >= target_msc - 1) ? "yes" : "no"));
2518		return *current_msc >= target_msc - 1;
2519	}
2520
2521	DBG(("%s: explicit waits requests, divisor=%ld\n",
2522	     __FUNCTION__, (long)divisor));
2523	*current_msc = get_current_msc(sna, draw, crtc);
2524	return false;
2525}
2526
2527static bool
2528sna_dri2_schedule_flip(ClientPtr client, DrawablePtr draw, xf86CrtcPtr crtc,
2529		       DRI2BufferPtr front, DRI2BufferPtr back,
2530		       CARD64 *target_msc, CARD64 divisor, CARD64 remainder,
2531		       DRI2SwapEventPtr func, void *data)
2532{
2533	struct sna *sna = to_sna_from_drawable(draw);
2534	struct sna_dri2_event *info;
2535	uint64_t current_msc;
2536
2537	if (immediate_swap(sna, *target_msc, divisor, draw, crtc, &current_msc)) {
2538		int type;
2539
2540		info = sna->dri2.flip_pending;
2541		DBG(("%s: performing immediate swap on pipe %d, pending? %d, mode: %d, continuation? %d\n",
2542		     __FUNCTION__, sna_crtc_to_pipe(crtc),
2543		     info != NULL, info ? info->mode : 0,
2544		     info && info->draw == draw));
2545
2546		if (info && info->draw == draw) {
2547			assert(info->type != FLIP);
2548			assert(info->front == front);
2549			if (info->back != back) {
2550				_sna_dri2_destroy_buffer(sna, info->back);
2551				info->back = sna_dri2_reference_buffer(back);
2552			}
2553			if (info->mode || current_msc >= *target_msc) {
2554				DBG(("%s: executing xchg of pending flip\n",
2555				     __FUNCTION__));
2556				sna_dri2_xchg(draw, front, back);
2557				info->mode = type = FLIP_COMPLETE;
2558				goto new_back;
2559			} else {
2560				DBG(("%s: chaining flip\n", __FUNCTION__));
2561				type = FLIP_THROTTLE;
2562				if (xorg_can_triple_buffer())
2563					info->mode = -type;
2564				else
2565					info->mode = -FLIP_COMPLETE;
2566				goto out;
2567			}
2568		}
2569
2570		info = sna_dri2_add_event(sna, draw, client);
2571		if (info == NULL)
2572			return false;
2573
2574		assert(info->crtc == crtc);
2575		info->event_complete = func;
2576		info->event_data = data;
2577
2578		info->front = sna_dri2_reference_buffer(front);
2579		info->back = sna_dri2_reference_buffer(back);
2580
2581		if (sna->dri2.flip_pending) {
2582			/* We need to first wait (one vblank) for the
2583			 * async flips to complete before this client
2584			 * can take over.
2585			 */
2586			DBG(("%s: queueing flip after pending completion\n",
2587			     __FUNCTION__));
2588			info->type = type = FLIP;
2589			sna->dri2.flip_pending = info;
2590			assert(info->queued);
2591			current_msc++;
2592		} else {
2593			info->type = type = use_triple_buffer(sna, client, *target_msc == 0);
2594			if (!sna_dri2_flip(info)) {
2595				DBG(("%s: flip failed, falling back\n", __FUNCTION__));
2596				sna_dri2_event_free(info);
2597				return false;
2598			}
2599		}
2600
2601		swap_limit(draw, 1 + (type == FLIP_THROTTLE));
2602		if (type >= FLIP_COMPLETE) {
2603new_back:
2604			if (!xorg_can_triple_buffer())
2605				sna_dri2_get_back(sna, draw, back, info);
2606			DBG(("%s: fake triple buffering, unblocking client\n", __FUNCTION__));
2607			frame_swap_complete(info, DRI2_EXCHANGE_COMPLETE);
2608			if (info->type == FLIP_ASYNC)
2609				sna_dri2_event_free(info);
2610		}
2611out:
2612		DBG(("%s: target_msc=%llu\n", __FUNCTION__, current_msc + 1));
2613		*target_msc = current_msc + 1;
2614		return true;
2615	}
2616
2617	info = sna_dri2_add_event(sna, draw, client);
2618	if (info == NULL)
2619		return false;
2620
2621	assert(info->crtc == crtc);
2622	info->event_complete = func;
2623	info->event_data = data;
2624	info->type = FLIP;
2625
2626	info->front = sna_dri2_reference_buffer(front);
2627	info->back = sna_dri2_reference_buffer(back);
2628
2629	/*
2630	 * If divisor is zero, or current_msc is smaller than target_msc
2631	 * we just need to make sure target_msc passes before initiating
2632	 * the swap.
2633	 */
2634	if (divisor && current_msc >= *target_msc) {
2635		DBG(("%s: missed target, queueing event for next: current=%lld, target=%lld, divisor=%lld, remainder=%lld\n",
2636		     __FUNCTION__,
2637		     (long long)current_msc,
2638		     (long long)*target_msc,
2639		     (long long)divisor,
2640		     (long long)remainder));
2641
2642		*target_msc = current_msc + remainder - current_msc % divisor;
2643		if (*target_msc <= current_msc)
2644			*target_msc += divisor;
2645	}
2646
2647	if (*target_msc <= current_msc + 1) {
2648		if (!sna_dri2_flip(info)) {
2649			sna_dri2_event_free(info);
2650			return false;
2651		}
2652		*target_msc = current_msc + 1;
2653	} else {
2654		union drm_wait_vblank vbl;
2655
2656		VG_CLEAR(vbl);
2657
2658		vbl.request.type =
2659			DRM_VBLANK_ABSOLUTE |
2660			DRM_VBLANK_EVENT;
2661
2662		/* Account for 1 frame extra pageflip delay */
2663		vbl.reply.sequence = draw_target_seq(draw, *target_msc - 1);
2664		vbl.request.signal = (uintptr_t)info;
2665
2666		info->queued = true;
2667		if (sna_wait_vblank(sna, &vbl, info->pipe)) {
2668			sna_dri2_event_free(info);
2669			return false;
2670		}
2671	}
2672
2673	DBG(("%s: reported target_msc=%llu\n", __FUNCTION__, *target_msc));
2674	swap_limit(draw, 1);
2675	return true;
2676}
2677
/* Fast path: perform the swap as a pure buffer exchange when it can be
 * done immediately.  If the client asked for vsync, a throttling vblank
 * event is queued (or a fake completion sent when that fails).
 * Returns false when the swap is not immediate, telling the caller to
 * schedule it the long way. */
static bool
sna_dri2_schedule_xchg(ClientPtr client, DrawablePtr draw, xf86CrtcPtr crtc,
		       DRI2BufferPtr front, DRI2BufferPtr back,
		       CARD64 *target_msc, CARD64 divisor, CARD64 remainder,
		       DRI2SwapEventPtr func, void *data)
{
	struct sna *sna = to_sna_from_drawable(draw);
	uint64_t current_msc;
	bool sync, event;

	if (!immediate_swap(sna, *target_msc, divisor, draw, crtc, &current_msc))
		return false;

	sync = current_msc < *target_msc;
	event = dri2_chain(draw) == NULL;
	if (!sync || event) {
		/* No earlier swap pending: exchange right away. */
		DBG(("%s: performing immediate xchg on pipe %d\n",
		     __FUNCTION__, sna_crtc_to_pipe(crtc)));
		sna_dri2_xchg(draw, front, back);
	}
	if (sync) {
		struct sna_dri2_event *info;

		info = sna_dri2_add_event(sna, draw, client);
		if (!info)
			goto complete;

		info->event_complete = func;
		info->event_data = data;

		info->front = sna_dri2_reference_buffer(front);
		info->back = sna_dri2_reference_buffer(back);
		info->type = SWAP_THROTTLE;

		if (event) {
			union drm_wait_vblank vbl;

			VG_CLEAR(vbl);
			vbl.request.type =
				DRM_VBLANK_RELATIVE |
				DRM_VBLANK_EVENT;
			vbl.request.sequence = 1;
			vbl.request.signal = (uintptr_t)info;

			info->queued = true;
			if (sna_wait_vblank(sna, &vbl, info->pipe)) {
				sna_dri2_event_free(info);
				goto complete;
			}

			swap_limit(draw, 2);
		}
	} else {
complete:
		/* Also reached via goto when the throttle event could not
		 * be set up: report completion without waiting. */
		fake_swap_complete(sna, client, draw, crtc, DRI2_EXCHANGE_COMPLETE, func, data);
	}

	*target_msc = current_msc + 1;
	return true;
}
2738
/* Like sna_dri2_schedule_xchg(), but for the case where the exchange
 * can only be performed on a single crtc (via sna_dri2_xchg_crtc).
 * Returns false when the swap is not immediate. */
static bool
sna_dri2_schedule_xchg_crtc(ClientPtr client, DrawablePtr draw, xf86CrtcPtr crtc,
			    DRI2BufferPtr front, DRI2BufferPtr back,
			    CARD64 *target_msc, CARD64 divisor, CARD64 remainder,
			    DRI2SwapEventPtr func, void *data)
{
	struct sna *sna = to_sna_from_drawable(draw);
	uint64_t current_msc;
	bool sync, event;

	if (!immediate_swap(sna, *target_msc, divisor, draw, crtc, &current_msc))
		return false;

	sync = current_msc < *target_msc;
	event = dri2_chain(draw) == NULL;
	if (!sync || event) {
		/* No earlier swap pending: exchange right away. */
		DBG(("%s: performing immediate xchg only on pipe %d\n",
		     __FUNCTION__, sna_crtc_to_pipe(crtc)));
		sna_dri2_xchg_crtc(sna, draw, crtc, front, back);
	}
	if (sync) {
		struct sna_dri2_event *info;

		info = sna_dri2_add_event(sna, draw, client);
		if (!info)
			goto complete;

		info->event_complete = func;
		info->event_data = data;

		info->front = sna_dri2_reference_buffer(front);
		info->back = sna_dri2_reference_buffer(back);
		info->type = SWAP_THROTTLE;

		if (event) {
			union drm_wait_vblank vbl;

			VG_CLEAR(vbl);
			vbl.request.type =
				DRM_VBLANK_RELATIVE |
				DRM_VBLANK_EVENT;
			vbl.request.sequence = 1;
			vbl.request.signal = (uintptr_t)info;

			info->queued = true;
			if (sna_wait_vblank(sna, &vbl, info->pipe)) {
				sna_dri2_event_free(info);
				goto complete;
			}

			swap_limit(draw, 2);
		}
	} else {
complete:
		/* Also reached via goto when the throttle event could not
		 * be set up: report completion without waiting. */
		fake_swap_complete(sna, client, draw, crtc, DRI2_EXCHANGE_COMPLETE, func, data);
	}

	*target_msc = current_msc + 1;
	return true;
}
2799
2800static bool has_pending_events(struct sna *sna)
2801{
2802	struct pollfd pfd;
2803	pfd.fd = sna->kgem.fd;
2804	pfd.events = POLLIN;
2805	return poll(&pfd, 1, 0) == 1;
2806}
2807
2808/*
2809 * ScheduleSwap is responsible for requesting a DRM vblank event for the
2810 * appropriate frame.
2811 *
2812 * In the case of a blit (e.g. for a windowed swap) or buffer exchange,
2813 * the vblank requested can simply be the last queued swap frame + the swap
2814 * interval for the drawable.
2815 *
2816 * In the case of a page flip, we request an event for the last queued swap
2817 * frame + swap interval - 1, since we'll need to queue the flip for the frame
2818 * immediately following the received event.
2819 *
2820 * The client will be blocked if it tries to perform further GL commands
2821 * after queueing a swap, though in the Intel case after queueing a flip, the
2822 * client is free to queue more commands; they'll block in the kernel if
2823 * they access buffers busy with the flip.
2824 *
2825 * When the swap is complete, the driver should call into the server so it
2826 * can send any swap complete events that have been requested.
2827 */
2828static int
2829sna_dri2_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
2830		       DRI2BufferPtr back, CARD64 *target_msc, CARD64 divisor,
2831		       CARD64 remainder, DRI2SwapEventPtr func, void *data)
2832{
2833	struct sna *sna = to_sna_from_drawable(draw);
2834	union drm_wait_vblank vbl;
2835	xf86CrtcPtr crtc = NULL;
2836	struct sna_dri2_event *info = NULL;
2837	int type = DRI2_EXCHANGE_COMPLETE;
2838	CARD64 current_msc;
2839
2840	DBG(("%s: draw=%lu %dx%d, pixmap=%ld %dx%d, back=%u (refs=%d/%d, flush=%d) , front=%u (refs=%d/%d, flush=%d)\n",
2841	     __FUNCTION__,
2842	     (long)draw->id, draw->width, draw->height,
2843	     get_drawable_pixmap(draw)->drawable.serialNumber,
2844	     get_drawable_pixmap(draw)->drawable.width,
2845	     get_drawable_pixmap(draw)->drawable.height,
2846	     get_private(back)->bo->handle,
2847	     get_private(back)->refcnt,
2848	     get_private(back)->bo->refcnt,
2849	     get_private(back)->bo->flush,
2850	     get_private(front)->bo->handle,
2851	     get_private(front)->refcnt,
2852	     get_private(front)->bo->refcnt,
2853	     get_private(front)->bo->flush));
2854
2855	DBG(("%s(target_msc=%llu, divisor=%llu, remainder=%llu)\n",
2856	     __FUNCTION__,
2857	     (long long)*target_msc,
2858	     (long long)divisor,
2859	     (long long)remainder));
2860
2861	assert(get_private(front)->refcnt);
2862	assert(get_private(back)->refcnt);
2863
2864	assert(get_private(front)->bo->refcnt);
2865	assert(get_private(back)->bo->refcnt);
2866
2867	if (get_private(front)->pixmap != get_drawable_pixmap(draw)) {
2868		DBG(("%s: decoupled DRI2 front pixmap=%ld, actual pixmap=%ld\n",
2869		     __FUNCTION__,
2870		     get_private(front)->pixmap->drawable.serialNumber,
2871		     get_drawable_pixmap(draw)->drawable.serialNumber));
2872		goto skip;
2873	}
2874
2875	if (get_private(back)->stale) {
2876		DBG(("%s: stale back buffer\n", __FUNCTION__));
2877		goto skip;
2878	}
2879
2880	assert(sna_pixmap_from_drawable(draw)->flush);
2881
2882	if (draw->type != DRAWABLE_PIXMAP) {
2883		WindowPtr win = (WindowPtr)draw;
2884		struct dri2_window *priv = dri2_window(win);
2885		if (priv->front) {
2886			assert(front == priv->front);
2887			assert(get_private(priv->front)->refcnt > 1);
2888			get_private(priv->front)->refcnt--;
2889			priv->front = NULL;
2890		}
2891		if (win->clipList.extents.x2 <= win->clipList.extents.x1 ||
2892		    win->clipList.extents.y2 <= win->clipList.extents.y1) {
2893			DBG(("%s: window clipped (%d, %d), (%d, %d)\n",
2894			     __FUNCTION__,
2895			     win->clipList.extents.x1,
2896			     win->clipList.extents.y1,
2897			     win->clipList.extents.x2,
2898			     win->clipList.extents.y2));
2899			goto skip;
2900		}
2901	}
2902
2903	/* Drawable not displayed... just complete the swap */
2904	if ((sna->flags & SNA_NO_WAIT) == 0)
2905		crtc = sna_dri2_get_crtc(draw);
2906	if (crtc == NULL) {
2907		DBG(("%s: off-screen, immediate update\n", __FUNCTION__));
2908		goto blit;
2909	}
2910
2911	assert(draw->type != DRAWABLE_PIXMAP);
2912
2913	while (dri2_chain(draw) && has_pending_events(sna)) {
2914		DBG(("%s: flushing pending events\n", __FUNCTION__));
2915		sna_mode_wakeup(sna);
2916	}
2917
2918	if (can_xchg(sna, draw, front, back) &&
2919	    sna_dri2_schedule_xchg(client, draw, crtc, front, back,
2920				   target_msc, divisor, remainder,
2921				   func, data))
2922		return TRUE;
2923
2924	if (can_xchg_crtc(sna, draw, front, back, crtc) &&
2925	    sna_dri2_schedule_xchg_crtc(client, draw, crtc, front, back,
2926					target_msc, divisor, remainder,
2927					func, data))
2928		return TRUE;
2929
2930	if (can_flip(sna, draw, front, back, crtc) &&
2931	    sna_dri2_schedule_flip(client, draw, crtc, front, back,
2932				  target_msc, divisor, remainder,
2933				  func, data))
2934		return TRUE;
2935
2936	VG_CLEAR(vbl);
2937
2938	info = sna_dri2_add_event(sna, draw, client);
2939	if (!info)
2940		goto blit;
2941
2942	assert(info->crtc == crtc);
2943	info->event_complete = func;
2944	info->event_data = data;
2945
2946	info->front = sna_dri2_reference_buffer(front);
2947	info->back = sna_dri2_reference_buffer(back);
2948
2949	if (immediate_swap(sna, *target_msc, divisor, draw, crtc, &current_msc)) {
2950		bool sync = current_msc < *target_msc;
2951		if (!sna_dri2_immediate_blit(sna, info, sync, true))
2952			sna_dri2_event_free(info);
2953		*target_msc = current_msc + sync;
2954		return TRUE;
2955	}
2956
2957	vbl.request.type =
2958		DRM_VBLANK_ABSOLUTE |
2959		DRM_VBLANK_EVENT;
2960	vbl.request.signal = (uintptr_t)info;
2961
2962	/*
2963	 * If divisor is zero, or current_msc is smaller than target_msc
2964	 * we just need to make sure target_msc passes before initiating
2965	 * the swap.
2966	 */
2967	info->type = SWAP;
2968	info->queued = true;
2969	if (divisor && current_msc >= *target_msc) {
2970		DBG(("%s: missed target, queueing event for next: current=%lld, target=%lld, divisor=%lld, remainder=%lld\n",
2971		     __FUNCTION__,
2972		     (long long)current_msc,
2973		     (long long)*target_msc,
2974		     (long long)divisor,
2975		     (long long)remainder));
2976
2977		*target_msc = current_msc + remainder - current_msc % divisor;
2978		if (*target_msc <= current_msc)
2979			*target_msc += divisor;
2980	}
2981	vbl.request.sequence = draw_target_seq(draw, *target_msc - 1);
2982	if (*target_msc <= current_msc + 1) {
2983		DBG(("%s: performing blit before queueing\n", __FUNCTION__));
2984		assert(info->queued);
2985		info->bo = __sna_dri2_copy_region(sna, draw, NULL,
2986						  back, front,
2987						  true);
2988		info->type = SWAP_WAIT;
2989
2990		vbl.request.type =
2991			DRM_VBLANK_RELATIVE |
2992			DRM_VBLANK_EVENT;
2993		vbl.request.sequence = 1;
2994		*target_msc = current_msc + 1;
2995	}
2996
2997	assert(info->queued);
2998	if (sna_wait_vblank(sna, &vbl, info->pipe))
2999		goto blit;
3000
3001	DBG(("%s: reported target_msc=%llu\n", __FUNCTION__, *target_msc));
3002	swap_limit(draw, 1 + (info->type == SWAP_WAIT));
3003	return TRUE;
3004
3005blit:
3006	DBG(("%s -- blit\n", __FUNCTION__));
3007	if (info)
3008		sna_dri2_event_free(info);
3009	if (can_xchg(sna, draw, front, back)) {
3010		sna_dri2_xchg(draw, front, back);
3011	} else {
3012		__sna_dri2_copy_region(sna, draw, NULL, back, front, false);
3013		type = DRI2_BLIT_COMPLETE;
3014	}
3015skip:
3016	DBG(("%s: unable to show frame, unblocking client\n", __FUNCTION__));
3017	if (crtc == NULL)
3018		crtc = sna_mode_first_crtc(sna);
3019	fake_swap_complete(sna, client, draw, crtc, type, func, data);
3020	*target_msc = 0; /* offscreen, so zero out target vblank count */
3021	return TRUE;
3022}
3023
3024/*
3025 * Get current frame count and frame count timestamp, based on drawable's
3026 * crtc.
3027 */
3028static int
3029sna_dri2_get_msc(DrawablePtr draw, CARD64 *ust, CARD64 *msc)
3030{
3031	struct sna *sna = to_sna_from_drawable(draw);
3032	xf86CrtcPtr crtc = sna_dri2_get_crtc(draw);
3033	const struct ust_msc *swap;
3034
3035	DBG(("%s(draw=%ld, pipe=%d)\n", __FUNCTION__, draw->id,
3036	     crtc ? sna_crtc_to_pipe(crtc) : -1));
3037
3038	if (crtc != NULL) {
3039		union drm_wait_vblank vbl;
3040
3041		VG_CLEAR(vbl);
3042		vbl.request.type = _DRM_VBLANK_RELATIVE;
3043		vbl.request.sequence = 0;
3044		if (sna_wait_vblank(sna, &vbl, sna_crtc_to_pipe(crtc)) == 0)
3045			sna_crtc_record_vblank(crtc, &vbl);
3046	} else
3047		/* Drawable not displayed, make up a *monotonic* value */
3048		crtc = sna_mode_first_crtc(sna);
3049
3050	swap = sna_crtc_last_swap(crtc);
3051	*msc = draw_current_msc(draw, crtc, swap->msc);
3052	*ust = ust64(swap->tv_sec, swap->tv_usec);
3053	DBG(("%s: msc=%llu, ust=%llu\n", __FUNCTION__,
3054	     (long long)*msc, (long long)*ust));
3055	return TRUE;
3056}
3057
/*
 * Request a DRM event when the requested conditions will be satisfied.
 *
 * We need to handle the event and ask the server to wake up the client when
 * we receive it.
 */
static int
sna_dri2_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
			   CARD64 divisor, CARD64 remainder)
{
	struct sna *sna = to_sna_from_drawable(draw);
	struct sna_dri2_event *info = NULL;
	xf86CrtcPtr crtc;
	CARD64 current_msc;
	union drm_wait_vblank vbl;
	const struct ust_msc *swap;
	int pipe;

	crtc = sna_dri2_get_crtc(draw);
	DBG(("%s(pipe=%d, target_msc=%llu, divisor=%llu, rem=%llu)\n",
	     __FUNCTION__, crtc ? sna_crtc_to_pipe(crtc) : -1,
	     (long long)target_msc,
	     (long long)divisor,
	     (long long)remainder));

	/* Drawable not visible, return immediately */
	if (crtc == NULL)
		goto out_complete;

	pipe = sna_crtc_to_pipe(crtc);

	VG_CLEAR(vbl);

	/* Get current count */
	vbl.request.type = _DRM_VBLANK_RELATIVE;
	vbl.request.sequence = 0;
	if (sna_wait_vblank(sna, &vbl, pipe))
		goto out_complete;

	current_msc = draw_current_msc(draw, crtc, sna_crtc_record_vblank(crtc, &vbl));

	/* If target_msc already reached or passed, set it to
	 * current_msc to ensure we return a reasonable value back
	 * to the caller. This keeps the client from continually
	 * sending us MSC targets from the past by forcibly updating
	 * their count on this call.
	 */
	if (divisor == 0 && current_msc >= target_msc)
		goto out_complete;

	info = sna_dri2_add_event(sna, draw, client);
	if (!info)
		goto out_complete;

	assert(info->crtc == crtc);
	info->type = WAITMSC;

	/* The event cookie: the vblank handler uses it to recover our
	 * wait info when the kernel delivers the event.
	 */
	vbl.request.signal = (uintptr_t)info;
	vbl.request.type = DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT;
	/*
	 * If divisor is zero, or current_msc is smaller than target_msc,
	 * we just need to make sure target_msc passes before waking up the
	 * client. Otherwise, compute the next msc to match divisor/remainder.
	 */
	if (divisor && current_msc >= target_msc) {
		DBG(("%s: missed target, queueing event for next: current=%lld, target=%lld, divisor=%lld, remainder=%lld\n",
		     __FUNCTION__,
		     (long long)current_msc,
		     (long long)target_msc,
		     (long long)divisor,
		     (long long)remainder));
		/* Next count congruent to remainder (mod divisor) at or
		 * after the current count.
		 */
		target_msc = current_msc + remainder - current_msc % divisor;
		if (target_msc <= current_msc)
			target_msc += divisor;
	}
	vbl.request.sequence = draw_target_seq(draw, target_msc);

	info->queued = true;
	if (sna_wait_vblank(sna, &vbl, pipe))
		goto out_free_info;

	/* Suspend the client; it is resumed when the event fires. */
	DRI2BlockClient(client, draw);
	return TRUE;

out_free_info:
	sna_dri2_event_free(info);
out_complete:
	/* Cannot (or need not) wait: complete immediately with the last
	 * known swap time, using the first crtc as a monotonic fallback
	 * for undisplayed drawables.
	 */
	if (crtc == NULL)
		crtc = sna_mode_first_crtc(sna);
	swap = sna_crtc_last_swap(crtc);
	DRI2WaitMSCComplete(client, draw,
			    draw_current_msc(draw, crtc, swap->msc),
			    swap->tv_sec, swap->tv_usec);
	return TRUE;
}
#else
/* DRI2 disabled at compile time: provide no-op window hooks so the
 * rest of the driver can call them unconditionally.
 */
void sna_dri2_destroy_window(WindowPtr win) { }
void sna_dri2_decouple_window(WindowPtr win) { }
#endif
3157
3158static bool has_i830_dri(void)
3159{
3160	return access(DRI_DRIVER_PATH "/i830_dri.so", R_OK) == 0;
3161}
3162
/*
 * Compare two option names for equality, ignoring case and skipping any
 * '_', ' ' or '\t' separator characters in either string (so e.g.
 * "triple_buffer" matches "Triple Buffer").
 *
 * Returns 0 if the names match, non-zero otherwise; the sign follows the
 * first mismatching pair of lowercased characters, as with strcmp().
 * A NULL or empty string only matches another NULL or empty string.
 */
static int
namecmp(const char *s1, const char *s2)
{
	char c1, c2;

	if (!s1 || *s1 == 0) {
		if (!s2 || *s2 == 0)
			return 0;
		else
			return 1;
	}

	/* s1 is non-empty here; guard against dereferencing a NULL s2
	 * in the separator-skipping loop below.
	 */
	if (!s2 || *s2 == 0)
		return -1;

	while (*s1 == '_' || *s1 == ' ' || *s1 == '\t')
		s1++;

	while (*s2 == '_' || *s2 == ' ' || *s2 == '\t')
		s2++;

	/* Cast to unsigned char before the <ctype.h> calls: passing a
	 * negative plain-char value is undefined behaviour (CERT STR37-C).
	 */
	c1 = isupper((unsigned char)*s1) ? tolower((unsigned char)*s1) : *s1;
	c2 = isupper((unsigned char)*s2) ? tolower((unsigned char)*s2) : *s2;
	while (c1 == c2) {
		if (c1 == '\0')
			return 0;

		s1++;
		while (*s1 == '_' || *s1 == ' ' || *s1 == '\t')
			s1++;

		s2++;
		while (*s2 == '_' || *s2 == ' ' || *s2 == '\t')
			s2++;

		c1 = isupper((unsigned char)*s1) ? tolower((unsigned char)*s1) : *s1;
		c2 = isupper((unsigned char)*s2) ? tolower((unsigned char)*s2) : *s2;
	}

	return c1 - c2;
}
3201
/*
 * Does the option string denote a plain enable/disable level rather than
 * an explicit driver name?  Recognises boolean keywords, "0" and non-zero
 * numbers.  For "<level>:<name>" forms, advances *str past the colon and
 * returns false so the caller can use the remainder as the name.
 */
static bool is_level(const char **str)
{
	static const char * const keywords[] = {
		"on", "true", "yes",
		"0", "off", "false", "no",
	};
	const char *opt = *str;
	unsigned num, i;
	char *end;

	/* An absent or empty option means "use the default level". */
	if (opt == NULL || *opt == '\0')
		return true;

	for (i = 0; i < sizeof(keywords) / sizeof(keywords[0]); i++) {
		if (namecmp(opt, keywords[i]) == 0)
			return true;
	}

	num = strtoul(opt, &end, 0);
	if (num != 0 && *end == '\0')
		return true;
	if (num != 0 && *end == ':')
		*str = end + 1;
	return false;
}
3234
3235static const char *dri_driver_name(struct sna *sna)
3236{
3237	const char *s = xf86GetOptValString(sna->Options, OPTION_DRI);
3238
3239	if (is_level(&s)) {
3240		if (sna->kgem.gen < 030)
3241			return has_i830_dri() ? "i830" : "i915";
3242		else if (sna->kgem.gen < 040)
3243			return "i915";
3244		else
3245			return "i965";
3246	}
3247
3248	return s;
3249}
3250
/*
 * Initialise DRI2 support for this screen.
 *
 * Builds a DRI2InfoRec advertising the highest interface version both
 * this build and the server's DRI2 module support, then registers it via
 * DRI2ScreenInit().  Returns false if the server's DRI2 module is too old.
 */
bool sna_dri2_open(struct sna *sna, ScreenPtr screen)
{
	DRI2InfoRec info;
	int major = 1, minor = 0;
#if DRI2INFOREC_VERSION >= 4
	const char *driverNames[2];
#endif

	DBG(("%s()\n", __FUNCTION__));

	/* Warn, but do not fail: DRI2 may still be set up on a wedged GPU. */
	if (wedged(sna)) {
		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
			   "loading DRI2 whilst the GPU is wedged.\n");
	}

	if (xf86LoaderCheckSymbol("DRI2Version"))
		DRI2Version(&major, &minor);

	/* We rely on interfaces introduced in DRI2 module 1.1. */
	if (minor < 1) {
		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
			   "DRI2 requires DRI2 module version 1.1.0 or later\n");
		return false;
	}

	memset(&info, '\0', sizeof(info));
	info.fd = sna->kgem.fd;
	info.driverName = dri_driver_name(sna);
	info.deviceName = intel_get_client_name(sna->dev);

	DBG(("%s: loading dri driver '%s' [gen=%d] for device '%s'\n",
	     __FUNCTION__, info.driverName, sna->kgem.gen, info.deviceName));

#if DRI2INFOREC_VERSION == 2
	/* The ABI between 2 and 3 was broken so we could get rid of
	 * the multi-buffer alloc functions.  Make sure we indicate the
	 * right version so DRI2 can reject us if it's version 3 or above. */
	info.version = 2;
#else
	info.version = 3;
#endif
	info.CreateBuffer = sna_dri2_create_buffer;
	info.DestroyBuffer = sna_dri2_destroy_buffer;

	info.CopyRegion = sna_dri2_copy_region;
#if DRI2INFOREC_VERSION >= 4
	/* Version 4 adds scheduled swaps and MSC query/wait support. */
	info.version = 4;
	info.ScheduleSwap = sna_dri2_schedule_swap;
	info.GetMSC = sna_dri2_get_msc;
	info.ScheduleWaitMSC = sna_dri2_schedule_wait_msc;
	info.numDrivers = 2;
	info.driverNames = driverNames;
	driverNames[0] = info.driverName;
	driverNames[1] = info.driverName;
#endif

#if DRI2INFOREC_VERSION >= 6
	/* Version 6 lets us validate a raised swap limit for
	 * triple buffering and be notified of buffer reuse. */
	if (xorg_can_triple_buffer()) {
		info.version = 6;
		info.SwapLimitValidate = sna_dri2_swap_limit_validate;
		info.ReuseBufferNotify = sna_dri2_reuse_buffer;
	}
#endif

#if USE_ASYNC_SWAP
	/* Version 10 advertises support for unthrottled (async) swaps. */
	info.version = 10;
	info.scheduleSwap0 = 1;
#endif

	return DRI2ScreenInit(screen, &info);
}
3321
/* Tear down DRI2 support for this screen at server shutdown/close. */
void sna_dri2_close(struct sna *sna, ScreenPtr screen)
{
	DBG(("%s()\n", __FUNCTION__));
	DRI2CloseScreen(screen);
}
3327