sna_dri2.c revision 42542f5f
1/**************************************************************************
2
3Copyright 2001 VA Linux Systems Inc., Fremont, California.
4Copyright © 2002 by David Dawes
5
6All Rights Reserved.
7
8Permission is hereby granted, free of charge, to any person obtaining a
9copy of this software and associated documentation files (the "Software"),
10to deal in the Software without restriction, including without limitation
11on the rights to use, copy, modify, merge, publish, distribute, sub
12license, and/or sell copies of the Software, and to permit persons to whom
13the Software is furnished to do so, subject to the following conditions:
14
15The above copyright notice and this permission notice (including the next
16paragraph) shall be included in all copies or substantial portions of the
17Software.
18
19THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22ATI, VA LINUX SYSTEMS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
23DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27**************************************************************************/
28
29/*
30 * Authors: Jeff Hartmann <jhartmann@valinux.com>
31 *          David Dawes <dawes@xfree86.org>
32 *          Keith Whitwell <keith@tungstengraphics.com>
33 */
34
35#ifdef HAVE_CONFIG_H
36#include "config.h"
37#endif
38
39#include <errno.h>
40#include <time.h>
41#include <string.h>
42#include <unistd.h>
43#include <poll.h>
44
45#include "sna.h"
46#include "intel_options.h"
47
48#include <xf86drm.h>
49#include <i915_drm.h>
50#include <dri2.h>
51#if XORG_VERSION_CURRENT >= XORG_VERSION_NUMERIC(1,12,99,901,0) && defined(COMPOSITE)
52#include <compositeext.h>
53#define CHECK_FOR_COMPOSITOR
54#endif
55
56#define DBG_CAN_FLIP 1
57#define DBG_CAN_XCHG 1
58
59#define DBG_FORCE_COPY -1 /* KGEM_BLT or KGEM_3D */
60
61#if DRI2INFOREC_VERSION < 2
62#error DRI2 version supported by the Xserver is too old
63#endif
64
65static inline struct kgem_bo *ref(struct kgem_bo *bo)
66{
67	assert(bo->refcnt);
68	bo->refcnt++;
69	return bo;
70}
71
72static inline void unref(struct kgem_bo *bo)
73{
74	assert(bo->refcnt > 1);
75	bo->refcnt--;
76}
77
/* Driver-private state for every DRI2Buffer2Rec we hand out; it is
 * co-allocated immediately after the DRI2Buffer2Rec (see get_private()).
 */
struct sna_dri2_private {
	PixmapPtr pixmap;	/* only set for the front-left attachment */
	struct kgem_bo *bo;	/* backing storage for this buffer */
	DRI2Buffer2Ptr proxy;	/* NOTE(review): presumably a redirected buffer chained for destruction — confirm */
	bool stale;		/* back contents need replacing (see sna_dri2_get_back) */
	uint32_t size;		/* packed drawable size: height << 16 | width */
	int refcnt;
};
86
87static inline struct sna_dri2_private *
88get_private(void *buffer)
89{
90	return (struct sna_dri2_private *)((DRI2Buffer2Ptr)buffer+1);
91}
92
#if DRI2INFOREC_VERSION >= 4
/* Kind of deferred swap/flip work carried by a struct sna_dri2_event.
 * (The handlers live in the vblank/flip completion paths, not visible
 * in this chunk.)
 */
enum event_type {
	WAITMSC = 0,
	SWAP,
	SWAP_WAIT,
	SWAP_THROTTLE,
	FLIP,
	FLIP_THROTTLE,
	FLIP_COMPLETE,
	FLIP_ASYNC,
};
104
/* Cache entry holding a displaced back-buffer bo for later reuse
 * (see sna_dri2_get_back).
 */
struct dri_bo {
	struct list link;	/* entry in sna_dri2_event.cache */
	struct kgem_bo *bo;	/* NULL once the slot's bo has been handed back out */
	uint32_t name;		/* flink name for bo */
};
110
/* One queued DRI2 event: a pending MSC wait, swap or pageflip. */
struct sna_dri2_event {
	DrawablePtr draw;
	ClientPtr client;
	enum event_type type;
	xf86CrtcPtr crtc;
	int pipe;
	bool queued;

	/* for swaps & flips only */
	DRI2SwapEventPtr event_complete;	/* completion callback */
	void *event_data;			/* closure for event_complete */
	DRI2BufferPtr front;
	DRI2BufferPtr back;
	struct kgem_bo *bo;

	/* next event pending on the same drawable (see dri2_window()->chain) */
	struct sna_dri2_event *chain;

	struct list cache;	/* list of struct dri_bo kept for back-buffer reuse */
	struct list link;

	int mode;
};
133
134static void sna_dri2_flip_event(struct sna *sna,
135				struct sna_dri2_event *flip);
136
137static void
138sna_dri2_get_back(struct sna *sna,
139		  DrawablePtr draw,
140		  DRI2BufferPtr back,
141		  struct sna_dri2_event *info)
142{
143	struct kgem_bo *bo;
144	uint32_t name;
145	bool reuse;
146
147	DBG(("%s: draw size=%dx%d, buffer size=%dx%d\n",
148	     __FUNCTION__, draw->width, draw->height,
149	     get_private(back)->size & 0xffff, get_private(back)->size >> 16));
150	reuse = (draw->height << 16 | draw->width) == get_private(back)->size;
151	if (reuse) {
152		bo = get_private(back)->bo;
153		assert(bo->refcnt);
154		DBG(("%s: back buffer handle=%d, scanout?=%d, refcnt=%d\n",
155					__FUNCTION__, bo->handle, bo->active_scanout, get_private(back)->refcnt));
156		if (bo->active_scanout == 0) {
157			DBG(("%s: reuse unattached back\n", __FUNCTION__));
158			get_private(back)->stale = false;
159			return;
160		}
161	}
162
163	bo = NULL;
164	if (info) {
165		struct dri_bo *c;
166		list_for_each_entry(c, &info->cache, link) {
167			if (c->bo && c->bo->scanout == 0) {
168				bo = c->bo;
169				name = c->name;
170				DBG(("%s: reuse cache handle=%d\n", __FUNCTION__, bo->handle));
171				list_move_tail(&c->link, &info->cache);
172				c->bo = NULL;
173			}
174		}
175	}
176	if (bo == NULL) {
177		DBG(("%s: allocating new backbuffer\n", __FUNCTION__));
178		bo = kgem_create_2d(&sna->kgem,
179				    draw->width, draw->height, draw->bitsPerPixel,
180				    get_private(back)->bo->tiling,
181				    get_private(back)->bo->scanout ? CREATE_SCANOUT : 0);
182		if (bo == NULL)
183			return;
184
185		name = kgem_bo_flink(&sna->kgem, bo);
186		if (name == 0) {
187			kgem_bo_destroy(&sna->kgem, bo);
188			return;
189		}
190	}
191	assert(bo->active_scanout == 0);
192
193	if (info && reuse) {
194		bool found = false;
195		struct dri_bo *c;
196
197		list_for_each_entry_reverse(c, &info->cache, link) {
198			if (c->bo == NULL) {
199				found = true;
200				_list_del(&c->link);
201				break;
202			}
203		}
204		if (!found)
205			c = malloc(sizeof(*c));
206		if (c != NULL) {
207			c->bo = ref(get_private(back)->bo);
208			c->name = back->name;
209			list_add(&c->link, &info->cache);
210			DBG(("%s: cacheing handle=%d (name=%d)\n", __FUNCTION__, c->bo->handle, c->name));
211		}
212	}
213
214	assert(bo != get_private(back)->bo);
215	kgem_bo_destroy(&sna->kgem, get_private(back)->bo);
216
217	get_private(back)->bo = bo;
218	get_private(back)->size = draw->height << 16 | draw->width;
219	back->pitch = bo->pitch;
220	back->name = name;
221
222	get_private(back)->stale = false;
223}
224
/* Per-window DRI2 state, hung off the sna window private. */
struct dri2_window {
	DRI2BufferPtr front;		/* window-private front buffer, if any */
	struct sna_dri2_event *chain;	/* oldest outstanding event on this window */
	xf86CrtcPtr crtc;
	int64_t msc_delta;		/* NOTE(review): presumably an offset applied to reported MSC values — confirm in the swap path */
};
231
232static struct dri2_window *dri2_window(WindowPtr win)
233{
234	assert(win->drawable.type != DRAWABLE_PIXMAP);
235	return ((void **)__get_private(win, sna_window_key))[1];
236}
237
238static struct sna_dri2_event *
239dri2_chain(DrawablePtr d)
240{
241	struct dri2_window *priv = dri2_window((WindowPtr)d);
242	assert(priv != NULL);
243	return priv->chain;
244}
/* Window-private front buffer (only tracked with DRI2INFOREC_VERSION >= 4). */
inline static DRI2BufferPtr dri2_window_get_front(WindowPtr win) { return dri2_window(win)->front; }
246#else
/* Without DRI2INFOREC_VERSION >= 4 there is no per-window front buffer. */
inline static void *dri2_window_get_front(WindowPtr win) { return NULL; }
248#endif
249
250#if DRI2INFOREC_VERSION < 6
251
252#define xorg_can_triple_buffer(ptr) 0
253#define swap_limit(d, l) false
254
255#else
256
257#if XORG_VERSION_CURRENT >= XORG_VERSION_NUMERIC(1,15,99,904,0)
258/* Prime fixed for triple buffer support */
259#define xorg_can_triple_buffer(ptr) 1
260#elif XORG_VERSION_CURRENT < XORG_VERSION_NUMERIC(1,12,99,901,0)
261/* Before numGPUScreens was introduced */
262#define xorg_can_triple_buffer(ptr) 1
263#else
264/* Subject to crashers when combining triple buffering and Prime */
inline static bool xorg_can_triple_buffer(struct sna *sna)
{
	/* Only safe when no GPU screens (Prime) are configured; see the
	 * comment above on the crashes when combining the two.
	 */
	return screenInfo.numGPUScreens == 0;
}
269#endif
270
271static Bool
272sna_dri2_swap_limit_validate(DrawablePtr draw, int swap_limit)
273{
274	DBG(("%s: swap limit set to %d\n", __FUNCTION__, swap_limit));
275	return swap_limit >= 1;
276}
277
/* DRI2 callback invoked when a client reuses a buffer between frames.
 * For a window's back-left buffer we take the opportunity to swap in an
 * idle bo via sna_dri2_get_back(), since the current one may still be
 * attached to the scanout.
 */
static void
sna_dri2_reuse_buffer(DrawablePtr draw, DRI2BufferPtr buffer)
{
	DBG(("%s: reusing buffer pixmap=%ld, attachment=%d, handle=%d, name=%d\n",
	     __FUNCTION__, get_drawable_pixmap(draw)->drawable.serialNumber,
	     buffer->attachment, get_private(buffer)->bo->handle, buffer->name));
	assert(get_private(buffer)->refcnt);
	assert(get_private(buffer)->bo->refcnt > get_private(buffer)->bo->active_scanout);

	if (buffer->attachment == DRI2BufferBackLeft &&
	    draw->type != DRAWABLE_PIXMAP) {
		DBG(("%s: replacing back buffer\n", __FUNCTION__));
		sna_dri2_get_back(to_sna_from_drawable(draw), draw, buffer, dri2_chain(draw));

		/* The replacement must be idle and re-exported under the same name. */
		assert(kgem_bo_flink(&to_sna_from_drawable(draw)->kgem, get_private(buffer)->bo) == buffer->name);
		assert(get_private(buffer)->bo->refcnt);
		assert(get_private(buffer)->bo->active_scanout == 0);
	}
}
297
/* Apply a swap limit to the drawable; always reports success to the
 * caller (DRI2SwapLimit's own return value is ignored).
 */
static bool swap_limit(DrawablePtr draw, int limit)
{
	DBG(("%s: draw=%ld setting swap limit to %d\n", __FUNCTION__, (long)draw->id, limit));
	DRI2SwapLimit(draw, limit);
	return true;
}
304#endif
305
306#if DRI2INFOREC_VERSION < 10
307#undef USE_ASYNC_SWAP
308#define USE_ASYNC_SWAP 0
309#endif
310
311#define COLOR_PREFER_TILING_Y 0
312
313/* Prefer to enable TILING_Y if this buffer will never be a
314 * candidate for pageflipping
315 */
316static uint32_t color_tiling(struct sna *sna, DrawablePtr draw)
317{
318	uint32_t tiling;
319
320	if (COLOR_PREFER_TILING_Y &&
321	    (draw->width  != sna->front->drawable.width ||
322	     draw->height != sna->front->drawable.height))
323		tiling = I915_TILING_Y;
324	else
325		tiling = I915_TILING_X;
326
327	return kgem_choose_tiling(&sna->kgem, -tiling,
328				  draw->width,
329				  draw->height,
330				  draw->bitsPerPixel);
331}
332
333static uint32_t other_tiling(struct sna *sna, DrawablePtr draw)
334{
335	/* XXX Can mix color X / depth Y? */
336	return kgem_choose_tiling(&sna->kgem,
337				  sna->kgem.gen >= 040 ? -I915_TILING_Y : -I915_TILING_X,
338				  draw->width,
339				  draw->height,
340				  draw->bitsPerPixel);
341}
342
/* Prepare a pixmap for export to a DRI client: migrate it wholly onto
 * the GPU and normalise its tiling so the bo can be shared.
 *
 * Returns the pixmap's GPU bo, or NULL (the caller reports BadAlloc)
 * when the pixmap is SHM-backed/static or cannot be migrated.
 */
static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
					  PixmapPtr pixmap)
{
	struct sna_pixmap *priv;
	int tiling;

	DBG(("%s: attaching DRI client to pixmap=%ld\n",
	     __FUNCTION__, pixmap->drawable.serialNumber));

	priv = sna_pixmap(pixmap);
	if (priv != NULL && IS_STATIC_PTR(priv->ptr) && priv->cpu_bo) {
		DBG(("%s: SHM or unattached Pixmap, BadAlloc\n", __FUNCTION__));
		return NULL;
	}

	/* NOTE(review): __MOVE_FORCE | __MOVE_DRI presumably overrides the
	 * normal migration heuristics for external clients — confirm.
	 */
	priv = sna_pixmap_move_to_gpu(pixmap,
				      MOVE_READ | __MOVE_FORCE | __MOVE_DRI);
	if (priv == NULL) {
		DBG(("%s: failed to move to GPU, BadAlloc\n", __FUNCTION__));
		return NULL;
	}

	assert(priv->flush == false);
	assert(priv->cpu_damage == NULL);
	assert(priv->gpu_bo);
	assert(priv->gpu_bo->proxy == NULL);
	assert(priv->gpu_bo->flush == false);

	/* color_tiling() may return a negated "preference"; only the
	 * tiling mode itself matters here.
	 */
	tiling = color_tiling(sna, &pixmap->drawable);
	if (tiling < 0)
		tiling = -tiling;
	if (priv->gpu_bo->tiling != tiling)
		sna_pixmap_change_tiling(pixmap, tiling);

	return priv->gpu_bo;
}
379
380pure static inline void *sna_pixmap_get_buffer(PixmapPtr pixmap)
381{
382	assert(pixmap->refcnt);
383	return ((void **)__get_private(pixmap, sna_pixmap_key))[2];
384}
385
386static inline void sna_pixmap_set_buffer(PixmapPtr pixmap, void *ptr)
387{
388	assert(pixmap->refcnt);
389	((void **)__get_private(pixmap, sna_pixmap_key))[2] = ptr;
390}
391
/* Retarget the exported DRI2 front buffer at a replacement bo after the
 * pixmap's backing storage changed, transferring the flush hint from
 * the old bo to the new one and re-flinking the buffer name.
 */
void
sna_dri2_pixmap_update_bo(struct sna *sna, PixmapPtr pixmap, struct kgem_bo *bo)
{
	DRI2BufferPtr buffer;
	struct sna_dri2_private *private;

	buffer = sna_pixmap_get_buffer(pixmap);
	if (buffer == NULL)
		return;

	DBG(("%s: pixmap=%ld, old handle=%d, new handle=%d\n", __FUNCTION__,
	     pixmap->drawable.serialNumber,
	     get_private(buffer)->bo->handle,
	     sna_pixmap(pixmap)->gpu_bo->handle));

	private = get_private(buffer);
	assert(private->pixmap == pixmap);

	/* Callers are expected never to pass the current bo (hence the
	 * assert); release builds still bail out harmlessly if they do.
	 */
	assert(bo != private->bo);
	if (private->bo == bo)
		return;

	DBG(("%s: dropping flush hint from handle=%d\n", __FUNCTION__, private->bo->handle));
	private->bo->flush = false;
	kgem_bo_destroy(&sna->kgem, private->bo);

	/* Clients look the buffer up by its flink name, so re-export. */
	buffer->name = kgem_bo_flink(&sna->kgem, bo);
	private->bo = ref(bo);

	DBG(("%s: adding flush hint to handle=%d\n", __FUNCTION__, bo->handle));
	bo->flush = true;
	assert(sna_pixmap(pixmap)->flush);

	/* XXX DRI2InvalidateDrawable(&pixmap->drawable); */
}
427
428static DRI2Buffer2Ptr
429sna_dri2_create_buffer(DrawablePtr draw,
430		       unsigned int attachment,
431		       unsigned int format)
432{
433	struct sna *sna = to_sna_from_drawable(draw);
434	DRI2Buffer2Ptr buffer;
435	struct sna_dri2_private *private;
436	PixmapPtr pixmap;
437	struct kgem_bo *bo;
438	unsigned flags = 0;
439	uint32_t size;
440	int bpp;
441
442	DBG(("%s pixmap=%ld, (attachment=%d, format=%d, drawable=%dx%d)\n",
443	     __FUNCTION__,
444	     get_drawable_pixmap(draw)->drawable.serialNumber,
445	     attachment, format, draw->width, draw->height));
446
447	pixmap = NULL;
448	size = (uint32_t)draw->height << 16 | draw->width;
449	switch (attachment) {
450	case DRI2BufferFrontLeft:
451		pixmap = get_drawable_pixmap(draw);
452		buffer = NULL;
453		if (draw->type != DRAWABLE_PIXMAP)
454			buffer = dri2_window_get_front((WindowPtr)draw);
455		if (buffer == NULL)
456			buffer = sna_pixmap_get_buffer(pixmap);
457		if (buffer) {
458			private = get_private(buffer);
459
460			DBG(("%s: reusing front buffer attachment, win=%lu %dx%d, pixmap=%ld %dx%d, handle=%d, name=%d\n",
461			     __FUNCTION__,
462			     draw->type != DRAWABLE_PIXMAP ? (long)draw->id : (long)0,
463			     draw->width, draw->height,
464			     pixmap->drawable.serialNumber,
465			     pixmap->drawable.width,
466			     pixmap->drawable.height,
467			     private->bo->handle, buffer->name));
468
469			assert(private->pixmap == pixmap);
470			assert(sna_pixmap(pixmap)->flush);
471			assert(sna_pixmap(pixmap)->pinned & PIN_DRI2);
472			assert(kgem_bo_flink(&sna->kgem, private->bo) == buffer->name);
473
474			private->refcnt++;
475			return buffer;
476		}
477
478		bo = sna_pixmap_set_dri(sna, pixmap);
479		if (bo == NULL)
480			return NULL;
481
482		assert(sna_pixmap(pixmap) != NULL);
483
484		bo = ref(bo);
485		bpp = pixmap->drawable.bitsPerPixel;
486		if (pixmap == sna->front && !(sna->flags & SNA_LINEAR_FB))
487			flags |= CREATE_SCANOUT;
488		DBG(("%s: attaching to front buffer %dx%d [%p:%d], scanout? %d\n",
489		     __FUNCTION__,
490		     pixmap->drawable.width, pixmap->drawable.height,
491		     pixmap, pixmap->refcnt, flags & CREATE_SCANOUT));
492		size = (uint32_t)pixmap->drawable.height << 16 | pixmap->drawable.width;
493		break;
494
495	case DRI2BufferBackLeft:
496		if (draw->type != DRAWABLE_PIXMAP) {
497			if (dri2_window_get_front((WindowPtr)draw))
498				flags |= CREATE_SCANOUT;
499			if (draw->width  == sna->front->drawable.width &&
500			    draw->height == sna->front->drawable.height &&
501			    (sna->flags & (SNA_LINEAR_FB | SNA_NO_WAIT | SNA_NO_FLIP)) == 0)
502				flags |= CREATE_SCANOUT;
503		}
504	case DRI2BufferBackRight:
505	case DRI2BufferFrontRight:
506	case DRI2BufferFakeFrontLeft:
507	case DRI2BufferFakeFrontRight:
508		bpp = draw->bitsPerPixel;
509		DBG(("%s: creating back buffer %dx%d, suitable for scanout? %d\n",
510		     __FUNCTION__,
511		     draw->width, draw->height,
512		     flags & CREATE_SCANOUT));
513
514		bo = kgem_create_2d(&sna->kgem,
515				    draw->width,
516				    draw->height,
517				    draw->bitsPerPixel,
518				    color_tiling(sna, draw),
519				    flags);
520		break;
521
522	case DRI2BufferStencil:
523		/*
524		 * The stencil buffer has quirky pitch requirements.  From Vol
525		 * 2a, 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface
526		 * Pitch":
527		 *    The pitch must be set to 2x the value computed based on
528		 *    width, as the stencil buffer is stored with two rows
529		 *    interleaved.
530		 * To accomplish this, we resort to the nasty hack of doubling
531		 * the drm region's cpp and halving its height.
532		 *
533		 * If we neglect to double the pitch, then
534		 * drm_intel_gem_bo_map_gtt() maps the memory incorrectly.
535		 *
536		 * The alignment for W-tiling is quite different to the
537		 * nominal no-tiling case, so we have to account for
538		 * the tiled access pattern explicitly.
539		 *
540		 * The stencil buffer is W tiled. However, we request from
541		 * the kernel a non-tiled buffer because the kernel does
542		 * not understand W tiling and the GTT is incapable of
543		 * W fencing.
544		 */
545		bpp = format ? format : draw->bitsPerPixel;
546		bpp *= 2;
547		bo = kgem_create_2d(&sna->kgem,
548				    ALIGN(draw->width, 64),
549				    ALIGN((draw->height + 1) / 2, 64),
550				    bpp, I915_TILING_NONE, flags);
551		break;
552
553	case DRI2BufferDepth:
554	case DRI2BufferDepthStencil:
555	case DRI2BufferHiz:
556	case DRI2BufferAccum:
557		bpp = format ? format : draw->bitsPerPixel,
558		bo = kgem_create_2d(&sna->kgem,
559				    draw->width, draw->height, bpp,
560				    other_tiling(sna, draw),
561				    flags);
562		break;
563
564	default:
565		return NULL;
566	}
567	if (bo == NULL)
568		return NULL;
569
570	buffer = calloc(1, sizeof *buffer + sizeof *private);
571	if (buffer == NULL)
572		goto err;
573
574	private = get_private(buffer);
575	buffer->attachment = attachment;
576	buffer->pitch = bo->pitch;
577	buffer->cpp = bpp / 8;
578	buffer->driverPrivate = private;
579	buffer->format = format;
580	buffer->flags = 0;
581	buffer->name = kgem_bo_flink(&sna->kgem, bo);
582	private->refcnt = 1;
583	private->bo = bo;
584	private->pixmap = pixmap;
585	private->size = size;
586
587	if (buffer->name == 0)
588		goto err;
589
590	if (pixmap) {
591		struct sna_pixmap *priv;
592
593		assert(attachment == DRI2BufferFrontLeft);
594		assert(sna_pixmap_get_buffer(pixmap) == NULL);
595
596		sna_pixmap_set_buffer(pixmap, buffer);
597		assert(sna_pixmap_get_buffer(pixmap) == buffer);
598		pixmap->refcnt++;
599
600		priv = sna_pixmap(pixmap);
601		assert(priv->flush == false);
602		assert((priv->pinned & PIN_DRI2) == 0);
603
604		/* Don't allow this named buffer to be replaced */
605		priv->pinned |= PIN_DRI2;
606
607		/* We need to submit any modifications to and reads from this
608		 * buffer before we send any reply to the Client.
609		 *
610		 * As we don't track which Client, we flush for all.
611		 */
612		DBG(("%s: adding flush hint to handle=%d\n", __FUNCTION__, priv->gpu_bo->handle));
613		priv->gpu_bo->flush = true;
614		if (priv->gpu_bo->exec)
615			sna->kgem.flush = 1;
616
617		priv->flush |= 1;
618		if (draw->type == DRAWABLE_PIXMAP) {
619			/* DRI2 renders directly into GLXPixmaps, treat as hostile */
620			kgem_bo_unclean(&sna->kgem, priv->gpu_bo);
621			sna_damage_all(&priv->gpu_damage, pixmap);
622			priv->clear = false;
623			priv->cpu = false;
624			priv->flush |= 2;
625		}
626
627		sna_accel_watch_flush(sna, 1);
628	}
629
630	return buffer;
631
632err:
633	kgem_bo_destroy(&sna->kgem, bo);
634	free(buffer);
635	return NULL;
636}
637
638static void _sna_dri2_destroy_buffer(struct sna *sna, DRI2Buffer2Ptr buffer)
639{
640	struct sna_dri2_private *private = get_private(buffer);
641
642	if (buffer == NULL)
643		return;
644
645	DBG(("%s: %p [handle=%d] -- refcnt=%d, pixmap=%ld\n",
646	     __FUNCTION__, buffer, private->bo->handle, private->refcnt,
647	     private->pixmap ? private->pixmap->drawable.serialNumber : 0));
648	assert(private->refcnt > 0);
649	if (--private->refcnt)
650		return;
651
652	assert(private->bo);
653
654	if (private->proxy) {
655		DBG(("%s: destroying proxy\n", __FUNCTION__));
656		_sna_dri2_destroy_buffer(sna, private->proxy);
657		private->pixmap = NULL;
658	}
659
660	if (private->pixmap) {
661		PixmapPtr pixmap = private->pixmap;
662		struct sna_pixmap *priv = sna_pixmap(pixmap);
663
664		assert(sna_pixmap_get_buffer(pixmap) == buffer);
665		assert(priv->gpu_bo == private->bo);
666		assert(priv->gpu_bo->flush);
667		assert(priv->pinned & PIN_DRI2);
668		assert(priv->flush);
669
670		/* Undo the DRI markings on this pixmap */
671		DBG(("%s: releasing last DRI pixmap=%ld, scanout?=%d\n",
672		     __FUNCTION__,
673		     pixmap->drawable.serialNumber,
674		     pixmap == sna->front));
675
676		list_del(&priv->flush_list);
677
678		DBG(("%s: dropping flush hint from handle=%d\n", __FUNCTION__, private->bo->handle));
679		priv->gpu_bo->flush = false;
680		priv->pinned &= ~PIN_DRI2;
681
682		priv->flush = false;
683		sna_accel_watch_flush(sna, -1);
684
685		sna_pixmap_set_buffer(pixmap, NULL);
686		pixmap->drawable.pScreen->DestroyPixmap(pixmap);
687	}
688	assert(private->bo->flush == false);
689
690	kgem_bo_destroy(&sna->kgem, private->bo);
691	free(buffer);
692}
693
/* DRI2 entry point: drop one reference to buffer. */
static void sna_dri2_destroy_buffer(DrawablePtr draw, DRI2Buffer2Ptr buffer)
{
	_sna_dri2_destroy_buffer(to_sna_from_drawable(draw), buffer);
}
698
699static DRI2BufferPtr sna_dri2_reference_buffer(DRI2BufferPtr buffer)
700{
701	get_private(buffer)->refcnt++;
702	return buffer;
703}
704
/* Record that a DRI2 copy has written into pixmap (the front buffer).
 * region == NULL means the whole pixmap was overwritten; otherwise only
 * region migrates from CPU to GPU damage.  Once the CPU damage is
 * exhausted we upgrade to all-GPU damage so later operations can skip
 * fine-grained tracking.
 */
static inline void damage(PixmapPtr pixmap, struct sna_pixmap *priv, RegionPtr region)
{
	assert(priv->gpu_bo);
	if (DAMAGE_IS_ALL(priv->gpu_damage))
		goto done;

	if (region == NULL) {
damage_all:
		priv->gpu_damage = _sna_damage_all(priv->gpu_damage,
						   pixmap->drawable.width,
						   pixmap->drawable.height);
		sna_damage_destroy(&priv->cpu_damage);
		list_del(&priv->flush_list);
	} else {
		sna_damage_subtract(&priv->cpu_damage, region);
		if (priv->cpu_damage == NULL)
			goto damage_all;
		sna_damage_add(&priv->gpu_damage, region);
	}
done:
	/* The GPU copy is now authoritative. */
	priv->cpu = false;
	priv->clear = false;
}
728
/* Install bo as the new backing storage of the (front) pixmap after a
 * flip/exchange, posting damage so that external listeners (DisplayLink
 * style slaves, software cursors, ...) notice the new contents.
 */
static void set_bo(PixmapPtr pixmap, struct kgem_bo *bo)
{
	struct sna *sna = to_sna_from_pixmap(pixmap);
	struct sna_pixmap *priv = sna_pixmap(pixmap);
	RegionRec region;

	DBG(("%s: pixmap=%ld, handle=%d\n",
	     __FUNCTION__, pixmap->drawable.serialNumber, bo->handle));

	/* The replacement bo must be large enough for the pixmap. */
	assert(pixmap->drawable.width * pixmap->drawable.bitsPerPixel <= 8*bo->pitch);
	assert(pixmap->drawable.height * bo->pitch <= kgem_bo_size(bo));
	assert(bo->proxy == NULL);
	assert(priv->pinned & PIN_DRI2);
	assert((priv->pinned & (PIN_PRIME | PIN_DRI3)) == 0);
	assert(priv->flush);

	/* Post damage on the new front buffer so that listeners, such
	 * as DisplayLink know take a copy and shove it over the USB,
	 * also for software cursors and the like.
	 */
	region.extents.x1 = region.extents.y1 = 0;
	region.extents.x2 = pixmap->drawable.width;
	region.extents.y2 = pixmap->drawable.height;
	region.data = NULL;
	DamageRegionAppend(&pixmap->drawable, &region);

	damage(pixmap, priv, NULL);

	assert(bo->refcnt);
	if (priv->move_to_gpu)
		priv->move_to_gpu(sna, priv, 0);
	if (priv->gpu_bo != bo) {
		DBG(("%s: dropping flush hint from handle=%d\n", __FUNCTION__, priv->gpu_bo->handle));
		priv->gpu_bo->flush = false;
		if (priv->cow)
			sna_pixmap_undo_cow(sna, priv, 0);
		/* NOTE(review): the recheck below suggests that
		 * sna_pixmap_undo_cow() can clear priv->gpu_bo — confirm.
		 */
		if (priv->gpu_bo) {
			sna_pixmap_unmap(pixmap, priv);
			kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
		}
		DBG(("%s: adding flush hint to handle=%d\n", __FUNCTION__, bo->handle));
		bo->flush = true;
		if (bo->exec)
			sna->kgem.flush = 1;
		priv->gpu_bo = ref(bo);
	}
	if (bo->domain != DOMAIN_GPU)
		bo->domain = DOMAIN_NONE;
	assert(bo->flush);

	DamageRegionProcessPending(&pixmap->drawable);
}
781
/* Pick the ring (BLT vs RENDER) for the upcoming DRI2 copy, trying to
 * avoid a ring switch (and hence a stall) while either bo is still
 * busy.  No-op before gen6, where there is only a single ring.
 */
static void sna_dri2_select_mode(struct sna *sna, struct kgem_bo *dst, struct kgem_bo *src, bool sync)
{
	struct drm_i915_gem_busy busy;
	int mode;

	if (sna->kgem.gen < 060)
		return;

	if (sync) {
		/* Synchronised copies always go on a fixed ring per gen:
		 * BLT on gen7+, RENDER before that.
		 */
		DBG(("%s: sync, force %s ring\n", __FUNCTION__,
		     sna->kgem.gen >= 070 ? "BLT" : "RENDER"));
		kgem_set_mode(&sna->kgem,
			      sna->kgem.gen >= 070 ? KGEM_BLT : KGEM_RENDER,
			      dst);
		return;
	}

	if (DBG_FORCE_COPY != -1) {
		DBG(("%s: forcing %d\n", __FUNCTION__, DBG_FORCE_COPY));
		kgem_set_mode(&sna->kgem, DBG_FORCE_COPY, dst);
		return;
	}

	/* Once a batch is under construction, stick with its ring. */
	if (sna->kgem.mode != KGEM_NONE) {
		DBG(("%s: busy, not switching\n", __FUNCTION__));
		return;
	}

	VG_CLEAR(busy);
	busy.handle = src->handle;
	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
		return;

	DBG(("%s: src handle=%d busy?=%x\n", __FUNCTION__, busy.handle, busy.busy));
	if (busy.busy == 0) {
		__kgem_bo_clear_busy(src);

		busy.handle = dst->handle;
		if (drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
			return;

		DBG(("%s: dst handle=%d busy?=%x\n", __FUNCTION__, busy.handle, busy.busy));
		if (busy.busy == 0) {
			__kgem_bo_clear_busy(dst);
			DBG(("%s: src/dst is idle, using defaults\n", __FUNCTION__));
			return;
		}
	}

	/* Sandybridge introduced a separate ring which it uses to
	 * perform blits. Switching rendering between rings incurs
	 * a stall as we wait upon the old ring to finish and
	 * flush its render cache before we can proceed on with
	 * the operation on the new ring.
	 *
	 * As this buffer, we presume, has just been written to by
	 * the DRI client using the RENDER ring, we want to perform
	 * our operation on the same ring, and ideally on the same
	 * ring as we will flip from (which should be the RENDER ring
	 * as well).
	 *
	 * The ultimate question is whether preserving the ring outweighs
	 * the cost of the query.
	 */
	mode = KGEM_RENDER;
	/* GEM_BUSY reports the busy ring in the upper half of busy.busy;
	 * any non-render ring steers us onto the blitter.
	 */
	if (busy.busy & (0xfffe << 16))
		mode = KGEM_BLT;
	kgem_bo_mark_busy(&sna->kgem, busy.handle == src->handle ? src : dst, mode);
	_kgem_set_mode(&sna->kgem, mode);
}
852
853static bool can_copy_cpu(struct sna *sna,
854			 struct kgem_bo *src,
855			 struct kgem_bo *dst)
856{
857	if (src->tiling != dst->tiling)
858		return false;
859
860	if (src->pitch != dst->pitch)
861		return false;
862
863	if (!kgem_bo_can_map__cpu(&sna->kgem, src, false))
864		return false;
865
866	if (!kgem_bo_can_map__cpu(&sna->kgem, dst, true))
867		return false;
868
869	DBG(("%s -- yes, src handle=%d, dst handle=%d\n", __FUNCTION__, src->handle, dst->handle));
870	return true;
871}
872
/* CPU fallback for the DRI2 copy, used when the GPU is wedged.  Maps
 * both bos — preferring cheap CPU maps when the copy covers the whole
 * drawable and the layouts match, otherwise falling back to GTT maps —
 * and copies each box with memcpy_blt.
 */
static void
sna_dri2_copy_fallback(struct sna *sna,
		       const DrawableRec *draw,
		       struct kgem_bo *src_bo, int sx, int sy,
		       struct kgem_bo *dst_bo, int dx, int dy,
		       const BoxRec *box, int n)
{
	void *dst, *src;
	bool clipped;

	/* A single box spanning the whole drawable lets us treat the
	 * buffers as linear equivalents.
	 */
	clipped = (n > 1 ||
		   box->x1 + sx > 0 ||
		   box->y1 + sy > 0 ||
		   box->x2 + sx < draw->width ||
		   box->y2 + sy < draw->height);

	dst = src = NULL;
	if (!clipped && can_copy_cpu(sna, src_bo, dst_bo)) {
		dst = kgem_bo_map__cpu(&sna->kgem, dst_bo);
		src = kgem_bo_map__cpu(&sna->kgem, src_bo);
	}

	if (dst == NULL || src == NULL) {
		dst = kgem_bo_map__gtt(&sna->kgem, dst_bo);
		src = kgem_bo_map__gtt(&sna->kgem, src_bo);
		if (dst == NULL || src == NULL)
			return;
	} else {
		kgem_bo_sync__cpu_full(&sna->kgem, dst_bo, true);
		kgem_bo_sync__cpu_full(&sna->kgem, src_bo, false);
	}

	DBG(("%s: src(%d, %d), dst(%d, %d) x %d\n",
	     __FUNCTION__, sx, sy, dx, dy, n));

	/* NOTE(review): sigtrap_get() presumably arms a SIGBUS/SIGSEGV
	 * handler so a faulting map aborts the copy instead of crashing
	 * the server — confirm against its definition.
	 */
	if (sigtrap_get() == 0) {
		do {
			memcpy_blt(src, dst, draw->bitsPerPixel,
				   src_bo->pitch, dst_bo->pitch,
				   box->x1 + sx, box->y1 + sy,
				   box->x1 + dx, box->y1 + dy,
				   box->x2 - box->x1, box->y2 - box->y1);
			box++;
		} while (--n);
		sigtrap_put();
	}
}
920
921static bool is_front(int attachment)
922{
923	return attachment == DRI2BufferFrontLeft;
924}
925
926static struct kgem_bo *
927__sna_dri2_copy_region(struct sna *sna, DrawablePtr draw, RegionPtr region,
928		      DRI2BufferPtr src, DRI2BufferPtr dst,
929		      bool sync)
930{
931	PixmapPtr pixmap = get_drawable_pixmap(draw);
932	DrawableRec scratch, *src_draw = &pixmap->drawable, *dst_draw = &pixmap->drawable;
933	struct sna_dri2_private *src_priv = get_private(src);
934	struct sna_dri2_private *dst_priv = get_private(dst);
935	pixman_region16_t clip;
936	struct kgem_bo *bo = NULL;
937	struct kgem_bo *src_bo;
938	struct kgem_bo *dst_bo;
939	const BoxRec *boxes;
940	int16_t dx, dy, sx, sy;
941	int n;
942
943	/* To hide a stale DRI2Buffer, one may choose to substitute
944	 * pixmap->gpu_bo instead of dst/src->bo, however you then run
945	 * the risk of copying around invalid data. So either you may not
946	 * see the results of the copy, or you may see the wrong pixels.
947	 * Either way you eventually lose.
948	 *
949	 * We also have to be careful in case that the stale buffers are
950	 * now attached to invalid (non-DRI) pixmaps.
951	 */
952
953	assert(is_front(dst->attachment) || is_front(src->attachment));
954	assert(dst->attachment != src->attachment);
955
956	clip.extents.x1 = draw->x;
957	clip.extents.y1 = draw->y;
958	clip.extents.x2 = draw->x + draw->width;
959	clip.extents.y2 = draw->y + draw->height;
960	clip.data = NULL;
961
962	if (region) {
963		pixman_region_translate(region, draw->x, draw->y);
964		pixman_region_intersect(&clip, &clip, region);
965		region = &clip;
966	}
967
968	if (clip.extents.x1 >= clip.extents.x2 ||
969	    clip.extents.y1 >= clip.extents.y2) {
970		DBG(("%s: all clipped\n", __FUNCTION__));
971		return NULL;
972	}
973
974	sx = sy = dx = dy = 0;
975	if (is_front(dst->attachment)) {
976		sx = -draw->x;
977		sy = -draw->y;
978	} else {
979		dx = -draw->x;
980		dy = -draw->y;
981	}
982	if (draw->type == DRAWABLE_WINDOW) {
983		WindowPtr win = (WindowPtr)draw;
984		int16_t tx, ty;
985
986		if (is_clipped(&win->clipList, draw)) {
987			DBG(("%s: draw=(%d, %d), delta=(%d, %d), draw=(%d, %d),(%d, %d), clip.extents=(%d, %d), (%d, %d)\n",
988			     __FUNCTION__, draw->x, draw->y,
989			     get_drawable_dx(draw), get_drawable_dy(draw),
990			     clip.extents.x1, clip.extents.y1,
991			     clip.extents.x2, clip.extents.y2,
992			     win->clipList.extents.x1, win->clipList.extents.y1,
993			     win->clipList.extents.x2, win->clipList.extents.y2));
994
995			assert(region == NULL || region == &clip);
996			pixman_region_intersect(&clip, &win->clipList, &clip);
997			if (!pixman_region_not_empty(&clip)) {
998				DBG(("%s: all clipped\n", __FUNCTION__));
999				return NULL;
1000			}
1001
1002			region = &clip;
1003		}
1004
1005		if (get_drawable_deltas(draw, pixmap, &tx, &ty)) {
1006			if (is_front(dst->attachment)) {
1007				pixman_region_translate(region ?: &clip, tx, ty);
1008				sx -= tx;
1009				sy -= ty;
1010			} else {
1011				sx += tx;
1012				sy += ty;
1013			}
1014		}
1015	} else
1016		sync = false;
1017
1018	scratch.x = scratch.y = 0;
1019	scratch.width = scratch.height = 0;
1020	scratch.depth = draw->depth;
1021	scratch.bitsPerPixel = draw->bitsPerPixel;
1022
1023	src_bo = src_priv->bo;
1024	assert(src_bo->refcnt);
1025	if (is_front(src->attachment)) {
1026		struct sna_pixmap *priv;
1027
1028		priv = sna_pixmap_move_to_gpu(pixmap, MOVE_READ);
1029		if (priv)
1030			src_bo = priv->gpu_bo;
1031		DBG(("%s: updated FrontLeft src_bo from handle=%d to handle=%d\n",
1032		     __FUNCTION__, src_priv->bo->handle, src_bo->handle));
1033		assert(src_bo->refcnt);
1034	} else {
1035		RegionRec source;
1036
1037		scratch.width = src_priv->size & 0xffff;
1038		scratch.height = src_priv->size >> 16;
1039		src_draw = &scratch;
1040
1041		DBG(("%s: source size %dx%d, region size %dx%d\n",
1042		     __FUNCTION__,
1043		     scratch.width, scratch.height,
1044		     clip.extents.x2 - clip.extents.x1,
1045		     clip.extents.y2 - clip.extents.y1));
1046
1047		source.extents.x1 = -sx;
1048		source.extents.y1 = -sy;
1049		source.extents.x2 = source.extents.x1 + scratch.width;
1050		source.extents.y2 = source.extents.y1 + scratch.height;
1051		source.data = NULL;
1052
1053		assert(region == NULL || region == &clip);
1054		pixman_region_intersect(&clip, &clip, &source);
1055
1056	}
1057
1058	dst_bo = dst_priv->bo;
1059	assert(dst_bo->refcnt);
1060	if (is_front(dst->attachment)) {
1061		struct sna_pixmap *priv;
1062		unsigned int flags;
1063
1064		flags = MOVE_WRITE | __MOVE_FORCE;
1065		if (clip.data)
1066			flags |= MOVE_READ;
1067
1068		assert(region == NULL || region == &clip);
1069		priv = sna_pixmap_move_area_to_gpu(pixmap, &clip.extents, flags);
1070		if (priv) {
1071			damage(pixmap, priv, region);
1072			dst_bo = priv->gpu_bo;
1073		}
1074		DBG(("%s: updated FrontLeft dst_bo from handle=%d to handle=%d\n",
1075		     __FUNCTION__, dst_priv->bo->handle, dst_bo->handle));
1076		assert(dst_bo->refcnt);
1077	} else {
1078		RegionRec target;
1079
1080		scratch.width = dst_priv->size & 0xffff;
1081		scratch.height = dst_priv->size >> 16;
1082		dst_draw = &scratch;
1083
1084		DBG(("%s: target size %dx%d, region size %dx%d\n",
1085		     __FUNCTION__,
1086		     scratch.width, scratch.height,
1087		     clip.extents.x2 - clip.extents.x1,
1088		     clip.extents.y2 - clip.extents.y1));
1089
1090		target.extents.x1 = -dx;
1091		target.extents.y1 = -dy;
1092		target.extents.x2 = target.extents.x1 + scratch.width;
1093		target.extents.y2 = target.extents.y1 + scratch.height;
1094		target.data = NULL;
1095
1096		assert(region == NULL || region == &clip);
1097		pixman_region_intersect(&clip, &clip, &target);
1098
1099		sync = false;
1100	}
1101
1102	if (!wedged(sna)) {
1103		xf86CrtcPtr crtc;
1104
1105		crtc = NULL;
1106		if (sync && sna_pixmap_is_scanout(sna, pixmap))
1107			crtc = sna_covering_crtc(sna, &clip.extents, NULL);
1108		sna_dri2_select_mode(sna, dst_bo, src_bo, crtc != NULL);
1109
1110		sync = (crtc != NULL&&
1111			sna_wait_for_scanline(sna, pixmap, crtc,
1112					      &clip.extents));
1113	}
1114
1115	if (region) {
1116		boxes = region_rects(region);
1117		n = region_num_rects(region);
1118		assert(n);
1119	} else {
1120		region = &clip;
1121		boxes = &clip.extents;
1122		n = 1;
1123	}
1124	DamageRegionAppend(&pixmap->drawable, region);
1125
1126	if (wedged(sna)) {
1127fallback:
1128		sna_dri2_copy_fallback(sna, src_draw,
1129				      src_bo, sx, sy,
1130				      dst_bo, dx, dy,
1131				      boxes, n);
1132	} else {
1133		unsigned flags;
1134
1135		DBG(("%s: copying [(%d, %d), (%d, %d)]x%d src=(%d, %d), dst=(%d, %d)\n",
1136		     __FUNCTION__,
1137		     boxes[0].x1, boxes[0].y1,
1138		     boxes[0].x2, boxes[0].y2,
1139		     n, sx, sy, dx, dy));
1140
1141		flags = COPY_LAST;
1142		if (sync)
1143			flags |= COPY_SYNC;
1144		if (!sna->render.copy_boxes(sna, GXcopy,
1145					    src_draw, src_bo, sx, sy,
1146					    dst_draw, dst_bo, dx, dy,
1147					    boxes, n, flags))
1148			goto fallback;
1149
1150		DBG(("%s: flushing? %d\n", __FUNCTION__, sync));
1151		if (sync) { /* STAT! */
1152			struct kgem_request *rq = sna->kgem.next_request;
1153			kgem_submit(&sna->kgem);
1154			if (rq->bo) {
1155				bo = ref(rq->bo);
1156				DBG(("%s: recording sync fence handle=%d\n", __FUNCTION__, bo->handle));
1157			}
1158		}
1159	}
1160
1161	DamageRegionProcessPending(&pixmap->drawable);
1162
1163	if (clip.data)
1164		pixman_region_fini(&clip);
1165
1166	return bo;
1167}
1168
1169static void
1170sna_dri2_copy_region(DrawablePtr draw,
1171		     RegionPtr region,
1172		     DRI2BufferPtr dst,
1173		     DRI2BufferPtr src)
1174{
1175	PixmapPtr pixmap = get_drawable_pixmap(draw);
1176	struct sna *sna = to_sna_from_pixmap(pixmap);
1177
1178	DBG(("%s: pixmap=%ld, src=%u (refs=%d/%d, flush=%d, attach=%d) , dst=%u (refs=%d/%d, flush=%d, attach=%d)\n",
1179	     __FUNCTION__,
1180	     pixmap->drawable.serialNumber,
1181	     get_private(src)->bo->handle,
1182	     get_private(src)->refcnt,
1183	     get_private(src)->bo->refcnt,
1184	     get_private(src)->bo->flush,
1185	     src->attachment,
1186	     get_private(dst)->bo->handle,
1187	     get_private(dst)->refcnt,
1188	     get_private(dst)->bo->refcnt,
1189	     get_private(dst)->bo->flush,
1190	     dst->attachment));
1191
1192	assert(src != dst);
1193
1194	assert(get_private(src)->refcnt);
1195	assert(get_private(dst)->refcnt);
1196
1197	assert(get_private(src)->bo->refcnt);
1198	assert(get_private(dst)->bo->refcnt);
1199
1200	DBG(("%s: region (%d, %d), (%d, %d) x %d\n",
1201	     __FUNCTION__,
1202	     region->extents.x1, region->extents.y1,
1203	     region->extents.x2, region->extents.y2,
1204	     region_num_rects(region)));
1205
1206	__sna_dri2_copy_region(sna, draw, region, src, dst, false);
1207}
1208
1209inline static uint32_t pipe_select(int pipe)
1210{
1211	/* The third pipe was introduced with IvyBridge long after
1212	 * multiple pipe support was added to the kernel, hence
1213	 * we can safely ignore the capability check - if we have more
1214	 * than two pipes, we can assume that they are fully supported.
1215	 */
1216	if (pipe > 1)
1217		return pipe << DRM_VBLANK_HIGH_CRTC_SHIFT;
1218	else if (pipe > 0)
1219		return DRM_VBLANK_SECONDARY;
1220	else
1221		return 0;
1222}
1223
/* Issue the DRM wait-vblank ioctl for the given pipe, tagging the
 * request with the pipe encoding from pipe_select().  Returns the
 * drmIoctl() result (0 on success).
 */
static inline int sna_wait_vblank(struct sna *sna, union drm_wait_vblank *vbl, int pipe)
{
	DBG(("%s(pipe=%d, waiting until seq=%u%s)\n",
	     __FUNCTION__, pipe, vbl->request.sequence,
	     vbl->request.type & DRM_VBLANK_RELATIVE ? " [relative]" : ""));
	assert(pipe != -1);

	/* Fold the CRTC selector into the request type before submission */
	vbl->request.type |= pipe_select(pipe);
	return drmIoctl(sna->kgem.fd, DRM_IOCTL_WAIT_VBLANK, vbl);
}
1234
1235#if DRI2INFOREC_VERSION >= 4
1236
/* Attach the DRI2 per-window bookkeeping to the window, storing it in
 * slot [1] of the sna_window_key private; dri2_window() reads back the
 * same slot (verified by the surrounding asserts).  The window must not
 * already have DRI2 state attached.
 */
static void dri2_window_attach(WindowPtr win, struct dri2_window *priv)
{
	assert(win->drawable.type == DRAWABLE_WINDOW);
	assert(dri2_window(win) == NULL);
	((void **)__get_private(win, sna_window_key))[1] = priv;
	assert(dri2_window(win) == priv);
}
1244
/* Translate a hardware msc from the given crtc into the drawable's msc
 * space.  Each window carries a msc_delta so that the counter reported
 * to the client stays continuous even when the window migrates between
 * CRTCs whose frame counters differ.  Pixmaps have no such state and
 * pass the msc through unchanged.
 */
static uint64_t
draw_current_msc(DrawablePtr draw, xf86CrtcPtr crtc, uint64_t msc)
{
	struct dri2_window *priv;

	if (draw->type != DRAWABLE_WINDOW)
		return msc;

	priv = dri2_window((WindowPtr)draw);
	if (priv == NULL) {
		/* First sighting of this window: attach bookkeeping with a
		 * zero delta.  On allocation failure the raw msc is returned
		 * and attachment is retried on the next call.
		 */
		priv = malloc(sizeof(*priv));
		if (priv != NULL) {
			priv->front = NULL;
			priv->crtc = crtc;
			priv->msc_delta = 0;
			priv->chain = NULL;
			dri2_window_attach((WindowPtr)draw, priv);
		}
	} else {
		if (priv->crtc != crtc) {
			/* Window moved to another CRTC: fold the difference
			 * between the two CRTCs' last-swap counters into the
			 * delta so the client-visible msc does not jump.
			 */
			const struct ust_msc *last = sna_crtc_last_swap(priv->crtc);
			const struct ust_msc *this = sna_crtc_last_swap(crtc);
			DBG(("%s: Window transferring from pipe=%d [msc=%llu] to pipe=%d [msc=%llu], delta now %lld\n",
			     __FUNCTION__,
			     sna_crtc_to_pipe(priv->crtc), (long long)last->msc,
			     sna_crtc_to_pipe(crtc), (long long)this->msc,
			     (long long)(priv->msc_delta + this->msc - last->msc)));
			priv->msc_delta += this->msc - last->msc;
			priv->crtc = crtc;
		}
		msc -= priv->msc_delta;
	}
	return  msc;
}
1279
1280static uint32_t
1281draw_target_seq(DrawablePtr draw, uint64_t msc)
1282{
1283	struct dri2_window *priv = dri2_window((WindowPtr)draw);
1284	if (priv == NULL)
1285		return msc;
1286	DBG(("%s: converting target_msc=%llu to seq %u\n",
1287	     __FUNCTION__, (long long)msc, (unsigned)(msc + priv->msc_delta)));
1288	return msc + priv->msc_delta;
1289}
1290
1291static xf86CrtcPtr
1292sna_dri2_get_crtc(DrawablePtr draw)
1293{
1294	struct sna *sna = to_sna_from_drawable(draw);
1295	BoxRec box;
1296
1297	if (draw->type == DRAWABLE_PIXMAP)
1298		return NULL;
1299
1300	box.x1 = draw->x;
1301	box.y1 = draw->y;
1302	box.x2 = box.x1 + draw->width;
1303	box.y2 = box.y1 + draw->height;
1304
1305	/* Make sure the CRTC is valid and this is the real front buffer */
1306	return sna_covering_crtc(sna, &box, NULL);
1307}
1308
/* Unlink the event from its window's singly-linked chain of pending
 * events.  The event must currently be on the chain; ownership of the
 * event itself stays with the caller.
 */
static void
sna_dri2_remove_event(WindowPtr win, struct sna_dri2_event *info)
{
	struct dri2_window *priv;
	struct sna_dri2_event *chain;

	assert(win->drawable.type == DRAWABLE_WINDOW);
	DBG(("%s: remove[%p] from window %ld, active? %d\n",
	     __FUNCTION__, info, (long)win->drawable.id, info->draw != NULL));

	priv = dri2_window(win);
	assert(priv);
	assert(priv->chain != NULL);

	/* Fast path: the event is at the head of the chain */
	if (priv->chain == info) {
		priv->chain = info->chain;
		return;
	}

	/* Otherwise walk to the predecessor and splice the event out */
	chain = priv->chain;
	while (chain->chain != info)
		chain = chain->chain;
	assert(chain != info);
	assert(info->chain != chain);
	chain->chain = info->chain;
}
1335
/* Release a swap/flip event: detach it from its window's chain (if the
 * drawable is still alive), drop the references on its front/back
 * buffers, free any cached bo swap candidates, release the sync bo and
 * finally free the event itself.
 */
static void
sna_dri2_event_free(struct sna *sna,
		    struct sna_dri2_event *info)
{
	DrawablePtr draw = info->draw;

	DBG(("%s(draw?=%d)\n", __FUNCTION__, draw != NULL));
	if (draw && draw->type == DRAWABLE_WINDOW)
		sna_dri2_remove_event((WindowPtr)draw, info);

	_sna_dri2_destroy_buffer(sna, info->front);
	_sna_dri2_destroy_buffer(sna, info->back);

	/* Drop any spare buffers cached for buffer exchange */
	while (!list_is_empty(&info->cache)) {
		struct dri_bo *c;

		c = list_first_entry(&info->cache, struct dri_bo, link);
		list_del(&c->link);

		DBG(("%s: releasing cached handle=%d\n", __FUNCTION__, c->bo ? c->bo->handle : 0));
		if (c->bo)
			kgem_bo_destroy(&sna->kgem, c->bo);

		free(c);
	}

	/* The bo used to track completion of the vsync'ed blit, if any */
	if (info->bo) {
		DBG(("%s: releasing batch handle=%d\n", __FUNCTION__, info->bo->handle));
		kgem_bo_destroy(&sna->kgem, info->bo);
	}

	/* _list_del is safe even if the event was never added to a client list */
	_list_del(&info->link);
	free(info);
}
1370
/* ClientStateCallback: when a client disconnects, dispose of all its
 * outstanding DRI2 events.  Events already queued with the kernel are
 * merely orphaned (client/draw cleared) since a vblank or flip event
 * will still arrive for them; unqueued events are freed immediately.
 * The callback deregisters itself once no DRI2 clients remain.
 */
static void
sna_dri2_client_gone(CallbackListPtr *list, void *closure, void *data)
{
	NewClientInfoRec *clientinfo = data;
	ClientPtr client = clientinfo->client;
	struct sna_client *priv = sna_client(client);
	struct sna *sna = closure;

	/* This client never owned any DRI2 events */
	if (priv->events.next == NULL)
		return;

	if (client->clientState != ClientStateGone)
		return;

	DBG(("%s(active?=%d)\n", __FUNCTION__,
	     !list_is_empty(&priv->events)));

	while (!list_is_empty(&priv->events)) {
		struct sna_dri2_event *event;

		event = list_first_entry(&priv->events, struct sna_dri2_event, link);
		assert(event->client == client);

		if (event->queued) {
			/* Kernel still owes us an event: orphan, don't free */
			if (event->draw)
				sna_dri2_remove_event((WindowPtr)event->draw,
						      event);
			event->client = NULL;
			event->draw = NULL;
			list_del(&event->link);
		} else
			sna_dri2_event_free(sna, event);
	}

	if (--sna->dri2.client_count == 0)
		DeleteCallback(&ClientStateCallback, sna_dri2_client_gone, sna);
}
1408
1409static bool add_event_to_client(struct sna_dri2_event *info, struct sna *sna, ClientPtr client)
1410{
1411	struct sna_client *priv = sna_client(client);
1412
1413	if (priv->events.next == NULL) {
1414		if (sna->dri2.client_count++ == 0 &&
1415		    !AddCallback(&ClientStateCallback, sna_dri2_client_gone, sna))
1416			return false;
1417
1418		list_init(&priv->events);
1419	}
1420
1421	list_add(&info->link, &priv->events);
1422	info->client = client;
1423	return true;
1424}
1425
1426static struct sna_dri2_event *
1427sna_dri2_add_event(struct sna *sna, DrawablePtr draw, ClientPtr client)
1428{
1429	struct dri2_window *priv;
1430	struct sna_dri2_event *info, *chain;
1431
1432	assert(draw->type == DRAWABLE_WINDOW);
1433	DBG(("%s: adding event to window %ld)\n",
1434	     __FUNCTION__, (long)draw->id));
1435
1436	priv = dri2_window((WindowPtr)draw);
1437	if (priv == NULL)
1438		return NULL;
1439
1440	info = calloc(1, sizeof(struct sna_dri2_event));
1441	if (info == NULL)
1442		return NULL;
1443
1444	list_init(&info->cache);
1445	info->draw = draw;
1446	info->crtc = priv->crtc;
1447	info->pipe = sna_crtc_to_pipe(priv->crtc);
1448
1449	if (!add_event_to_client(info, sna, client)) {
1450		free(info);
1451		return NULL;
1452	}
1453
1454	assert(priv->chain != info);
1455
1456	if (priv->chain == NULL) {
1457		priv->chain = info;
1458		return info;
1459	}
1460
1461	chain = priv->chain;
1462	while (chain->chain != NULL)
1463		chain = chain->chain;
1464
1465	assert(chain != info);
1466	chain->chain = info;
1467	return info;
1468}
1469
/* Tear down the DRI2 state attached to a window as it is destroyed:
 * drop the per-crtc front proxy (and its shadow override), then orphan
 * or free every event on the window's pending chain.  Events that are
 * still queued with the kernel must not be freed here - their vblank
 * or flip completion will arrive later and free them then.
 */
void sna_dri2_destroy_window(WindowPtr win)
{
	struct sna *sna;
	struct dri2_window *priv;

	priv = dri2_window(win);
	if (priv == NULL)
		return;

	DBG(("%s: window=%ld\n", __FUNCTION__, win->drawable.serialNumber));
	sna = to_sna_from_drawable(&win->drawable);

	if (priv->front) {
		/* The window had a private per-crtc front buffer; restore
		 * the crtc to scanning out the shared front buffer.
		 */
		assert(priv->crtc);
		sna_shadow_unset_crtc(sna, priv->crtc);
		_sna_dri2_destroy_buffer(sna, priv->front);
	}

	if (priv->chain) {
		struct sna_dri2_event *info, *chain;

		DBG(("%s: freeing chain\n", __FUNCTION__));

		chain = priv->chain;
		while ((info = chain)) {
			/* Detach from the dying window before deciding fate */
			info->draw = NULL;
			info->client = NULL;

			chain = info->chain;
			info->chain = NULL;

			/* Queued events are reclaimed on completion instead */
			if (!info->queued)
				sna_dri2_event_free(sna, info);
		}
	}

	free(priv);
}
1508
/* Kernel pageflip-completion callback: forward to the DRI2 flip state
 * machine, passing the sna_dri2_event that scheduled the flip (data).
 */
static void
sna_dri2_flip_handler(struct sna *sna,
		      struct drm_event_vblank *event,
		      void *data)
{
	DBG(("%s: sequence=%d\n", __FUNCTION__, event->sequence));
	sna_dri2_flip_event(sna, data);
}
1517
/* Schedule a pageflip to the event's back buffer, then exchange the
 * front/back bo and flink names so the client's next frame renders
 * into the old front buffer.  Returns false if the kernel rejected the
 * flip, leaving all buffers untouched.  Async flips complete without a
 * kernel event, so they are not recorded in flip_pending.
 */
static bool
sna_dri2_flip(struct sna *sna, struct sna_dri2_event *info)
{
	struct kgem_bo *bo = get_private(info->back)->bo;
	struct kgem_bo *tmp_bo;
	uint32_t tmp_name;

	DBG(("%s(type=%d)\n", __FUNCTION__, info->type));

	assert(sna_pixmap_get_buffer(sna->front) == info->front);
	assert(get_drawable_pixmap(info->draw)->drawable.height * bo->pitch <= kgem_bo_size(bo));
	assert(bo->refcnt);

	if (!sna_page_flip(sna, bo, sna_dri2_flip_handler,
			   info->type == FLIP_ASYNC ? NULL : info))
		return false;

	assert(sna->dri2.flip_pending == NULL || sna->dri2.flip_pending == info);
	if (info->type != FLIP_ASYNC)
		sna->dri2.flip_pending = info;

	DBG(("%s: marked handle=%d as scanout, swap front (handle=%d, name=%d) and back (handle=%d, name=%d)\n",
	     __FUNCTION__, bo->handle,
	     get_private(info->front)->bo->handle, info->front->name,
	     get_private(info->back)->bo->handle, info->back->name));

	/* Exchange the bo and flink name between front and back.  The
	 * ordering matters: the old front must be saved before the
	 * front buffer is repointed at the freshly flipped bo.
	 */
	tmp_bo = get_private(info->front)->bo;
	tmp_name = info->front->name;

	set_bo(sna->front, bo);

	info->front->name = info->back->name;
	get_private(info->front)->bo = bo;

	info->back->name = tmp_name;
	get_private(info->back)->bo = tmp_bo;
	/* Contents of the new back buffer are undefined until redrawn */
	get_private(info->back)->stale = true;

	assert(get_private(info->front)->bo->refcnt);
	assert(get_private(info->back)->bo->refcnt);
	assert(get_private(info->front)->bo != get_private(info->back)->bo);

	info->queued = true;
	return true;
}
1563
1564static bool
1565can_flip(struct sna * sna,
1566	 DrawablePtr draw,
1567	 DRI2BufferPtr front,
1568	 DRI2BufferPtr back,
1569	 xf86CrtcPtr crtc)
1570{
1571	WindowPtr win = (WindowPtr)draw;
1572	PixmapPtr pixmap;
1573
1574	assert((sna->flags & SNA_NO_WAIT) == 0);
1575
1576	if (!DBG_CAN_FLIP)
1577		return false;
1578
1579	if (draw->type == DRAWABLE_PIXMAP)
1580		return false;
1581
1582	if (!sna->mode.front_active) {
1583		DBG(("%s: no, active CRTC\n", __FUNCTION__));
1584		return false;
1585	}
1586
1587	assert(sna->scrn->vtSema);
1588
1589	if ((sna->flags & (SNA_HAS_FLIP | SNA_HAS_ASYNC_FLIP)) == 0) {
1590		DBG(("%s: no, pageflips disabled\n", __FUNCTION__));
1591		return false;
1592	}
1593
1594	if (front->format != back->format) {
1595		DBG(("%s: no, format mismatch, front = %d, back = %d\n",
1596		     __FUNCTION__, front->format, back->format));
1597		return false;
1598	}
1599
1600	if (sna->mode.shadow_active) {
1601		DBG(("%s: no, shadow enabled\n", __FUNCTION__));
1602		return false;
1603	}
1604
1605	if (!sna_crtc_is_on(crtc)) {
1606		DBG(("%s: ref-pipe=%d is disabled\n", __FUNCTION__, sna_crtc_to_pipe(crtc)));
1607		return false;
1608	}
1609
1610	pixmap = get_window_pixmap(win);
1611	if (pixmap != sna->front) {
1612		DBG(("%s: no, window (pixmap=%ld) is not attached to the front buffer (pixmap=%ld)\n",
1613		     __FUNCTION__, pixmap->drawable.serialNumber, sna->front->drawable.serialNumber));
1614		return false;
1615	}
1616
1617	if (sna_pixmap_get_buffer(pixmap) != front) {
1618		DBG(("%s: no, DRI2 drawable is no longer attached (old name=%d, new name=%d) to pixmap=%ld\n",
1619		     __FUNCTION__, front->name,
1620		     sna_pixmap_get_buffer(pixmap) ? ((DRI2BufferPtr)sna_pixmap_get_buffer(pixmap))->name : 0,
1621		     pixmap->drawable.serialNumber));
1622		return false;
1623	}
1624
1625	assert(get_private(front)->pixmap == sna->front);
1626	assert(sna_pixmap(sna->front)->gpu_bo == get_private(front)->bo);
1627
1628	if (!get_private(back)->bo->scanout) {
1629		DBG(("%s: no, DRI2 drawable was too small at time of creation)\n",
1630		     __FUNCTION__));
1631		return false;
1632	}
1633
1634	if (get_private(back)->size != get_private(front)->size) {
1635		DBG(("%s: no, DRI2 drawable does not fit into scanout\n",
1636		     __FUNCTION__));
1637		return false;
1638	}
1639
1640	DBG(("%s: window size: %dx%d, clip=(%d, %d), (%d, %d) x %d\n",
1641	     __FUNCTION__,
1642	     win->drawable.width, win->drawable.height,
1643	     win->clipList.extents.x1, win->clipList.extents.y1,
1644	     win->clipList.extents.x2, win->clipList.extents.y2,
1645	     region_num_rects(&win->clipList)));
1646	if (!RegionEqual(&win->clipList, &draw->pScreen->root->winSize)) {
1647		DBG(("%s: no, window is clipped: clip region=(%d, %d), (%d, %d), root size=(%d, %d), (%d, %d)\n",
1648		     __FUNCTION__,
1649		     win->clipList.extents.x1,
1650		     win->clipList.extents.y1,
1651		     win->clipList.extents.x2,
1652		     win->clipList.extents.y2,
1653		     draw->pScreen->root->winSize.extents.x1,
1654		     draw->pScreen->root->winSize.extents.y1,
1655		     draw->pScreen->root->winSize.extents.x2,
1656		     draw->pScreen->root->winSize.extents.y2));
1657		return false;
1658	}
1659
1660	if (draw->x != 0 || draw->y != 0 ||
1661#ifdef COMPOSITE
1662	    draw->x != pixmap->screen_x ||
1663	    draw->y != pixmap->screen_y ||
1664#endif
1665	    draw->width != pixmap->drawable.width ||
1666	    draw->height != pixmap->drawable.height) {
1667		DBG(("%s: no, window is not full size (%dx%d)!=(%dx%d)\n",
1668		     __FUNCTION__,
1669		     draw->width, draw->height,
1670		     pixmap->drawable.width,
1671		     pixmap->drawable.height));
1672		return false;
1673	}
1674
1675	/* prevent an implicit tiling mode change */
1676	if (get_private(back)->bo->tiling > I915_TILING_X) {
1677		DBG(("%s -- no, tiling mismatch: front %d, back=%d, want-tiled?=%d\n",
1678		     __FUNCTION__,
1679		     get_private(front)->bo->tiling,
1680		     get_private(back)->bo->tiling,
1681		     !!(sna->flags & SNA_LINEAR_FB)));
1682		return false;
1683	}
1684
1685	if (get_private(front)->bo->pitch != get_private(back)->bo->pitch) {
1686		DBG(("%s -- no, pitch mismatch: front %d, back=%d\n",
1687		     __FUNCTION__,
1688		     get_private(front)->bo->pitch,
1689		     get_private(back)->bo->pitch));
1690		return false;
1691	}
1692
1693	if (sna_pixmap(pixmap)->pinned & ~(PIN_DRI2 | PIN_SCANOUT)) {
1694		DBG(("%s -- no, pinned: front %x\n",
1695		     __FUNCTION__, sna_pixmap(pixmap)->pinned));
1696		return false;
1697	}
1698
1699	DBG(("%s: yes, pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
1700	assert(dri2_window(win)->front == NULL);
1701	return true;
1702}
1703
/* Decide whether a DRI2 swap may be implemented by simply exchanging
 * the front and back bo of the drawable's pixmap (no copy, no flip).
 * Requires an unclipped redirected window whose buffers match in format
 * and size, and which is not the scanned-out front buffer (that case
 * needs a real flip unless TearFree owns scanout).
 */
static bool
can_xchg(struct sna * sna,
	 DrawablePtr draw,
	 DRI2BufferPtr front,
	 DRI2BufferPtr back)
{
	WindowPtr win = (WindowPtr)draw;
	PixmapPtr pixmap;

	if (!DBG_CAN_XCHG)
		return false;

	if (draw->type == DRAWABLE_PIXMAP)
		return false;

	if (front->format != back->format) {
		DBG(("%s: no, format mismatch, front = %d, back = %d\n",
		     __FUNCTION__, front->format, back->format));
		return false;
	}

	/* The DRI2 front buffer must still belong to this window's pixmap */
	pixmap = get_window_pixmap(win);
	if (get_private(front)->pixmap != pixmap) {
		DBG(("%s: no, DRI2 drawable is no longer attached, old pixmap=%ld, now pixmap=%ld\n",
		     __FUNCTION__,
		     get_private(front)->pixmap->drawable.serialNumber,
		     pixmap->drawable.serialNumber));
		return false;
	}

	DBG(("%s: window size: %dx%d, clip=(%d, %d), (%d, %d) x %d, pixmap size=%dx%d\n",
	     __FUNCTION__,
	     win->drawable.width, win->drawable.height,
	     win->clipList.extents.x1, win->clipList.extents.y1,
	     win->clipList.extents.x2, win->clipList.extents.y2,
	     region_num_rects(&win->clipList),
	     pixmap->drawable.width,
	     pixmap->drawable.height));
	/* An exchange replaces the whole pixmap, so nothing may be clipped */
	if (is_clipped(&win->clipList, &pixmap->drawable)) {
		DBG(("%s: no, %dx%d window is clipped: clip region=(%d, %d), (%d, %d)\n",
		     __FUNCTION__,
		     draw->width, draw->height,
		     win->clipList.extents.x1,
		     win->clipList.extents.y1,
		     win->clipList.extents.x2,
		     win->clipList.extents.y2));
		return false;
	}

	if (get_private(back)->size != get_private(front)->size) {
		DBG(("%s: no, back buffer %dx%d does not match front buffer %dx%d\n",
		     __FUNCTION__,
		     get_private(back)->size & 0x7fff, (get_private(back)->size >> 16) & 0x7fff,
		     get_private(front)->size & 0x7fff, (get_private(front)->size >> 16) & 0x7fff));
		return false;
	}

	/* Swapping the scanout itself must go through a pageflip instead */
	if (pixmap == sna->front && !(sna->flags & SNA_TEAR_FREE) && sna->mode.front_active) {
		DBG(("%s: no, front buffer, requires flipping\n",
		     __FUNCTION__));
		return false;
	}

	if (sna_pixmap(pixmap)->pinned & ~(PIN_DRI2 | PIN_SCANOUT)) {
		DBG(("%s: no, pinned: %x\n",
		     __FUNCTION__, sna_pixmap(pixmap)->pinned));
		return false;
	}

	DBG(("%s: yes, pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
	return true;
}
1776
1777static bool
1778overlaps_other_crtc(struct sna *sna, xf86CrtcPtr desired)
1779{
1780	xf86CrtcConfigPtr config = XF86_CRTC_CONFIG_PTR(sna->scrn);
1781	int c;
1782
1783	for (c = 0; c < sna->mode.num_real_crtc; c++) {
1784		xf86CrtcPtr crtc = config->crtc[c];
1785
1786		if (crtc == desired)
1787			continue;
1788
1789		if (!crtc->enabled)
1790			continue;
1791
1792		if (desired->bounds.x1 < crtc->bounds.x2 &&
1793		    desired->bounds.x2 > crtc->bounds.x1 &&
1794		    desired->bounds.y1 < crtc->bounds.y2 &&
1795		    desired->bounds.y2 > crtc->bounds.y1)
1796			return true;
1797	}
1798
1799	return false;
1800}
1801
/* Decide whether a DRI2 swap may be implemented by exchanging buffers
 * on a single CRTC via the TearFree per-crtc shadow.  The window must
 * exactly and exclusively cover an untransformed CRTC attached to the
 * front buffer, with a back buffer matching the window's size.
 */
static bool
can_xchg_crtc(struct sna *sna,
	      DrawablePtr draw,
	      DRI2BufferPtr front,
	      DRI2BufferPtr back,
	      xf86CrtcPtr crtc)
{
	WindowPtr win = (WindowPtr)draw;
	PixmapPtr pixmap;

	if (!DBG_CAN_XCHG)
		return false;

	/* The per-crtc shadow path only exists under TearFree */
	if ((sna->flags & SNA_TEAR_FREE) == 0) {
		DBG(("%s: no, requires TearFree\n",
		     __FUNCTION__));
		return false;
	}

	if (draw->type == DRAWABLE_PIXMAP)
		return false;

	if (front->format != back->format) {
		DBG(("%s: no, format mismatch, front = %d, back = %d\n",
		     __FUNCTION__, front->format, back->format));
		return false;
	}

	/* Window extents must coincide exactly with the CRTC bounds */
	if (memcmp(&win->clipList.extents, &crtc->bounds, sizeof(crtc->bounds))) {
		DBG(("%s: no, window [(%d, %d), (%d, %d)] does not cover CRTC [(%d, %d), (%d, %d)]\n",
		     __FUNCTION__,
		     win->clipList.extents.x1, win->clipList.extents.y1,
		     win->clipList.extents.x2, win->clipList.extents.y2,
		     crtc->bounds.x1, crtc->bounds.y1,
		     crtc->bounds.x2, crtc->bounds.y2));
		return false;
	}

	if (sna_crtc_is_transformed(crtc)) {
		DBG(("%s: no, CRTC is rotated\n", __FUNCTION__));
		return false;
	}

	pixmap = get_window_pixmap(win);
	if (pixmap != sna->front) {
		DBG(("%s: no, not attached to front buffer\n", __FUNCTION__));
		return false;
	}

	if (get_private(front)->pixmap != pixmap) {
		DBG(("%s: no, DRI2 drawable is no longer attached, old pixmap=%ld, now pixmap=%ld\n",
		     __FUNCTION__,
		     get_private(front)->pixmap->drawable.serialNumber,
		     pixmap->drawable.serialNumber));
		return false;
	}

	DBG(("%s: window size: %dx%d, clip=(%d, %d), (%d, %d) x %d\n",
	     __FUNCTION__,
	     win->drawable.width, win->drawable.height,
	     win->clipList.extents.x1, win->clipList.extents.y1,
	     win->clipList.extents.x2, win->clipList.extents.y2,
	     region_num_rects(&win->clipList)));
	if (is_clipped(&win->clipList, &win->drawable)) {
		DBG(("%s: no, %dx%d window is clipped: clip region=(%d, %d), (%d, %d)\n",
		     __FUNCTION__,
		     draw->width, draw->height,
		     win->clipList.extents.x1,
		     win->clipList.extents.y1,
		     win->clipList.extents.x2,
		     win->clipList.extents.y2));
		return false;
	}

	/* The exchanged contents must not leak onto another output */
	if (overlaps_other_crtc(sna, crtc)) {
		DBG(("%s: no, overlaps other CRTC\n", __FUNCTION__));
		return false;
	}

	/* size packs height in the high 16 bits, width in the low 16 */
	if (get_private(back)->size != (draw->height << 16 | draw->width)) {
		DBG(("%s: no, DRI2 buffers does not fit window\n",
		     __FUNCTION__));
		return false;
	}

	assert(win != win->drawable.pScreen->root);
	DBG(("%s: yes, pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
	return true;
}
1891
/* Perform a buffer exchange: repoint the window's pixmap at the back
 * bo and swap the bo/flink-name pairs of the two DRI2 buffers, so the
 * client's next frame renders into the old front bo.  Only valid when
 * can_xchg() approved the pairing.
 */
static void
sna_dri2_xchg(DrawablePtr draw, DRI2BufferPtr front, DRI2BufferPtr back)
{
	WindowPtr win = (WindowPtr)draw;
	struct kgem_bo *back_bo, *front_bo;
	PixmapPtr pixmap;
	int tmp;

	assert(draw->type != DRAWABLE_PIXMAP);
	pixmap = get_window_pixmap(win);

	back_bo = get_private(back)->bo;
	front_bo = get_private(front)->bo;
	assert(front_bo != back_bo);

	DBG(("%s: win=%ld, exchange front=%d/%d and back=%d/%d, pixmap=%ld %dx%d\n",
	     __FUNCTION__, win->drawable.id,
	     front_bo->handle, front->name,
	     back_bo->handle, back->name,
	     pixmap->drawable.serialNumber,
	     pixmap->drawable.width,
	     pixmap->drawable.height));

	DBG(("%s: back_bo pitch=%d, size=%d, ref=%d, active_scanout?=%d\n",
	     __FUNCTION__, back_bo->pitch, kgem_bo_size(back_bo), back_bo->refcnt, back_bo->active_scanout));
	DBG(("%s: front_bo pitch=%d, size=%d, ref=%d, active_scanout?=%d\n",
	     __FUNCTION__, front_bo->pitch, kgem_bo_size(front_bo), front_bo->refcnt, front_bo->active_scanout));
	assert(front_bo->refcnt);
	assert(back_bo->refcnt);

	assert(sna_pixmap_get_buffer(pixmap) == front);

	/* Both bo must be large enough to back the whole pixmap */
	assert(pixmap->drawable.height * back_bo->pitch <= kgem_bo_size(back_bo));
	assert(pixmap->drawable.height * front_bo->pitch <= kgem_bo_size(front_bo));

	/* The pixmap now scans out of (or renders to) the old back bo */
	set_bo(pixmap, back_bo);

	get_private(front)->bo = back_bo;
	get_private(back)->bo = front_bo;
	/* The client must redraw the new back before presenting it again */
	get_private(back)->stale = true;

	/* Swap the flink names to match the exchanged bo */
	tmp = front->name;
	front->name = back->name;
	back->name = tmp;

	assert(front_bo->refcnt);
	assert(back_bo->refcnt);

	assert(get_private(front)->bo == sna_pixmap(pixmap)->gpu_bo);
}
1942
/* Per-crtc buffer exchange under TearFree: point the CRTC's shadow at
 * the client's back bo, remember that bo as the window's private front
 * (so it can be released on window destruction), and hand the client a
 * freshly allocated scanout bo to render its next frame into.
 */
static void sna_dri2_xchg_crtc(struct sna *sna, DrawablePtr draw, xf86CrtcPtr crtc, DRI2BufferPtr front, DRI2BufferPtr back)
{
	WindowPtr win = (WindowPtr)draw;
	DRI2Buffer2Ptr tmp;
	struct kgem_bo *bo;

	DBG(("%s: exchange front=%d/%d and back=%d/%d, win id=%lu, pixmap=%ld %dx%d\n",
	     __FUNCTION__,
	     get_private(front)->bo->handle, front->name,
	     get_private(back)->bo->handle, back->name,
	     win->drawable.id,
	     get_window_pixmap(win)->drawable.serialNumber,
	     get_window_pixmap(win)->drawable.width,
	     get_window_pixmap(win)->drawable.height));

	/* Scan the CRTC out of the client's back bo and damage the window */
	DamageRegionAppend(&win->drawable, &win->clipList);
	sna_shadow_set_crtc(sna, crtc, get_private(back)->bo);
	DamageRegionProcessPending(&win->drawable);

	assert(dri2_window(win)->front == NULL);

	tmp = calloc(1, sizeof(*tmp) + sizeof(struct sna_dri2_private));
	if (tmp == NULL) {
		/* Allocation failed: keep the client's back buffer alive as
		 * the window's front by proxying it onto the real front.
		 */
		back->attachment = -1;
		if (get_private(back)->proxy == NULL) {
			get_private(back)->pixmap = get_window_pixmap(win);
			get_private(back)->proxy = sna_dri2_reference_buffer(sna_pixmap_get_buffer(get_private(back)->pixmap));
		}
		dri2_window(win)->front = sna_dri2_reference_buffer(back);
		return;
	}

	/* Wrap the back bo in a new FrontLeft buffer owned by the window */
	*tmp = *back;
	tmp->attachment = DRI2BufferFrontLeft;
	tmp->driverPrivate = tmp + 1;
	get_private(tmp)->refcnt = 1;
	get_private(tmp)->bo = get_private(back)->bo;
	get_private(tmp)->size = get_private(back)->size;
	get_private(tmp)->pixmap = get_window_pixmap(win);
	get_private(tmp)->proxy = sna_dri2_reference_buffer(sna_pixmap_get_buffer(get_private(tmp)->pixmap));
	dri2_window(win)->front = tmp;

	/* Replace the back buffer the client renders into next */
	DBG(("%s: allocating new backbuffer\n", __FUNCTION__));
	back->name = 0;
	bo = kgem_create_2d(&sna->kgem,
			    draw->width, draw->height, draw->bitsPerPixel,
			    get_private(back)->bo->tiling,
			    CREATE_SCANOUT);
	if (bo != NULL) {
		get_private(back)->bo = bo;
		back->pitch = bo->pitch;
		back->name = kgem_bo_flink(&sna->kgem, bo);
	}
	/* If allocation or flink failed, invalidate the back buffer */
	if (back->name == 0) {
		if (bo != NULL)
			kgem_bo_destroy(&sna->kgem, bo);
		get_private(back)->bo = NULL;
		back->attachment = -1;
	}
}
2003
2004static void frame_swap_complete(struct sna *sna,
2005				struct sna_dri2_event *frame,
2006				int type)
2007{
2008	const struct ust_msc *swap;
2009
2010	if (frame->draw == NULL)
2011		return;
2012
2013	assert(frame->client);
2014
2015	swap = sna_crtc_last_swap(frame->crtc);
2016	DBG(("%s: draw=%ld, pipe=%d, frame=%lld [msc=%lld], tv=%d.%06d\n",
2017	     __FUNCTION__, (long)frame->draw, frame->pipe,
2018	     (long long)swap->msc,
2019	     (long long)draw_current_msc(frame->draw, frame->crtc, swap->msc),
2020	     swap->tv_sec, swap->tv_usec));
2021
2022	DRI2SwapComplete(frame->client, frame->draw,
2023			 draw_current_msc(frame->draw, frame->crtc, swap->msc),
2024			 swap->tv_sec, swap->tv_usec,
2025			 type, frame->event_complete, frame->event_data);
2026}
2027
2028static void fake_swap_complete(struct sna *sna, ClientPtr client,
2029			       DrawablePtr draw, xf86CrtcPtr crtc,
2030			       int type, DRI2SwapEventPtr func, void *data)
2031{
2032	const struct ust_msc *swap;
2033
2034	swap = sna_crtc_last_swap(crtc);
2035	DBG(("%s: draw=%ld, pipe=%d, frame=%lld [msc %lld], tv=%d.%06d\n",
2036	     __FUNCTION__, (long)draw->id, crtc ? sna_crtc_to_pipe(crtc) : -1,
2037	     (long long)swap->msc,
2038	     (long long)draw_current_msc(draw, crtc, swap->msc),
2039	     swap->tv_sec, swap->tv_usec));
2040
2041	DRI2SwapComplete(client, draw,
2042			 draw_current_msc(draw, crtc, swap->msc),
2043			 swap->tv_sec, swap->tv_usec,
2044			 type, func, data);
2045}
2046
/* Execute the next queued swap for a window once its predecessor has
 * completed: perform the exchange/blit now and requeue a vblank event
 * so completion is reported on the following frame.  If the vblank
 * request fails, the client is unblocked immediately.
 */
static void chain_swap(struct sna *sna, struct sna_dri2_event *chain)
{
	union drm_wait_vblank vbl;

	/* Drawable destroyed while the event was pending */
	if (chain->draw == NULL) {
		sna_dri2_event_free(sna, chain);
		return;
	}

	if (chain->queued) /* too early! */
		return;

	assert(chain == dri2_chain(chain->draw));
	DBG(("%s: chaining draw=%ld, type=%d\n",
	     __FUNCTION__, (long)chain->draw->id, chain->type));
	chain->queued = true;

	switch (chain->type) {
	case SWAP_THROTTLE:
		DBG(("%s: emitting chained vsync'ed blit\n", __FUNCTION__));
		if (sna->mode.shadow && !sna->mode.shadow_damage) {
			/* recursed from wait_for_shadow(), simply requeue */
			DBG(("%s -- recursed from wait_for_shadow(), requeuing\n", __FUNCTION__));
			VG_CLEAR(vbl);
			vbl.request.type =
				DRM_VBLANK_RELATIVE |
				DRM_VBLANK_EVENT;
			vbl.request.sequence = 1;
			vbl.request.signal = (uintptr_t)chain;

			if (!sna_wait_vblank(sna, &vbl, chain->pipe))
				return;

			DBG(("%s -- requeue failed, errno=%d\n", __FUNCTION__, errno));
		}

		/* Prefer zero-copy exchanges; fall back to a vsync'ed blit */
		if (can_xchg(sna, chain->draw, chain->front, chain->back)) {
			sna_dri2_xchg(chain->draw, chain->front, chain->back);
		} else if (can_xchg_crtc(sna, chain->draw, chain->front, chain->back, chain->crtc)) {
			sna_dri2_xchg_crtc(sna, chain->draw, chain->crtc, chain->front, chain->back);
		} else {
			assert(chain->queued);
			chain->bo = __sna_dri2_copy_region(sna, chain->draw, NULL,
							   chain->back, chain->front,
							   true);
		}
		/* fall through - both swap types wait for the next vblank */
	case SWAP:
		break;
	default:
		return;
	}

	/* Schedule completion notification on the next vblank */
	VG_CLEAR(vbl);
	vbl.request.type =
		DRM_VBLANK_RELATIVE |
		DRM_VBLANK_EVENT;
	vbl.request.sequence = 1;
	vbl.request.signal = (uintptr_t)chain;
	if (sna_wait_vblank(sna, &vbl, chain->pipe)) {
		DBG(("%s: vblank wait failed, unblocking client\n", __FUNCTION__));
		frame_swap_complete(sna, chain, DRI2_BLIT_COMPLETE);
		sna_dri2_event_free(sna, chain);
	} else {
		/* Emulate triple buffering: complete early unless the client
		 * already has two swaps outstanding.
		 */
		if (chain->type == SWAP_THROTTLE && !swap_limit(chain->draw, 2)) {
			DBG(("%s: fake triple buffering, unblocking client\n", __FUNCTION__));
			frame_swap_complete(sna, chain, DRI2_BLIT_COMPLETE);
		}
	}
}
2116
2117static inline bool rq_is_busy(struct kgem *kgem, struct kgem_bo *bo)
2118{
2119	if (bo == NULL)
2120		return false;
2121
2122	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
2123	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
2124	assert(bo->refcnt);
2125
2126	if (bo->exec)
2127		return true;
2128
2129	if (bo->rq == NULL)
2130		return false;
2131
2132	return __kgem_busy(kgem, bo->handle);
2133}
2134
2135static bool sna_dri2_blit_complete(struct sna *sna,
2136				   struct sna_dri2_event *info)
2137{
2138	if (rq_is_busy(&sna->kgem, info->bo)) {
2139		union drm_wait_vblank vbl;
2140
2141		DBG(("%s: vsync'ed blit is still busy, postponing\n",
2142		     __FUNCTION__));
2143
2144		VG_CLEAR(vbl);
2145		vbl.request.type =
2146			DRM_VBLANK_RELATIVE |
2147			DRM_VBLANK_EVENT;
2148		vbl.request.sequence = 1;
2149		vbl.request.signal = (uintptr_t)info;
2150		assert(info->queued);
2151		if (!sna_wait_vblank(sna, &vbl, info->pipe))
2152			return false;
2153	}
2154
2155	DBG(("%s: blit finished\n", __FUNCTION__));
2156	return true;
2157}
2158
/* Top-level dispatcher for DRM vblank events belonging to DRI2.
 * Decodes the event back into its sna_dri2_event, advances that event's
 * state machine (flip, swap blit, throttle, wait-MSC), and finally runs
 * any chained swap queued behind it before freeing the event.
 * NOTE the deliberate case fallthroughs: FLIP -> SWAP (when flipping is
 * no longer possible) and SWAP -> SWAP_WAIT (when requeueing fails).
 */
void sna_dri2_vblank_handler(struct sna *sna, struct drm_event_vblank *event)
{
	struct sna_dri2_event *info = (void *)(uintptr_t)event->user_data;
	DrawablePtr draw;
	union drm_wait_vblank vbl;
	uint64_t msc;

	DBG(("%s(type=%d, sequence=%d)\n", __FUNCTION__, info->type, event->sequence));
	assert(info->queued);
	/* Record the hardware timestamp/MSC for this crtc before use. */
	msc = sna_crtc_record_event(info->crtc, event);

	draw = info->draw;
	if (draw == NULL) {
		DBG(("%s -- drawable gone\n", __FUNCTION__));
		goto done;
	}

	switch (info->type) {
	case FLIP:
		/* If we can still flip... */
		if (can_flip(sna, draw, info->front, info->back, info->crtc) &&
		    sna_dri2_flip(sna, info))
			return;

		/* else fall through to blit */
	case SWAP:
		assert(info->queued);
		if (sna->mode.shadow && !sna->mode.shadow_damage) {
			/* recursed from wait_for_shadow(), simply requeue */
			DBG(("%s -- recursed from wait_for_shadow(), requeuing\n", __FUNCTION__));

		} else if (can_xchg(sna, draw, info->front, info->back)) {
			sna_dri2_xchg(draw, info->front, info->back);
			info->type = SWAP_WAIT;
		} else if (can_xchg_crtc(sna, draw, info->front, info->back, info->crtc)) {
			sna_dri2_xchg_crtc(sna, draw, info->crtc, info->front, info->back);
			info->type = SWAP_WAIT;
		}  else {
			assert(info->queued);
			info->bo = __sna_dri2_copy_region(sna, draw, NULL,
							  info->back, info->front, true);
			info->type = SWAP_WAIT;
		}

		/* Wait one more vblank before telling the client. */
		VG_CLEAR(vbl);
		vbl.request.type =
			DRM_VBLANK_RELATIVE |
			DRM_VBLANK_EVENT;
		vbl.request.sequence = 1;
		vbl.request.signal = (uintptr_t)info;

		assert(info->queued);
		if (!sna_wait_vblank(sna, &vbl, info->pipe))
			return;

		DBG(("%s -- requeue failed, errno=%d\n", __FUNCTION__, errno));
		/* fall through to SwapComplete */
	case SWAP_WAIT:
		/* Postpone completion until the blit has retired. */
		if (!sna_dri2_blit_complete(sna, info))
			return;

		DBG(("%s: swap complete, unblocking client (frame=%d, tv=%d.%06d)\n", __FUNCTION__,
		     event->sequence, event->tv_sec, event->tv_usec));
		frame_swap_complete(sna, info, DRI2_BLIT_COMPLETE);
		break;

	case SWAP_THROTTLE:
		DBG(("%s: %d complete, frame=%d tv=%d.%06d\n",
		     __FUNCTION__, info->type,
		     event->sequence, event->tv_sec, event->tv_usec));

		/* With real triple buffering the completion was deferred
		 * until now; otherwise it was already sent when queued.
		 */
		if (xorg_can_triple_buffer(sna)) {
			if (!sna_dri2_blit_complete(sna, info))
				return;

			DBG(("%s: triple buffer swap complete, unblocking client (frame=%d, tv=%d.%06d)\n", __FUNCTION__,
			     event->sequence, event->tv_sec, event->tv_usec));
			frame_swap_complete(sna, info, DRI2_BLIT_COMPLETE);
		}
		break;

	case WAITMSC:
		assert(info->client);
		DRI2WaitMSCComplete(info->client, draw, msc,
				    event->tv_sec, event->tv_usec);
		break;
	default:
		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
			   "%s: unknown vblank event received\n", __func__);
		/* Unknown type */
		break;
	}

	/* Hand over to the next swap queued behind this one, if any. */
	if (info->chain) {
		assert(info->chain != info);
		assert(info->draw == draw);
		sna_dri2_remove_event((WindowPtr)draw, info);
		chain_swap(sna, info->chain);
		info->draw = NULL;
	}

done:
	sna_dri2_event_free(sna, info);
	DBG(("%s complete\n", __FUNCTION__));
}
2264
/* Perform (or chain) an immediate blit-based swap for @info.
 * @sync requests a vsync'ed blit, @event requests that the client be
 * notified. If no blit is already pending on the drawable the copy is
 * emitted straight away; otherwise the event is left on the chain to be
 * run by chain_swap() later. Returns whether the event remains live and
 * the caller should keep it (false => caller frees it).
 */
static bool
sna_dri2_immediate_blit(struct sna *sna,
			struct sna_dri2_event *info,
			bool sync, bool event)
{
	DrawablePtr draw = info->draw;
	bool ret = false;

	if (sna->flags & SNA_NO_WAIT)
		sync = false;

	DBG(("%s: emitting immediate blit, throttling client, synced? %d, chained? %d, send-event? %d\n",
	     __FUNCTION__, sync, dri2_chain(draw) != info,
	     event));

	info->type = SWAP_THROTTLE;
	if (!sync || dri2_chain(draw) == info) {
		DBG(("%s: no pending blit, starting chain\n",
		     __FUNCTION__));

		info->queued = true;
		info->bo = __sna_dri2_copy_region(sna, draw, NULL,
						  info->back,
						  info->front,
						  sync);
		if (event) {
			if (sync) {
				/* Throttle via a vblank event; on success the
				 * completion is deferred to the handler unless
				 * we are faking triple buffering.
				 */
				union drm_wait_vblank vbl;

				VG_CLEAR(vbl);
				vbl.request.type =
					DRM_VBLANK_RELATIVE |
					DRM_VBLANK_EVENT;
				vbl.request.sequence = 1;
				vbl.request.signal = (uintptr_t)info;
				ret = !sna_wait_vblank(sna, &vbl, info->pipe);
				if (ret)
					event = !swap_limit(draw, 2);
			}
			if (event) {
				DBG(("%s: fake triple buffering, unblocking client\n", __FUNCTION__));
				frame_swap_complete(sna, info, DRI2_BLIT_COMPLETE);
			}
		}
	} else {
		DBG(("%s: pending blit, chained\n", __FUNCTION__));
		ret = true;
	}

	DBG(("%s: continue? %d\n", __FUNCTION__, ret));
	return ret;
}
2317
/* Continue a flip chain after the previous flip completed.
 * info->mode encodes the next action: a positive value is the next event
 * type and requests another pageflip of the current front bo; a negative
 * value (stored as -type) requests a fresh client flip via sna_dri2_flip().
 * Returns false when flipping can no longer proceed (caller falls back).
 */
static bool
sna_dri2_flip_continue(struct sna *sna, struct sna_dri2_event *info)
{
	DBG(("%s(mode=%d)\n", __FUNCTION__, info->mode));

	if (info->mode > 0){
		struct kgem_bo *bo = get_private(info->front)->bo;

		info->type = info->mode;

		/* Front buffer changed under us -- cannot flip it. */
		if (bo != sna_pixmap(sna->front)->gpu_bo)
			return false;

		if (!sna_page_flip(sna, bo, sna_dri2_flip_handler, info))
			return false;

		assert(sna->dri2.flip_pending == NULL || sna->dri2.flip_pending == info);
		sna->dri2.flip_pending = info;
		assert(info->queued);
	} else {
		/* Negative mode: undo the sign to recover the event type. */
		info->type = -info->mode;

		if (!info->draw)
			return false;

		if (!can_flip(sna, info->draw, info->front, info->back, info->crtc))
			return false;

		assert(sna_pixmap_get_buffer(get_drawable_pixmap(info->draw)) == info->front);
		if (!sna_dri2_flip(sna, info))
			return false;

		if (!xorg_can_triple_buffer(sna)) {
			/* Hand the client a new back buffer immediately. */
			sna_dri2_get_back(sna, info->draw, info->back, info);
			DBG(("%s: fake triple buffering, unblocking client\n", __FUNCTION__));
			frame_swap_complete(sna, info, DRI2_FLIP_COMPLETE);
		}
	}

	info->mode = 0;
	return true;
}
2360
/* Execute the flip that was queued behind the one that just completed.
 * If flipping is no longer possible, degrade to a vsync'ed blit: with
 * server-side triple buffering the completion waits for a vblank event,
 * otherwise the client is unblocked immediately.
 */
static void chain_flip(struct sna *sna)
{
	struct sna_dri2_event *chain = sna->dri2.flip_pending;

	assert(chain->type == FLIP);
	DBG(("%s: chaining type=%d, cancelled?=%d\n",
	     __FUNCTION__, chain->type, chain->draw == NULL));

	sna->dri2.flip_pending = NULL;
	if (chain->draw == NULL) {
		/* Drawable destroyed while the flip was queued. */
		sna_dri2_event_free(sna, chain);
		return;
	}

	assert(chain == dri2_chain(chain->draw));
	assert(!chain->queued);
	chain->queued = true;

	if (can_flip(sna, chain->draw, chain->front, chain->back, chain->crtc) &&
	    sna_dri2_flip(sna, chain)) {
		DBG(("%s: performing chained flip\n", __FUNCTION__));
	} else {
		DBG(("%s: emitting chained vsync'ed blit\n", __FUNCTION__));
		chain->bo = __sna_dri2_copy_region(sna, chain->draw, NULL,
						  chain->back, chain->front,
						  true);

		if (xorg_can_triple_buffer(sna)) {
			union drm_wait_vblank vbl;

			VG_CLEAR(vbl);

			/* Defer completion until the blit's vblank arrives. */
			chain->type = SWAP_WAIT;
			vbl.request.type =
				DRM_VBLANK_RELATIVE |
				DRM_VBLANK_EVENT;
			vbl.request.sequence = 1;
			vbl.request.signal = (uintptr_t)chain;

			assert(chain->queued);
			if (!sna_wait_vblank(sna, &vbl, chain->pipe))
				return;
		}

		DBG(("%s: fake triple buffering (or vblank wait failed), unblocking client\n", __FUNCTION__));
		frame_swap_complete(sna, chain, DRI2_BLIT_COMPLETE);
		sna_dri2_event_free(sna, chain);
	}
}
2410
/* Handle completion of a pageflip for DRI2.
 * Depending on the flip type this unblocks the client, continues a flip
 * chain (FLIP_THROTTLE/FLIP_COMPLETE via info->mode), or kicks off the
 * next pending flip. NOTE the intentional fallthrough from FLIP_THROTTLE
 * into FLIP_COMPLETE after sending the swap notification.
 */
static void sna_dri2_flip_event(struct sna *sna,
				struct sna_dri2_event *flip)
{
	DBG(("%s(pipe=%d, event=%d)\n", __FUNCTION__, flip->pipe, flip->type));
	assert(flip->queued);

	if (sna->dri2.flip_pending == flip)
		sna->dri2.flip_pending = NULL;

	/* We assume our flips arrive in order, so we don't check the frame */
	switch (flip->type) {
	case FLIP:
		DBG(("%s: swap complete, unblocking client\n", __FUNCTION__));
		frame_swap_complete(sna, flip, DRI2_FLIP_COMPLETE);
		sna_dri2_event_free(sna, flip);

		if (sna->dri2.flip_pending)
			chain_flip(sna);
		break;

	case FLIP_THROTTLE:
		DBG(("%s: triple buffer swap complete, unblocking client\n", __FUNCTION__));
		frame_swap_complete(sna, flip, DRI2_FLIP_COMPLETE);
		/* fall through -- shared chain-continuation logic */
	case FLIP_COMPLETE:
		if (sna->dri2.flip_pending) {
			/* Another client's flip is waiting; yield to it. */
			sna_dri2_event_free(sna, flip);
			chain_flip(sna);
		} else if (!flip->mode) {
			DBG(("%s: flip chain complete\n", __FUNCTION__));

			if (flip->chain) {
				sna_dri2_remove_event((WindowPtr)flip->draw,
						      flip);
				chain_swap(sna, flip->chain);
				flip->draw = NULL;
			}

			sna_dri2_event_free(sna, flip);
		} else if (!sna_dri2_flip_continue(sna, flip)) {
			DBG(("%s: no longer able to flip\n", __FUNCTION__));
			if (flip->draw == NULL || !sna_dri2_immediate_blit(sna, flip, false, flip->mode < 0))
				sna_dri2_event_free(sna, flip);
		}
		break;

	default: /* Unknown type */
		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
			   "%s: unknown vblank event received\n", __func__);
		sna_dri2_event_free(sna, flip);
		if (sna->dri2.flip_pending)
			chain_flip(sna);
		break;
	}
}
2465
2466static uint64_t
2467get_current_msc(struct sna *sna, DrawablePtr draw, xf86CrtcPtr crtc)
2468{
2469	union drm_wait_vblank vbl;
2470	uint64_t ret = -1;
2471
2472	VG_CLEAR(vbl);
2473	vbl.request.type = _DRM_VBLANK_RELATIVE;
2474	vbl.request.sequence = 0;
2475	if (sna_wait_vblank(sna, &vbl, sna_crtc_to_pipe(crtc)) == 0)
2476		ret = sna_crtc_record_vblank(crtc, &vbl);
2477
2478	return draw_current_msc(draw, crtc, ret);
2479}
2480
#if defined(CHECK_FOR_COMPOSITOR)
/* Resource-enumeration callback for LookupClientResourceComplex(): we only
 * care whether at least one matching resource exists, so match everything.
 */
static Bool find(pointer value, XID id, pointer cdata)
{
	return TRUE;
}
#endif
2487
/* Decide which flip event type to use for an immediate swap.
 * Returns FLIP (plain double buffering), FLIP_ASYNC/FLIP_COMPLETE for
 * async swaps, FLIP_THROTTLE when the server supports real triple
 * buffering, or FLIP_COMPLETE to fake triple buffering otherwise.
 * Compositors are excluded from fake triple buffering since they redirect
 * windows and repaint on their own schedule.
 */
static int use_triple_buffer(struct sna *sna, ClientPtr client, bool async)
{
	if ((sna->flags & SNA_TRIPLE_BUFFER) == 0) {
		DBG(("%s: triple buffer disabled, using FLIP\n", __FUNCTION__));
		return FLIP;
	}

	if (async) {
		DBG(("%s: running async, using %s\n", __FUNCTION__,
		     sna->flags & SNA_HAS_ASYNC_FLIP ? "FLIP_ASYNC" : "FLIP_COMPLETE"));
		return sna->flags & SNA_HAS_ASYNC_FLIP ? FLIP_ASYNC : FLIP_COMPLETE;
	}

	if (xorg_can_triple_buffer(sna)) {
		DBG(("%s: triple buffer enabled, using FLIP_THROTTLE\n", __FUNCTION__));
		return FLIP_THROTTLE;
	}

#if defined(CHECK_FOR_COMPOSITOR)
	/* Hack: Disable triple buffering for compositors */
	{
		/* The answer is cached per-client on first use: FLIP if the
		 * client owns any composite-client-window resource, else
		 * FLIP_COMPLETE.
		 */
		struct sna_client *priv = sna_client(client);
		if (priv->is_compositor == 0)
			priv->is_compositor =
				LookupClientResourceComplex(client,
							    CompositeClientWindowType+1,
							    find, NULL) ? FLIP : FLIP_COMPLETE;

		DBG(("%s: fake triple buffer enabled?=%d using %s\n", __FUNCTION__,
		     priv->is_compositor != FLIP, priv->is_compositor == FLIP ? "FLIP" : "FLIP_COMPLETE"));
		return priv->is_compositor;
	}
#else
	DBG(("%s: fake triple buffer enabled, using FLIP_COMPLETE\n", __FUNCTION__));
	return FLIP_COMPLETE;
#endif
}
2525
2526static bool immediate_swap(struct sna *sna,
2527			   uint64_t target_msc,
2528			   uint64_t divisor,
2529			   DrawablePtr draw,
2530			   xf86CrtcPtr crtc,
2531			   uint64_t *current_msc)
2532{
2533	if (divisor == 0) {
2534		*current_msc = -1;
2535
2536		if (sna->flags & SNA_NO_WAIT) {
2537			DBG(("%s: yes, waits are disabled\n", __FUNCTION__));
2538			return true;
2539		}
2540
2541		if (target_msc)
2542			*current_msc = get_current_msc(sna, draw, crtc);
2543
2544		DBG(("%s: current_msc=%ld, target_msc=%ld -- %s\n",
2545		     __FUNCTION__, (long)*current_msc, (long)target_msc,
2546		     (*current_msc >= target_msc - 1) ? "yes" : "no"));
2547		return *current_msc >= target_msc - 1;
2548	}
2549
2550	DBG(("%s: explicit waits requests, divisor=%ld\n",
2551	     __FUNCTION__, (long)divisor));
2552	*current_msc = get_current_msc(sna, draw, crtc);
2553	return false;
2554}
2555
/* Schedule a pageflip-based swap.
 * For immediate swaps: if a flip is already pending on this drawable the
 * request is either folded into it (xchg now) or chained via info->mode;
 * otherwise a new flip is launched (possibly triple-buffered). Targeted
 * swaps queue an absolute vblank event one frame early to cover pageflip
 * latency. Returns false if the caller must fall back to a blit/exchange.
 * On success *target_msc is updated to the frame reported to the client.
 */
static bool
sna_dri2_schedule_flip(ClientPtr client, DrawablePtr draw, xf86CrtcPtr crtc,
		       DRI2BufferPtr front, DRI2BufferPtr back,
		       CARD64 *target_msc, CARD64 divisor, CARD64 remainder,
		       DRI2SwapEventPtr func, void *data)
{
	struct sna *sna = to_sna_from_drawable(draw);
	struct sna_dri2_event *info;
	uint64_t current_msc;

	if (immediate_swap(sna, *target_msc, divisor, draw, crtc, &current_msc)) {
		int type;

		info = sna->dri2.flip_pending;
		DBG(("%s: performing immediate swap on pipe %d, pending? %d, mode: %d, continuation? %d\n",
		     __FUNCTION__, sna_crtc_to_pipe(crtc),
		     info != NULL, info ? info->mode : 0,
		     info && info->draw == draw));

		if (info && info->draw == draw) {
			/* Merge into the flip already pending on this
			 * drawable, adopting the (possibly new) back buffer.
			 */
			assert(info->type != FLIP);
			assert(info->front == front);
			if (info->back != back) {
				_sna_dri2_destroy_buffer(sna, info->back);
				info->back = sna_dri2_reference_buffer(back);
			}
			if (info->mode || current_msc >= *target_msc) {
				DBG(("%s: executing xchg of pending flip\n",
				     __FUNCTION__));
				sna_dri2_xchg(draw, front, back);
				info->mode = type = FLIP_COMPLETE;
				goto new_back;
			} else {
				DBG(("%s: chaining flip\n", __FUNCTION__));
				type = FLIP_THROTTLE;
				/* Negative mode => continue via a fresh
				 * client flip in sna_dri2_flip_continue().
				 */
				if (xorg_can_triple_buffer(sna))
					info->mode = -type;
				else
					info->mode = -FLIP_COMPLETE;
				goto out;
			}
		}

		info = sna_dri2_add_event(sna, draw, client);
		if (info == NULL)
			return false;

		assert(info->crtc == crtc);
		info->event_complete = func;
		info->event_data = data;

		info->front = sna_dri2_reference_buffer(front);
		info->back = sna_dri2_reference_buffer(back);

		if (sna->dri2.flip_pending) {
			/* We need to first wait (one vblank) for the
			 * async flips to complete before this client
			 * can take over.
			 */
			DBG(("%s: queueing flip after pending completion\n",
			     __FUNCTION__));
			info->type = type = FLIP;
			sna->dri2.flip_pending = info;
			assert(info->queued);
			current_msc++;
		} else {
			info->type = type = use_triple_buffer(sna, client, *target_msc == 0);
			if (!sna_dri2_flip(sna, info)) {
				DBG(("%s: flip failed, falling back\n", __FUNCTION__));
				sna_dri2_event_free(sna, info);
				return false;
			}
		}

		swap_limit(draw, 1 + (type == FLIP_THROTTLE));
		if (type >= FLIP_COMPLETE) {
new_back:
			/* Faked triple buffering: give the client a fresh
			 * back buffer and unblock it right away.
			 */
			if (!xorg_can_triple_buffer(sna))
				sna_dri2_get_back(sna, draw, back, info);
			DBG(("%s: fake triple buffering, unblocking client\n", __FUNCTION__));
			frame_swap_complete(sna, info, DRI2_EXCHANGE_COMPLETE);
			if (info->type == FLIP_ASYNC)
				sna_dri2_event_free(sna, info);
		}
out:
		DBG(("%s: target_msc=%llu\n", __FUNCTION__, current_msc + 1));
		*target_msc = current_msc + 1;
		return true;
	}

	/* Targeted (non-immediate) flip: queue a vblank event for it. */
	info = sna_dri2_add_event(sna, draw, client);
	if (info == NULL)
		return false;

	assert(info->crtc == crtc);
	info->event_complete = func;
	info->event_data = data;
	info->type = FLIP;

	info->front = sna_dri2_reference_buffer(front);
	info->back = sna_dri2_reference_buffer(back);

	/*
	 * If divisor is zero, or current_msc is smaller than target_msc
	 * we just need to make sure target_msc passes before initiating
	 * the swap.
	 */
	if (divisor && current_msc >= *target_msc) {
		DBG(("%s: missed target, queueing event for next: current=%lld, target=%lld, divisor=%lld, remainder=%lld\n",
		     __FUNCTION__,
		     (long long)current_msc,
		     (long long)*target_msc,
		     (long long)divisor,
		     (long long)remainder));

		*target_msc = current_msc + remainder - current_msc % divisor;
		if (*target_msc <= current_msc)
			*target_msc += divisor;
	}

	if (*target_msc <= current_msc + 1) {
		if (!sna_dri2_flip(sna, info)) {
			sna_dri2_event_free(sna, info);
			return false;
		}
		*target_msc = current_msc + 1;
	} else {
		union drm_wait_vblank vbl;

		VG_CLEAR(vbl);

		vbl.request.type =
			DRM_VBLANK_ABSOLUTE |
			DRM_VBLANK_EVENT;

		/* Account for 1 frame extra pageflip delay */
		vbl.reply.sequence = draw_target_seq(draw, *target_msc - 1);
		vbl.request.signal = (uintptr_t)info;

		info->queued = true;
		if (sna_wait_vblank(sna, &vbl, info->pipe)) {
			sna_dri2_event_free(sna, info);
			return false;
		}
	}

	DBG(("%s: reported target_msc=%llu\n", __FUNCTION__, *target_msc));
	swap_limit(draw, 1);
	return true;
}
2706
/* Try to satisfy a swap with a plain buffer exchange.
 * Only applicable to immediate swaps. The exchange happens now unless a
 * synced swap is already chained behind another; a SWAP_THROTTLE event is
 * queued to pace a synced client, otherwise completion is faked at once.
 * Note the `goto complete` targets the label inside the else-branch: it is
 * the shared fallback for queueing failures. Returns false if the swap
 * was not immediate and must be scheduled by other means.
 */
static bool
sna_dri2_schedule_xchg(ClientPtr client, DrawablePtr draw, xf86CrtcPtr crtc,
		       DRI2BufferPtr front, DRI2BufferPtr back,
		       CARD64 *target_msc, CARD64 divisor, CARD64 remainder,
		       DRI2SwapEventPtr func, void *data)
{
	struct sna *sna = to_sna_from_drawable(draw);
	uint64_t current_msc;
	bool sync, event;

	if (!immediate_swap(sna, *target_msc, divisor, draw, crtc, &current_msc))
		return false;

	sync = current_msc < *target_msc;
	event = dri2_chain(draw) == NULL;
	if (!sync || event) {
		DBG(("%s: performing immediate xchg on pipe %d\n",
		     __FUNCTION__, sna_crtc_to_pipe(crtc)));
		sna_dri2_xchg(draw, front, back);
	}
	if (sync) {
		struct sna_dri2_event *info;

		info = sna_dri2_add_event(sna, draw, client);
		if (!info)
			goto complete;

		info->event_complete = func;
		info->event_data = data;

		info->front = sna_dri2_reference_buffer(front);
		info->back = sna_dri2_reference_buffer(back);
		info->type = SWAP_THROTTLE;

		if (event) {
			/* Queue a vblank event to throttle the client. */
			union drm_wait_vblank vbl;

			VG_CLEAR(vbl);
			vbl.request.type =
				DRM_VBLANK_RELATIVE |
				DRM_VBLANK_EVENT;
			vbl.request.sequence = 1;
			vbl.request.signal = (uintptr_t)info;

			info->queued = true;
			if (sna_wait_vblank(sna, &vbl, info->pipe)) {
				sna_dri2_event_free(sna, info);
				goto complete;
			}

			swap_limit(draw, 2);
		}
	} else {
complete:
		fake_swap_complete(sna, client, draw, crtc, DRI2_EXCHANGE_COMPLETE, func, data);
	}

	*target_msc = current_msc + 1;
	return true;
}
2767
/* As sna_dri2_schedule_xchg(), but for the per-CRTC exchange path
 * (sna_dri2_xchg_crtc), used when the drawable covers a single CRTC and
 * only that scanout can be exchanged. Same flow: immediate-only, optional
 * throttle event for synced clients, `goto complete` as the shared
 * failure fallback into the else-branch.
 */
static bool
sna_dri2_schedule_xchg_crtc(ClientPtr client, DrawablePtr draw, xf86CrtcPtr crtc,
			    DRI2BufferPtr front, DRI2BufferPtr back,
			    CARD64 *target_msc, CARD64 divisor, CARD64 remainder,
			    DRI2SwapEventPtr func, void *data)
{
	struct sna *sna = to_sna_from_drawable(draw);
	uint64_t current_msc;
	bool sync, event;

	if (!immediate_swap(sna, *target_msc, divisor, draw, crtc, &current_msc))
		return false;

	sync = current_msc < *target_msc;
	event = dri2_chain(draw) == NULL;
	if (!sync || event) {
		DBG(("%s: performing immediate xchg only on pipe %d\n",
		     __FUNCTION__, sna_crtc_to_pipe(crtc)));
		sna_dri2_xchg_crtc(sna, draw, crtc, front, back);
	}
	if (sync) {
		struct sna_dri2_event *info;

		info = sna_dri2_add_event(sna, draw, client);
		if (!info)
			goto complete;

		info->event_complete = func;
		info->event_data = data;

		info->front = sna_dri2_reference_buffer(front);
		info->back = sna_dri2_reference_buffer(back);
		info->type = SWAP_THROTTLE;

		if (event) {
			/* Queue a vblank event to throttle the client. */
			union drm_wait_vblank vbl;

			VG_CLEAR(vbl);
			vbl.request.type =
				DRM_VBLANK_RELATIVE |
				DRM_VBLANK_EVENT;
			vbl.request.sequence = 1;
			vbl.request.signal = (uintptr_t)info;

			info->queued = true;
			if (sna_wait_vblank(sna, &vbl, info->pipe)) {
				sna_dri2_event_free(sna, info);
				goto complete;
			}

			swap_limit(draw, 2);
		}
	} else {
complete:
		fake_swap_complete(sna, client, draw, crtc, DRI2_EXCHANGE_COMPLETE, func, data);
	}

	*target_msc = current_msc + 1;
	return true;
}
2828
2829static bool has_pending_events(struct sna *sna)
2830{
2831	struct pollfd pfd;
2832	pfd.fd = sna->kgem.fd;
2833	pfd.events = POLLIN;
2834	return poll(&pfd, 1, 0) == 1;
2835}
2836
2837/*
2838 * ScheduleSwap is responsible for requesting a DRM vblank event for the
2839 * appropriate frame.
2840 *
2841 * In the case of a blit (e.g. for a windowed swap) or buffer exchange,
2842 * the vblank requested can simply be the last queued swap frame + the swap
2843 * interval for the drawable.
2844 *
2845 * In the case of a page flip, we request an event for the last queued swap
2846 * frame + swap interval - 1, since we'll need to queue the flip for the frame
2847 * immediately following the received event.
2848 *
2849 * The client will be blocked if it tries to perform further GL commands
2850 * after queueing a swap, though in the Intel case after queueing a flip, the
2851 * client is free to queue more commands; they'll block in the kernel if
2852 * they access buffers busy with the flip.
2853 *
2854 * When the swap is complete, the driver should call into the server so it
2855 * can send any swap complete events that have been requested.
2856 */
/* DRI2 ScheduleSwap entry point.
 * Tries the cheapest mechanism that applies, in order: buffer exchange,
 * per-CRTC exchange, pageflip, immediate blit, then a vblank-queued blit.
 * Off-screen or stale requests short-circuit to `skip`, which fakes the
 * completion; `blit` is the generic failure fallback that still updates
 * the screen before faking completion. Always returns TRUE.
 */
static int
sna_dri2_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
		       DRI2BufferPtr back, CARD64 *target_msc, CARD64 divisor,
		       CARD64 remainder, DRI2SwapEventPtr func, void *data)
{
	struct sna *sna = to_sna_from_drawable(draw);
	union drm_wait_vblank vbl;
	xf86CrtcPtr crtc = NULL;
	struct sna_dri2_event *info = NULL;
	CARD64 current_msc;

	DBG(("%s: draw=%lu %dx%d, pixmap=%ld %dx%d, back=%u (refs=%d/%d, flush=%d) , front=%u (refs=%d/%d, flush=%d)\n",
	     __FUNCTION__,
	     (long)draw->id, draw->width, draw->height,
	     get_drawable_pixmap(draw)->drawable.serialNumber,
	     get_drawable_pixmap(draw)->drawable.width,
	     get_drawable_pixmap(draw)->drawable.height,
	     get_private(back)->bo->handle,
	     get_private(back)->refcnt,
	     get_private(back)->bo->refcnt,
	     get_private(back)->bo->flush,
	     get_private(front)->bo->handle,
	     get_private(front)->refcnt,
	     get_private(front)->bo->refcnt,
	     get_private(front)->bo->flush));

	DBG(("%s(target_msc=%llu, divisor=%llu, remainder=%llu)\n",
	     __FUNCTION__,
	     (long long)*target_msc,
	     (long long)divisor,
	     (long long)remainder));

	assert(get_private(front)->refcnt);
	assert(get_private(back)->refcnt);

	assert(get_private(front)->bo->refcnt);
	assert(get_private(back)->bo->refcnt);

	/* Buffers no longer match the drawable: nothing sensible to show. */
	if (get_private(front)->pixmap != get_drawable_pixmap(draw))
		goto skip;

	if (get_private(back)->stale)
		goto skip;

	assert(sna_pixmap_from_drawable(draw)->flush);

	if (draw->type != DRAWABLE_PIXMAP) {
		WindowPtr win = (WindowPtr)draw;
		struct dri2_window *priv = dri2_window(win);
		/* Drop the cached front reference held for this window. */
		if (priv->front) {
			assert(front == priv->front);
			assert(get_private(priv->front)->refcnt > 1);
			get_private(priv->front)->refcnt--;
			priv->front = NULL;
		}
		/* Fully clipped windows have nothing visible to update. */
		if (win->clipList.extents.x2 <= win->clipList.extents.x1 ||
		    win->clipList.extents.y2 <= win->clipList.extents.y1)
			goto skip;
	}

	/* Drawable not displayed... just complete the swap */
	if ((sna->flags & SNA_NO_WAIT) == 0)
		crtc = sna_dri2_get_crtc(draw);
	if (crtc == NULL) {
		DBG(("%s: off-screen, immediate update\n", __FUNCTION__));
		goto blit;
	}

	assert(draw->type != DRAWABLE_PIXMAP);

	/* Drain any completed events first so chain state is current. */
	while (dri2_chain(draw) && has_pending_events(sna)) {
		DBG(("%s: flushing pending events\n", __FUNCTION__));
		sna_mode_wakeup(sna);
	}

	if (can_xchg(sna, draw, front, back) &&
	    sna_dri2_schedule_xchg(client, draw, crtc, front, back,
				   target_msc, divisor, remainder,
				   func, data))
		return TRUE;

	if (can_xchg_crtc(sna, draw, front, back, crtc) &&
	    sna_dri2_schedule_xchg_crtc(client, draw, crtc, front, back,
					target_msc, divisor, remainder,
					func, data))
		return TRUE;

	if (can_flip(sna, draw, front, back, crtc) &&
	    sna_dri2_schedule_flip(client, draw, crtc, front, back,
				  target_msc, divisor, remainder,
				  func, data))
		return TRUE;

	VG_CLEAR(vbl);

	info = sna_dri2_add_event(sna, draw, client);
	if (!info)
		goto blit;

	assert(info->crtc == crtc);
	info->event_complete = func;
	info->event_data = data;

	info->front = sna_dri2_reference_buffer(front);
	info->back = sna_dri2_reference_buffer(back);

	if (immediate_swap(sna, *target_msc, divisor, draw, crtc, &current_msc)) {
		bool sync = current_msc < *target_msc;
		if (!sna_dri2_immediate_blit(sna, info, sync, true))
			sna_dri2_event_free(sna, info);
		*target_msc = current_msc + sync;
		return TRUE;
	}

	vbl.request.type =
		DRM_VBLANK_ABSOLUTE |
		DRM_VBLANK_EVENT;
	vbl.request.signal = (uintptr_t)info;

	/*
	 * If divisor is zero, or current_msc is smaller than target_msc
	 * we just need to make sure target_msc passes before initiating
	 * the swap.
	 */
	info->type = SWAP;
	info->queued = true;
	if (divisor && current_msc >= *target_msc) {
		DBG(("%s: missed target, queueing event for next: current=%lld, target=%lld, divisor=%lld, remainder=%lld\n",
		     __FUNCTION__,
		     (long long)current_msc,
		     (long long)*target_msc,
		     (long long)divisor,
		     (long long)remainder));

		*target_msc = current_msc + remainder - current_msc % divisor;
		if (*target_msc <= current_msc)
			*target_msc += divisor;
	}
	vbl.request.sequence = draw_target_seq(draw, *target_msc - 1);
	if (*target_msc <= current_msc + 1) {
		/* Target is (nearly) now: blit immediately and just wait
		 * one vblank before signalling completion.
		 */
		DBG(("%s: performing blit before queueing\n", __FUNCTION__));
		assert(info->queued);
		info->bo = __sna_dri2_copy_region(sna, draw, NULL,
						  back, front,
						  true);
		info->type = SWAP_WAIT;

		vbl.request.type =
			DRM_VBLANK_RELATIVE |
			DRM_VBLANK_EVENT;
		vbl.request.sequence = 1;
		*target_msc = current_msc + 1;
	}

	assert(info->queued);
	if (sna_wait_vblank(sna, &vbl, info->pipe))
		goto blit;

	DBG(("%s: reported target_msc=%llu\n", __FUNCTION__, *target_msc));
	swap_limit(draw, 1 + (info->type == SWAP_WAIT));
	return TRUE;

blit:
	DBG(("%s -- blit\n", __FUNCTION__));
	if (info)
		sna_dri2_event_free(sna, info);
	if (can_xchg(sna, draw, front, back))
		sna_dri2_xchg(draw, front, back);
	else
		__sna_dri2_copy_region(sna, draw, NULL, back, front, false);
skip:
	DBG(("%s: unable to show frame, unblocking client\n", __FUNCTION__));
	if (crtc == NULL)
		crtc = sna_mode_first_crtc(sna);
	fake_swap_complete(sna, client, draw, crtc, DRI2_BLIT_COMPLETE, func, data);
	*target_msc = 0; /* offscreen, so zero out target vblank count */
	return TRUE;
}
3035
3036/*
3037 * Get current frame count and frame count timestamp, based on drawable's
3038 * crtc.
3039 */
/* DRI2 GetMSC entry point: report the drawable's current MSC and the
 * UST of the last recorded vblank. Visible drawables query the hardware
 * counter first; off-screen drawables fall back to the first CRTC so the
 * returned value stays monotonic. Always returns TRUE.
 */
static int
sna_dri2_get_msc(DrawablePtr draw, CARD64 *ust, CARD64 *msc)
{
	struct sna *sna = to_sna_from_drawable(draw);
	xf86CrtcPtr crtc = sna_dri2_get_crtc(draw);
	const struct ust_msc *swap;

	DBG(("%s(draw=%ld, pipe=%d)\n", __FUNCTION__, draw->id,
	     crtc ? sna_crtc_to_pipe(crtc) : -1));

	if (crtc != NULL) {
		union drm_wait_vblank vbl;

		/* Refresh the recorded vblank count for this crtc. */
		VG_CLEAR(vbl);
		vbl.request.type = _DRM_VBLANK_RELATIVE;
		vbl.request.sequence = 0;
		if (sna_wait_vblank(sna, &vbl, sna_crtc_to_pipe(crtc)) == 0)
			sna_crtc_record_vblank(crtc, &vbl);
	} else
		/* Drawable not displayed, make up a *monotonic* value */
		crtc = sna_mode_first_crtc(sna);

	swap = sna_crtc_last_swap(crtc);
	*msc = draw_current_msc(draw, crtc, swap->msc);
	*ust = ust64(swap->tv_sec, swap->tv_usec);
	DBG(("%s: msc=%llu, ust=%llu\n", __FUNCTION__,
	     (long long)*msc, (long long)*ust));
	return TRUE;
}
3069
3070/*
3071 * Request a DRM event when the requested conditions will be satisfied.
3072 *
3073 * We need to handle the event and ask the server to wake up the client when
3074 * we receive it.
3075 */
static int
sna_dri2_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
			   CARD64 divisor, CARD64 remainder)
{
	struct sna *sna = to_sna_from_drawable(draw);
	struct sna_dri2_event *info = NULL;
	xf86CrtcPtr crtc;
	CARD64 current_msc;
	union drm_wait_vblank vbl;
	const struct ust_msc *swap;
	int pipe;

	crtc = sna_dri2_get_crtc(draw);
	DBG(("%s(pipe=%d, target_msc=%llu, divisor=%llu, rem=%llu)\n",
	     __FUNCTION__, crtc ? sna_crtc_to_pipe(crtc) : -1,
	     (long long)target_msc,
	     (long long)divisor,
	     (long long)remainder));

	/* Drawable not visible, return immediately */
	if (crtc == NULL)
		goto out_complete;

	pipe = sna_crtc_to_pipe(crtc);

	VG_CLEAR(vbl);

	/* Get current count */
	vbl.request.type = _DRM_VBLANK_RELATIVE;
	vbl.request.sequence = 0;
	if (sna_wait_vblank(sna, &vbl, pipe))
		goto out_complete;

	/* Record the fresh vblank count and translate it into the
	 * drawable's MSC space in a single step. */
	current_msc = draw_current_msc(draw, crtc, sna_crtc_record_vblank(crtc, &vbl));

	/* If target_msc already reached or passed, set it to
	 * current_msc to ensure we return a reasonable value back
	 * to the caller. This keeps the client from continually
	 * sending us MSC targets from the past by forcibly updating
	 * their count on this call.
	 */
	if (divisor == 0 && current_msc >= target_msc)
		goto out_complete;

	/* Allocate a WAITMSC event so the vblank handler can later wake
	 * the blocked client. */
	info = sna_dri2_add_event(sna, draw, client);
	if (!info)
		goto out_complete;

	assert(info->crtc == crtc);
	info->type = WAITMSC;

	/* The event cookie is the info pointer itself; the kernel hands it
	 * back when the vblank fires. */
	vbl.request.signal = (uintptr_t)info;
	vbl.request.type = DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT;
	/*
	 * If divisor is zero, or current_msc is smaller than target_msc,
	 * we just need to make sure target_msc passes before waking up the
	 * client. Otherwise, compute the next msc to match divisor/remainder.
	 */
	if (divisor && current_msc >= target_msc) {
		DBG(("%s: missed target, queueing event for next: current=%lld, target=%lld, divisor=%lld, remainder=%lld\n",
		     __FUNCTION__,
		     (long long)current_msc,
		     (long long)target_msc,
		     (long long)divisor,
		     (long long)remainder));
		/* Next msc >= current with msc % divisor == remainder,
		 * per the DRI2 ScheduleWaitMSC contract. */
		target_msc = current_msc + remainder - current_msc % divisor;
		if (target_msc <= current_msc)
			target_msc += divisor;
	}
	vbl.request.sequence = draw_target_seq(draw, target_msc);

	/* NOTE(review): queued is set before submitting the ioctl —
	 * presumably so the handler sees a consistent state even if the
	 * event is delivered immediately; confirm against the event loop. */
	info->queued = true;
	if (sna_wait_vblank(sna, &vbl, pipe))
		goto out_free_info;

	DRI2BlockClient(client, draw);
	return TRUE;

out_free_info:
	sna_dri2_event_free(sna, info);
out_complete:
	/* No wait is possible (drawable offscreen or ioctl failure):
	 * complete the request at once using the last recorded swap time. */
	if (crtc == NULL)
		crtc = sna_mode_first_crtc(sna);
	swap = sna_crtc_last_swap(crtc);
	DRI2WaitMSCComplete(client, draw,
			    draw_current_msc(draw, crtc, swap->msc),
			    swap->tv_sec, swap->tv_usec);
	return TRUE;
}
3165#else
/* No-op stub used when the DRI2 event path above is compiled out:
 * there is no per-window DRI2 state to tear down. */
void sna_dri2_destroy_window(WindowPtr win) { }
3167#endif
3168
3169static bool has_i830_dri(void)
3170{
3171	return access(DRI_DRIVER_PATH "/i830_dri.so", R_OK) == 0;
3172}
3173
/*
 * Compare two driver-name strings, strcmp()-style, ignoring case and
 * any '_', ' ' or '\t' separator characters (so "SNB GT1" matches
 * "snb_gt1").
 *
 * A NULL pointer is treated as an empty string.  Note the historical
 * quirk preserved here: an empty s1 against a non-empty s2 returns 1,
 * not a negative value.
 */
static int
namecmp(const char *s1, const char *s2)
{
	char c1, c2;

	if (!s1 || *s1 == 0) {
		if (!s2 || *s2 == 0)
			return 0;
		else
			return 1;
	}

	/* Guard against NULL s2: the separator-skip loop below would
	 * otherwise dereference it. */
	if (!s2)
		s2 = "";

	while (*s1 == '_' || *s1 == ' ' || *s1 == '\t')
		s1++;

	while (*s2 == '_' || *s2 == ' ' || *s2 == '\t')
		s2++;

	/* Cast through unsigned char: passing a plain (possibly negative)
	 * char to the <ctype.h> functions is undefined behaviour
	 * (CERT STR37-C).  tolower() already leaves non-uppercase
	 * characters unchanged, so the isupper() pre-check is redundant. */
	c1 = tolower((unsigned char)*s1);
	c2 = tolower((unsigned char)*s2);
	while (c1 == c2) {
		if (c1 == '\0')
			return 0;

		s1++;
		while (*s1 == '_' || *s1 == ' ' || *s1 == '\t')
			s1++;

		s2++;
		while (*s2 == '_' || *s2 == ' ' || *s2 == '\t')
			s2++;

		c1 = tolower((unsigned char)*s1);
		c2 = tolower((unsigned char)*s2);
	}

	return c1 - c2;
}
3212
/*
 * Decide whether the option string is a plain boolean/level value rather
 * than an explicit driver name.
 *
 * Returns true for NULL/empty strings, the recognised boolean words
 * ("on"/"true"/"yes", "0"/"off"/"false"/"no"), or a bare non-zero number.
 * For a "level:name" form (e.g. "2:i915"), *str is advanced past the
 * colon so the caller sees just the driver name, and false is returned.
 */
static bool is_level(const char **str)
{
	const char *s = *str;
	char *end;
	unsigned val;

	if (s == NULL || *s == '\0')
		return true;

	/* Boolean "enable" spellings */
	if (namecmp(s, "on") == 0)
		return true;
	if (namecmp(s, "true") == 0)
		return true;
	if (namecmp(s, "yes") == 0)
		return true;

	/* Boolean "disable" spellings */
	if (namecmp(s, "0") == 0)
		return true;
	if (namecmp(s, "off") == 0)
		return true;
	if (namecmp(s, "false") == 0)
		return true;
	if (namecmp(s, "no") == 0)
		return true;

	/* Bare number => a DRI level; "number:rest" => level plus a
	 * driver name, which we expose to the caller via *str. */
	val = strtoul(s, &end, 0);
	if (val && *end == '\0')
		return true;
	if (val && *end == ':')
		*str = end + 1;
	return false;
}
3245
3246static const char *dri_driver_name(struct sna *sna)
3247{
3248	const char *s = xf86GetOptValString(sna->Options, OPTION_DRI);
3249
3250	if (is_level(&s)) {
3251		if (sna->kgem.gen < 030)
3252			return has_i830_dri() ? "i830" : "i915";
3253		else if (sna->kgem.gen < 040)
3254			return "i915";
3255		else
3256			return "i965";
3257	}
3258
3259	return s;
3260}
3261
/* Register this driver's DRI2 implementation with the X server,
 * advertising the highest DRI2InfoRec version the server headers
 * (and runtime DRI2 module) support.  Returns false if the server's
 * DRI2 module is too old. */
bool sna_dri2_open(struct sna *sna, ScreenPtr screen)
{
	DRI2InfoRec info;
	int major = 1, minor = 0;
#if DRI2INFOREC_VERSION >= 4
	const char *driverNames[2];
#endif

	DBG(("%s()\n", __FUNCTION__));

	/* A wedged GPU is not fatal here, just noteworthy. */
	if (wedged(sna)) {
		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
			   "loading DRI2 whilst the GPU is wedged.\n");
	}

	if (xf86LoaderCheckSymbol("DRI2Version"))
		DRI2Version(&major, &minor);

	if (minor < 1) {
		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
			   "DRI2 requires DRI2 module version 1.1.0 or later\n");
		return false;
	}

	memset(&info, '\0', sizeof(info));
	info.fd = sna->kgem.fd;
	info.driverName = dri_driver_name(sna);
	info.deviceName = intel_get_client_name(sna->scrn);

	DBG(("%s: loading dri driver '%s' [gen=%d] for device '%s'\n",
	     __FUNCTION__, info.driverName, sna->kgem.gen, info.deviceName));

#if DRI2INFOREC_VERSION == 2
	/* The ABI between 2 and 3 was broken so we could get rid of
	 * the multi-buffer alloc functions.  Make sure we indicate the
	 * right version so DRI2 can reject us if it's version 3 or above. */
	info.version = 2;
#else
	info.version = 3;
#endif
	info.CreateBuffer = sna_dri2_create_buffer;
	info.DestroyBuffer = sna_dri2_destroy_buffer;

	info.CopyRegion = sna_dri2_copy_region;
#if DRI2INFOREC_VERSION >= 4
	/* Version 4 adds swap/MSC scheduling. */
	info.version = 4;
	info.ScheduleSwap = sna_dri2_schedule_swap;
	info.GetMSC = sna_dri2_get_msc;
	info.ScheduleWaitMSC = sna_dri2_schedule_wait_msc;
	/* NOTE(review): the same name fills both driverNames slots —
	 * presumably one entry per client API; confirm against dri2.h. */
	info.numDrivers = 2;
	info.driverNames = driverNames;
	driverNames[0] = info.driverName;
	driverNames[1] = info.driverName;
#endif

#if DRI2INFOREC_VERSION >= 6
	/* Version 6 adds swap-limit control, needed for triple buffering. */
	if (xorg_can_triple_buffer(sna)) {
		info.version = 6;
		info.SwapLimitValidate = sna_dri2_swap_limit_validate;
		info.ReuseBufferNotify = sna_dri2_reuse_buffer;
	}
#endif

#if USE_ASYNC_SWAP
	/* Version 10 adds the scheduleSwap0 capability flag. */
	info.version = 10;
	info.scheduleSwap0 = 1;
#endif

	return DRI2ScreenInit(screen, &info);
}
3332
/* Unregister DRI2 from the screen at teardown; counterpart to
 * sna_dri2_open().  The sna argument is unused beyond debug logging. */
void sna_dri2_close(struct sna *sna, ScreenPtr screen)
{
	DBG(("%s()\n", __FUNCTION__));
	DRI2CloseScreen(screen);
}
3338