1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission.  The copyright holders make no representations
11 * about the suitability of this software for any purpose.  It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22
23#ifdef HAVE_CONFIG_H
24#include "config.h"
25#endif
26
27#include <sys/types.h>
28#include <fcntl.h>
29#include <unistd.h>
30#include <sys/poll.h>
31#include <errno.h>
32#include <xf86drm.h>
33
34#include "sna.h"
35
36#include <xf86.h>
37#include <present.h>
38
39static present_screen_info_rec present_info;
40
/* State for one queued Present vblank wait or pageflip.
 * info_alloc() places one uint64_t directly after the struct, used as
 * the initial (embedded) event_id storage. */
struct sna_present_event {
	xf86CrtcPtr crtc;	/* may carry tag bit 0 — see mark_crtc()/unmask_crtc() */
	struct sna *sna;
	struct list link;	/* entry in the per-CRTC vblank queue */
	uint64_t *event_id;	/* Present event ids coalesced onto this wait */
	uint64_t target_msc;	/* vblank count at which to complete */
	int n_event_id;		/* number of valid entries in event_id[] */
	bool queued:1;		/* a hw/fake vblank wait is in flight */
	bool active:1;		/* event armed; completions are expected */
};
51
52static void sna_present_unflip(ScreenPtr screen, uint64_t event_id);
53static bool sna_present_queue(struct sna_present_event *info,
54			      uint64_t last_msc);
55
/* Recover the event pointer from vblank user-data by stripping the two
 * low-order tag bits (bit 1 is set by MARK_PRESENT()). */
static inline struct sna_present_event *
to_present_event(uintptr_t  data)
{
	uintptr_t untagged = data & ~(uintptr_t)3;
	return (struct sna_present_event *)untagged;
}
61
62static struct sna_present_event *info_alloc(struct sna *sna)
63{
64	struct sna_present_event *info;
65
66	info = sna->present.freed_info;
67	if (info) {
68		sna->present.freed_info = NULL;
69		return info;
70	}
71
72	return malloc(sizeof(struct sna_present_event) + sizeof(uint64_t));
73}
74
75static void info_free(struct sna_present_event *info)
76{
77	struct sna *sna = info->sna;
78
79	if (sna->present.freed_info)
80		free(sna->present.freed_info);
81
82	sna->present.freed_info = info;
83}
84
/* True when @msc has not yet reached @target, using serial-number
 * (wrap-safe) comparison on the 64-bit media stream counter. */
static inline bool msc_before(uint64_t msc, uint64_t target)
{
	int64_t distance = (int64_t)(msc - target);
	return distance < 0;
}
89
/* Tag bit 1 marks vblank user-data as a Present event; to_present_event()
 * strips the low two tag bits when recovering the pointer. */
#define MARK_PRESENT(x) ((void *)((uintptr_t)(x) | 2))
91
92static inline xf86CrtcPtr unmask_crtc(xf86CrtcPtr crtc)
93{
94	return (xf86CrtcPtr)((uintptr_t)crtc & ~1);
95}
96
97static inline xf86CrtcPtr mark_crtc(xf86CrtcPtr crtc)
98{
99	return (xf86CrtcPtr)((uintptr_t)crtc | 1);
100}
101
102static inline bool has_vblank(xf86CrtcPtr crtc)
103{
104	return (uintptr_t)crtc & 1;
105}
106
107static inline int pipe_from_crtc(RRCrtcPtr crtc)
108{
109	return crtc ? sna_crtc_pipe(crtc->devPrivate) : -1;
110}
111
112static uint32_t pipe_select(int pipe)
113{
114	if (pipe > 1)
115		return pipe << DRM_VBLANK_HIGH_CRTC_SHIFT;
116	else if (pipe > 0)
117		return DRM_VBLANK_SECONDARY;
118	else
119		return 0;
120}
121
/* Issue DRM_IOCTL_WAIT_VBLANK for @pipe.  Returns drmIoctl()'s result
 * (0 on success).  Note: mutates vbl->request.type by or-ing in the
 * pipe selector, so callers must re-initialise vbl before reuse. */
static inline int sna_wait_vblank(struct sna *sna, union drm_wait_vblank *vbl, int pipe)
{
	DBG(("%s(pipe=%d, waiting until seq=%u%s)\n",
	     __FUNCTION__, pipe, vbl->request.sequence,
	     vbl->request.type & DRM_VBLANK_RELATIVE ? " [relative]" : ""));
	vbl->request.type |= pipe_select(pipe);
	return drmIoctl(sna->kgem.fd, DRM_IOCTL_WAIT_VBLANK, vbl);
}
130
131static uint64_t gettime_ust64(void)
132{
133	struct timespec tv;
134
135	if (clock_gettime(CLOCK_MONOTONIC, &tv))
136		return GetTimeInMicros();
137
138	return ust64(tv.tv_sec, tv.tv_nsec / 1000);
139}
140
141static void vblank_complete(struct sna_present_event *info,
142			    uint64_t ust, uint64_t msc)
143{
144	struct list * const q = sna_crtc_vblank_queue(info->crtc);
145	int n;
146
147	do {
148		assert(sna_crtc_vblank_queue(info->crtc) == q);
149
150		if (msc_before(msc, info->target_msc)) {
151			DBG(("%s: event=%d too early, now %lld, expected %lld\n",
152			     __FUNCTION__,
153			     info->event_id[0],
154			     (long long)msc, (long long)info->target_msc));
155			if (sna_present_queue(info, msc))
156				return;
157		}
158
159		DBG(("%s: %d events complete\n", __FUNCTION__, info->n_event_id));
160		for (n = 0; n < info->n_event_id; n++) {
161			DBG(("%s: pipe=%d tv=%d.%06d msc=%lld (target=%lld), event=%lld complete%s\n", __FUNCTION__,
162			     sna_crtc_pipe(info->crtc),
163			     (int)(ust / 1000000), (int)(ust % 1000000),
164			     (long long)msc, (long long)info->target_msc,
165			     (long long)info->event_id[n],
166			     info->target_msc && msc == (uint32_t)info->target_msc ? "" : ": MISS"));
167			present_event_notify(info->event_id[n], ust, msc);
168		}
169		if (info->n_event_id > 1)
170			free(info->event_id);
171
172		_list_del(&info->link);
173		info_free(info);
174
175		info = list_entry(info->link.next, typeof(*info), link);
176	} while (q != &info->link && !info->queued);
177}
178
/* Estimate the delay in milliseconds until @crtc reaches @target msc.
 *
 * The whole-frame part uses the mode's frame period
 * (VTotal * HTotal / Clock — Clock is in kHz, giving ms per frame).
 * When more than one frame remains, one frame is deliberately left off
 * so the final frame can be handled by the hw vblank; within the final
 * frame the time already elapsed since the last swap is subtracted so
 * the timer fires close to the expected vblank.
 */
static uint32_t msc_to_delay(xf86CrtcPtr crtc, uint64_t target)
{
	const DisplayModeRec *mode = &crtc->desiredMode;
	const struct ust_msc *swap = sna_crtc_last_swap(crtc);
	int64_t delay, subframe;

	assert(mode->Clock);

	delay = target - swap->msc;
	assert(delay >= 0);
	if (delay > 1) { /* try to use the hw vblank for the last frame */
		delay--;
		subframe = 0;
	} else {
		/* us since last swap, rounded to the nearest ms */
		subframe = gettime_ust64() - swap_ust(swap);
		subframe += 500;
		subframe /= 1000;
	}
	delay *= mode->VTotal * mode->HTotal / mode->Clock;
	if (subframe < delay)
		delay -= subframe;
	else
		delay = 0;

	DBG(("%s: sleep %d frames, %llu ms\n", __FUNCTION__,
	     (int)(target - swap->msc), (long long)delay));
	assert(delay >= 0);
	return MIN(delay, INT32_MAX);
}
208
209static void add_to_crtc_vblank(struct sna_present_event *info,
210				int delta)
211{
212	info->active = true;
213	if (delta == 1 && info->crtc) {
214		sna_crtc_set_vblank(info->crtc);
215		info->crtc = mark_crtc(info->crtc);
216	}
217}
218
/* OS timer callback emulating a vblank for events that could not use a
 * real DRM vblank event (see sna_fake_vblank()).
 *
 * Samples the current msc; if the target has not been reached yet it
 * (1) retries a real vblank event when within 2 frames, else
 * (2) re-arms the timer by returning a non-zero ms delay, else
 * (3) falls back to a blocking vblank wait.
 * If the sampling ioctl fails (e.g. CRTC off), completion is fudged
 * with the current time at the target msc.  Returns 0 to stop the
 * timer, freeing it, or a delay in ms to re-arm.
 */
static CARD32 sna_fake_vblank_handler(OsTimerPtr timer, CARD32 now, void *data)
{
	struct sna_present_event *info = data;
	union drm_wait_vblank vbl;
	uint64_t msc, ust;

	DBG(("%s(event=%lldx%d, now=%d)\n", __FUNCTION__, (long long)info->event_id[0], info->n_event_id, now));
	assert(info->queued);

	VG_CLEAR(vbl);
	vbl.request.type = DRM_VBLANK_RELATIVE;
	vbl.request.sequence = 0;
	if (sna_wait_vblank(info->sna, &vbl, sna_crtc_pipe(info->crtc)) == 0) {
		ust = ust64(vbl.reply.tval_sec, vbl.reply.tval_usec);
		msc = sna_crtc_record_vblank(info->crtc, &vbl);
		DBG(("%s: event=%lld, target msc=%lld, now %lld\n",
		     __FUNCTION__, (long long)info->event_id[0], (long long)info->target_msc, (long long)msc));
		if (msc_before(msc, info->target_msc)) {
			int delta = info->target_msc - msc;
			uint32_t delay;

			DBG(("%s: too early, requeuing delta=%d\n", __FUNCTION__, delta));
			assert(info->target_msc - msc < 1ull<<31);
			if (delta <= 2) {
				/* Close enough: hand off to a real vblank event. */
				vbl.request.type = DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT;
				vbl.request.sequence = info->target_msc;
				vbl.request.signal = (uintptr_t)MARK_PRESENT(info);
				if (sna_wait_vblank(info->sna, &vbl, sna_crtc_pipe(info->crtc)) == 0) {
					DBG(("%s: scheduled new vblank event for %lld\n", __FUNCTION__, (long long)info->target_msc));
					add_to_crtc_vblank(info, delta);
					free(timer);
					return 0;
				}
			}

			delay = msc_to_delay(info->crtc, info->target_msc);
			if (delay) {
				DBG(("%s: requeueing timer for %dms delay\n", __FUNCTION__, delay));
				/* Non-zero return re-arms this timer. */
				return delay;
			}

			/* As a last resort use a blocking wait.
			 * Less than a millisecond for (hopefully) a rare case.
			 */
			DBG(("%s: blocking wait!\n", __FUNCTION__));
			vbl.request.type = DRM_VBLANK_ABSOLUTE;
			vbl.request.sequence = info->target_msc;
			if (sna_wait_vblank(info->sna, &vbl, sna_crtc_pipe(info->crtc)) == 0) {
				ust = ust64(vbl.reply.tval_sec, vbl.reply.tval_usec);
				msc = sna_crtc_record_vblank(info->crtc, &vbl);
			} else {
				DBG(("%s: blocking wait failed, fudging\n",
				     __FUNCTION__));
				goto fixup;
			}
		}
	} else {
fixup:
		/* Sampling failed (CRTC off?): pretend the target was hit
		 * right now.  Note: goto fixup above jumps into this else
		 * branch deliberately to share the fudged completion. */
		ust = gettime_ust64();
		msc = info->target_msc;
		DBG(("%s: event=%lld, CRTC OFF, target msc=%lld, was %lld (off)\n",
		     __FUNCTION__, (long long)info->event_id[0], (long long)info->target_msc, (long long)sna_crtc_last_swap(info->crtc)->msc));
	}

	vblank_complete(info, ust, msc);
	free(timer);
	return 0;
}
287
288static bool sna_fake_vblank(struct sna_present_event *info)
289{
290	const struct ust_msc *swap = sna_crtc_last_swap(info->crtc);
291	uint32_t delay;
292
293	if (msc_before(swap->msc, info->target_msc))
294		delay = msc_to_delay(info->crtc, info->target_msc);
295	else
296		delay = 0;
297
298	DBG(("%s(event=%lldx%d, target_msc=%lld, msc=%lld, delay=%ums)\n",
299	     __FUNCTION__, (long long)info->event_id[0], info->n_event_id,
300	     (long long)info->target_msc, (long long)swap->msc, delay));
301	if (delay == 0) {
302		uint64_t ust, msc;
303
304		if (msc_before(swap->msc, info->target_msc)) {
305			/* Fixup and pretend it completed immediately */
306			msc = info->target_msc;
307			ust = gettime_ust64();
308		} else {
309			msc = swap->msc;
310			ust = swap_ust(swap);
311		}
312
313		vblank_complete(info, ust, msc);
314		return true;
315	}
316
317	return TimerSet(NULL, 0, delay, sna_fake_vblank_handler, info);
318}
319
/* Arm a wait for info->target_msc given the CRTC is currently at
 * @last_msc.  Within 2 frames a real DRM vblank event is requested;
 * further out (or if the ioctl fails) a timer-based fake vblank is
 * used instead.  Returns false only when neither could be set up;
 * on success the event is flagged as queued.
 */
static bool sna_present_queue(struct sna_present_event *info,
			      uint64_t last_msc)
{
	union drm_wait_vblank vbl;
	int delta = info->target_msc - last_msc;

	DBG(("%s: target msc=%llu, seq=%u (last_msc=%llu), delta=%d\n",
	     __FUNCTION__,
	     (long long)info->target_msc,
	     (unsigned)info->target_msc,
	     (long long)last_msc,
	     delta));
	assert(info->target_msc - last_msc < 1ull<<31);
	assert(delta >= 0);

	VG_CLEAR(vbl);
	vbl.request.type = DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT;
	vbl.request.sequence = info->target_msc;
	vbl.request.signal = (uintptr_t)MARK_PRESENT(info);
	/* Far-future targets go straight to the fake path to avoid
	 * holding a hw vblank open for many frames. */
	if (delta > 2 ||
	    sna_wait_vblank(info->sna, &vbl, sna_crtc_pipe(info->crtc))) {
		DBG(("%s: vblank enqueue failed, faking delta=%d\n", __FUNCTION__, delta));
		if (!sna_fake_vblank(info))
			return false;
	} else {
		add_to_crtc_vblank(info, delta);
	}

	info->queued = true;
	return true;
}
351
352static RRCrtcPtr
353sna_present_get_crtc(WindowPtr window)
354{
355	struct sna *sna = to_sna_from_drawable(&window->drawable);
356	BoxRec box;
357	xf86CrtcPtr crtc;
358
359	DBG(("%s: window=%ld (pixmap=%ld), box=(%d, %d)x(%d, %d)\n",
360	     __FUNCTION__, window->drawable.id, get_window_pixmap(window)->drawable.serialNumber,
361	     window->drawable.x, window->drawable.y,
362	     window->drawable.width, window->drawable.height));
363
364	box.x1 = window->drawable.x;
365	box.y1 = window->drawable.y;
366	box.x2 = box.x1 + window->drawable.width;
367	box.y2 = box.y1 + window->drawable.height;
368
369	crtc = sna_covering_crtc(sna, &box, NULL);
370	if (crtc)
371		return crtc->randr_crtc;
372
373	return NULL;
374}
375
/* Queue a zero-notification vblank event at @msc (n_event_id == 0) so
 * the per-CRTC vblank bookkeeping stays alive for one more frame.
 * Skipped when any event is already queued for that msc.  Allocation
 * or ioctl failure is silently ignored — a keepalive is best-effort.
 */
static void add_keepalive(struct sna *sna, xf86CrtcPtr crtc, uint64_t msc)
{
	struct list *q = sna_crtc_vblank_queue(crtc);
	struct sna_present_event *info, *tmp;
	union drm_wait_vblank vbl;

	/* Find the insertion point keeping the queue sorted by target_msc.
	 * If the loop runs to completion, &tmp->link is effectively the
	 * head sentinel and list_add_tail() appends at the end. */
	list_for_each_entry(tmp, q, link) {
		if (tmp->target_msc == msc) {
			DBG(("%s: vblank already queued for target_msc=%lld\n",
			     __FUNCTION__, (long long)msc));
			return;
		}

		if ((int64_t)(tmp->target_msc - msc) > 0)
			break;
	}

	DBG(("%s: adding keepalive for target_msc=%lld\n",
	     __FUNCTION__, (long long)msc));

	info = info_alloc(sna);
	if (!info)
		return;

	info->crtc = crtc;
	info->sna = sna;
	info->target_msc = msc;
	info->event_id = (uint64_t *)(info + 1);
	info->n_event_id = 0;

	VG_CLEAR(vbl);
	vbl.request.type = DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT;
	vbl.request.sequence = msc;
	vbl.request.signal = (uintptr_t)MARK_PRESENT(info);

	if (sna_wait_vblank(info->sna, &vbl, sna_crtc_pipe(crtc)) == 0) {
		list_add_tail(&info->link, &tmp->link);
		add_to_crtc_vblank(info, 1);
		info->queued = true;
	} else
		info_free(info);
}
418
419static int
420sna_present_get_ust_msc(RRCrtcPtr crtc, CARD64 *ust, CARD64 *msc)
421{
422	struct sna *sna = to_sna_from_screen(crtc->pScreen);
423	union drm_wait_vblank vbl;
424
425	DBG(("%s(pipe=%d)\n", __FUNCTION__, sna_crtc_pipe(crtc->devPrivate)));
426	if (sna_crtc_has_vblank(crtc->devPrivate)) {
427		DBG(("%s: vblank active, reusing last swap msc/ust\n",
428		     __FUNCTION__));
429		goto last;
430	}
431
432	VG_CLEAR(vbl);
433	vbl.request.type = DRM_VBLANK_RELATIVE;
434	vbl.request.sequence = 0;
435	if (sna_wait_vblank(sna, &vbl, sna_crtc_pipe(crtc->devPrivate)) == 0) {
436		*ust = ust64(vbl.reply.tval_sec, vbl.reply.tval_usec);
437		*msc = sna_crtc_record_vblank(crtc->devPrivate, &vbl);
438
439		add_keepalive(sna, crtc->devPrivate, *msc + 1);
440	} else {
441		const struct ust_msc *swap;
442last:
443		swap = sna_crtc_last_swap(crtc->devPrivate);
444		*ust = swap_ust(swap);
445		*msc = swap->msc;
446	}
447
448	DBG(("%s: pipe=%d, tv=%d.%06d seq=%d msc=%lld\n", __FUNCTION__,
449	     sna_crtc_pipe(crtc->devPrivate),
450	     (int)(*ust / 1000000), (int)(*ust % 1000000),
451	     vbl.reply.sequence, (long long)*msc));
452
453	return Success;
454}
455
/* DRM vblank event handler for Present events (user_data tagged via
 * MARK_PRESENT).  Ignores events that arrive before the wait was
 * activated, drops the one-shot vblank reference taken for next-frame
 * waits (see add_to_crtc_vblank), then completes the event(s) using
 * the kernel-provided timestamp.
 */
void
sna_present_vblank_handler(struct drm_event_vblank *event)
{
	struct sna_present_event *info = to_present_event(event->user_data);
	uint64_t msc;

	if (!info->active) {
		DBG(("%s: arrived unexpectedly early (not active)\n", __FUNCTION__));
		assert(!has_vblank(info->crtc));
		return;
	}

	if (has_vblank(info->crtc)) {
		DBG(("%s: clearing immediate flag\n", __FUNCTION__));
		/* Untag the CRTC pointer and release the vblank reference. */
		info->crtc = unmask_crtc(info->crtc);
		sna_crtc_clear_vblank(info->crtc);
	}

	msc = sna_crtc_record_event(info->crtc, event);

	vblank_complete(info, ust64(event->tv_sec, event->tv_usec), msc);
}
478
/* Present hook: notify @event_id once @crtc reaches vblank count @msc.
 *
 * Targets already passed complete immediately from the last recorded
 * swap.  Otherwise the request is coalesced into the per-CRTC vblank
 * queue (kept sorted by target msc): a wait on the same msc absorbs the
 * new event id; a new entry is inserted and, if it becomes the queue
 * head, armed via sna_present_queue().  Returns Success, BadValue
 * (target out of range) or BadAlloc.
 */
static int
sna_present_queue_vblank(RRCrtcPtr crtc, uint64_t event_id, uint64_t msc)
{
	struct sna *sna = to_sna_from_screen(crtc->pScreen);
	struct sna_present_event *info, *tmp;
	const struct ust_msc *swap;
	struct list *q;

	if (!sna_crtc_is_on(crtc->devPrivate))
		return BadAlloc;

	swap = sna_crtc_last_swap(crtc->devPrivate);
	DBG(("%s(pipe=%d, event=%lld, msc=%lld, last swap=%lld)\n",
	     __FUNCTION__, sna_crtc_pipe(crtc->devPrivate),
	     (long long)event_id, (long long)msc, (long long)swap->msc));

	/* Target already reached — complete right away.  (warn_unless()
	 * evaluates true when its condition fails; cf. the range check
	 * below which returns BadValue on failure.) */
	if (warn_unless((int64_t)(msc - swap->msc) >= 0)) {
		DBG(("%s: pipe=%d tv=%d.%06d msc=%lld (target=%lld), event=%lld complete\n", __FUNCTION__,
		     sna_crtc_pipe(crtc->devPrivate),
		     swap->tv_sec, swap->tv_usec,
		     (long long)swap->msc, (long long)msc,
		     (long long)event_id));
		present_event_notify(event_id, swap_ust(swap), swap->msc);
		return Success;
	}
	if (warn_unless(msc - swap->msc < 1ull<<31))
		return BadValue;

	/* Walk the sorted queue: coalesce onto an existing wait for the
	 * same msc, or stop at the insertion point. */
	q = sna_crtc_vblank_queue(crtc->devPrivate);
	list_for_each_entry(tmp, q, link) {
		if (tmp->target_msc == msc) {
			uint64_t *events = tmp->event_id;

			/* The id array grows by doubling each time the
			 * count hits a power of two.  The very first slot
			 * is embedded in the allocation, hence the
			 * n_event_id != 1 guard before free(). */
			if (tmp->n_event_id &&
			    is_power_of_two(tmp->n_event_id)) {
				events = malloc(2*sizeof(uint64_t)*tmp->n_event_id);
				if (events == NULL)
					return BadAlloc;

				memcpy(events,
				       tmp->event_id,
				       tmp->n_event_id*sizeof(uint64_t));
				if (tmp->n_event_id != 1)
					free(tmp->event_id);
				tmp->event_id = events;
			}

			DBG(("%s: appending event=%lld to vblank %lld x %d\n",
			     __FUNCTION__, (long long)event_id, (long long)msc, tmp->n_event_id+1));
			events[tmp->n_event_id++] = event_id;
			return Success;
		}
		if ((int64_t)(tmp->target_msc - msc) > 0) {
			DBG(("%s: previous target_msc=%lld invalid for coalescing\n",
			     __FUNCTION__, (long long)tmp->target_msc));
			break;
		}
	}

	info = info_alloc(sna);
	if (info == NULL)
		return BadAlloc;

	info->crtc = crtc->devPrivate;
	info->sna = sna;
	info->target_msc = msc;
	info->event_id = (uint64_t *)(info + 1);	/* embedded first slot */
	info->event_id[0] = event_id;
	info->n_event_id = 1;
	list_add_tail(&info->link, &tmp->link);	/* insert before tmp */
	info->queued = false;
	info->active = false;

	/* Only the queue head arms the hw/fake vblank; later entries are
	 * processed (or requeued) when the head fires in vblank_complete(). */
	if (info->link.prev == q && !sna_present_queue(info, swap->msc)) {
		list_del(&info->link);
		info_free(info);
		return BadAlloc;
	}

	return Success;
}
560
/* Present hook: abort a queued vblank wait.  Only logs — the pending
 * event is left in place and will still fire.  NOTE(review): this
 * relies on the mid-layer tolerating notifications for aborted ids. */
static void
sna_present_abort_vblank(RRCrtcPtr crtc, uint64_t event_id, uint64_t msc)
{
	DBG(("%s(pipe=%d, event=%lld, msc=%lld)\n",
	     __FUNCTION__, pipe_from_crtc(crtc),
	     (long long)event_id, (long long)msc));
}
568
/* Present hook: flush pending drawing for @window — intentionally a
 * no-op in this driver. */
static void
sna_present_flush(WindowPtr window)
{
}
573
574static bool
575check_flip__crtc(struct sna *sna,
576		 RRCrtcPtr crtc)
577{
578	if (!sna_crtc_is_on(crtc->devPrivate)) {
579		DBG(("%s: CRTC off\n", __FUNCTION__));
580		return false;
581	}
582
583	assert(sna->scrn->vtSema);
584
585	if (!sna->mode.front_active) {
586		DBG(("%s: DPMS off, no flips\n", __FUNCTION__));
587		return FALSE;
588	}
589
590	if (sna->mode.rr_active) {
591		DBG(("%s: RandR transformation active\n", __FUNCTION__));
592		return false;
593	}
594
595	return true;
596}
597
598static Bool
599sna_present_check_flip(RRCrtcPtr crtc,
600		       WindowPtr window,
601		       PixmapPtr pixmap,
602		       Bool sync_flip)
603{
604	struct sna *sna = to_sna_from_pixmap(pixmap);
605	struct sna_pixmap *flip;
606
607	DBG(("%s(pipe=%d, pixmap=%ld, sync_flip=%d)\n",
608	     __FUNCTION__,
609	     pipe_from_crtc(crtc),
610	     pixmap->drawable.serialNumber,
611	     sync_flip));
612
613	if (!sna->scrn->vtSema) {
614		DBG(("%s: VT switched away, no flips\n", __FUNCTION__));
615		return FALSE;
616	}
617
618	if (sna->flags & SNA_NO_FLIP) {
619		DBG(("%s: flips not suported\n", __FUNCTION__));
620		return FALSE;
621	}
622
623	if (sync_flip) {
624		if ((sna->flags & SNA_HAS_FLIP) == 0) {
625			DBG(("%s: sync flips not suported\n", __FUNCTION__));
626			return FALSE;
627		}
628	} else {
629		if ((sna->flags & SNA_HAS_ASYNC_FLIP) == 0) {
630			DBG(("%s: async flips not suported\n", __FUNCTION__));
631			return FALSE;
632		}
633	}
634
635	if (!check_flip__crtc(sna, crtc)) {
636		DBG(("%s: flip invalid for CRTC\n", __FUNCTION__));
637		return FALSE;
638	}
639
640	flip = sna_pixmap(pixmap);
641	if (flip == NULL) {
642		DBG(("%s: unattached pixmap\n", __FUNCTION__));
643		return FALSE;
644	}
645
646	if (flip->cpu_bo && IS_STATIC_PTR(flip->ptr)) {
647		DBG(("%s: SHM pixmap\n", __FUNCTION__));
648		return FALSE;
649	}
650
651	if (flip->pinned) {
652		assert(flip->gpu_bo);
653		if (sna->flags & SNA_LINEAR_FB) {
654			if (flip->gpu_bo->tiling != I915_TILING_NONE) {
655				DBG(("%s: pined bo, tilng=%d needs NONE\n",
656				     __FUNCTION__, flip->gpu_bo->tiling));
657				return FALSE;
658			}
659		} else {
660			if (!sna->kgem.can_scanout_y &&
661			    flip->gpu_bo->tiling == I915_TILING_Y) {
662				DBG(("%s: pined bo, tilng=%d and can't scanout Y\n",
663				     __FUNCTION__, flip->gpu_bo->tiling));
664				return FALSE;
665			}
666		}
667
668		if (flip->gpu_bo->pitch & 63) {
669			DBG(("%s: pined bo, bad pitch=%d\n",
670			     __FUNCTION__, flip->gpu_bo->pitch));
671			return FALSE;
672		}
673	}
674
675	return TRUE;
676}
677
/* Issue an immediate (unsynchronised) pageflip and notify Present right
 * away.  On failure the Async capability is withdrawn so the mid-layer
 * stops requesting it.  Note: no kms completion handler is installed —
 * the notification carries the current time, not the actual flip time.
 */
static Bool
flip__async(struct sna *sna,
	    RRCrtcPtr crtc,
	    uint64_t event_id,
	    uint64_t target_msc,
	    struct kgem_bo *bo)
{
	DBG(("%s(pipe=%d, event=%lld, handle=%d)\n",
	     __FUNCTION__,
	     pipe_from_crtc(crtc),
	     (long long)event_id,
	     bo->handle));

	if (!sna_page_flip(sna, bo, NULL, NULL)) {
		DBG(("%s: async pageflip failed\n", __FUNCTION__));
		present_info.capabilities &= ~PresentCapabilityAsync;
		return FALSE;
	}

	DBG(("%s: pipe=%d tv=%ld.%06d msc=%lld (target=%lld), event=%lld complete\n", __FUNCTION__,
	     pipe_from_crtc(crtc),
	     (long)(gettime_ust64() / 1000000), (int)(gettime_ust64() % 1000000),
	     crtc ? (long long)sna_crtc_last_swap(crtc->devPrivate)->msc : 0LL,
	     (long long)target_msc, (long long)event_id));
	present_event_notify(event_id, gettime_ust64(), target_msc);
	return TRUE;
}
705
/* kms pageflip completion handler, installed by flip().
 *
 * Determines the swap time — the per-CRTC last-swap record for a flip
 * on a real CRTC (presumably refreshed by the flip completion path;
 * see sna_crtc_last_swap), or the event's own timestamp/sequence when
 * there is no CRTC — then notifies Present, drops the vblank reference,
 * queues a keepalive, and replays any unflip deferred while this flip
 * was in flight.
 */
static void
present_flip_handler(struct drm_event_vblank *event, void *data)
{
	struct sna_present_event *info = data;
	struct ust_msc swap;

	DBG(("%s(sequence=%d): event=%lld\n", __FUNCTION__, event->sequence, (long long)info->event_id[0]));
	assert(info->n_event_id == 1);
	if (!info->active) {
		DBG(("%s: arrived unexpectedly early (not active)\n", __FUNCTION__));
		return;
	}

	if (info->crtc == NULL) {
		swap.tv_sec = event->tv_sec;
		swap.tv_usec = event->tv_usec;
		swap.msc = event->sequence;
	} else {
		info->crtc = unmask_crtc(info->crtc);
		swap = *sna_crtc_last_swap(info->crtc);
	}

	DBG(("%s: pipe=%d, tv=%d.%06d msc=%lld (target %lld), event=%lld complete%s\n", __FUNCTION__,
	     info->crtc ? sna_crtc_pipe(info->crtc) : -1,
	     swap.tv_sec, swap.tv_usec, (long long)swap.msc,
	     (long long)info->target_msc,
	     (long long)info->event_id[0],
	     info->target_msc && info->target_msc == swap.msc ? "" : ": MISS"));
	present_event_notify(info->event_id[0], swap_ust(&swap), swap.msc);
	if (info->crtc) {
		sna_crtc_clear_vblank(info->crtc);
		if (!sna_crtc_has_vblank(info->crtc))
			add_keepalive(info->sna, info->crtc, swap.msc + 1);
	}

	if (info->sna->present.unflip) {
		DBG(("%s: executing queued unflip (event=%lld)\n", __FUNCTION__, (long long)info->sna->present.unflip));
		sna_present_unflip(xf86ScrnToScreen(info->sna->scrn),
				   info->sna->present.unflip);
		info->sna->present.unflip = 0;
	}
	info_free(info);
}
749
750static Bool
751flip(struct sna *sna,
752     RRCrtcPtr crtc,
753     uint64_t event_id,
754     uint64_t target_msc,
755     struct kgem_bo *bo)
756{
757	struct sna_present_event *info;
758
759	DBG(("%s(pipe=%d, event=%lld, handle=%d)\n",
760	     __FUNCTION__,
761	     pipe_from_crtc(crtc),
762	     (long long)event_id,
763	     bo->handle));
764
765	info = info_alloc(sna);
766	if (info == NULL)
767		return FALSE;
768
769	info->crtc = crtc ? crtc->devPrivate : NULL;
770	info->sna = sna;
771	info->event_id = (uint64_t *)(info + 1);
772	info->event_id[0] = event_id;
773	info->n_event_id = 1;
774	info->target_msc = target_msc;
775	info->active = false;
776
777	if (!sna_page_flip(sna, bo, present_flip_handler, info)) {
778		DBG(("%s: pageflip failed\n", __FUNCTION__));
779		info_free(info);
780		return FALSE;
781	}
782
783	add_to_crtc_vblank(info, 1);
784	return TRUE;
785}
786
/* Resolve @pixmap to a GPU bo suitable for scanout, or NULL on failure.
 *
 * Forces the pixmap onto the GPU.  If its bo is not yet a scanout and
 * conditions allow (llc, not wedged, not pinned), the contents are
 * migrated into a freshly allocated scanout-capable bo.  Finally the
 * scanout constraints are enforced: linear when SNA_LINEAR_FB is set,
 * no Y-tiling on hw that cannot scan it out, and a 64-byte-aligned
 * pitch.
 */
static struct kgem_bo *
get_flip_bo(PixmapPtr pixmap)
{
	struct sna *sna = to_sna_from_pixmap(pixmap);
	struct sna_pixmap *priv;

	DBG(("%s(pixmap=%ld)\n", __FUNCTION__, pixmap->drawable.serialNumber));

	priv = sna_pixmap_move_to_gpu(pixmap, MOVE_READ | __MOVE_SCANOUT | __MOVE_FORCE);
	if (priv == NULL) {
		DBG(("%s: cannot force pixmap to the GPU\n", __FUNCTION__));
		return NULL;
	}

	if (priv->gpu_bo->scanout)
		return priv->gpu_bo;

	/* Try to swap the backing storage for a scanout-capable bo,
	 * copying the current contents across; failure here just leaves
	 * the original bo in place for the checks below. */
	if (sna->kgem.has_llc && !wedged(sna) && !priv->pinned) {
		struct kgem_bo *bo;
		uint32_t tiling;

		tiling = I915_TILING_NONE;
		if ((sna->flags & SNA_LINEAR_FB) == 0)
			tiling = I915_TILING_X;

		bo = kgem_create_2d(&sna->kgem,
				    pixmap->drawable.width,
				    pixmap->drawable.height,
				    pixmap->drawable.bitsPerPixel,
				    tiling, CREATE_SCANOUT | CREATE_CACHED);
		if (bo) {
			BoxRec box;

			box.x1 = box.y1 = 0;
			box.x2 = pixmap->drawable.width;
			box.y2 = pixmap->drawable.height;

			if (sna->render.copy_boxes(sna, GXcopy,
						   &pixmap->drawable, priv->gpu_bo, 0, 0,
						   &pixmap->drawable, bo, 0, 0,
						   &box, 1, 0)) {
				sna_pixmap_unmap(pixmap, priv);
				kgem_bo_destroy(&sna->kgem, priv->gpu_bo);

				priv->gpu_bo = bo;
			} else
				kgem_bo_destroy(&sna->kgem, bo);
		}
	}

	if (sna->flags & SNA_LINEAR_FB &&
	    priv->gpu_bo->tiling &&
	    !sna_pixmap_change_tiling(pixmap, I915_TILING_NONE)) {
		DBG(("%s: invalid tiling for scanout, user requires linear\n", __FUNCTION__));
		return NULL;
	}

	if (priv->gpu_bo->tiling == I915_TILING_Y &&
	    !sna->kgem.can_scanout_y &&
	    !sna_pixmap_change_tiling(pixmap, I915_TILING_X)) {
		DBG(("%s: invalid Y-tiling, cannot convert\n", __FUNCTION__));
		return NULL;
	}

	if (priv->gpu_bo->pitch & 63) {
		DBG(("%s: invalid pitch, no conversion\n", __FUNCTION__));
		return NULL;
	}

	return priv->gpu_bo;
}
858
/* Present hook: flip @pixmap onto @crtc at @target_msc.
 *
 * Disables SNA's own TearFree shadowing while Present drives the
 * flipping (restored in sna_present_unflip), drains flip completions
 * still pending on the drm fd, then issues either a vblank-synchronised
 * flip with a completion event or an immediate async flip.
 */
static Bool
sna_present_flip(RRCrtcPtr crtc,
		 uint64_t event_id,
		 uint64_t target_msc,
		 PixmapPtr pixmap,
		 Bool sync_flip)
{
	struct sna *sna = to_sna_from_pixmap(pixmap);
	struct kgem_bo *bo;

	DBG(("%s(pipe=%d, event=%lld, msc=%lld, pixmap=%ld, sync?=%d)\n",
	     __FUNCTION__,
	     pipe_from_crtc(crtc),
	     (long long)event_id,
	     (long long)target_msc,
	     pixmap->drawable.serialNumber, sync_flip));

	if (!check_flip__crtc(sna, crtc)) {
		DBG(("%s: flip invalid for CRTC\n", __FUNCTION__));
		return FALSE;
	}

	assert(sna->present.unflip == 0);

	if (sna->flags & SNA_TEAR_FREE) {
		DBG(("%s: disabling TearFree (was %s) in favour of Present flips\n",
		     __FUNCTION__, sna->mode.shadow_enabled ? "enabled" : "disabled"));
		sna->mode.shadow_enabled = false;
	}
	assert(!sna->mode.shadow_enabled);

	if (sna->mode.flip_active) {
		struct pollfd pfd;

		/* Drain completions already readable on the drm fd; if a
		 * flip is still outstanding afterwards we cannot queue
		 * another, so fail this request. */
		DBG(("%s: flips still pending, stalling\n", __FUNCTION__));
		pfd.fd = sna->kgem.fd;
		pfd.events = POLLIN;
		while (poll(&pfd, 1, 0) == 1)
			sna_mode_wakeup(sna);
		if (sna->mode.flip_active)
			return FALSE;
	}

	bo = get_flip_bo(pixmap);
	if (bo == NULL) {
		DBG(("%s: flip invalid bo\n", __FUNCTION__));
		return FALSE;
	}

	if (sync_flip)
		return flip(sna, crtc, event_id, target_msc, bo);
	else
		return flip__async(sna, crtc, event_id, target_msc, bo);
}
913
/* Present hook: restore the screen pixmap to the scanout.
 *
 * With no active CRTC (or an active RandR transform) the unflip
 * degenerates to an immediate notification.  While flips are still in
 * flight the unflip is deferred via sna->present.unflip and replayed
 * from present_flip_handler().  TearFree is re-enabled here if it was
 * configured (it is suppressed for the duration of Present flips), and
 * failure to flip back falls through to a full modeset restore.
 */
static void
sna_present_unflip(ScreenPtr screen, uint64_t event_id)
{
	struct sna *sna = to_sna_from_screen(screen);
	struct kgem_bo *bo;

	DBG(("%s(event=%lld)\n", __FUNCTION__, (long long)event_id));
	if (sna->mode.front_active == 0 || sna->mode.rr_active) {
		const struct ust_msc *swap;

		DBG(("%s: no CRTC active, perform no-op flip\n", __FUNCTION__));

notify:
		swap = sna_crtc_last_swap(sna_primary_crtc(sna));
		DBG(("%s: pipe=%d, tv=%d.%06d msc=%lld, event=%lld complete\n", __FUNCTION__,
		     -1,
		     swap->tv_sec, swap->tv_usec, (long long)swap->msc,
		     (long long)event_id));
		present_event_notify(event_id, swap_ust(swap), swap->msc);
		return;
	}

	if (sna->mode.flip_active) {
		DBG(("%s: %d outstanding flips, queueing unflip\n", __FUNCTION__, sna->mode.flip_active));
		assert(sna->present.unflip == 0);
		sna->present.unflip = event_id;
		return;
	}

	bo = get_flip_bo(screen->GetScreenPixmap(screen));

	/* Are we unflipping after a failure that left our ScreenP in place? */
	if (!sna_needs_page_flip(sna, bo))
		goto notify;

	assert(!sna->mode.shadow_enabled);
	if (sna->flags & SNA_TEAR_FREE) {
		DBG(("%s: %s TearFree after Present flips\n",
		     __FUNCTION__, sna->mode.shadow_damage != NULL ? "enabling" : "disabling"));
		sna->mode.shadow_enabled = sna->mode.shadow_damage != NULL;
	}

	if (bo == NULL) {
reset_mode:
		DBG(("%s: failed, trying to restore original mode\n", __FUNCTION__));
		xf86SetDesiredModes(sna->scrn);
		goto notify;
	}

	assert(sna_pixmap(screen->GetScreenPixmap(screen))->pinned & PIN_SCANOUT);

	if (sna->flags & SNA_HAS_ASYNC_FLIP) {
		DBG(("%s: trying async flip restore\n", __FUNCTION__));
		if (flip__async(sna, NULL, event_id, 0, bo))
			return;
	}

	if (!flip(sna, NULL, event_id, 0, bo))
		goto reset_mode;
}
974
975void sna_present_cancel_flip(struct sna *sna)
976{
977	if (sna->present.unflip) {
978		const struct ust_msc *swap;
979
980		swap = sna_crtc_last_swap(sna_primary_crtc(sna));
981		present_event_notify(sna->present.unflip,
982				     swap_ust(swap), swap->msc);
983
984		sna->present.unflip = 0;
985	}
986}
987
/* Present mid-layer hook table.  capabilities is mutated at runtime by
 * sna_present_update() (and withdrawn in flip__async() on failure). */
static present_screen_info_rec present_info = {
	.version = PRESENT_SCREEN_INFO_VERSION,

	.get_crtc = sna_present_get_crtc,
	.get_ust_msc = sna_present_get_ust_msc,
	.queue_vblank = sna_present_queue_vblank,
	.abort_vblank = sna_present_abort_vblank,
	.flush = sna_present_flush,

	.capabilities = PresentCapabilityNone,
	.check_flip = sna_present_check_flip,
	.flip = sna_present_flip,
	.unflip = sna_present_unflip,
};
1002
1003bool sna_present_open(struct sna *sna, ScreenPtr screen)
1004{
1005	DBG(("%s(num_crtc=%d)\n", __FUNCTION__, sna->mode.num_real_crtc));
1006
1007	if (sna->mode.num_real_crtc == 0)
1008		return false;
1009
1010	sna_present_update(sna);
1011	list_init(&sna->present.vblank_queue);
1012
1013	return present_screen_init(screen, &present_info);
1014}
1015
1016void sna_present_update(struct sna *sna)
1017{
1018	if (sna->flags & SNA_HAS_ASYNC_FLIP)
1019		present_info.capabilities |= PresentCapabilityAsync;
1020	else
1021		present_info.capabilities &= ~PresentCapabilityAsync;
1022
1023	DBG(("%s: has_async_flip? %d\n", __FUNCTION__,
1024	     !!(present_info.capabilities & PresentCapabilityAsync)));
1025}
1026
/* Present teardown hook — nothing beyond the debug trace is done here. */
void sna_present_close(struct sna *sna, ScreenPtr screen)
{
	DBG(("%s()\n", __FUNCTION__));
}
1031