/* amdgpu_dri2.c, revision 504d986f */
/*
 * Copyright 2008 Kristian Høgsberg
 * Copyright 2008 Jérôme Glisse
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation on the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT.  IN NO EVENT SHALL ATI, VA LINUX SYSTEMS AND/OR
 * THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "amdgpu_drv.h"
#include "amdgpu_dri2.h"
#include "amdgpu_glamor.h"
#include "amdgpu_video.h"
#include "amdgpu_pixmap.h"

#ifdef DRI2

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>

#include <gbm.h>

#include "amdgpu_bo_helper.h"
#include "amdgpu_version.h"

#include "amdgpu_list.h"

#include <xf86Priv.h>

#if DRI2INFOREC_VERSION >= 9
#define USE_DRI2_PRIME
#endif

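/* Delay in ms (roughly one frame at 60 Hz) used when a swap or wait cannot
 * be tied to a real vblank event and is deferred via an OS timer instead.
 */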
#define FALLBACK_SWAP_DELAY 16

typedef DRI2BufferPtr BufferPtr;

struct dri2_buffer_priv {
	PixmapPtr pixmap;
	unsigned int attachment;
	unsigned int refcnt;
};

struct dri2_window_priv {
	xf86CrtcPtr crtc;
	int vblank_delta;
};

static DevPrivateKeyRec dri2_window_private_key_rec;
#define dri2_window_private_key (&dri2_window_private_key_rec)

#define get_dri2_window_priv(window) \
	((struct dri2_window_priv*) \
	 dixLookupPrivate(&(window)->devPrivates, dri2_window_private_key))

/* Get GEM flink name for a pixmap */
static Bool
amdgpu_get_flink_name(AMDGPUEntPtr pAMDGPUEnt, PixmapPtr pixmap, uint32_t *name)
{
	struct amdgpu_buffer *bo = amdgpu_get_pixmap_bo(pixmap);
	struct drm_gem_flink flink;

	if (bo && !(bo->flags & AMDGPU_BO_FLAGS_GBM) &&
	    amdgpu_bo_export(bo->bo.amdgpu,
			     amdgpu_bo_handle_type_gem_flink_name,
			     name) == 0)
		return TRUE;

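	/* Fall back for GBM-allocated BOs (or if the direct export above
	 * failed): look up a GEM handle for the pixmap and turn it into a
	 * flink name via DRM_IOCTL_GEM_FLINK.
	 */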
	if (!amdgpu_pixmap_get_handle(pixmap, &flink.handle) ||
	    ioctl(pAMDGPUEnt->fd, DRM_IOCTL_GEM_FLINK, &flink) < 0)
		return FALSE;
	*name = flink.name;
	return TRUE;
}

static BufferPtr
amdgpu_dri2_create_buffer2(ScreenPtr pScreen,
			   DrawablePtr drawable,
			   unsigned int attachment, unsigned int format)
{
	ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
	AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(pScrn);
	AMDGPUInfoPtr info = AMDGPUPTR(pScrn);
	BufferPtr buffers;
	struct dri2_buffer_priv *privates;
	PixmapPtr pixmap;
	unsigned front_width;
	unsigned aligned_width = drawable->width;
	unsigned height = drawable->height;
	Bool is_glamor_pixmap = FALSE;
	int depth;
	int cpp;

	if (format) {
		depth = format;

		switch (depth) {
		case 15:
			cpp = 2;
			break;
		case 24:
			cpp = 4;
			break;
		default:
			cpp = depth / 8;
		}
	} else {
		depth = drawable->depth;
		cpp = drawable->bitsPerPixel / 8;
	}

	front_width = pScreen->GetScreenPixmap(pScreen)->drawable.width;

	pixmap = NULL;

	if (attachment == DRI2BufferFrontLeft) {
		uint32_t handle;

		pixmap = get_drawable_pixmap(drawable);
		if (pScreen != pixmap->drawable.pScreen)
			pixmap = NULL;
		else if (info->use_glamor && !amdgpu_pixmap_get_handle(pixmap, &handle)) {
			is_glamor_pixmap = TRUE;
			aligned_width = pixmap->drawable.width;
			height = pixmap->drawable.height;
			pixmap = NULL;
		} else
			pixmap->refcnt++;
	}

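	/* Allocate a new pixmap for non-front buffers (and for glamor front
	 * buffers that don't have a GEM handle yet). A drawable as wide as
	 * the screen pixmap gets the full virtual width, presumably so its
	 * geometry stays compatible with the front buffer for buffer
	 * exchanges and page flips.
	 */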
	if (!pixmap && (is_glamor_pixmap || attachment != DRI2BufferFrontLeft)) {
		if (aligned_width == front_width)
			aligned_width = pScrn->virtualX;

		pixmap = (*pScreen->CreatePixmap) (pScreen,
						   aligned_width,
						   height,
						   depth,
						   AMDGPU_CREATE_PIXMAP_DRI2);
	}

	buffers = calloc(1, sizeof *buffers);
	if (buffers == NULL)
		goto error;

	if (pixmap) {
		if (is_glamor_pixmap) {
			pixmap = amdgpu_glamor_set_pixmap_bo(drawable, pixmap);
			pixmap->refcnt++;
		}

		if (!amdgpu_get_flink_name(pAMDGPUEnt, pixmap, &buffers->name))
			goto error;
	}

	privates = calloc(1, sizeof(struct dri2_buffer_priv));
	if (privates == NULL)
		goto error;

	buffers->attachment = attachment;
	if (pixmap) {
		buffers->pitch = pixmap->devKind;
		buffers->cpp = cpp;
	}
	buffers->driverPrivate = privates;
	buffers->format = format;
	buffers->flags = 0;	/* not tiled */
	privates->pixmap = pixmap;
	privates->attachment = attachment;
	privates->refcnt = 1;

	return buffers;

error:
	free(buffers);
	if (pixmap)
		(*pScreen->DestroyPixmap) (pixmap);
	return NULL;
}

DRI2BufferPtr
amdgpu_dri2_create_buffer(DrawablePtr pDraw, unsigned int attachment,
			  unsigned int format)
{
	return amdgpu_dri2_create_buffer2(pDraw->pScreen, pDraw,
					  attachment, format);
}

static void
amdgpu_dri2_destroy_buffer2(ScreenPtr pScreen,
			    DrawablePtr drawable, BufferPtr buffers)
{
	if (buffers) {
		struct dri2_buffer_priv *private = buffers->driverPrivate;

		/* Trying to free an already freed buffer is unlikely to end well */
		if (private->refcnt == 0) {
			ScrnInfoPtr scrn = xf86ScreenToScrn(pScreen);

			xf86DrvMsg(scrn->scrnIndex, X_WARNING,
				   "Attempted to destroy previously destroyed buffer.\
 This is a programming error\n");
			return;
		}

		private->refcnt--;
		if (private->refcnt == 0) {
			if (private->pixmap)
				(*pScreen->DestroyPixmap) (private->pixmap);

			free(buffers->driverPrivate);
			free(buffers);
		}
	}
}

void amdgpu_dri2_destroy_buffer(DrawablePtr pDraw, DRI2BufferPtr buf)
{
	amdgpu_dri2_destroy_buffer2(pDraw->pScreen, pDraw, buf);
}

static inline PixmapPtr GetDrawablePixmap(DrawablePtr drawable)
{
	if (drawable->type == DRAWABLE_PIXMAP)
		return (PixmapPtr) drawable;
	else {
		struct _Window *pWin = (struct _Window *)drawable;
		return drawable->pScreen->GetWindowPixmap(pWin);
	}
}

static void
amdgpu_dri2_copy_region2(ScreenPtr pScreen,
			 DrawablePtr drawable,
			 RegionPtr region,
			 BufferPtr dest_buffer, BufferPtr src_buffer)
{
	struct dri2_buffer_priv *src_private = src_buffer->driverPrivate;
	struct dri2_buffer_priv *dst_private = dest_buffer->driverPrivate;
	DrawablePtr src_drawable;
	DrawablePtr dst_drawable;
	RegionPtr copy_clip;
	GCPtr gc;
	Bool translate = FALSE;
	int off_x = 0, off_y = 0;

	src_drawable = &src_private->pixmap->drawable;
	dst_drawable = &dst_private->pixmap->drawable;

	if (src_private->attachment == DRI2BufferFrontLeft) {
#ifdef USE_DRI2_PRIME
		if (drawable->pScreen != pScreen) {
			src_drawable = DRI2UpdatePrime(drawable, src_buffer);
			if (!src_drawable)
				return;
		} else
#endif
			src_drawable = drawable;
	}
	if (dst_private->attachment == DRI2BufferFrontLeft) {
#ifdef USE_DRI2_PRIME
		if (drawable->pScreen != pScreen) {
			dst_drawable = DRI2UpdatePrime(drawable, dest_buffer);
			if (!dst_drawable)
				return;
			if (dst_drawable != drawable)
				translate = TRUE;
		} else
#endif
			dst_drawable = drawable;
	}

	if (translate && drawable->type == DRAWABLE_WINDOW) {
		PixmapPtr pPix = GetDrawablePixmap(drawable);

		off_x = drawable->x - pPix->screen_x;
		off_y = drawable->y - pPix->screen_y;
	}
	gc = GetScratchGC(dst_drawable->depth, pScreen);
	copy_clip = REGION_CREATE(pScreen, NULL, 0);
	REGION_COPY(pScreen, copy_clip, region);

	if (translate) {
		REGION_TRANSLATE(pScreen, copy_clip, off_x, off_y);
	}

	(*gc->funcs->ChangeClip) (gc, CT_REGION, copy_clip, 0);
	ValidateGC(dst_drawable, gc);

	(*gc->ops->CopyArea) (src_drawable, dst_drawable, gc,
			      0, 0, drawable->width, drawable->height, off_x,
			      off_y);

	FreeScratchGC(gc);
}

void
amdgpu_dri2_copy_region(DrawablePtr pDraw, RegionPtr pRegion,
			DRI2BufferPtr pDstBuffer, DRI2BufferPtr pSrcBuffer)
{
	return amdgpu_dri2_copy_region2(pDraw->pScreen, pDraw, pRegion,
					pDstBuffer, pSrcBuffer);
}

enum DRI2FrameEventType {
	DRI2_SWAP,
	DRI2_FLIP,
	DRI2_WAITMSC,
};

typedef struct _DRI2FrameEvent {
	XID drawable_id;
	ClientPtr client;
	enum DRI2FrameEventType type;
	unsigned frame;
	xf86CrtcPtr crtc;
	OsTimerPtr timer;
	uintptr_t drm_queue_seq;

	/* for swaps & flips only */
	DRI2SwapEventPtr event_complete;
	void *event_data;
	DRI2BufferPtr front;
	DRI2BufferPtr back;
} DRI2FrameEventRec, *DRI2FrameEventPtr;

static int DRI2InfoCnt;

static void amdgpu_dri2_ref_buffer(BufferPtr buffer)
{
	struct dri2_buffer_priv *private = buffer->driverPrivate;
	private->refcnt++;
}

static void amdgpu_dri2_unref_buffer(BufferPtr buffer)
{
	if (buffer) {
		struct dri2_buffer_priv *private = buffer->driverPrivate;
		amdgpu_dri2_destroy_buffer(&(private->pixmap->drawable),
					   buffer);
	}
}

static void
amdgpu_dri2_client_state_changed(CallbackListPtr * ClientStateCallback,
				 pointer data, pointer calldata)
{
	NewClientInfoRec *clientinfo = calldata;
	ClientPtr pClient = clientinfo->client;

	switch (pClient->clientState) {
	case ClientStateRetained:
	case ClientStateGone:
		amdgpu_drm_abort_client(pClient);
		break;
	default:
		break;
	}
}

/*
 * Get current frame count delta for the specified drawable and CRTC
 */
static uint32_t amdgpu_get_msc_delta(DrawablePtr pDraw, xf86CrtcPtr crtc)
{
	drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;

	if (pDraw && pDraw->type == DRAWABLE_WINDOW)
		return drmmode_crtc->interpolated_vblanks +
			get_dri2_window_priv((WindowPtr)pDraw)->vblank_delta;

	return drmmode_crtc->interpolated_vblanks;
}

/*
 * Get current frame count and timestamp of the specified CRTC
 */
static Bool amdgpu_dri2_get_crtc_msc(xf86CrtcPtr crtc, CARD64 *ust, CARD64 *msc)
{
	drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;

	if (!amdgpu_crtc_is_enabled(crtc) ||
	    drmmode_crtc_get_ust_msc(crtc, ust, msc) != Success) {
		/* CRTC is not running, extrapolate MSC and timestamp */
		ScrnInfoPtr scrn = crtc->scrn;
		AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(scrn);
		CARD64 now, delta_t, delta_seq;

		if (!drmmode_crtc->dpms_last_ust)
			return FALSE;

		if (drmmode_get_current_ust(pAMDGPUEnt->fd, &now) != 0) {
			xf86DrvMsg(scrn->scrnIndex, X_ERROR,
				   "%s cannot get current time\n", __func__);
			return FALSE;
		}

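		/* Extrapolate from the last vblank before the CRTC was turned
		 * off: dpms_last_ust/dpms_last_seq are its timestamp (µs) and
		 * MSC, dpms_last_fps the nominal refresh rate, so the elapsed
		 * time scaled by the refresh rate gives the number of frames
		 * that would have passed since then. The UST is then rounded
		 * to a whole number of frames to stay consistent with the MSC.
		 */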
		delta_t = now - drmmode_crtc->dpms_last_ust;
		delta_seq = delta_t * drmmode_crtc->dpms_last_fps;
		delta_seq /= 1000000;
		*ust = drmmode_crtc->dpms_last_ust;
		delta_t = delta_seq * 1000000;
		delta_t /= drmmode_crtc->dpms_last_fps;
		*ust += delta_t;
		*msc = drmmode_crtc->dpms_last_seq;
		*msc += delta_seq;
	}

	*msc += drmmode_crtc->interpolated_vblanks;

	return TRUE;
}

static
xf86CrtcPtr amdgpu_dri2_drawable_crtc(DrawablePtr pDraw, Bool consider_disabled)
{
	ScreenPtr pScreen = pDraw->pScreen;
	ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
	xf86CrtcPtr crtc = amdgpu_pick_best_crtc(pScrn, consider_disabled,
						 pDraw->x, pDraw->x + pDraw->width,
						 pDraw->y, pDraw->y + pDraw->height);

	if (crtc && pDraw->type == DRAWABLE_WINDOW) {
		struct dri2_window_priv *priv = get_dri2_window_priv((WindowPtr)pDraw);

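		/* The window has moved to a different CRTC: accumulate the
		 * MSC difference between the old and the new CRTC in
		 * vblank_delta, so the MSC values reported for this window
		 * stay continuous across the change.
		 */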
		if (priv->crtc && priv->crtc != crtc) {
			CARD64 ust, mscold, mscnew;

			if (amdgpu_dri2_get_crtc_msc(priv->crtc, &ust, &mscold) &&
			    amdgpu_dri2_get_crtc_msc(crtc, &ust, &mscnew))
				priv->vblank_delta += mscold - mscnew;
		}

		priv->crtc = crtc;
	}

	return crtc;
}

static void
amdgpu_dri2_flip_event_abort(xf86CrtcPtr crtc, void *event_data)
{
	AMDGPUInfoPtr info = AMDGPUPTR(crtc->scrn);

	info->drmmode.dri2_flipping = FALSE;
	free(event_data);
}

static void
amdgpu_dri2_flip_event_handler(xf86CrtcPtr crtc, uint32_t frame, uint64_t usec,
			       void *event_data)
{
	DRI2FrameEventPtr flip = event_data;
	ScrnInfoPtr scrn = crtc->scrn;
	unsigned tv_sec, tv_usec;
	DrawablePtr drawable;
	ScreenPtr screen;
	int status;
	PixmapPtr pixmap;

	status = dixLookupDrawable(&drawable, flip->drawable_id, serverClient,
				   M_ANY, DixWriteAccess);
	if (status != Success)
		goto abort;

	frame += amdgpu_get_msc_delta(drawable, crtc);

	screen = scrn->pScreen;
	pixmap = screen->GetScreenPixmap(screen);
	xf86DrvMsgVerb(scrn->scrnIndex, X_INFO, AMDGPU_LOGLEVEL_DEBUG,
		       "%s:%d fevent[%p] width %d pitch %d (/4 %d)\n",
		       __func__, __LINE__, flip, pixmap->drawable.width,
		       pixmap->devKind, pixmap->devKind / 4);

	tv_sec = usec / 1000000;
	tv_usec = usec % 1000000;

	/* We assume our flips arrive in order, so we don't check the frame */
	switch (flip->type) {
	case DRI2_SWAP:
		/* Check for too small vblank count of pageflip completion, taking wraparound
		 * into account. This usually means some defective kms pageflip completion,
		 * causing wrong (msc, ust) return values and possible visual corruption.
		 */
		if ((frame < flip->frame) && (flip->frame - frame < 5)) {
			xf86DrvMsg(scrn->scrnIndex, X_WARNING,
				   "%s: Pageflip completion event has impossible msc %u < target_msc %u\n",
				   __func__, frame, flip->frame);
			/* All-Zero values signal failure of (msc, ust) timestamping to client. */
			frame = tv_sec = tv_usec = 0;
		}

		DRI2SwapComplete(flip->client, drawable, frame, tv_sec, tv_usec,
				 DRI2_FLIP_COMPLETE, flip->event_complete,
				 flip->event_data);
		break;
	default:
		xf86DrvMsg(scrn->scrnIndex, X_WARNING,
			   "%s: unknown vblank event received\n", __func__);
		/* Unknown type */
		break;
	}

abort:
	amdgpu_dri2_flip_event_abort(crtc, event_data);
}

static Bool
amdgpu_dri2_schedule_flip(xf86CrtcPtr crtc, ClientPtr client,
			  DrawablePtr draw, DRI2BufferPtr front,
			  DRI2BufferPtr back, DRI2SwapEventPtr func,
			  void *data, unsigned int target_msc)
{
	ScrnInfoPtr scrn = crtc->scrn;
	AMDGPUInfoPtr info = AMDGPUPTR(scrn);
	struct dri2_buffer_priv *back_priv;
	DRI2FrameEventPtr flip_info;
	int ref_crtc_hw_id = drmmode_get_crtc_id(crtc);

	flip_info = calloc(1, sizeof(DRI2FrameEventRec));
	if (!flip_info)
		return FALSE;

	flip_info->drawable_id = draw->id;
	flip_info->client = client;
	flip_info->type = DRI2_SWAP;
	flip_info->event_complete = func;
	flip_info->event_data = data;
	flip_info->frame = target_msc;
	flip_info->crtc = crtc;

	xf86DrvMsgVerb(scrn->scrnIndex, X_INFO, AMDGPU_LOGLEVEL_DEBUG,
		       "%s:%d fevent[%p]\n", __func__, __LINE__, flip_info);

	/* Page flip the full screen buffer */
	back_priv = back->driverPrivate;
	if (amdgpu_do_pageflip(scrn, client, back_priv->pixmap,
			       AMDGPU_DRM_QUEUE_ID_DEFAULT, flip_info,
			       ref_crtc_hw_id,
			       amdgpu_dri2_flip_event_handler,
			       amdgpu_dri2_flip_event_abort, FLIP_VSYNC)) {
		info->drmmode.dri2_flipping = TRUE;
		return TRUE;
	}
	return FALSE;
}

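/* Re-sync the DRI2 front buffer with the drawable's current pixmap: the
 * pixmap (and therefore its BO, flink name and pitch) may have changed
 * since the front buffer was created.
 */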
static Bool update_front(DrawablePtr draw, DRI2BufferPtr front)
{
	ScreenPtr screen = draw->pScreen;
	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
	AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(scrn);
	PixmapPtr pixmap = get_drawable_pixmap(draw);
	struct dri2_buffer_priv *priv = front->driverPrivate;

	if (!amdgpu_get_flink_name(pAMDGPUEnt, pixmap, &front->name))
		return FALSE;

	(*draw->pScreen->DestroyPixmap) (priv->pixmap);
	front->pitch = pixmap->devKind;
	front->cpp = pixmap->drawable.bitsPerPixel / 8;
	priv->pixmap = pixmap;
	pixmap->refcnt++;

	return TRUE;
}

static Bool
can_exchange(ScrnInfoPtr pScrn, DrawablePtr draw,
	     DRI2BufferPtr front, DRI2BufferPtr back)
{
	struct dri2_buffer_priv *front_priv = front->driverPrivate;
	struct dri2_buffer_priv *back_priv = back->driverPrivate;
	PixmapPtr front_pixmap;
	PixmapPtr back_pixmap = back_priv->pixmap;
	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(pScrn);
	int i;

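	/* A buffer exchange only shows up on screen if the CRTCs scan out
	 * the front buffer directly; bail if any enabled CRTC uses a rotated
	 * shadow or a separate scanout buffer.
	 */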
	for (i = 0; i < xf86_config->num_crtc; i++) {
		xf86CrtcPtr crtc = xf86_config->crtc[i];
		drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;

		if (crtc->enabled &&
		    (crtc->rotatedData || drmmode_crtc->scanout[0].bo))
			return FALSE;
	}

	if (!update_front(draw, front))
		return FALSE;

	front_pixmap = front_priv->pixmap;

	if (front_pixmap->drawable.width != back_pixmap->drawable.width)
		return FALSE;

	if (front_pixmap->drawable.height != back_pixmap->drawable.height)
		return FALSE;

	if (front_pixmap->drawable.bitsPerPixel !=
	    back_pixmap->drawable.bitsPerPixel)
		return FALSE;

	if (front_pixmap->devKind != back_pixmap->devKind)
		return FALSE;

	return TRUE;
}

static Bool
can_flip(ScrnInfoPtr pScrn, DrawablePtr draw,
	 DRI2BufferPtr front, DRI2BufferPtr back)
{
	AMDGPUInfoPtr info = AMDGPUPTR(pScrn);

	return draw->type == DRAWABLE_WINDOW &&
	    info->allowPageFlip &&
	    !info->hwcursor_disabled &&
	    !info->drmmode.present_flipping &&
	    pScrn->vtSema &&
	    DRI2CanFlip(draw) && can_exchange(pScrn, draw, front, back);
}

static void
amdgpu_dri2_exchange_buffers(DrawablePtr draw, DRI2BufferPtr front,
			     DRI2BufferPtr back)
{
	struct dri2_buffer_priv *front_priv = front->driverPrivate;
	struct dri2_buffer_priv *back_priv = back->driverPrivate;
	struct amdgpu_pixmap *front_pix;
	struct amdgpu_pixmap *back_pix;
	ScreenPtr screen;
	AMDGPUInfoPtr info;
	RegionRec region;
	int tmp;

	region.extents.x1 = region.extents.y1 = 0;
	region.extents.x2 = front_priv->pixmap->drawable.width;
	region.extents.y2 = front_priv->pixmap->drawable.height;
	region.data = NULL;
	DamageRegionAppend(&front_priv->pixmap->drawable, &region);

	/* Swap BO names so DRI works */
	tmp = front->name;
	front->name = back->name;
	back->name = tmp;

	/* Swap pixmap privates */
	front_pix = amdgpu_get_pixmap_private(front_priv->pixmap);
	back_pix = amdgpu_get_pixmap_private(back_priv->pixmap);
	amdgpu_set_pixmap_private(front_priv->pixmap, back_pix);
	amdgpu_set_pixmap_private(back_priv->pixmap, front_pix);

	/* Do we need to update the Screen? */
	screen = draw->pScreen;
	info = AMDGPUPTR(xf86ScreenToScrn(screen));
	if (front_pix->bo == info->front_buffer) {
		struct amdgpu_pixmap *screen_priv =
			amdgpu_get_pixmap_private(screen->GetScreenPixmap(screen));

		amdgpu_bo_ref(back_pix->bo);
		amdgpu_bo_unref(&info->front_buffer);
		info->front_buffer = back_pix->bo;
		*screen_priv = *back_pix;
	}

	amdgpu_glamor_exchange_buffers(front_priv->pixmap, back_priv->pixmap);

	DamageRegionProcessPending(&front_priv->pixmap->drawable);
}

static void amdgpu_dri2_frame_event_abort(xf86CrtcPtr crtc, void *event_data)
{
	DRI2FrameEventPtr event = event_data;

	TimerCancel(event->timer);
	TimerFree(event->timer);
	amdgpu_dri2_unref_buffer(event->front);
	amdgpu_dri2_unref_buffer(event->back);
	free(event);
}

static void amdgpu_dri2_frame_event_handler(xf86CrtcPtr crtc, uint32_t seq,
					    uint64_t usec, void *event_data)
{
	DRI2FrameEventPtr event = event_data;
	ScrnInfoPtr scrn = crtc->scrn;
	DrawablePtr drawable;
	int status;
	int swap_type;
	BoxRec box;
	RegionRec region;

	status = dixLookupDrawable(&drawable, event->drawable_id, serverClient,
				   M_ANY, DixWriteAccess);
	if (status != Success)
		goto cleanup;

	seq += amdgpu_get_msc_delta(drawable, crtc);

	switch (event->type) {
	case DRI2_FLIP:
		if (can_flip(scrn, drawable, event->front, event->back) &&
		    amdgpu_dri2_schedule_flip(crtc,
					      event->client,
					      drawable,
					      event->front,
					      event->back,
					      event->event_complete,
					      event->event_data,
					      event->frame)) {
			amdgpu_dri2_exchange_buffers(drawable, event->front,
						     event->back);
			break;
		}
		/* else fall through to exchange/blit */
	case DRI2_SWAP:
		if (DRI2CanExchange(drawable) &&
		    can_exchange(scrn, drawable, event->front, event->back)) {
			amdgpu_dri2_exchange_buffers(drawable, event->front,
						     event->back);
			swap_type = DRI2_EXCHANGE_COMPLETE;
		} else {
			box.x1 = 0;
			box.y1 = 0;
			box.x2 = drawable->width;
			box.y2 = drawable->height;
			REGION_INIT(pScreen, &region, &box, 0);
			amdgpu_dri2_copy_region(drawable, &region, event->front,
						event->back);
			swap_type = DRI2_BLIT_COMPLETE;
		}

		DRI2SwapComplete(event->client, drawable, seq, usec / 1000000,
				 usec % 1000000, swap_type, event->event_complete,
				 event->event_data);

		break;
	case DRI2_WAITMSC:
		DRI2WaitMSCComplete(event->client, drawable, seq, usec / 1000000,
				    usec % 1000000);
		break;
	default:
		/* Unknown type */
		xf86DrvMsg(scrn->scrnIndex, X_WARNING,
			   "%s: unknown vblank event received\n", __func__);
		break;
	}

cleanup:
	amdgpu_dri2_frame_event_abort(crtc, event_data);
}

drmVBlankSeqType amdgpu_populate_vbl_request_type(xf86CrtcPtr crtc)
{
	drmVBlankSeqType type = 0;
	int crtc_id = drmmode_get_crtc_id(crtc);

	if (crtc_id == 1)
		type |= DRM_VBLANK_SECONDARY;
	else if (crtc_id > 1)
#ifdef DRM_VBLANK_HIGH_CRTC_SHIFT
		type |= (crtc_id << DRM_VBLANK_HIGH_CRTC_SHIFT) &
		    DRM_VBLANK_HIGH_CRTC_MASK;
#else
		ErrorF("amdgpu driver bug: %s called for CRTC %d > 1, but "
		       "DRM_VBLANK_HIGH_CRTC_MASK not defined at build time\n",
		       __func__, crtc_id);
#endif

	return type;
}

/*
 * This function should be called on a disabled CRTC only (i.e., CRTC
 * in DPMS-off state). It will calculate the delay necessary to reach
 * target_msc from present time if the CRTC were running.
 */
static
CARD32 amdgpu_dri2_extrapolate_msc_delay(xf86CrtcPtr crtc, CARD64 * target_msc,
					 CARD64 divisor, CARD64 remainder)
{
	drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;
	ScrnInfoPtr pScrn = crtc->scrn;
	AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(pScrn);
	int nominal_frame_rate = drmmode_crtc->dpms_last_fps;
	CARD64 last_vblank_ust = drmmode_crtc->dpms_last_ust;
	uint32_t last_vblank_seq = drmmode_crtc->dpms_last_seq;
	CARD64 now, target_time, delta_t;
	int64_t d, delta_seq;
	int ret;
	CARD32 d_ms;

	if (!last_vblank_ust) {
		*target_msc = 0;
		return FALLBACK_SWAP_DELAY;
	}
	ret = drmmode_get_current_ust(pAMDGPUEnt->fd, &now);
	if (ret) {
		xf86DrvMsg(pScrn->scrnIndex, X_ERROR,
			   "%s cannot get current time\n", __func__);
		*target_msc = 0;
		return FALLBACK_SWAP_DELAY;
	}
	delta_seq = *target_msc - last_vblank_seq;
	delta_seq *= 1000000;
	target_time = last_vblank_ust;
	target_time += delta_seq / nominal_frame_rate;
	d = target_time - now;
	if (d < 0) {
		/* we missed the event, adjust target_msc, do the divisor magic */
		CARD64 current_msc = last_vblank_seq;

		delta_t = now - last_vblank_ust;
		delta_seq = delta_t * nominal_frame_rate;
		current_msc += delta_seq / 1000000;
		current_msc &= 0xffffffff;
		if (divisor == 0) {
			*target_msc = current_msc;
			d = 0;
		} else {
			*target_msc =
			    current_msc - (current_msc % divisor) + remainder;
			if ((current_msc % divisor) >= remainder)
				*target_msc += divisor;
			*target_msc &= 0xffffffff;
			delta_seq = *target_msc - last_vblank_seq;
			delta_seq *= 1000000;
			target_time = last_vblank_ust;
			target_time += delta_seq / nominal_frame_rate;
			d = target_time - now;
		}
	}
	/*
	 * convert delay to milliseconds and add margin to prevent the client
	 * from coming back early (due to timer granularity and rounding
	 * errors) and getting the same MSC it just got
	 */
	d_ms = (CARD32) d / 1000;
	if ((CARD32) d - d_ms * 1000 > 0)
		d_ms += 2;
	else
		d_ms++;
	return d_ms;
}

/*
 * Get current interpolated frame count and frame count timestamp, based on
 * drawable's crtc.
 */
static int amdgpu_dri2_get_msc(DrawablePtr draw, CARD64 * ust, CARD64 * msc)
{
	xf86CrtcPtr crtc = amdgpu_dri2_drawable_crtc(draw, TRUE);

	/* Drawable not displayed, make up a value */
	if (crtc == NULL) {
		*ust = 0;
		*msc = 0;
		return TRUE;
	}

	if (!amdgpu_dri2_get_crtc_msc(crtc, ust, msc))
		return FALSE;

	if (draw && draw->type == DRAWABLE_WINDOW)
		*msc += get_dri2_window_priv((WindowPtr)draw)->vblank_delta;
	*msc &= 0xffffffff;
	return TRUE;
}

static
CARD32 amdgpu_dri2_deferred_event(OsTimerPtr timer, CARD32 now, pointer data)
{
	DRI2FrameEventPtr event_info = (DRI2FrameEventPtr) data;
	xf86CrtcPtr crtc = event_info->crtc;
	ScrnInfoPtr scrn;
	AMDGPUEntPtr pAMDGPUEnt;
	CARD64 drm_now;
	int ret;
	CARD64 delta_t, delta_seq, frame;
	drmmode_crtc_private_ptr drmmode_crtc;

	/*
	 * This is emulated event, so its time is current time, which we
	 * have to get in DRM-compatible form (which is a bit messy given
	 * the information that we have at this point). Can't use now argument
	 * because DRM event time may come from monotonic clock, while
	 * DIX timer facility uses real-time clock.
	 */
	if (!event_info->crtc) {
		ErrorF("%s no crtc\n", __func__);
		if (event_info->drm_queue_seq)
			amdgpu_drm_abort_entry(event_info->drm_queue_seq);
		else
			amdgpu_dri2_frame_event_abort(NULL, data);
		return 0;
	}

	scrn = crtc->scrn;
	pAMDGPUEnt = AMDGPUEntPriv(scrn);
	ret = drmmode_get_current_ust(pAMDGPUEnt->fd, &drm_now);
	if (ret) {
		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
			   "%s cannot get current time\n", __func__);
		if (event_info->drm_queue_seq)
			amdgpu_drm_queue_handler(pAMDGPUEnt->fd, 0, 0, 0,
						 (void*)event_info->drm_queue_seq);
		else
			amdgpu_dri2_frame_event_handler(crtc, 0, 0, data);
		return 0;
	}
	/*
	 * calculate the frame number from current time
	 * that would come from CRTC if it were running
	 */
	drmmode_crtc = event_info->crtc->driver_private;
	delta_t = drm_now - (CARD64) drmmode_crtc->dpms_last_ust;
	delta_seq = delta_t * drmmode_crtc->dpms_last_fps;
	delta_seq /= 1000000;
	frame = (CARD64) drmmode_crtc->dpms_last_seq + delta_seq;
	if (event_info->drm_queue_seq)
		amdgpu_drm_queue_handler(pAMDGPUEnt->fd, frame, drm_now / 1000000,
					 drm_now % 1000000,
					 (void*)event_info->drm_queue_seq);
	else
		amdgpu_dri2_frame_event_handler(crtc, frame, drm_now, data);
	return 0;
}

static
void amdgpu_dri2_schedule_event(CARD32 delay, DRI2FrameEventPtr event_info)
{
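	/* Set the timer even for a zero delay so event_info->timer is always
	 * valid for TimerCancel/TimerFree in amdgpu_dri2_frame_event_abort,
	 * but run the deferred handler immediately in that case instead of
	 * waiting for the timer to fire.
	 */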
	event_info->timer = TimerSet(NULL, 0, delay, amdgpu_dri2_deferred_event,
				     event_info);
	if (delay == 0) {
		CARD32 now = GetTimeInMillis();
		amdgpu_dri2_deferred_event(event_info->timer, now, event_info);
	}
}

/*
 * Request a DRM event when the requested conditions will be satisfied.
 *
 * We need to handle the event and ask the server to wake up the client when
 * we receive it.
 */
static int amdgpu_dri2_schedule_wait_msc(ClientPtr client, DrawablePtr draw,
					 CARD64 target_msc, CARD64 divisor,
					 CARD64 remainder)
{
	ScreenPtr screen = draw->pScreen;
	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
	AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(scrn);
	DRI2FrameEventPtr wait_info = NULL;
	uintptr_t drm_queue_seq = 0;
	xf86CrtcPtr crtc = amdgpu_dri2_drawable_crtc(draw, TRUE);
	uint32_t msc_delta;
	drmVBlank vbl;
	int ret;
	CARD64 current_msc;

	/* Truncate to match kernel interfaces; means occasional overflow
	 * misses, but that's generally not a big deal */
	target_msc &= 0xffffffff;
	divisor &= 0xffffffff;
	remainder &= 0xffffffff;

	/* Drawable not visible, return immediately */
	if (crtc == NULL)
		goto out_complete;

	msc_delta = amdgpu_get_msc_delta(draw, crtc);

	wait_info = calloc(1, sizeof(DRI2FrameEventRec));
	if (!wait_info)
		goto out_complete;

	wait_info->drawable_id = draw->id;
	wait_info->client = client;
	wait_info->type = DRI2_WAITMSC;
	wait_info->crtc = crtc;

	/*
	 * CRTC is in DPMS off state, calculate wait time from current time,
	 * target_msc and last vblank time/sequence when CRTC was turned off
	 */
	if (!amdgpu_crtc_is_enabled(crtc)) {
		CARD32 delay;
		target_msc -= msc_delta;
		delay = amdgpu_dri2_extrapolate_msc_delay(crtc, &target_msc,
							  divisor, remainder);
		amdgpu_dri2_schedule_event(delay, wait_info);
		DRI2BlockClient(client, draw);
		return TRUE;
	}

	/* Get current count */
	vbl.request.type = DRM_VBLANK_RELATIVE;
	vbl.request.type |= amdgpu_populate_vbl_request_type(crtc);
	vbl.request.sequence = 0;
	ret = drmWaitVBlank(pAMDGPUEnt->fd, &vbl);
	if (ret) {
		xf86DrvMsg(scrn->scrnIndex, X_WARNING,
			   "get vblank counter failed: %s\n", strerror(errno));
		goto out_complete;
	}

	current_msc = vbl.reply.sequence + msc_delta;
	current_msc &= 0xffffffff;

	drm_queue_seq = amdgpu_drm_queue_alloc(crtc, client, AMDGPU_DRM_QUEUE_ID_DEFAULT,
					       wait_info, amdgpu_dri2_frame_event_handler,
					       amdgpu_dri2_frame_event_abort);
	if (drm_queue_seq == AMDGPU_DRM_QUEUE_ERROR) {
		xf86DrvMsg(scrn->scrnIndex, X_WARNING,
			   "Allocating DRM queue event entry failed.\n");
		goto out_complete;
	}
	wait_info->drm_queue_seq = drm_queue_seq;

	/*
	 * If divisor is zero, or current_msc is smaller than target_msc,
	 * we just need to make sure target_msc passes before waking up the
	 * client.
	 */
	if (divisor == 0 || current_msc < target_msc) {
		/* If target_msc already reached or passed, set it to
		 * current_msc to ensure we return a reasonable value back
		 * to the caller. This keeps the client from continually
		 * sending us MSC targets from the past by forcibly updating
		 * their count on this call.
		 */
		if (current_msc >= target_msc)
			target_msc = current_msc;
		vbl.request.type = DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT;
		vbl.request.type |= amdgpu_populate_vbl_request_type(crtc);
		vbl.request.sequence = target_msc - msc_delta;
		vbl.request.signal = drm_queue_seq;
		ret = drmWaitVBlank(pAMDGPUEnt->fd, &vbl);
		if (ret) {
			xf86DrvMsg(scrn->scrnIndex, X_WARNING,
				   "get vblank counter failed: %s\n",
				   strerror(errno));
			goto out_complete;
		}

		DRI2BlockClient(client, draw);
		return TRUE;
	}

	/*
	 * If we get here, target_msc has already passed or we don't have one,
	 * so we queue an event that will satisfy the divisor/remainder equation.
	 */
	vbl.request.type = DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT;
	vbl.request.type |= amdgpu_populate_vbl_request_type(crtc);

	vbl.request.sequence = current_msc - (current_msc % divisor) +
	    remainder - msc_delta;

	/*
	 * If calculated remainder is larger than requested remainder,
	 * it means we've passed the last point where
	 * seq % divisor == remainder, so we need to wait for the next time
	 * that will happen.
	 */
	if ((current_msc % divisor) >= remainder)
		vbl.request.sequence += divisor;

	vbl.request.signal = drm_queue_seq;
	ret = drmWaitVBlank(pAMDGPUEnt->fd, &vbl);
	if (ret) {
		xf86DrvMsg(scrn->scrnIndex, X_WARNING,
			   "get vblank counter failed: %s\n", strerror(errno));
		goto out_complete;
	}

	DRI2BlockClient(client, draw);

	return TRUE;

out_complete:
	if (wait_info)
		amdgpu_dri2_deferred_event(NULL, 0, wait_info);
	return TRUE;
}

/*
 * ScheduleSwap is responsible for requesting a DRM vblank event for the
 * appropriate frame.
 *
 * In the case of a blit (e.g. for a windowed swap) or buffer exchange,
 * the vblank requested can simply be the last queued swap frame + the swap
 * interval for the drawable.
 *
 * In the case of a page flip, we request an event for the last queued swap
 * frame + swap interval - 1, since we'll need to queue the flip for the frame
 * immediately following the received event.
 *
 * The client will be blocked if it tries to perform further GL commands
 * after queueing a swap, though in the Intel case after queueing a flip, the
 * client is free to queue more commands; they'll block in the kernel if
 * they access buffers busy with the flip.
 *
 * When the swap is complete, the driver should call into the server so it
 * can send any swap complete events that have been requested.
 */
static int amdgpu_dri2_schedule_swap(ClientPtr client, DrawablePtr draw,
				     DRI2BufferPtr front, DRI2BufferPtr back,
				     CARD64 * target_msc, CARD64 divisor,
				     CARD64 remainder, DRI2SwapEventPtr func,
				     void *data)
{
	ScreenPtr screen = draw->pScreen;
	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
	AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(scrn);
	xf86CrtcPtr crtc = amdgpu_dri2_drawable_crtc(draw, TRUE);
	uint32_t msc_delta;
	drmVBlank vbl;
	int ret, flip = 0;
	DRI2FrameEventPtr swap_info = NULL;
	uintptr_t drm_queue_seq;
	CARD64 current_msc;
	BoxRec box;
	RegionRec region;

	/* Truncate to match kernel interfaces; means occasional overflow
	 * misses, but that's generally not a big deal */
	*target_msc &= 0xffffffff;
	divisor &= 0xffffffff;
	remainder &= 0xffffffff;

	/* amdgpu_dri2_frame_event_handler will get called some unknown time in the
	 * future with these buffers.  Take a reference to ensure that they won't
	 * get destroyed before then.
	 */
	amdgpu_dri2_ref_buffer(front);
	amdgpu_dri2_ref_buffer(back);

	/* either off-screen or CRTC not usable... just complete the swap */
	if (crtc == NULL)
		goto blit_fallback;

	msc_delta = amdgpu_get_msc_delta(draw, crtc);

	swap_info = calloc(1, sizeof(DRI2FrameEventRec));
	if (!swap_info)
		goto blit_fallback;

	swap_info->type = DRI2_SWAP;
	swap_info->drawable_id = draw->id;
	swap_info->client = client;
	swap_info->event_complete = func;
	swap_info->event_data = data;
	swap_info->front = front;
	swap_info->back = back;
	swap_info->crtc = crtc;

	drm_queue_seq = amdgpu_drm_queue_alloc(crtc, client, AMDGPU_DRM_QUEUE_ID_DEFAULT,
					       swap_info, amdgpu_dri2_frame_event_handler,
					       amdgpu_dri2_frame_event_abort);
	if (drm_queue_seq == AMDGPU_DRM_QUEUE_ERROR) {
		xf86DrvMsg(scrn->scrnIndex, X_WARNING,
			   "Allocating DRM queue entry failed.\n");
		goto blit_fallback;
	}
	swap_info->drm_queue_seq = drm_queue_seq;

	/*
	 * CRTC is in DPMS off state, fallback to blit, but calculate
	 * wait time from current time, target_msc and last vblank
	 * time/sequence when CRTC was turned off
	 */
	if (!amdgpu_crtc_is_enabled(crtc)) {
		CARD32 delay;
		*target_msc -= msc_delta;
		delay = amdgpu_dri2_extrapolate_msc_delay(crtc, target_msc,
							  divisor, remainder);
		*target_msc += msc_delta;
		*target_msc &= 0xffffffff;
		amdgpu_dri2_schedule_event(delay, swap_info);
		return TRUE;
	}

	/* Get current count */
	vbl.request.type = DRM_VBLANK_RELATIVE;
	vbl.request.type |= amdgpu_populate_vbl_request_type(crtc);
	vbl.request.sequence = 0;
	ret = drmWaitVBlank(pAMDGPUEnt->fd, &vbl);
	if (ret) {
		xf86DrvMsg(scrn->scrnIndex, X_WARNING,
			   "first get vblank counter failed: %s\n",
			   strerror(errno));
		goto blit_fallback;
	}

	current_msc = vbl.reply.sequence + msc_delta;
	current_msc &= 0xffffffff;

	/* Flips need to be submitted one frame before */
	if (can_flip(scrn, draw, front, back)) {
		swap_info->type = DRI2_FLIP;
		flip = 1;
	}

	/* Correct target_msc by 'flip' if swap_info->type == DRI2_FLIP.
	 * Do it early, so handling of different timing constraints
	 * for divisor, remainder and msc vs. target_msc works.
	 */
	if (*target_msc > 0)
		*target_msc -= flip;

	/*
	 * If divisor is zero, or current_msc is smaller than target_msc
	 * we just need to make sure target_msc passes before initiating
	 * the swap.
	 */
	if (divisor == 0 || current_msc < *target_msc) {
		vbl.request.type = DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT;
		/* If non-pageflipping, but blitting/exchanging, we need to use
		 * DRM_VBLANK_NEXTONMISS to avoid unreliable timestamping later
		 * on.
		 */
		if (flip == 0)
			vbl.request.type |= DRM_VBLANK_NEXTONMISS;
		vbl.request.type |= amdgpu_populate_vbl_request_type(crtc);

		/* If target_msc already reached or passed, set it to
		 * current_msc to ensure we return a reasonable value back
		 * to the caller. This makes swap_interval logic more robust.
		 */
		if (current_msc >= *target_msc)
			*target_msc = current_msc;

		vbl.request.sequence = *target_msc - msc_delta;
		vbl.request.signal = drm_queue_seq;
		ret = drmWaitVBlank(pAMDGPUEnt->fd, &vbl);
		if (ret) {
			xf86DrvMsg(scrn->scrnIndex, X_WARNING,
				   "divisor 0 get vblank counter failed: %s\n",
				   strerror(errno));
			goto blit_fallback;
		}

		*target_msc = vbl.reply.sequence + flip + msc_delta;
		*target_msc &= 0xffffffff;
		swap_info->frame = *target_msc;

		return TRUE;
	}

	/*
	 * If we get here, target_msc has already passed or we don't have one,
	 * and we need to queue an event that will satisfy the divisor/remainder
	 * equation.
	 */
	vbl.request.type = DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT;
	if (flip == 0)
		vbl.request.type |= DRM_VBLANK_NEXTONMISS;
	vbl.request.type |= amdgpu_populate_vbl_request_type(crtc);

	vbl.request.sequence = current_msc - (current_msc % divisor) +
	    remainder - msc_delta;

	/*
	 * If the calculated deadline vbl.request.sequence is smaller than
	 * or equal to current_msc, it means we've passed the last point
	 * when effective onset frame seq could satisfy
	 * seq % divisor == remainder, so we need to wait for the next time
	 * this will happen.
	 *
	 * This comparison takes the 1 frame swap delay in pageflipping mode
	 * into account, as well as a potential DRM_VBLANK_NEXTONMISS delay
	 * if we are blitting/exchanging instead of flipping.
	 */
	if (vbl.request.sequence <= current_msc)
		vbl.request.sequence += divisor;

	/* Account for 1 frame extra pageflip delay if flip > 0 */
	vbl.request.sequence -= flip;

	vbl.request.signal = drm_queue_seq;
	ret = drmWaitVBlank(pAMDGPUEnt->fd, &vbl);
	if (ret) {
		xf86DrvMsg(scrn->scrnIndex, X_WARNING,
			   "final get vblank counter failed: %s\n",
			   strerror(errno));
		goto blit_fallback;
	}

	/* Adjust returned value for 1 frame pageflip offset if flip > 0 */
	*target_msc = vbl.reply.sequence + flip + msc_delta;
	*target_msc &= 0xffffffff;
	swap_info->frame = *target_msc;

	return TRUE;

blit_fallback:
	if (swap_info) {
		swap_info->type = DRI2_SWAP;
		amdgpu_dri2_schedule_event(FALLBACK_SWAP_DELAY, swap_info);
	} else {
		box.x1 = 0;
		box.y1 = 0;
		box.x2 = draw->width;
		box.y2 = draw->height;
		REGION_INIT(pScreen, &region, &box, 0);

		amdgpu_dri2_copy_region(draw, &region, front, back);

		DRI2SwapComplete(client, draw, 0, 0, 0, DRI2_BLIT_COMPLETE, func, data);

		amdgpu_dri2_unref_buffer(front);
		amdgpu_dri2_unref_buffer(back);
	}

	*target_msc = 0;	/* offscreen, so zero out target vblank count */
	return TRUE;
}

Bool amdgpu_dri2_screen_init(ScreenPtr pScreen)
{
	ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
	AMDGPUInfoPtr info = AMDGPUPTR(pScrn);
	AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(pScrn);
	DRI2InfoRec dri2_info = { 0 };
	const char *driverNames[2];
	Bool scheduling_works = TRUE;

	if (!info->dri2.available)
		return FALSE;

	info->dri2.device_name = drmGetDeviceNameFromFd(pAMDGPUEnt->fd);

	dri2_info.driverName = SI_DRIVER_NAME;
	dri2_info.fd = pAMDGPUEnt->fd;
	dri2_info.deviceName = info->dri2.device_name;
	dri2_info.version = DRI2INFOREC_VERSION;
	dri2_info.CreateBuffer = amdgpu_dri2_create_buffer;
	dri2_info.DestroyBuffer = amdgpu_dri2_destroy_buffer;
	dri2_info.CopyRegion = amdgpu_dri2_copy_region;

	if (info->drmmode.count_crtcs > 2) {
#ifdef DRM_CAP_VBLANK_HIGH_CRTC
		uint64_t cap_value;

		if (drmGetCap
		    (pAMDGPUEnt->fd, DRM_CAP_VBLANK_HIGH_CRTC, &cap_value)) {
			xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
				   "You need a newer kernel "
				   "for VBLANKs on CRTC > 1\n");
			scheduling_works = FALSE;
		} else if (!cap_value) {
			xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
				   "Your kernel does not "
				   "handle VBLANKs on CRTC > 1\n");
			scheduling_works = FALSE;
		}
#else
		xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
			   "You need to rebuild against a "
			   "newer libdrm to handle VBLANKs on CRTC > 1\n");
		scheduling_works = FALSE;
#endif
	}

	if (scheduling_works) {
		dri2_info.version = 4;
		dri2_info.ScheduleSwap = amdgpu_dri2_schedule_swap;
		dri2_info.GetMSC = amdgpu_dri2_get_msc;
		dri2_info.ScheduleWaitMSC = amdgpu_dri2_schedule_wait_msc;
		dri2_info.numDrivers = AMDGPU_ARRAY_SIZE(driverNames);
		dri2_info.driverNames = driverNames;
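		/* Per the DRI2 driver type indices, entry 0 is the DRI (GL)
		 * driver name and entry 1 the VDPAU driver name; both use
		 * the same name here.
		 */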
		driverNames[0] = driverNames[1] = dri2_info.driverName;

		if (DRI2InfoCnt == 0) {
			if (!dixRegisterPrivateKey(dri2_window_private_key,
						   PRIVATE_WINDOW,
						   sizeof(struct dri2_window_priv))) {
				xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
					   "Failed to get DRI2 window private\n");
				return FALSE;
			}

			AddCallback(&ClientStateCallback,
				    amdgpu_dri2_client_state_changed, 0);
		}

		DRI2InfoCnt++;
	}

#if DRI2INFOREC_VERSION >= 9
	dri2_info.version = 9;
	dri2_info.CreateBuffer2 = amdgpu_dri2_create_buffer2;
	dri2_info.DestroyBuffer2 = amdgpu_dri2_destroy_buffer2;
	dri2_info.CopyRegion2 = amdgpu_dri2_copy_region2;
#endif

	info->dri2.enabled = DRI2ScreenInit(pScreen, &dri2_info);
	return info->dri2.enabled;
}

void amdgpu_dri2_close_screen(ScreenPtr pScreen)
{
	ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
	AMDGPUInfoPtr info = AMDGPUPTR(pScrn);

	if (--DRI2InfoCnt == 0)
		DeleteCallback(&ClientStateCallback,
			       amdgpu_dri2_client_state_changed, 0);

	DRI2CloseScreen(pScreen);
	drmFree(info->dri2.device_name);
}

#endif /* DRI2 */