/*
 * Copyright © 2007 Red Hat, Inc.
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *
 */
28d6c0b56eSmrg
29d6c0b56eSmrg#ifdef HAVE_CONFIG_H
30d6c0b56eSmrg#include "config.h"
31d6c0b56eSmrg#endif
32d6c0b56eSmrg
3390f2b693Smrg#include <errno.h>
3490f2b693Smrg
35d6c0b56eSmrg#include <xorg-server.h>
3624b90cf4Smrg#include <X11/Xdefs.h>
3724b90cf4Smrg#include <list.h>
38d6c0b56eSmrg
39d6c0b56eSmrg#include "amdgpu_drv.h"
40d6c0b56eSmrg#include "amdgpu_drm_queue.h"
41d6c0b56eSmrg
42d6c0b56eSmrg
/*
 * One queued DRM event: a page flip or vblank request that has been
 * submitted to the kernel and is awaiting its completion event.
 */
struct amdgpu_drm_queue_entry {
	struct xorg_list list;	/* membership in one of the queue lists */
	uint64_t usec;		/* event timestamp in microseconds (set on arrival) */
	uint64_t id;		/* caller-chosen ID, matched by amdgpu_drm_abort_id */
	uintptr_t seq;		/* unique sequence number; passed to the kernel as user data */
	void *data;		/* opaque payload handed back to handler/abort */
	ClientPtr client;	/* requesting X client, matched by amdgpu_drm_abort_client */
	xf86CrtcPtr crtc;	/* CRTC the event is associated with */
	amdgpu_drm_handler_proc handler;	/* completion callback; NULL means "aborted" */
	amdgpu_drm_abort_proc abort;	/* abort callback */
	Bool is_flip;		/* TRUE for page flip events, FALSE for vblank events */
	unsigned int frame;	/* frame/vblank sequence number (set on arrival) */
};
56d6c0b56eSmrg
/* Number of screens using the queue; init/close are refcounted on this */
static int amdgpu_drm_queue_refcnt;
/* Entries submitted to the kernel, awaiting their completion event */
static struct xorg_list amdgpu_drm_queue;
/* Flip completions that arrived but have not been handled yet */
static struct xorg_list amdgpu_drm_flip_signalled;
/* Vblank completions that arrived but have not been handled yet */
static struct xorg_list amdgpu_drm_vblank_signalled;
/* Vblank completions deferred while a flip is pending on their CRTC */
static struct xorg_list amdgpu_drm_vblank_deferred;
/* Monotonically increasing sequence number for new entries */
static uintptr_t amdgpu_drm_queue_seq;
63d6c0b56eSmrg
64d6c0b56eSmrg
65d6c0b56eSmrg/*
6635d5b7c7Smrg * Process a DRM event
67d6c0b56eSmrg */
6835d5b7c7Smrgstatic void
6935d5b7c7Smrgamdgpu_drm_queue_handle_one(struct amdgpu_drm_queue_entry *e)
7035d5b7c7Smrg{
7135d5b7c7Smrg	xorg_list_del(&e->list);
7235d5b7c7Smrg	if (e->handler) {
7335d5b7c7Smrg		e->handler(e->crtc, e->frame, e->usec, e->data);
7435d5b7c7Smrg	} else
7535d5b7c7Smrg		e->abort(e->crtc, e->data);
7635d5b7c7Smrg	free(e);
7735d5b7c7Smrg}
7835d5b7c7Smrg
7990f2b693Smrg/*
8090f2b693Smrg * Abort one queued DRM entry, removing it
8190f2b693Smrg * from the list, calling the abort function and
8290f2b693Smrg * freeing the memory
8390f2b693Smrg */
8435d5b7c7Smrgstatic void
8590f2b693Smrgamdgpu_drm_abort_one(struct amdgpu_drm_queue_entry *e)
8690f2b693Smrg{
8790f2b693Smrg	xorg_list_del(&e->list);
8890f2b693Smrg	e->abort(e->crtc, e->data);
8990f2b693Smrg	free(e);
9090f2b693Smrg}
9190f2b693Smrg
9290f2b693Smrgstatic void
9390f2b693Smrgamdgpu_drm_queue_handler(int fd, unsigned int frame, unsigned int sec,
9490f2b693Smrg			 unsigned int usec, void *user_ptr)
95d6c0b56eSmrg{
96d6c0b56eSmrg	uintptr_t seq = (uintptr_t)user_ptr;
97d6c0b56eSmrg	struct amdgpu_drm_queue_entry *e, *tmp;
98d6c0b56eSmrg
99d6c0b56eSmrg	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
100d6c0b56eSmrg		if (e->seq == seq) {
10135d5b7c7Smrg			if (!e->handler) {
10290f2b693Smrg				amdgpu_drm_abort_one(e);
10335d5b7c7Smrg				break;
10435d5b7c7Smrg			}
10535d5b7c7Smrg
106d6c0b56eSmrg			xorg_list_del(&e->list);
10735d5b7c7Smrg			e->usec = (uint64_t)sec * 1000000 + usec;
10835d5b7c7Smrg			e->frame = frame;
10990f2b693Smrg			xorg_list_append(&e->list, e->is_flip ?
11090f2b693Smrg					 &amdgpu_drm_flip_signalled :
11190f2b693Smrg					 &amdgpu_drm_vblank_signalled);
112d6c0b56eSmrg			break;
113d6c0b56eSmrg		}
114d6c0b56eSmrg	}
115d6c0b56eSmrg}
116d6c0b56eSmrg
11735d5b7c7Smrg/*
11890f2b693Smrg * Handle signalled vblank events. If we're waiting for a flip event,
11990f2b693Smrg * put events for that CRTC in the vblank_deferred list.
12035d5b7c7Smrg */
12135d5b7c7Smrgstatic void
12290f2b693Smrgamdgpu_drm_handle_vblank_signalled(void)
12335d5b7c7Smrg{
12490f2b693Smrg	drmmode_crtc_private_ptr drmmode_crtc;
12590f2b693Smrg	struct amdgpu_drm_queue_entry *e;
12635d5b7c7Smrg
12790f2b693Smrg	while (!xorg_list_is_empty(&amdgpu_drm_vblank_signalled)) {
12890f2b693Smrg		e = xorg_list_first_entry(&amdgpu_drm_vblank_signalled,
12990f2b693Smrg					  struct amdgpu_drm_queue_entry, list);
13090f2b693Smrg		drmmode_crtc = e->crtc->driver_private;
13190f2b693Smrg
13290f2b693Smrg		if (drmmode_crtc->wait_flip_nesting_level == 0) {
13390f2b693Smrg			amdgpu_drm_queue_handle_one(e);
13490f2b693Smrg			continue;
13590f2b693Smrg		}
13690f2b693Smrg
13790f2b693Smrg		xorg_list_del(&e->list);
13890f2b693Smrg		xorg_list_append(&e->list, &amdgpu_drm_vblank_deferred);
13990f2b693Smrg	}
14035d5b7c7Smrg}
14135d5b7c7Smrg
14235d5b7c7Smrg/*
14335d5b7c7Smrg * Handle deferred DRM vblank events
14435d5b7c7Smrg *
14535d5b7c7Smrg * This function must be called after amdgpu_drm_wait_pending_flip, once
14635d5b7c7Smrg * it's safe to attempt queueing a flip again
14735d5b7c7Smrg */
14835d5b7c7Smrgvoid
14935d5b7c7Smrgamdgpu_drm_queue_handle_deferred(xf86CrtcPtr crtc)
15035d5b7c7Smrg{
15135d5b7c7Smrg	drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;
15235d5b7c7Smrg	struct amdgpu_drm_queue_entry *e, *tmp;
15335d5b7c7Smrg
15435d5b7c7Smrg	if (drmmode_crtc->wait_flip_nesting_level == 0 ||
15535d5b7c7Smrg	    --drmmode_crtc->wait_flip_nesting_level > 0)
15635d5b7c7Smrg		return;
15735d5b7c7Smrg
15890f2b693Smrg	/* Put previously deferred vblank events for this CRTC back in the
15990f2b693Smrg	 * signalled queue
16090f2b693Smrg	 */
16190f2b693Smrg	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_vblank_deferred, list) {
16290f2b693Smrg		if (e->crtc != crtc)
16390f2b693Smrg			continue;
16435d5b7c7Smrg
16590f2b693Smrg		xorg_list_del(&e->list);
16690f2b693Smrg		xorg_list_append(&e->list, &amdgpu_drm_vblank_signalled);
16735d5b7c7Smrg	}
16890f2b693Smrg
16990f2b693Smrg	amdgpu_drm_handle_vblank_signalled();
17035d5b7c7Smrg}
17135d5b7c7Smrg
172d6c0b56eSmrg/*
173d6c0b56eSmrg * Enqueue a potential drm response; when the associated response
174d6c0b56eSmrg * appears, we've got data to pass to the handler from here
175d6c0b56eSmrg */
176d6c0b56eSmrguintptr_t
177d6c0b56eSmrgamdgpu_drm_queue_alloc(xf86CrtcPtr crtc, ClientPtr client,
178d6c0b56eSmrg		       uint64_t id, void *data,
179d6c0b56eSmrg		       amdgpu_drm_handler_proc handler,
18090f2b693Smrg		       amdgpu_drm_abort_proc abort,
18190f2b693Smrg		       Bool is_flip)
182d6c0b56eSmrg{
183d6c0b56eSmrg	struct amdgpu_drm_queue_entry *e;
184d6c0b56eSmrg
185d6c0b56eSmrg	e = calloc(1, sizeof(struct amdgpu_drm_queue_entry));
186d6c0b56eSmrg	if (!e)
187504d986fSmrg		return AMDGPU_DRM_QUEUE_ERROR;
188504d986fSmrg
189504d986fSmrg	if (_X_UNLIKELY(amdgpu_drm_queue_seq == AMDGPU_DRM_QUEUE_ERROR))
190504d986fSmrg		amdgpu_drm_queue_seq++;
191d6c0b56eSmrg
192d6c0b56eSmrg	e->seq = amdgpu_drm_queue_seq++;
193d6c0b56eSmrg	e->client = client;
194d6c0b56eSmrg	e->crtc = crtc;
195d6c0b56eSmrg	e->id = id;
196d6c0b56eSmrg	e->data = data;
197d6c0b56eSmrg	e->handler = handler;
198d6c0b56eSmrg	e->abort = abort;
19990f2b693Smrg	e->is_flip = is_flip;
200d6c0b56eSmrg
20124b90cf4Smrg	xorg_list_append(&e->list, &amdgpu_drm_queue);
202d6c0b56eSmrg
203d6c0b56eSmrg	return e->seq;
204d6c0b56eSmrg}
205d6c0b56eSmrg
206d6c0b56eSmrg/*
207d6c0b56eSmrg * Abort drm queue entries for a client
208d6c0b56eSmrg *
209d6c0b56eSmrg * NOTE: This keeps the entries in the list until the DRM event arrives,
210d6c0b56eSmrg * but then it calls the abort functions instead of the handler
211d6c0b56eSmrg * functions.
212d6c0b56eSmrg */
213d6c0b56eSmrgvoid
214d6c0b56eSmrgamdgpu_drm_abort_client(ClientPtr client)
215d6c0b56eSmrg{
216d6c0b56eSmrg	struct amdgpu_drm_queue_entry *e;
217d6c0b56eSmrg
218d6c0b56eSmrg	xorg_list_for_each_entry(e, &amdgpu_drm_queue, list) {
219d6c0b56eSmrg		if (e->client == client)
220d6c0b56eSmrg			e->handler = NULL;
221d6c0b56eSmrg	}
222d6c0b56eSmrg}
223d6c0b56eSmrg
224d6c0b56eSmrg/*
225d6c0b56eSmrg * Abort specific drm queue entry
226d6c0b56eSmrg */
227d6c0b56eSmrgvoid
228d6c0b56eSmrgamdgpu_drm_abort_entry(uintptr_t seq)
229d6c0b56eSmrg{
230d6c0b56eSmrg	struct amdgpu_drm_queue_entry *e, *tmp;
231d6c0b56eSmrg
23235d5b7c7Smrg	if (seq == AMDGPU_DRM_QUEUE_ERROR)
23335d5b7c7Smrg		return;
23435d5b7c7Smrg
23535d5b7c7Smrg	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_vblank_signalled, list) {
23635d5b7c7Smrg		if (e->seq == seq) {
23735d5b7c7Smrg			amdgpu_drm_abort_one(e);
23835d5b7c7Smrg			return;
23935d5b7c7Smrg		}
24035d5b7c7Smrg	}
24135d5b7c7Smrg
24290f2b693Smrg	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_vblank_deferred, list) {
24390f2b693Smrg		if (e->seq == seq) {
24490f2b693Smrg			amdgpu_drm_abort_one(e);
24590f2b693Smrg			return;
24690f2b693Smrg		}
24790f2b693Smrg	}
24890f2b693Smrg
249d6c0b56eSmrg	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
250d6c0b56eSmrg		if (e->seq == seq) {
251d6c0b56eSmrg			amdgpu_drm_abort_one(e);
252d6c0b56eSmrg			break;
253d6c0b56eSmrg		}
254d6c0b56eSmrg	}
255d6c0b56eSmrg}
256d6c0b56eSmrg
257d6c0b56eSmrg/*
258d6c0b56eSmrg * Abort specific drm queue entry by ID
259d6c0b56eSmrg */
260d6c0b56eSmrgvoid
261d6c0b56eSmrgamdgpu_drm_abort_id(uint64_t id)
262d6c0b56eSmrg{
263d6c0b56eSmrg	struct amdgpu_drm_queue_entry *e, *tmp;
264d6c0b56eSmrg
265d6c0b56eSmrg	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
266d6c0b56eSmrg		if (e->id == id) {
267d6c0b56eSmrg			amdgpu_drm_abort_one(e);
268d6c0b56eSmrg			break;
269d6c0b56eSmrg		}
270d6c0b56eSmrg	}
271d6c0b56eSmrg}
272d6c0b56eSmrg
27335d5b7c7Smrg/*
27435d5b7c7Smrg * drmHandleEvent wrapper
27535d5b7c7Smrg */
27635d5b7c7Smrgint
27735d5b7c7Smrgamdgpu_drm_handle_event(int fd, drmEventContext *event_context)
27835d5b7c7Smrg{
27990f2b693Smrg	struct amdgpu_drm_queue_entry *e;
28035d5b7c7Smrg	int r;
28135d5b7c7Smrg
28290f2b693Smrg	/* Retry drmHandleEvent if it was interrupted by a signal in read() */
28390f2b693Smrg	do {
28490f2b693Smrg		r = drmHandleEvent(fd, event_context);
28590f2b693Smrg	} while (r < 0 && (errno == EINTR || errno == EAGAIN));
28690f2b693Smrg
28790f2b693Smrg	if (r < 0) {
28890f2b693Smrg		static Bool printed;
28990f2b693Smrg
29090f2b693Smrg		if (!printed) {
29190f2b693Smrg			ErrorF("%s: drmHandleEvent returned %d, errno=%d (%s)\n",
29290f2b693Smrg			       __func__, r, errno, strerror(errno));
29390f2b693Smrg			printed = TRUE;
29490f2b693Smrg		}
29590f2b693Smrg	}
29635d5b7c7Smrg
29735d5b7c7Smrg	while (!xorg_list_is_empty(&amdgpu_drm_flip_signalled)) {
29835d5b7c7Smrg		e = xorg_list_first_entry(&amdgpu_drm_flip_signalled,
29935d5b7c7Smrg					  struct amdgpu_drm_queue_entry, list);
30035d5b7c7Smrg		amdgpu_drm_queue_handle_one(e);
30135d5b7c7Smrg	}
30235d5b7c7Smrg
30390f2b693Smrg	amdgpu_drm_handle_vblank_signalled();
30435d5b7c7Smrg
30535d5b7c7Smrg	return r;
30635d5b7c7Smrg}
30735d5b7c7Smrg
30835d5b7c7Smrg/*
30935d5b7c7Smrg * Wait for pending page flip on given CRTC to complete
31035d5b7c7Smrg */
31135d5b7c7Smrgvoid amdgpu_drm_wait_pending_flip(xf86CrtcPtr crtc)
31235d5b7c7Smrg{
31335d5b7c7Smrg	drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;
31435d5b7c7Smrg	AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(crtc->scrn);
31535d5b7c7Smrg	struct amdgpu_drm_queue_entry *e;
31635d5b7c7Smrg
31735d5b7c7Smrg	drmmode_crtc->wait_flip_nesting_level++;
31835d5b7c7Smrg
31935d5b7c7Smrg	while (drmmode_crtc->flip_pending &&
32035d5b7c7Smrg	       !xorg_list_is_empty(&amdgpu_drm_flip_signalled)) {
32135d5b7c7Smrg		e = xorg_list_first_entry(&amdgpu_drm_flip_signalled,
32235d5b7c7Smrg					  struct amdgpu_drm_queue_entry, list);
32335d5b7c7Smrg		amdgpu_drm_queue_handle_one(e);
32435d5b7c7Smrg	}
32535d5b7c7Smrg
32635d5b7c7Smrg	while (drmmode_crtc->flip_pending
32735d5b7c7Smrg	       && amdgpu_drm_handle_event(pAMDGPUEnt->fd,
32890f2b693Smrg					  &drmmode_crtc->drmmode->event_context) >= 0);
32935d5b7c7Smrg}
33035d5b7c7Smrg
331d6c0b56eSmrg/*
332d6c0b56eSmrg * Initialize the DRM event queue
333d6c0b56eSmrg */
334d6c0b56eSmrgvoid
33535d5b7c7Smrgamdgpu_drm_queue_init(ScrnInfoPtr scrn)
336d6c0b56eSmrg{
33735d5b7c7Smrg	AMDGPUInfoPtr info = AMDGPUPTR(scrn);
33835d5b7c7Smrg	drmmode_ptr drmmode = &info->drmmode;
33935d5b7c7Smrg
34035d5b7c7Smrg	drmmode->event_context.version = 2;
34190f2b693Smrg	drmmode->event_context.vblank_handler = amdgpu_drm_queue_handler;
34290f2b693Smrg	drmmode->event_context.page_flip_handler = amdgpu_drm_queue_handler;
34335d5b7c7Smrg
344d6c0b56eSmrg	if (amdgpu_drm_queue_refcnt++)
345d6c0b56eSmrg		return;
346d6c0b56eSmrg
347d6c0b56eSmrg	xorg_list_init(&amdgpu_drm_queue);
34835d5b7c7Smrg	xorg_list_init(&amdgpu_drm_flip_signalled);
34935d5b7c7Smrg	xorg_list_init(&amdgpu_drm_vblank_signalled);
35090f2b693Smrg	xorg_list_init(&amdgpu_drm_vblank_deferred);
351d6c0b56eSmrg}
352d6c0b56eSmrg
353d6c0b56eSmrg/*
354d6c0b56eSmrg * Deinitialize the DRM event queue
355d6c0b56eSmrg */
356d6c0b56eSmrgvoid
357d6c0b56eSmrgamdgpu_drm_queue_close(ScrnInfoPtr scrn)
358d6c0b56eSmrg{
359d6c0b56eSmrg	struct amdgpu_drm_queue_entry *e, *tmp;
360d6c0b56eSmrg
361d6c0b56eSmrg	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
362d6c0b56eSmrg		if (e->crtc->scrn == scrn)
363d6c0b56eSmrg			amdgpu_drm_abort_one(e);
364d6c0b56eSmrg	}
365d6c0b56eSmrg
366d6c0b56eSmrg	amdgpu_drm_queue_refcnt--;
367d6c0b56eSmrg}
368