/* amdgpu_drm_queue.c — revision 35d5b7c7 */
1d6c0b56eSmrg/*
2d6c0b56eSmrg * Copyright © 2007 Red Hat, Inc.
3d6c0b56eSmrg * Copyright © 2015 Advanced Micro Devices, Inc.
4d6c0b56eSmrg *
5d6c0b56eSmrg * Permission is hereby granted, free of charge, to any person obtaining a
6d6c0b56eSmrg * copy of this software and associated documentation files (the "Software"),
7d6c0b56eSmrg * to deal in the Software without restriction, including without limitation
8d6c0b56eSmrg * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9d6c0b56eSmrg * and/or sell copies of the Software, and to permit persons to whom the
10d6c0b56eSmrg * Software is furnished to do so, subject to the following conditions:
11d6c0b56eSmrg *
12d6c0b56eSmrg * The above copyright notice and this permission notice (including the next
13d6c0b56eSmrg * paragraph) shall be included in all copies or substantial portions of the
14d6c0b56eSmrg * Software.
15d6c0b56eSmrg *
16d6c0b56eSmrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17d6c0b56eSmrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18d6c0b56eSmrg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19d6c0b56eSmrg * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20d6c0b56eSmrg * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21d6c0b56eSmrg * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22d6c0b56eSmrg * SOFTWARE.
23d6c0b56eSmrg *
24d6c0b56eSmrg * Authors:
25d6c0b56eSmrg *    Dave Airlie <airlied@redhat.com>
26d6c0b56eSmrg *
27d6c0b56eSmrg */
28d6c0b56eSmrg
29d6c0b56eSmrg#ifdef HAVE_CONFIG_H
30d6c0b56eSmrg#include "config.h"
31d6c0b56eSmrg#endif
32d6c0b56eSmrg
33d6c0b56eSmrg#include <xorg-server.h>
3424b90cf4Smrg#include <X11/Xdefs.h>
3524b90cf4Smrg#include <list.h>
36d6c0b56eSmrg
37d6c0b56eSmrg#include "amdgpu_drv.h"
38d6c0b56eSmrg#include "amdgpu_drm_queue.h"
39d6c0b56eSmrg
40d6c0b56eSmrg
/*
 * One queued DRM event. Entries live on amdgpu_drm_queue until the kernel
 * event arrives, then move to one of the "signalled" lists (or are freed
 * immediately if aborted).
 */
struct amdgpu_drm_queue_entry {
	struct xorg_list list;	/* link into queue / signalled lists */
	uint64_t usec;		/* event timestamp, filled in from the DRM event */
	uint64_t id;		/* caller-supplied identifier (see amdgpu_drm_abort_id) */
	uintptr_t seq;		/* unique sequence number, doubles as DRM user data */
	void *data;		/* opaque pointer passed to handler/abort */
	ClientPtr client;	/* X client that triggered the request, if any */
	xf86CrtcPtr crtc;	/* CRTC the event relates to */
	amdgpu_drm_handler_proc handler;	/* NULL means "abort on arrival" */
	amdgpu_drm_abort_proc abort;
	unsigned int frame;	/* frame counter from the DRM event */
};
53d6c0b56eSmrg
static int amdgpu_drm_queue_refcnt;			/* number of screens using the queue */
static struct xorg_list amdgpu_drm_queue;		/* events waiting for the kernel */
static struct xorg_list amdgpu_drm_flip_signalled;	/* completed page flips, not yet processed */
static struct xorg_list amdgpu_drm_vblank_signalled;	/* completed vblanks, possibly deferred */
static uintptr_t amdgpu_drm_queue_seq;			/* next sequence number to hand out */
59d6c0b56eSmrg
60d6c0b56eSmrg
61d6c0b56eSmrg/*
6235d5b7c7Smrg * Process a DRM event
63d6c0b56eSmrg */
6435d5b7c7Smrgstatic void
6535d5b7c7Smrgamdgpu_drm_queue_handle_one(struct amdgpu_drm_queue_entry *e)
6635d5b7c7Smrg{
6735d5b7c7Smrg	xorg_list_del(&e->list);
6835d5b7c7Smrg	if (e->handler) {
6935d5b7c7Smrg		e->handler(e->crtc, e->frame, e->usec, e->data);
7035d5b7c7Smrg	} else
7135d5b7c7Smrg		e->abort(e->crtc, e->data);
7235d5b7c7Smrg	free(e);
7335d5b7c7Smrg}
7435d5b7c7Smrg
7535d5b7c7Smrgstatic void
7635d5b7c7Smrgamdgpu_drm_queue_handler(struct xorg_list *signalled, unsigned int frame,
7735d5b7c7Smrg			 unsigned int sec, unsigned int usec, void *user_ptr)
78d6c0b56eSmrg{
79d6c0b56eSmrg	uintptr_t seq = (uintptr_t)user_ptr;
80d6c0b56eSmrg	struct amdgpu_drm_queue_entry *e, *tmp;
81d6c0b56eSmrg
82d6c0b56eSmrg	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
83d6c0b56eSmrg		if (e->seq == seq) {
8435d5b7c7Smrg			if (!e->handler) {
8535d5b7c7Smrg				amdgpu_drm_queue_handle_one(e);
8635d5b7c7Smrg				break;
8735d5b7c7Smrg			}
8835d5b7c7Smrg
89d6c0b56eSmrg			xorg_list_del(&e->list);
9035d5b7c7Smrg			e->usec = (uint64_t)sec * 1000000 + usec;
9135d5b7c7Smrg			e->frame = frame;
9235d5b7c7Smrg			xorg_list_append(&e->list, signalled);
93d6c0b56eSmrg			break;
94d6c0b56eSmrg		}
95d6c0b56eSmrg	}
96d6c0b56eSmrg}
97d6c0b56eSmrg
9835d5b7c7Smrg/*
9935d5b7c7Smrg * Signal a DRM page flip event
10035d5b7c7Smrg */
10135d5b7c7Smrgstatic void
10235d5b7c7Smrgamdgpu_drm_page_flip_handler(int fd, unsigned int frame, unsigned int sec,
10335d5b7c7Smrg			     unsigned int usec, void *user_ptr)
10435d5b7c7Smrg{
10535d5b7c7Smrg	amdgpu_drm_queue_handler(&amdgpu_drm_flip_signalled, frame, sec, usec,
10635d5b7c7Smrg				 user_ptr);
10735d5b7c7Smrg}
10835d5b7c7Smrg
10935d5b7c7Smrg/*
11035d5b7c7Smrg * Signal a DRM vblank event
11135d5b7c7Smrg */
11235d5b7c7Smrgstatic void
11335d5b7c7Smrgamdgpu_drm_vblank_handler(int fd, unsigned int frame, unsigned int sec,
11435d5b7c7Smrg			  unsigned int usec, void *user_ptr)
11535d5b7c7Smrg{
11635d5b7c7Smrg	amdgpu_drm_queue_handler(&amdgpu_drm_vblank_signalled, frame, sec, usec,
11735d5b7c7Smrg				 user_ptr);
11835d5b7c7Smrg}
11935d5b7c7Smrg
12035d5b7c7Smrg/*
12135d5b7c7Smrg * Handle deferred DRM vblank events
12235d5b7c7Smrg *
12335d5b7c7Smrg * This function must be called after amdgpu_drm_wait_pending_flip, once
12435d5b7c7Smrg * it's safe to attempt queueing a flip again
12535d5b7c7Smrg */
12635d5b7c7Smrgvoid
12735d5b7c7Smrgamdgpu_drm_queue_handle_deferred(xf86CrtcPtr crtc)
12835d5b7c7Smrg{
12935d5b7c7Smrg	drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;
13035d5b7c7Smrg	struct amdgpu_drm_queue_entry *e, *tmp;
13135d5b7c7Smrg
13235d5b7c7Smrg	if (drmmode_crtc->wait_flip_nesting_level == 0 ||
13335d5b7c7Smrg	    --drmmode_crtc->wait_flip_nesting_level > 0)
13435d5b7c7Smrg		return;
13535d5b7c7Smrg
13635d5b7c7Smrg	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_vblank_signalled, list) {
13735d5b7c7Smrg		drmmode_crtc_private_ptr drmmode_crtc = e->crtc->driver_private;
13835d5b7c7Smrg
13935d5b7c7Smrg		if (drmmode_crtc->wait_flip_nesting_level == 0)
14035d5b7c7Smrg			amdgpu_drm_queue_handle_one(e);
14135d5b7c7Smrg	}
14235d5b7c7Smrg}
14335d5b7c7Smrg
144d6c0b56eSmrg/*
145d6c0b56eSmrg * Enqueue a potential drm response; when the associated response
146d6c0b56eSmrg * appears, we've got data to pass to the handler from here
147d6c0b56eSmrg */
148d6c0b56eSmrguintptr_t
149d6c0b56eSmrgamdgpu_drm_queue_alloc(xf86CrtcPtr crtc, ClientPtr client,
150d6c0b56eSmrg		       uint64_t id, void *data,
151d6c0b56eSmrg		       amdgpu_drm_handler_proc handler,
152d6c0b56eSmrg		       amdgpu_drm_abort_proc abort)
153d6c0b56eSmrg{
154d6c0b56eSmrg	struct amdgpu_drm_queue_entry *e;
155d6c0b56eSmrg
156d6c0b56eSmrg	e = calloc(1, sizeof(struct amdgpu_drm_queue_entry));
157d6c0b56eSmrg	if (!e)
158504d986fSmrg		return AMDGPU_DRM_QUEUE_ERROR;
159504d986fSmrg
160504d986fSmrg	if (_X_UNLIKELY(amdgpu_drm_queue_seq == AMDGPU_DRM_QUEUE_ERROR))
161504d986fSmrg		amdgpu_drm_queue_seq++;
162d6c0b56eSmrg
163d6c0b56eSmrg	e->seq = amdgpu_drm_queue_seq++;
164d6c0b56eSmrg	e->client = client;
165d6c0b56eSmrg	e->crtc = crtc;
166d6c0b56eSmrg	e->id = id;
167d6c0b56eSmrg	e->data = data;
168d6c0b56eSmrg	e->handler = handler;
169d6c0b56eSmrg	e->abort = abort;
170d6c0b56eSmrg
17124b90cf4Smrg	xorg_list_append(&e->list, &amdgpu_drm_queue);
172d6c0b56eSmrg
173d6c0b56eSmrg	return e->seq;
174d6c0b56eSmrg}
175d6c0b56eSmrg
176d6c0b56eSmrg/*
177d6c0b56eSmrg * Abort one queued DRM entry, removing it
178d6c0b56eSmrg * from the list, calling the abort function and
179d6c0b56eSmrg * freeing the memory
180d6c0b56eSmrg */
181d6c0b56eSmrgstatic void
182d6c0b56eSmrgamdgpu_drm_abort_one(struct amdgpu_drm_queue_entry *e)
183d6c0b56eSmrg{
184d6c0b56eSmrg	xorg_list_del(&e->list);
185d6c0b56eSmrg	e->abort(e->crtc, e->data);
186d6c0b56eSmrg	free(e);
187d6c0b56eSmrg}
188d6c0b56eSmrg
189d6c0b56eSmrg/*
190d6c0b56eSmrg * Abort drm queue entries for a client
191d6c0b56eSmrg *
192d6c0b56eSmrg * NOTE: This keeps the entries in the list until the DRM event arrives,
193d6c0b56eSmrg * but then it calls the abort functions instead of the handler
194d6c0b56eSmrg * functions.
195d6c0b56eSmrg */
196d6c0b56eSmrgvoid
197d6c0b56eSmrgamdgpu_drm_abort_client(ClientPtr client)
198d6c0b56eSmrg{
199d6c0b56eSmrg	struct amdgpu_drm_queue_entry *e;
200d6c0b56eSmrg
201d6c0b56eSmrg	xorg_list_for_each_entry(e, &amdgpu_drm_queue, list) {
202d6c0b56eSmrg		if (e->client == client)
203d6c0b56eSmrg			e->handler = NULL;
204d6c0b56eSmrg	}
205d6c0b56eSmrg}
206d6c0b56eSmrg
207d6c0b56eSmrg/*
208d6c0b56eSmrg * Abort specific drm queue entry
209d6c0b56eSmrg */
210d6c0b56eSmrgvoid
211d6c0b56eSmrgamdgpu_drm_abort_entry(uintptr_t seq)
212d6c0b56eSmrg{
213d6c0b56eSmrg	struct amdgpu_drm_queue_entry *e, *tmp;
214d6c0b56eSmrg
21535d5b7c7Smrg	if (seq == AMDGPU_DRM_QUEUE_ERROR)
21635d5b7c7Smrg		return;
21735d5b7c7Smrg
21835d5b7c7Smrg	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_vblank_signalled, list) {
21935d5b7c7Smrg		if (e->seq == seq) {
22035d5b7c7Smrg			amdgpu_drm_abort_one(e);
22135d5b7c7Smrg			return;
22235d5b7c7Smrg		}
22335d5b7c7Smrg	}
22435d5b7c7Smrg
225d6c0b56eSmrg	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
226d6c0b56eSmrg		if (e->seq == seq) {
227d6c0b56eSmrg			amdgpu_drm_abort_one(e);
228d6c0b56eSmrg			break;
229d6c0b56eSmrg		}
230d6c0b56eSmrg	}
231d6c0b56eSmrg}
232d6c0b56eSmrg
233d6c0b56eSmrg/*
234d6c0b56eSmrg * Abort specific drm queue entry by ID
235d6c0b56eSmrg */
236d6c0b56eSmrgvoid
237d6c0b56eSmrgamdgpu_drm_abort_id(uint64_t id)
238d6c0b56eSmrg{
239d6c0b56eSmrg	struct amdgpu_drm_queue_entry *e, *tmp;
240d6c0b56eSmrg
241d6c0b56eSmrg	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
242d6c0b56eSmrg		if (e->id == id) {
243d6c0b56eSmrg			amdgpu_drm_abort_one(e);
244d6c0b56eSmrg			break;
245d6c0b56eSmrg		}
246d6c0b56eSmrg	}
247d6c0b56eSmrg}
248d6c0b56eSmrg
24935d5b7c7Smrg/*
25035d5b7c7Smrg * drmHandleEvent wrapper
25135d5b7c7Smrg */
25235d5b7c7Smrgint
25335d5b7c7Smrgamdgpu_drm_handle_event(int fd, drmEventContext *event_context)
25435d5b7c7Smrg{
25535d5b7c7Smrg	struct amdgpu_drm_queue_entry *e, *tmp;
25635d5b7c7Smrg	int r;
25735d5b7c7Smrg
25835d5b7c7Smrg	r = drmHandleEvent(fd, event_context);
25935d5b7c7Smrg
26035d5b7c7Smrg	while (!xorg_list_is_empty(&amdgpu_drm_flip_signalled)) {
26135d5b7c7Smrg		e = xorg_list_first_entry(&amdgpu_drm_flip_signalled,
26235d5b7c7Smrg					  struct amdgpu_drm_queue_entry, list);
26335d5b7c7Smrg		amdgpu_drm_queue_handle_one(e);
26435d5b7c7Smrg	}
26535d5b7c7Smrg
26635d5b7c7Smrg	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_vblank_signalled, list) {
26735d5b7c7Smrg		drmmode_crtc_private_ptr drmmode_crtc = e->crtc->driver_private;
26835d5b7c7Smrg
26935d5b7c7Smrg		if (drmmode_crtc->wait_flip_nesting_level == 0)
27035d5b7c7Smrg			amdgpu_drm_queue_handle_one(e);
27135d5b7c7Smrg	}
27235d5b7c7Smrg
27335d5b7c7Smrg	return r;
27435d5b7c7Smrg}
27535d5b7c7Smrg
27635d5b7c7Smrg/*
27735d5b7c7Smrg * Wait for pending page flip on given CRTC to complete
27835d5b7c7Smrg */
27935d5b7c7Smrgvoid amdgpu_drm_wait_pending_flip(xf86CrtcPtr crtc)
28035d5b7c7Smrg{
28135d5b7c7Smrg	drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;
28235d5b7c7Smrg	AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(crtc->scrn);
28335d5b7c7Smrg	struct amdgpu_drm_queue_entry *e;
28435d5b7c7Smrg
28535d5b7c7Smrg	drmmode_crtc->wait_flip_nesting_level++;
28635d5b7c7Smrg
28735d5b7c7Smrg	while (drmmode_crtc->flip_pending &&
28835d5b7c7Smrg	       !xorg_list_is_empty(&amdgpu_drm_flip_signalled)) {
28935d5b7c7Smrg		e = xorg_list_first_entry(&amdgpu_drm_flip_signalled,
29035d5b7c7Smrg					  struct amdgpu_drm_queue_entry, list);
29135d5b7c7Smrg		amdgpu_drm_queue_handle_one(e);
29235d5b7c7Smrg	}
29335d5b7c7Smrg
29435d5b7c7Smrg	while (drmmode_crtc->flip_pending
29535d5b7c7Smrg	       && amdgpu_drm_handle_event(pAMDGPUEnt->fd,
29635d5b7c7Smrg					  &drmmode_crtc->drmmode->event_context) > 0);
29735d5b7c7Smrg}
29835d5b7c7Smrg
299d6c0b56eSmrg/*
300d6c0b56eSmrg * Initialize the DRM event queue
301d6c0b56eSmrg */
302d6c0b56eSmrgvoid
30335d5b7c7Smrgamdgpu_drm_queue_init(ScrnInfoPtr scrn)
304d6c0b56eSmrg{
30535d5b7c7Smrg	AMDGPUInfoPtr info = AMDGPUPTR(scrn);
30635d5b7c7Smrg	drmmode_ptr drmmode = &info->drmmode;
30735d5b7c7Smrg
30835d5b7c7Smrg	drmmode->event_context.version = 2;
30935d5b7c7Smrg	drmmode->event_context.vblank_handler = amdgpu_drm_vblank_handler;
31035d5b7c7Smrg	drmmode->event_context.page_flip_handler = amdgpu_drm_page_flip_handler;
31135d5b7c7Smrg
312d6c0b56eSmrg	if (amdgpu_drm_queue_refcnt++)
313d6c0b56eSmrg		return;
314d6c0b56eSmrg
315d6c0b56eSmrg	xorg_list_init(&amdgpu_drm_queue);
31635d5b7c7Smrg	xorg_list_init(&amdgpu_drm_flip_signalled);
31735d5b7c7Smrg	xorg_list_init(&amdgpu_drm_vblank_signalled);
318d6c0b56eSmrg}
319d6c0b56eSmrg
320d6c0b56eSmrg/*
321d6c0b56eSmrg * Deinitialize the DRM event queue
322d6c0b56eSmrg */
323d6c0b56eSmrgvoid
324d6c0b56eSmrgamdgpu_drm_queue_close(ScrnInfoPtr scrn)
325d6c0b56eSmrg{
326d6c0b56eSmrg	struct amdgpu_drm_queue_entry *e, *tmp;
327d6c0b56eSmrg
328d6c0b56eSmrg	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
329d6c0b56eSmrg		if (e->crtc->scrn == scrn)
330d6c0b56eSmrg			amdgpu_drm_abort_one(e);
331d6c0b56eSmrg	}
332d6c0b56eSmrg
333d6c0b56eSmrg	amdgpu_drm_queue_refcnt--;
334d6c0b56eSmrg}
335