amdgpu_drm_queue.c revision d6c0b56e
1/*
2 * Copyright © 2007 Red Hat, Inc.
3 * Copyright © 2015 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 * Authors:
25 *    Dave Airlie <airlied@redhat.com>
26 *
27 */
28
29#ifdef HAVE_CONFIG_H
30#include "config.h"
31#endif
32
33#include <xorg-server.h>
34
35#include "amdgpu_drv.h"
36#include "amdgpu_drm_queue.h"
37#include "amdgpu_list.h"
38
39
/* One queued DRM event awaiting its completion callback from the kernel */
struct amdgpu_drm_queue_entry {
	struct xorg_list list;			/* link in the global amdgpu_drm_queue */
	uint64_t id;				/* caller-chosen id, matched by amdgpu_drm_abort_id */
	uintptr_t seq;				/* unique sequence number returned to the caller */
	void *data;				/* opaque payload passed to handler/abort */
	ClientPtr client;			/* owning client, matched by amdgpu_drm_abort_client */
	xf86CrtcPtr crtc;			/* CRTC this event belongs to */
	amdgpu_drm_handler_proc handler;	/* completion callback; NULL means entry was aborted */
	amdgpu_drm_abort_proc abort;		/* abort callback, run instead of handler when aborted */
};
50
/* Init/close reference count: the list is (de)initialized only at 0 <-> 1 */
static int amdgpu_drm_queue_refcnt;
/* Head of the list of pending amdgpu_drm_queue_entry items */
static struct xorg_list amdgpu_drm_queue;
/* Monotonically increasing sequence counter; 0 is skipped on wrap-around */
static uintptr_t amdgpu_drm_queue_seq;
54
55
56/*
57 * Handle a DRM event
58 */
59void
60amdgpu_drm_queue_handler(int fd, unsigned int frame, unsigned int sec,
61			 unsigned int usec, void *user_ptr)
62{
63	uintptr_t seq = (uintptr_t)user_ptr;
64	struct amdgpu_drm_queue_entry *e, *tmp;
65
66	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
67		if (e->seq == seq) {
68			xorg_list_del(&e->list);
69			if (e->handler)
70				e->handler(e->crtc, frame,
71					   (uint64_t)sec * 1000000 + usec,
72					   e->data);
73			else
74				e->abort(e->crtc, e->data);
75			free(e);
76			break;
77		}
78	}
79}
80
81/*
82 * Enqueue a potential drm response; when the associated response
83 * appears, we've got data to pass to the handler from here
84 */
85uintptr_t
86amdgpu_drm_queue_alloc(xf86CrtcPtr crtc, ClientPtr client,
87		       uint64_t id, void *data,
88		       amdgpu_drm_handler_proc handler,
89		       amdgpu_drm_abort_proc abort)
90{
91	struct amdgpu_drm_queue_entry *e;
92
93	e = calloc(1, sizeof(struct amdgpu_drm_queue_entry));
94	if (!e)
95		return NULL;
96
97	if (!amdgpu_drm_queue_seq)
98		amdgpu_drm_queue_seq = 1;
99	e->seq = amdgpu_drm_queue_seq++;
100	e->client = client;
101	e->crtc = crtc;
102	e->id = id;
103	e->data = data;
104	e->handler = handler;
105	e->abort = abort;
106
107	xorg_list_add(&e->list, &amdgpu_drm_queue);
108
109	return e->seq;
110}
111
112/*
113 * Abort one queued DRM entry, removing it
114 * from the list, calling the abort function and
115 * freeing the memory
116 */
117static void
118amdgpu_drm_abort_one(struct amdgpu_drm_queue_entry *e)
119{
120	xorg_list_del(&e->list);
121	e->abort(e->crtc, e->data);
122	free(e);
123}
124
125/*
126 * Abort drm queue entries for a client
127 *
128 * NOTE: This keeps the entries in the list until the DRM event arrives,
129 * but then it calls the abort functions instead of the handler
130 * functions.
131 */
132void
133amdgpu_drm_abort_client(ClientPtr client)
134{
135	struct amdgpu_drm_queue_entry *e;
136
137	xorg_list_for_each_entry(e, &amdgpu_drm_queue, list) {
138		if (e->client == client)
139			e->handler = NULL;
140	}
141}
142
143/*
144 * Abort specific drm queue entry
145 */
146void
147amdgpu_drm_abort_entry(uintptr_t seq)
148{
149	struct amdgpu_drm_queue_entry *e, *tmp;
150
151	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
152		if (e->seq == seq) {
153			amdgpu_drm_abort_one(e);
154			break;
155		}
156	}
157}
158
159/*
160 * Abort specific drm queue entry by ID
161 */
162void
163amdgpu_drm_abort_id(uint64_t id)
164{
165	struct amdgpu_drm_queue_entry *e, *tmp;
166
167	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
168		if (e->id == id) {
169			amdgpu_drm_abort_one(e);
170			break;
171		}
172	}
173}
174
175/*
176 * Initialize the DRM event queue
177 */
178void
179amdgpu_drm_queue_init()
180{
181	if (amdgpu_drm_queue_refcnt++)
182		return;
183
184	xorg_list_init(&amdgpu_drm_queue);
185}
186
187/*
188 * Deinitialize the DRM event queue
189 */
190void
191amdgpu_drm_queue_close(ScrnInfoPtr scrn)
192{
193	struct amdgpu_drm_queue_entry *e, *tmp;
194
195	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
196		if (e->crtc->scrn == scrn)
197			amdgpu_drm_abort_one(e);
198	}
199
200	amdgpu_drm_queue_refcnt--;
201}
202