amdgpu_drm_queue.c revision 504d986f
/*
 * Copyright © 2007 Red Hat, Inc.
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <xorg-server.h>

#include "amdgpu_drv.h"
#include "amdgpu_drm_queue.h"
#include "amdgpu_list.h"

/*
 * A pending DRM event: an entry is queued before the associated request is
 * submitted to the kernel and looked up again by its sequence number when
 * the event arrives.
 */
struct amdgpu_drm_queue_entry {
	struct xorg_list list;
	uint64_t id;
	uintptr_t seq;
	void *data;
	ClientPtr client;
	xf86CrtcPtr crtc;
	amdgpu_drm_handler_proc handler;
	amdgpu_drm_abort_proc abort;
};

/*
 * Reference count for init/close, the list of pending entries, and the
 * next sequence number handed to the kernel as event user data.
 */
static int amdgpu_drm_queue_refcnt;
static struct xorg_list amdgpu_drm_queue;
static uintptr_t amdgpu_drm_queue_seq;

/*
 * Handle a DRM event
 *
 * Look up the queue entry matching the event's sequence number, remove it
 * from the list, invoke its handler with the frame count and a microsecond
 * timestamp (or its abort callback if the entry was flagged for abort),
 * then free the entry.
 */
void
amdgpu_drm_queue_handler(int fd, unsigned int frame, unsigned int sec,
			 unsigned int usec, void *user_ptr)
{
	uintptr_t seq = (uintptr_t)user_ptr;
	struct amdgpu_drm_queue_entry *e, *tmp;

	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
		if (e->seq == seq) {
			xorg_list_del(&e->list);
			if (e->handler)
				e->handler(e->crtc, frame,
					   (uint64_t)sec * 1000000 + usec,
					   e->data);
			else
				e->abort(e->crtc, e->data);
			free(e);
			break;
		}
	}
}
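
/*
 * Illustrative sketch (not part of this file): amdgpu_drm_queue_handler is
 * meant to be invoked by libdrm's event dispatch.  A caller elsewhere in
 * the driver would typically install it along these lines, using only
 * public libdrm API; the variable names below are made up for the example:
 *
 *	drmEventContext ctx;
 *
 *	memset(&ctx, 0, sizeof(ctx));
 *	ctx.version = 2;
 *	ctx.vblank_handler = amdgpu_drm_queue_handler;
 *	ctx.page_flip_handler = amdgpu_drm_queue_handler;
 *
 *	drmHandleEvent(drm_fd, &ctx);	(when drm_fd becomes readable)
 */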

/*
 * Enqueue a potential DRM response; when the associated event arrives,
 * the data stored here is passed to the handler.
 *
 * Returns the sequence number identifying the entry, or
 * AMDGPU_DRM_QUEUE_ERROR on allocation failure.
 */
uintptr_t
amdgpu_drm_queue_alloc(xf86CrtcPtr crtc, ClientPtr client,
		       uint64_t id, void *data,
		       amdgpu_drm_handler_proc handler,
		       amdgpu_drm_abort_proc abort)
{
	struct amdgpu_drm_queue_entry *e;

	e = calloc(1, sizeof(struct amdgpu_drm_queue_entry));
	if (!e)
		return AMDGPU_DRM_QUEUE_ERROR;

	/* Never hand out the error value as a sequence number */
	if (_X_UNLIKELY(amdgpu_drm_queue_seq == AMDGPU_DRM_QUEUE_ERROR))
		amdgpu_drm_queue_seq++;

	e->seq = amdgpu_drm_queue_seq++;
	e->client = client;
	e->crtc = crtc;
	e->id = id;
	e->data = data;
	e->handler = handler;
	e->abort = abort;

	xorg_list_add(&e->list, &amdgpu_drm_queue);

	return e->seq;
}
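
/*
 * Call pattern sketch (an assumption for illustration, not code from this
 * driver): the returned sequence number is what the caller passes to the
 * kernel as the event user data, e.g. with libdrm's drmModePageFlip().
 * The fd, CRTC and framebuffer names here are placeholders:
 *
 *	uintptr_t seq = amdgpu_drm_queue_alloc(crtc, client, id, data,
 *					       handler, abort);
 *
 *	if (seq == AMDGPU_DRM_QUEUE_ERROR)
 *		return FALSE;
 *	drmModePageFlip(drm_fd, drm_crtc_id, fb_id, DRM_MODE_PAGE_FLIP_EVENT,
 *			(void*)seq);
 */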

/*
 * Abort one queued DRM entry: remove it from the list, call its abort
 * function and free it
 */
static void
amdgpu_drm_abort_one(struct amdgpu_drm_queue_entry *e)
{
	xorg_list_del(&e->list);
	e->abort(e->crtc, e->data);
	free(e);
}

/*
 * Abort all DRM queue entries owned by a client
 *
 * NOTE: This keeps the entries in the list until the DRM event arrives,
 * but then it calls the abort functions instead of the handler
 * functions.
 */
void
amdgpu_drm_abort_client(ClientPtr client)
{
	struct amdgpu_drm_queue_entry *e;

	xorg_list_for_each_entry(e, &amdgpu_drm_queue, list) {
		if (e->client == client)
			e->handler = NULL;
	}
}

/*
 * Abort a specific DRM queue entry, identified by its sequence number
 */
void
amdgpu_drm_abort_entry(uintptr_t seq)
{
	struct amdgpu_drm_queue_entry *e, *tmp;

	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
		if (e->seq == seq) {
			amdgpu_drm_abort_one(e);
			break;
		}
	}
}

/*
 * Abort a specific DRM queue entry, identified by its ID
 */
void
amdgpu_drm_abort_id(uint64_t id)
{
	struct amdgpu_drm_queue_entry *e, *tmp;

	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
		if (e->id == id) {
			amdgpu_drm_abort_one(e);
			break;
		}
	}
}

/*
 * Initialize the DRM event queue
 *
 * Reference counted; only the first call actually initializes the list.
 */
void
amdgpu_drm_queue_init()
{
	if (amdgpu_drm_queue_refcnt++)
		return;

	xorg_list_init(&amdgpu_drm_queue);
}

/*
 * Deinitialize the DRM event queue
 *
 * Aborts any entries still pending for CRTCs of this screen before
 * dropping the reference count.
 */
void
amdgpu_drm_queue_close(ScrnInfoPtr scrn)
{
	struct amdgpu_drm_queue_entry *e, *tmp;

	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
		if (e->crtc->scrn == scrn)
			amdgpu_drm_abort_one(e);
	}

	amdgpu_drm_queue_refcnt--;
}
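
/*
 * Lifecycle sketch (an assumption about typical callers, not taken from
 * this file): init and close are expected to be paired per screen, e.g.
 * at screen creation and teardown.  Because close aborts the screen's
 * remaining entries itself, callers need not flush the queue first:
 *
 *	(screen init path)
 *	amdgpu_drm_queue_init();
 *
 *	(screen close path)
 *	amdgpu_drm_queue_close(pScrn);
 */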