amdgpu_drm_queue.c revision d6c0b56e
1d6c0b56eSmrg/*
2d6c0b56eSmrg * Copyright © 2007 Red Hat, Inc.
3d6c0b56eSmrg * Copyright © 2015 Advanced Micro Devices, Inc.
4d6c0b56eSmrg *
5d6c0b56eSmrg * Permission is hereby granted, free of charge, to any person obtaining a
6d6c0b56eSmrg * copy of this software and associated documentation files (the "Software"),
7d6c0b56eSmrg * to deal in the Software without restriction, including without limitation
8d6c0b56eSmrg * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9d6c0b56eSmrg * and/or sell copies of the Software, and to permit persons to whom the
10d6c0b56eSmrg * Software is furnished to do so, subject to the following conditions:
11d6c0b56eSmrg *
12d6c0b56eSmrg * The above copyright notice and this permission notice (including the next
13d6c0b56eSmrg * paragraph) shall be included in all copies or substantial portions of the
14d6c0b56eSmrg * Software.
15d6c0b56eSmrg *
16d6c0b56eSmrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17d6c0b56eSmrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18d6c0b56eSmrg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19d6c0b56eSmrg * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20d6c0b56eSmrg * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21d6c0b56eSmrg * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22d6c0b56eSmrg * SOFTWARE.
23d6c0b56eSmrg *
24d6c0b56eSmrg * Authors:
25d6c0b56eSmrg *    Dave Airlie <airlied@redhat.com>
26d6c0b56eSmrg *
27d6c0b56eSmrg */
28d6c0b56eSmrg
29d6c0b56eSmrg#ifdef HAVE_CONFIG_H
30d6c0b56eSmrg#include "config.h"
31d6c0b56eSmrg#endif
32d6c0b56eSmrg
33d6c0b56eSmrg#include <xorg-server.h>
34d6c0b56eSmrg
35d6c0b56eSmrg#include "amdgpu_drv.h"
36d6c0b56eSmrg#include "amdgpu_drm_queue.h"
37d6c0b56eSmrg#include "amdgpu_list.h"
38d6c0b56eSmrg
39d6c0b56eSmrg
/* One pending DRM event, linked into the global amdgpu_drm_queue list. */
struct amdgpu_drm_queue_entry {
	struct xorg_list list;			/* node in amdgpu_drm_queue */
	uint64_t id;				/* caller-supplied ID, see amdgpu_drm_abort_id() */
	uintptr_t seq;				/* unique sequence number; passed to the kernel as the
						 * DRM event user data and matched on event delivery */
	void *data;				/* opaque pointer handed to handler/abort */
	ClientPtr client;			/* X client that queued the request, for
						 * amdgpu_drm_abort_client() */
	xf86CrtcPtr crtc;			/* CRTC the event is associated with */
	amdgpu_drm_handler_proc handler;	/* called on event delivery; NULL means the
						 * entry was aborted and abort runs instead */
	amdgpu_drm_abort_proc abort;		/* called when the entry is aborted */
};
50d6c0b56eSmrg
static int amdgpu_drm_queue_refcnt;		/* init() calls minus close() calls */
static struct xorg_list amdgpu_drm_queue;	/* list of pending amdgpu_drm_queue_entry */
static uintptr_t amdgpu_drm_queue_seq;		/* last allocated sequence number; 0 is never handed out */
54d6c0b56eSmrg
55d6c0b56eSmrg
56d6c0b56eSmrg/*
57d6c0b56eSmrg * Handle a DRM event
58d6c0b56eSmrg */
59d6c0b56eSmrgvoid
60d6c0b56eSmrgamdgpu_drm_queue_handler(int fd, unsigned int frame, unsigned int sec,
61d6c0b56eSmrg			 unsigned int usec, void *user_ptr)
62d6c0b56eSmrg{
63d6c0b56eSmrg	uintptr_t seq = (uintptr_t)user_ptr;
64d6c0b56eSmrg	struct amdgpu_drm_queue_entry *e, *tmp;
65d6c0b56eSmrg
66d6c0b56eSmrg	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
67d6c0b56eSmrg		if (e->seq == seq) {
68d6c0b56eSmrg			xorg_list_del(&e->list);
69d6c0b56eSmrg			if (e->handler)
70d6c0b56eSmrg				e->handler(e->crtc, frame,
71d6c0b56eSmrg					   (uint64_t)sec * 1000000 + usec,
72d6c0b56eSmrg					   e->data);
73d6c0b56eSmrg			else
74d6c0b56eSmrg				e->abort(e->crtc, e->data);
75d6c0b56eSmrg			free(e);
76d6c0b56eSmrg			break;
77d6c0b56eSmrg		}
78d6c0b56eSmrg	}
79d6c0b56eSmrg}
80d6c0b56eSmrg
81d6c0b56eSmrg/*
82d6c0b56eSmrg * Enqueue a potential drm response; when the associated response
83d6c0b56eSmrg * appears, we've got data to pass to the handler from here
84d6c0b56eSmrg */
85d6c0b56eSmrguintptr_t
86d6c0b56eSmrgamdgpu_drm_queue_alloc(xf86CrtcPtr crtc, ClientPtr client,
87d6c0b56eSmrg		       uint64_t id, void *data,
88d6c0b56eSmrg		       amdgpu_drm_handler_proc handler,
89d6c0b56eSmrg		       amdgpu_drm_abort_proc abort)
90d6c0b56eSmrg{
91d6c0b56eSmrg	struct amdgpu_drm_queue_entry *e;
92d6c0b56eSmrg
93d6c0b56eSmrg	e = calloc(1, sizeof(struct amdgpu_drm_queue_entry));
94d6c0b56eSmrg	if (!e)
95d6c0b56eSmrg		return NULL;
96d6c0b56eSmrg
97d6c0b56eSmrg	if (!amdgpu_drm_queue_seq)
98d6c0b56eSmrg		amdgpu_drm_queue_seq = 1;
99d6c0b56eSmrg	e->seq = amdgpu_drm_queue_seq++;
100d6c0b56eSmrg	e->client = client;
101d6c0b56eSmrg	e->crtc = crtc;
102d6c0b56eSmrg	e->id = id;
103d6c0b56eSmrg	e->data = data;
104d6c0b56eSmrg	e->handler = handler;
105d6c0b56eSmrg	e->abort = abort;
106d6c0b56eSmrg
107d6c0b56eSmrg	xorg_list_add(&e->list, &amdgpu_drm_queue);
108d6c0b56eSmrg
109d6c0b56eSmrg	return e->seq;
110d6c0b56eSmrg}
111d6c0b56eSmrg
/*
 * Abort one queued DRM entry, removing it
 * from the list, calling the abort function and
 * freeing the memory
 *
 * NOTE: must only be called with an entry still linked into
 * amdgpu_drm_queue; the corresponding DRM event must not be handled
 * afterwards, since the seq -> entry mapping is gone once freed.
 */
static void
amdgpu_drm_abort_one(struct amdgpu_drm_queue_entry *e)
{
	xorg_list_del(&e->list);
	e->abort(e->crtc, e->data);
	free(e);
}
124d6c0b56eSmrg
125d6c0b56eSmrg/*
126d6c0b56eSmrg * Abort drm queue entries for a client
127d6c0b56eSmrg *
128d6c0b56eSmrg * NOTE: This keeps the entries in the list until the DRM event arrives,
129d6c0b56eSmrg * but then it calls the abort functions instead of the handler
130d6c0b56eSmrg * functions.
131d6c0b56eSmrg */
132d6c0b56eSmrgvoid
133d6c0b56eSmrgamdgpu_drm_abort_client(ClientPtr client)
134d6c0b56eSmrg{
135d6c0b56eSmrg	struct amdgpu_drm_queue_entry *e;
136d6c0b56eSmrg
137d6c0b56eSmrg	xorg_list_for_each_entry(e, &amdgpu_drm_queue, list) {
138d6c0b56eSmrg		if (e->client == client)
139d6c0b56eSmrg			e->handler = NULL;
140d6c0b56eSmrg	}
141d6c0b56eSmrg}
142d6c0b56eSmrg
143d6c0b56eSmrg/*
144d6c0b56eSmrg * Abort specific drm queue entry
145d6c0b56eSmrg */
146d6c0b56eSmrgvoid
147d6c0b56eSmrgamdgpu_drm_abort_entry(uintptr_t seq)
148d6c0b56eSmrg{
149d6c0b56eSmrg	struct amdgpu_drm_queue_entry *e, *tmp;
150d6c0b56eSmrg
151d6c0b56eSmrg	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
152d6c0b56eSmrg		if (e->seq == seq) {
153d6c0b56eSmrg			amdgpu_drm_abort_one(e);
154d6c0b56eSmrg			break;
155d6c0b56eSmrg		}
156d6c0b56eSmrg	}
157d6c0b56eSmrg}
158d6c0b56eSmrg
159d6c0b56eSmrg/*
160d6c0b56eSmrg * Abort specific drm queue entry by ID
161d6c0b56eSmrg */
162d6c0b56eSmrgvoid
163d6c0b56eSmrgamdgpu_drm_abort_id(uint64_t id)
164d6c0b56eSmrg{
165d6c0b56eSmrg	struct amdgpu_drm_queue_entry *e, *tmp;
166d6c0b56eSmrg
167d6c0b56eSmrg	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
168d6c0b56eSmrg		if (e->id == id) {
169d6c0b56eSmrg			amdgpu_drm_abort_one(e);
170d6c0b56eSmrg			break;
171d6c0b56eSmrg		}
172d6c0b56eSmrg	}
173d6c0b56eSmrg}
174d6c0b56eSmrg
175d6c0b56eSmrg/*
176d6c0b56eSmrg * Initialize the DRM event queue
177d6c0b56eSmrg */
178d6c0b56eSmrgvoid
179d6c0b56eSmrgamdgpu_drm_queue_init()
180d6c0b56eSmrg{
181d6c0b56eSmrg	if (amdgpu_drm_queue_refcnt++)
182d6c0b56eSmrg		return;
183d6c0b56eSmrg
184d6c0b56eSmrg	xorg_list_init(&amdgpu_drm_queue);
185d6c0b56eSmrg}
186d6c0b56eSmrg
187d6c0b56eSmrg/*
188d6c0b56eSmrg * Deinitialize the DRM event queue
189d6c0b56eSmrg */
190d6c0b56eSmrgvoid
191d6c0b56eSmrgamdgpu_drm_queue_close(ScrnInfoPtr scrn)
192d6c0b56eSmrg{
193d6c0b56eSmrg	struct amdgpu_drm_queue_entry *e, *tmp;
194d6c0b56eSmrg
195d6c0b56eSmrg	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
196d6c0b56eSmrg		if (e->crtc->scrn == scrn)
197d6c0b56eSmrg			amdgpu_drm_abort_one(e);
198d6c0b56eSmrg	}
199d6c0b56eSmrg
200d6c0b56eSmrg	amdgpu_drm_queue_refcnt--;
201d6c0b56eSmrg}
202