amdgpu_drm_queue.c revision 24b90cf4
/*
 * Copyright © 2007 Red Hat, Inc.
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <xorg-server.h>
#include <X11/Xdefs.h>
#include <list.h>

#include "amdgpu_drv.h"
#include "amdgpu_drm_queue.h"


struct amdgpu_drm_queue_entry {
	struct xorg_list list;
	uint64_t id;
	uintptr_t seq;
	void *data;
	ClientPtr client;
	xf86CrtcPtr crtc;
	amdgpu_drm_handler_proc handler;
	amdgpu_drm_abort_proc abort;
};

static int amdgpu_drm_queue_refcnt;
static struct xorg_list amdgpu_drm_queue;
static uintptr_t amdgpu_drm_queue_seq;


/*
 * Handle a DRM event
 */
void
amdgpu_drm_queue_handler(int fd, unsigned int frame, unsigned int sec,
			 unsigned int usec, void *user_ptr)
{
	uintptr_t seq = (uintptr_t)user_ptr;
	struct amdgpu_drm_queue_entry *e, *tmp;

	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
		if (e->seq == seq) {
			xorg_list_del(&e->list);
			if (e->handler)
				e->handler(e->crtc, frame,
					   (uint64_t)sec * 1000000 + usec,
					   e->data);
			else
				e->abort(e->crtc, e->data);
			free(e);
			break;
		}
	}
}

/*
 * Enqueue a potential drm response; when the associated response
 * appears, we've got data to pass to the handler from here
 */
uintptr_t
amdgpu_drm_queue_alloc(xf86CrtcPtr crtc, ClientPtr client,
		       uint64_t id, void *data,
		       amdgpu_drm_handler_proc handler,
		       amdgpu_drm_abort_proc abort)
{
	struct amdgpu_drm_queue_entry *e;

	e = calloc(1, sizeof(struct amdgpu_drm_queue_entry));
	if (!e)
		return AMDGPU_DRM_QUEUE_ERROR;

	if (_X_UNLIKELY(amdgpu_drm_queue_seq == AMDGPU_DRM_QUEUE_ERROR))
		amdgpu_drm_queue_seq++;

	e->seq = amdgpu_drm_queue_seq++;
	e->client = client;
	e->crtc = crtc;
	e->id = id;
	e->data = data;
	e->handler = handler;
	e->abort = abort;

	xorg_list_append(&e->list, &amdgpu_drm_queue);

	return e->seq;
}

/*
 * Abort one queued DRM entry, removing it
 * from the list, calling the abort function and
 * freeing the memory
 */
static void
amdgpu_drm_abort_one(struct amdgpu_drm_queue_entry *e)
{
	xorg_list_del(&e->list);
	e->abort(e->crtc, e->data);
	free(e);
}

/*
 * Abort drm queue entries for a client
 *
 * NOTE: This keeps the entries in the list until the DRM event arrives,
 * but then it calls the abort functions instead of the handler
 * functions.
 */
void
amdgpu_drm_abort_client(ClientPtr client)
{
	struct amdgpu_drm_queue_entry *e;

	xorg_list_for_each_entry(e, &amdgpu_drm_queue, list) {
		if (e->client == client)
			e->handler = NULL;
	}
}

/*
 * Abort specific drm queue entry
 */
void
amdgpu_drm_abort_entry(uintptr_t seq)
{
	struct amdgpu_drm_queue_entry *e, *tmp;

	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
		if (e->seq == seq) {
			amdgpu_drm_abort_one(e);
			break;
		}
	}
}

/*
 * Abort specific drm queue entry by ID
 */
void
amdgpu_drm_abort_id(uint64_t id)
{
	struct amdgpu_drm_queue_entry *e, *tmp;

	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
		if (e->id == id) {
			amdgpu_drm_abort_one(e);
			break;
		}
	}
}

/*
 * Initialize the DRM event queue
 */
void
amdgpu_drm_queue_init()
{
	if (amdgpu_drm_queue_refcnt++)
		return;

	xorg_list_init(&amdgpu_drm_queue);
}

/*
 * Deinitialize the DRM event queue
 */
void
amdgpu_drm_queue_close(ScrnInfoPtr scrn)
{
	struct amdgpu_drm_queue_entry *e, *tmp;

	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
		if (e->crtc->scrn == scrn)
			amdgpu_drm_abort_one(e);
	}

	amdgpu_drm_queue_refcnt--;
}
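/*
 * Usage sketch (illustrative only, not part of revision 24b90cf4): a caller
 * typically reserves a queue entry before submitting a page flip and passes
 * the returned sequence number as the DRM event user data, so that
 * amdgpu_drm_queue_handler can find the entry again when the event arrives.
 * The names example_schedule_flip, example_flip_handler, example_flip_abort
 * and drm_crtc_id below are hypothetical; only the amdgpu_drm_queue_* calls
 * and drmModePageFlip are real.  Kept under #if 0 so it never compiles into
 * the driver.
 */
#if 0
static Bool
example_schedule_flip(xf86CrtcPtr crtc, ClientPtr client, uint64_t event_id,
		      void *flip_data, int drm_fd, uint32_t drm_crtc_id,
		      uint32_t fb_id)
{
	uintptr_t drm_queue_seq;

	/* Reserve an entry; example_flip_handler/example_flip_abort are
	 * assumed to match amdgpu_drm_handler_proc/amdgpu_drm_abort_proc.
	 */
	drm_queue_seq = amdgpu_drm_queue_alloc(crtc, client, event_id,
					       flip_data,
					       example_flip_handler,
					       example_flip_abort);
	if (drm_queue_seq == AMDGPU_DRM_QUEUE_ERROR)
		return FALSE;

	/* The sequence number travels through the kernel as the event's
	 * user data and comes back to amdgpu_drm_queue_handler.
	 */
	if (drmModePageFlip(drm_fd, drm_crtc_id, fb_id,
			    DRM_MODE_PAGE_FLIP_EVENT,
			    (void*)drm_queue_seq) != 0) {
		/* On failure, drop the entry so its abort callback runs. */
		amdgpu_drm_abort_entry(drm_queue_seq);
		return FALSE;
	}

	return TRUE;
}
#endif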