/* amdgpu_drm_queue.c — revision 504d986f */
/*
 * Copyright © 2007 Red Hat, Inc.
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
23d6c0b56eSmrg * 24d6c0b56eSmrg * Authors: 25d6c0b56eSmrg * Dave Airlie <airlied@redhat.com> 26d6c0b56eSmrg * 27d6c0b56eSmrg */ 28d6c0b56eSmrg 29d6c0b56eSmrg#ifdef HAVE_CONFIG_H 30d6c0b56eSmrg#include "config.h" 31d6c0b56eSmrg#endif 32d6c0b56eSmrg 33d6c0b56eSmrg#include <xorg-server.h> 34d6c0b56eSmrg 35d6c0b56eSmrg#include "amdgpu_drv.h" 36d6c0b56eSmrg#include "amdgpu_drm_queue.h" 37d6c0b56eSmrg#include "amdgpu_list.h" 38d6c0b56eSmrg 39d6c0b56eSmrg 40d6c0b56eSmrgstruct amdgpu_drm_queue_entry { 41d6c0b56eSmrg struct xorg_list list; 42d6c0b56eSmrg uint64_t id; 43d6c0b56eSmrg uintptr_t seq; 44d6c0b56eSmrg void *data; 45d6c0b56eSmrg ClientPtr client; 46d6c0b56eSmrg xf86CrtcPtr crtc; 47d6c0b56eSmrg amdgpu_drm_handler_proc handler; 48d6c0b56eSmrg amdgpu_drm_abort_proc abort; 49d6c0b56eSmrg}; 50d6c0b56eSmrg 51d6c0b56eSmrgstatic int amdgpu_drm_queue_refcnt; 52d6c0b56eSmrgstatic struct xorg_list amdgpu_drm_queue; 53d6c0b56eSmrgstatic uintptr_t amdgpu_drm_queue_seq; 54d6c0b56eSmrg 55d6c0b56eSmrg 56d6c0b56eSmrg/* 57d6c0b56eSmrg * Handle a DRM event 58d6c0b56eSmrg */ 59d6c0b56eSmrgvoid 60d6c0b56eSmrgamdgpu_drm_queue_handler(int fd, unsigned int frame, unsigned int sec, 61d6c0b56eSmrg unsigned int usec, void *user_ptr) 62d6c0b56eSmrg{ 63d6c0b56eSmrg uintptr_t seq = (uintptr_t)user_ptr; 64d6c0b56eSmrg struct amdgpu_drm_queue_entry *e, *tmp; 65d6c0b56eSmrg 66d6c0b56eSmrg xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) { 67d6c0b56eSmrg if (e->seq == seq) { 68d6c0b56eSmrg xorg_list_del(&e->list); 69d6c0b56eSmrg if (e->handler) 70d6c0b56eSmrg e->handler(e->crtc, frame, 71d6c0b56eSmrg (uint64_t)sec * 1000000 + usec, 72d6c0b56eSmrg e->data); 73d6c0b56eSmrg else 74d6c0b56eSmrg e->abort(e->crtc, e->data); 75d6c0b56eSmrg free(e); 76d6c0b56eSmrg break; 77d6c0b56eSmrg } 78d6c0b56eSmrg } 79d6c0b56eSmrg} 80d6c0b56eSmrg 81d6c0b56eSmrg/* 82d6c0b56eSmrg * Enqueue a potential drm response; when the associated response 83d6c0b56eSmrg * appears, we've got data to pass to the handler 
from here 84d6c0b56eSmrg */ 85d6c0b56eSmrguintptr_t 86d6c0b56eSmrgamdgpu_drm_queue_alloc(xf86CrtcPtr crtc, ClientPtr client, 87d6c0b56eSmrg uint64_t id, void *data, 88d6c0b56eSmrg amdgpu_drm_handler_proc handler, 89d6c0b56eSmrg amdgpu_drm_abort_proc abort) 90d6c0b56eSmrg{ 91d6c0b56eSmrg struct amdgpu_drm_queue_entry *e; 92d6c0b56eSmrg 93d6c0b56eSmrg e = calloc(1, sizeof(struct amdgpu_drm_queue_entry)); 94d6c0b56eSmrg if (!e) 95504d986fSmrg return AMDGPU_DRM_QUEUE_ERROR; 96504d986fSmrg 97504d986fSmrg if (_X_UNLIKELY(amdgpu_drm_queue_seq == AMDGPU_DRM_QUEUE_ERROR)) 98504d986fSmrg amdgpu_drm_queue_seq++; 99d6c0b56eSmrg 100d6c0b56eSmrg e->seq = amdgpu_drm_queue_seq++; 101d6c0b56eSmrg e->client = client; 102d6c0b56eSmrg e->crtc = crtc; 103d6c0b56eSmrg e->id = id; 104d6c0b56eSmrg e->data = data; 105d6c0b56eSmrg e->handler = handler; 106d6c0b56eSmrg e->abort = abort; 107d6c0b56eSmrg 108d6c0b56eSmrg xorg_list_add(&e->list, &amdgpu_drm_queue); 109d6c0b56eSmrg 110d6c0b56eSmrg return e->seq; 111d6c0b56eSmrg} 112d6c0b56eSmrg 113d6c0b56eSmrg/* 114d6c0b56eSmrg * Abort one queued DRM entry, removing it 115d6c0b56eSmrg * from the list, calling the abort function and 116d6c0b56eSmrg * freeing the memory 117d6c0b56eSmrg */ 118d6c0b56eSmrgstatic void 119d6c0b56eSmrgamdgpu_drm_abort_one(struct amdgpu_drm_queue_entry *e) 120d6c0b56eSmrg{ 121d6c0b56eSmrg xorg_list_del(&e->list); 122d6c0b56eSmrg e->abort(e->crtc, e->data); 123d6c0b56eSmrg free(e); 124d6c0b56eSmrg} 125d6c0b56eSmrg 126d6c0b56eSmrg/* 127d6c0b56eSmrg * Abort drm queue entries for a client 128d6c0b56eSmrg * 129d6c0b56eSmrg * NOTE: This keeps the entries in the list until the DRM event arrives, 130d6c0b56eSmrg * but then it calls the abort functions instead of the handler 131d6c0b56eSmrg * functions. 
132d6c0b56eSmrg */ 133d6c0b56eSmrgvoid 134d6c0b56eSmrgamdgpu_drm_abort_client(ClientPtr client) 135d6c0b56eSmrg{ 136d6c0b56eSmrg struct amdgpu_drm_queue_entry *e; 137d6c0b56eSmrg 138d6c0b56eSmrg xorg_list_for_each_entry(e, &amdgpu_drm_queue, list) { 139d6c0b56eSmrg if (e->client == client) 140d6c0b56eSmrg e->handler = NULL; 141d6c0b56eSmrg } 142d6c0b56eSmrg} 143d6c0b56eSmrg 144d6c0b56eSmrg/* 145d6c0b56eSmrg * Abort specific drm queue entry 146d6c0b56eSmrg */ 147d6c0b56eSmrgvoid 148d6c0b56eSmrgamdgpu_drm_abort_entry(uintptr_t seq) 149d6c0b56eSmrg{ 150d6c0b56eSmrg struct amdgpu_drm_queue_entry *e, *tmp; 151d6c0b56eSmrg 152d6c0b56eSmrg xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) { 153d6c0b56eSmrg if (e->seq == seq) { 154d6c0b56eSmrg amdgpu_drm_abort_one(e); 155d6c0b56eSmrg break; 156d6c0b56eSmrg } 157d6c0b56eSmrg } 158d6c0b56eSmrg} 159d6c0b56eSmrg 160d6c0b56eSmrg/* 161d6c0b56eSmrg * Abort specific drm queue entry by ID 162d6c0b56eSmrg */ 163d6c0b56eSmrgvoid 164d6c0b56eSmrgamdgpu_drm_abort_id(uint64_t id) 165d6c0b56eSmrg{ 166d6c0b56eSmrg struct amdgpu_drm_queue_entry *e, *tmp; 167d6c0b56eSmrg 168d6c0b56eSmrg xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) { 169d6c0b56eSmrg if (e->id == id) { 170d6c0b56eSmrg amdgpu_drm_abort_one(e); 171d6c0b56eSmrg break; 172d6c0b56eSmrg } 173d6c0b56eSmrg } 174d6c0b56eSmrg} 175d6c0b56eSmrg 176d6c0b56eSmrg/* 177d6c0b56eSmrg * Initialize the DRM event queue 178d6c0b56eSmrg */ 179d6c0b56eSmrgvoid 180d6c0b56eSmrgamdgpu_drm_queue_init() 181d6c0b56eSmrg{ 182d6c0b56eSmrg if (amdgpu_drm_queue_refcnt++) 183d6c0b56eSmrg return; 184d6c0b56eSmrg 185d6c0b56eSmrg xorg_list_init(&amdgpu_drm_queue); 186d6c0b56eSmrg} 187d6c0b56eSmrg 188d6c0b56eSmrg/* 189d6c0b56eSmrg * Deinitialize the DRM event queue 190d6c0b56eSmrg */ 191d6c0b56eSmrgvoid 192d6c0b56eSmrgamdgpu_drm_queue_close(ScrnInfoPtr scrn) 193d6c0b56eSmrg{ 194d6c0b56eSmrg struct amdgpu_drm_queue_entry *e, *tmp; 195d6c0b56eSmrg 196d6c0b56eSmrg 
xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) { 197d6c0b56eSmrg if (e->crtc->scrn == scrn) 198d6c0b56eSmrg amdgpu_drm_abort_one(e); 199d6c0b56eSmrg } 200d6c0b56eSmrg 201d6c0b56eSmrg amdgpu_drm_queue_refcnt--; 202d6c0b56eSmrg} 203