radeon_drm_queue.c revision 0d16fef4
/*
 * Copyright © 2007 Red Hat, Inc.
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <xorg-server.h>

#include "radeon.h"
#include "radeon_drm_queue.h"
#include "radeon_list.h"


struct radeon_drm_queue_entry {
    struct xorg_list list;
    uint64_t id;
    uintptr_t seq;
    void *data;
    ClientPtr client;
    xf86CrtcPtr crtc;
    radeon_drm_handler_proc handler;
    radeon_drm_abort_proc abort;
};

static int radeon_drm_queue_refcnt;
static struct xorg_list radeon_drm_queue;
static uintptr_t radeon_drm_queue_seq;


/*
 * Handle a DRM event
 */
void
radeon_drm_queue_handler(int fd, unsigned int frame, unsigned int sec,
                         unsigned int usec, void *user_ptr)
{
    uintptr_t seq = (uintptr_t)user_ptr;
    struct radeon_drm_queue_entry *e, *tmp;

    xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_queue, list) {
        if (e->seq == seq) {
            xorg_list_del(&e->list);
            if (e->handler)
                e->handler(e->crtc, frame,
                           (uint64_t)sec * 1000000 + usec,
                           e->data);
            else
                e->abort(e->crtc, e->data);
            free(e);
            break;
        }
    }
}

/*
 * Enqueue a potential drm response; when the associated response
 * appears, we've got data to pass to the handler from here
 */
uintptr_t
radeon_drm_queue_alloc(xf86CrtcPtr crtc, ClientPtr client,
                       uint64_t id, void *data,
                       radeon_drm_handler_proc handler,
                       radeon_drm_abort_proc abort)
{
    struct radeon_drm_queue_entry *e;

    e = calloc(1, sizeof(struct radeon_drm_queue_entry));
    if (!e)
        return 0;

    if (!radeon_drm_queue_seq)
        radeon_drm_queue_seq = 1;
    e->seq = radeon_drm_queue_seq++;
    e->client = client;
    e->crtc = crtc;
    e->id = id;
    e->data = data;
    e->handler = handler;
    e->abort = abort;

    xorg_list_add(&e->list, &radeon_drm_queue);

    return e->seq;
}

/*
 * Abort one queued DRM entry, removing it
 * from the list, calling the abort function and
 * freeing the memory
 */
static void
radeon_drm_abort_one(struct radeon_drm_queue_entry *e)
{
    xorg_list_del(&e->list);
    e->abort(e->crtc, e->data);
    free(e);
}

/*
 * Abort drm queue entries for a client
 *
 * NOTE: This keeps the entries in the list until the DRM event arrives,
 * but then it calls the abort functions instead of the handler
 * functions.
 */
void
radeon_drm_abort_client(ClientPtr client)
{
    struct radeon_drm_queue_entry *e;

    xorg_list_for_each_entry(e, &radeon_drm_queue, list) {
        if (e->client == client)
            e->handler = NULL;
    }
}

/*
 * Abort specific drm queue entry
 */
void
radeon_drm_abort_entry(uintptr_t seq)
{
    struct radeon_drm_queue_entry *e, *tmp;

    xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_queue, list) {
        if (e->seq == seq) {
            radeon_drm_abort_one(e);
            break;
        }
    }
}

/*
 * Abort specific drm queue entry by ID
 */
void
radeon_drm_abort_id(uint64_t id)
{
    struct radeon_drm_queue_entry *e, *tmp;

    xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_queue, list) {
        if (e->id == id) {
            radeon_drm_abort_one(e);
            break;
        }
    }
}

/*
 * Initialize the DRM event queue
 */
void
radeon_drm_queue_init()
{
    if (radeon_drm_queue_refcnt++)
        return;

    xorg_list_init(&radeon_drm_queue);
}

/*
 * Deinitialize the DRM event queue
 */
void
radeon_drm_queue_close(ScrnInfoPtr scrn)
{
    struct radeon_drm_queue_entry *e, *tmp;

    xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_queue, list) {
        if (e->crtc->scrn == scrn)
            radeon_drm_abort_one(e);
    }

    radeon_drm_queue_refcnt--;
}
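
The listing above covers allocation, dispatch and teardown, but not how a caller ties the queue into libdrm's event loop. The sketch below illustrates that flow under stated assumptions: the callback signatures are inferred from the call sites in radeon_drm_queue_handler (the real typedefs live in radeon_drm_queue.h), the example_* helpers and the id value are hypothetical, and drmModePageFlip, drmHandleEvent and drmEventContext are standard libdrm interfaces rather than anything defined by this file.

/*
 * Usage sketch (illustrative, not part of this revision): queue an entry,
 * hand its sequence number to the kernel as the DRM event user data, and
 * let the libdrm dispatch loop route the completion back through
 * radeon_drm_queue_handler.
 */
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Callback shapes inferred from how e->handler and e->abort are invoked. */
static void example_flip_handler(xf86CrtcPtr crtc, uint32_t frame,
                                 uint64_t usec, void *data)
{
    /* Complete the client's request using the queued data. */
}

static void example_flip_abort(xf86CrtcPtr crtc, void *data)
{
    /* Release per-flip resources when the event is aborted instead. */
    free(data);
}

static Bool example_schedule_flip(int drm_fd, xf86CrtcPtr crtc, uint32_t crtc_id,
                                  uint32_t fb_id, ClientPtr client, void *flip_data)
{
    /* The returned sequence number doubles as the DRM event user data. */
    uintptr_t seq = radeon_drm_queue_alloc(crtc, client, 1 /* hypothetical id */,
                                           flip_data, example_flip_handler,
                                           example_flip_abort);
    if (!seq)
        return FALSE;

    if (drmModePageFlip(drm_fd, crtc_id, fb_id, DRM_MODE_PAGE_FLIP_EVENT,
                        (void *)seq) != 0) {
        /* The kernel rejected the flip: drop the entry via its abort callback. */
        radeon_drm_abort_entry(seq);
        return FALSE;
    }

    return TRUE;
}

/*
 * radeon_drm_queue_handler matches libdrm's event callback signature, so it
 * can serve as both the vblank and the page-flip handler in the event loop.
 */
static void example_drain_events(int drm_fd)
{
    drmEventContext ctx = {
        .version = DRM_EVENT_CONTEXT_VERSION,
        .vblank_handler = radeon_drm_queue_handler,
        .page_flip_handler = radeon_drm_queue_handler,
    };

    drmHandleEvent(drm_fd, &ctx);
}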