/*
 * Copyright © 2007 Red Hat, Inc.
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
230d16fef4Smrg * 240d16fef4Smrg * Authors: 250d16fef4Smrg * Dave Airlie <airlied@redhat.com> 260d16fef4Smrg * 270d16fef4Smrg */ 280d16fef4Smrg 290d16fef4Smrg#ifdef HAVE_CONFIG_H 300d16fef4Smrg#include "config.h" 310d16fef4Smrg#endif 320d16fef4Smrg 33446f62d6Smrg#include <errno.h> 34446f62d6Smrg 350d16fef4Smrg#include <xorg-server.h> 368bf5c682Smrg#include <X11/Xdefs.h> 378bf5c682Smrg#include <list.h> 380d16fef4Smrg 390d16fef4Smrg#include "radeon.h" 400d16fef4Smrg#include "radeon_drm_queue.h" 410d16fef4Smrg 420d16fef4Smrg 430d16fef4Smrgstruct radeon_drm_queue_entry { 440d16fef4Smrg struct xorg_list list; 4539413783Smrg uint64_t usec; 460d16fef4Smrg uint64_t id; 470d16fef4Smrg uintptr_t seq; 480d16fef4Smrg void *data; 490d16fef4Smrg ClientPtr client; 500d16fef4Smrg xf86CrtcPtr crtc; 510d16fef4Smrg radeon_drm_handler_proc handler; 520d16fef4Smrg radeon_drm_abort_proc abort; 53446f62d6Smrg Bool is_flip; 5439413783Smrg unsigned int frame; 550d16fef4Smrg}; 560d16fef4Smrg 570d16fef4Smrgstatic int radeon_drm_queue_refcnt; 580d16fef4Smrgstatic struct xorg_list radeon_drm_queue; 5939413783Smrgstatic struct xorg_list radeon_drm_flip_signalled; 6039413783Smrgstatic struct xorg_list radeon_drm_vblank_signalled; 61446f62d6Smrgstatic struct xorg_list radeon_drm_vblank_deferred; 620d16fef4Smrgstatic uintptr_t radeon_drm_queue_seq; 630d16fef4Smrg 640d16fef4Smrg 650d16fef4Smrg/* 6639413783Smrg * Process a DRM event 670d16fef4Smrg */ 6839413783Smrgstatic void 6939413783Smrgradeon_drm_queue_handle_one(struct radeon_drm_queue_entry *e) 700d16fef4Smrg{ 7139413783Smrg xorg_list_del(&e->list); 7239413783Smrg if (e->handler) { 7339413783Smrg e->handler(e->crtc, e->frame, e->usec, e->data); 7439413783Smrg } else 7539413783Smrg e->abort(e->crtc, e->data); 7639413783Smrg free(e); 7739413783Smrg} 7839413783Smrg 79446f62d6Smrg/* 80446f62d6Smrg * Abort one queued DRM entry, removing it 81446f62d6Smrg * from the list, calling the abort function and 82446f62d6Smrg * freeing the memory 
83446f62d6Smrg */ 8439413783Smrgstatic void 85446f62d6Smrgradeon_drm_abort_one(struct radeon_drm_queue_entry *e) 86446f62d6Smrg{ 87446f62d6Smrg xorg_list_del(&e->list); 88446f62d6Smrg e->abort(e->crtc, e->data); 89446f62d6Smrg free(e); 90446f62d6Smrg} 91446f62d6Smrg 92446f62d6Smrgstatic void 93446f62d6Smrgradeon_drm_queue_handler(int fd, unsigned int frame, unsigned int sec, 94446f62d6Smrg unsigned int usec, void *user_ptr) 9539413783Smrg{ 9639413783Smrg uintptr_t seq = (uintptr_t)user_ptr; 9739413783Smrg struct radeon_drm_queue_entry *e, *tmp; 9839413783Smrg 9939413783Smrg xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_queue, list) { 10039413783Smrg if (e->seq == seq) { 10139413783Smrg if (!e->handler) { 102446f62d6Smrg radeon_drm_abort_one(e); 10339413783Smrg break; 10439413783Smrg } 10539413783Smrg 10639413783Smrg xorg_list_del(&e->list); 10739413783Smrg e->usec = (uint64_t)sec * 1000000 + usec; 10839413783Smrg e->frame = frame; 109446f62d6Smrg xorg_list_append(&e->list, e->is_flip ? 110446f62d6Smrg &radeon_drm_flip_signalled : 111446f62d6Smrg &radeon_drm_vblank_signalled); 11239413783Smrg break; 1130d16fef4Smrg } 11439413783Smrg } 11539413783Smrg} 11639413783Smrg 11739413783Smrg/* 118446f62d6Smrg * Handle signalled vblank events. If we're waiting for a flip event, 119446f62d6Smrg * put events for that CRTC in the vblank_deferred list. 
12039413783Smrg */ 12139413783Smrgstatic void 122446f62d6Smrgradeon_drm_handle_vblank_signalled(void) 12339413783Smrg{ 124446f62d6Smrg drmmode_crtc_private_ptr drmmode_crtc; 125446f62d6Smrg struct radeon_drm_queue_entry *e; 12639413783Smrg 127446f62d6Smrg while (!xorg_list_is_empty(&radeon_drm_vblank_signalled)) { 128446f62d6Smrg e = xorg_list_first_entry(&radeon_drm_vblank_signalled, 129446f62d6Smrg struct radeon_drm_queue_entry, list); 130446f62d6Smrg drmmode_crtc = e->crtc->driver_private; 131446f62d6Smrg 132446f62d6Smrg if (drmmode_crtc->wait_flip_nesting_level == 0) { 133446f62d6Smrg radeon_drm_queue_handle_one(e); 134446f62d6Smrg continue; 135446f62d6Smrg } 136446f62d6Smrg 137446f62d6Smrg xorg_list_del(&e->list); 138446f62d6Smrg xorg_list_append(&e->list, &radeon_drm_vblank_deferred); 139446f62d6Smrg } 14039413783Smrg} 14139413783Smrg 14239413783Smrg/* 14339413783Smrg * Handle deferred DRM vblank events 14439413783Smrg * 14539413783Smrg * This function must be called after radeon_drm_wait_pending_flip, once 14639413783Smrg * it's safe to attempt queueing a flip again 14739413783Smrg */ 14839413783Smrgvoid 14939413783Smrgradeon_drm_queue_handle_deferred(xf86CrtcPtr crtc) 15039413783Smrg{ 15139413783Smrg drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private; 15239413783Smrg struct radeon_drm_queue_entry *e, *tmp; 15339413783Smrg 15439413783Smrg if (drmmode_crtc->wait_flip_nesting_level == 0 || 15539413783Smrg --drmmode_crtc->wait_flip_nesting_level > 0) 15639413783Smrg return; 15739413783Smrg 158446f62d6Smrg /* Put previously deferred vblank events for this CRTC back in the 159446f62d6Smrg * signalled queue 160446f62d6Smrg */ 161446f62d6Smrg xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_vblank_deferred, list) { 162446f62d6Smrg if (e->crtc != crtc) 163446f62d6Smrg continue; 16439413783Smrg 165446f62d6Smrg xorg_list_del(&e->list); 166446f62d6Smrg xorg_list_append(&e->list, &radeon_drm_vblank_signalled); 16739413783Smrg } 168446f62d6Smrg 169446f62d6Smrg 
radeon_drm_handle_vblank_signalled(); 1700d16fef4Smrg} 1710d16fef4Smrg 1720d16fef4Smrg/* 1730d16fef4Smrg * Enqueue a potential drm response; when the associated response 1740d16fef4Smrg * appears, we've got data to pass to the handler from here 1750d16fef4Smrg */ 1760d16fef4Smrguintptr_t 1770d16fef4Smrgradeon_drm_queue_alloc(xf86CrtcPtr crtc, ClientPtr client, 1780d16fef4Smrg uint64_t id, void *data, 1790d16fef4Smrg radeon_drm_handler_proc handler, 180446f62d6Smrg radeon_drm_abort_proc abort, 181446f62d6Smrg Bool is_flip) 1820d16fef4Smrg{ 1830d16fef4Smrg struct radeon_drm_queue_entry *e; 1840d16fef4Smrg 1850d16fef4Smrg e = calloc(1, sizeof(struct radeon_drm_queue_entry)); 1860d16fef4Smrg if (!e) 1877314432eSmrg return RADEON_DRM_QUEUE_ERROR; 1887314432eSmrg 1897314432eSmrg if (_X_UNLIKELY(radeon_drm_queue_seq == RADEON_DRM_QUEUE_ERROR)) 1907314432eSmrg radeon_drm_queue_seq++; 1910d16fef4Smrg 1920d16fef4Smrg e->seq = radeon_drm_queue_seq++; 1930d16fef4Smrg e->client = client; 1940d16fef4Smrg e->crtc = crtc; 1950d16fef4Smrg e->id = id; 1960d16fef4Smrg e->data = data; 1970d16fef4Smrg e->handler = handler; 1980d16fef4Smrg e->abort = abort; 199446f62d6Smrg e->is_flip = is_flip; 2000d16fef4Smrg 2018bf5c682Smrg xorg_list_append(&e->list, &radeon_drm_queue); 2020d16fef4Smrg 2030d16fef4Smrg return e->seq; 2040d16fef4Smrg} 2050d16fef4Smrg 2060d16fef4Smrg/* 2070d16fef4Smrg * Abort drm queue entries for a client 2080d16fef4Smrg * 2090d16fef4Smrg * NOTE: This keeps the entries in the list until the DRM event arrives, 2100d16fef4Smrg * but then it calls the abort functions instead of the handler 2110d16fef4Smrg * functions. 
2120d16fef4Smrg */ 2130d16fef4Smrgvoid 2140d16fef4Smrgradeon_drm_abort_client(ClientPtr client) 2150d16fef4Smrg{ 2160d16fef4Smrg struct radeon_drm_queue_entry *e; 2170d16fef4Smrg 2180d16fef4Smrg xorg_list_for_each_entry(e, &radeon_drm_queue, list) { 2190d16fef4Smrg if (e->client == client) 2200d16fef4Smrg e->handler = NULL; 2210d16fef4Smrg } 2220d16fef4Smrg} 2230d16fef4Smrg 2240d16fef4Smrg/* 2250d16fef4Smrg * Abort specific drm queue entry 2260d16fef4Smrg */ 2270d16fef4Smrgvoid 2280d16fef4Smrgradeon_drm_abort_entry(uintptr_t seq) 2290d16fef4Smrg{ 2300d16fef4Smrg struct radeon_drm_queue_entry *e, *tmp; 2310d16fef4Smrg 23239413783Smrg if (seq == RADEON_DRM_QUEUE_ERROR) 23339413783Smrg return; 23439413783Smrg 23539413783Smrg xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_vblank_signalled, list) { 23639413783Smrg if (e->seq == seq) { 23739413783Smrg radeon_drm_abort_one(e); 23839413783Smrg return; 23939413783Smrg } 24039413783Smrg } 24139413783Smrg 242446f62d6Smrg xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_vblank_deferred, list) { 243446f62d6Smrg if (e->seq == seq) { 244446f62d6Smrg radeon_drm_abort_one(e); 245446f62d6Smrg return; 246446f62d6Smrg } 247446f62d6Smrg } 248446f62d6Smrg 2490d16fef4Smrg xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_queue, list) { 2500d16fef4Smrg if (e->seq == seq) { 2510d16fef4Smrg radeon_drm_abort_one(e); 2520d16fef4Smrg break; 2530d16fef4Smrg } 2540d16fef4Smrg } 2550d16fef4Smrg} 2560d16fef4Smrg 2570d16fef4Smrg/* 2580d16fef4Smrg * Abort specific drm queue entry by ID 2590d16fef4Smrg */ 2600d16fef4Smrgvoid 2610d16fef4Smrgradeon_drm_abort_id(uint64_t id) 2620d16fef4Smrg{ 2630d16fef4Smrg struct radeon_drm_queue_entry *e, *tmp; 2640d16fef4Smrg 2650d16fef4Smrg xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_queue, list) { 2660d16fef4Smrg if (e->id == id) { 2670d16fef4Smrg radeon_drm_abort_one(e); 2680d16fef4Smrg break; 2690d16fef4Smrg } 2700d16fef4Smrg } 2710d16fef4Smrg} 2720d16fef4Smrg 27339413783Smrg/* 27439413783Smrg * 
drmHandleEvent wrapper 27539413783Smrg */ 27639413783Smrgint 27739413783Smrgradeon_drm_handle_event(int fd, drmEventContext *event_context) 27839413783Smrg{ 279446f62d6Smrg struct radeon_drm_queue_entry *e; 28039413783Smrg int r; 28139413783Smrg 282446f62d6Smrg /* Retry drmHandleEvent if it was interrupted by a signal in read() */ 283446f62d6Smrg do { 284446f62d6Smrg r = drmHandleEvent(fd, event_context); 285446f62d6Smrg } while (r < 0 && (errno == EINTR || errno == EAGAIN)); 286446f62d6Smrg 287446f62d6Smrg if (r < 0) { 288446f62d6Smrg static Bool printed; 289446f62d6Smrg 290446f62d6Smrg if (!printed) { 291446f62d6Smrg ErrorF("%s: drmHandleEvent returned %d, errno=%d (%s)\n", 292446f62d6Smrg __func__, r, errno, strerror(errno)); 293446f62d6Smrg printed = TRUE; 294446f62d6Smrg } 295446f62d6Smrg } 29639413783Smrg 29739413783Smrg while (!xorg_list_is_empty(&radeon_drm_flip_signalled)) { 29839413783Smrg e = xorg_list_first_entry(&radeon_drm_flip_signalled, 29939413783Smrg struct radeon_drm_queue_entry, list); 30039413783Smrg radeon_drm_queue_handle_one(e); 30139413783Smrg } 30239413783Smrg 303446f62d6Smrg radeon_drm_handle_vblank_signalled(); 30439413783Smrg 30539413783Smrg return r; 30639413783Smrg} 30739413783Smrg 30839413783Smrg/* 30939413783Smrg * Wait for pending page flip on given CRTC to complete 31039413783Smrg */ 31139413783Smrgvoid radeon_drm_wait_pending_flip(xf86CrtcPtr crtc) 31239413783Smrg{ 31339413783Smrg drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private; 31439413783Smrg RADEONEntPtr pRADEONEnt = RADEONEntPriv(crtc->scrn); 31539413783Smrg struct radeon_drm_queue_entry *e; 31639413783Smrg 31739413783Smrg drmmode_crtc->wait_flip_nesting_level++; 31839413783Smrg 31939413783Smrg while (drmmode_crtc->flip_pending && 32039413783Smrg !xorg_list_is_empty(&radeon_drm_flip_signalled)) { 32139413783Smrg e = xorg_list_first_entry(&radeon_drm_flip_signalled, 32239413783Smrg struct radeon_drm_queue_entry, list); 32339413783Smrg 
radeon_drm_queue_handle_one(e); 32439413783Smrg } 32539413783Smrg 32639413783Smrg while (drmmode_crtc->flip_pending 32739413783Smrg && radeon_drm_handle_event(pRADEONEnt->fd, 328446f62d6Smrg &drmmode_crtc->drmmode->event_context) >= 0); 32939413783Smrg} 33039413783Smrg 3310d16fef4Smrg/* 3320d16fef4Smrg * Initialize the DRM event queue 3330d16fef4Smrg */ 3340d16fef4Smrgvoid 33539413783Smrgradeon_drm_queue_init(ScrnInfoPtr scrn) 3360d16fef4Smrg{ 33739413783Smrg RADEONInfoPtr info = RADEONPTR(scrn); 33839413783Smrg drmmode_ptr drmmode = &info->drmmode; 33939413783Smrg 34039413783Smrg drmmode->event_context.version = 2; 341446f62d6Smrg drmmode->event_context.vblank_handler = radeon_drm_queue_handler; 342446f62d6Smrg drmmode->event_context.page_flip_handler = radeon_drm_queue_handler; 34339413783Smrg 3440d16fef4Smrg if (radeon_drm_queue_refcnt++) 3450d16fef4Smrg return; 3460d16fef4Smrg 3470d16fef4Smrg xorg_list_init(&radeon_drm_queue); 34839413783Smrg xorg_list_init(&radeon_drm_flip_signalled); 34939413783Smrg xorg_list_init(&radeon_drm_vblank_signalled); 350446f62d6Smrg xorg_list_init(&radeon_drm_vblank_deferred); 3510d16fef4Smrg} 3520d16fef4Smrg 3530d16fef4Smrg/* 3540d16fef4Smrg * Deinitialize the DRM event queue 3550d16fef4Smrg */ 3560d16fef4Smrgvoid 3570d16fef4Smrgradeon_drm_queue_close(ScrnInfoPtr scrn) 3580d16fef4Smrg{ 3590d16fef4Smrg struct radeon_drm_queue_entry *e, *tmp; 3600d16fef4Smrg 3610d16fef4Smrg xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_queue, list) { 3620d16fef4Smrg if (e->crtc->scrn == scrn) 3630d16fef4Smrg radeon_drm_abort_one(e); 3640d16fef4Smrg } 3650d16fef4Smrg 3660d16fef4Smrg radeon_drm_queue_refcnt--; 3670d16fef4Smrg} 368