/*
 * Copyright © 2007 Red Hat, Inc.
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <xorg-server.h>
#include <X11/Xdefs.h>
#include <list.h>

#include "radeon.h"
#include "radeon_drm_queue.h"


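/*
 * One queued DRM event. Entries are allocated in radeon_drm_queue_alloc
 * and freed once the corresponding event has been handled or aborted
 * (radeon_drm_queue_handle_one / radeon_drm_abort_one).
 */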
struct radeon_drm_queue_entry {
    struct xorg_list list;
    uint64_t usec;
    uint64_t id;
    uintptr_t seq;
    void *data;
    ClientPtr client;
    xf86CrtcPtr crtc;
    radeon_drm_handler_proc handler;
    radeon_drm_abort_proc abort;
    unsigned int frame;
};
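
/*
 * radeon_drm_queue holds entries whose DRM event hasn't arrived yet; the
 * two signalled lists hold entries whose page flip or vblank event has
 * arrived but whose handlers haven't run yet. radeon_drm_queue_seq
 * generates the per-entry cookies passed to the kernel, and the refcount
 * tracks how many screens have initialized the (global) queue.
 */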

static int radeon_drm_queue_refcnt;
static struct xorg_list radeon_drm_queue;
static struct xorg_list radeon_drm_flip_signalled;
static struct xorg_list radeon_drm_vblank_signalled;
static uintptr_t radeon_drm_queue_seq;


/*
 * Process (or abort) one DRM event queue entry and free it
 */
static void
radeon_drm_queue_handle_one(struct radeon_drm_queue_entry *e)
{
    xorg_list_del(&e->list);
    if (e->handler)
        e->handler(e->crtc, e->frame, e->usec, e->data);
    else
        e->abort(e->crtc, e->data);
    free(e);
}

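/*
 * Common event handler: look up the queue entry matching the event cookie.
 * Entries whose handler was cleared by radeon_drm_abort_client are aborted
 * immediately; all others are timestamped and moved to the given signalled
 * list, to be processed once it's safe to run their handlers.
 */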
static void
radeon_drm_queue_handler(struct xorg_list *signalled, unsigned int frame,
                         unsigned int sec, unsigned int usec, void *user_ptr)
{
    uintptr_t seq = (uintptr_t)user_ptr;
    struct radeon_drm_queue_entry *e, *tmp;

    xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_queue, list) {
        if (e->seq == seq) {
            if (!e->handler) {
                radeon_drm_queue_handle_one(e);
                break;
            }

            xorg_list_del(&e->list);
            e->usec = (uint64_t)sec * 1000000 + usec;
            e->frame = frame;
            xorg_list_append(&e->list, signalled);
            break;
        }
    }
}

/*
 * Signal a DRM page flip event
 */
static void
radeon_drm_page_flip_handler(int fd, unsigned int frame, unsigned int sec,
                             unsigned int usec, void *user_ptr)
{
    radeon_drm_queue_handler(&radeon_drm_flip_signalled, frame, sec, usec,
                             user_ptr);
}

/*
 * Signal a DRM vblank event
 */
static void
radeon_drm_vblank_handler(int fd, unsigned int frame, unsigned int sec,
                          unsigned int usec, void *user_ptr)
{
    radeon_drm_queue_handler(&radeon_drm_vblank_signalled, frame, sec, usec,
                             user_ptr);
}

/*
 * Handle deferred DRM vblank events
 *
 * This function must be called after radeon_drm_wait_pending_flip, once
 * it's safe to attempt queueing a flip again.
 */
void
radeon_drm_queue_handle_deferred(xf86CrtcPtr crtc)
{
    drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;
    struct radeon_drm_queue_entry *e, *tmp;

    if (drmmode_crtc->wait_flip_nesting_level == 0 ||
        --drmmode_crtc->wait_flip_nesting_level > 0)
        return;

    xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_vblank_signalled, list) {
        drmmode_crtc_private_ptr drmmode_crtc = e->crtc->driver_private;

        if (drmmode_crtc->wait_flip_nesting_level == 0)
            radeon_drm_queue_handle_one(e);
    }
}

/*
 * Enqueue a potential DRM response; when the associated event arrives,
 * the data queued here is passed to the handler.
 */
uintptr_t
radeon_drm_queue_alloc(xf86CrtcPtr crtc, ClientPtr client,
                       uint64_t id, void *data,
                       radeon_drm_handler_proc handler,
                       radeon_drm_abort_proc abort)
{
    struct radeon_drm_queue_entry *e;

    e = calloc(1, sizeof(struct radeon_drm_queue_entry));
    if (!e)
        return RADEON_DRM_QUEUE_ERROR;

    if (_X_UNLIKELY(radeon_drm_queue_seq == RADEON_DRM_QUEUE_ERROR))
        radeon_drm_queue_seq++;

    e->seq = radeon_drm_queue_seq++;
    e->client = client;
    e->crtc = crtc;
    e->id = id;
    e->data = data;
    e->handler = handler;
    e->abort = abort;

    xorg_list_append(&e->list, &radeon_drm_queue);

    return e->seq;
}
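
/*
 * Usage sketch (not from this file; my_handler/my_abort are hypothetical
 * callbacks): a caller typically allocates the queue entry first, then
 * passes the returned seq to the kernel as the event cookie, e.g. when
 * requesting a vblank event:
 *
 *     drmVBlank vbl;
 *     uintptr_t seq = radeon_drm_queue_alloc(crtc, client, id, data,
 *                                            my_handler, my_abort);
 *
 *     if (seq == RADEON_DRM_QUEUE_ERROR)
 *         return FALSE;
 *
 *     vbl.request.type = DRM_VBLANK_RELATIVE | DRM_VBLANK_EVENT;
 *     vbl.request.sequence = 1;
 *     vbl.request.signal = (unsigned long)seq;
 *     if (drmWaitVBlank(pRADEONEnt->fd, &vbl) != 0)
 *         radeon_drm_abort_entry(seq);
 *
 * The kernel hands the signal value back as user_ptr when the event
 * fires, which is how radeon_drm_queue_handler finds the entry.
 */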

/*
 * Abort one queued DRM entry, removing it from the list,
 * calling the abort function and freeing the memory.
 */
static void
radeon_drm_abort_one(struct radeon_drm_queue_entry *e)
{
    xorg_list_del(&e->list);
    e->abort(e->crtc, e->data);
    free(e);
}

/*
 * Abort DRM queue entries for a client
 *
 * NOTE: This keeps the entries in the list until the DRM event arrives,
 * but then it calls the abort functions instead of the handler
 * functions.
 */
void
radeon_drm_abort_client(ClientPtr client)
{
    struct radeon_drm_queue_entry *e;

    xorg_list_for_each_entry(e, &radeon_drm_queue, list) {
        if (e->client == client)
            e->handler = NULL;
    }
}

/*
 * Abort a specific DRM queue entry
 */
void
radeon_drm_abort_entry(uintptr_t seq)
{
    struct radeon_drm_queue_entry *e, *tmp;

    if (seq == RADEON_DRM_QUEUE_ERROR)
        return;

    xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_vblank_signalled, list) {
        if (e->seq == seq) {
            radeon_drm_abort_one(e);
            return;
        }
    }

    xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_queue, list) {
        if (e->seq == seq) {
            radeon_drm_abort_one(e);
            break;
        }
    }
}

/*
 * Abort a specific DRM queue entry by ID
 */
void
radeon_drm_abort_id(uint64_t id)
{
    struct radeon_drm_queue_entry *e, *tmp;

    xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_queue, list) {
        if (e->id == id) {
            radeon_drm_abort_one(e);
            break;
        }
    }
}

/*
 * drmHandleEvent wrapper: process pending DRM events, running page flip
 * handlers immediately and deferring vblank handlers for CRTCs which are
 * still waiting for a pending flip.
 */
int
radeon_drm_handle_event(int fd, drmEventContext *event_context)
{
    struct radeon_drm_queue_entry *e, *tmp;
    int r;

    r = drmHandleEvent(fd, event_context);

    while (!xorg_list_is_empty(&radeon_drm_flip_signalled)) {
        e = xorg_list_first_entry(&radeon_drm_flip_signalled,
                                  struct radeon_drm_queue_entry, list);
        radeon_drm_queue_handle_one(e);
    }

    xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_vblank_signalled, list) {
        drmmode_crtc_private_ptr drmmode_crtc = e->crtc->driver_private;

        if (drmmode_crtc->wait_flip_nesting_level == 0)
            radeon_drm_queue_handle_one(e);
    }

    return r;
}

/*
 * Wait for the pending page flip on the given CRTC to complete
 */
void radeon_drm_wait_pending_flip(xf86CrtcPtr crtc)
{
    drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;
    RADEONEntPtr pRADEONEnt = RADEONEntPriv(crtc->scrn);
    struct radeon_drm_queue_entry *e;

    drmmode_crtc->wait_flip_nesting_level++;

    while (drmmode_crtc->flip_pending &&
           !xorg_list_is_empty(&radeon_drm_flip_signalled)) {
        e = xorg_list_first_entry(&radeon_drm_flip_signalled,
                                  struct radeon_drm_queue_entry, list);
        radeon_drm_queue_handle_one(e);
    }

    while (drmmode_crtc->flip_pending &&
           radeon_drm_handle_event(pRADEONEnt->fd,
                                   &drmmode_crtc->drmmode->event_context) > 0);
}
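
/*
 * Usage sketch (hypothetical caller, not from this file): per the comment
 * on radeon_drm_queue_handle_deferred above, these calls are expected to
 * come in pairs around code which must not race with a pending flip:
 *
 *     radeon_drm_wait_pending_flip(crtc);
 *     // ... safe to queue a new flip here ...
 *     radeon_drm_queue_handle_deferred(crtc);
 *
 * wait_flip_nesting_level lets such pairs nest; deferred vblank events
 * only run once the outermost pair completes.
 */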

/*
 * Initialize the DRM event queue
 */
void
radeon_drm_queue_init(ScrnInfoPtr scrn)
{
    RADEONInfoPtr info = RADEONPTR(scrn);
    drmmode_ptr drmmode = &info->drmmode;

    drmmode->event_context.version = 2;
    drmmode->event_context.vblank_handler = radeon_drm_vblank_handler;
    drmmode->event_context.page_flip_handler = radeon_drm_page_flip_handler;

    if (radeon_drm_queue_refcnt++)
        return;

    xorg_list_init(&radeon_drm_queue);
    xorg_list_init(&radeon_drm_flip_signalled);
    xorg_list_init(&radeon_drm_vblank_signalled);
}
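
/*
 * Note: the queue state above is global and shared by all screens; the
 * refcount ensures the lists are only initialized on the first init call
 * and lets each screen's radeon_drm_queue_close balance its init.
 */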

/*
 * Deinitialize the DRM event queue
 */
void
radeon_drm_queue_close(ScrnInfoPtr scrn)
{
    struct radeon_drm_queue_entry *e, *tmp;

    xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_queue, list) {
        if (e->crtc->scrn == scrn)
            radeon_drm_abort_one(e);
    }

    radeon_drm_queue_refcnt--;
}