Home | History | Annotate | Line # | Download | only in xen
      1 /*	$NetBSD: xen_drm_front_kms.c,v 1.2 2021/12/18 23:45:45 riastradh Exp $	*/
      2 
      3 // SPDX-License-Identifier: GPL-2.0 OR MIT
      4 
      5 /*
      6  *  Xen para-virtual DRM device
      7  *
      8  * Copyright (C) 2016-2018 EPAM Systems Inc.
      9  *
     10  * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko (at) epam.com>
     11  */
     12 
     13 #include <sys/cdefs.h>
     14 __KERNEL_RCSID(0, "$NetBSD: xen_drm_front_kms.c,v 1.2 2021/12/18 23:45:45 riastradh Exp $");
     15 
     16 #include <drm/drm_atomic.h>
     17 #include <drm/drm_atomic_helper.h>
     18 #include <drm/drm_drv.h>
     19 #include <drm/drm_fourcc.h>
     20 #include <drm/drm_gem.h>
     21 #include <drm/drm_gem_framebuffer_helper.h>
     22 #include <drm/drm_probe_helper.h>
     23 #include <drm/drm_vblank.h>
     24 
     25 #include "xen_drm_front.h"
     26 #include "xen_drm_front_conn.h"
     27 #include "xen_drm_front_kms.h"
     28 
     29 /*
     30  * Timeout in ms to wait for frame done event from the backend:
     31  * must be a bit more than IO time-out
     32  */
     33 #define FRAME_DONE_TO_MS	(XEN_DRM_FRONT_WAIT_BACK_MS + 100)
     34 
/* Map a simple display pipe back to its enclosing front-end pipeline. */
static struct xen_drm_front_drm_pipeline *
to_xen_drm_pipeline(struct drm_simple_display_pipe *pipe)
{
	return container_of(pipe, struct xen_drm_front_drm_pipeline, pipe);
}
     40 
/*
 * Framebuffer .destroy hook: detach the framebuffer from the backend
 * (only possible while the DRM device is still registered), then free
 * the GEM-backed framebuffer itself.
 */
static void fb_destroy(struct drm_framebuffer *fb)
{
	struct xen_drm_front_drm_info *drm_info = fb->dev->dev_private;
	int idx;

	/*
	 * If the device is already unplugged there is no backend to talk
	 * to; skip the detach but still release the framebuffer below.
	 */
	if (drm_dev_enter(fb->dev, &idx)) {
		xen_drm_front_fb_detach(drm_info->front_info,
					xen_drm_front_fb_to_cookie(fb));
		drm_dev_exit(idx);
	}
	drm_gem_fb_destroy(fb);
}
     53 
/* Framebuffer ops: only destruction needs front-end specific handling. */
static const struct drm_framebuffer_funcs fb_funcs = {
	.destroy = fb_destroy,
};
     57 
/*
 * .fb_create hook: create a GEM-backed framebuffer with our custom
 * funcs and attach it to the para-virtual backend.
 *
 * Returns the new framebuffer on success or an ERR_PTR on failure; on
 * the failure path the partially constructed framebuffer is destroyed.
 */
static struct drm_framebuffer *
fb_create(struct drm_device *dev, struct drm_file *filp,
	  const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_framebuffer *fb;
	struct drm_gem_object *gem_obj;
	int ret;

	fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
	if (IS_ERR_OR_NULL(fb))
		return fb;

	/* Single-plane framebuffer: plane 0 holds the backing GEM object. */
	gem_obj = fb->obj[0];

	/*
	 * Tell the backend about the new framebuffer; both the display
	 * buffer and the framebuffer are identified by opaque cookies.
	 */
	ret = xen_drm_front_fb_attach(drm_info->front_info,
				      xen_drm_front_dbuf_to_cookie(gem_obj),
				      xen_drm_front_fb_to_cookie(fb),
				      fb->width, fb->height,
				      fb->format->format);
	if (ret < 0) {
		DRM_ERROR("Back failed to attach FB %p: %d\n", fb, ret);
		goto fail;
	}

	return fb;

fail:
	drm_gem_fb_destroy(fb);
	return ERR_PTR(ret);
}
     89 
/*
 * Mode configuration: custom .fb_create attaches framebuffers to the
 * backend; check/commit are fully handled by the atomic helpers.
 */
static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};
     95 
     96 static void send_pending_event(struct xen_drm_front_drm_pipeline *pipeline)
     97 {
     98 	struct drm_crtc *crtc = &pipeline->pipe.crtc;
     99 	struct drm_device *dev = crtc->dev;
    100 	unsigned long flags;
    101 
    102 	spin_lock_irqsave(&dev->event_lock, flags);
    103 	if (pipeline->pending_event)
    104 		drm_crtc_send_vblank_event(crtc, pipeline->pending_event);
    105 	pipeline->pending_event = NULL;
    106 	spin_unlock_irqrestore(&dev->event_lock, flags);
    107 }
    108 
/*
 * Simple display pipe .enable hook: program the backend with the
 * current CRTC position, framebuffer geometry and pixel depth.
 */
static void display_enable(struct drm_simple_display_pipe *pipe,
			   struct drm_crtc_state *crtc_state,
			   struct drm_plane_state *plane_state)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret, idx;

	/* Nothing to do if the device has already been unplugged. */
	if (!drm_dev_enter(pipe->crtc.dev, &idx))
		return;

	/* cpp[0] is bytes per pixel of plane 0; backend wants bits. */
	ret = xen_drm_front_mode_set(pipeline, crtc->x, crtc->y,
				     fb->width, fb->height,
				     fb->format->cpp[0] * 8,
				     xen_drm_front_fb_to_cookie(fb));

	if (ret) {
		DRM_ERROR("Failed to enable display: %d\n", ret);
		/* Report the connector as disconnected on backend failure. */
		pipeline->conn_connected = false;
	}

	drm_dev_exit(idx);
}
    134 
/*
 * Simple display pipe .disable hook: tell the backend to blank the
 * display (all-zero mode, NULL framebuffer cookie) and release any
 * page flip event still pending, so user-space is not left blocked.
 */
static void display_disable(struct drm_simple_display_pipe *pipe)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	int ret = 0, idx;

	if (drm_dev_enter(pipe->crtc.dev, &idx)) {
		ret = xen_drm_front_mode_set(pipeline, 0, 0, 0, 0, 0,
					     xen_drm_front_fb_to_cookie(NULL));
		drm_dev_exit(idx);
	}
	if (ret)
		DRM_ERROR("Failed to disable display: %d\n", ret);

	/* Make sure we can restart with enabled connector next time */
	pipeline->conn_connected = true;

	/* release stalled event if any */
	send_pending_event(pipeline);
}
    155 
/*
 * Called by the front-end event channel handler when the backend
 * reports a frame done: stop the page flip time-out worker and deliver
 * the pending vblank event to user-space.
 */
void xen_drm_front_kms_on_frame_done(struct xen_drm_front_drm_pipeline *pipeline,
				     u64 fb_cookie)
{
	/*
	 * This runs in interrupt context, e.g. under
	 * drm_info->front_info->io_lock, so we cannot call _sync version
	 * to cancel the work
	 */
	cancel_delayed_work(&pipeline->pflip_to_worker);

	send_pending_event(pipeline);
}
    168 
    169 static void pflip_to_worker(struct work_struct *work)
    170 {
    171 	struct delayed_work *delayed_work = to_delayed_work(work);
    172 	struct xen_drm_front_drm_pipeline *pipeline =
    173 			container_of(delayed_work,
    174 				     struct xen_drm_front_drm_pipeline,
    175 				     pflip_to_worker);
    176 
    177 	DRM_ERROR("Frame done timed-out, releasing");
    178 	send_pending_event(pipeline);
    179 }
    180 
/*
 * Send a page flip request to the backend for a true flip (both old and
 * new plane states carry a framebuffer), arming a time-out worker that
 * releases the pending event should the backend never answer.
 *
 * Returns true if the flip was handed to the backend (the pending event
 * will be sent on frame done), false if no flip was sent and the caller
 * must deliver the pending event itself.
 */
static bool display_send_page_flip(struct drm_simple_display_pipe *pipe,
				   struct drm_plane_state *old_plane_state)
{
	struct drm_plane_state *plane_state =
			drm_atomic_get_new_plane_state(old_plane_state->state,
						       &pipe->plane);

	/*
	 * If old_plane_state->fb is NULL and plane_state->fb is not,
	 * then this is an atomic commit which will enable display.
	 * If old_plane_state->fb is not NULL and plane_state->fb is,
	 * then this is an atomic commit which will disable display.
	 * Ignore these and do not send page flip as this framebuffer will be
	 * sent to the backend as a part of display_set_config call.
	 */
	if (old_plane_state->fb && plane_state->fb) {
		struct xen_drm_front_drm_pipeline *pipeline =
				to_xen_drm_pipeline(pipe);
		struct xen_drm_front_drm_info *drm_info = pipeline->drm_info;
		int ret;

		/* Arm the time-out before the request so it cannot be missed. */
		schedule_delayed_work(&pipeline->pflip_to_worker,
				      msecs_to_jiffies(FRAME_DONE_TO_MS));

		ret = xen_drm_front_page_flip(drm_info->front_info,
					      pipeline->index,
					      xen_drm_front_fb_to_cookie(plane_state->fb));
		if (ret) {
			DRM_ERROR("Failed to send page flip request to backend: %d\n", ret);

			pipeline->conn_connected = false;
			/*
			 * Report the flip not handled, so pending event is
			 * sent, unblocking user-space.
			 */
			return false;
		}
		/*
		 * Signal that page flip was handled, pending event will be sent
		 * on frame done event from the backend.
		 */
		return true;
	}

	return false;
}
    227 
/*
 * Simple display pipe .update hook: cache the CRTC's vblank event, then
 * either hand the flip to the backend (event delivered later on frame
 * done) or deliver the event immediately when no flip is sent.
 */
static void display_update(struct drm_simple_display_pipe *pipe,
			   struct drm_plane_state *old_plane_state)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_pending_vblank_event *event;
	int idx;

	event = crtc->state->event;
	if (event) {
		struct drm_device *dev = crtc->dev;
		unsigned long flags;

		/* A previous event must have been delivered by now. */
		WARN_ON(pipeline->pending_event);

		spin_lock_irqsave(&dev->event_lock, flags);
		crtc->state->event = NULL;

		pipeline->pending_event = event;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/* Device unplugged: nothing to flip, just unblock user-space. */
	if (!drm_dev_enter(pipe->crtc.dev, &idx)) {
		send_pending_event(pipeline);
		return;
	}

	/*
	 * Send page flip request to the backend *after* we have event cached
	 * above, so on page flip done event from the backend we can
	 * deliver it and there is no race condition between this code and
	 * event from the backend.
	 * If this is not a page flip, e.g. no flip done event from the backend
	 * is expected, then send now.
	 */
	if (!display_send_page_flip(pipe, old_plane_state))
		send_pending_event(pipeline);

	drm_dev_exit(idx);
}
    269 
    270 static enum drm_mode_status
    271 display_mode_valid(struct drm_simple_display_pipe *pipe,
    272 		   const struct drm_display_mode *mode)
    273 {
    274 	struct xen_drm_front_drm_pipeline *pipeline =
    275 			container_of(pipe, struct xen_drm_front_drm_pipeline,
    276 				     pipe);
    277 
    278 	if (mode->hdisplay != pipeline->width)
    279 		return MODE_ERROR;
    280 
    281 	if (mode->vdisplay != pipeline->height)
    282 		return MODE_ERROR;
    283 
    284 	return MODE_OK;
    285 }
    286 
/* Simple display pipe hooks; prepare_fb is the generic GEM helper. */
static const struct drm_simple_display_pipe_funcs display_funcs = {
	.mode_valid = display_mode_valid,
	.enable = display_enable,
	.disable = display_disable,
	.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
	.update = display_update,
};
    294 
/*
 * Initialize one display pipeline from its connector configuration:
 * set up the time-out worker, the connector, and register the simple
 * display pipe with the supported pixel formats.
 *
 * Returns 0 on success or a negative error code.
 */
static int display_pipe_init(struct xen_drm_front_drm_info *drm_info,
			     int index, struct xen_drm_front_cfg_connector *cfg,
			     struct xen_drm_front_drm_pipeline *pipeline)
{
	struct drm_device *dev = drm_info->drm_dev;
	const u32 *formats;
	int format_count;
	int ret;

	pipeline->drm_info = drm_info;
	pipeline->index = index;
	pipeline->height = cfg->height;
	pipeline->width = cfg->width;

	INIT_DELAYED_WORK(&pipeline->pflip_to_worker, pflip_to_worker);

	ret = xen_drm_front_conn_init(drm_info, &pipeline->conn);
	if (ret)
		return ret;

	formats = xen_drm_front_conn_get_formats(&format_count);

	/* No format modifiers (NULL): only linear layouts are supported. */
	return drm_simple_display_pipe_init(dev, &pipeline->pipe,
					    &display_funcs, formats,
					    format_count, NULL,
					    &pipeline->conn);
}
    322 
/*
 * Initialize KMS for the front-end device: set up mode configuration
 * limits, create one display pipeline per configured connector, and
 * start connector polling.
 *
 * Returns 0 on success or a negative error code; on pipeline setup
 * failure the mode configuration is torn down before returning.
 */
int xen_drm_front_kms_init(struct xen_drm_front_drm_info *drm_info)
{
	struct drm_device *dev = drm_info->drm_dev;
	int i, ret;

	drm_mode_config_init(dev);

	/* Framebuffer size limits advertised to user-space. */
	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4095;
	dev->mode_config.max_height = 2047;
	dev->mode_config.funcs = &mode_config_funcs;

	for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
		struct xen_drm_front_cfg_connector *cfg =
				&drm_info->front_info->cfg.connectors[i];
		struct xen_drm_front_drm_pipeline *pipeline =
				&drm_info->pipeline[i];

		ret = display_pipe_init(drm_info, i, cfg, pipeline);
		if (ret) {
			drm_mode_config_cleanup(dev);
			return ret;
		}
	}

	drm_mode_config_reset(dev);
	drm_kms_helper_poll_init(dev);
	return 0;
}
    353 
    354 void xen_drm_front_kms_fini(struct xen_drm_front_drm_info *drm_info)
    355 {
    356 	int i;
    357 
    358 	for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
    359 		struct xen_drm_front_drm_pipeline *pipeline =
    360 				&drm_info->pipeline[i];
    361 
    362 		cancel_delayed_work_sync(&pipeline->pflip_to_worker);
    363 
    364 		send_pending_event(pipeline);
    365 	}
    366 }
    367