/*	$NetBSD: xen_drm_front.c,v 1.2 2021/12/18 23:45:45 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko (at) epam.com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_drm_front.c,v 1.2 2021/12/18 23:45:45 riastradh Exp $");

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_device.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>

#include <xen/platform_pci.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include <xen/xen-front-pgdir-shbuf.h>
#include <xen/interface/io/displif.h>

#include "xen_drm_front.h"
#include "xen_drm_front_cfg.h"
#include "xen_drm_front_evtchnl.h"
#include "xen_drm_front_gem.h"
#include "xen_drm_front_kms.h"

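/*
 * Bookkeeping for a display buffer shared with the backend: the
 * cookies identify the buffer (and the framebuffer attached to it)
 * within the frontend/backend protocol, and the shbuf descriptor
 * tracks the grant references backing the buffer pages.
 */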
struct xen_drm_front_dbuf {
	struct list_head list;
	u64 dbuf_cookie;
	u64 fb_cookie;

	struct xen_front_pgdir_shbuf shbuf;
};

static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
			     struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
{
	dbuf->dbuf_cookie = dbuf_cookie;
	list_add(&dbuf->list, &front_info->dbuf_list);
}

static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
					   u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie)
			return buf;

	return NULL;
}

static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie) {
			list_del(&buf->list);
			xen_front_pgdir_shbuf_unmap(&buf->shbuf);
			xen_front_pgdir_shbuf_free(&buf->shbuf);
			kfree(buf);
			break;
		}
}

static void dbuf_free_all(struct list_head *dbuf_list)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list) {
		list_del(&buf->list);
		xen_front_pgdir_shbuf_unmap(&buf->shbuf);
		xen_front_pgdir_shbuf_free(&buf->shbuf);
		kfree(buf);
	}
}

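/*
 * Reserve the next free slot on the shared request ring and tag the
 * request with a fresh id, so the event channel handler can match the
 * backend's response to this request.
 */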
static struct xendispl_req *
be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation)
{
	struct xendispl_req *req;

	req = RING_GET_REQUEST(&evtchnl->u.req.ring,
			       evtchnl->u.req.ring.req_prod_pvt);
	req->operation = operation;
	req->id = evtchnl->evt_next_id++;
	evtchnl->evt_id = req->id;
	return req;
}

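/*
 * Push a prepared request to the backend. Called with io_lock held;
 * the response is delivered asynchronously and is waited for in
 * be_stream_wait_io().
 */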
static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl,
			   struct xendispl_req *req)
{
	reinit_completion(&evtchnl->u.req.completion);
	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return -EIO;

	xen_drm_front_evtchnl_flush(evtchnl);
	return 0;
}

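/* Wait for the backend's response to the last posted request. */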
static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl)
{
	if (wait_for_completion_timeout(&evtchnl->u.req.completion,
			msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0)
		return -ETIMEDOUT;

	return evtchnl->u.req.resp_status;
}

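/*
 * Locking protocol for the request helpers below: the req_io_lock
 * mutex serializes whole request/response cycles on a channel, while
 * the io_lock spinlock protects the shared ring itself against the
 * event channel interrupt handler.
 */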
int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
			   u32 x, u32 y, u32 width, u32 height,
			   u32 bpp, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_info *front_info;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	front_info = pipeline->drm_info->front_info;
	evtchnl = &front_info->evt_pairs[pipeline->index].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
	req->op.set_config.x = x;
	req->op.set_config.y = y;
	req->op.set_config.width = width;
	req->op.set_config.height = height;
	req->op.set_config.bpp = bpp;
	req->op.set_config.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

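/*
 * Share a display buffer with the backend: grant access to the buffer
 * pages via a page directory (see xen-front-pgdir-shbuf), ask the
 * backend to create the dbuf and, for a backend allocated buffer, map
 * the grant references published by the backend.
 */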
int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
			      u64 dbuf_cookie, u32 width, u32 height,
			      u32 bpp, u64 size, struct page **pages)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *dbuf;
	struct xendispl_req *req;
	struct xen_front_pgdir_shbuf_cfg buf_cfg;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
	if (!dbuf)
		return -ENOMEM;

	dbuf_add_to_list(front_info, dbuf, dbuf_cookie);

	memset(&buf_cfg, 0, sizeof(buf_cfg));
	buf_cfg.xb_dev = front_info->xb_dev;
	buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	buf_cfg.pages = pages;
	buf_cfg.pgdir = &dbuf->shbuf;
	buf_cfg.be_alloc = front_info->cfg.be_alloc;

	ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
	if (ret < 0)
		goto fail_shbuf_alloc;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
	req->op.dbuf_create.gref_directory =
			xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
	req->op.dbuf_create.buffer_sz = size;
	req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
	req->op.dbuf_create.width = width;
	req->op.dbuf_create.height = height;
	req->op.dbuf_create.bpp = bpp;
	if (buf_cfg.be_alloc)
		req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret < 0)
		goto fail;

	ret = be_stream_wait_io(evtchnl);
	if (ret < 0)
		goto fail;

	ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
	if (ret < 0)
		goto fail;

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return 0;

fail:
	mutex_unlock(&evtchnl->u.req.req_io_lock);
fail_shbuf_alloc:
	dbuf_free(&front_info->dbuf_list, dbuf_cookie);
	return ret;
}

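/*
 * Ask the backend to destroy a display buffer and drop the local
 * bookkeeping: before the request for a backend allocated buffer (so
 * the backend can release its pages), after it otherwise.
 */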
static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
				      u64 dbuf_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	bool be_alloc;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	be_alloc = front_info->cfg.be_alloc;

	/*
	 * For a backend allocated buffer, release the references now so
	 * the backend can free the buffer.
	 */
	if (be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
	req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	/*
	 * Do this regardless of communication status with the backend:
	 * if we cannot remove the remote resources, remove what we can
	 * locally.
	 */
	if (!be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
			    u64 dbuf_cookie, u64 fb_cookie, u32 width,
			    u32 height, u32 pixel_format)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *buf;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie);
	if (!buf)
		return -EINVAL;

	buf->fb_cookie = fb_cookie;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
	req->op.fb_attach.dbuf_cookie = dbuf_cookie;
	req->op.fb_attach.fb_cookie = fb_cookie;
	req->op.fb_attach.width = width;
	req->op.fb_attach.height = height;
	req->op.fb_attach.pixel_format = pixel_format;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
			    u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
	req->op.fb_detach.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
			    int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	if (unlikely(conn_idx >= front_info->num_evt_pairs))
		return -EINVAL;

	evtchnl = &front_info->evt_pairs[conn_idx].req;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
	req->op.pg_flip.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
				 int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;

	if (unlikely(conn_idx >= front_info->cfg.num_connectors))
		return;

	xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx],
					fb_cookie);
}

static int xen_drm_drv_dumb_create(struct drm_file *filp,
				   struct drm_device *dev,
				   struct drm_mode_create_dumb *args)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_gem_object *obj;
	int ret;

	/*
	 * Dumb buffer creation is a two stage process: first a fully
	 * constructed GEM object is created and communicated to the
	 * backend, and only after that is the GEM handle created. This
	 * ordering avoids a race: a handle becomes visible to user-space
	 * immediately, so user-space could otherwise try to access the
	 * object before its pages are set up.
	 * For details also see drm_gem_handle_create().
	 */
	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	obj = xen_drm_front_gem_create(dev, args->size);
	if (IS_ERR_OR_NULL(obj)) {
		/* PTR_ERR(NULL) is 0, so map a NULL object to -ENOMEM */
		ret = obj ? PTR_ERR(obj) : -ENOMEM;
		goto fail;
	}

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(obj),
					args->width, args->height, args->bpp,
					args->size,
					xen_drm_front_gem_get_pages(obj));
	if (ret)
		goto fail_backend;

	/* This is the tail of GEM object creation */
	ret = drm_gem_handle_create(filp, obj, &args->handle);
	if (ret)
		goto fail_handle;

	/* Drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);
	return 0;

fail_handle:
	xen_drm_front_dbuf_destroy(drm_info->front_info,
				   xen_drm_front_dbuf_to_cookie(obj));
fail_backend:
	/* drop reference from allocate */
	drm_gem_object_put_unlocked(obj);
fail:
	DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
	return ret;
}

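/*
 * GEM object destruction: if the device is still alive, tell the
 * backend to destroy the dbuf; if it has already been unplugged, only
 * the local bookkeeping can be cleaned up.
 */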
static void xen_drm_drv_free_object_unlocked(struct drm_gem_object *obj)
{
	struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
	int idx;

	if (drm_dev_enter(obj->dev, &idx)) {
		xen_drm_front_dbuf_destroy(drm_info->front_info,
					   xen_drm_front_dbuf_to_cookie(obj));
		drm_dev_exit(idx);
	} else {
		dbuf_free(&drm_info->front_info->dbuf_list,
			  xen_drm_front_dbuf_to_cookie(obj));
	}

	xen_drm_front_gem_free_object_unlocked(obj);
}

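/*
 * Called when the last reference to the DRM device is dropped. With
 * backend allocated buffers the xenbus state switch is deferred to
 * this point, so the backend keeps its resources until user-space has
 * really closed the device (see xen_drm_drv_fini()).
 */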
static void xen_drm_drv_release(struct drm_device *dev)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_drm_front_info *front_info = drm_info->front_info;

	xen_drm_front_kms_fini(drm_info);

	drm_atomic_helper_shutdown(dev);
	drm_mode_config_cleanup(dev);

	drm_dev_fini(dev);
	kfree(dev);

	if (front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);

	kfree(drm_info);
}

static const struct file_operations xen_drm_dev_fops = {
	.owner          = THIS_MODULE,
	.open           = drm_open,
	.release        = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = drm_compat_ioctl,
#endif
	.poll           = drm_poll,
	.read           = drm_read,
	.llseek         = no_llseek,
	.mmap           = xen_drm_front_gem_mmap,
};

static const struct vm_operations_struct xen_drm_drv_vm_ops = {
	.open           = drm_gem_vm_open,
	.close          = drm_gem_vm_close,
};

static struct drm_driver xen_drm_driver = {
	.driver_features           = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.release                   = xen_drm_drv_release,
	.gem_vm_ops                = &xen_drm_drv_vm_ops,
	.gem_free_object_unlocked  = xen_drm_drv_free_object_unlocked,
	.prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
	.gem_prime_get_sg_table    = xen_drm_front_gem_get_sg_table,
	.gem_prime_vmap            = xen_drm_front_gem_prime_vmap,
	.gem_prime_vunmap          = xen_drm_front_gem_prime_vunmap,
	.gem_prime_mmap            = xen_drm_front_gem_prime_mmap,
	.dumb_create               = xen_drm_drv_dumb_create,
	.fops                      = &xen_drm_dev_fops,
	.name                      = "xendrm-du",
	.desc                      = "Xen PV DRM Display Unit",
	.date                      = "20180221",
	.major                     = 1,
	.minor                     = 0,
};

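/* Allocate and register the DRM device once the backend has connected. */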
static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
{
	struct device *dev = &front_info->xb_dev->dev;
	struct xen_drm_front_drm_info *drm_info;
	struct drm_device *drm_dev;
	int ret;

	DRM_INFO("Creating %s\n", xen_drm_driver.desc);

	drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
	if (!drm_info) {
		ret = -ENOMEM;
		goto fail;
	}

	drm_info->front_info = front_info;
	front_info->drm_info = drm_info;

	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
	if (IS_ERR(drm_dev)) {
		ret = PTR_ERR(drm_dev);
		goto fail;
	}

	drm_info->drm_dev = drm_dev;

	drm_dev->dev_private = drm_info;

	ret = xen_drm_front_kms_init(drm_info);
	if (ret) {
		DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret);
		goto fail_modeset;
	}

	ret = drm_dev_register(drm_dev, 0);
	if (ret)
		/* the device was never registered, so modeset cleanup is enough */
		goto fail_modeset;

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 xen_drm_driver.name, xen_drm_driver.major,
		 xen_drm_driver.minor, xen_drm_driver.patchlevel,
		 xen_drm_driver.date, drm_dev->primary->index);

	return 0;

fail_modeset:
	drm_kms_helper_poll_fini(drm_dev);
	drm_mode_config_cleanup(drm_dev);
fail:
	kfree(drm_info);
	return ret;
}

static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
	struct drm_device *dev;

	if (!drm_info)
		return;

	dev = drm_info->drm_dev;
	if (!dev)
		return;

	/* Nothing to do if device is already unplugged */
	if (drm_dev_is_unplugged(dev))
		return;

	drm_kms_helper_poll_fini(dev);
	drm_dev_unplug(dev);
	drm_dev_put(dev);

	front_info->drm_info = NULL;

	xen_drm_front_evtchnl_free_all(front_info);
	dbuf_free_all(&front_info->dbuf_list);

	/*
	 * If we are not using backend allocated buffers, then tell the
	 * backend we are ready to (re)initialize. Otherwise, wait for
	 * drm_driver.release.
	 */
	if (!front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);
}

static int displback_initwait(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg = &front_info->cfg;
	int ret;

	cfg->front_info = front_info;
	ret = xen_drm_front_cfg_card(front_info, cfg);
	if (ret < 0)
		return ret;

	DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
	/* Create event channels for all connectors and publish */
	ret = xen_drm_front_evtchnl_create_all(front_info);
	if (ret < 0)
		return ret;

	return xen_drm_front_evtchnl_publish_all(front_info);
}

static int displback_connect(struct xen_drm_front_info *front_info)
{
	xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED);
	return xen_drm_drv_init(front_info);
}

static void displback_disconnect(struct xen_drm_front_info *front_info)
{
	if (!front_info->drm_info)
		return;

	/* Tell the backend to wait until we release the DRM driver. */
	xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring);

	xen_drm_drv_fini(front_info);
}

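/*
 * XenBus state machine: react to backend state changes and drive the
 * frontend through initialization, connection and reconfiguration.
 */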
static void displback_changed(struct xenbus_device *xb_dev,
			      enum xenbus_state backend_state)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
	int ret;

	DRM_DEBUG("Backend state is %s, front is %s\n",
		  xenbus_strstate(backend_state),
		  xenbus_strstate(xb_dev->state));

	switch (backend_state) {
	case XenbusStateReconfiguring:
		/* fall through */
	case XenbusStateReconfigured:
		/* fall through */
	case XenbusStateInitialised:
		break;

	case XenbusStateInitialising:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* Recover after an unexpected backend closure. */
		displback_disconnect(front_info);
		break;

	case XenbusStateInitWait:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* Recover after an unexpected backend closure. */
		displback_disconnect(front_info);
		if (xb_dev->state != XenbusStateInitialising)
			break;

		ret = displback_initwait(front_info);
		if (ret < 0)
			xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
		else
			xenbus_switch_state(xb_dev, XenbusStateInitialised);
		break;

	case XenbusStateConnected:
		if (xb_dev->state != XenbusStateInitialised)
			break;

		ret = displback_connect(front_info);
		if (ret < 0) {
			displback_disconnect(front_info);
			xenbus_dev_fatal(xb_dev, ret, "connecting backend");
		} else {
			xenbus_switch_state(xb_dev, XenbusStateConnected);
		}
		break;

	case XenbusStateClosing:
		/*
		 * In this state the backend starts freeing resources, so
		 * let it go into the closed state so that we can also
		 * remove ours.
		 */
		break;

	case XenbusStateUnknown:
		/* fall through */
	case XenbusStateClosed:
		if (xb_dev->state == XenbusStateClosed)
			break;

		displback_disconnect(front_info);
		break;
	}
}

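/* Advertise a 64-bit DMA mask before any buffer allocations happen. */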
static int xen_drv_probe(struct xenbus_device *xb_dev,
			 const struct xenbus_device_id *id)
{
	struct xen_drm_front_info *front_info;
	struct device *dev = &xb_dev->dev;
	int ret;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret < 0) {
		DRM_ERROR("Cannot setup DMA mask, ret %d\n", ret);
		return ret;
	}

	front_info = devm_kzalloc(&xb_dev->dev,
				  sizeof(*front_info), GFP_KERNEL);
	if (!front_info)
		return -ENOMEM;

	front_info->xb_dev = xb_dev;
	spin_lock_init(&front_info->io_lock);
	INIT_LIST_HEAD(&front_info->dbuf_list);
	dev_set_drvdata(&xb_dev->dev, front_info);

	return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}

static int xen_drv_remove(struct xenbus_device *dev)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
	int to = 100;

	xenbus_switch_state(dev, XenbusStateClosing);

	/*
	 * On driver removal it is disconnected from XenBus,
	 * so no backend state change events come in via the
	 * .otherend_changed callback. This prevents us from exiting
	 * gracefully, i.e. signaling the backend to free event channels,
	 * waiting for its state to change to XenbusStateClosed and
	 * cleaning up at our end. Normally, once the front driver is
	 * removed, the backend will eventually go into the
	 * XenbusStateInitWait state.
	 *
	 * Workaround: read the backend's state manually and wait with a
	 * time-out.
	 */
	while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
				     XenbusStateUnknown) != XenbusStateInitWait) &&
				     --to)
		msleep(10);

	if (!to) {
		unsigned int state;

		state = xenbus_read_unsigned(front_info->xb_dev->otherend,
					     "state", XenbusStateUnknown);
		DRM_ERROR("Backend state is %s while removing driver\n",
			  xenbus_strstate(state));
	}

	xen_drm_drv_fini(front_info);
	xenbus_frontend_closed(dev);
	return 0;
}

static const struct xenbus_device_id xen_driver_ids[] = {
	{ XENDISPL_DRIVER_NAME },
	{ "" }
};

static struct xenbus_driver xen_driver = {
	.ids = xen_driver_ids,
	.probe = xen_drv_probe,
	.remove = xen_drv_remove,
	.otherend_changed = displback_changed,
};

static int __init xen_drv_init(void)
{
	/* At the moment we only support the case where XEN_PAGE_SIZE == PAGE_SIZE */
	if (XEN_PAGE_SIZE != PAGE_SIZE) {
		DRM_ERROR(XENDISPL_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
			  XEN_PAGE_SIZE, PAGE_SIZE);
		return -ENODEV;
	}

	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_devices())
		return -ENODEV;

	DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n");
	return xenbus_register_frontend(&xen_driver);
}

static void __exit xen_drv_fini(void)
{
	DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n");
	xenbus_unregister_driver(&xen_driver);
}

module_init(xen_drv_init);
module_exit(xen_drv_fini);

MODULE_DESCRIPTION("Xen para-virtualized display device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME);