/*	$NetBSD: xen_drm_front.c,v 1.2 2021/12/18 23:45:45 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_drm_front.c,v 1.2 2021/12/18 23:45:45 riastradh Exp $");

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_device.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>

#include <xen/platform_pci.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include <xen/xen-front-pgdir-shbuf.h>
#include <xen/interface/io/displif.h>

#include "xen_drm_front.h"
#include "xen_drm_front_cfg.h"
#include "xen_drm_front_evtchnl.h"
#include "xen_drm_front_gem.h"
#include "xen_drm_front_kms.h"

struct xen_drm_front_dbuf {
	struct list_head list;
	u64 dbuf_cookie;
	u64 fb_cookie;

	struct xen_front_pgdir_shbuf shbuf;
};

static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
			     struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
{
	dbuf->dbuf_cookie = dbuf_cookie;
	list_add(&dbuf->list, &front_info->dbuf_list);
}

static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
					   u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie)
			return buf;

	return NULL;
}

static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie) {
			list_del(&buf->list);
			xen_front_pgdir_shbuf_unmap(&buf->shbuf);
			xen_front_pgdir_shbuf_free(&buf->shbuf);
			kfree(buf);
			break;
		}
}

static void dbuf_free_all(struct list_head *dbuf_list)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list) {
		list_del(&buf->list);
		xen_front_pgdir_shbuf_unmap(&buf->shbuf);
		xen_front_pgdir_shbuf_free(&buf->shbuf);
		kfree(buf);
	}
}
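/*
 * The helpers above keep per-buffer bookkeeping for display buffers
 * (dbufs), keyed by a 64-bit cookie shared with the backend.  A typical
 * lifecycle, as used by xen_drm_front_dbuf_create() and
 * xen_drm_front_dbuf_destroy() below (informal sketch):
 *
 *	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
 *	dbuf_add_to_list(front_info, dbuf, cookie);
 *	...share the pages with the backend via dbuf->shbuf...
 *	dbuf_free(&front_info->dbuf_list, cookie);  // unmap, free, unlink
 */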
static struct xendispl_req *
be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation)
{
	struct xendispl_req *req;

	req = RING_GET_REQUEST(&evtchnl->u.req.ring,
			       evtchnl->u.req.ring.req_prod_pvt);
	req->operation = operation;
	req->id = evtchnl->evt_next_id++;
	evtchnl->evt_id = req->id;
	return req;
}

static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl,
			   struct xendispl_req *req)
{
	reinit_completion(&evtchnl->u.req.completion);
	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return -EIO;

	xen_drm_front_evtchnl_flush(evtchnl);
	return 0;
}

static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl)
{
	if (wait_for_completion_timeout(&evtchnl->u.req.completion,
			msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0)
		return -ETIMEDOUT;

	return evtchnl->u.req.resp_status;
}

int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
			   u32 x, u32 y, u32 width, u32 height,
			   u32 bpp, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_info *front_info;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	front_info = pipeline->drm_info->front_info;
	evtchnl = &front_info->evt_pairs[pipeline->index].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
	req->op.set_config.x = x;
	req->op.set_config.y = y;
	req->op.set_config.width = width;
	req->op.set_config.height = height;
	req->op.set_config.bpp = bpp;
	req->op.set_config.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
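/*
 * Every synchronous operation in this file, xen_drm_front_mode_set()
 * above included, follows the same request/response pattern (informal
 * sketch):
 *
 *	mutex_lock(&evtchnl->u.req.req_io_lock);	// one request in flight
 *	spin_lock_irqsave(&front_info->io_lock, flags);	// protect the ring
 *	req = be_prepare_req(evtchnl, <operation>);
 *	...fill req->op.<operation>...
 *	ret = be_stream_do_io(evtchnl, req);		// push request, notify
 *	spin_unlock_irqrestore(&front_info->io_lock, flags);
 *	if (ret == 0)
 *		ret = be_stream_wait_io(evtchnl);	// wait for the response
 *	mutex_unlock(&evtchnl->u.req.req_io_lock);
 */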
int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
			      u64 dbuf_cookie, u32 width, u32 height,
			      u32 bpp, u64 size, struct page **pages)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *dbuf;
	struct xendispl_req *req;
	struct xen_front_pgdir_shbuf_cfg buf_cfg;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
	if (!dbuf)
		return -ENOMEM;

	dbuf_add_to_list(front_info, dbuf, dbuf_cookie);

	memset(&buf_cfg, 0, sizeof(buf_cfg));
	buf_cfg.xb_dev = front_info->xb_dev;
	buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	buf_cfg.pages = pages;
	buf_cfg.pgdir = &dbuf->shbuf;
	buf_cfg.be_alloc = front_info->cfg.be_alloc;

	ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
	if (ret < 0)
		goto fail_shbuf_alloc;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
	req->op.dbuf_create.gref_directory =
			xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
	req->op.dbuf_create.buffer_sz = size;
	req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
	req->op.dbuf_create.width = width;
	req->op.dbuf_create.height = height;
	req->op.dbuf_create.bpp = bpp;
	if (buf_cfg.be_alloc)
		req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret < 0)
		goto fail;

	ret = be_stream_wait_io(evtchnl);
	if (ret < 0)
		goto fail;

	ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
	if (ret < 0)
		goto fail;

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return 0;

fail:
	mutex_unlock(&evtchnl->u.req.req_io_lock);
fail_shbuf_alloc:
	dbuf_free(&front_info->dbuf_list, dbuf_cookie);
	return ret;
}
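/*
 * Note on xen_drm_front_dbuf_create() above: the buffer pages are shared
 * with the backend through a page directory of grant references built by
 * xen_front_pgdir_shbuf_alloc(); req->op.dbuf_create.gref_directory
 * carries the grant reference of the first directory page, from which
 * the backend can walk to the rest.  With XENDISPL_DBUF_FLG_REQ_ALLOC
 * set it is the backend that allocates the buffer instead, and we map it
 * on our side with xen_front_pgdir_shbuf_map().
 */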
static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
				      u64 dbuf_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	bool be_alloc;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	be_alloc = front_info->cfg.be_alloc;

	/*
	 * For a backend-allocated buffer, release our references now so
	 * the backend can free the buffer.
	 */
	if (be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
	req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	/*
	 * Do this regardless of communication status with the backend:
	 * if we cannot remove remote resources, remove what we can locally.
	 */
	if (!be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
			    u64 dbuf_cookie, u64 fb_cookie, u32 width,
			    u32 height, u32 pixel_format)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *buf;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie);
	if (!buf)
		return -EINVAL;

	buf->fb_cookie = fb_cookie;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
	req->op.fb_attach.dbuf_cookie = dbuf_cookie;
	req->op.fb_attach.fb_cookie = fb_cookie;
	req->op.fb_attach.width = width;
	req->op.fb_attach.height = height;
	req->op.fb_attach.pixel_format = pixel_format;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
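/*
 * Framebuffer cookies: xen_drm_front_fb_attach() above binds a DRM
 * framebuffer (fb_cookie) to an already-created display buffer
 * (dbuf_cookie); the dbuf must exist first, otherwise dbuf_get() fails
 * with -EINVAL.  Detach is keyed by fb_cookie alone, as seen in
 * xen_drm_front_fb_detach() below.
 */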
int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
			    u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
	req->op.fb_detach.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
			    int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	if (unlikely(conn_idx >= front_info->num_evt_pairs))
		return -EINVAL;

	evtchnl = &front_info->evt_pairs[conn_idx].req;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
	req->op.pg_flip.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
				 int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;

	if (unlikely(conn_idx >= front_info->cfg.num_connectors))
		return;

	xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx],
					fb_cookie);
}
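/*
 * Example (informal): for a 1024x768 dumb buffer with bpp == 32,
 * xen_drm_drv_dumb_create() below computes
 *	pitch = DIV_ROUND_UP(1024 * 32, 8) = 4096 bytes
 *	size  = 4096 * 768 = 3145728 bytes,
 * i.e. 768 4-KiB pages to be shared with the backend through
 * xen_drm_front_dbuf_create().
 */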
static int xen_drm_drv_dumb_create(struct drm_file *filp,
				   struct drm_device *dev,
				   struct drm_mode_create_dumb *args)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_gem_object *obj;
	int ret;

	/*
	 * Dumb creation is a two-stage process: first we create a fully
	 * constructed GEM object, which is communicated to the backend,
	 * and only after that do we create the GEM handle.  This ordering
	 * avoids a race: once a handle is created it becomes immediately
	 * visible to user-space, which could then try to access an object
	 * that has no pages yet.
	 * For details also see drm_gem_handle_create().
	 */
	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	obj = xen_drm_front_gem_create(dev, args->size);
	if (IS_ERR_OR_NULL(obj)) {
		ret = PTR_ERR(obj);
		goto fail;
	}

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(obj),
					args->width, args->height, args->bpp,
					args->size,
					xen_drm_front_gem_get_pages(obj));
	if (ret)
		goto fail_backend;

	/* This is the tail of GEM object creation */
	ret = drm_gem_handle_create(filp, obj, &args->handle);
	if (ret)
		goto fail_handle;

	/* Drop the reference from allocation - the handle holds it now */
	drm_gem_object_put_unlocked(obj);
	return 0;

fail_handle:
	xen_drm_front_dbuf_destroy(drm_info->front_info,
				   xen_drm_front_dbuf_to_cookie(obj));
fail_backend:
	/* Drop the reference from allocation */
	drm_gem_object_put_unlocked(obj);
fail:
	DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
	return ret;
}

static void xen_drm_drv_free_object_unlocked(struct drm_gem_object *obj)
{
	struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
	int idx;

	if (drm_dev_enter(obj->dev, &idx)) {
		xen_drm_front_dbuf_destroy(drm_info->front_info,
					   xen_drm_front_dbuf_to_cookie(obj));
		drm_dev_exit(idx);
	} else {
		dbuf_free(&drm_info->front_info->dbuf_list,
			  xen_drm_front_dbuf_to_cookie(obj));
	}

	xen_drm_front_gem_free_object_unlocked(obj);
}

static void xen_drm_drv_release(struct drm_device *dev)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_drm_front_info *front_info = drm_info->front_info;

	xen_drm_front_kms_fini(drm_info);

	drm_atomic_helper_shutdown(dev);
	drm_mode_config_cleanup(dev);

	drm_dev_fini(dev);
	kfree(dev);

	if (front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);

	kfree(drm_info);
}
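/*
 * File and driver operations: apart from mmap and the GEM/PRIME hooks,
 * everything below is delegated to the DRM core; buffer mapping and
 * export/import go through the xen_drm_front_gem_* helpers declared in
 * xen_drm_front_gem.h.
 */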
static const struct file_operations xen_drm_dev_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = xen_drm_front_gem_mmap,
};

static const struct vm_operations_struct xen_drm_drv_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static struct drm_driver xen_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.release = xen_drm_drv_release,
	.gem_vm_ops = &xen_drm_drv_vm_ops,
	.gem_free_object_unlocked = xen_drm_drv_free_object_unlocked,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
	.gem_prime_get_sg_table = xen_drm_front_gem_get_sg_table,
	.gem_prime_vmap = xen_drm_front_gem_prime_vmap,
	.gem_prime_vunmap = xen_drm_front_gem_prime_vunmap,
	.gem_prime_mmap = xen_drm_front_gem_prime_mmap,
	.dumb_create = xen_drm_drv_dumb_create,
	.fops = &xen_drm_dev_fops,
	.name = "xendrm-du",
	.desc = "Xen PV DRM Display Unit",
	.date = "20180221",
	.major = 1,
	.minor = 0,
};

static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
{
	struct device *dev = &front_info->xb_dev->dev;
	struct xen_drm_front_drm_info *drm_info;
	struct drm_device *drm_dev;
	int ret;

	DRM_INFO("Creating %s\n", xen_drm_driver.desc);

	drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
	if (!drm_info) {
		ret = -ENOMEM;
		goto fail;
	}

	drm_info->front_info = front_info;
	front_info->drm_info = drm_info;

	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
	if (IS_ERR(drm_dev)) {
		ret = PTR_ERR(drm_dev);
		goto fail;
	}

	drm_info->drm_dev = drm_dev;

	drm_dev->dev_private = drm_info;

	ret = xen_drm_front_kms_init(drm_info);
	if (ret) {
		DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret);
		goto fail_modeset;
	}

	ret = drm_dev_register(drm_dev, 0);
	if (ret)
		goto fail_register;

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 xen_drm_driver.name, xen_drm_driver.major,
		 xen_drm_driver.minor, xen_drm_driver.patchlevel,
		 xen_drm_driver.date, drm_dev->primary->index);

	return 0;

fail_register:
	drm_dev_unregister(drm_dev);
fail_modeset:
	drm_kms_helper_poll_fini(drm_dev);
	drm_mode_config_cleanup(drm_dev);
fail:
	kfree(drm_info);
	return ret;
}
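/*
 * xen_drm_drv_init() above runs once the backend is connected (see
 * displback_connect() below); xen_drm_drv_fini() undoes it on backend
 * disconnect or driver removal: unplug the DRM device, free the event
 * channels and any remaining dbufs, then, unless buffers are backend
 * allocated, go back to XenbusStateInitialising to allow a new handshake.
 */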
static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
	struct drm_device *dev;

	if (!drm_info)
		return;

	dev = drm_info->drm_dev;
	if (!dev)
		return;

	/* Nothing to do if device is already unplugged */
	if (drm_dev_is_unplugged(dev))
		return;

	drm_kms_helper_poll_fini(dev);
	drm_dev_unplug(dev);
	drm_dev_put(dev);

	front_info->drm_info = NULL;

	xen_drm_front_evtchnl_free_all(front_info);
	dbuf_free_all(&front_info->dbuf_list);

	/*
	 * If we are not using backend allocated buffers, then tell the
	 * backend we are ready to (re)initialize. Otherwise, wait for
	 * drm_driver.release.
	 */
	if (!front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);
}

static int displback_initwait(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg = &front_info->cfg;
	int ret;

	cfg->front_info = front_info;
	ret = xen_drm_front_cfg_card(front_info, cfg);
	if (ret < 0)
		return ret;

	DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
	/* Create event channels for all connectors and publish */
	ret = xen_drm_front_evtchnl_create_all(front_info);
	if (ret < 0)
		return ret;

	return xen_drm_front_evtchnl_publish_all(front_info);
}

static int displback_connect(struct xen_drm_front_info *front_info)
{
	xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED);
	return xen_drm_drv_init(front_info);
}
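/*
 * Normal XenBus handshake, as driven by displback_changed() below
 * (informal sketch; left-hand states are ours):
 *
 *	probe:              Initialising  ->  backend enters InitWait
 *	displback_initwait: read config, create/publish event channels,
 *	                    Initialised   ->  backend enters Connected
 *	displback_connect:  create the DRM device, Connected
 */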
static void displback_disconnect(struct xen_drm_front_info *front_info)
{
	if (!front_info->drm_info)
		return;

	/* Tell the backend to wait until we release the DRM driver. */
	xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring);

	xen_drm_drv_fini(front_info);
}

static void displback_changed(struct xenbus_device *xb_dev,
			      enum xenbus_state backend_state)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
	int ret;

	DRM_DEBUG("Backend state is %s, front is %s\n",
		  xenbus_strstate(backend_state),
		  xenbus_strstate(xb_dev->state));

	switch (backend_state) {
	case XenbusStateReconfiguring:
		/* fall through */
	case XenbusStateReconfigured:
		/* fall through */
	case XenbusStateInitialised:
		break;

	case XenbusStateInitialising:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after backend unexpected closure */
		displback_disconnect(front_info);
		break;

	case XenbusStateInitWait:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after backend unexpected closure */
		displback_disconnect(front_info);
		if (xb_dev->state != XenbusStateInitialising)
			break;

		ret = displback_initwait(front_info);
		if (ret < 0)
			xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
		else
			xenbus_switch_state(xb_dev, XenbusStateInitialised);
		break;

	case XenbusStateConnected:
		if (xb_dev->state != XenbusStateInitialised)
			break;

		ret = displback_connect(front_info);
		if (ret < 0) {
			displback_disconnect(front_info);
			xenbus_dev_fatal(xb_dev, ret, "connecting backend");
		} else {
			xenbus_switch_state(xb_dev, XenbusStateConnected);
		}
		break;

	case XenbusStateClosing:
		/*
		 * In this state the backend starts freeing resources, so
		 * let it go into the closed state, so that we can also
		 * remove ours.
		 */
		break;

	case XenbusStateUnknown:
		/* fall through */
	case XenbusStateClosed:
		if (xb_dev->state == XenbusStateClosed)
			break;

		displback_disconnect(front_info);
		break;
	}
}
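/*
 * Driver probe: set up the DMA mask, allocate per-device state and enter
 * XenbusStateInitialising to kick off the handshake sketched above; the
 * rest of the initialization is driven by backend state changes arriving
 * through displback_changed().
 */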
static int xen_drv_probe(struct xenbus_device *xb_dev,
			 const struct xenbus_device_id *id)
{
	struct xen_drm_front_info *front_info;
	struct device *dev = &xb_dev->dev;
	int ret;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret < 0) {
		DRM_ERROR("Cannot setup DMA mask, ret %d", ret);
		return ret;
	}

	front_info = devm_kzalloc(&xb_dev->dev,
				  sizeof(*front_info), GFP_KERNEL);
	if (!front_info)
		return -ENOMEM;

	front_info->xb_dev = xb_dev;
	spin_lock_init(&front_info->io_lock);
	INIT_LIST_HEAD(&front_info->dbuf_list);
	dev_set_drvdata(&xb_dev->dev, front_info);

	return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}

static int xen_drv_remove(struct xenbus_device *dev)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
	int to = 100;

	xenbus_switch_state(dev, XenbusStateClosing);

	/*
	 * On driver removal it is disconnected from XenBus, so no backend
	 * state change events come via the .otherend_changed callback.
	 * This prevents us from exiting gracefully, e.g. signaling the
	 * backend to free event channels, waiting for its state to change
	 * to XenbusStateClosed and cleaning up at our end. Normally, when
	 * the front driver is removed, the backend will finally go into
	 * the XenbusStateInitWait state.
	 *
	 * Workaround: read the backend's state manually and wait with a
	 * time-out.
	 */
	while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
				     XenbusStateUnknown) != XenbusStateInitWait) &&
	       --to)
		msleep(10);

	if (!to) {
		unsigned int state;

		state = xenbus_read_unsigned(front_info->xb_dev->otherend,
					     "state", XenbusStateUnknown);
		DRM_ERROR("Backend state is %s while removing driver\n",
			  xenbus_strstate(state));
	}

	xen_drm_drv_fini(front_info);
	xenbus_frontend_closed(dev);
	return 0;
}

static const struct xenbus_device_id xen_driver_ids[] = {
	{ XENDISPL_DRIVER_NAME },
	{ "" }
};

static struct xenbus_driver xen_driver = {
	.ids = xen_driver_ids,
	.probe = xen_drv_probe,
	.remove = xen_drv_remove,
	.otherend_changed = displback_changed,
};

static int __init xen_drv_init(void)
{
	/* At the moment we only support case with XEN_PAGE_SIZE == PAGE_SIZE */
	if (XEN_PAGE_SIZE != PAGE_SIZE) {
		DRM_ERROR(XENDISPL_DRIVER_NAME
			  ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
			  XEN_PAGE_SIZE, PAGE_SIZE);
		return -ENODEV;
	}

	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_devices())
		return -ENODEV;

	DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n");
	return xenbus_register_frontend(&xen_driver);
}
static void __exit xen_drv_fini(void)
{
	DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n");
	xenbus_unregister_driver(&xen_driver);
}

module_init(xen_drv_init);
module_exit(xen_drv_fini);

MODULE_DESCRIPTION("Xen para-virtualized display device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME);