/* $NetBSD: xen_drm_front_evtchnl.c,v 1.2 2021/12/18 23:45:45 riastradh Exp $ */

// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko (at) epam.com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_drm_front_evtchnl.c,v 1.2 2021/12/18 23:45:45 riastradh Exp $");

#include <linux/errno.h>
#include <linux/irq.h>

#include <drm/drm_print.h>

#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/grant_table.h>

#include "xen_drm_front.h"
#include "xen_drm_front_evtchnl.h"

/*
 * Interrupt handler for a request (control) channel: drains all backend
 * responses from the shared ring and wakes the waiter blocked in the
 * corresponding request path via the channel's completion.
 *
 * Runs with front_info->io_lock held (IRQ-safe) for the duration of ring
 * processing.  Always returns IRQ_HANDLED, even when the channel is not
 * connected, since the event was still directed at us.
 */
static irqreturn_t evtchnl_interrupt_ctrl(int irq, void *dev_id)
{
	struct xen_drm_front_evtchnl *evtchnl = dev_id;
	struct xen_drm_front_info *front_info = evtchnl->front_info;
	struct xendispl_resp *resp;
	RING_IDX i, rp;
	unsigned long flags;

	/* Ignore spurious/late interrupts on a torn-down channel. */
	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	spin_lock_irqsave(&front_info->io_lock, flags);

again:
	rp = evtchnl->u.req.ring.sring->rsp_prod;
	/* ensure we see queued responses up to rp */
	virt_rmb();

	for (i = evtchnl->u.req.ring.rsp_cons; i != rp; i++) {
		resp = RING_GET_RESPONSE(&evtchnl->u.req.ring, i);
		/*
		 * Only the response matching the currently outstanding
		 * request id is of interest; anything else is stale and
		 * silently skipped.
		 */
		if (unlikely(resp->id != evtchnl->evt_id))
			continue;

		switch (resp->operation) {
		case XENDISPL_OP_PG_FLIP:
		case XENDISPL_OP_FB_ATTACH:
		case XENDISPL_OP_FB_DETACH:
		case XENDISPL_OP_DBUF_CREATE:
		case XENDISPL_OP_DBUF_DESTROY:
		case XENDISPL_OP_SET_CONFIG:
			/*
			 * Publish the backend status, then release the
			 * requester waiting on this channel's completion.
			 */
			evtchnl->u.req.resp_status = resp->status;
			complete(&evtchnl->u.req.completion);
			break;

		default:
			DRM_ERROR("Operation %d is not supported\n",
				  resp->operation);
			break;
		}
	}

	evtchnl->u.req.ring.rsp_cons = i;

	if (i != evtchnl->u.req.ring.req_prod_pvt) {
		int more_to_do;

		/*
		 * Requests are still in flight: re-check for responses
		 * that may have raced in, to avoid losing a wakeup.
		 */
		RING_FINAL_CHECK_FOR_RESPONSES(&evtchnl->u.req.ring,
					       more_to_do);
		if (more_to_do)
			goto again;
	} else {
		/* Nothing outstanding; re-arm response notification. */
		evtchnl->u.req.ring.sring->rsp_event = i + 1;
	}

	spin_unlock_irqrestore(&front_info->io_lock, flags);
	return IRQ_HANDLED;
}

/*
 * Interrupt handler for an event (in) channel: consumes backend-generated
 * events from the shared event page and dispatches them.  Currently only
 * XENDISPL_EVT_PG_FLIP (page-flip done) is handled; unknown event types
 * are consumed without action.
 */
static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
{
	struct xen_drm_front_evtchnl *evtchnl = dev_id;
	struct xen_drm_front_info *front_info = evtchnl->front_info;
	struct xendispl_event_page *page = evtchnl->u.evt.page;
	u32 cons, prod;
	unsigned long flags;

	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	spin_lock_irqsave(&front_info->io_lock, flags);

	prod = page->in_prod;
	/* ensure we see ring contents up to prod */
	virt_rmb();
	if (prod == page->in_cons)
		goto out;

	for (cons = page->in_cons; cons != prod; cons++) {
		struct xendispl_evt *event;

		event = &XENDISPL_IN_RING_REF(page, cons);
		/*
		 * Sequence check: each consumed slot is expected to carry
		 * the next evt_id.  Note the expected id advances (evt_id++)
		 * even when a mismatching event is skipped.
		 */
		if (unlikely(event->id != evtchnl->evt_id++))
			continue;

		switch (event->type) {
		case XENDISPL_EVT_PG_FLIP:
			xen_drm_front_on_frame_done(front_info, evtchnl->index,
						    event->op.pg_flip.fb_cookie);
			break;
		}
	}
	page->in_cons = cons;
	/* ensure ring contents */
	virt_wmb();

out:
	spin_unlock_irqrestore(&front_info->io_lock, flags);
	return IRQ_HANDLED;
}

/*
 * Tear down a single event channel: mark it disconnected, fail any
 * pending waiters with -EIO, unbind the IRQ, free the Xen event channel,
 * revoke the grant for the shared page and zero the descriptor.
 * Safe to call on a partially initialized channel (each teardown step is
 * guarded by the corresponding field being set).
 */
static void evtchnl_free(struct xen_drm_front_info *front_info,
			 struct xen_drm_front_evtchnl *evtchnl)
{
	unsigned long page = 0;

	if (evtchnl->type == EVTCHNL_TYPE_REQ)
		page = (unsigned long)evtchnl->u.req.ring.sring;
	else if (evtchnl->type == EVTCHNL_TYPE_EVT)
		page = (unsigned long)evtchnl->u.evt.page;
	/* No shared page means the channel was never (fully) allocated. */
	if (!page)
		return;

	evtchnl->state = EVTCHNL_STATE_DISCONNECTED;

	if (evtchnl->type == EVTCHNL_TYPE_REQ) {
		/* release all who still waits for response if any */
		evtchnl->u.req.resp_status = -EIO;
		complete_all(&evtchnl->u.req.completion);
	}

	if (evtchnl->irq)
		unbind_from_irqhandler(evtchnl->irq, evtchnl);

	if (evtchnl->port)
		xenbus_free_evtchn(front_info->xb_dev, evtchnl->port);

	/* end access and free the page */
	if (evtchnl->gref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(evtchnl->gref, 0, page);

	memset(evtchnl, 0, sizeof(*evtchnl));
}

/*
 * Allocate and wire up one event channel of the given type for connector
 * @index: shared page, grant reference, Xen event channel and IRQ handler
 * (evtchnl_interrupt_ctrl for REQ channels, evtchnl_interrupt_evt for EVT
 * channels).  Returns 0 on success or a negative errno.
 *
 * NOTE(review): on failure after the page has been granted, cleanup of the
 * grant/evtchn appears to be left to evtchnl_free() via the caller's error
 * path — confirm callers always unwind with xen_drm_front_evtchnl_free_all().
 */
static int evtchnl_alloc(struct xen_drm_front_info *front_info, int index,
			 struct xen_drm_front_evtchnl *evtchnl,
			 enum xen_drm_front_evtchnl_type type)
{
	struct xenbus_device *xb_dev = front_info->xb_dev;
	unsigned long page;
	grant_ref_t gref;
	irq_handler_t handler;
	int ret;

	memset(evtchnl, 0, sizeof(*evtchnl));
	evtchnl->type = type;
	evtchnl->index = index;
	evtchnl->front_info = front_info;
	evtchnl->state = EVTCHNL_STATE_DISCONNECTED;
	evtchnl->gref = GRANT_INVALID_REF;

	page = get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!page) {
		ret = -ENOMEM;
		goto fail;
	}

	if (type == EVTCHNL_TYPE_REQ) {
		struct xen_displif_sring *sring;

		init_completion(&evtchnl->u.req.completion);
		mutex_init(&evtchnl->u.req.req_io_lock);
		sring = (struct xen_displif_sring *)page;
		SHARED_RING_INIT(sring);
		FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);

		/* xenbus_grant_ring() stores the grant reference in gref. */
		ret = xenbus_grant_ring(xb_dev, sring, 1, &gref);
		if (ret < 0) {
			evtchnl->u.req.ring.sring = NULL;
			free_page(page);
			goto fail;
		}

		handler = evtchnl_interrupt_ctrl;
	} else {
		/* Grant the event page read/write to the backend domain. */
		ret = gnttab_grant_foreign_access(xb_dev->otherend_id,
						  virt_to_gfn((void *)page), 0);
		if (ret < 0) {
			free_page(page);
			goto fail;
		}

		evtchnl->u.evt.page = (struct xendispl_event_page *)page;
		/* On success the return value is the grant reference. */
		gref = ret;
		handler = evtchnl_interrupt_evt;
	}
	evtchnl->gref = gref;

	ret = xenbus_alloc_evtchn(xb_dev, &evtchnl->port);
	if (ret < 0)
		goto fail;

	ret = bind_evtchn_to_irqhandler(evtchnl->port,
					handler, 0, xb_dev->devicetype,
					evtchnl);
	if (ret < 0)
		goto fail;

	evtchnl->irq = ret;
	return 0;

fail:
	DRM_ERROR("Failed to allocate ring: %d\n", ret);
	return ret;
}

/*
 * Allocate one REQ + EVT channel pair per configured connector.
 * On any failure all previously allocated channels are released via
 * xen_drm_front_evtchnl_free_all().  Returns 0 or a negative errno.
 */
int xen_drm_front_evtchnl_create_all(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg;
	int ret, conn;

	cfg = &front_info->cfg;

	front_info->evt_pairs =
			kcalloc(cfg->num_connectors,
				sizeof(struct xen_drm_front_evtchnl_pair),
				GFP_KERNEL);
	if (!front_info->evt_pairs) {
		ret = -ENOMEM;
		goto fail;
	}

	for (conn = 0; conn < cfg->num_connectors; conn++) {
		ret = evtchnl_alloc(front_info, conn,
				    &front_info->evt_pairs[conn].req,
				    EVTCHNL_TYPE_REQ);
		if (ret < 0) {
			DRM_ERROR("Error allocating control channel\n");
			goto fail;
		}

		ret = evtchnl_alloc(front_info, conn,
				    &front_info->evt_pairs[conn].evt,
				    EVTCHNL_TYPE_EVT);
		if (ret < 0) {
			DRM_ERROR("Error allocating in-event channel\n");
			goto fail;
		}
	}
	front_info->num_evt_pairs = cfg->num_connectors;
	return 0;

fail:
	xen_drm_front_evtchnl_free_all(front_info);
	return ret;
}

/*
 * Publish one channel's grant reference (@node_ring) and event-channel
 * port (@node_chnl) under the given XenStore @path, inside the caller's
 * transaction @xbt.  Returns 0 or a negative errno (already reported via
 * xenbus_dev_error).
 */
static int evtchnl_publish(struct xenbus_transaction xbt,
			   struct xen_drm_front_evtchnl *evtchnl,
			   const char *path, const char *node_ring,
			   const char *node_chnl)
{
	struct xenbus_device *xb_dev = evtchnl->front_info->xb_dev;
	int ret;

	/* write control channel ring reference */
	ret = xenbus_printf(xbt, path, node_ring, "%u", evtchnl->gref);
	if (ret < 0) {
		xenbus_dev_error(xb_dev, ret, "writing ring-ref");
		return ret;
	}

	/* write event channel ring reference */
	ret = xenbus_printf(xbt, path, node_chnl, "%u", evtchnl->port);
	if (ret < 0) {
		xenbus_dev_error(xb_dev, ret, "writing event channel");
		return ret;
	}

	return 0;
}

/*
 * Publish ring references and event-channel ports for every connector's
 * channel pair to XenStore in a single transaction.  The transaction is
 * retried when it ends with -EAGAIN (standard XenStore contention
 * handling); any other failure aborts the transaction and reports a
 * fatal xenbus error.  Returns 0 or a negative errno.
 */
int xen_drm_front_evtchnl_publish_all(struct xen_drm_front_info *front_info)
{
	struct xenbus_transaction xbt;
	struct xen_drm_front_cfg *plat_data;
	int ret, conn;

	plat_data = &front_info->cfg;

again:
	ret = xenbus_transaction_start(&xbt);
	if (ret < 0) {
		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "starting transaction");
		return ret;
	}

	for (conn = 0; conn < plat_data->num_connectors; conn++) {
		ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].req,
				      plat_data->connectors[conn].xenstore_path,
				      XENDISPL_FIELD_REQ_RING_REF,
				      XENDISPL_FIELD_REQ_CHANNEL);
		if (ret < 0)
			goto fail;

		ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].evt,
				      plat_data->connectors[conn].xenstore_path,
				      XENDISPL_FIELD_EVT_RING_REF,
				      XENDISPL_FIELD_EVT_CHANNEL);
		if (ret < 0)
			goto fail;
	}

	ret = xenbus_transaction_end(xbt, 0);
	if (ret < 0) {
		/* Contention: restart the whole transaction. */
		if (ret == -EAGAIN)
			goto again;

		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "completing transaction");
		goto fail_to_end;
	}

	return 0;

fail:
	/* Abort the open transaction before reporting. */
	xenbus_transaction_end(xbt, 1);

fail_to_end:
	xenbus_dev_fatal(front_info->xb_dev, ret, "writing Xen store");
	return ret;
}

/*
 * Push the queued request onto the shared ring and kick the backend via
 * the channel's IRQ if the ring macro says notification is needed.
 * Caller is expected to have filled the request at req_prod_pvt.
 */
void xen_drm_front_evtchnl_flush(struct xen_drm_front_evtchnl *evtchnl)
{
	int notify;

	evtchnl->u.req.ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&evtchnl->u.req.ring, notify);
	if (notify)
		notify_remote_via_irq(evtchnl->irq);
}

/*
 * Atomically (under io_lock) set the state of every allocated channel,
 * both REQ and EVT, to @state.  No-op when no channels were created.
 */
void xen_drm_front_evtchnl_set_state(struct xen_drm_front_info *front_info,
				     enum xen_drm_front_evtchnl_state state)
{
	unsigned long flags;
	int i;

	if (!front_info->evt_pairs)
		return;

	spin_lock_irqsave(&front_info->io_lock, flags);
	for (i = 0; i < front_info->num_evt_pairs; i++) {
		front_info->evt_pairs[i].req.state = state;
		front_info->evt_pairs[i].evt.state = state;
	}
	spin_unlock_irqrestore(&front_info->io_lock, flags);
}

/*
 * Release every allocated channel pair and the pair array itself.
 * Idempotent: evt_pairs is reset to NULL so a second call returns early.
 */
void xen_drm_front_evtchnl_free_all(struct xen_drm_front_info *front_info)
{
	int i;

	if (!front_info->evt_pairs)
		return;

	for (i = 0; i < front_info->num_evt_pairs; i++) {
		evtchnl_free(front_info, &front_info->evt_pairs[i].req);
		evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
	}

	kfree(front_info->evt_pairs);
	front_info->evt_pairs = NULL;
}