/*	$NetBSD: virtgpu_vq.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $	*/

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtgpu_vq.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $");

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

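/*
 * Resource IDs are allocated from an IDR so every host-visible resource
 * gets a unique, non-zero handle; _put releases the handle again.
 */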
void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				uint32_t *resid)
{
	int handle;

	idr_preload(GFP_KERNEL);
	spin_lock(&vgdev->resource_idr_lock);
	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&vgdev->resource_idr_lock);
	idr_preload_end();
	*resid = handle;
}

void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	spin_lock(&vgdev->resource_idr_lock);
	idr_remove(&vgdev->resource_idr, id);
	spin_unlock(&vgdev->resource_idr_lock);
}

void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	schedule_work(&vgdev->cursorq.dequeue_work);
}

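/*
 * Preallocate one vbuffer per ring slot of both the control and cursor
 * virtqueues; each vbuffer carries inline space for a small command and
 * a small response (see VBUFFER_SIZE above).
 */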
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_vbuffer *vbuf;
	int i, size, count = 0;
	void *ptr;

	INIT_LIST_HEAD(&vgdev->free_vbufs);
	spin_lock_init(&vgdev->free_vbufs_lock);
	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
	count += virtqueue_get_vring_size(vgdev->cursorq.vq);
	size = count * VBUFFER_SIZE;
	DRM_INFO("virtio vbuffers: %d bufs, %zdB each, %dkB total.\n",
		 count, VBUFFER_SIZE, size / 1024);

	vgdev->vbufs = kzalloc(size, GFP_KERNEL);
	if (!vgdev->vbufs)
		return -ENOMEM;

	for (i = 0, ptr = vgdev->vbufs;
	     i < count;
	     i++, ptr += VBUFFER_SIZE) {
		vbuf = ptr;
		list_add(&vbuf->list, &vgdev->free_vbufs);
	}
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_vbuffer *vbuf;
	int i, count = 0;

	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
	count += virtqueue_get_vring_size(vgdev->cursorq.vq);

	spin_lock(&vgdev->free_vbufs_lock);
	for (i = 0; i < count; i++) {
		if (WARN_ON(list_empty(&vgdev->free_vbufs)))
			return;
		vbuf = list_first_entry(&vgdev->free_vbufs,
					struct virtio_gpu_vbuffer, list);
		list_del(&vbuf->list);
	}
	spin_unlock(&vgdev->free_vbufs_lock);
	kfree(vgdev->vbufs);
}

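/*
 * Grab a vbuffer from the free list and lay the inline command buffer
 * out directly behind the header.  Responses up to MAX_INLINE_RESP_SIZE
 * also live inline; larger ones use the caller-supplied resp_buf.
 */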
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	spin_lock(&vgdev->free_vbufs_lock);
	BUG_ON(list_empty(&vgdev->free_vbufs));
	vbuf = list_first_entry(&vgdev->free_vbufs,
				struct virtio_gpu_vbuffer, list);
	list_del(&vbuf->list);
	spin_unlock(&vgdev->free_vbufs_lock);
	memset(vbuf, 0, VBUFFER_SIZE);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

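/*
 * Return a vbuffer to the free list, releasing any out-of-line data and
 * response buffers it still owns.
 */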
static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kfree(vbuf->data_buf);
	spin_lock(&vgdev->free_vbufs_lock);
	list_add(&vbuf->list, &vgdev->free_vbufs);
	spin_unlock(&vgdev->free_vbufs_lock);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

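/*
 * Work handler for the control virtqueue: collect completed buffers,
 * note the highest fence id seen, run per-buffer response callbacks and
 * finally signal completed fences.
 */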
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);

		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

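/*
 * Queue a command on the control virtqueue.  The scatterlist is built
 * from the inline command, an optional out-of-line data buffer and an
 * optional response buffer.  Called with ctrlq.qlock held; the lock may
 * be dropped temporarily while waiting for ring space.
 */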
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	int outcnt = 0, incnt = 0;
	int ret;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt+incnt] = &vcmd;
	outcnt++;

	if (vbuf->data_size) {
		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
		sgs[outcnt + incnt] = &vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	if (!ret)
		ret = vq->num_free;
	return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	int rc;

	spin_lock(&vgdev->ctrlq.qlock);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

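/*
 * Like virtio_gpu_queue_ctrl_buffer, but optionally emits a fence while
 * the queue lock is held so fence ids reach the host in order.
 */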
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_ctrl_hdr *hdr,
					       struct virtio_gpu_fence **fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int rc;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue.  If not,
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_nolock might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 3) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (fence)
		virtio_gpu_fence_emit(vgdev, hdr, fence);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

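/*
 * Queue a cursor update.  The cursor virtqueue only carries commands,
 * so a single out sg entry is enough.
 */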
static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (!ret)
		ret = vq->num_free;
	return ret;
}

/* just create gem objects for userspace and long lived objects,
   just use dma_alloced pages for the queue objects? */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    uint32_t resource_id,
				    uint32_t format,
				    uint32_t width,
				    uint32_t height)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(width);
	cmd_p->height = cpu_to_le32(height);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
					   uint32_t resource_id)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = width;
	cmd_p->r.height = height;
	cmd_p->r.x = x;
	cmd_p->r.y = y;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}


int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size = vgdev->capsets[idx].max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *resp_buf;

	if (idx > vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name)-1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name)-1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_resource_create_3d *rc_3d,
				  struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	*cmd_p = *rc_3d;
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->hdr.flags = 0;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t resource_id, uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

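/*
 * Attach guest backing pages to a host resource: translate the object's
 * sg table into virtio_gpu_mem_entry records (freed once the ring has
 * consumed them) and send an ATTACH_BACKING command, optionally fenced.
 */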
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     uint32_t resource_id,
			     struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si;

	if (!obj->pages) {
		int ret;
		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
		if (ret)
			return ret;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(obj->pages->nents,
			     sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
		ents[si].addr = cpu_to_le64(sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
					       ents, obj->pages->nents,
					       fence);
	obj->hw_res_handle = resource_id;
	return 0;
}

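/*
 * Push the cached cursor state for one output to the host via the
 * cursor virtqueue.
 */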
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}