/*	$NetBSD: virtgpu_ioctl.c,v 1.1 2018/08/27 01:34:59 riastradh Exp $	*/

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtgpu_ioctl.c,v 1.1 2018/08/27 01:34:59 riastradh Exp $");

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <drm/virtgpu_drm.h>
#include "ttm/ttm_execbuf_util.h"

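/*
 * Convert a userspace drm_virtgpu_3d_box into the little-endian
 * virtio_gpu_box layout expected by the host.
 */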
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

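/*
 * VIRTGPU_MAP ioctl: look up the dumb-buffer mmap offset for a GEM
 * handle so userspace can mmap() the object through the DRM fd.
 */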
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

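/*
 * Reserve and validate every buffer object on the list; if any
 * validation fails, the reservations taken so far are backed off.
 */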
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
					   struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
	if (ret != 0)
		return ret;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);
		ret = ttm_bo_validate(bo, &qobj->placement, false, false);
		if (ret) {
			ttm_eu_backoff_reservation(ticket, head);
			return ret;
		}
	}
	return 0;
}

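/* Drop the GEM references taken when the validate list was built. */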
static void virtio_gpu_unref_list(struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);

		drm_gem_object_unreference_unlocked(&qobj->gem_base);
	}
}

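/*
 * Submit a command buffer to the host: look up and validate the
 * referenced buffer objects, copy the command stream in from
 * userspace, queue it with a fence, and fence the validated objects.
 */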
static int virtio_gpu_execbuffer(struct drm_device *dev,
				 struct drm_virtgpu_execbuffer *exbuf,
				 struct drm_file *drm_file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
	struct drm_gem_object *gobj;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_object *qobj;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct list_head validate_list;
	struct ttm_validate_buffer *buflist = NULL;
	int i;
	struct ww_acquire_ctx ticket;
	void *buf;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	INIT_LIST_HEAD(&validate_list);
	if (exbuf->num_bo_handles) {

		bo_handles = drm_malloc_ab(exbuf->num_bo_handles,
					   sizeof(uint32_t));
		buflist = drm_calloc_large(exbuf->num_bo_handles,
					   sizeof(struct ttm_validate_buffer));
		if (!bo_handles || !buflist) {
			drm_free_large(bo_handles);
			drm_free_large(buflist);
			return -ENOMEM;
		}

		user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			drm_free_large(bo_handles);
			drm_free_large(buflist);
			return ret;
		}

		for (i = 0; i < exbuf->num_bo_handles; i++) {
			gobj = drm_gem_object_lookup(dev,
						     drm_file, bo_handles[i]);
			if (!gobj) {
				drm_free_large(bo_handles);
				drm_free_large(buflist);
				return -ENOENT;
			}

			qobj = gem_to_virtio_gpu_obj(gobj);
			buflist[i].bo = &qobj->tbo;

			list_add(&buflist[i].head, &validate_list);
		}
		drm_free_large(bo_handles);
	}

	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
	if (ret)
		goto out_free;

	buf = kmalloc(exbuf->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_unresv;
	}
	if (copy_from_user(buf, (void __user *)(uintptr_t)exbuf->command,
			   exbuf->size)) {
		kfree(buf);
		ret = -EFAULT;
		goto out_unresv;
	}
	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, &fence);

	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);

	/* fence the command bo */
	virtio_gpu_unref_list(&validate_list);
	drm_free_large(buflist);
	fence_put(&fence->f);
	return 0;

out_unresv:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
	virtio_gpu_unref_list(&validate_list);
	drm_free_large(buflist);
	return ret;
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv)
{
	struct drm_virtgpu_execbuffer *execbuffer = data;
	return virtio_gpu_execbuffer(dev, execbuffer, file_priv);
}


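/*
 * VIRTGPU_GETPARAM ioctl: report a single device parameter (3D
 * support, or the capset-query fix) back to userspace.
 */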
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d == true ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user((void __user *)(unsigned long)param->value,
			 &value, sizeof(int))) {
		return -EFAULT;
	}
	return 0;
}

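/*
 * VIRTGPU_RESOURCE_CREATE ioctl: create a host resource and a backing
 * GEM/TTM object.  Without virgl 3D only simple 2D resources are
 * accepted; with 3D the full 3D parameters are forwarded to the host
 * and the backing-store attach is fenced.
 */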
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	int ret;
	uint32_t res_id;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	uint32_t size;
	struct list_head validate_list;
	struct ttm_validate_buffer mainbuf;
	struct virtio_gpu_fence *fence = NULL;
	struct ww_acquire_ctx ticket;
	struct virtio_gpu_resource_create_3d rc_3d;

	if (vgdev->has_virgl_3d == false) {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	INIT_LIST_HEAD(&validate_list);
	memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

	virtio_gpu_resource_id_get(vgdev, &res_id);

	size = rc->size;

	/* allocate a single page size object */
	if (size == 0)
		size = PAGE_SIZE;

	qobj = virtio_gpu_alloc_object(dev, size, false, false);
	if (IS_ERR(qobj)) {
		ret = PTR_ERR(qobj);
		goto fail_id;
	}
	obj = &qobj->gem_base;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
					       rc->width, rc->height);

		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
	} else {
		/* use a gem reference since unref list undoes them */
		drm_gem_object_reference(&qobj->gem_base);
		mainbuf.bo = &qobj->tbo;
		list_add(&mainbuf.head, &validate_list);

		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
		if (ret) {
			DRM_DEBUG("failed to validate\n");
			goto fail_unref;
		}

		rc_3d.resource_id = cpu_to_le32(res_id);
		rc_3d.target = cpu_to_le32(rc->target);
		rc_3d.format = cpu_to_le32(rc->format);
		rc_3d.bind = cpu_to_le32(rc->bind);
		rc_3d.width = cpu_to_le32(rc->width);
		rc_3d.height = cpu_to_le32(rc->height);
		rc_3d.depth = cpu_to_le32(rc->depth);
		rc_3d.array_size = cpu_to_le32(rc->array_size);
		rc_3d.last_level = cpu_to_le32(rc->last_level);
		rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
		rc_3d.flags = cpu_to_le32(rc->flags);

		virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
		if (ret) {
			ttm_eu_backoff_reservation(&ticket, &validate_list);
			goto fail_unref;
		}
		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
	}

	qobj->hw_res_handle = res_id;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {

		drm_gem_object_release(obj);
		if (vgdev->has_virgl_3d) {
			virtio_gpu_unref_list(&validate_list);
			fence_put(&fence->f);
		}
		return ret;
	}
	drm_gem_object_unreference_unlocked(obj);
	rc->res_handle = res_id; /* similar to a VM address */
	rc->bo_handle = handle;

	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		fence_put(&fence->f);
	}
	return 0;
fail_unref:
	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		fence_put(&fence->f);
	}
//fail_obj:
//	drm_gem_object_handle_unreference_unlocked(obj);
fail_id:
	virtio_gpu_resource_id_put(vgdev, res_id);
	return ret;
}

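/*
 * VIRTGPU_RESOURCE_INFO ioctl: return the size and host resource
 * handle of a buffer object.
 */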
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(dev, file_priv, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->gem_base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

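/*
 * VIRTGPU_TRANSFER_FROM_HOST ioctl: copy a region of the host resource
 * back into the guest buffer object (virgl 3D only), fenced on the BO.
 */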
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;
	struct virtio_gpu_box box;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, qobj->hw_res_handle,
		 vfpriv->ctx_id, offset, args->level,
		 &box, &fence);
	reservation_object_add_excl_fence(qobj->tbo.resv,
					  &fence->f);

	fence_put(&fence->f);
out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

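/*
 * VIRTGPU_TRANSFER_TO_HOST ioctl: copy guest buffer contents to the
 * host resource; a plain 2D transfer without virgl, a fenced 3D
 * transfer with it.
 */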
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_box box;
	int ret;
	u32 offset = args->offset;

	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, qobj->hw_res_handle, offset,
			 box.w, box.h, box.x, box.y, NULL);
	} else {
		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev, qobj->hw_res_handle,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &box, &fence);
		reservation_object_add_excl_fence(qobj->tbo.resv,
						  &fence->f);
		fence_put(&fence->f);
	}

out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

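/*
 * VIRTGPU_WAIT ioctl: wait for a buffer object to become idle, or just
 * poll its status when VIRTGPU_WAIT_NOWAIT is set.
 */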
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	int ret;
	bool nowait = false;

	gobj = drm_gem_object_lookup(dev, file, args->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	if (args->flags & VIRTGPU_WAIT_NOWAIT)
		nowait = true;
	ret = virtio_gpu_object_wait(qobj, nowait);

	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

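/*
 * VIRTGPU_GET_CAPS ioctl: copy a capability set to userspace, fetching
 * it from the host and caching it first if it is not cached yet.
 */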
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;
	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			ptr = cache_ent->caps_cache;
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);

	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);

	ptr = cache_ent->caps_cache;

copy_exit:
	if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
		return -EFAULT;

	return 0;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	   thread these in the underlying GL */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};