/*	$NetBSD: virtgpu_gem.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $	*/

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtgpu_gem.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $");

#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>

#include "virtgpu_drv.h"

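/*
 * Create a virtio-gpu GEM object according to @params and publish it to
 * userspace as a handle in @file.  On success the handle owns the sole
 * reference (the allocation reference is dropped below), so *obj_p only
 * stays alive as long as the handle does; callers that need the object
 * longer must take their own reference.
 */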
int virtio_gpu_gem_create(struct drm_file *file,
			  struct drm_device *dev,
			  struct virtio_gpu_object_params *params,
			  struct drm_gem_object **obj_p,
			  uint32_t *handle_p)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *obj;
	int ret;
	u32 handle;

	ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
	if (ret < 0)
		return ret;

	ret = drm_gem_handle_create(file, &obj->base.base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base.base);
		return ret;
	}

	*obj_p = &obj->base.base;

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&obj->base.base);

	*handle_p = handle;
	return 0;
}

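/*
 * DRM_IOCTL_MODE_CREATE_DUMB implementation.  Only 32 bpp is supported;
 * the pitch is width * 4 bytes and the allocation is rounded up to a
 * whole page.  DRM core is expected to have rejected zero or overflowing
 * width/height/bpp combinations before this hook runs.
 */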
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object_params params = { 0 };
	int ret;
	uint32_t pitch;

	if (args->bpp != 32)
		return -EINVAL;

	pitch = args->width * 4;
	args->size = pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
	params.width = args->width;
	params.height = args->height;
	params.size = args->size;
	params.dumb = true;
	ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
				    &args->handle);
	if (ret)
		return ret;

	args->pitch = pitch;
	return 0;
}

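/*
 * DRM_IOCTL_MODE_MAP_DUMB implementation: translate a handle into the
 * fake offset in the DRM mmap space that userspace then passes to
 * mmap(2) on the DRM device file.
 */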
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
			      struct drm_device *dev,
			      uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;

	BUG_ON(!offset_p);
	gobj = drm_gem_object_lookup(file_priv, handle);
	if (gobj == NULL)
		return -ENOENT;
	*offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

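/*
 * Called by DRM core whenever a GEM object gains a handle in a DRM file
 * (open, GEM_OPEN, prime import).  With virgl 3D enabled, attach the
 * backing resource to the file's rendering context on the host.
 */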
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		return 0;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
					       objs);
	return 0;
}

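/*
 * Counterpart of virtio_gpu_gem_object_open(): detach the resource from
 * the file's context when the last handle in that file goes away.  An
 * allocation failure here is silently ignored; the host context is torn
 * down when the file closes anyway.
 */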
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		return;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
					       objs);
}

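/*
 * Allocate an object array with room for @nents entries.  @total is the
 * capacity; @nents counts the slots actually filled and starts at zero.
 */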
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
	struct virtio_gpu_object_array *objs;
	size_t size = sizeof(*objs) + sizeof(objs->objs[0]) * nents;

	objs = kmalloc(size, GFP_KERNEL);
	if (!objs)
		return NULL;

	objs->nents = 0;
	objs->total = nents;
	return objs;
}

static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
{
	kfree(objs);
}

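/*
 * Build an object array from an array of GEM handles, taking a
 * reference on every object looked up.  If any handle is stale, the
 * references taken so far are dropped again and NULL is returned.
 */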
struct virtio_gpu_object_array*
virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents)
{
	struct virtio_gpu_object_array *objs;
	u32 i;

	objs = virtio_gpu_array_alloc(nents);
	if (!objs)
		return NULL;

	for (i = 0; i < nents; i++) {
		objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
		if (!objs->objs[i]) {
			objs->nents = i;
			virtio_gpu_array_put_free(objs);
			return NULL;
		}
	}
	objs->nents = i;
	return objs;
}

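/*
 * Append @obj to the array, taking a new reference on it.  Exceeding
 * the capacity the array was allocated with is a driver bug and only
 * triggers a one-time warning.
 */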
void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
			      struct drm_gem_object *obj)
{
	if (WARN_ON_ONCE(objs->nents == objs->total))
		return;

	drm_gem_object_get(obj);
	objs->objs[objs->nents] = obj;
	objs->nents++;
}

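/*
 * Lock the reservation objects of all entries.  For a single object a
 * plain interruptible lock suffices; for multiple objects the ww-mutex
 * machinery is used to avoid deadlocks between concurrent submissions.
 *
 * A sketch of how the helpers below chain together in a submission
 * path (the real callers live in the ioctl and command-queue code):
 *
 *	objs = virtio_gpu_array_from_handles(file, handles, nents);
 *	if (virtio_gpu_array_lock_resv(objs) == 0) {
 *		... queue command, obtain fence ...
 *		virtio_gpu_array_add_fence(objs, fence);
 *		virtio_gpu_array_unlock_resv(objs);
 *	}
 */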
int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
{
	int ret;

	if (objs->nents == 1) {
		ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
	} else {
		ret = drm_gem_lock_reservations(objs->objs, objs->nents,
						&objs->ticket);
	}
	return ret;
}

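/*
 * Unlock what virtio_gpu_array_lock_resv() locked, mirroring its
 * single- versus multi-object paths.
 */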
void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
{
	if (objs->nents == 1) {
		dma_resv_unlock(objs->objs[0]->resv);
	} else {
		drm_gem_unlock_reservations(objs->objs, objs->nents,
					    &objs->ticket);
	}
}

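/*
 * Install @fence as the exclusive fence on every object in the array,
 * so later users wait for the host to finish with the resources.  The
 * reservation objects must already be locked.
 */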
void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
				struct dma_fence *fence)
{
	u32 i;

	for (i = 0; i < objs->nents; i++)
		dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
}

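/*
 * Drop the reference on every object in the array and free the array
 * itself.  May sleep, so it must not be called from atomic context;
 * use virtio_gpu_array_put_free_delayed() there instead.
 */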
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
{
	u32 i;

	for (i = 0; i < objs->nents; i++)
		drm_gem_object_put_unlocked(objs->objs[i]);
	virtio_gpu_array_free(objs);
}

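/*
 * Queue the array for release from process context.  Useful where the
 * final reference may be dropped in atomic context, e.g. from the
 * virtqueue completion path.
 */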
void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_object_array *objs)
{
	spin_lock(&vgdev->obj_free_lock);
	list_add_tail(&objs->next, &vgdev->obj_free_list);
	spin_unlock(&vgdev->obj_free_lock);
	schedule_work(&vgdev->obj_free_work);
}

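/*
 * Work handler backing virtio_gpu_array_put_free_delayed(): drain
 * obj_free_list, dropping the list lock around each release because
 * releasing object references may sleep.
 */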
void virtio_gpu_array_put_free_work(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device, obj_free_work);
	struct virtio_gpu_object_array *objs;

	spin_lock(&vgdev->obj_free_lock);
	while (!list_empty(&vgdev->obj_free_list)) {
		objs = list_first_entry(&vgdev->obj_free_list,
					struct virtio_gpu_object_array, next);
		list_del(&objs->next);
		spin_unlock(&vgdev->obj_free_lock);
		virtio_gpu_array_put_free(objs);
		spin_lock(&vgdev->obj_free_lock);
	}
	spin_unlock(&vgdev->obj_free_lock);
}
    272