/*	$NetBSD: virtgpu_gem.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $	*/

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtgpu_gem.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $");

#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>

#include "virtgpu_drv.h"

/*
 * Create a virtio-gpu object described by @params and return both the GEM
 * object and a handle for it in @file.  The allocation reference is dropped
 * once the handle holds its own reference.
 */
int virtio_gpu_gem_create(struct drm_file *file,
			  struct drm_device *dev,
			  struct virtio_gpu_object_params *params,
			  struct drm_gem_object **obj_p,
			  uint32_t *handle_p)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *obj;
	int ret;
	u32 handle;

	ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
	if (ret < 0)
		return ret;

	ret = drm_gem_handle_create(file, &obj->base.base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base.base);
		return ret;
	}

	*obj_p = &obj->base.base;

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&obj->base.base);

	*handle_p = handle;
	return 0;
}

/*
 * Dumb-buffer creation: only 32 bpp (host-endian XRGB8888) scanout buffers
 * are supported, and the buffer size is rounded up to a whole page.
 */
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object_params params = { 0 };
	int ret;
	uint32_t pitch;

	if (args->bpp != 32)
		return -EINVAL;

	pitch = args->width * 4;
	args->size = pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
	params.width = args->width;
	params.height = args->height;
	params.size = args->size;
	params.dumb = true;
	ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
				    &args->handle);
	if (ret)
		goto fail;

	args->pitch = pitch;
	return ret;

fail:
	return ret;
}

/* Report the fake mmap offset through which the buffer can be mapped. */
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
			      struct drm_device *dev,
			      uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;

	BUG_ON(!offset_p);
	gobj = drm_gem_object_lookup(file_priv, handle);
	if (gobj == NULL)
		return -ENOENT;
	*offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

/*
 * On first open of a GEM object by a DRM file, attach the resource to the
 * file's rendering context (virgl 3D only).
 */
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		return 0;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
					       objs);
	return 0;
}

/* Counterpart of virtio_gpu_gem_object_open: detach the resource again. */
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		return;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
					       objs);
}

/*
 * Object arrays: small reference-holding arrays of GEM objects that travel
 * with virtqueue commands and are released once the host is done with them.
 */
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
	struct virtio_gpu_object_array *objs;
	size_t size = sizeof(*objs) + sizeof(objs->objs[0]) * nents;

	objs = kmalloc(size, GFP_KERNEL);
	if (!objs)
		return NULL;

	objs->nents = 0;
	objs->total = nents;
	return objs;
}

static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
{
	kfree(objs);
}

/*
 * Build an object array from userspace handles; if any lookup fails, drop
 * the references gathered so far and return NULL.
 */
struct virtio_gpu_object_array*
virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents)
{
	struct virtio_gpu_object_array *objs;
	u32 i;

	objs = virtio_gpu_array_alloc(nents);
	if (!objs)
		return NULL;

	for (i = 0; i < nents; i++) {
		objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
		if (!objs->objs[i]) {
			objs->nents = i;
			virtio_gpu_array_put_free(objs);
			return NULL;
		}
	}
	objs->nents = i;
	return objs;
}

void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
			      struct drm_gem_object *obj)
{
	if (WARN_ON_ONCE(objs->nents == objs->total))
		return;

	drm_gem_object_get(obj);
	objs->objs[objs->nents] = obj;
	objs->nents++;
}

int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
{
	int ret;

	if (objs->nents == 1) {
		ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
	} else {
		ret = drm_gem_lock_reservations(objs->objs, objs->nents,
						&objs->ticket);
	}
	return ret;
}

void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
{
	if (objs->nents == 1) {
		dma_resv_unlock(objs->objs[0]->resv);
	} else {
		drm_gem_unlock_reservations(objs->objs, objs->nents,
					    &objs->ticket);
	}
}

void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
				struct dma_fence *fence)
{
	int i;

	for (i = 0; i < objs->nents; i++)
		dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
}

/* Drop all references held by the array, then free the array itself. */
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
{
	u32 i;

	for (i = 0; i < objs->nents; i++)
		drm_gem_object_put_unlocked(objs->objs[i]);
	virtio_gpu_array_free(objs);
}

/*
 * Queue the array for release on the obj_free workqueue instead of dropping
 * the references directly in the caller's context.
 */
void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_object_array *objs)
{
	spin_lock(&vgdev->obj_free_lock);
	list_add_tail(&objs->next, &vgdev->obj_free_list);
	spin_unlock(&vgdev->obj_free_lock);
	schedule_work(&vgdev->obj_free_work);
}

/* Work handler: drain obj_free_list, releasing each queued array. */
void virtio_gpu_array_put_free_work(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device, obj_free_work);
	struct virtio_gpu_object_array *objs;

	spin_lock(&vgdev->obj_free_lock);
	while (!list_empty(&vgdev->obj_free_list)) {
		objs = list_first_entry(&vgdev->obj_free_list,
					struct virtio_gpu_object_array, next);
		list_del(&objs->next);
		spin_unlock(&vgdev->obj_free_lock);
		virtio_gpu_array_put_free(objs);
		spin_lock(&vgdev->obj_free_lock);
	}
	spin_unlock(&vgdev->obj_free_lock);
}
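
/*
 * Illustrative sketch (comment only, not compiled): how the object-array
 * helpers above are typically strung together by command submission
 * elsewhere in the driver.  The exact call sites live outside this file;
 * this only summarizes the intended lifecycle and is not additional API.
 *
 *	objs = virtio_gpu_array_from_handles(file, handles, nents);
 *	virtio_gpu_array_lock_resv(objs);
 *	...queue a virtqueue command that references objs...
 *	virtio_gpu_array_add_fence(objs, fence);
 *	virtio_gpu_array_unlock_resv(objs);
 *	...later, once the host has completed the command...
 *	virtio_gpu_array_put_free_delayed(vgdev, objs);
 */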