/*	$NetBSD: virtgpu_kms.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $	*/

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtgpu_kms.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $");

#include <linux/virtio.h>
#include <linux/virtio_config.h>

#include <drm/drm_file.h>

#include "virtgpu_drv.h"

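/*
 * Work handler for virtio config-change notifications: re-read the
 * events field of the device config space, refresh EDID and display
 * info when a display event is pending, and acknowledge the events
 * that were handled.
 */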
static void virtio_gpu_config_changed_work_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     config_changed_work);
	u32 events_read, events_clear = 0;

	/* read the config space */
	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
		     events_read, &events_read);
	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
		if (vgdev->has_edid)
			virtio_gpu_cmd_get_edids(vgdev);
		virtio_gpu_cmd_get_display_info(vgdev);
		drm_helper_hpd_irq_event(vgdev->ddev);
		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
	}
	virtio_cwrite(vgdev->vdev, struct virtio_gpu_config,
		      events_clear, &events_clear);
}

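/*
 * Allocate a context id from the IDA and ask the host to create the
 * context.  The returned handle is the IDA id plus one, so valid
 * handles are always non-zero.
 */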
static int virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
				      uint32_t nlen, const char *name)
{
	int handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);

	if (handle < 0)
		return handle;
	handle += 1;
	virtio_gpu_cmd_context_create(vgdev, handle, nlen, name);
	return handle;
}

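/*
 * Tell the host to destroy the context and return its id (minus the
 * offset applied in virtio_gpu_context_create) to the IDA.
 */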
static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
				      uint32_t ctx_id)
{
	virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
	ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
}

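/* Initialize the per-virtqueue lock, ack wait queue, and dequeue work. */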
static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
			       void (*work_func)(struct work_struct *work))
{
	spin_lock_init(&vgvq->qlock);
	init_waitqueue_head(&vgvq->ack_queue);
	INIT_WORK(&vgvq->dequeue_work, work_func);
}

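/*
 * Query the host for information about each capability set and cache
 * the results in vgdev->capsets.  On allocation failure or timeout the
 * cache is dropped and the device reports no capability sets.
 */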
static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
				   int num_capsets)
{
	int i, ret;

	vgdev->capsets = kcalloc(num_capsets,
				 sizeof(struct virtio_gpu_drv_capset),
				 GFP_KERNEL);
	if (!vgdev->capsets) {
		DRM_ERROR("failed to allocate cap sets\n");
		return;
	}
	for (i = 0; i < num_capsets; i++) {
		virtio_gpu_cmd_get_capset_info(vgdev, i);
		ret = wait_event_timeout(vgdev->resp_wq,
					 vgdev->capsets[i].id > 0, 5 * HZ);
		if (ret == 0) {
			DRM_ERROR("timed out waiting for cap set %d\n", i);
			kfree(vgdev->capsets);
			vgdev->capsets = NULL;
			return;
		}
		DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
			 i, vgdev->capsets[i].id,
			 vgdev->capsets[i].max_version,
			 vgdev->capsets[i].max_size);
	}
	vgdev->num_capsets = num_capsets;
}

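/*
 * Device initialization: check for a VERSION_1 device, allocate and
 * populate the virtio_gpu_device, set up the control and cursor
 * virtqueues, read the number of scanouts and capability sets from the
 * config space, initialize modesetting, and kick off the initial
 * EDID, display-info, and capset queries.
 */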
int virtio_gpu_init(struct drm_device *dev)
{
	static vq_callback_t *callbacks[] = {
		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
	};
	static const char * const names[] = { "control", "cursor" };

	struct virtio_gpu_device *vgdev;
	/* this will expand later */
	struct virtqueue *vqs[2];
	u32 num_scanouts, num_capsets;
	int ret;

	if (!virtio_has_feature(dev_to_virtio(dev->dev), VIRTIO_F_VERSION_1))
		return -ENODEV;

	vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
	if (!vgdev)
		return -ENOMEM;

	vgdev->ddev = dev;
	dev->dev_private = vgdev;
	vgdev->vdev = dev_to_virtio(dev->dev);
	vgdev->dev = dev->dev;

	spin_lock_init(&vgdev->display_info_lock);
	ida_init(&vgdev->ctx_id_ida);
	ida_init(&vgdev->resource_ida);
	init_waitqueue_head(&vgdev->resp_wq);
	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);

	vgdev->fence_drv.context = dma_fence_context_alloc(1);
	spin_lock_init(&vgdev->fence_drv.lock);
	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
	INIT_LIST_HEAD(&vgdev->cap_cache);
	INIT_WORK(&vgdev->config_changed_work,
		  virtio_gpu_config_changed_work_func);

	INIT_WORK(&vgdev->obj_free_work,
		  virtio_gpu_array_put_free_work);
	INIT_LIST_HEAD(&vgdev->obj_free_list);
	spin_lock_init(&vgdev->obj_free_lock);

#ifdef __LITTLE_ENDIAN
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
		vgdev->has_virgl_3d = true;
#endif
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
		vgdev->has_edid = true;
	}

	DRM_INFO("features: %cvirgl %cedid\n",
		 vgdev->has_virgl_3d ? '+' : '-',
		 vgdev->has_edid     ? '+' : '-');

	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
	if (ret) {
		DRM_ERROR("failed to find virt queues\n");
		goto err_vqs;
	}
	vgdev->ctrlq.vq = vqs[0];
	vgdev->cursorq.vq = vqs[1];
	ret = virtio_gpu_alloc_vbufs(vgdev);
	if (ret) {
		DRM_ERROR("failed to alloc vbufs\n");
		goto err_vbufs;
	}

	/* get display info */
	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
		     num_scanouts, &num_scanouts);
	vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
				    VIRTIO_GPU_MAX_SCANOUTS);
	if (!vgdev->num_scanouts) {
		DRM_ERROR("num_scanouts is zero\n");
		ret = -EINVAL;
		goto err_scanouts;
	}
	DRM_INFO("number of scanouts: %d\n", num_scanouts);

	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
		     num_capsets, &num_capsets);
	DRM_INFO("number of cap sets: %d\n", num_capsets);

	virtio_gpu_modeset_init(vgdev);

	virtio_device_ready(vgdev->vdev);
	vgdev->vqs_ready = true;

	if (num_capsets)
		virtio_gpu_get_capsets(vgdev, num_capsets);
	if (vgdev->has_edid)
		virtio_gpu_cmd_get_edids(vgdev);
	virtio_gpu_cmd_get_display_info(vgdev);
	wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
			   5 * HZ);
	return 0;

err_scanouts:
	virtio_gpu_free_vbufs(vgdev);
err_vbufs:
	vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
	kfree(vgdev);
	return ret;
}

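/* Free every cached capability-set response. */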
static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_drv_cap_cache *cache_ent, *tmp;

	list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
	}
}

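/*
 * Tear down in roughly the reverse order of virtio_gpu_init: flush the
 * pending work items, reset the device, delete the virtqueues, and
 * free the remaining driver state.
 */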
void virtio_gpu_deinit(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	flush_work(&vgdev->obj_free_work);
	vgdev->vqs_ready = false;
	flush_work(&vgdev->ctrlq.dequeue_work);
	flush_work(&vgdev->cursorq.dequeue_work);
	flush_work(&vgdev->config_changed_work);
	vgdev->vdev->config->reset(vgdev->vdev);
	vgdev->vdev->config->del_vqs(vgdev->vdev);

	virtio_gpu_modeset_fini(vgdev);
	virtio_gpu_free_vbufs(vgdev);
	virtio_gpu_cleanup_cap_cache(vgdev);
	kfree(vgdev->capsets);
	kfree(vgdev);
}

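/*
 * Per-open-file setup: when 3D (virgl) is available, allocate a file
 * private structure and create a host rendering context named after
 * the opening process.
 */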
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv;
	int id;
	char dbgname[TASK_COMM_LEN];

	/* can't create contexts without 3d renderer */
	if (!vgdev->has_virgl_3d)
		return 0;

	/* allocate a virt GPU context for this opener */
	vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL);
	if (!vfpriv)
		return -ENOMEM;

	get_task_comm(dbgname, current);
	id = virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname);
	if (id < 0) {
		kfree(vfpriv);
		return id;
	}

	vfpriv->ctx_id = id;
	file->driver_priv = vfpriv;
	return 0;
}

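/*
 * Per-open-file teardown: destroy the host rendering context created
 * in virtio_gpu_driver_open and free the file private structure.
 */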
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv;

	if (!vgdev->has_virgl_3d)
		return;

	vfpriv = file->driver_priv;

	virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
	kfree(vfpriv);
	file->driver_priv = NULL;
}
    295