/*	$NetBSD: virtgpu_kms.c,v 1.1.1.1 2018/08/27 01:34:59 riastradh Exp $	*/

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtgpu_kms.c,v 1.1.1.1 2018/08/27 01:34:59 riastradh Exp $");

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <drm/drmP.h>
#include "virtgpu_drv.h"

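/*
 * Module parameter: fbdev=0 disables the emulated framebuffer device
 * and console (read-only module parameter, mode 0400).
 */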
static int virtio_gpu_fbdev = 1;

MODULE_PARM_DESC(fbdev, "Disable/Enable framebuffer device & console");
module_param_named(fbdev, virtio_gpu_fbdev, int, 0400);

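/*
 * Worker for config-change interrupts: read the pending event mask
 * from the device config space, refresh the display info on a
 * VIRTIO_GPU_EVENT_DISPLAY event, then acknowledge the handled events
 * by writing them back via events_clear.
 */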
static void virtio_gpu_config_changed_work_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     config_changed_work);
	u32 events_read, events_clear = 0;

	/* read the config space */
	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
		     events_read, &events_read);
	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
		virtio_gpu_cmd_get_display_info(vgdev);
		drm_helper_hpd_irq_event(vgdev->ddev);
		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
	}
	virtio_cwrite(vgdev->vdev, struct virtio_gpu_config,
		      events_clear, &events_clear);
}

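/*
 * Allocate a fresh context ID (>= 1) from the IDR under the
 * ctx_id_idr_lock spinlock; idr_preload() lets the allocation itself
 * proceed without sleeping inside the lock.
 */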
static void virtio_gpu_ctx_id_get(struct virtio_gpu_device *vgdev,
				  uint32_t *resid)
{
	int handle;

	idr_preload(GFP_KERNEL);
	spin_lock(&vgdev->ctx_id_idr_lock);
	handle = idr_alloc(&vgdev->ctx_id_idr, NULL, 1, 0, 0);
	spin_unlock(&vgdev->ctx_id_idr_lock);
	idr_preload_end();
	*resid = handle;
}

static void virtio_gpu_ctx_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	spin_lock(&vgdev->ctx_id_idr_lock);
	idr_remove(&vgdev->ctx_id_idr, id);
	spin_unlock(&vgdev->ctx_id_idr_lock);
}

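/*
 * Create/destroy a renderer context on the host: pair an ID from the
 * local IDR with the corresponding virtio command.
 */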
static void virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
				      uint32_t nlen, const char *name,
				      uint32_t *ctx_id)
{
	virtio_gpu_ctx_id_get(vgdev, ctx_id);
	virtio_gpu_cmd_context_create(vgdev, *ctx_id, nlen, name);
}

static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
				       uint32_t ctx_id)
{
	virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
	virtio_gpu_ctx_id_put(vgdev, ctx_id);
}

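/*
 * Initialize the per-virtqueue bookkeeping: lock, wait queue for
 * command acknowledgements, and the dequeue worker.
 */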
static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
			       void (*work_func)(struct work_struct *work))
{
	spin_lock_init(&vgvq->qlock);
	init_waitqueue_head(&vgvq->ack_queue);
	INIT_WORK(&vgvq->dequeue_work, work_func);
}

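/*
 * Query the capability-set headers advertised by the host.  Each
 * VIRTIO_GPU_CMD_GET_CAPSET_INFO reply is awaited on resp_wq with a
 * five-second timeout; on timeout the whole capsets array is dropped.
 */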
static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
				   int num_capsets)
{
	int i, ret;

	vgdev->capsets = kcalloc(num_capsets,
				 sizeof(struct virtio_gpu_drv_capset),
				 GFP_KERNEL);
	if (!vgdev->capsets) {
		DRM_ERROR("failed to allocate cap sets\n");
		return;
	}
	for (i = 0; i < num_capsets; i++) {
		virtio_gpu_cmd_get_capset_info(vgdev, i);
		ret = wait_event_timeout(vgdev->resp_wq,
					 vgdev->capsets[i].id > 0, 5 * HZ);
		if (ret == 0) {
			DRM_ERROR("timed out waiting for cap set %d\n", i);
			kfree(vgdev->capsets);
			vgdev->capsets = NULL;
			return;
		}
		DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
			 i, vgdev->capsets[i].id,
			 vgdev->capsets[i].max_version,
			 vgdev->capsets[i].max_size);
	}
	vgdev->num_capsets = num_capsets;
}

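/*
 * DRM driver load callback: allocate the device structure, set up the
 * control and cursor virtqueues, TTM, and modesetting, then fetch the
 * initial display info from the host.
 */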
int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
{
	static vq_callback_t *callbacks[] = {
		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
	};
	static const char *names[] = { "control", "cursor" };

	struct virtio_gpu_device *vgdev;
	/* this will expand later */
	struct virtqueue *vqs[2];
	u32 num_scanouts, num_capsets;
	int ret;

	if (!virtio_has_feature(dev->virtdev, VIRTIO_F_VERSION_1))
		return -ENODEV;

	vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
	if (!vgdev)
		return -ENOMEM;

	vgdev->ddev = dev;
	dev->dev_private = vgdev;
	vgdev->vdev = dev->virtdev;
	vgdev->dev = dev->dev;

	spin_lock_init(&vgdev->display_info_lock);
	spin_lock_init(&vgdev->ctx_id_idr_lock);
	idr_init(&vgdev->ctx_id_idr);
	spin_lock_init(&vgdev->resource_idr_lock);
	idr_init(&vgdev->resource_idr);
	init_waitqueue_head(&vgdev->resp_wq);
	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);

	spin_lock_init(&vgdev->fence_drv.lock);
	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
	INIT_LIST_HEAD(&vgdev->cap_cache);
	INIT_WORK(&vgdev->config_changed_work,
		  virtio_gpu_config_changed_work_func);

	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
		vgdev->has_virgl_3d = true;
	DRM_INFO("virgl 3d acceleration %s\n",
		 vgdev->has_virgl_3d ? "enabled" : "not available");

	ret = vgdev->vdev->config->find_vqs(vgdev->vdev, 2, vqs,
					    callbacks, names);
	if (ret) {
		DRM_ERROR("failed to find virt queues\n");
		goto err_vqs;
	}
	vgdev->ctrlq.vq = vqs[0];
	vgdev->cursorq.vq = vqs[1];
	ret = virtio_gpu_alloc_vbufs(vgdev);
	if (ret) {
		DRM_ERROR("failed to alloc vbufs\n");
		goto err_vbufs;
	}

	ret = virtio_gpu_ttm_init(vgdev);
	if (ret) {
		DRM_ERROR("failed to init ttm %d\n", ret);
		goto err_ttm;
	}

	/* get display info */
	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
		     num_scanouts, &num_scanouts);
	vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
				    VIRTIO_GPU_MAX_SCANOUTS);
	if (!vgdev->num_scanouts) {
		DRM_ERROR("num_scanouts is zero\n");
		ret = -EINVAL;
		goto err_scanouts;
	}
	DRM_INFO("number of scanouts: %d\n", num_scanouts);

	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
		     num_capsets, &num_capsets);
	DRM_INFO("number of cap sets: %d\n", num_capsets);

	ret = virtio_gpu_modeset_init(vgdev);
	if (ret)
		goto err_modeset;

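	/*
	 * From here the device may be used: mark the virtqueues ready
	 * so commands can be queued, then kick off the initial capset
	 * and display-info queries.
	 */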
	virtio_device_ready(vgdev->vdev);
	vgdev->vqs_ready = true;

	if (num_capsets)
		virtio_gpu_get_capsets(vgdev, num_capsets);
	virtio_gpu_cmd_get_display_info(vgdev);
	wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
			   5 * HZ);
	if (virtio_gpu_fbdev)
		virtio_gpu_fbdev_init(vgdev);

	return 0;

err_modeset:
err_scanouts:
	virtio_gpu_ttm_fini(vgdev);
err_ttm:
	virtio_gpu_free_vbufs(vgdev);
err_vbufs:
	vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
	kfree(vgdev);
	return ret;
}

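/* Free every cached capset entry and its response buffer. */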
static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_drv_cap_cache *cache_ent, *tmp;

	list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
	}
}

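/*
 * DRM driver unload callback: quiesce the virtqueues and flush their
 * workers before deleting them, then tear down modesetting, TTM, and
 * the remaining allocations.
 */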
int virtio_gpu_driver_unload(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	vgdev->vqs_ready = false;
	flush_work(&vgdev->ctrlq.dequeue_work);
	flush_work(&vgdev->cursorq.dequeue_work);
	flush_work(&vgdev->config_changed_work);
	vgdev->vdev->config->del_vqs(vgdev->vdev);

	virtio_gpu_modeset_fini(vgdev);
	virtio_gpu_ttm_fini(vgdev);
	virtio_gpu_free_vbufs(vgdev);
	virtio_gpu_cleanup_cap_cache(vgdev);
	kfree(vgdev->capsets);
	kfree(vgdev);
	return 0;
}

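/*
 * Per-open callback: when virgl 3D is available, create a host
 * renderer context named after the opening task.
 */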
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv;
	uint32_t id;
	char dbgname[64], tmpname[TASK_COMM_LEN];

	/* can't create contexts without 3d renderer */
	if (!vgdev->has_virgl_3d)
		return 0;

	get_task_comm(tmpname, current);
	snprintf(dbgname, sizeof(dbgname), "%s", tmpname);
	dbgname[63] = 0;
	/* allocate a virt GPU context for this opener */
	vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL);
	if (!vfpriv)
		return -ENOMEM;

	virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname, &id);

	vfpriv->ctx_id = id;
	file->driver_priv = vfpriv;
	return 0;
}

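/*
 * Per-close callback: destroy the host renderer context created in
 * virtio_gpu_driver_open and free the per-file private data.
 */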
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv;

	if (!vgdev->has_virgl_3d)
		return;

	vfpriv = file->driver_priv;

	virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
	kfree(vfpriv);
	file->driver_priv = NULL;
}