qxl_ioctl.c revision 1.1.1.2 1 /* $NetBSD: qxl_ioctl.c,v 1.1.1.2 2018/08/27 01:34:56 riastradh Exp $ */
2
3 /*
4 * Copyright 2013 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alon Levy
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: qxl_ioctl.c,v 1.1.1.2 2018/08/27 01:34:56 riastradh Exp $");
30
31 #include "qxl_drv.h"
32 #include "qxl_object.h"
33
34 /*
35 * TODO: allocating a new gem(in qxl_bo) for each request.
36 * This is wasteful since bo's are page aligned.
37 */
38 static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
39 struct drm_file *file_priv)
40 {
41 struct qxl_device *qdev = dev->dev_private;
42 struct drm_qxl_alloc *qxl_alloc = data;
43 int ret;
44 struct qxl_bo *qobj;
45 uint32_t handle;
46 u32 domain = QXL_GEM_DOMAIN_VRAM;
47
48 if (qxl_alloc->size == 0) {
49 DRM_ERROR("invalid size %d\n", qxl_alloc->size);
50 return -EINVAL;
51 }
52 ret = qxl_gem_object_create_with_handle(qdev, file_priv,
53 domain,
54 qxl_alloc->size,
55 NULL,
56 &qobj, &handle);
57 if (ret) {
58 DRM_ERROR("%s: failed to create gem ret=%d\n",
59 __func__, ret);
60 return -ENOMEM;
61 }
62 qxl_alloc->handle = handle;
63 return 0;
64 }
65
66 static int qxl_map_ioctl(struct drm_device *dev, void *data,
67 struct drm_file *file_priv)
68 {
69 struct qxl_device *qdev = dev->dev_private;
70 struct drm_qxl_map *qxl_map = data;
71
72 return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
73 &qxl_map->offset);
74 }
75
/*
 * Bookkeeping for one relocation entry copied in from user space:
 * the slot at dst_bo + dst_offset is patched to refer to
 * src_bo + src_offset (see apply_reloc()/apply_surf_reloc()).
 */
struct qxl_reloc_info {
	int type;		/* QXL_RELOC_TYPE_BO or QXL_RELOC_TYPE_SURF */
	struct qxl_bo *dst_bo;	/* bo containing the slot to patch */
	uint32_t dst_offset;	/* byte offset of the slot within dst_bo */
	struct qxl_bo *src_bo;	/* referenced bo; NULL for handle-less SURF relocs */
	int src_offset;		/* byte offset within src_bo */
};
83
/*
 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
 * are on vram).
 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
 */
static void
apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
	void *reloc_page;
	/*
	 * Atomically map only the page of dst_bo that contains the slot
	 * (dst_offset & PAGE_MASK is the page-aligned base), store the
	 * 64-bit device-physical address of src_bo+src_offset at the
	 * in-page offset (dst_offset & ~PAGE_MASK), then unmap.
	 */
	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
	*(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
											      info->src_bo,
											      info->src_offset);
	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}
99
/*
 * Like apply_reloc(), but patches a 32-bit surface id instead of a
 * physical address.  A NULL src_bo or a primary-surface src_bo yields
 * id 0 (the primary surface).
 */
static void
apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
	uint32_t id = 0;
	void *reloc_page;

	if (info->src_bo && !info->src_bo->is_primary)
		id = info->src_bo->surface_id;

	/* Map just the page holding the slot, write the id, unmap. */
	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
	*(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}
113
114 /* return holding the reference to this object */
115 static int qxlhw_handle_to_bo(struct qxl_device *qdev,
116 struct drm_file *file_priv, uint64_t handle,
117 struct qxl_release *release, struct qxl_bo **qbo_p)
118 {
119 struct drm_gem_object *gobj;
120 struct qxl_bo *qobj;
121 int ret;
122
123 gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
124 if (!gobj)
125 return -EINVAL;
126
127 qobj = gem_to_qxl_bo(gobj);
128
129 ret = qxl_release_list_add(release, qobj);
130 drm_gem_object_unreference_unlocked(gobj);
131 if (ret)
132 return ret;
133
134 *qbo_p = qobj;
135 return 0;
136 }
137
/*
 * Usage of execbuffer:
 * Relocations need to take into account the full QXLDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * QXLReleaseInfo struct (first XXX bytes)
 *
 * Flow: validate the command header, copy the command body from user
 * space into a freshly reserved release bo, resolve every relocation's
 * bos, reserve/validate the whole bo list, patch the relocations, and
 * finally push the command onto the device ring.
 */
static int qxl_process_single_command(struct qxl_device *qdev,
				      struct drm_qxl_command *cmd,
				      struct drm_file *file_priv)
{
	struct qxl_reloc_info *reloc_info;
	int release_type;
	struct qxl_release *release;
	struct qxl_bo *cmd_bo;
	void *fb_cmd;
	int i, ret, num_relocs;
	int unwritten;

	/* Only drawables are accepted through execbuffer. */
	switch (cmd->type) {
	case QXL_CMD_DRAW:
		release_type = QXL_RELEASE_DRAWABLE;
		break;
	case QXL_CMD_SURFACE:
	case QXL_CMD_CURSOR:
	default:
		DRM_DEBUG("Only draw commands in execbuffers\n");
		return -EINVAL;
		break;
	}

	/* Command plus its release-info header must fit in one page. */
	if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
		return -EINVAL;

	if (!access_ok(VERIFY_READ,
		       (void *)(unsigned long)cmd->command,
		       cmd->command_size))
		return -EFAULT;

	/* kmalloc_array() checks relocs_num * sizeof() for overflow. */
	reloc_info = kmalloc_array(cmd->relocs_num,
				   sizeof(struct qxl_reloc_info), GFP_KERNEL);
	if (!reloc_info)
		return -ENOMEM;

	/* Reserve a release entry and the bo that will hold the command. */
	ret = qxl_alloc_release_reserved(qdev,
					 sizeof(union qxl_release_info) +
					 cmd->command_size,
					 release_type,
					 &release,
					 &cmd_bo);
	if (ret)
		goto out_free_reloc;

	/* TODO copy slow path code from i915 */
	/*
	 * NOTE(review): both masks below use PAGE_SIZE rather than
	 * PAGE_MASK, so they isolate only bit 12 of release_offset.
	 * This matches the upstream Linux code and is correct only if
	 * release_offset is always < 2*PAGE_SIZE — confirm against the
	 * qxl_release allocator.
	 */
	fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
	unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);

	{
		struct qxl_drawable *draw = fb_cmd;
		/* Stamp the drawable with the device's current mm clock. */
		draw->mm_time = qdev->rom->mm_clock;
	}

	/* Unmap before checking: the atomic mapping must not be held long. */
	qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
	if (unwritten) {
		DRM_ERROR("got unwritten %d\n", unwritten);
		ret = -EFAULT;
		goto out_free_release;
	}

	/* fill out reloc info structs */
	num_relocs = 0;
	for (i = 0; i < cmd->relocs_num; ++i) {
		struct drm_qxl_reloc reloc;

		if (copy_from_user(&reloc,
				   &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
				   sizeof(reloc))) {
			ret = -EFAULT;
			goto out_free_bos;
		}

		/* add the bos to the list of bos to validate -
		   need to validate first then process relocs? */
		if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
			DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);

			ret = -EINVAL;
			goto out_free_bos;
		}
		reloc_info[i].type = reloc.reloc_type;

		/* dst_handle == 0 means the slot lives in the command bo itself. */
		if (reloc.dst_handle) {
			ret = qxlhw_handle_to_bo(qdev, file_priv, reloc.dst_handle, release,
						 &reloc_info[i].dst_bo);
			if (ret)
				goto out_free_bos;
			reloc_info[i].dst_offset = reloc.dst_offset;
		} else {
			reloc_info[i].dst_bo = cmd_bo;
			reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
		}
		num_relocs++;

		/* reserve and validate the reloc dst bo */
		/*
		 * NOTE(review): SURF relocs with a nonzero src_handle also
		 * resolve a src_bo here; src_bo stays NULL only for
		 * handle-less SURF relocs (primary surface, id 0 in
		 * apply_surf_reloc()).
		 */
		if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
			ret = qxlhw_handle_to_bo(qdev, file_priv, reloc.src_handle, release,
						 &reloc_info[i].src_bo);
			if (ret)
				goto out_free_bos;
			reloc_info[i].src_offset = reloc.src_offset;
		} else {
			reloc_info[i].src_bo = NULL;
			reloc_info[i].src_offset = 0;
		}
	}

	/* validate all buffers */
	ret = qxl_release_reserve_list(release, false);
	if (ret)
		goto out_free_bos;

	/* Everything is pinned in place; patch the relocations. */
	for (i = 0; i < cmd->relocs_num; ++i) {
		if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
			apply_reloc(qdev, &reloc_info[i]);
		else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
			apply_surf_reloc(qdev, &reloc_info[i]);
	}

	/* On push failure, back off the reservations; otherwise fence. */
	ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
	if (ret)
		qxl_release_backoff_reserve_list(release);
	else
		qxl_release_fence_buffer_objects(release);

out_free_bos:
out_free_release:
	if (ret)
		qxl_release_free(qdev, release);
out_free_reloc:
	kfree(reloc_info);
	return ret;
}
279
280 static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
281 struct drm_file *file_priv)
282 {
283 struct qxl_device *qdev = dev->dev_private;
284 struct drm_qxl_execbuffer *execbuffer = data;
285 struct drm_qxl_command user_cmd;
286 int cmd_num;
287 int ret;
288
289 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
290
291 struct drm_qxl_command *commands =
292 (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
293
294 if (copy_from_user(&user_cmd, &commands[cmd_num],
295 sizeof(user_cmd)))
296 return -EFAULT;
297
298 ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
299 if (ret)
300 return ret;
301 }
302 return 0;
303 }
304
305 static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
306 struct drm_file *file)
307 {
308 struct qxl_device *qdev = dev->dev_private;
309 struct drm_qxl_update_area *update_area = data;
310 struct qxl_rect area = {.left = update_area->left,
311 .top = update_area->top,
312 .right = update_area->right,
313 .bottom = update_area->bottom};
314 int ret;
315 struct drm_gem_object *gobj = NULL;
316 struct qxl_bo *qobj = NULL;
317
318 if (update_area->left >= update_area->right ||
319 update_area->top >= update_area->bottom)
320 return -EINVAL;
321
322 gobj = drm_gem_object_lookup(dev, file, update_area->handle);
323 if (gobj == NULL)
324 return -ENOENT;
325
326 qobj = gem_to_qxl_bo(gobj);
327
328 ret = qxl_bo_reserve(qobj, false);
329 if (ret)
330 goto out;
331
332 if (!qobj->pin_count) {
333 qxl_ttm_placement_from_domain(qobj, qobj->type, false);
334 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
335 true, false);
336 if (unlikely(ret))
337 goto out;
338 }
339
340 ret = qxl_bo_check_id(qdev, qobj);
341 if (ret)
342 goto out2;
343 if (!qobj->surface_id)
344 DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
345 ret = qxl_io_update_area(qdev, qobj, &area);
346
347 out2:
348 qxl_bo_unreserve(qobj);
349
350 out:
351 drm_gem_object_unreference_unlocked(gobj);
352 return ret;
353 }
354
355 static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
356 struct drm_file *file_priv)
357 {
358 struct qxl_device *qdev = dev->dev_private;
359 struct drm_qxl_getparam *param = data;
360
361 switch (param->param) {
362 case QXL_PARAM_NUM_SURFACES:
363 param->value = qdev->rom->n_surfaces;
364 break;
365 case QXL_PARAM_MAX_RELOCS:
366 param->value = QXL_MAX_RES;
367 break;
368 default:
369 return -EINVAL;
370 }
371 return 0;
372 }
373
374 static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
375 struct drm_file *file_priv)
376 {
377 struct qxl_device *qdev = dev->dev_private;
378 struct drm_qxl_clientcap *param = data;
379 int byte, idx;
380
381 byte = param->index / 8;
382 idx = param->index % 8;
383
384 if (qdev->pdev->revision < 4)
385 return -ENOSYS;
386
387 if (byte >= 58)
388 return -ENOSYS;
389
390 if (qdev->rom->client_capabilities[byte] & (1 << idx))
391 return 0;
392 return -ENOSYS;
393 }
394
395 static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
396 struct drm_file *file)
397 {
398 struct qxl_device *qdev = dev->dev_private;
399 struct drm_qxl_alloc_surf *param = data;
400 struct qxl_bo *qobj;
401 int handle;
402 int ret;
403 int size, actual_stride;
404 struct qxl_surface surf;
405
406 /* work out size allocate bo with handle */
407 actual_stride = param->stride < 0 ? -param->stride : param->stride;
408 size = actual_stride * param->height + actual_stride;
409
410 surf.format = param->format;
411 surf.width = param->width;
412 surf.height = param->height;
413 surf.stride = param->stride;
414 surf.data = 0;
415
416 ret = qxl_gem_object_create_with_handle(qdev, file,
417 QXL_GEM_DOMAIN_SURFACE,
418 size,
419 &surf,
420 &qobj, &handle);
421 if (ret) {
422 DRM_ERROR("%s: failed to create gem ret=%d\n",
423 __func__, ret);
424 return -ENOMEM;
425 } else
426 param->handle = handle;
427 return ret;
428 }
429
/*
 * Driver-private ioctl dispatch table; every entry requires DRM
 * authentication (DRM_AUTH).
 */
const struct drm_ioctl_desc qxl_ioctls[] = {
	DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
							DRM_AUTH),
	DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
							DRM_AUTH),
	DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
							DRM_AUTH),
	DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
							DRM_AUTH),

	DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
							DRM_AUTH),
};

/* Entry count, referenced by the drm_driver num_ioctls field. */
int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);
449