/*	$NetBSD: amdgpu_gem.c,v 1.1.1.2 2021/12/18 20:11:06 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_gem.c,v 1.1.1.2 2021/12/18 20:11:06 riastradh Exp $");

#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_xgmi.h"

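/**
 * amdgpu_gem_object_free - free the GEM object
 *
 * @gobj: GEM object to free
 *
 * Unregisters the MMU notifier of the underlying amdgpu_bo, if any,
 * and drops the buffer object reference.
 */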
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

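/**
 * amdgpu_gem_object_create - allocate a GEM object backed by an amdgpu_bo
 *
 * @adev: amdgpu_device pointer
 * @size: requested size in bytes
 * @alignment: requested byte alignment
 * @initial_domain: preferred initial placement domain
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @type: TTM buffer object type
 * @resv: reservation object to share, or NULL
 * @obj: filled with the new GEM object on success
 *
 * On allocation failure this retries, first without
 * AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED and then with GTT added as a
 * fallback domain for VRAM, before reporting the error.
 *
 * Returns 0 on success, negative error code on failure.
 */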
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct dma_resv *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	*obj = NULL;

	bp.size = size;
	bp.byte_align = alignment;
	bp.type = type;
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
retry:
	bp.flags = flags;
	bp.domain = initial_domain;
	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
				goto retry;
			}

			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &bo->tbo.base;

	return 0;
}

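/**
 * amdgpu_gem_force_release - release all GEM objects at teardown
 *
 * @adev: amdgpu_device pointer
 *
 * Walks every open DRM file and drops all GEM handles that user space
 * still holds, warning once if any active clients or allocations remain.
 */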
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new
 * and open ioctl cases.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct mm_struct *mm;
	int r;

	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}

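/*
 * Called when a file handle to the object is closed: drops the bo_va
 * reference for this client's VM and, once the count reaches zero,
 * removes the mapping and clears the freed page table entries.
 */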
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va && --bo_va->ref_count == 0) {
		amdgpu_vm_bo_rmv(adev, bo_va);

		if (amdgpu_vm_ready(vm)) {
			struct dma_fence *fence = NULL;

			r = amdgpu_vm_clear_freed(adev, vm, &fence);
			if (unlikely(r)) {
				dev_err(adev->dev, "failed to clear page "
					"tables on GEM object close (%d)\n", r);
			}

			if (fence) {
				amdgpu_bo_fence(bo, fence, true);
				dma_fence_put(fence);
			}
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

/*
 * GEM ioctls.
 */
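/**
 * amdgpu_gem_create_ioctl - create a GEM object from user space
 *
 * @dev: drm device pointer
 * @data: ioctl arguments (union drm_amdgpu_gem_create)
 * @filp: drm file pointer
 *
 * Validates the requested flags and domains, creates the buffer object
 * (sharing the VM root reservation for per-VM always-valid BOs) and
 * returns a GEM handle to user space.
 */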
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct dma_resv *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
		return -EINVAL;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
			/* a GDS BO created from user space must be
			 * passed to the bo list
			 */
			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
			return -EINVAL;
		}
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.base.bo, false);
		if (r)
			return r;

		resv = vm->root.base.bo->tbo.base.resv;
	}

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     flags, ttm_bo_type_device, resv, &gobj);
	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
		}
		amdgpu_bo_unreserve(vm->root.base.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

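/**
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * @dev: drm device pointer
 * @data: ioctl arguments (struct drm_amdgpu_gem_userptr)
 * @filp: drm file pointer
 *
 * The address and size must be page aligned.  Writable mappings require
 * an MMU notifier (AMDGPU_GEM_USERPTR_REGISTER); with
 * AMDGPU_GEM_USERPTR_VALIDATE the user pages are fetched and validated
 * into GTT before the handle is returned.
 */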
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install an MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, ttm_bo_type_device, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto user_pages_done;

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto user_pages_done;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	if (r)
		goto user_pages_done;

	args->handle = handle;

user_pages_done:
	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

release_object:
	drm_gem_object_put_unlocked(gobj);

	return r;
}

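/**
 * amdgpu_mode_dumb_mmap - look up the mmap offset for a GEM handle
 *
 * @filp: drm file pointer
 * @dev: drm device pointer
 * @handle: GEM handle of the buffer object
 * @offset_p: filled with the fake mmap offset on success
 *
 * Rejects userptr BOs and BOs created with
 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS, which must not be mapped by the CPU.
 */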
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

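/**
 * amdgpu_gem_mmap_ioctl - ioctl wrapper around amdgpu_mode_dumb_mmap
 *
 * @dev: drm device pointer
 * @data: ioctl arguments (union drm_amdgpu_gem_mmap)
 * @filp: drm file pointer
 */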
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

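/**
 * amdgpu_gem_wait_idle_ioctl - wait for all fences on a BO to signal
 *
 * @dev: drm device pointer
 * @data: ioctl arguments (union drm_amdgpu_gem_wait_idle)
 * @filp: drm file pointer
 *
 * Waits on the BO's reservation object up to the absolute timeout
 * supplied by user space; args->out.status is 0 when the BO is idle
 * and 1 when the wait timed out.
 */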
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
					timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put_unlocked(gobj);
	return r;
}

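/**
 * amdgpu_gem_metadata_ioctl - get or set tiling flags and metadata
 *
 * @dev: drm device pointer
 * @data: ioctl arguments (struct drm_amdgpu_gem_metadata)
 * @filp: drm file pointer
 */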
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap, clear or replace
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

/**
 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 *
 * @adev: amdgpu_device pointer
 * @flags: GEM UAPI flags
 *
 * Returns the GEM UAPI flags mapped into hardware for the ASIC.
 */
uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	if (adev->gmc.gmc_funcs->map_mtype)
		pte_flag |= amdgpu_gmc_map_mtype(adev,
						 flags & AMDGPU_VM_MTYPE_MASK);

	return pte_flag;
}

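/**
 * amdgpu_gem_va_ioctl - map or unmap a BO in the client's VM
 *
 * @dev: drm device pointer
 * @data: ioctl arguments (struct drm_amdgpu_gem_va)
 * @filp: drm file pointer
 *
 * Validates the requested virtual address range and flags, then
 * performs the MAP, UNMAP, CLEAR or REPLACE operation on the bo_va and
 * updates the page tables immediately unless AMDGPU_VM_DELAY_UPDATE is
 * set or amdgpu_vm_debug is enabled.
 */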
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in reserved area 0x%LX\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
	    args->va_address < AMDGPU_GMC_HOLE_END) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
			args->va_address, AMDGPU_GMC_HOLE_START,
			AMDGPU_GMC_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_GMC_HOLE_MASK;

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			tv.num_shared = 1;
		else
			tv.num_shared = 0;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

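/**
 * amdgpu_gem_op_ioctl - query creation info or change BO placement
 *
 * @dev: drm device pointer
 * @data: ioctl arguments (struct drm_amdgpu_gem_op)
 * @filp: drm file pointer
 *
 * GET_GEM_CREATE_INFO copies the BO's size, alignment, domains and
 * flags back to user space.  SET_PLACEMENT rewrites the preferred
 * domains; it rejects moving dma-buf shared BOs to VRAM, userptr BOs,
 * and BOs mapped into a VM on another device in the same XGMI hive.
 */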
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_vm_bo_base *base;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->tbo.base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		for (base = robj->vm_bo; base; base = base->next)
			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
				amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
				r = -EINVAL;
				amdgpu_bo_unreserve(robj);
				goto out;
			}

		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

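/**
 * amdgpu_mode_dumb_create - create a dumb buffer suitable for scanout
 *
 * @file_priv: drm file pointer
 * @dev: drm device pointer
 * @args: dumb buffer arguments; pitch and size are filled in
 *
 * Allocates a CPU accessible, write-combined buffer in a scanout
 * capable domain with an aligned pitch and returns a GEM handle.
 */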
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		    AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	u32 domain;
	int r;

	/*
	 * The buffer returned from this function should be cleared, but
	 * it can only be done if the ring is enabled or we'll fail to
	 * create the buffer.
	 */
	if (adev->mman.buffer_funcs_enabled)
		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_pin_domain(adev,
				amdgpu_display_supported_domains(adev, flags));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
				     ttm_bo_type_device, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag)	\
	if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
		seq_printf((m), " " #flag);		\
	}

static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	unsigned domain;
	const char *placement;
	unsigned pin_count;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

	pin_count = READ_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
	attachment = READ_ONCE(bo->tbo.base.import_attach);

	if (attachment)
		seq_printf(m, " imported from %p", dma_buf);
	else if (dma_buf)
		seq_printf(m, " exported as %p", dma_buf);

	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_printf(m, "\n");

	return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}