/*	$NetBSD: amdgpu_gem.c,v 1.9 2021/12/19 12:02:39 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_gem.c,v 1.9 2021/12/19 12:02:39 riastradh Exp $");

#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_xgmi.h"

#include <linux/nbsd-namespace.h>

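/**
 * amdgpu_gem_object_free - free the amdgpu BO backing a GEM object
 *
 * @gobj: GEM object to free
 *
 * Unregisters any MMU notifier attached to the buffer object and drops
 * the reference the GEM object held on it.
 */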
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

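/**
 * amdgpu_gem_object_create - allocate a BO and wrap it in a GEM object
 *
 * @adev: amdgpu device
 * @size: requested size in bytes
 * @alignment: requested byte alignment
 * @initial_domain: initial placement domain (AMDGPU_GEM_DOMAIN_*)
 * @flags: AMDGPU_GEM_CREATE_* creation flags
 * @type: TTM buffer object type
 * @resv: reservation object to share, may be NULL
 * @obj: on success, the new GEM object
 *
 * On allocation failure this retries with progressively relaxed
 * constraints: first the CPU-access requirement is dropped, then a
 * VRAM-only placement is widened to VRAM|GTT, so e.g. a request for
 * CPU-visible VRAM may end up in GTT instead.
 *
 * Returns 0 on success or a negative error code.
 */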
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct dma_resv *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	*obj = NULL;

	bp.size = size;
	bp.byte_align = alignment;
	bp.type = type;
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
retry:
	bp.flags = flags;
	bp.domain = initial_domain;
	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
				goto retry;
			}

			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_DEBUG("Failed to allocate GEM object (%lu, %u, %d, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &bo->tbo.base;

	return 0;
}

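/**
 * amdgpu_gem_force_release - forcibly release all GEM handles
 *
 * @adev: amdgpu device
 *
 * Walks every open DRM file, drops every GEM object reference in its
 * handle table and destroys the table.  Used at teardown; warns if user
 * space clients are still active.
 */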
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
#ifdef __NetBSD__
	struct vmspace *mm;
#else
	struct mm_struct *mm;
#endif
	int r;

	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
#ifdef __NetBSD__
	if (mm && mm != curproc->p_vmspace)
#else
	if (mm && mm != current->mm)
#endif
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}

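/**
 * amdgpu_gem_object_close - drop a file's VM mapping of a GEM object
 *
 * @obj: GEM object being closed
 * @file_priv: DRM file the handle belonged to
 *
 * Drops the bo_va reference taken in amdgpu_gem_object_open() and, once
 * the last reference is gone, removes the mapping and clears the freed
 * page-table entries.
 */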
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va && --bo_va->ref_count == 0) {
		amdgpu_vm_bo_rmv(adev, bo_va);

		if (amdgpu_vm_ready(vm)) {
			struct dma_fence *fence = NULL;

			r = amdgpu_vm_clear_freed(adev, vm, &fence);
			if (unlikely(r)) {
				dev_err(adev->dev, "failed to clear page "
					"tables on GEM object close (%d)\n", r);
			}

			if (fence) {
				amdgpu_bo_fence(bo, fence, true);
				dma_fence_put(fence);
			}
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

/*
 * GEM ioctls.
 */
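/**
 * amdgpu_gem_create_ioctl - DRM_AMDGPU_GEM_CREATE handler
 *
 * @dev: DRM device
 * @data: union drm_amdgpu_gem_create in/out arguments
 * @filp: DRM file the request came from
 *
 * Validates the requested flags and domains, creates the buffer object
 * (sharing the VM's reservation object for per-VM always-valid BOs) and
 * returns a handle to it in args->out.handle.
 */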
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct dma_resv *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
		return -EINVAL;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
			/* a GDS BO created from user space must be
			 * passed to the BO list
			 */
			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
			return -EINVAL;
		}
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.base.bo, false);
		if (r)
			return r;

		resv = vm->root.base.bo->tbo.base.resv;
	}

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     flags, ttm_bo_type_device, resv, &gobj);
	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
		}
		amdgpu_bo_unreserve(vm->root.base.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

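/**
 * amdgpu_gem_userptr_ioctl - DRM_AMDGPU_GEM_USERPTR handler
 *
 * @dev: DRM device
 * @data: struct drm_amdgpu_gem_userptr in/out arguments
 * @filp: DRM file the request came from
 *
 * Wraps a page-aligned range of user memory in a GTT-backed GEM object.
 * Writable mappings must register an MMU notifier so the pages can be
 * invalidated safely; AMDGPU_GEM_USERPTR_VALIDATE pins and validates the
 * pages up front.
 */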
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
		/* if we want to write to it we must install an MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, ttm_bo_type_device, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto user_pages_done;

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto user_pages_done;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	if (r)
		goto user_pages_done;

	args->handle = handle;

user_pages_done:
	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

release_object:
	drm_gem_object_put_unlocked(gobj);

	return r;
}

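/**
 * amdgpu_mode_dumb_mmap - compute the fake mmap offset for a dumb buffer
 *
 * @filp: DRM file the handle belongs to
 * @dev: DRM device
 * @handle: GEM handle of the buffer
 * @offset_p: on success, the offset to pass to mmap()
 *
 * Fails with -EPERM for userptr BOs and for BOs created with
 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS, which must not be mapped by the CPU.
 */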
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

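/**
 * amdgpu_gem_mmap_ioctl - DRM_AMDGPU_GEM_MMAP handler
 *
 * @dev: DRM device
 * @data: union drm_amdgpu_gem_mmap in/out arguments
 * @filp: DRM file the request came from
 *
 * Thin wrapper around amdgpu_mode_dumb_mmap() that returns the mmap
 * offset for the given handle in args->out.addr_ptr.
 */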
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

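/**
 * amdgpu_gem_wait_idle_ioctl - DRM_AMDGPU_GEM_WAIT_IDLE handler
 *
 * @dev: DRM device
 * @data: union drm_amdgpu_gem_wait_idle in/out arguments
 * @filp: DRM file the request came from
 *
 * Waits for all fences on the BO's reservation object, up to the
 * absolute timeout supplied by user space; args->out.status is set to
 * nonzero if the buffer is still busy when the timeout expires.
 */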
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
						  timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put_unlocked(gobj);
	return r;
}

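/**
 * amdgpu_gem_metadata_ioctl - DRM_AMDGPU_GEM_METADATA handler
 *
 * @dev: DRM device
 * @data: struct drm_amdgpu_gem_metadata in/out arguments
 * @filp: DRM file the request came from
 *
 * Gets or sets the tiling flags and the opaque metadata blob attached
 * to a BO, depending on args->op.
 */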
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

/**
 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 *
 * @adev: amdgpu_device pointer
 * @flags: GEM UAPI flags
 *
 * Returns the GEM UAPI flags mapped to the hardware PTE flags for the ASIC.
 */
uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	if (adev->gmc.gmc_funcs->map_mtype)
		pte_flag |= amdgpu_gmc_map_mtype(adev,
						 flags & AMDGPU_VM_MTYPE_MASK);

	return pte_flag;
}

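/**
 * amdgpu_gem_va_ioctl - DRM_AMDGPU_GEM_VA handler
 *
 * @dev: DRM device
 * @data: struct drm_amdgpu_gem_va in/out arguments
 * @filp: DRM file the request came from
 *
 * Maps, unmaps, clears or replaces a GPU virtual address range for a BO
 * in the file's VM, after checking that the address range avoids the
 * reserved area and the GMC hole and that the flags are a supported
 * combination.
 */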
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_dbg(pci_dev_dev(dev->pdev),
			"va_address 0x%"PRIX64" is in reserved area 0x%"PRIX64"\n",
			args->va_address, (uint64_t)AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
	    args->va_address < AMDGPU_GMC_HOLE_END) {
		dev_dbg(pci_dev_dev(dev->pdev),
			"va_address 0x%"PRIX64" is in VA hole 0x%"PRIX64"-0x%"PRIX64"\n",
			args->va_address, (uint64_t)AMDGPU_GMC_HOLE_START,
			(uint64_t)AMDGPU_GMC_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_GMC_HOLE_MASK;

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(pci_dev_dev(dev->pdev), "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(pci_dev_dev(dev->pdev), "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			tv.num_shared = 1;
		else
			tv.num_shared = 0;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

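/**
 * amdgpu_gem_op_ioctl - DRM_AMDGPU_GEM_OP handler
 *
 * @dev: DRM device
 * @data: struct drm_amdgpu_gem_op in/out arguments
 * @filp: DRM file the request came from
 *
 * Either copies the BO's creation info to user space or updates its
 * preferred placement domains.  Changing the placement is refused for
 * userptr BOs, for prime-shared BOs moving to VRAM, and for BOs mapped
 * into a VM on another device in the same XGMI hive.
 */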
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_vm_bo_base *base;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->tbo.base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		for (base = robj->vm_bo; base; base = base->next)
			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
				amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
				r = -EINVAL;
				amdgpu_bo_unreserve(robj);
				goto out;
			}

		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

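/**
 * amdgpu_mode_dumb_create - create a dumb buffer for scanout
 *
 * @file_priv: DRM file the handle belongs to
 * @dev: DRM device
 * @args: dumb buffer parameters; pitch, size and handle are filled in
 *
 * Allocates a CPU-accessible, display-capable BO with the pitch aligned
 * for this ASIC and returns a handle to it.
 */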
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		    AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	u32 domain;
	int r;

	/*
	 * The buffer returned from this function should be cleared, but
	 * it can only be done if the ring is enabled or we'll fail to
	 * create the buffer.
	 */
	if (adev->mman.buffer_funcs_enabled)
		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_pin_domain(adev,
				amdgpu_display_supported_domains(adev, flags));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
				     ttm_bo_type_device, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag)	\
	if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
		seq_printf((m), " " #flag);		\
	}

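/*
 * Print one debugfs line per GEM BO: id, size, current placement, pin
 * count, dma-buf import/export status and creation flags.
 */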
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	unsigned domain;
	const char *placement;
	unsigned pin_count;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

	pin_count = READ_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
	attachment = READ_ONCE(bo->tbo.base.import_attach);

	if (attachment)
		seq_printf(m, " imported from %p", dma_buf);
	else if (dma_buf)
		seq_printf(m, " exported as %p", dma_buf);

	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_printf(m, "\n");

	return 0;
}

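/*
 * Dump, for every open DRM file, the owning process and each of its GEM
 * objects via amdgpu_debugfs_gem_bo_info().
 */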
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}
    937