      1 /*	$NetBSD: amdgpu_vm.c,v 1.5 2021/12/18 23:44:58 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright 2008 Advanced Micro Devices, Inc.
      5  * Copyright 2008 Red Hat Inc.
      6  * Copyright 2009 Jerome Glisse.
      7  *
      8  * Permission is hereby granted, free of charge, to any person obtaining a
      9  * copy of this software and associated documentation files (the "Software"),
     10  * to deal in the Software without restriction, including without limitation
     11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     12  * and/or sell copies of the Software, and to permit persons to whom the
     13  * Software is furnished to do so, subject to the following conditions:
     14  *
     15  * The above copyright notice and this permission notice shall be included in
     16  * all copies or substantial portions of the Software.
     17  *
     18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     21  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     22  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     23  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     24  * OTHER DEALINGS IN THE SOFTWARE.
     25  *
     26  * Authors: Dave Airlie
     27  *          Alex Deucher
     28  *          Jerome Glisse
     29  */
     30 #include <sys/cdefs.h>
     31 __KERNEL_RCSID(0, "$NetBSD: amdgpu_vm.c,v 1.5 2021/12/18 23:44:58 riastradh Exp $");
     32 
     33 #include <linux/dma-fence-array.h>
     34 #include <linux/interval_tree_generic.h>
     35 #include <linux/idr.h>
     36 
     37 #include <drm/amdgpu_drm.h>
     38 #include "amdgpu.h"
     39 #include "amdgpu_trace.h"
     40 #include "amdgpu_amdkfd.h"
     41 #include "amdgpu_gmc.h"
     42 #include "amdgpu_xgmi.h"
     43 
     44 #include <linux/nbsd-namespace.h>
     45 /**
     46  * DOC: GPUVM
     47  *
      48  * GPUVM is similar to the legacy gart on older asics; however,
      49  * rather than there being a single global gart table
      50  * for the entire GPU, there are multiple VM page tables active
      51  * at any given time.  The VM page tables can contain a mix of
      52  * vram pages and system memory pages, and system memory pages
      53  * can be mapped as snooped (cached system pages) or unsnooped
      54  * (uncached system pages).
     55  * Each VM has an ID associated with it and there is a page table
      56  * associated with each VMID.  When executing a command buffer,
      57  * the kernel tells the ring what VMID to use for that command
      58  * buffer.  VMIDs are allocated dynamically as commands are submitted.
      59  * The userspace drivers maintain their own address space and the kernel
      60  * sets up their page tables accordingly when they submit their
     61  * command buffers and a VMID is assigned.
     62  * Cayman/Trinity support up to 8 active VMs at any given time;
     63  * SI supports 16.
     64  */
     65 
     66 #define START(node) ((node)->start)
     67 #define LAST(node) ((node)->last)
     68 
     69 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
     70 		     START, LAST, static, amdgpu_vm_it)
     71 
     72 #undef START
     73 #undef LAST
     74 
     75 /**
     76  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
     77  */
     78 struct amdgpu_prt_cb {
     79 
     80 	/**
     81 	 * @adev: amdgpu device
     82 	 */
     83 	struct amdgpu_device *adev;
     84 
     85 	/**
     86 	 * @cb: callback
     87 	 */
     88 	struct dma_fence_cb cb;
     89 };
     90 
     91 /**
     92  * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
     93  * happens while holding this lock anywhere to prevent deadlocks when
     94  * an MMU notifier runs in reclaim-FS context.
     95  */
     96 static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
     97 {
     98 	mutex_lock(&vm->eviction_lock);
     99 	vm->saved_flags = memalloc_nofs_save();
    100 }
    101 
    102 static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
    103 {
    104 	if (mutex_trylock(&vm->eviction_lock)) {
    105 		vm->saved_flags = memalloc_nofs_save();
    106 		return 1;
    107 	}
    108 	return 0;
    109 }
    110 
    111 static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
    112 {
    113 	memalloc_nofs_restore(vm->saved_flags);
    114 	mutex_unlock(&vm->eviction_lock);
    115 }
    116 
    117 /**
    118  * amdgpu_vm_level_shift - return the addr shift for each level
    119  *
    120  * @adev: amdgpu_device pointer
    121  * @level: VMPT level
    122  *
    123  * Returns:
    124  * The number of bits the pfn needs to be right shifted for a level.
    125  */
    126 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
    127 				      unsigned level)
    128 {
    129 	unsigned shift = 0xff;
    130 
    131 	switch (level) {
    132 	case AMDGPU_VM_PDB2:
    133 	case AMDGPU_VM_PDB1:
    134 	case AMDGPU_VM_PDB0:
    135 		shift = 9 * (AMDGPU_VM_PDB0 - level) +
    136 			adev->vm_manager.block_size;
    137 		break;
    138 	case AMDGPU_VM_PTB:
    139 		shift = 0;
    140 		break;
    141 	default:
     142 		dev_err(adev->dev, "the level %d isn't supported.\n", level);
    143 	}
    144 
    145 	return shift;
    146 }
    147 
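         /*
          * Illustrative example (values assumed, not taken from this file):
          * with the common GFX9-style configuration of a four level hierarchy
          * and adev->vm_manager.block_size == 9, the shifts above come out as
          * PDB2 = 27, PDB1 = 18, PDB0 = 9 and PTB = 0, so a GPU pfn is
          * decomposed as
          *
          *	idx_pdb2 = (pfn >> 27) & mask;	// root PD entry
          *	idx_pdb1 = (pfn >> 18) & 0x1ff;	// level 1 PD entry
          *	idx_pdb0 = (pfn >> 9)  & 0x1ff;	// level 2 PD entry
          *	idx_ptb  = (pfn >> 0)  & 0x1ff;	// final PTE
          *
          * with the masks taken from amdgpu_vm_entries_mask() below.
          */
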
    148 /**
    149  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
    150  *
    151  * @adev: amdgpu_device pointer
    152  * @level: VMPT level
    153  *
    154  * Returns:
    155  * The number of entries in a page directory or page table.
    156  */
    157 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
    158 				      unsigned level)
    159 {
    160 	unsigned shift = amdgpu_vm_level_shift(adev,
    161 					       adev->vm_manager.root_level);
    162 
    163 	if (level == adev->vm_manager.root_level)
    164 		/* For the root directory */
    165 		return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
    166 			>> shift;
    167 	else if (level != AMDGPU_VM_PTB)
    168 		/* Everything in between */
    169 		return 512;
    170 	else
    171 		/* For the page tables on the leaves */
    172 		return AMDGPU_VM_PTE_COUNT(adev);
    173 }
    174 
    175 /**
    176  * amdgpu_vm_num_ats_entries - return the number of ATS entries in the root PD
    177  *
    178  * @adev: amdgpu_device pointer
    179  *
    180  * Returns:
     181  * The number of entries in the root page directory which need the ATS setting.
    182  */
    183 static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device *adev)
    184 {
    185 	unsigned shift;
    186 
    187 	shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level);
    188 	return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
    189 }
    190 
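         /*
          * Illustrative numbers (assuming the GFX9-style defaults sketched
          * above): with a root level shift of 27 and AMDGPU_GPU_PAGE_SHIFT ==
          * 12, the ATS range below AMDGPU_GMC_HOLE_START (0x0000800000000000)
          * covers 0x800000000000 >> (27 + 12) = 256 root PD entries.
          */
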
    191 /**
    192  * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
    193  *
    194  * @adev: amdgpu_device pointer
    195  * @level: VMPT level
    196  *
    197  * Returns:
    198  * The mask to extract the entry number of a PD/PT from an address.
    199  */
    200 static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev,
    201 				       unsigned int level)
    202 {
    203 	if (level <= adev->vm_manager.root_level)
    204 		return 0xffffffff;
    205 	else if (level != AMDGPU_VM_PTB)
    206 		return 0x1ff;
    207 	else
    208 		return AMDGPU_VM_PTE_COUNT(adev) - 1;
    209 }
    210 
    211 /**
    212  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
    213  *
    214  * @adev: amdgpu_device pointer
    215  * @level: VMPT level
    216  *
    217  * Returns:
    218  * The size of the BO for a page directory or page table in bytes.
    219  */
    220 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
    221 {
    222 	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
    223 }
    224 
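         /*
          * For example, a PD/PT with the default 512 entries (block_size == 9)
          * takes 512 * 8 = 4096 bytes, i.e. exactly one GPU page after
          * alignment.
          */
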
    225 /**
    226  * amdgpu_vm_bo_evicted - vm_bo is evicted
    227  *
    228  * @vm_bo: vm_bo which is evicted
    229  *
    230  * State for PDs/PTs and per VM BOs which are not at the location they should
    231  * be.
    232  */
    233 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
    234 {
    235 	struct amdgpu_vm *vm = vm_bo->vm;
    236 	struct amdgpu_bo *bo = vm_bo->bo;
    237 
    238 	vm_bo->moved = true;
    239 	if (bo->tbo.type == ttm_bo_type_kernel)
    240 		list_move(&vm_bo->vm_status, &vm->evicted);
    241 	else
    242 		list_move_tail(&vm_bo->vm_status, &vm->evicted);
    243 }
    244 
    245 /**
     246  * amdgpu_vm_bo_relocated - vm_bo is relocated
     247  *
     248  * @vm_bo: vm_bo which is relocated
     249  *
     250  * State for PDs/PTs which need to update their parent PD.
    251  */
    252 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
    253 {
    254 	list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
    255 }
    256 
    257 /**
    258  * amdgpu_vm_bo_moved - vm_bo is moved
    259  *
    260  * @vm_bo: vm_bo which is moved
    261  *
    262  * State for per VM BOs which are moved, but that change is not yet reflected
    263  * in the page tables.
    264  */
    265 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
    266 {
    267 	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
    268 }
    269 
    270 /**
    271  * amdgpu_vm_bo_idle - vm_bo is idle
    272  *
    273  * @vm_bo: vm_bo which is now idle
    274  *
    275  * State for PDs/PTs and per VM BOs which have gone through the state machine
    276  * and are now idle.
    277  */
    278 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
    279 {
    280 	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
    281 	vm_bo->moved = false;
    282 }
    283 
    284 /**
    285  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
    286  *
    287  * @vm_bo: vm_bo which is now invalidated
    288  *
     289  * State for normal BOs which are invalidated and that change is not yet reflected
    290  * in the PTs.
    291  */
    292 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
    293 {
    294 	spin_lock(&vm_bo->vm->invalidated_lock);
    295 	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
    296 	spin_unlock(&vm_bo->vm->invalidated_lock);
    297 }
    298 
    299 /**
    300  * amdgpu_vm_bo_done - vm_bo is done
    301  *
    302  * @vm_bo: vm_bo which is now done
    303  *
     304  * State for normal BOs which were invalidated and whose change has
     305  * already been applied in the PTs.
    306  */
    307 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
    308 {
    309 	spin_lock(&vm_bo->vm->invalidated_lock);
    310 	list_del_init(&vm_bo->vm_status);
    311 	spin_unlock(&vm_bo->vm->invalidated_lock);
    312 }
    313 
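         /*
          * Rough summary of the state machine driven by the helpers above
          * (the list heads live in struct amdgpu_vm):
          *
          *	PDs/PTs:     evicted -> relocated -> idle
          *	per VM BOs:  evicted -> moved -> idle
          *	normal BOs:  invalidated -> done
          *
          * amdgpu_vm_validate_pt_bos() and amdgpu_vm_bo_update() perform most
          * of these transitions.
          */
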
    314 /**
    315  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
    316  *
    317  * @base: base structure for tracking BO usage in a VM
    318  * @vm: vm to which bo is to be added
    319  * @bo: amdgpu buffer object
    320  *
    321  * Initialize a bo_va_base structure and add it to the appropriate lists
    322  *
    323  */
    324 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
    325 				   struct amdgpu_vm *vm,
    326 				   struct amdgpu_bo *bo)
    327 {
    328 	base->vm = vm;
    329 	base->bo = bo;
    330 	base->next = NULL;
    331 	INIT_LIST_HEAD(&base->vm_status);
    332 
    333 	if (!bo)
    334 		return;
    335 	base->next = bo->vm_bo;
    336 	bo->vm_bo = base;
    337 
    338 	if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
    339 		return;
    340 
    341 	vm->bulk_moveable = false;
    342 	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
    343 		amdgpu_vm_bo_relocated(base);
    344 	else
    345 		amdgpu_vm_bo_idle(base);
    346 
    347 	if (bo->preferred_domains &
    348 	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
    349 		return;
    350 
     351 	/*
     352 	 * We checked all the prerequisites, but it looks like this per VM BO
     353 	 * is currently evicted. Add the BO to the evicted list to make sure it
     354 	 * is validated on next VM use to avoid faults.
     355 	 */
    356 	amdgpu_vm_bo_evicted(base);
    357 }
    358 
    359 /**
    360  * amdgpu_vm_pt_parent - get the parent page directory
    361  *
    362  * @pt: child page table
    363  *
    364  * Helper to get the parent entry for the child page table. NULL if we are at
    365  * the root page directory.
    366  */
    367 static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
    368 {
    369 	struct amdgpu_bo *parent = pt->base.bo->parent;
    370 
    371 	if (!parent)
    372 		return NULL;
    373 
    374 	return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
    375 }
    376 
    377 /*
    378  * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
    379  */
    380 struct amdgpu_vm_pt_cursor {
    381 	uint64_t pfn;
    382 	struct amdgpu_vm_pt *parent;
    383 	struct amdgpu_vm_pt *entry;
    384 	unsigned level;
    385 };
    386 
    387 /**
    388  * amdgpu_vm_pt_start - start PD/PT walk
    389  *
    390  * @adev: amdgpu_device pointer
    391  * @vm: amdgpu_vm structure
    392  * @start: start address of the walk
    393  * @cursor: state to initialize
    394  *
     395  * Initialize an amdgpu_vm_pt_cursor to start a walk.
    396  */
    397 static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
    398 			       struct amdgpu_vm *vm, uint64_t start,
    399 			       struct amdgpu_vm_pt_cursor *cursor)
    400 {
    401 	cursor->pfn = start;
    402 	cursor->parent = NULL;
    403 	cursor->entry = &vm->root;
    404 	cursor->level = adev->vm_manager.root_level;
    405 }
    406 
    407 /**
    408  * amdgpu_vm_pt_descendant - go to child node
    409  *
    410  * @adev: amdgpu_device pointer
    411  * @cursor: current state
    412  *
    413  * Walk to the child node of the current node.
    414  * Returns:
    415  * True if the walk was possible, false otherwise.
    416  */
    417 static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
    418 				    struct amdgpu_vm_pt_cursor *cursor)
    419 {
    420 	unsigned mask, shift, idx;
    421 
    422 	if (!cursor->entry->entries)
    423 		return false;
    424 
    425 	BUG_ON(!cursor->entry->base.bo);
    426 	mask = amdgpu_vm_entries_mask(adev, cursor->level);
    427 	shift = amdgpu_vm_level_shift(adev, cursor->level);
    428 
    429 	++cursor->level;
    430 	idx = (cursor->pfn >> shift) & mask;
    431 	cursor->parent = cursor->entry;
    432 	cursor->entry = &cursor->entry->entries[idx];
    433 	return true;
    434 }
    435 
    436 /**
    437  * amdgpu_vm_pt_sibling - go to sibling node
    438  *
    439  * @adev: amdgpu_device pointer
    440  * @cursor: current state
    441  *
    442  * Walk to the sibling node of the current node.
    443  * Returns:
    444  * True if the walk was possible, false otherwise.
    445  */
    446 static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
    447 				 struct amdgpu_vm_pt_cursor *cursor)
    448 {
    449 	unsigned shift, num_entries;
    450 
    451 	/* Root doesn't have a sibling */
    452 	if (!cursor->parent)
    453 		return false;
    454 
     455 	/* Go to our parent and see if we have a sibling */
    456 	shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
    457 	num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);
    458 
    459 	if (cursor->entry == &cursor->parent->entries[num_entries - 1])
    460 		return false;
    461 
    462 	cursor->pfn += 1ULL << shift;
    463 	cursor->pfn &= ~((1ULL << shift) - 1);
    464 	++cursor->entry;
    465 	return true;
    466 }
    467 
    468 /**
    469  * amdgpu_vm_pt_ancestor - go to parent node
    470  *
    471  * @cursor: current state
    472  *
    473  * Walk to the parent node of the current node.
    474  * Returns:
    475  * True if the walk was possible, false otherwise.
    476  */
    477 static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
    478 {
    479 	if (!cursor->parent)
    480 		return false;
    481 
    482 	--cursor->level;
    483 	cursor->entry = cursor->parent;
    484 	cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
    485 	return true;
    486 }
    487 
    488 /**
     489  * amdgpu_vm_pt_next - get next PD/PT in hierarchy
    490  *
    491  * @adev: amdgpu_device pointer
    492  * @cursor: current state
    493  *
    494  * Walk the PD/PT tree to the next node.
    495  */
    496 static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
    497 			      struct amdgpu_vm_pt_cursor *cursor)
    498 {
    499 	/* First try a newborn child */
    500 	if (amdgpu_vm_pt_descendant(adev, cursor))
    501 		return;
    502 
     503 	/* If that didn't work, try to find a sibling */
    504 	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
    505 		/* No sibling, go to our parents and grandparents */
    506 		if (!amdgpu_vm_pt_ancestor(cursor)) {
    507 			cursor->pfn = ~0ll;
    508 			return;
    509 		}
    510 	}
    511 }
    512 
    513 /**
     514  * amdgpu_vm_pt_first_dfs - start a depth-first search
    515  *
    516  * @adev: amdgpu_device structure
    517  * @vm: amdgpu_vm structure
    518  * @start: optional cursor to start with
    519  * @cursor: state to initialize
    520  *
     521  * Starts a depth-first traversal of the PD/PT tree.
    522  */
    523 static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
    524 				   struct amdgpu_vm *vm,
    525 				   struct amdgpu_vm_pt_cursor *start,
    526 				   struct amdgpu_vm_pt_cursor *cursor)
    527 {
    528 	if (start)
    529 		*cursor = *start;
    530 	else
    531 		amdgpu_vm_pt_start(adev, vm, 0, cursor);
    532 	while (amdgpu_vm_pt_descendant(adev, cursor));
    533 }
    534 
    535 /**
     536  * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
    537  *
    538  * @start: starting point for the search
    539  * @entry: current entry
    540  *
    541  * Returns:
    542  * True when the search should continue, false otherwise.
    543  */
    544 static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
    545 				      struct amdgpu_vm_pt *entry)
    546 {
    547 	return entry && (!start || entry != start->entry);
    548 }
    549 
    550 /**
     551  * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
    552  *
    553  * @adev: amdgpu_device structure
    554  * @cursor: current state
    555  *
     556  * Move the cursor to the next node in a depth-first search.
    557  */
    558 static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
    559 				  struct amdgpu_vm_pt_cursor *cursor)
    560 {
    561 	if (!cursor->entry)
    562 		return;
    563 
    564 	if (!cursor->parent)
    565 		cursor->entry = NULL;
    566 	else if (amdgpu_vm_pt_sibling(adev, cursor))
    567 		while (amdgpu_vm_pt_descendant(adev, cursor));
    568 	else
    569 		amdgpu_vm_pt_ancestor(cursor);
    570 }
    571 
    572 /*
     573  * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
    574  */
    575 #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)		\
    576 	for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)),		\
    577 	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
    578 	     amdgpu_vm_pt_continue_dfs((start), (entry));			\
    579 	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
    580 
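         /*
          * Typical use, as in amdgpu_vm_free_pts() below: the traversal visits
          * children before their parents, so entries can be freed safely while
          * iterating:
          *
          *	struct amdgpu_vm_pt_cursor cursor;
          *	struct amdgpu_vm_pt *entry;
          *
          *	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
          *		amdgpu_vm_free_table(entry);
          */
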
    581 /**
    582  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
    583  *
    584  * @vm: vm providing the BOs
    585  * @validated: head of validation list
    586  * @entry: entry to add
    587  *
    588  * Add the page directory to the list of BOs to
    589  * validate for command submission.
    590  */
    591 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
    592 			 struct list_head *validated,
    593 			 struct amdgpu_bo_list_entry *entry)
    594 {
    595 	entry->priority = 0;
    596 	entry->tv.bo = &vm->root.base.bo->tbo;
    597 	/* One for TTM and one for the CS job */
    598 	entry->tv.num_shared = 2;
    599 	entry->user_pages = NULL;
    600 	list_add(&entry->tv.head, validated);
    601 }
    602 
    603 /**
    604  * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
    605  *
    606  * @bo: BO which was removed from the LRU
    607  *
    608  * Make sure the bulk_moveable flag is updated when a BO is removed from the
    609  * LRU.
    610  */
    611 void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
    612 {
    613 	struct amdgpu_bo *abo;
    614 	struct amdgpu_vm_bo_base *bo_base;
    615 
    616 	if (!amdgpu_bo_is_amdgpu_bo(bo))
    617 		return;
    618 
    619 	if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
    620 		return;
    621 
    622 	abo = ttm_to_amdgpu_bo(bo);
    623 	if (!abo->parent)
    624 		return;
    625 	for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
    626 		struct amdgpu_vm *vm = bo_base->vm;
    627 
    628 		if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
    629 			vm->bulk_moveable = false;
    630 	}
     631 }
     632 
    633 /**
    634  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
    635  *
    636  * @adev: amdgpu device pointer
    637  * @vm: vm providing the BOs
    638  *
    639  * Move all BOs to the end of LRU and remember their positions to put them
    640  * together.
    641  */
    642 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
    643 				struct amdgpu_vm *vm)
    644 {
    645 	struct amdgpu_vm_bo_base *bo_base;
    646 
    647 	if (vm->bulk_moveable) {
    648 		spin_lock(&ttm_bo_glob.lru_lock);
    649 		ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
    650 		spin_unlock(&ttm_bo_glob.lru_lock);
    651 		return;
    652 	}
    653 
    654 	memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
    655 
    656 	spin_lock(&ttm_bo_glob.lru_lock);
    657 	list_for_each_entry(bo_base, &vm->idle, vm_status) {
    658 		struct amdgpu_bo *bo = bo_base->bo;
    659 
    660 		if (!bo->parent)
    661 			continue;
    662 
    663 		ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
    664 		if (bo->shadow)
    665 			ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
    666 						&vm->lru_bulk_move);
    667 	}
    668 	spin_unlock(&ttm_bo_glob.lru_lock);
    669 
    670 	vm->bulk_moveable = true;
    671 }
    672 
    673 /**
    674  * amdgpu_vm_validate_pt_bos - validate the page table BOs
    675  *
    676  * @adev: amdgpu device pointer
    677  * @vm: vm providing the BOs
    678  * @validate: callback to do the validation
    679  * @param: parameter for the validation callback
    680  *
     681  * Validate the page table BOs on command submission if necessary.
    682  *
    683  * Returns:
    684  * Validation result.
    685  */
    686 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
    687 			      int (*validate)(void *p, struct amdgpu_bo *bo),
    688 			      void *param)
    689 {
    690 	struct amdgpu_vm_bo_base *bo_base, *tmp;
    691 	int r;
    692 
    693 	vm->bulk_moveable &= list_empty(&vm->evicted);
    694 
    695 	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
    696 		struct amdgpu_bo *bo = bo_base->bo;
    697 
    698 		r = validate(param, bo);
    699 		if (r)
    700 			return r;
    701 
    702 		if (bo->tbo.type != ttm_bo_type_kernel) {
    703 			amdgpu_vm_bo_moved(bo_base);
    704 		} else {
    705 			vm->update_funcs->map_table(bo);
    706 			if (bo->parent)
    707 				amdgpu_vm_bo_relocated(bo_base);
    708 			else
    709 				amdgpu_vm_bo_idle(bo_base);
    710 		}
    711 	}
    712 
    713 	amdgpu_vm_eviction_lock(vm);
    714 	vm->evicting = false;
    715 	amdgpu_vm_eviction_unlock(vm);
    716 
    717 	return 0;
    718 }
    719 
    720 /**
    721  * amdgpu_vm_ready - check VM is ready for updates
    722  *
    723  * @vm: VM to check
    724  *
    725  * Check if all VM PDs/PTs are ready for updates
    726  *
    727  * Returns:
    728  * True if eviction list is empty.
    729  */
    730 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
    731 {
    732 	return list_empty(&vm->evicted);
    733 }
    734 
    735 /**
    736  * amdgpu_vm_clear_bo - initially clear the PDs/PTs
    737  *
    738  * @adev: amdgpu_device pointer
    739  * @vm: VM to clear BO from
    740  * @bo: BO to clear
    741  * @direct: use a direct update
    742  *
    743  * Root PD needs to be reserved when calling this.
    744  *
    745  * Returns:
    746  * 0 on success, errno otherwise.
    747  */
    748 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
    749 			      struct amdgpu_vm *vm,
    750 			      struct amdgpu_bo *bo,
    751 			      bool direct)
    752 {
    753 	struct ttm_operation_ctx ctx = { true, false };
    754 	unsigned level = adev->vm_manager.root_level;
    755 	struct amdgpu_vm_update_params params;
    756 	struct amdgpu_bo *ancestor = bo;
    757 	unsigned entries, ats_entries;
    758 	uint64_t addr;
    759 	int r;
    760 
    761 	/* Figure out our place in the hierarchy */
    762 	if (ancestor->parent) {
    763 		++level;
    764 		while (ancestor->parent->parent) {
    765 			++level;
    766 			ancestor = ancestor->parent;
    767 		}
    768 	}
    769 
    770 	entries = amdgpu_bo_size(bo) / 8;
    771 	if (!vm->pte_support_ats) {
    772 		ats_entries = 0;
    773 
    774 	} else if (!bo->parent) {
    775 		ats_entries = amdgpu_vm_num_ats_entries(adev);
    776 		ats_entries = min(ats_entries, entries);
    777 		entries -= ats_entries;
    778 
    779 	} else {
    780 		struct amdgpu_vm_pt *pt;
    781 
    782 		pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base);
    783 		ats_entries = amdgpu_vm_num_ats_entries(adev);
    784 		if ((pt - vm->root.entries) >= ats_entries) {
    785 			ats_entries = 0;
    786 		} else {
    787 			ats_entries = entries;
    788 			entries = 0;
    789 		}
    790 	}
    791 
    792 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
    793 	if (r)
    794 		return r;
    795 
    796 	if (bo->shadow) {
    797 		r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
    798 				    &ctx);
    799 		if (r)
    800 			return r;
    801 	}
    802 
    803 	r = vm->update_funcs->map_table(bo);
    804 	if (r)
    805 		return r;
    806 
    807 	memset(&params, 0, sizeof(params));
    808 	params.adev = adev;
    809 	params.vm = vm;
    810 	params.direct = direct;
    811 
    812 	r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
    813 	if (r)
    814 		return r;
    815 
    816 	addr = 0;
    817 	if (ats_entries) {
    818 		uint64_t value = 0, flags;
    819 
    820 		flags = AMDGPU_PTE_DEFAULT_ATC;
    821 		if (level != AMDGPU_VM_PTB) {
    822 			/* Handle leaf PDEs as PTEs */
    823 			flags |= AMDGPU_PDE_PTE;
    824 			amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
    825 		}
    826 
    827 		r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
    828 					     value, flags);
    829 		if (r)
    830 			return r;
    831 
    832 		addr += ats_entries * 8;
    833 	}
    834 
    835 	if (entries) {
    836 		uint64_t value = 0, flags = 0;
    837 
    838 		if (adev->asic_type >= CHIP_VEGA10) {
    839 			if (level != AMDGPU_VM_PTB) {
    840 				/* Handle leaf PDEs as PTEs */
    841 				flags |= AMDGPU_PDE_PTE;
    842 				amdgpu_gmc_get_vm_pde(adev, level,
    843 						      &value, &flags);
    844 			} else {
    845 				/* Workaround for fault priority problem on GMC9 */
    846 				flags = AMDGPU_PTE_EXECUTABLE;
    847 			}
    848 		}
    849 
    850 		r = vm->update_funcs->update(&params, bo, addr, 0, entries,
    851 					     value, flags);
    852 		if (r)
    853 			return r;
    854 	}
    855 
    856 	return vm->update_funcs->commit(&params, NULL);
    857 }
    858 
    859 /**
    860  * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
    861  *
    862  * @adev: amdgpu_device pointer
    863  * @vm: requesting vm
    864  * @level: the page table level
    865  * @direct: use a direct update
    866  * @bp: resulting BO allocation parameters
    867  */
    868 static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
    869 			       int level, bool direct,
    870 			       struct amdgpu_bo_param *bp)
    871 {
    872 	memset(bp, 0, sizeof(*bp));
    873 
    874 	bp->size = amdgpu_vm_bo_size(adev, level);
    875 	bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
    876 	bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
    877 	bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
    878 	bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
    879 		AMDGPU_GEM_CREATE_CPU_GTT_USWC;
    880 	if (vm->use_cpu_for_update)
    881 		bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
    882 	else if (!vm->root.base.bo || vm->root.base.bo->shadow)
    883 		bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
    884 	bp->type = ttm_bo_type_kernel;
    885 	bp->no_wait_gpu = direct;
    886 	if (vm->root.base.bo)
    887 		bp->resv = vm->root.base.bo->tbo.base.resv;
    888 }
    889 
    890 /**
    891  * amdgpu_vm_alloc_pts - Allocate a specific page table
    892  *
    893  * @adev: amdgpu_device pointer
    894  * @vm: VM to allocate page tables for
    895  * @cursor: Which page table to allocate
    896  * @direct: use a direct update
    897  *
    898  * Make sure a specific page table or directory is allocated.
    899  *
    900  * Returns:
     901  * 0 if the page table was allocated or was already present, negative
     902  * errno if an error occurred.
    903  */
    904 static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
    905 			       struct amdgpu_vm *vm,
    906 			       struct amdgpu_vm_pt_cursor *cursor,
    907 			       bool direct)
    908 {
    909 	struct amdgpu_vm_pt *entry = cursor->entry;
    910 	struct amdgpu_bo_param bp;
    911 	struct amdgpu_bo *pt;
    912 	int r;
    913 
    914 	if (cursor->level < AMDGPU_VM_PTB && !entry->entries) {
    915 		unsigned num_entries;
    916 
    917 		num_entries = amdgpu_vm_num_entries(adev, cursor->level);
    918 		entry->entries = kvmalloc_array(num_entries,
    919 						sizeof(*entry->entries),
    920 						GFP_KERNEL | __GFP_ZERO);
    921 		if (!entry->entries)
    922 			return -ENOMEM;
    923 	}
    924 
    925 	if (entry->base.bo)
    926 		return 0;
    927 
    928 	amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp);
    929 
    930 	r = amdgpu_bo_create(adev, &bp, &pt);
    931 	if (r)
    932 		return r;
    933 
     934 	/* Keep a reference to the parent directory to avoid freeing the
     935 	 * page tables in the wrong order.
     936 	 */
    937 	pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
    938 	amdgpu_vm_bo_base_init(&entry->base, vm, pt);
    939 
    940 	r = amdgpu_vm_clear_bo(adev, vm, pt, direct);
    941 	if (r)
    942 		goto error_free_pt;
    943 
    944 	return 0;
    945 
    946 error_free_pt:
    947 	amdgpu_bo_unref(&pt->shadow);
    948 	amdgpu_bo_unref(&pt);
    949 	return r;
    950 }
    951 
    952 /**
     953  * amdgpu_vm_free_table - free one PD/PT
    954  *
    955  * @entry: PDE to free
    956  */
    957 static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
    958 {
    959 	if (entry->base.bo) {
    960 		entry->base.bo->vm_bo = NULL;
    961 		list_del(&entry->base.vm_status);
    962 		amdgpu_bo_unref(&entry->base.bo->shadow);
    963 		amdgpu_bo_unref(&entry->base.bo);
    964 	}
    965 	kvfree(entry->entries);
    966 	entry->entries = NULL;
    967 }
    968 
    969 /**
    970  * amdgpu_vm_free_pts - free PD/PT levels
    971  *
    972  * @adev: amdgpu device structure
    973  * @vm: amdgpu vm structure
    974  * @start: optional cursor where to start freeing PDs/PTs
    975  *
    976  * Free the page directory or page table level and all sub levels.
    977  */
    978 static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
    979 			       struct amdgpu_vm *vm,
    980 			       struct amdgpu_vm_pt_cursor *start)
    981 {
    982 	struct amdgpu_vm_pt_cursor cursor;
    983 	struct amdgpu_vm_pt *entry;
    984 
    985 	vm->bulk_moveable = false;
    986 
    987 	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
    988 		amdgpu_vm_free_table(entry);
    989 
    990 	if (start)
    991 		amdgpu_vm_free_table(start->entry);
    992 }
    993 
    994 /**
    995  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
    996  *
    997  * @adev: amdgpu_device pointer
    998  */
    999 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
   1000 {
   1001 	const struct amdgpu_ip_block *ip_block;
   1002 	bool has_compute_vm_bug;
   1003 	struct amdgpu_ring *ring;
   1004 	int i;
   1005 
   1006 	has_compute_vm_bug = false;
   1007 
   1008 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
   1009 	if (ip_block) {
   1010 		/* Compute has a VM bug for GFX version < 7.
   1011 		   Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
   1012 		if (ip_block->version->major <= 7)
   1013 			has_compute_vm_bug = true;
   1014 		else if (ip_block->version->major == 8)
   1015 			if (adev->gfx.mec_fw_version < 673)
   1016 				has_compute_vm_bug = true;
   1017 	}
   1018 
   1019 	for (i = 0; i < adev->num_rings; i++) {
   1020 		ring = adev->rings[i];
   1021 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
   1022 			/* only compute rings */
   1023 			ring->has_compute_vm_bug = has_compute_vm_bug;
   1024 		else
   1025 			ring->has_compute_vm_bug = false;
   1026 	}
   1027 }
   1028 
   1029 /**
   1030  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
   1031  *
   1032  * @ring: ring on which the job will be submitted
   1033  * @job: job to submit
   1034  *
   1035  * Returns:
   1036  * True if sync is needed.
   1037  */
   1038 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
   1039 				  struct amdgpu_job *job)
   1040 {
   1041 	struct amdgpu_device *adev = ring->adev;
   1042 	unsigned vmhub = ring->funcs->vmhub;
   1043 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
   1044 	struct amdgpu_vmid *id;
   1045 	bool gds_switch_needed;
   1046 	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
   1047 
   1048 	if (job->vmid == 0)
   1049 		return false;
   1050 	id = &id_mgr->ids[job->vmid];
   1051 	gds_switch_needed = ring->funcs->emit_gds_switch && (
   1052 		id->gds_base != job->gds_base ||
   1053 		id->gds_size != job->gds_size ||
   1054 		id->gws_base != job->gws_base ||
   1055 		id->gws_size != job->gws_size ||
   1056 		id->oa_base != job->oa_base ||
   1057 		id->oa_size != job->oa_size);
   1058 
   1059 	if (amdgpu_vmid_had_gpu_reset(adev, id))
   1060 		return true;
   1061 
   1062 	return vm_flush_needed || gds_switch_needed;
   1063 }
   1064 
   1065 /**
   1066  * amdgpu_vm_flush - hardware flush the vm
   1067  *
   1068  * @ring: ring to use for flush
   1069  * @job:  related job
   1070  * @need_pipe_sync: is pipe sync needed
   1071  *
   1072  * Emit a VM flush when it is necessary.
   1073  *
   1074  * Returns:
   1075  * 0 on success, errno otherwise.
   1076  */
   1077 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
   1078 		    bool need_pipe_sync)
   1079 {
   1080 	struct amdgpu_device *adev = ring->adev;
   1081 	unsigned vmhub = ring->funcs->vmhub;
   1082 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
   1083 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
   1084 	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
   1085 		id->gds_base != job->gds_base ||
   1086 		id->gds_size != job->gds_size ||
   1087 		id->gws_base != job->gws_base ||
   1088 		id->gws_size != job->gws_size ||
   1089 		id->oa_base != job->oa_base ||
   1090 		id->oa_size != job->oa_size);
   1091 	bool vm_flush_needed = job->vm_needs_flush;
   1092 	struct dma_fence *fence = NULL;
   1093 	bool pasid_mapping_needed = false;
   1094 	unsigned patch_offset = 0;
   1095 	int r;
   1096 
   1097 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
   1098 		gds_switch_needed = true;
   1099 		vm_flush_needed = true;
   1100 		pasid_mapping_needed = true;
   1101 	}
   1102 
   1103 	mutex_lock(&id_mgr->lock);
   1104 	if (id->pasid != job->pasid || !id->pasid_mapping ||
   1105 	    !dma_fence_is_signaled(id->pasid_mapping))
   1106 		pasid_mapping_needed = true;
   1107 	mutex_unlock(&id_mgr->lock);
   1108 
   1109 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
   1110 	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
   1111 			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
   1112 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
   1113 		ring->funcs->emit_wreg;
   1114 
   1115 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
   1116 		return 0;
   1117 
   1118 	if (ring->funcs->init_cond_exec)
   1119 		patch_offset = amdgpu_ring_init_cond_exec(ring);
   1120 
   1121 	if (need_pipe_sync)
   1122 		amdgpu_ring_emit_pipeline_sync(ring);
   1123 
   1124 	if (vm_flush_needed) {
   1125 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
   1126 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
   1127 	}
   1128 
   1129 	if (pasid_mapping_needed)
   1130 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
   1131 
   1132 	if (vm_flush_needed || pasid_mapping_needed) {
   1133 		r = amdgpu_fence_emit(ring, &fence, 0);
   1134 		if (r)
   1135 			return r;
   1136 	}
   1137 
   1138 	if (vm_flush_needed) {
   1139 		mutex_lock(&id_mgr->lock);
   1140 		dma_fence_put(id->last_flush);
   1141 		id->last_flush = dma_fence_get(fence);
   1142 		id->current_gpu_reset_count =
   1143 			atomic_read(&adev->gpu_reset_counter);
   1144 		mutex_unlock(&id_mgr->lock);
   1145 	}
   1146 
   1147 	if (pasid_mapping_needed) {
   1148 		mutex_lock(&id_mgr->lock);
   1149 		id->pasid = job->pasid;
   1150 		dma_fence_put(id->pasid_mapping);
   1151 		id->pasid_mapping = dma_fence_get(fence);
   1152 		mutex_unlock(&id_mgr->lock);
   1153 	}
   1154 	dma_fence_put(fence);
   1155 
   1156 	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
   1157 		id->gds_base = job->gds_base;
   1158 		id->gds_size = job->gds_size;
   1159 		id->gws_base = job->gws_base;
   1160 		id->gws_size = job->gws_size;
   1161 		id->oa_base = job->oa_base;
   1162 		id->oa_size = job->oa_size;
   1163 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
   1164 					    job->gds_size, job->gws_base,
   1165 					    job->gws_size, job->oa_base,
   1166 					    job->oa_size);
   1167 	}
   1168 
   1169 	if (ring->funcs->patch_cond_exec)
   1170 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
   1171 
   1172 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
   1173 	if (ring->funcs->emit_switch_buffer) {
   1174 		amdgpu_ring_emit_switch_buffer(ring);
   1175 		amdgpu_ring_emit_switch_buffer(ring);
   1176 	}
   1177 	return 0;
   1178 }
   1179 
   1180 /**
   1181  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
   1182  *
   1183  * @vm: requested vm
   1184  * @bo: requested buffer object
   1185  *
   1186  * Find @bo inside the requested vm.
    1187  * Search inside the @bo's vm list for the requested vm.
   1188  * Returns the found bo_va or NULL if none is found
   1189  *
   1190  * Object has to be reserved!
   1191  *
   1192  * Returns:
   1193  * Found bo_va or NULL.
   1194  */
   1195 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
   1196 				       struct amdgpu_bo *bo)
   1197 {
   1198 	struct amdgpu_vm_bo_base *base;
   1199 
   1200 	for (base = bo->vm_bo; base; base = base->next) {
   1201 		if (base->vm != vm)
   1202 			continue;
   1203 
   1204 		return container_of(base, struct amdgpu_bo_va, base);
   1205 	}
   1206 	return NULL;
   1207 }
   1208 
   1209 /**
   1210  * amdgpu_vm_map_gart - Resolve gart mapping of addr
   1211  *
   1212  * @pages_addr: optional DMA address to use for lookup
   1213  * @addr: the unmapped addr
   1214  *
   1215  * Look up the physical address of the page that the pte resolves
   1216  * to.
   1217  *
   1218  * Returns:
   1219  * The pointer for the page table entry.
   1220  */
   1221 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
   1222 {
   1223 	uint64_t result;
   1224 
   1225 	/* page table offset */
   1226 	result = pages_addr[addr >> PAGE_SHIFT];
   1227 
    1228 	/* in case cpu page size != gpu page size */
   1229 	result |= addr & (~PAGE_MASK);
   1230 
   1231 	result &= 0xFFFFFFFFFFFFF000ULL;
   1232 
   1233 	return result;
   1234 }
   1235 
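         /*
          * Worked example (illustrative values): for addr == 0x12345678 with
          * 4 KiB CPU pages, this computes
          *
          *	(pages_addr[0x12345] | 0x678) & 0xFFFFFFFFFFFFF000ULL
          *
          * The final mask clears the low 12 bits again, so the OR of the page
          * offset only contributes when the CPU page size is larger than the
          * 4 KiB GPU page size, where the surviving bits select the GPU page
          * inside the larger CPU page.
          */
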
   1236 /**
   1237  * amdgpu_vm_update_pde - update a single level in the hierarchy
   1238  *
   1239  * @params: parameters for the update
   1240  * @vm: requested vm
   1241  * @entry: entry to update
   1242  *
   1243  * Makes sure the requested entry in parent is up to date.
   1244  */
   1245 static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
   1246 				struct amdgpu_vm *vm,
   1247 				struct amdgpu_vm_pt *entry)
   1248 {
   1249 	struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry);
   1250 	struct amdgpu_bo *bo = parent->base.bo, *pbo;
   1251 	uint64_t pde, pt, flags;
   1252 	unsigned level;
   1253 
   1254 	for (level = 0, pbo = bo->parent; pbo; ++level)
   1255 		pbo = pbo->parent;
   1256 
   1257 	level += params->adev->vm_manager.root_level;
   1258 	amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
   1259 	pde = (entry - parent->entries) * 8;
   1260 	return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
   1261 }
   1262 
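         /*
          * Note that "pde" above is a byte offset into the parent PD: with
          * 8-byte entries, child n lives at offset n * 8, so e.g. entry index
          * 3 updates the quadword at byte 24 of the parent BO.
          */
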
   1263 /**
   1264  * amdgpu_vm_invalidate_pds - mark all PDs as invalid
   1265  *
   1266  * @adev: amdgpu_device pointer
   1267  * @vm: related vm
   1268  *
   1269  * Mark all PD level as invalid after an error.
   1270  */
   1271 static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
   1272 				     struct amdgpu_vm *vm)
   1273 {
   1274 	struct amdgpu_vm_pt_cursor cursor;
   1275 	struct amdgpu_vm_pt *entry;
   1276 
   1277 	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
   1278 		if (entry->base.bo && !entry->base.moved)
   1279 			amdgpu_vm_bo_relocated(&entry->base);
   1280 }
   1281 
   1282 /**
   1283  * amdgpu_vm_update_pdes - make sure that all directories are valid
   1284  *
   1285  * @adev: amdgpu_device pointer
   1286  * @vm: requested vm
   1287  * @direct: submit directly to the paging queue
   1288  *
   1289  * Makes sure all directories are up to date.
   1290  *
   1291  * Returns:
   1292  * 0 for success, error for failure.
   1293  */
   1294 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
   1295 			  struct amdgpu_vm *vm, bool direct)
   1296 {
   1297 	struct amdgpu_vm_update_params params;
   1298 	int r;
   1299 
   1300 	if (list_empty(&vm->relocated))
   1301 		return 0;
   1302 
   1303 	memset(&params, 0, sizeof(params));
   1304 	params.adev = adev;
   1305 	params.vm = vm;
   1306 	params.direct = direct;
   1307 
   1308 	r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
   1309 	if (r)
   1310 		return r;
   1311 
   1312 	while (!list_empty(&vm->relocated)) {
   1313 		struct amdgpu_vm_pt *entry;
   1314 
   1315 		entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
   1316 					 base.vm_status);
   1317 		amdgpu_vm_bo_idle(&entry->base);
   1318 
   1319 		r = amdgpu_vm_update_pde(&params, vm, entry);
   1320 		if (r)
   1321 			goto error;
   1322 	}
   1323 
   1324 	r = vm->update_funcs->commit(&params, &vm->last_update);
   1325 	if (r)
   1326 		goto error;
   1327 	return 0;
   1328 
   1329 error:
   1330 	amdgpu_vm_invalidate_pds(adev, vm);
   1331 	return r;
   1332 }
   1333 
   1334 /*
   1335  * amdgpu_vm_update_flags - figure out flags for PTE updates
   1336  *
   1337  * Make sure to set the right flags for the PTEs at the desired level.
   1338  */
   1339 static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
   1340 				   struct amdgpu_bo *bo, unsigned level,
   1341 				   uint64_t pe, uint64_t addr,
   1342 				   unsigned count, uint32_t incr,
   1343 				   uint64_t flags)
   1344 
   1345 {
   1346 	if (level != AMDGPU_VM_PTB) {
   1347 		flags |= AMDGPU_PDE_PTE;
   1348 		amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);
   1349 
   1350 	} else if (params->adev->asic_type >= CHIP_VEGA10 &&
   1351 		   !(flags & AMDGPU_PTE_VALID) &&
   1352 		   !(flags & AMDGPU_PTE_PRT)) {
   1353 
   1354 		/* Workaround for fault priority problem on GMC9 */
   1355 		flags |= AMDGPU_PTE_EXECUTABLE;
   1356 	}
   1357 
   1358 	params->vm->update_funcs->update(params, bo, pe, addr, count, incr,
   1359 					 flags);
   1360 }
   1361 
   1362 /**
   1363  * amdgpu_vm_fragment - get fragment for PTEs
   1364  *
   1365  * @params: see amdgpu_vm_update_params definition
   1366  * @start: first PTE to handle
   1367  * @end: last PTE to handle
   1368  * @flags: hw mapping flags
   1369  * @frag: resulting fragment size
   1370  * @frag_end: end of this fragment
   1371  *
   1372  * Returns the first possible fragment for the start and end address.
   1373  */
   1374 static void amdgpu_vm_fragment(struct amdgpu_vm_update_params *params,
   1375 			       uint64_t start, uint64_t end, uint64_t flags,
   1376 			       unsigned int *frag, uint64_t *frag_end)
   1377 {
   1378 	/**
   1379 	 * The MC L1 TLB supports variable sized pages, based on a fragment
   1380 	 * field in the PTE. When this field is set to a non-zero value, page
   1381 	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
   1382 	 * flags are considered valid for all PTEs within the fragment range
   1383 	 * and corresponding mappings are assumed to be physically contiguous.
   1384 	 *
   1385 	 * The L1 TLB can store a single PTE for the whole fragment,
   1386 	 * significantly increasing the space available for translation
   1387 	 * caching. This leads to large improvements in throughput when the
   1388 	 * TLB is under pressure.
   1389 	 *
   1390 	 * The L2 TLB distributes small and large fragments into two
   1391 	 * asymmetric partitions. The large fragment cache is significantly
   1392 	 * larger. Thus, we try to use large fragments wherever possible.
   1393 	 * Userspace can support this by aligning virtual base address and
   1394 	 * allocation size to the fragment size.
   1395 	 *
   1396 	 * Starting with Vega10 the fragment size only controls the L1. The L2
    1397 	 * is now directly fed with small/huge/giant pages from the walker.
   1398 	 */
   1399 	unsigned max_frag;
   1400 
   1401 	if (params->adev->asic_type < CHIP_VEGA10)
   1402 		max_frag = params->adev->vm_manager.fragment_size;
   1403 	else
   1404 		max_frag = 31;
   1405 
    1406 	/* system pages are not contiguous */
   1407 	if (params->pages_addr) {
   1408 		*frag = 0;
   1409 		*frag_end = end;
   1410 		return;
   1411 	}
   1412 
   1413 	/* This intentionally wraps around if no bit is set */
   1414 	*frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1);
   1415 	if (*frag >= max_frag) {
   1416 		*frag = max_frag;
   1417 		*frag_end = end & ~((1ULL << max_frag) - 1);
   1418 	} else {
   1419 		*frag_end = start + (1 << *frag);
   1420 	}
   1421 }
   1422 
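         /*
          * Illustrative example (assuming Vega10+, so max_frag == 31, and no
          * pages_addr): for start == 0x400 and end == 0x1000 (in GPU pages),
          *
          *	*frag = min(ffs(0x400) - 1, fls64(0xc00) - 1) = min(10, 11) = 10
          *	*frag_end = 0x400 + (1 << 10) = 0x800
          *
          * so the PTEs for pages 0x400..0x7ff carry AMDGPU_PTE_FRAG(10),
          * i.e. a 1 << (12 + 10) = 4 MiB fragment.
          */
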
   1423 /**
   1424  * amdgpu_vm_update_ptes - make sure that page tables are valid
   1425  *
   1426  * @params: see amdgpu_vm_update_params definition
   1427  * @start: start of GPU address range
   1428  * @end: end of GPU address range
   1429  * @dst: destination address to map to, the next dst inside the function
   1430  * @flags: mapping flags
   1431  *
   1432  * Update the page tables in the range @start - @end.
   1433  *
   1434  * Returns:
   1435  * 0 for success, -EINVAL for failure.
   1436  */
   1437 static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
   1438 				 uint64_t start, uint64_t end,
   1439 				 uint64_t dst, uint64_t flags)
   1440 {
   1441 	struct amdgpu_device *adev = params->adev;
   1442 	struct amdgpu_vm_pt_cursor cursor;
   1443 	uint64_t frag_start = start, frag_end;
   1444 	unsigned int frag;
   1445 	int r;
   1446 
   1447 	/* figure out the initial fragment */
   1448 	amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end);
   1449 
   1450 	/* walk over the address space and update the PTs */
   1451 	amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
   1452 	while (cursor.pfn < end) {
   1453 		unsigned shift, parent_shift, mask;
   1454 		uint64_t incr, entry_end, pe_start;
   1455 		struct amdgpu_bo *pt;
   1456 
   1457 		/* make sure that the page tables covering the address range are
   1458 		 * actually allocated
   1459 		 */
   1460 		r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor,
   1461 					params->direct);
   1462 		if (r)
   1463 			return r;
   1464 
   1465 		pt = cursor.entry->base.bo;
   1466 
   1467 		/* The root level can't be a huge page */
   1468 		if (cursor.level == adev->vm_manager.root_level) {
   1469 			if (!amdgpu_vm_pt_descendant(adev, &cursor))
   1470 				return -ENOENT;
   1471 			continue;
   1472 		}
   1473 
   1474 		shift = amdgpu_vm_level_shift(adev, cursor.level);
   1475 		parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
   1476 		if (adev->asic_type < CHIP_VEGA10 &&
   1477 		    (flags & AMDGPU_PTE_VALID)) {
   1478 			/* No huge page support before GMC v9 */
   1479 			if (cursor.level != AMDGPU_VM_PTB) {
   1480 				if (!amdgpu_vm_pt_descendant(adev, &cursor))
   1481 					return -ENOENT;
   1482 				continue;
   1483 			}
   1484 		} else if (frag < shift) {
   1485 			/* We can't use this level when the fragment size is
   1486 			 * smaller than the address shift. Go to the next
   1487 			 * child entry and try again.
   1488 			 */
   1489 			if (!amdgpu_vm_pt_descendant(adev, &cursor))
   1490 				return -ENOENT;
   1491 			continue;
   1492 		} else if (frag >= parent_shift &&
   1493 			   cursor.level - 1 != adev->vm_manager.root_level) {
   1494 			/* If the fragment size is even larger than the parent
   1495 			 * shift we should go up one level and check it again
   1496 			 * unless one level up is the root level.
   1497 			 */
   1498 			if (!amdgpu_vm_pt_ancestor(&cursor))
   1499 				return -ENOENT;
   1500 			continue;
   1501 		}
   1502 
   1503 		/* Looks good so far, calculate parameters for the update */
   1504 		incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
   1505 		mask = amdgpu_vm_entries_mask(adev, cursor.level);
   1506 		pe_start = ((cursor.pfn >> shift) & mask) * 8;
   1507 		entry_end = (uint64_t)(mask + 1) << shift;
   1508 		entry_end += cursor.pfn & ~(entry_end - 1);
   1509 		entry_end = min(entry_end, end);
   1510 
   1511 		do {
   1512 			uint64_t upd_end = min(entry_end, frag_end);
   1513 			unsigned nptes = (upd_end - frag_start) >> shift;
   1514 
   1515 			amdgpu_vm_update_flags(params, pt, cursor.level,
   1516 					       pe_start, dst, nptes, incr,
   1517 					       flags | AMDGPU_PTE_FRAG(frag));
   1518 
   1519 			pe_start += nptes * 8;
   1520 			dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
   1521 
   1522 			frag_start = upd_end;
   1523 			if (frag_start >= frag_end) {
   1524 				/* figure out the next fragment */
   1525 				amdgpu_vm_fragment(params, frag_start, end,
   1526 						   flags, &frag, &frag_end);
   1527 				if (frag < shift)
   1528 					break;
   1529 			}
   1530 		} while (frag_start < entry_end);
   1531 
   1532 		if (amdgpu_vm_pt_descendant(adev, &cursor)) {
   1533 			/* Free all child entries.
   1534 			 * Update the tables with the flags and addresses and free up subsequent
   1535 			 * tables in the case of huge pages or freed up areas.
   1536 			 * This is the maximum you can free, because all other page tables are not
   1537 			 * completely covered by the range and so potentially still in use.
   1538 			 */
   1539 			while (cursor.pfn < frag_start) {
   1540 				amdgpu_vm_free_pts(adev, params->vm, &cursor);
   1541 				amdgpu_vm_pt_next(adev, &cursor);
   1542 			}
   1543 
   1544 		} else if (frag >= shift) {
   1545 			/* or just move on to the next on the same level. */
   1546 			amdgpu_vm_pt_next(adev, &cursor);
   1547 		}
   1548 	}
   1549 
   1550 	return 0;
   1551 }
   1552 
   1553 /**
   1554  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
   1555  *
   1556  * @adev: amdgpu_device pointer
   1557  * @vm: requested vm
   1558  * @direct: direct submission in a page fault
   1559  * @exclusive: fence we need to sync to
   1560  * @start: start of mapped range
   1561  * @last: last mapped entry
   1562  * @flags: flags for the entries
   1563  * @addr: addr to set the area to
   1564  * @pages_addr: DMA addresses to use for mapping
   1565  * @fence: optional resulting fence
   1566  *
   1567  * Fill in the page table entries between @start and @last.
   1568  *
   1569  * Returns:
   1570  * 0 for success, -EINVAL for failure.
   1571  */
   1572 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
   1573 				       struct amdgpu_vm *vm, bool direct,
   1574 				       struct dma_fence *exclusive,
   1575 				       uint64_t start, uint64_t last,
   1576 				       uint64_t flags, uint64_t addr,
   1577 				       dma_addr_t *pages_addr,
   1578 				       struct dma_fence **fence)
   1579 {
   1580 	struct amdgpu_vm_update_params params;
   1581 	void *owner = AMDGPU_FENCE_OWNER_VM;
   1582 	int r;
   1583 
   1584 	memset(&params, 0, sizeof(params));
   1585 	params.adev = adev;
   1586 	params.vm = vm;
   1587 	params.direct = direct;
   1588 	params.pages_addr = pages_addr;
   1589 
   1590 	/* sync to everything except eviction fences on unmapping */
   1591 	if (!(flags & AMDGPU_PTE_VALID))
   1592 		owner = AMDGPU_FENCE_OWNER_KFD;
   1593 
   1594 	amdgpu_vm_eviction_lock(vm);
   1595 	if (vm->evicting) {
   1596 		r = -EBUSY;
   1597 		goto error_unlock;
   1598 	}
   1599 
   1600 	r = vm->update_funcs->prepare(&params, owner, exclusive);
   1601 	if (r)
   1602 		goto error_unlock;
   1603 
   1604 	r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
   1605 	if (r)
   1606 		goto error_unlock;
   1607 
   1608 	r = vm->update_funcs->commit(&params, fence);
   1609 
   1610 error_unlock:
   1611 	amdgpu_vm_eviction_unlock(vm);
   1612 	return r;
   1613 }
   1614 
   1615 /**
   1616  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
   1617  *
   1618  * @adev: amdgpu_device pointer
   1619  * @exclusive: fence we need to sync to
   1620  * @pages_addr: DMA addresses to use for mapping
   1621  * @vm: requested vm
   1622  * @mapping: mapped range and flags to use for the update
   1623  * @flags: HW flags for the mapping
    1624  * @bo_adev: amdgpu_device pointer of the device the BO was actually allocated on
   1625  * @nodes: array of drm_mm_nodes with the MC addresses
   1626  * @fence: optional resulting fence
   1627  *
   1628  * Split the mapping into smaller chunks so that each update fits
   1629  * into a SDMA IB.
   1630  *
   1631  * Returns:
   1632  * 0 for success, -EINVAL for failure.
   1633  */
   1634 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
   1635 				      struct dma_fence *exclusive,
   1636 				      dma_addr_t *pages_addr,
   1637 				      struct amdgpu_vm *vm,
   1638 				      struct amdgpu_bo_va_mapping *mapping,
   1639 				      uint64_t flags,
   1640 				      struct amdgpu_device *bo_adev,
   1641 				      struct drm_mm_node *nodes,
   1642 				      struct dma_fence **fence)
   1643 {
   1644 	unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
   1645 	uint64_t pfn, start = mapping->start;
   1646 	int r;
   1647 
    1648 	/* Normally bo_va->flags only contains the READABLE and WRITEABLE
    1649 	 * bits here, but just in case we filter the flags first.
    1650 	 */
   1651 	if (!(mapping->flags & AMDGPU_PTE_READABLE))
   1652 		flags &= ~AMDGPU_PTE_READABLE;
   1653 	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
   1654 		flags &= ~AMDGPU_PTE_WRITEABLE;
   1655 
   1656 	/* Apply ASIC specific mapping flags */
   1657 	amdgpu_gmc_get_vm_pte(adev, mapping, &flags);
   1658 
   1659 	trace_amdgpu_vm_bo_update(mapping);
   1660 
   1661 	pfn = mapping->offset >> PAGE_SHIFT;
   1662 	if (nodes) {
   1663 		while (pfn >= nodes->size) {
   1664 			pfn -= nodes->size;
   1665 			++nodes;
   1666 		}
   1667 	}
   1668 
   1669 	do {
   1670 		dma_addr_t *dma_addr = NULL;
   1671 		uint64_t max_entries;
   1672 		uint64_t addr, last;
   1673 
   1674 		if (nodes) {
   1675 			addr = nodes->start << PAGE_SHIFT;
   1676 			max_entries = (nodes->size - pfn) *
   1677 				AMDGPU_GPU_PAGES_IN_CPU_PAGE;
   1678 		} else {
   1679 			addr = 0;
   1680 			max_entries = S64_MAX;
   1681 		}
   1682 
   1683 		if (pages_addr) {
   1684 			uint64_t count;
   1685 
   1686 			for (count = 1;
   1687 			     count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
   1688 			     ++count) {
   1689 				uint64_t idx = pfn + count;
   1690 
   1691 				if (pages_addr[idx] !=
   1692 				    (pages_addr[idx - 1] + PAGE_SIZE))
   1693 					break;
   1694 			}
   1695 
   1696 			if (count < min_linear_pages) {
   1697 				addr = pfn << PAGE_SHIFT;
   1698 				dma_addr = pages_addr;
   1699 			} else {
   1700 				addr = pages_addr[pfn];
   1701 				max_entries = count *
   1702 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
   1703 			}
   1704 
   1705 		} else if (flags & AMDGPU_PTE_VALID) {
   1706 			addr += bo_adev->vm_manager.vram_base_offset;
   1707 			addr += pfn << PAGE_SHIFT;
   1708 		}
   1709 
   1710 		last = min((uint64_t)mapping->last, start + max_entries - 1);
   1711 		r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
   1712 						start, last, flags, addr,
   1713 						dma_addr, fence);
   1714 		if (r)
   1715 			return r;
   1716 
   1717 		pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
   1718 		if (nodes && nodes->size == pfn) {
   1719 			pfn = 0;
   1720 			++nodes;
   1721 		}
   1722 		start = last + 1;
   1723 
   1724 	} while (unlikely(start != mapping->last + 1));
   1725 
   1726 	return 0;
   1727 }
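
/*
 * Illustrative walk-through (annotation, not part of the driver): consider a
 * system-memory BO whose CPU pages form two physically contiguous runs,
 * pages_addr[] = { A, A + 4K, B, B + 4K }, and assume min_linear_pages = 2
 * for the sake of the example.  The contiguity scan above finds count = 2
 * for the first run, so amdgpu_vm_bo_update_mapping() is called once with
 * addr = A covering two pages, then again with addr = B for the remaining
 * two.  Each resulting update stays small enough to fit into a single
 * SDMA IB.
 */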
   1728 
   1729 /**
   1730  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
   1731  *
   1732  * @adev: amdgpu_device pointer
   1733  * @bo_va: requested BO and VM object
   1734  * @clear: if true clear the entries
   1735  *
   1736  * Fill in the page table entries for @bo_va.
   1737  *
   1738  * Returns:
    1739  * 0 for success, negative error code for failure.
   1740  */
   1741 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
   1742 			bool clear)
   1743 {
   1744 	struct amdgpu_bo *bo = bo_va->base.bo;
   1745 	struct amdgpu_vm *vm = bo_va->base.vm;
   1746 	struct amdgpu_bo_va_mapping *mapping;
   1747 	dma_addr_t *pages_addr = NULL;
   1748 	struct ttm_mem_reg *mem;
   1749 	struct drm_mm_node *nodes;
   1750 	struct dma_fence *exclusive, **last_update;
   1751 	uint64_t flags;
   1752 	struct amdgpu_device *bo_adev = adev;
   1753 	int r;
   1754 
   1755 	if (clear || !bo) {
   1756 		mem = NULL;
   1757 		nodes = NULL;
   1758 		exclusive = NULL;
   1759 	} else {
   1760 		struct ttm_dma_tt *ttm;
   1761 
   1762 		mem = &bo->tbo.mem;
   1763 		nodes = mem->mm_node;
   1764 		if (mem->mem_type == TTM_PL_TT) {
   1765 			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
   1766 			pages_addr = ttm->dma_address;
   1767 		}
   1768 		exclusive = bo->tbo.moving;
   1769 	}
   1770 
   1771 	if (bo) {
   1772 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
   1773 		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
   1774 	} else {
   1775 		flags = 0x0;
   1776 	}
   1777 
   1778 	if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv))
   1779 		last_update = &vm->last_update;
   1780 	else
   1781 		last_update = &bo_va->last_pt_update;
   1782 
   1783 	if (!clear && bo_va->base.moved) {
   1784 		bo_va->base.moved = false;
   1785 		list_splice_init(&bo_va->valids, &bo_va->invalids);
   1786 
   1787 	} else if (bo_va->cleared != clear) {
   1788 		list_splice_init(&bo_va->valids, &bo_va->invalids);
   1789 	}
   1790 
   1791 	list_for_each_entry(mapping, &bo_va->invalids, list) {
   1792 		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
   1793 					       mapping, flags, bo_adev, nodes,
   1794 					       last_update);
   1795 		if (r)
   1796 			return r;
   1797 	}
   1798 
   1799 	/* If the BO is not in its preferred location add it back to
   1800 	 * the evicted list so that it gets validated again on the
   1801 	 * next command submission.
   1802 	 */
   1803 	if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
   1804 		uint32_t mem_type = bo->tbo.mem.mem_type;
   1805 
   1806 		if (!(bo->preferred_domains &
   1807 		      amdgpu_mem_type_to_domain(mem_type)))
   1808 			amdgpu_vm_bo_evicted(&bo_va->base);
   1809 		else
   1810 			amdgpu_vm_bo_idle(&bo_va->base);
   1811 	} else {
   1812 		amdgpu_vm_bo_done(&bo_va->base);
   1813 	}
   1814 
   1815 	list_splice_init(&bo_va->invalids, &bo_va->valids);
   1816 	bo_va->cleared = clear;
   1817 
   1818 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
   1819 		list_for_each_entry(mapping, &bo_va->valids, list)
   1820 			trace_amdgpu_vm_bo_mapping(mapping);
   1821 	}
   1822 
   1823 	return 0;
   1824 }
   1825 
   1826 /**
   1827  * amdgpu_vm_update_prt_state - update the global PRT state
   1828  *
   1829  * @adev: amdgpu_device pointer
   1830  */
   1831 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
   1832 {
   1833 	unsigned long flags;
   1834 	bool enable;
   1835 
   1836 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
   1837 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
   1838 	adev->gmc.gmc_funcs->set_prt(adev, enable);
   1839 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
   1840 }
   1841 
   1842 /**
   1843  * amdgpu_vm_prt_get - add a PRT user
   1844  *
   1845  * @adev: amdgpu_device pointer
   1846  */
   1847 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
   1848 {
   1849 	if (!adev->gmc.gmc_funcs->set_prt)
   1850 		return;
   1851 
   1852 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
   1853 		amdgpu_vm_update_prt_state(adev);
   1854 }
   1855 
   1856 /**
   1857  * amdgpu_vm_prt_put - drop a PRT user
   1858  *
   1859  * @adev: amdgpu_device pointer
   1860  */
   1861 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
   1862 {
   1863 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
   1864 		amdgpu_vm_update_prt_state(adev);
   1865 }
   1866 
   1867 /**
   1868  * amdgpu_vm_prt_cb - callback for updating the PRT status
   1869  *
   1870  * @fence: fence for the callback
   1871  * @_cb: the callback function
   1872  */
   1873 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
   1874 {
   1875 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
   1876 
   1877 	amdgpu_vm_prt_put(cb->adev);
   1878 	kfree(cb);
   1879 }
   1880 
   1881 /**
   1882  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
   1883  *
   1884  * @adev: amdgpu_device pointer
   1885  * @fence: fence for the callback
   1886  */
   1887 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
   1888 				 struct dma_fence *fence)
   1889 {
   1890 	struct amdgpu_prt_cb *cb;
   1891 
   1892 	if (!adev->gmc.gmc_funcs->set_prt)
   1893 		return;
   1894 
   1895 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
   1896 	if (!cb) {
   1897 		/* Last resort when we are OOM */
   1898 		if (fence)
   1899 			dma_fence_wait(fence, false);
   1900 
   1901 		amdgpu_vm_prt_put(adev);
   1902 	} else {
   1903 		cb->adev = adev;
   1904 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
   1905 						     amdgpu_vm_prt_cb))
   1906 			amdgpu_vm_prt_cb(fence, &cb->cb);
   1907 	}
   1908 }
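
/*
 * Lifecycle sketch (annotation only; the name in angle brackets is a
 * placeholder): a PRT mapping holds one reference on the global PRT state
 * for its whole lifetime.  Mapping takes the reference, and unmapping
 * releases it only once the unmap work on the GPU has finished:
 *
 *	amdgpu_vm_prt_get(adev);			// on map of a PRT range
 *	...
 *	amdgpu_vm_add_prt_cb(adev, <unmap fence>);	// put runs from the
 *							// fence callback
 *
 * The final put flips the hardware PRT state off via
 * amdgpu_vm_update_prt_state().
 */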
   1909 
   1910 /**
   1911  * amdgpu_vm_free_mapping - free a mapping
   1912  *
   1913  * @adev: amdgpu_device pointer
   1914  * @vm: requested vm
   1915  * @mapping: mapping to be freed
   1916  * @fence: fence of the unmap operation
   1917  *
   1918  * Free a mapping and make sure we decrease the PRT usage count if applicable.
   1919  */
   1920 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
   1921 				   struct amdgpu_vm *vm,
   1922 				   struct amdgpu_bo_va_mapping *mapping,
   1923 				   struct dma_fence *fence)
   1924 {
   1925 	if (mapping->flags & AMDGPU_PTE_PRT)
   1926 		amdgpu_vm_add_prt_cb(adev, fence);
   1927 	kfree(mapping);
   1928 }
   1929 
   1930 /**
   1931  * amdgpu_vm_prt_fini - finish all prt mappings
   1932  *
   1933  * @adev: amdgpu_device pointer
   1934  * @vm: requested vm
   1935  *
   1936  * Register a cleanup callback to disable PRT support after VM dies.
   1937  */
   1938 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
   1939 {
   1940 	struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
   1941 	struct dma_fence *excl, **shared;
   1942 	unsigned i, shared_count;
   1943 	int r;
   1944 
    1945 	r = dma_resv_get_fences_rcu(resv, &excl,
    1946 				    &shared_count, &shared);
   1947 	if (r) {
   1948 		/* Not enough memory to grab the fence list, as last resort
   1949 		 * block for all the fences to complete.
   1950 		 */
    1951 		dma_resv_wait_timeout_rcu(resv, true, false,
    1952 					  MAX_SCHEDULE_TIMEOUT);
   1953 		return;
   1954 	}
   1955 
   1956 	/* Add a callback for each fence in the reservation object */
   1957 	amdgpu_vm_prt_get(adev);
   1958 	amdgpu_vm_add_prt_cb(adev, excl);
   1959 
   1960 	for (i = 0; i < shared_count; ++i) {
   1961 		amdgpu_vm_prt_get(adev);
   1962 		amdgpu_vm_add_prt_cb(adev, shared[i]);
   1963 	}
   1964 
   1965 	kfree(shared);
   1966 }
   1967 
   1968 /**
   1969  * amdgpu_vm_clear_freed - clear freed BOs in the PT
   1970  *
   1971  * @adev: amdgpu_device pointer
   1972  * @vm: requested vm
   1973  * @fence: optional resulting fence (unchanged if no work needed to be done
   1974  * or if an error occurred)
   1975  *
   1976  * Make sure all freed BOs are cleared in the PT.
   1977  * PTs have to be reserved and mutex must be locked!
   1978  *
   1979  * Returns:
   1980  * 0 for success.
   1981  *
   1982  */
   1983 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
   1984 			  struct amdgpu_vm *vm,
   1985 			  struct dma_fence **fence)
   1986 {
   1987 	struct amdgpu_bo_va_mapping *mapping;
   1988 	uint64_t init_pte_value = 0;
   1989 	struct dma_fence *f = NULL;
   1990 	int r;
   1991 
   1992 	while (!list_empty(&vm->freed)) {
   1993 		mapping = list_first_entry(&vm->freed,
   1994 			struct amdgpu_bo_va_mapping, list);
   1995 		list_del(&mapping->list);
   1996 
   1997 		if (vm->pte_support_ats &&
   1998 		    mapping->start < AMDGPU_GMC_HOLE_START)
   1999 			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
   2000 
   2001 		r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL,
   2002 						mapping->start, mapping->last,
   2003 						init_pte_value, 0, NULL, &f);
   2004 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
   2005 		if (r) {
   2006 			dma_fence_put(f);
   2007 			return r;
   2008 		}
   2009 	}
   2010 
   2011 	if (fence && f) {
   2012 		dma_fence_put(*fence);
   2013 		*fence = f;
   2014 	} else {
   2015 		dma_fence_put(f);
   2016 	}
   2017 
   2018 	return 0;
   2019 
   2020 }
   2021 
   2022 /**
   2023  * amdgpu_vm_handle_moved - handle moved BOs in the PT
   2024  *
   2025  * @adev: amdgpu_device pointer
   2026  * @vm: requested vm
   2027  *
   2028  * Make sure all BOs which are moved are updated in the PTs.
   2029  *
   2030  * Returns:
   2031  * 0 for success.
   2032  *
   2033  * PTs have to be reserved!
   2034  */
   2035 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
   2036 			   struct amdgpu_vm *vm)
   2037 {
   2038 	struct amdgpu_bo_va *bo_va, *tmp;
   2039 	struct dma_resv *resv;
   2040 	bool clear;
   2041 	int r;
   2042 
   2043 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
    2044 		/* Per VM BOs never need to be cleared in the page tables */
   2045 		r = amdgpu_vm_bo_update(adev, bo_va, false);
   2046 		if (r)
   2047 			return r;
   2048 	}
   2049 
   2050 	spin_lock(&vm->invalidated_lock);
   2051 	while (!list_empty(&vm->invalidated)) {
   2052 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
   2053 					 base.vm_status);
   2054 		resv = bo_va->base.bo->tbo.base.resv;
   2055 		spin_unlock(&vm->invalidated_lock);
   2056 
   2057 		/* Try to reserve the BO to avoid clearing its ptes */
   2058 		if (!amdgpu_vm_debug && dma_resv_trylock(resv))
   2059 			clear = false;
   2060 		/* Somebody else is using the BO right now */
   2061 		else
   2062 			clear = true;
   2063 
   2064 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
   2065 		if (r)
   2066 			return r;
   2067 
   2068 		if (!clear)
   2069 			dma_resv_unlock(resv);
   2070 		spin_lock(&vm->invalidated_lock);
   2071 	}
   2072 	spin_unlock(&vm->invalidated_lock);
   2073 
   2074 	return 0;
   2075 }
   2076 
   2077 /**
   2078  * amdgpu_vm_bo_add - add a bo to a specific vm
   2079  *
   2080  * @adev: amdgpu_device pointer
   2081  * @vm: requested vm
   2082  * @bo: amdgpu buffer object
   2083  *
   2084  * Add @bo into the requested vm.
   2085  * Add @bo to the list of bos associated with the vm
   2086  *
   2087  * Returns:
   2088  * Newly added bo_va or NULL for failure
   2089  *
   2090  * Object has to be reserved!
   2091  */
   2092 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
   2093 				      struct amdgpu_vm *vm,
   2094 				      struct amdgpu_bo *bo)
   2095 {
   2096 	struct amdgpu_bo_va *bo_va;
   2097 
   2098 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
   2099 	if (bo_va == NULL) {
   2100 		return NULL;
   2101 	}
   2102 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
   2103 
   2104 	bo_va->ref_count = 1;
   2105 	INIT_LIST_HEAD(&bo_va->valids);
   2106 	INIT_LIST_HEAD(&bo_va->invalids);
   2107 
   2108 	if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
   2109 	    (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
   2110 		bo_va->is_xgmi = true;
   2111 		mutex_lock(&adev->vm_manager.lock_pstate);
   2112 		/* Power up XGMI if it can be potentially used */
   2113 		if (++adev->vm_manager.xgmi_map_counter == 1)
   2114 			amdgpu_xgmi_set_pstate(adev, 1);
   2115 		mutex_unlock(&adev->vm_manager.lock_pstate);
   2116 	}
   2117 
   2118 	return bo_va;
   2119 }
   2120 
   2121 
   2122 /**
    2123  * amdgpu_vm_bo_insert_map - insert a new mapping
   2124  *
   2125  * @adev: amdgpu_device pointer
   2126  * @bo_va: bo_va to store the address
   2127  * @mapping: the mapping to insert
   2128  *
   2129  * Insert a new mapping into all structures.
   2130  */
   2131 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
   2132 				    struct amdgpu_bo_va *bo_va,
   2133 				    struct amdgpu_bo_va_mapping *mapping)
   2134 {
   2135 	struct amdgpu_vm *vm = bo_va->base.vm;
   2136 	struct amdgpu_bo *bo = bo_va->base.bo;
   2137 
   2138 	mapping->bo_va = bo_va;
   2139 	list_add(&mapping->list, &bo_va->invalids);
   2140 	amdgpu_vm_it_insert(mapping, &vm->va);
   2141 
   2142 	if (mapping->flags & AMDGPU_PTE_PRT)
   2143 		amdgpu_vm_prt_get(adev);
   2144 
   2145 	if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv &&
   2146 	    !bo_va->base.moved) {
   2147 		list_move(&bo_va->base.vm_status, &vm->moved);
   2148 	}
   2149 	trace_amdgpu_vm_bo_map(bo_va, mapping);
   2150 }
   2151 
   2152 /**
   2153  * amdgpu_vm_bo_map - map bo inside a vm
   2154  *
   2155  * @adev: amdgpu_device pointer
   2156  * @bo_va: bo_va to store the address
   2157  * @saddr: where to map the BO
   2158  * @offset: requested offset in the BO
   2159  * @size: BO size in bytes
   2160  * @flags: attributes of pages (read/write/valid/etc.)
   2161  *
    2162  * Add a mapping of the BO at the specified addr into the VM.
   2163  *
   2164  * Returns:
   2165  * 0 for success, error for failure.
   2166  *
   2167  * Object has to be reserved and unreserved outside!
   2168  */
   2169 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
   2170 		     struct amdgpu_bo_va *bo_va,
   2171 		     uint64_t saddr, uint64_t offset,
   2172 		     uint64_t size, uint64_t flags)
   2173 {
   2174 	struct amdgpu_bo_va_mapping *mapping, *tmp;
   2175 	struct amdgpu_bo *bo = bo_va->base.bo;
   2176 	struct amdgpu_vm *vm = bo_va->base.vm;
   2177 	uint64_t eaddr;
   2178 
   2179 	/* validate the parameters */
   2180 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
   2181 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
   2182 		return -EINVAL;
   2183 
   2184 	/* make sure object fit at this offset */
   2185 	eaddr = saddr + size - 1;
   2186 	if (saddr >= eaddr ||
   2187 	    (bo && offset + size > amdgpu_bo_size(bo)))
   2188 		return -EINVAL;
   2189 
   2190 	saddr /= AMDGPU_GPU_PAGE_SIZE;
   2191 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
   2192 
   2193 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
   2194 	if (tmp) {
   2195 		/* bo and tmp overlap, invalid addr */
    2196 		dev_err(adev->dev, "bo %p va 0x%010"PRIx64"-0x%010"PRIx64" conflict with "
    2197 			"0x%010"PRIx64"-0x%010"PRIx64"\n", bo, saddr, eaddr,
    2198 			tmp->start, tmp->last + 1);
   2199 		return -EINVAL;
   2200 	}
   2201 
   2202 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
   2203 	if (!mapping)
   2204 		return -ENOMEM;
   2205 
   2206 	mapping->start = saddr;
   2207 	mapping->last = eaddr;
   2208 	mapping->offset = offset;
   2209 	mapping->flags = flags;
   2210 
   2211 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
   2212 
   2213 	return 0;
   2214 }
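
/*
 * Usage sketch (hypothetical caller, not taken from this file): with the BO
 * reserved, map it read/write at a page-aligned GPU VA.  @saddr, @offset and
 * @size must all be AMDGPU_GPU_PAGE_SIZE aligned or -EINVAL is returned:
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, gpu_va, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *	if (r)
 *		goto error;	// e.g. the range overlaps another mapping
 */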
   2215 
   2216 /**
   2217  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
   2218  *
   2219  * @adev: amdgpu_device pointer
   2220  * @bo_va: bo_va to store the address
   2221  * @saddr: where to map the BO
   2222  * @offset: requested offset in the BO
   2223  * @size: BO size in bytes
   2224  * @flags: attributes of pages (read/write/valid/etc.)
   2225  *
    2226  * Add a mapping of the BO at the specified addr into the VM. Replace existing
   2227  * mappings as we do so.
   2228  *
   2229  * Returns:
   2230  * 0 for success, error for failure.
   2231  *
   2232  * Object has to be reserved and unreserved outside!
   2233  */
   2234 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
   2235 			     struct amdgpu_bo_va *bo_va,
   2236 			     uint64_t saddr, uint64_t offset,
   2237 			     uint64_t size, uint64_t flags)
   2238 {
   2239 	struct amdgpu_bo_va_mapping *mapping;
   2240 	struct amdgpu_bo *bo = bo_va->base.bo;
   2241 	uint64_t eaddr;
   2242 	int r;
   2243 
   2244 	/* validate the parameters */
   2245 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
   2246 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
   2247 		return -EINVAL;
   2248 
   2249 	/* make sure object fit at this offset */
   2250 	eaddr = saddr + size - 1;
   2251 	if (saddr >= eaddr ||
   2252 	    (bo && offset + size > amdgpu_bo_size(bo)))
   2253 		return -EINVAL;
   2254 
   2255 	/* Allocate all the needed memory */
   2256 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
   2257 	if (!mapping)
   2258 		return -ENOMEM;
   2259 
   2260 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
   2261 	if (r) {
   2262 		kfree(mapping);
   2263 		return r;
   2264 	}
   2265 
   2266 	saddr /= AMDGPU_GPU_PAGE_SIZE;
   2267 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
   2268 
   2269 	mapping->start = saddr;
   2270 	mapping->last = eaddr;
   2271 	mapping->offset = offset;
   2272 	mapping->flags = flags;
   2273 
   2274 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
   2275 
   2276 	return 0;
   2277 }
   2278 
   2279 /**
   2280  * amdgpu_vm_bo_unmap - remove bo mapping from vm
   2281  *
   2282  * @adev: amdgpu_device pointer
   2283  * @bo_va: bo_va to remove the address from
    2284  * @saddr: where the BO is mapped
   2285  *
    2286  * Remove a mapping of the BO at the specified addr from the VM.
   2287  *
   2288  * Returns:
   2289  * 0 for success, error for failure.
   2290  *
   2291  * Object has to be reserved and unreserved outside!
   2292  */
   2293 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
   2294 		       struct amdgpu_bo_va *bo_va,
   2295 		       uint64_t saddr)
   2296 {
   2297 	struct amdgpu_bo_va_mapping *mapping;
   2298 	struct amdgpu_vm *vm = bo_va->base.vm;
   2299 	bool valid = true;
   2300 
   2301 	saddr /= AMDGPU_GPU_PAGE_SIZE;
   2302 
   2303 	list_for_each_entry(mapping, &bo_va->valids, list) {
   2304 		if (mapping->start == saddr)
   2305 			break;
   2306 	}
   2307 
   2308 	if (&mapping->list == &bo_va->valids) {
   2309 		valid = false;
   2310 
   2311 		list_for_each_entry(mapping, &bo_va->invalids, list) {
   2312 			if (mapping->start == saddr)
   2313 				break;
   2314 		}
   2315 
   2316 		if (&mapping->list == &bo_va->invalids)
   2317 			return -ENOENT;
   2318 	}
   2319 
   2320 	list_del(&mapping->list);
   2321 	amdgpu_vm_it_remove(mapping, &vm->va);
   2322 	mapping->bo_va = NULL;
   2323 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
   2324 
   2325 	if (valid)
   2326 		list_add(&mapping->list, &vm->freed);
   2327 	else
   2328 		amdgpu_vm_free_mapping(adev, vm, mapping,
   2329 				       bo_va->last_pt_update);
   2330 
   2331 	return 0;
   2332 }
   2333 
   2334 /**
   2335  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
   2336  *
   2337  * @adev: amdgpu_device pointer
   2338  * @vm: VM structure to use
   2339  * @saddr: start of the range
   2340  * @size: size of the range
   2341  *
   2342  * Remove all mappings in a range, split them as appropriate.
   2343  *
   2344  * Returns:
   2345  * 0 for success, error for failure.
   2346  */
   2347 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
   2348 				struct amdgpu_vm *vm,
   2349 				uint64_t saddr, uint64_t size)
   2350 {
   2351 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
   2352 	LIST_HEAD(removed);
   2353 	uint64_t eaddr;
   2354 
   2355 	eaddr = saddr + size - 1;
   2356 	saddr /= AMDGPU_GPU_PAGE_SIZE;
   2357 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
   2358 
   2359 	/* Allocate all the needed memory */
   2360 	before = kzalloc(sizeof(*before), GFP_KERNEL);
   2361 	if (!before)
   2362 		return -ENOMEM;
   2363 	INIT_LIST_HEAD(&before->list);
   2364 
   2365 	after = kzalloc(sizeof(*after), GFP_KERNEL);
   2366 	if (!after) {
   2367 		kfree(before);
   2368 		return -ENOMEM;
   2369 	}
   2370 	INIT_LIST_HEAD(&after->list);
   2371 
   2372 	/* Now gather all removed mappings */
   2373 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
   2374 	while (tmp) {
   2375 		/* Remember mapping split at the start */
   2376 		if (tmp->start < saddr) {
   2377 			before->start = tmp->start;
   2378 			before->last = saddr - 1;
   2379 			before->offset = tmp->offset;
   2380 			before->flags = tmp->flags;
   2381 			before->bo_va = tmp->bo_va;
   2382 			list_add(&before->list, &tmp->bo_va->invalids);
   2383 		}
   2384 
   2385 		/* Remember mapping split at the end */
   2386 		if (tmp->last > eaddr) {
   2387 			after->start = eaddr + 1;
   2388 			after->last = tmp->last;
   2389 			after->offset = tmp->offset;
    2390 			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
   2391 			after->flags = tmp->flags;
   2392 			after->bo_va = tmp->bo_va;
   2393 			list_add(&after->list, &tmp->bo_va->invalids);
   2394 		}
   2395 
   2396 		list_del(&tmp->list);
   2397 		list_add(&tmp->list, &removed);
   2398 
   2399 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
   2400 	}
   2401 
   2402 	/* And free them up */
   2403 	list_for_each_entry_safe(tmp, next, &removed, list) {
   2404 		amdgpu_vm_it_remove(tmp, &vm->va);
   2405 		list_del(&tmp->list);
   2406 
    2407 		if (tmp->start < saddr)
    2408 			tmp->start = saddr;
    2409 		if (tmp->last > eaddr)
    2410 			tmp->last = eaddr;
   2411 
   2412 		tmp->bo_va = NULL;
   2413 		list_add(&tmp->list, &vm->freed);
   2414 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
   2415 	}
   2416 
   2417 	/* Insert partial mapping before the range */
   2418 	if (!list_empty(&before->list)) {
   2419 		amdgpu_vm_it_insert(before, &vm->va);
   2420 		if (before->flags & AMDGPU_PTE_PRT)
   2421 			amdgpu_vm_prt_get(adev);
   2422 	} else {
   2423 		kfree(before);
   2424 	}
   2425 
   2426 	/* Insert partial mapping after the range */
   2427 	if (!list_empty(&after->list)) {
   2428 		amdgpu_vm_it_insert(after, &vm->va);
   2429 		if (after->flags & AMDGPU_PTE_PRT)
   2430 			amdgpu_vm_prt_get(adev);
   2431 	} else {
   2432 		kfree(after);
   2433 	}
   2434 
   2435 	return 0;
   2436 }
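
/*
 * Worked example (annotation, not driver code): clearing the middle of an
 * existing mapping splits it in two.  If a mapping covers GPU pages
 * [0x100, 0x1ff] and the caller clears [0x140, 0x17f]:
 *
 *	before: start = 0x100, last = 0x13f, offset unchanged
 *	after:  start = 0x180, last = 0x1ff, offset advanced by the
 *		distance from the original start
 *
 * The clipped middle part [0x140, 0x17f] moves to vm->freed so its PTEs are
 * invalidated on the next amdgpu_vm_clear_freed().
 */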
   2437 
   2438 /**
   2439  * amdgpu_vm_bo_lookup_mapping - find mapping by address
   2440  *
   2441  * @vm: the requested VM
   2442  * @addr: the address
   2443  *
    2444  * Find a mapping by its address.
   2445  *
   2446  * Returns:
    2447  * The amdgpu_bo_va_mapping matching addr, or NULL
   2448  *
   2449  */
   2450 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
   2451 							 uint64_t addr)
   2452 {
   2453 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
   2454 }
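
/*
 * Usage sketch (hypothetical caller): the interval tree is indexed in GPU
 * pages, so a byte address is converted before the lookup:
 *
 *	mapping = amdgpu_vm_bo_lookup_mapping(vm,
 *					      addr / AMDGPU_GPU_PAGE_SIZE);
 *	if (!mapping)
 *		return -ENOENT;
 */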
   2455 
   2456 /**
   2457  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
   2458  *
   2459  * @vm: the requested vm
   2460  * @ticket: CS ticket
   2461  *
   2462  * Trace all mappings of BOs reserved during a command submission.
   2463  */
   2464 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
   2465 {
   2466 	struct amdgpu_bo_va_mapping *mapping;
   2467 
   2468 	if (!trace_amdgpu_vm_bo_cs_enabled())
   2469 		return;
   2470 
   2471 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
   2472 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
   2473 		if (mapping->bo_va && mapping->bo_va->base.bo) {
   2474 			struct amdgpu_bo *bo;
   2475 
   2476 			bo = mapping->bo_va->base.bo;
   2477 			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
   2478 			    ticket)
   2479 				continue;
   2480 		}
   2481 
   2482 		trace_amdgpu_vm_bo_cs(mapping);
   2483 	}
   2484 }
   2485 
   2486 /**
    2487  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
   2488  *
   2489  * @adev: amdgpu_device pointer
   2490  * @bo_va: requested bo_va
   2491  *
   2492  * Remove @bo_va->bo from the requested vm.
   2493  *
    2494  * Object has to be reserved!
   2495  */
   2496 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
   2497 		      struct amdgpu_bo_va *bo_va)
   2498 {
   2499 	struct amdgpu_bo_va_mapping *mapping, *next;
   2500 	struct amdgpu_bo *bo = bo_va->base.bo;
   2501 	struct amdgpu_vm *vm = bo_va->base.vm;
   2502 	struct amdgpu_vm_bo_base **base;
   2503 
   2504 	if (bo) {
   2505 		if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
   2506 			vm->bulk_moveable = false;
   2507 
   2508 		for (base = &bo_va->base.bo->vm_bo; *base;
   2509 		     base = &(*base)->next) {
   2510 			if (*base != &bo_va->base)
   2511 				continue;
   2512 
   2513 			*base = bo_va->base.next;
   2514 			break;
   2515 		}
   2516 	}
   2517 
   2518 	spin_lock(&vm->invalidated_lock);
   2519 	list_del(&bo_va->base.vm_status);
   2520 	spin_unlock(&vm->invalidated_lock);
   2521 
   2522 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
   2523 		list_del(&mapping->list);
   2524 		amdgpu_vm_it_remove(mapping, &vm->va);
   2525 		mapping->bo_va = NULL;
   2526 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
   2527 		list_add(&mapping->list, &vm->freed);
   2528 	}
   2529 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
   2530 		list_del(&mapping->list);
   2531 		amdgpu_vm_it_remove(mapping, &vm->va);
   2532 		amdgpu_vm_free_mapping(adev, vm, mapping,
   2533 				       bo_va->last_pt_update);
   2534 	}
   2535 
   2536 	dma_fence_put(bo_va->last_pt_update);
   2537 
   2538 	if (bo && bo_va->is_xgmi) {
   2539 		mutex_lock(&adev->vm_manager.lock_pstate);
   2540 		if (--adev->vm_manager.xgmi_map_counter == 0)
   2541 			amdgpu_xgmi_set_pstate(adev, 0);
   2542 		mutex_unlock(&adev->vm_manager.lock_pstate);
   2543 	}
   2544 
   2545 	kfree(bo_va);
   2546 }
   2547 
   2548 /**
   2549  * amdgpu_vm_evictable - check if we can evict a VM
   2550  *
   2551  * @bo: A page table of the VM.
   2552  *
   2553  * Check if it is possible to evict a VM.
   2554  */
   2555 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
   2556 {
   2557 	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
   2558 
   2559 	/* Page tables of a destroyed VM can go away immediately */
   2560 	if (!bo_base || !bo_base->vm)
   2561 		return true;
   2562 
   2563 	/* Don't evict VM page tables while they are busy */
   2564 	if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
   2565 		return false;
   2566 
   2567 	/* Try to block ongoing updates */
   2568 	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
   2569 		return false;
   2570 
   2571 	/* Don't evict VM page tables while they are updated */
   2572 	if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
   2573 	    !dma_fence_is_signaled(bo_base->vm->last_delayed)) {
   2574 		amdgpu_vm_eviction_unlock(bo_base->vm);
   2575 		return false;
   2576 	}
   2577 
   2578 	bo_base->vm->evicting = true;
   2579 	amdgpu_vm_eviction_unlock(bo_base->vm);
   2580 	return true;
   2581 }
   2582 
   2583 /**
   2584  * amdgpu_vm_bo_invalidate - mark the bo as invalid
   2585  *
   2586  * @adev: amdgpu_device pointer
   2587  * @bo: amdgpu buffer object
   2588  * @evicted: is the BO evicted
   2589  *
   2590  * Mark @bo as invalid.
   2591  */
   2592 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
   2593 			     struct amdgpu_bo *bo, bool evicted)
   2594 {
   2595 	struct amdgpu_vm_bo_base *bo_base;
   2596 
   2597 	/* shadow bo doesn't have bo base, its validation needs its parent */
   2598 	if (bo->parent && bo->parent->shadow == bo)
   2599 		bo = bo->parent;
   2600 
   2601 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
   2602 		struct amdgpu_vm *vm = bo_base->vm;
   2603 
   2604 		if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
   2605 			amdgpu_vm_bo_evicted(bo_base);
   2606 			continue;
   2607 		}
   2608 
   2609 		if (bo_base->moved)
   2610 			continue;
   2611 		bo_base->moved = true;
   2612 
   2613 		if (bo->tbo.type == ttm_bo_type_kernel)
   2614 			amdgpu_vm_bo_relocated(bo_base);
   2615 		else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
   2616 			amdgpu_vm_bo_moved(bo_base);
   2617 		else
   2618 			amdgpu_vm_bo_invalidated(bo_base);
   2619 	}
   2620 }
   2621 
   2622 /**
   2623  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
   2624  *
   2625  * @vm_size: VM size
   2626  *
   2627  * Returns:
    2628  * VM page table size as a power of two
   2629  */
   2630 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
   2631 {
   2632 	/* Total bits covered by PD + PTs */
   2633 	unsigned bits = ilog2(vm_size) + 18;
   2634 
    2635 	/* Make sure the PD is 4K in size up to 8GB of address space.
    2636 	   Above that, split equally between PD and PTs. */
   2637 	if (vm_size <= 8)
   2638 		return (bits - 9);
   2639 	else
   2640 		return ((bits + 3) / 2);
   2641 }
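
/*
 * Worked example (annotation): for vm_size = 8 GB, bits = ilog2(8) + 18 = 21
 * and the result is 21 - 9 = 12, leaving 9 bits for the PD, i.e. 512 entries
 * of 8 bytes = one 4K page directory.  For vm_size = 256 GB, bits = 26 and
 * the PTs get (26 + 3) / 2 = 14 bits.
 */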
   2642 
   2643 /**
   2644  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
   2645  *
   2646  * @adev: amdgpu_device pointer
   2647  * @min_vm_size: the minimum vm size in GB if it's set auto
   2648  * @fragment_size_default: Default PTE fragment size
   2649  * @max_level: max VMPT level
   2650  * @max_bits: max address space size in bits
   2651  *
   2652  */
   2653 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
   2654 			   uint32_t fragment_size_default, unsigned max_level,
   2655 			   unsigned max_bits)
   2656 {
   2657 	unsigned int max_size = 1 << (max_bits - 30);
   2658 	unsigned int vm_size;
   2659 	uint64_t tmp;
   2660 
   2661 	/* adjust vm size first */
   2662 	if (amdgpu_vm_size != -1) {
   2663 		vm_size = amdgpu_vm_size;
   2664 		if (vm_size > max_size) {
   2665 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
   2666 				 amdgpu_vm_size, max_size);
   2667 			vm_size = max_size;
   2668 		}
   2669 	} else {
   2670 		struct sysinfo si;
   2671 		unsigned int phys_ram_gb;
   2672 
   2673 		/* Optimal VM size depends on the amount of physical
   2674 		 * RAM available. Underlying requirements and
   2675 		 * assumptions:
   2676 		 *
   2677 		 *  - Need to map system memory and VRAM from all GPUs
   2678 		 *     - VRAM from other GPUs not known here
   2679 		 *     - Assume VRAM <= system memory
   2680 		 *  - On GFX8 and older, VM space can be segmented for
   2681 		 *    different MTYPEs
   2682 		 *  - Need to allow room for fragmentation, guard pages etc.
   2683 		 *
   2684 		 * This adds up to a rough guess of system memory x3.
   2685 		 * Round up to power of two to maximize the available
   2686 		 * VM size with the given page table size.
   2687 		 */
   2688 		si_meminfo(&si);
   2689 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
   2690 			       (1 << 30) - 1) >> 30;
   2691 		vm_size = roundup_pow_of_two(
   2692 			min(max(phys_ram_gb * 3, min_vm_size), max_size));
   2693 	}
   2694 
   2695 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
   2696 
   2697 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
   2698 	if (amdgpu_vm_block_size != -1)
   2699 		tmp >>= amdgpu_vm_block_size - 9;
   2700 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
   2701 	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
   2702 	switch (adev->vm_manager.num_level) {
   2703 	case 3:
   2704 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
   2705 		break;
   2706 	case 2:
   2707 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
   2708 		break;
   2709 	case 1:
   2710 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
   2711 		break;
   2712 	default:
   2713 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
   2714 	}
    2715 	/* block size depends on vm size and hw setup */
   2716 	if (amdgpu_vm_block_size != -1)
   2717 		adev->vm_manager.block_size =
   2718 			min((unsigned)amdgpu_vm_block_size, max_bits
   2719 			    - AMDGPU_GPU_PAGE_SHIFT
   2720 			    - 9 * adev->vm_manager.num_level);
   2721 	else if (adev->vm_manager.num_level > 1)
   2722 		adev->vm_manager.block_size = 9;
   2723 	else
   2724 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
   2725 
   2726 	if (amdgpu_vm_fragment_size == -1)
   2727 		adev->vm_manager.fragment_size = fragment_size_default;
   2728 	else
   2729 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
   2730 
   2731 	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
   2732 		 vm_size, adev->vm_manager.num_level + 1,
   2733 		 adev->vm_manager.block_size,
   2734 		 adev->vm_manager.fragment_size);
   2735 }
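
/*
 * Worked example (annotation, assuming default module parameters): with
 * 16 GB of system RAM, phys_ram_gb = 16, so
 * vm_size = roundup_pow_of_two(max(16 * 3, min_vm_size)) = 64 GB (for
 * min_vm_size <= 48 and max_size >= 64), and max_pfn = 64ULL << 18 = 2^24
 * GPU pages.
 */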
   2736 
   2737 /**
   2738  * amdgpu_vm_wait_idle - wait for the VM to become idle
   2739  *
   2740  * @vm: VM object to wait for
   2741  * @timeout: timeout to wait for VM to become idle
   2742  */
   2743 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
   2744 {
   2745 	timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
   2746 					    true, true, timeout);
   2747 	if (timeout <= 0)
   2748 		return timeout;
   2749 
   2750 	timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout);
   2751 	if (timeout <= 0)
   2752 		return timeout;
   2753 
   2754 	return dma_fence_wait_timeout(vm->last_delayed, true, timeout);
   2755 }
   2756 
   2757 /**
   2758  * amdgpu_vm_init - initialize a vm instance
   2759  *
   2760  * @adev: amdgpu_device pointer
   2761  * @vm: requested vm
    2762  * @vm_context: Indicates whether it is a GFX or Compute context
   2763  * @pasid: Process address space identifier
   2764  *
   2765  * Init @vm fields.
   2766  *
   2767  * Returns:
   2768  * 0 for success, error for failure.
   2769  */
   2770 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
   2771 		   int vm_context, unsigned int pasid)
   2772 {
   2773 	struct amdgpu_bo_param bp;
   2774 	struct amdgpu_bo *root;
   2775 	int r, i;
   2776 
   2777 #ifdef __NetBSD__
   2778 	interval_tree_init(&vm->va);
   2779 #else
   2780 	vm->va = RB_ROOT_CACHED;
   2781 #endif
   2782 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
   2783 		vm->reserved_vmid[i] = NULL;
   2784 	INIT_LIST_HEAD(&vm->evicted);
   2785 	INIT_LIST_HEAD(&vm->relocated);
   2786 	INIT_LIST_HEAD(&vm->moved);
   2787 	INIT_LIST_HEAD(&vm->idle);
   2788 	INIT_LIST_HEAD(&vm->invalidated);
   2789 	spin_lock_init(&vm->invalidated_lock);
   2790 	INIT_LIST_HEAD(&vm->freed);
   2791 
   2792 
   2793 	/* create scheduler entities for page table updates */
   2794 	r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
   2795 				  adev->vm_manager.vm_pte_scheds,
   2796 				  adev->vm_manager.vm_pte_num_scheds, NULL);
   2797 	if (r)
   2798 		return r;
   2799 
   2800 	r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
   2801 				  adev->vm_manager.vm_pte_scheds,
   2802 				  adev->vm_manager.vm_pte_num_scheds, NULL);
   2803 	if (r)
   2804 		goto error_free_direct;
   2805 
   2806 	vm->pte_support_ats = false;
   2807 	vm->is_compute_context = false;
   2808 
   2809 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
   2810 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
   2811 						AMDGPU_VM_USE_CPU_FOR_COMPUTE);
   2812 
   2813 		if (adev->asic_type == CHIP_RAVEN)
   2814 			vm->pte_support_ats = true;
   2815 	} else {
   2816 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
   2817 						AMDGPU_VM_USE_CPU_FOR_GFX);
   2818 	}
   2819 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
   2820 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
   2821 	WARN_ONCE((vm->use_cpu_for_update &&
   2822 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
   2823 		  "CPU update of VM recommended only for large BAR system\n");
   2824 
   2825 	if (vm->use_cpu_for_update)
   2826 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
   2827 	else
   2828 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
   2829 	vm->last_update = NULL;
   2830 	vm->last_direct = dma_fence_get_stub();
   2831 	vm->last_delayed = dma_fence_get_stub();
   2832 
   2833 	mutex_init(&vm->eviction_lock);
   2834 	vm->evicting = false;
   2835 
   2836 	amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
   2837 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
   2838 		bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
   2839 	r = amdgpu_bo_create(adev, &bp, &root);
   2840 	if (r)
   2841 		goto error_free_delayed;
   2842 
   2843 	r = amdgpu_bo_reserve(root, true);
   2844 	if (r)
   2845 		goto error_free_root;
   2846 
   2847 	r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
   2848 	if (r)
   2849 		goto error_unreserve;
   2850 
   2851 	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
   2852 
   2853 	r = amdgpu_vm_clear_bo(adev, vm, root, false);
   2854 	if (r)
   2855 		goto error_unreserve;
   2856 
   2857 	amdgpu_bo_unreserve(vm->root.base.bo);
   2858 
   2859 	if (pasid) {
   2860 		unsigned long flags;
   2861 
   2862 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
   2863 		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
   2864 			      GFP_ATOMIC);
   2865 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
   2866 		if (r < 0)
   2867 			goto error_free_root;
   2868 
   2869 		vm->pasid = pasid;
   2870 	}
   2871 
   2872 	INIT_KFIFO(vm->faults);
   2873 
   2874 	return 0;
   2875 
   2876 error_unreserve:
   2877 	amdgpu_bo_unreserve(vm->root.base.bo);
   2878 
   2879 error_free_root:
   2880 	amdgpu_bo_unref(&vm->root.base.bo->shadow);
   2881 	amdgpu_bo_unref(&vm->root.base.bo);
   2882 	vm->root.base.bo = NULL;
   2883 
   2884 error_free_delayed:
   2885 	dma_fence_put(vm->last_direct);
   2886 	dma_fence_put(vm->last_delayed);
   2887 	drm_sched_entity_destroy(&vm->delayed);
   2888 
   2889 error_free_direct:
   2890 	drm_sched_entity_destroy(&vm->direct);
   2891 
   2892 	return r;
   2893 }
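
/*
 * Usage sketch (hypothetical caller): a GFX VM is typically created when a
 * file descriptor is opened and torn down with amdgpu_vm_fini() once all
 * mappings are gone:
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_vm_fini(adev, &fpriv->vm);
 */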
   2894 
   2895 /**
   2896  * amdgpu_vm_check_clean_reserved - check if a VM is clean
   2897  *
   2898  * @adev: amdgpu_device pointer
   2899  * @vm: the VM to check
   2900  *
    2901  * Check all entries of the root PD. If any subsequent PDs are allocated,
    2902  * it means page tables are being created and filled, so the VM is not
    2903  * clean.
   2904  *
   2905  * Returns:
   2906  *	0 if this VM is clean
   2907  */
   2908 static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
   2909 	struct amdgpu_vm *vm)
   2910 {
   2911 	enum amdgpu_vm_level root = adev->vm_manager.root_level;
   2912 	unsigned int entries = amdgpu_vm_num_entries(adev, root);
   2913 	unsigned int i = 0;
   2914 
   2915 	if (!(vm->root.entries))
   2916 		return 0;
   2917 
   2918 	for (i = 0; i < entries; i++) {
   2919 		if (vm->root.entries[i].base.bo)
   2920 			return -EINVAL;
   2921 	}
   2922 
   2923 	return 0;
   2924 }
   2925 
   2926 /**
   2927  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
   2928  *
   2929  * @adev: amdgpu_device pointer
   2930  * @vm: requested vm
   2931  * @pasid: pasid to use
   2932  *
   2933  * This only works on GFX VMs that don't have any BOs added and no
   2934  * page tables allocated yet.
   2935  *
   2936  * Changes the following VM parameters:
   2937  * - use_cpu_for_update
   2938  * - pte_supports_ats
   2939  * - pasid (old PASID is released, because compute manages its own PASIDs)
   2940  *
   2941  * Reinitializes the page directory to reflect the changed ATS
   2942  * setting.
   2943  *
   2944  * Returns:
   2945  * 0 for success, -errno for errors.
   2946  */
   2947 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
   2948 			   unsigned int pasid)
   2949 {
   2950 	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
   2951 	int r;
   2952 
   2953 	r = amdgpu_bo_reserve(vm->root.base.bo, true);
   2954 	if (r)
   2955 		return r;
   2956 
   2957 	/* Sanity checks */
   2958 	r = amdgpu_vm_check_clean_reserved(adev, vm);
   2959 	if (r)
   2960 		goto unreserve_bo;
   2961 
   2962 	if (pasid) {
   2963 		unsigned long flags;
   2964 
   2965 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
   2966 		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
   2967 			      GFP_ATOMIC);
   2968 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
   2969 
   2970 		if (r == -ENOSPC)
   2971 			goto unreserve_bo;
   2972 		r = 0;
   2973 	}
   2974 
   2975 	/* Check if PD needs to be reinitialized and do it before
   2976 	 * changing any other state, in case it fails.
   2977 	 */
   2978 	if (pte_support_ats != vm->pte_support_ats) {
   2979 		vm->pte_support_ats = pte_support_ats;
   2980 		r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
   2981 		if (r)
   2982 			goto free_idr;
   2983 	}
   2984 
   2985 	/* Update VM state */
   2986 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
   2987 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
   2988 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
   2989 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
   2990 	WARN_ONCE((vm->use_cpu_for_update &&
   2991 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
   2992 		  "CPU update of VM recommended only for large BAR system\n");
   2993 
   2994 	if (vm->use_cpu_for_update)
   2995 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
   2996 	else
   2997 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
   2998 	dma_fence_put(vm->last_update);
   2999 	vm->last_update = NULL;
   3000 	vm->is_compute_context = true;
   3001 
   3002 	if (vm->pasid) {
   3003 		unsigned long flags;
   3004 
   3005 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
   3006 		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
   3007 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
   3008 
   3009 		/* Free the original amdgpu allocated pasid
   3010 		 * Will be replaced with kfd allocated pasid
   3011 		 */
   3012 		amdgpu_pasid_free(vm->pasid);
   3013 		vm->pasid = 0;
   3014 	}
   3015 
   3016 	/* Free the shadow bo for compute VM */
   3017 	amdgpu_bo_unref(&vm->root.base.bo->shadow);
   3018 
   3019 	if (pasid)
   3020 		vm->pasid = pasid;
   3021 
   3022 	goto unreserve_bo;
   3023 
   3024 free_idr:
   3025 	if (pasid) {
   3026 		unsigned long flags;
   3027 
   3028 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
   3029 		idr_remove(&adev->vm_manager.pasid_idr, pasid);
   3030 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
   3031 	}
   3032 unreserve_bo:
   3033 	amdgpu_bo_unreserve(vm->root.base.bo);
   3034 	return r;
   3035 }
   3036 
   3037 /**
   3038  * amdgpu_vm_release_compute - release a compute vm
   3039  * @adev: amdgpu_device pointer
   3040  * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
   3041  *
    3042  * This is the counterpart of amdgpu_vm_make_compute. It decouples the
    3043  * compute pasid from the vm. Compute should stop using the vm after this call.
   3044  */
   3045 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
   3046 {
   3047 	if (vm->pasid) {
   3048 		unsigned long flags;
   3049 
   3050 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
   3051 		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
   3052 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
   3053 	}
   3054 	vm->pasid = 0;
   3055 	vm->is_compute_context = false;
   3056 }
   3057 
   3058 /**
   3059  * amdgpu_vm_fini - tear down a vm instance
   3060  *
   3061  * @adev: amdgpu_device pointer
   3062  * @vm: requested vm
   3063  *
   3064  * Tear down @vm.
   3065  * Unbind the VM and remove all bos from the vm bo list
   3066  */
   3067 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
   3068 {
   3069 	struct amdgpu_bo_va_mapping *mapping, *tmp;
   3070 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
   3071 	struct amdgpu_bo *root;
   3072 	int i;
   3073 
   3074 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
   3075 
   3076 	root = amdgpu_bo_ref(vm->root.base.bo);
   3077 	amdgpu_bo_reserve(root, true);
   3078 	if (vm->pasid) {
   3079 		unsigned long flags;
   3080 
   3081 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
   3082 		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
   3083 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
   3084 		vm->pasid = 0;
   3085 	}
   3086 
   3087 	dma_fence_wait(vm->last_direct, false);
   3088 	dma_fence_put(vm->last_direct);
   3089 	dma_fence_wait(vm->last_delayed, false);
   3090 	dma_fence_put(vm->last_delayed);
   3091 
   3092 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
   3093 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
   3094 			amdgpu_vm_prt_fini(adev, vm);
   3095 			prt_fini_needed = false;
   3096 		}
   3097 
   3098 		list_del(&mapping->list);
   3099 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
   3100 	}
   3101 
   3102 	amdgpu_vm_free_pts(adev, vm, NULL);
   3103 	amdgpu_bo_unreserve(root);
   3104 	amdgpu_bo_unref(&root);
   3105 	WARN_ON(vm->root.base.bo);
   3106 
   3107 	drm_sched_entity_destroy(&vm->direct);
   3108 	drm_sched_entity_destroy(&vm->delayed);
   3109 
   3110 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
   3111 		dev_err(adev->dev, "still active bo inside vm\n");
   3112 	}
   3113 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
   3114 					     &vm->va.rb_root, rb) {
   3115 		/* Don't remove the mapping here, we don't want to trigger a
   3116 		 * rebalance and the tree is about to be destroyed anyway.
   3117 		 */
   3118 		list_del(&mapping->list);
   3119 		kfree(mapping);
   3120 	}
   3121 
   3122 	dma_fence_put(vm->last_update);
   3123 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
   3124 		amdgpu_vmid_free_reserved(adev, vm, i);
   3125 }
   3126 
   3127 /**
   3128  * amdgpu_vm_manager_init - init the VM manager
   3129  *
   3130  * @adev: amdgpu_device pointer
   3131  *
   3132  * Initialize the VM manager structures
   3133  */
   3134 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
   3135 {
   3136 	unsigned i;
   3137 
   3138 	amdgpu_vmid_mgr_init(adev);
   3139 
   3140 	adev->vm_manager.fence_context =
   3141 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
   3142 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
   3143 		adev->vm_manager.seqno[i] = 0;
   3144 
   3145 	spin_lock_init(&adev->vm_manager.prt_lock);
   3146 	atomic_set(&adev->vm_manager.num_prt_users, 0);
   3147 
    3148 	/* Unless overridden by the user, compute VM tables are by default
    3149 	 * updated by the CPU only on large-BAR systems
   3150 	 */
   3151 #ifdef CONFIG_X86_64
   3152 	if (amdgpu_vm_update_mode == -1) {
   3153 		if (amdgpu_gmc_vram_full_visible(&adev->gmc))
   3154 			adev->vm_manager.vm_update_mode =
   3155 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
   3156 		else
   3157 			adev->vm_manager.vm_update_mode = 0;
   3158 	} else
   3159 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
   3160 #else
   3161 	adev->vm_manager.vm_update_mode = 0;
   3162 #endif
   3163 
   3164 	idr_init(&adev->vm_manager.pasid_idr);
   3165 	spin_lock_init(&adev->vm_manager.pasid_lock);
   3166 
   3167 	adev->vm_manager.xgmi_map_counter = 0;
   3168 	mutex_init(&adev->vm_manager.lock_pstate);
   3169 }
   3170 
   3171 /**
   3172  * amdgpu_vm_manager_fini - cleanup VM manager
   3173  *
   3174  * @adev: amdgpu_device pointer
   3175  *
   3176  * Cleanup the VM manager and free resources.
   3177  */
   3178 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
   3179 {
   3180 	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
   3181 	idr_destroy(&adev->vm_manager.pasid_idr);
   3182 
   3183 	amdgpu_vmid_mgr_fini(adev);
   3184 }
   3185 
   3186 /**
   3187  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
   3188  *
   3189  * @dev: drm device pointer
   3190  * @data: drm_amdgpu_vm
   3191  * @filp: drm file pointer
   3192  *
   3193  * Returns:
   3194  * 0 for success, -errno for errors.
   3195  */
   3196 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
   3197 {
   3198 	union drm_amdgpu_vm *args = data;
   3199 	struct amdgpu_device *adev = dev->dev_private;
   3200 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
   3201 	int r;
   3202 
   3203 	switch (args->in.op) {
   3204 	case AMDGPU_VM_OP_RESERVE_VMID:
    3205 		/* We only need to reserve a vmid from the gfxhub */
   3206 		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
   3207 					       AMDGPU_GFXHUB_0);
   3208 		if (r)
   3209 			return r;
   3210 		break;
   3211 	case AMDGPU_VM_OP_UNRESERVE_VMID:
   3212 		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
   3213 		break;
   3214 	default:
   3215 		return -EINVAL;
   3216 	}
   3217 
   3218 	return 0;
   3219 }
   3220 
   3221 /**
   3222  * amdgpu_vm_get_task_info - Extracts task info for a PASID.
   3223  *
   3224  * @adev: drm device pointer
   3225  * @pasid: PASID identifier for VM
   3226  * @task_info: task_info to fill.
   3227  */
   3228 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
   3229 			 struct amdgpu_task_info *task_info)
   3230 {
   3231 	struct amdgpu_vm *vm;
   3232 	unsigned long flags;
   3233 
   3234 	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
   3235 
   3236 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
   3237 	if (vm)
   3238 		*task_info = vm->task_info;
   3239 
   3240 	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
   3241 }
   3242 
   3243 /**
   3244  * amdgpu_vm_set_task_info - Sets VMs task info.
   3245  *
   3246  * @vm: vm for which to set the info
   3247  */
   3248 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
   3249 {
   3250 	if (vm->task_info.pid)
   3251 		return;
   3252 
   3253 	vm->task_info.pid = current->pid;
   3254 	get_task_comm(vm->task_info.task_name, current);
   3255 
   3256 	if (current->group_leader->mm != current->mm)
   3257 		return;
   3258 
   3259 	vm->task_info.tgid = current->group_leader->pid;
   3260 	get_task_comm(vm->task_info.process_name, current->group_leader);
   3261 }
   3262 
   3263 /**
   3264  * amdgpu_vm_handle_fault - graceful handling of VM faults.
   3265  * @adev: amdgpu device pointer
   3266  * @pasid: PASID of the VM
   3267  * @addr: Address of the fault
   3268  *
   3269  * Try to gracefully handle a VM fault. Return true if the fault was handled and
   3270  * shouldn't be reported any more.
   3271  */
   3272 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
   3273 			    uint64_t addr)
   3274 {
   3275 	struct amdgpu_bo *root;
   3276 	uint64_t value, flags;
   3277 	struct amdgpu_vm *vm;
   3278 	long r;
   3279 
   3280 	spin_lock(&adev->vm_manager.pasid_lock);
   3281 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
   3282 	if (vm)
   3283 		root = amdgpu_bo_ref(vm->root.base.bo);
   3284 	else
   3285 		root = NULL;
   3286 	spin_unlock(&adev->vm_manager.pasid_lock);
   3287 
   3288 	if (!root)
   3289 		return false;
   3290 
   3291 	r = amdgpu_bo_reserve(root, true);
   3292 	if (r)
   3293 		goto error_unref;
   3294 
   3295 	/* Double check that the VM still exists */
   3296 	spin_lock(&adev->vm_manager.pasid_lock);
   3297 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
   3298 	if (vm && vm->root.base.bo != root)
   3299 		vm = NULL;
   3300 	spin_unlock(&adev->vm_manager.pasid_lock);
   3301 	if (!vm)
   3302 		goto error_unlock;
   3303 
   3304 	addr /= AMDGPU_GPU_PAGE_SIZE;
   3305 	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
   3306 		AMDGPU_PTE_SYSTEM;
   3307 
   3308 	if (vm->is_compute_context) {
   3309 		/* Intentionally setting invalid PTE flag
   3310 		 * combination to force a no-retry-fault
   3311 		 */
   3312 		flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
   3313 			AMDGPU_PTE_TF;
   3314 		value = 0;
   3315 
   3316 	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
   3317 		/* Redirect the access to the dummy page */
   3318 		value = adev->dummy_page_addr;
   3319 		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
   3320 			AMDGPU_PTE_WRITEABLE;
   3321 
   3322 	} else {
   3323 		/* Let the hw retry silently on the PTE */
   3324 		value = 0;
   3325 	}
   3326 
   3327 	r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1,
   3328 					flags, value, NULL, NULL);
   3329 	if (r)
   3330 		goto error_unlock;
   3331 
   3332 	r = amdgpu_vm_update_pdes(adev, vm, true);
   3333 
   3334 error_unlock:
   3335 	amdgpu_bo_unreserve(root);
   3336 	if (r < 0)
   3337 		DRM_ERROR("Can't handle page fault (%ld)\n", r);
   3338 
   3339 error_unref:
   3340 	amdgpu_bo_unref(&root);
   3341 
   3342 	return false;
   3343 }
   3344