/*	$NetBSD: amdgpu_vm.c,v 1.3.6.3 2020/04/08 14:08:22 martin Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_vm.c,v 1.3.6.3 2020/04/08 14:08:22 martin Exp $");

#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

#include <linux/nbsd-namespace.h>

/*
 * GPUVM
 * GPUVM is similar to the legacy GART on older ASICs, but rather than
 * there being a single global GART table for the entire GPU, there are
 * multiple VM page tables active at any given time.  The VM page tables
 * can contain a mix of VRAM pages and system memory pages, and system
 * memory pages can be mapped as snooped (cached system pages) or
 * unsnooped (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
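
/*
 * Illustrative sketch (not part of the driver): with the two-level
 * layout described above, a GPU virtual address expressed in GPU page
 * frames selects one page directory entry and one page table entry.
 * Assuming AMDGPU_VM_PTE_COUNT == (1 << amdgpu_vm_block_size), the
 * indices are derived the same way amdgpu_vm_update_ptes() does below:
 *
 *	pde_idx = vaddr_pfn >> amdgpu_vm_block_size;
 *	pte_idx = vaddr_pfn & (AMDGPU_VM_PTE_COUNT - 1);
 */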

/**
 * amdgpu_vm_num_pdes - return the number of page directory entries
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
{
	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
}

/**
 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
}
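
/*
 * Worked example (hypothetical numbers): with max_pfn = 1 << 20, i.e. a
 * 4GB address space of 4KB GPU pages, and amdgpu_vm_block_size = 9,
 * amdgpu_vm_num_pdes() returns 1 << 11 = 2048 PDEs, so with 8 bytes per
 * entry amdgpu_vm_directory_size() yields 2048 * 8 = 16KB.
 */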

/**
 * amdgpu_vm_get_bos - add the vm BOs to a validation list
 *
 * @adev: amdgpu_device pointer
 * @vm: vm providing the BOs
 * @head: head of validation list
 *
 * Add the page directory to the list of BOs to
 * validate for command submission (cayman+).
 */
struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
					  struct amdgpu_vm *vm,
					  struct list_head *head)
{
	struct amdgpu_bo_list_entry *list;
	unsigned i, idx;

	list = drm_malloc_ab(vm->max_pde_used + 2,
			     sizeof(struct amdgpu_bo_list_entry));
	if (!list) {
		return NULL;
	}

	/* add the vm page table to the list */
	list[0].robj = vm->page_directory;
	list[0].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
	list[0].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
	list[0].priority = 0;
	list[0].tv.bo = &vm->page_directory->tbo;
	list[0].tv.shared = true;
	list_add(&list[0].tv.head, head);

	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
		if (!vm->page_tables[i].bo)
			continue;

		list[idx].robj = vm->page_tables[i].bo;
		list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
		list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
		list[idx].priority = 0;
		list[idx].tv.bo = &list[idx].robj->tbo;
		list[idx].tv.shared = true;
		list_add(&list[idx++].tv.head, head);
	}

	return list;
}
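
/*
 * Hypothetical usage sketch (assumed caller shape, not from this file):
 * the command-submission path builds a validation list roughly like
 *
 *	struct list_head head;
 *	struct amdgpu_bo_list_entry *entries;
 *
 *	INIT_LIST_HEAD(&head);
 *	entries = amdgpu_vm_get_bos(adev, vm, &head);
 *	if (!entries)
 *		return -ENOMEM;
 *	... validate the BOs queued on "head" ...
 *	drm_free_large(entries);
 *
 * drm_free_large() is assumed here as the counterpart of the
 * drm_malloc_ab() allocation above.
 */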

/**
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 *
 * Global mutex must be locked!
 */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync)
{
	struct fence *best[AMDGPU_MAX_RINGS] = {};
	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
	struct amdgpu_device *adev = ring->adev;

	unsigned choices[2] = {};
	unsigned i;

	/* check if the id is still valid */
	if (vm_id->id) {
		unsigned id = vm_id->id;
		long owner;

		owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
		if (owner == (long)vm) {
			trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
			return 0;
		}
	}

	/* we definitely need to flush */
	vm_id->pd_gpu_addr = ~0ll;

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < adev->vm_manager.nvm; ++i) {
		struct fence *fence = adev->vm_manager.ids[i].active;
		struct amdgpu_ring *fring;

		if (fence == NULL) {
			/* found a free one */
			vm_id->id = i;
			trace_amdgpu_vm_grab_id(i, ring->idx);
			return 0;
		}

		fring = amdgpu_ring_from_fence(fence);
		if (best[fring->idx] == NULL ||
		    fence_is_later(best[fring->idx], fence)) {
			best[fring->idx] = fence;
			choices[fring == ring ? 0 : 1] = i;
		}
	}

	for (i = 0; i < 2; ++i) {
		if (choices[i]) {
			struct fence *fence;

			fence = adev->vm_manager.ids[choices[i]].active;
			vm_id->id = choices[i];

			trace_amdgpu_vm_grab_id(choices[i], ring->idx);
			return amdgpu_sync_fence(ring->adev, sync, fence);
		}
	}

	/* should never happen */
	BUG();
	return -EINVAL;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @vm: vm we want to flush
 * @updates: last vm update that we waited for
 *
 * Flush the vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
void amdgpu_vm_flush(struct amdgpu_ring *ring,
		     struct amdgpu_vm *vm,
		     struct fence *updates)
{
	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
	struct fence *flushed_updates = vm_id->flushed_updates;
	bool is_later;

	if (!flushed_updates)
		is_later = true;
	else if (!updates)
		is_later = false;
	else
		is_later = fence_is_later(updates, flushed_updates);

	if (pd_addr != vm_id->pd_gpu_addr || is_later) {
		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
		if (is_later) {
			vm_id->flushed_updates = fence_get(updates);
			fence_put(flushed_updates);
		}
		vm_id->pd_gpu_addr = pd_addr;
		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
	}
}

/**
 * amdgpu_vm_fence - remember fence for vm
 *
 * @adev: amdgpu_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void amdgpu_vm_fence(struct amdgpu_device *adev,
		     struct amdgpu_vm *vm,
		     struct fence *fence)
{
	struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
	unsigned vm_id = vm->ids[ring->idx].id;

	fence_put(adev->vm_manager.ids[vm_id].active);
	adev->vm_manager.ids[vm_id].active = fence_get(fence);
	atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * amdgpu_vm_update_pages - helper to call the right asic function
 *
 * @adev: amdgpu_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 * @gtt_flags: GTT hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
				   struct amdgpu_ib *ib,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint32_t flags, uint32_t gtt_flags)
{
	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);

	if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
		uint64_t src = adev->gart.table_addr + (addr >> 12) * 8;
		amdgpu_vm_copy_pte(adev, ib, pe, src, count);

	} else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) {
		amdgpu_vm_write_pte(adev, ib, pe, addr,
				      count, incr, flags);

	} else {
		amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
				      count, incr, flags);
	}
}

int amdgpu_vm_free_job(struct amdgpu_job *job)
{
	int i;
	for (i = 0; i < job->num_ibs; i++)
		amdgpu_ib_free(job->adev, &job->ibs[i]);
	kfree(job->ibs);
	return 0;
}

/**
 * amdgpu_vm_clear_bo - initially clear the page dir/table
 *
 * @adev: amdgpu_device pointer
 * @bo: bo to clear
 *
 * The bo must be reserved before calling this.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_bo *bo)
{
	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
	struct fence *fence = NULL;
	struct amdgpu_ib *ib;
	unsigned entries;
	uint64_t addr;
	int r;

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto error;

	addr = amdgpu_bo_gpu_offset(bo);
	entries = amdgpu_bo_size(bo) / 8;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib) {
		/* r would otherwise still be 0 from ttm_bo_validate() */
		r = -ENOMEM;
		goto error;
	}

	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
	if (r)
		goto error_free;

	ib->length_dw = 0;

	amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
	amdgpu_vm_pad_ib(adev, ib);
	WARN_ON(ib->length_dw > 64);
	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vm_free_job,
						 AMDGPU_FENCE_OWNER_VM,
						 &fence);
	if (!r)
		amdgpu_bo_fence(bo, fence, true);
	fence_put(fence);
	if (amdgpu_enable_scheduler)
		return 0;

error_free:
	amdgpu_ib_free(adev, ib);
	kfree(ib);

error:
	return r;
}

/**
 * amdgpu_vm_map_gart - get the physical address of a gart page
 *
 * @adev: amdgpu_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = adev->gart.pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	return result;
}
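
/*
 * Worked example (assuming 4KB CPU pages, so PAGE_SHIFT == 12): for
 * addr = 0x12345, pages_addr[0x12] supplies the physical page and the
 * low bits (addr & ~PAGE_MASK == 0x345) are OR'd back in, so the result
 * still points at the right 4KB GPU page even when the CPU page size is
 * larger than the GPU page size.
 */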

/**
 * amdgpu_vm_update_page_directory - make sure that the page directory is valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
	struct amdgpu_bo *pd = vm->page_directory;
	uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct amdgpu_ib *ib;
	struct fence *fence = NULL;

	int r;

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 6;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
	if (r) {
		kfree(ib);
		return r;
	}
	ib->length_dw = 0;

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = amdgpu_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				amdgpu_vm_update_pages(adev, ib, last_pde,
						       last_pt, count, incr,
						       AMDGPU_PTE_VALID, 0);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count,
				       incr, AMDGPU_PTE_VALID, 0);

	if (ib->length_dw != 0) {
		amdgpu_vm_pad_ib(adev, ib);
		amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
		WARN_ON(ib->length_dw > ndw);
		r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
							 &amdgpu_vm_free_job,
							 AMDGPU_FENCE_OWNER_VM,
							 &fence);
		if (r)
			goto error_free;

		amdgpu_bo_fence(pd, fence, true);
		fence_put(vm->page_directory_fence);
		vm->page_directory_fence = fence_get(fence);
		fence_put(fence);
	}

	if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
		amdgpu_ib_free(adev, ib);
		kfree(ib);
	}

	return 0;

error_free:
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return r;
}

/**
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @adev: amdgpu_device pointer
 * @ib: IB for the update
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 * @gtt_flags: GTT hw mapping flags
 *
 * Global and local mutex must be locked!
 */
static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
				struct amdgpu_ib *ib,
				uint64_t pe_start, uint64_t pe_end,
				uint64_t addr, uint32_t flags,
				uint32_t gtt_flags)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */
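
	/*
	 * Worked example of the alignment below: with 8-byte PTEs,
	 * frag_align = 0x80 spans 0x80 / 8 = 16 PTEs, i.e. 16 * 4KB =
	 * 64KB of address space, matching the 64KB fragment size
	 * (frag = 4, 1 << (12 + 4)) selected via AMDGPU_PTE_FRAG_64KB.
	 */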

	/* SI and newer are optimized for 64KB */
	uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
	uint64_t frag_align = 0x80;

	uint64_t frag_start = ALIGN(pe_start, frag_align);
	uint64_t frag_end = pe_end & ~(frag_align - 1);

	unsigned count;

	/* system pages are not physically contiguous */
	if ((flags & AMDGPU_PTE_SYSTEM) || !(flags & AMDGPU_PTE_VALID) ||
	    (frag_start >= frag_end)) {

		count = (pe_end - pe_start) / 8;
		amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (pe_start != frag_start) {
		count = (frag_start - pe_start) / 8;
		amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
		addr += AMDGPU_GPU_PAGE_SIZE * count;
	}

	/* handle the area in the middle */
	count = (frag_end - frag_start) / 8;
	amdgpu_vm_update_pages(adev, ib, frag_start, addr, count,
			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags,
			       gtt_flags);

	/* handle the 4K area at the end */
	if (frag_end != pe_end) {
		addr += AMDGPU_GPU_PAGE_SIZE * count;
		count = (pe_end - frag_end) / 8;
		amdgpu_vm_update_pages(adev, ib, frag_end, addr, count,
				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
	}
}

/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @ib: indirect buffer to fill with commands
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 * @gtt_flags: GTT hw mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_ib *ib,
				 uint64_t start, uint64_t end,
				 uint64_t dst, uint32_t flags,
				 uint32_t gtt_flags)
{
	uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
	uint64_t last_pte = ~0, last_dst = ~0;
	void *owner = AMDGPU_FENCE_OWNER_VM;
	unsigned count = 0;
	uint64_t addr;

	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> amdgpu_vm_block_size;
		struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo;
		unsigned nptes;
		uint64_t pte;
		int r;

		amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner);
		r = reservation_object_reserve_shared(pt->tbo.resv);
		if (r)
			return r;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

		pte = amdgpu_bo_gpu_offset(pt);
		pte += (addr & mask) * 8;

		if ((last_pte + 8 * count) != pte) {

			if (count) {
				amdgpu_vm_frag_ptes(adev, ib, last_pte,
						    last_pte + 8 * count,
						    last_dst, flags,
						    gtt_flags);
			}

			count = nptes;
			last_pte = pte;
			last_dst = dst;
		} else {
			count += nptes;
		}

		addr += nptes;
		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
	}

	if (count) {
		amdgpu_vm_frag_ptes(adev, ib, last_pte,
				    last_pte + 8 * count,
				    last_dst, flags, gtt_flags);
	}

	return 0;
}
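
/*
 * Worked example of the walk above (hypothetical numbers): with
 * amdgpu_vm_block_size = 9, AMDGPU_VM_PTE_COUNT = 512 and mask = 0x1ff.
 * Updating start = 0x1fe, end = 0x202 first emits nptes = 512 - 0x1fe =
 * 2 entries from page table 0, then crosses into page table 1 and emits
 * the remaining nptes = 0x202 - 0x200 = 2 entries there.
 */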

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @addr: addr to set the area to
 * @gtt_flags: flags as they are used for GTT
 * @fence: optional resulting fence
 *
 * Fill in the page table entries for @mapping.
 * Returns 0 for success, -EINVAL for failure.
 *
 * Objects have to be reserved and the mutex must be locked!
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				       struct amdgpu_vm *vm,
				       struct amdgpu_bo_va_mapping *mapping,
				       uint64_t addr, uint32_t gtt_flags,
				       struct fence **fence)
{
	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
	unsigned nptes, ncmds, ndw;
	uint32_t flags = gtt_flags;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	int r;

	/* normally bo_va->flags only contains the READABLE and WRITEABLE
	 * bits, but just in case, filter the flags here first
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	trace_amdgpu_vm_bo_update(mapping);

	nptes = mapping->it.last - mapping->it.start + 1;

	/*
	 * reserve space for one command every (1 << BLOCK_SIZE)
	 *  entries or 2k dwords (whatever is smaller)
	 */
	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;

	/* padding, etc. */
	ndw = 64;

	if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
		/* only copy commands needed */
		ndw += ncmds * 7;

	} else if (flags & AMDGPU_PTE_SYSTEM) {
		/* header for write data commands */
		ndw += ncmds * 4;

		/* body of write data command */
		ndw += nptes * 2;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;
	}
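
	/*
	 * Worked sizing example (hypothetical): mapping nptes = 1024
	 * pages with amdgpu_vm_block_size = 9 gives ncmds =
	 * (1024 >> 9) + 1 = 3, so the GTT-copy path above reserves
	 * ndw = 64 + 3 * 7 = 85 dwords.
	 */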

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
	if (r) {
		kfree(ib);
		return r;
	}

	ib->length_dw = 0;

	r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
				  mapping->it.last + 1, addr + mapping->offset,
				  flags, gtt_flags);

	if (r) {
		amdgpu_ib_free(adev, ib);
		kfree(ib);
		return r;
	}

	amdgpu_vm_pad_ib(adev, ib);
	WARN_ON(ib->length_dw > ndw);
	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vm_free_job,
						 AMDGPU_FENCE_OWNER_VM,
						 &f);
	if (r)
		goto error_free;

	amdgpu_bo_fence(vm->page_directory, f, true);
	if (fence) {
		fence_put(*fence);
		*fence = fence_get(f);
	}
	fence_put(f);
	if (!amdgpu_enable_scheduler) {
		amdgpu_ib_free(adev, ib);
		kfree(ib);
	}
	return 0;

error_free:
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return r;
}

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 *
 * Objects have to be reserved and the mutex must be locked!
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			struct ttm_mem_reg *mem)
{
	struct amdgpu_vm *vm = bo_va->vm;
	struct amdgpu_bo_va_mapping *mapping;
	uint32_t flags;
	uint64_t addr;
	int r;

	if (mem) {
		addr = (u64)mem->start << PAGE_SHIFT;
		if (mem->mem_type != TTM_PL_TT)
			addr += adev->vm_manager.vram_base_offset;
	} else {
		addr = 0;
	}

	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);

	spin_lock(&vm->status_lock);
	if (!list_empty(&bo_va->vm_status))
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	spin_unlock(&vm->status_lock);

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
						flags, &bo_va->last_pt_update);
		if (r)
			return r;
	}

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);

		list_for_each_entry(mapping, &bo_va->invalids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

	spin_lock(&vm->status_lock);
	list_splice_init(&bo_va->invalids, &bo_va->valids);
	list_del_init(&bo_va->vm_status);
	if (!mem)
		list_add(&bo_va->vm_status, &vm->cleared);
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	spin_lock(&vm->freed_lock);
	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);
		spin_unlock(&vm->freed_lock);
		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
		kfree(mapping);
		if (r)
			return r;

		spin_lock(&vm->freed_lock);
	}
	spin_unlock(&vm->freed_lock);

	return 0;
}

/**
 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @sync: sync object to add the last update fence to
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = NULL;
	int r = 0;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
			struct amdgpu_bo_va, vm_status);
		spin_unlock(&vm->status_lock);
		mutex_lock(&bo_va->mutex);
		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
		mutex_unlock(&bo_va->mutex);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	if (bo_va)
		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);

	return r;
}

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm.
 * Returns newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);
	INIT_LIST_HEAD(&bo_va->vm_status);
	mutex_init(&bo_va->mutex);
	list_add_tail(&bo_va->bo_list, &bo->va);

	return bo_va;
}

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint32_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	struct interval_tree_node *it;
	unsigned last_pfn, pt_idx;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure the object fits at this offset */
	eaddr = saddr + size - 1;
	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
		return -EINVAL;

	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
	if (last_pfn >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
			last_pfn, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	spin_lock(&vm->it_lock);
	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
	spin_unlock(&vm->it_lock);
	if (it) {
		struct amdgpu_bo_va_mapping *tmp;
		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010"PRIx64"-0x%010"PRIx64" conflict with "
			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
			tmp->it.start, tmp->it.last + 1);
		r = -EINVAL;
		goto error;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		r = -ENOMEM;
		goto error;
	}

	INIT_LIST_HEAD(&mapping->list);
	mapping->it.start = saddr;
	mapping->it.last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	mutex_lock(&bo_va->mutex);
	list_add(&mapping->list, &bo_va->invalids);
	mutex_unlock(&bo_va->mutex);
	spin_lock(&vm->it_lock);
	interval_tree_insert(&mapping->it, &vm->va);
	spin_unlock(&vm->it_lock);
	trace_amdgpu_vm_bo_map(bo_va, mapping);

	/* Make sure the page tables are allocated */
	saddr >>= amdgpu_vm_block_size;
	eaddr >>= amdgpu_vm_block_size;

	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));

	if (eaddr > vm->max_pde_used)
		vm->max_pde_used = eaddr;

	/* walk over the address space and allocate the page tables */
	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
		struct reservation_object *resv = vm->page_directory->tbo.resv;
		struct amdgpu_bo *pt;

		if (vm->page_tables[pt_idx].bo)
			continue;

		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
				     AMDGPU_GPU_PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
				     NULL, resv, &pt);
		if (r)
			goto error_free;

		/* Keep a reference to the page table to avoid freeing
		 * them up in the wrong order.
		 */
		pt->parent = amdgpu_bo_ref(vm->page_directory);

		r = amdgpu_vm_clear_bo(adev, pt);
		if (r) {
			amdgpu_bo_unref(&pt);
			goto error_free;
		}

		vm->page_tables[pt_idx].addr = 0;
		vm->page_tables[pt_idx].bo = pt;
	}

	return 0;

error_free:
	list_del(&mapping->list);
	spin_lock(&vm->it_lock);
	interval_tree_remove(&mapping->it, &vm->va);
	spin_unlock(&vm->it_lock);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
	kfree(mapping);

error:
	return r;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	mutex_lock(&bo_va->mutex);
	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->it.start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->it.start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids) {
			mutex_unlock(&bo_va->mutex);
			return -ENOENT;
		}
	}
	mutex_unlock(&bo_va->mutex);
	list_del(&mapping->list);
	spin_lock(&vm->it_lock);
	interval_tree_remove(&mapping->it, &vm->va);
	spin_unlock(&vm->it_lock);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid) {
		spin_lock(&vm->freed_lock);
		list_add(&mapping->list, &vm->freed);
		spin_unlock(&vm->freed_lock);
	} else {
		kfree(mapping);
	}

	return 0;
}

/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		spin_lock(&vm->it_lock);
		interval_tree_remove(&mapping->it, &vm->va);
		spin_unlock(&vm->it_lock);
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		spin_lock(&vm->freed_lock);
		list_add(&mapping->list, &vm->freed);
		spin_unlock(&vm->freed_lock);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		spin_lock(&vm->it_lock);
		interval_tree_remove(&mapping->it, &vm->va);
		spin_unlock(&vm->it_lock);
		kfree(mapping);
	}
	fence_put(bo_va->last_pt_update);
	mutex_destroy(&bo_va->mutex);
	kfree(bo_va);
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		spin_lock(&bo_va->vm->status_lock);
		if (list_empty(&bo_va->vm_status))
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
		spin_unlock(&bo_va->vm->status_lock);
	}
}

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
		AMDGPU_VM_PTE_COUNT * 8);
	unsigned pd_size, pd_entries;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		vm->ids[i].id = 0;
		vm->ids[i].flushed_updates = NULL;
	}
#ifdef __NetBSD__
	interval_tree_init(&vm->va);
#else
	vm->va = RB_ROOT;
#endif
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->cleared);
	INIT_LIST_HEAD(&vm->freed);
	spin_lock_init(&vm->it_lock);
	spin_lock_init(&vm->freed_lock);
	pd_size = amdgpu_vm_directory_size(adev);
	pd_entries = amdgpu_vm_num_pdes(adev);

	/* allocate page table array */
	vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	vm->page_directory_fence = NULL;

	r = amdgpu_bo_create(adev, pd_size, align, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
			     NULL, NULL, &vm->page_directory);
	if (r)
		return r;
	r = amdgpu_bo_reserve(vm->page_directory, false);
	if (r) {
		amdgpu_bo_unref(&vm->page_directory);
		vm->page_directory = NULL;
		return r;
	}
	r = amdgpu_vm_clear_bo(adev, vm->page_directory);
	amdgpu_bo_unreserve(vm->page_directory);
	if (r) {
		amdgpu_bo_unref(&vm->page_directory);
		vm->page_directory = NULL;
		return r;
	}

	return 0;
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list.
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	int i;

	if (!RB_EMPTY_ROOT(&vm->va)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		list_del(&mapping->list);
		kfree(mapping);
	}

	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
		amdgpu_bo_unref(&vm->page_tables[i].bo);
	drm_free_large(vm->page_tables);

	amdgpu_bo_unref(&vm->page_directory);
	fence_put(vm->page_directory_fence);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		unsigned id = vm->ids[i].id;

		atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
				    (long)vm, 0);
		fence_put(vm->ids[i].flushed_updates);
	}
}

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_VM; ++i)
		fence_put(adev->vm_manager.ids[i].active);
}
   1368