amdgpu_vm.c revision 1.2
      1 /*	$NetBSD: amdgpu_vm.c,v 1.2 2018/08/27 04:58:20 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright 2008 Advanced Micro Devices, Inc.
      5  * Copyright 2008 Red Hat Inc.
      6  * Copyright 2009 Jerome Glisse.
      7  *
      8  * Permission is hereby granted, free of charge, to any person obtaining a
      9  * copy of this software and associated documentation files (the "Software"),
     10  * to deal in the Software without restriction, including without limitation
     11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     12  * and/or sell copies of the Software, and to permit persons to whom the
     13  * Software is furnished to do so, subject to the following conditions:
     14  *
     15  * The above copyright notice and this permission notice shall be included in
     16  * all copies or substantial portions of the Software.
     17  *
     18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     21  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     22  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     23  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     24  * OTHER DEALINGS IN THE SOFTWARE.
     25  *
     26  * Authors: Dave Airlie
     27  *          Alex Deucher
     28  *          Jerome Glisse
     29  */
     30 #include <sys/cdefs.h>
     31 __KERNEL_RCSID(0, "$NetBSD: amdgpu_vm.c,v 1.2 2018/08/27 04:58:20 riastradh Exp $");
     32 
     33 #include <drm/drmP.h>
     34 #include <drm/amdgpu_drm.h>
     35 #include "amdgpu.h"
     36 #include "amdgpu_trace.h"
     37 
     38 /*
     39  * GPUVM
     40  * GPUVM is similar to the legacy gart on older asics; however,
     41  * rather than there being a single global gart table
     42  * for the entire GPU, there are multiple VM page tables active
     43  * at any given time.  The VM page tables can contain a mix of
     44  * vram pages and system memory pages, and system memory pages
     45  * can be mapped as snooped (cached system pages) or unsnooped
     46  * (uncached system pages).
     47  * Each VM has an ID associated with it and there is a page table
     48  * associated with each VMID.  When executing a command buffer,
     49  * the kernel tells the ring what VMID to use for that command
     50  * buffer.  VMIDs are allocated dynamically as commands are submitted.
     51  * The userspace drivers maintain their own address space and the kernel
     52  * sets up their page tables accordingly when they submit their
     53  * command buffers and a VMID is assigned.
     54  * Cayman/Trinity support up to 8 active VMs at any given time;
     55  * SI supports 16.
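         *
         * Translation is two-level: a per-VM page directory points to page
         * tables, and each page table covers (1 << amdgpu_vm_block_size)
         * GPU pages (4KB each).  For example, with a block size of 9 each
         * page table maps 512 * 4KB = 2MB of address space, the page table
         * index of a GPU page is (pfn >> 9) and its entry within that
         * table is (pfn & 0x1ff), as computed in amdgpu_vm_update_ptes().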
     56  */
     57 
     58 /**
     59  * amdgpu_vm_num_pdes - return the number of page directory entries
     60  *
     61  * @adev: amdgpu_device pointer
     62  *
     63  * Calculate the number of page directory entries (cayman+).
     64  */
     65 static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
     66 {
     67 	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
     68 }
     69 
     70 /**
     71  * amdgpu_vm_directory_size - returns the size of the page directory in bytes
     72  *
     73  * @adev: amdgpu_device pointer
     74  *
     75  * Calculate the size of the page directory in bytes (cayman+).
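         *
         * For example, assuming max_pfn = 0x100000 (a 4GB address space in
         * 4KB GPU pages) and amdgpu_vm_block_size = 9, there are 2048 PDEs
         * and, at 8 bytes each, the directory occupies 16KB.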
     76  */
     77 static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
     78 {
     79 	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
     80 }
     81 
     82 /**
     83  * amdgpu_vm_get_bos - add the vm BOs to a validation list
     84  *
         * @adev: amdgpu_device pointer
     85  * @vm: vm providing the BOs
     86  * @head: head of validation list
     87  *
     88  * Add the page directory and page tables to the list of BOs to
     89  * validate for command submission (cayman+).
     90  */
     91 struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
     92 					  struct amdgpu_vm *vm,
     93 					  struct list_head *head)
     94 {
     95 	struct amdgpu_bo_list_entry *list;
     96 	unsigned i, idx;
     97 
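        	/*
        	 * One entry for the page directory plus one for each possibly
        	 * allocated page table (indices 0..max_pde_used), hence
        	 * max_pde_used + 2 entries.
        	 */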
     98 	list = drm_malloc_ab(vm->max_pde_used + 2,
     99 			     sizeof(struct amdgpu_bo_list_entry));
    100 	if (!list) {
    101 		return NULL;
    102 	}
    103 
    104 	/* add the vm page table to the list */
    105 	list[0].robj = vm->page_directory;
    106 	list[0].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
    107 	list[0].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
    108 	list[0].priority = 0;
    109 	list[0].tv.bo = &vm->page_directory->tbo;
    110 	list[0].tv.shared = true;
    111 	list_add(&list[0].tv.head, head);
    112 
    113 	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
    114 		if (!vm->page_tables[i].bo)
    115 			continue;
    116 
    117 		list[idx].robj = vm->page_tables[i].bo;
    118 		list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
    119 		list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
    120 		list[idx].priority = 0;
    121 		list[idx].tv.bo = &list[idx].robj->tbo;
    122 		list[idx].tv.shared = true;
    123 		list_add(&list[idx++].tv.head, head);
    124 	}
    125 
    126 	return list;
    127 }
    128 
    129 /**
    130  * amdgpu_vm_grab_id - allocate the next free VMID
    131  *
    132  * @vm: vm to allocate id for
    133  * @ring: ring we want to submit job to
    134  * @sync: sync object where we add dependencies
    135  *
    136  * Allocate an id for the vm, adding fences to the sync obj as necessary.
    137  *
    138  * Global mutex must be locked!
    139  */
    140 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
    141 		      struct amdgpu_sync *sync)
    142 {
    143 	struct fence *best[AMDGPU_MAX_RINGS] = {};
    144 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
    145 	struct amdgpu_device *adev = ring->adev;
    146 
    147 	unsigned choices[2] = {};
    148 	unsigned i;
    149 
    150 	/* check if the id is still valid */
    151 	if (vm_id->id) {
    152 		unsigned id = vm_id->id;
    153 		long owner;
    154 
    155 		owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
    156 		if (owner == (long)vm) {
    157 			trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
    158 			return 0;
    159 		}
    160 	}
    161 
    162 	/* we definitely need to flush */
    163 	vm_id->pd_gpu_addr = ~0ll;
    164 
    165 	/* skip over VMID 0, since it is the system VM */
    166 	for (i = 1; i < adev->vm_manager.nvm; ++i) {
    167 		struct fence *fence = adev->vm_manager.ids[i].active;
    168 		struct amdgpu_ring *fring;
    169 
    170 		if (fence == NULL) {
    171 			/* found a free one */
    172 			vm_id->id = i;
    173 			trace_amdgpu_vm_grab_id(i, ring->idx);
    174 			return 0;
    175 		}
    176 
    177 		fring = amdgpu_ring_from_fence(fence);
    178 		if (best[fring->idx] == NULL ||
    179 		    fence_is_later(best[fring->idx], fence)) {
    180 			best[fring->idx] = fence;
    181 			choices[fring == ring ? 0 : 1] = i;
    182 		}
    183 	}
    184 
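        	/*
        	 * Prefer an id whose last use was on this ring (choices[0]) over
        	 * one last used on another ring (choices[1]); either way the id's
        	 * last fence is added to the sync object so the new submission
        	 * waits for the previous user of the VMID to finish.
        	 */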
    185 	for (i = 0; i < 2; ++i) {
    186 		if (choices[i]) {
    187 			struct fence *fence;
    188 
    189 			fence  = adev->vm_manager.ids[choices[i]].active;
    190 			vm_id->id = choices[i];
    191 
    192 			trace_amdgpu_vm_grab_id(choices[i], ring->idx);
    193 			return amdgpu_sync_fence(ring->adev, sync, fence);
    194 		}
    195 	}
    196 
    197 	/* should never happen */
    198 	BUG();
    199 	return -EINVAL;
    200 }
    201 
    202 /**
    203  * amdgpu_vm_flush - hardware flush the vm
    204  *
    205  * @ring: ring to use for flush
    206  * @vm: vm we want to flush
    207  * @updates: last vm update that we waited for
    208  *
    209  * Flush the vm (cayman+).
    210  *
    211  * Global and local mutex must be locked!
    212  */
    213 void amdgpu_vm_flush(struct amdgpu_ring *ring,
    214 		     struct amdgpu_vm *vm,
    215 		     struct fence *updates)
    216 {
    217 	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
    218 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
    219 	struct fence *flushed_updates = vm_id->flushed_updates;
    220 	bool is_later;
    221 
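        	/*
        	 * A flush is needed when the page directory has moved or when
        	 * there are page table updates newer than the ones covered by
        	 * the last flush; "updates" may be NULL if there is nothing new
        	 * to wait for.
        	 */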
    222 	if (!flushed_updates)
    223 		is_later = true;
    224 	else if (!updates)
    225 		is_later = false;
    226 	else
    227 		is_later = fence_is_later(updates, flushed_updates);
    228 
    229 	if (pd_addr != vm_id->pd_gpu_addr || is_later) {
    230 		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
    231 		if (is_later) {
    232 			vm_id->flushed_updates = fence_get(updates);
    233 			fence_put(flushed_updates);
    234 		}
    235 		vm_id->pd_gpu_addr = pd_addr;
    236 		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
    237 	}
    238 }
    239 
    240 /**
    241  * amdgpu_vm_fence - remember fence for vm
    242  *
    243  * @adev: amdgpu_device pointer
    244  * @vm: vm we want to fence
    245  * @fence: fence to remember
    246  *
    247  * Fence the vm (cayman+).
    248  * Set the fence used to protect page table and id.
    249  *
    250  * Global and local mutex must be locked!
    251  */
    252 void amdgpu_vm_fence(struct amdgpu_device *adev,
    253 		     struct amdgpu_vm *vm,
    254 		     struct fence *fence)
    255 {
    256 	struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
    257 	unsigned vm_id = vm->ids[ring->idx].id;
    258 
    259 	fence_put(adev->vm_manager.ids[vm_id].active);
    260 	adev->vm_manager.ids[vm_id].active = fence_get(fence);
    261 	atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
    262 }
    263 
    264 /**
    265  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
    266  *
    267  * @vm: requested vm
    268  * @bo: requested buffer object
    269  *
    270  * Find @bo inside the requested vm (cayman+).
    271  * Search inside the @bos vm list for the requested vm
    272  * Returns the found bo_va or NULL if none is found
    273  *
    274  * Object has to be reserved!
    275  */
    276 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
    277 				       struct amdgpu_bo *bo)
    278 {
    279 	struct amdgpu_bo_va *bo_va;
    280 
    281 	list_for_each_entry(bo_va, &bo->va, bo_list) {
    282 		if (bo_va->vm == vm) {
    283 			return bo_va;
    284 		}
    285 	}
    286 	return NULL;
    287 }
    288 
    289 /**
    290  * amdgpu_vm_update_pages - helper to call the right asic function
    291  *
    292  * @adev: amdgpu_device pointer
    293  * @ib: indirect buffer to fill with commands
    294  * @pe: addr of the page entry
    295  * @addr: dst addr to write into pe
    296  * @count: number of page entries to update
    297  * @incr: increase next addr by incr bytes
    298  * @flags: hw access flags
    299  * @gtt_flags: GTT hw access flags
    300  *
    301  * Traces the parameters and calls the right asic functions
    302  * to setup the page table using the DMA.
    303  */
    304 static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
    305 				   struct amdgpu_ib *ib,
    306 				   uint64_t pe, uint64_t addr,
    307 				   unsigned count, uint32_t incr,
    308 				   uint32_t flags, uint32_t gtt_flags)
    309 {
    310 	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
    311 
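        	/*
        	 * Three update paths: copy entries straight out of the GART table
        	 * when mapping system pages with unmodified GTT flags, write the
        	 * entries directly for system pages or very small updates, and
        	 * otherwise generate runs of entries with set_pte_pde.
        	 */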
    312 	if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
    313 		uint64_t src = adev->gart.table_addr + (addr >> 12) * 8;
    314 		amdgpu_vm_copy_pte(adev, ib, pe, src, count);
    315 
    316 	} else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) {
    317 		amdgpu_vm_write_pte(adev, ib, pe, addr,
    318 				      count, incr, flags);
    319 
    320 	} else {
    321 		amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
    322 				      count, incr, flags);
    323 	}
    324 }
    325 
    326 int amdgpu_vm_free_job(struct amdgpu_job *job)
    327 {
    328 	int i;
    329 	for (i = 0; i < job->num_ibs; i++)
    330 		amdgpu_ib_free(job->adev, &job->ibs[i]);
    331 	kfree(job->ibs);
    332 	return 0;
    333 }
    334 
    335 /**
    336  * amdgpu_vm_clear_bo - initially clear the page dir/table
    337  *
    338  * @adev: amdgpu_device pointer
    339  * @bo: bo to clear
    340  *
    341  * The bo has to be reserved before calling this function.
    342  */
    343 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
    344 			      struct amdgpu_bo *bo)
    345 {
    346 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
    347 	struct fence *fence = NULL;
    348 	struct amdgpu_ib *ib;
    349 	unsigned entries;
    350 	uint64_t addr;
    351 	int r;
    352 
    353 	r = reservation_object_reserve_shared(bo->tbo.resv);
    354 	if (r)
    355 		return r;
    356 
    357 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
    358 	if (r)
    359 		goto error;
    360 
    361 	addr = amdgpu_bo_gpu_offset(bo);
    362 	entries = amdgpu_bo_size(bo) / 8;
    363 
    364 	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
    365 	if (!ib)
    366 		return -ENOMEM;
    367 
    368 	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
    369 	if (r)
    370 		goto error_free;
    371 
    372 	ib->length_dw = 0;
    373 
    374 	amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
    375 	amdgpu_vm_pad_ib(adev, ib);
    376 	WARN_ON(ib->length_dw > 64);
    377 	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
    378 						 &amdgpu_vm_free_job,
    379 						 AMDGPU_FENCE_OWNER_VM,
    380 						 &fence);
    381 	if (!r)
    382 		amdgpu_bo_fence(bo, fence, true);
    383 	fence_put(fence);
    384 	if (amdgpu_enable_scheduler)
    385 		return 0;
    386 
    387 error_free:
    388 	amdgpu_ib_free(adev, ib);
    389 	kfree(ib);
    390 
    391 error:
    392 	return r;
    393 }
    394 
    395 /**
    396  * amdgpu_vm_map_gart - get the physical address of a gart page
    397  *
    398  * @adev: amdgpu_device pointer
    399  * @addr: the unmapped addr
    400  *
    401  * Look up the physical address of the page that the pte resolves
    402  * to (cayman+).
    403  * Returns the physical address of the page.
    404  */
    405 uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr)
    406 {
    407 	uint64_t result;
    408 
    409 	/* page table offset */
    410 	result = adev->gart.pages_addr[addr >> PAGE_SHIFT];
    411 
    412 	/* in case cpu page size != gpu page size */
    413 	result |= addr & (~PAGE_MASK);
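        	/*
        	 * For example, with 4KB CPU pages an addr of 0x12345 yields
        	 * pages_addr[0x12] | 0x345.
        	 */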
    414 
    415 	return result;
    416 }
    417 
    418 /**
    419  * amdgpu_vm_update_page_directory - make sure that page directory is valid
    420  *
    421  * @adev: amdgpu_device pointer
    422  * @vm: requested vm
    425  *
    426  * Allocates new page tables if necessary
    427  * and updates the page directory (cayman+).
    428  * Returns 0 for success, error for failure.
    429  *
    430  * Global and local mutex must be locked!
    431  */
    432 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
    433 				    struct amdgpu_vm *vm)
    434 {
    435 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
    436 	struct amdgpu_bo *pd = vm->page_directory;
    437 	uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
    438 	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
    439 	uint64_t last_pde = ~0, last_pt = ~0;
    440 	unsigned count = 0, pt_idx, ndw;
    441 	struct amdgpu_ib *ib;
    442 	struct fence *fence = NULL;
    443 
    444 	int r;
    445 
    446 	/* padding, etc. */
    447 	ndw = 64;
    448 
    449 	/* assume the worst case */
    450 	ndw += vm->max_pde_used * 6;
    451 
    452 	/* update too big for an IB */
    453 	if (ndw > 0xfffff)
    454 		return -ENOMEM;
    455 
    456 	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
    457 	if (!ib)
    458 		return -ENOMEM;
    459 
    460 	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
    461 	if (r) {
    462 		kfree(ib);
    463 		return r;
    464 	}
    465 	ib->length_dw = 0;
    466 
    467 	/* walk over the address space and update the page directory */
    468 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
    469 		struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
    470 		uint64_t pde, pt;
    471 
    472 		if (bo == NULL)
    473 			continue;
    474 
    475 		pt = amdgpu_bo_gpu_offset(bo);
    476 		if (vm->page_tables[pt_idx].addr == pt)
    477 			continue;
    478 		vm->page_tables[pt_idx].addr = pt;
    479 
    480 		pde = pd_addr + pt_idx * 8;
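        		/*
        		 * Coalesce contiguous PDE updates: the pending run is
        		 * flushed with amdgpu_vm_update_pages() as soon as either
        		 * the directory offset or the page table address stops
        		 * being contiguous.
        		 */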
    481 		if (((last_pde + 8 * count) != pde) ||
    482 		    ((last_pt + incr * count) != pt)) {
    483 
    484 			if (count) {
    485 				amdgpu_vm_update_pages(adev, ib, last_pde,
    486 						       last_pt, count, incr,
    487 						       AMDGPU_PTE_VALID, 0);
    488 			}
    489 
    490 			count = 1;
    491 			last_pde = pde;
    492 			last_pt = pt;
    493 		} else {
    494 			++count;
    495 		}
    496 	}
    497 
    498 	if (count)
    499 		amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count,
    500 				       incr, AMDGPU_PTE_VALID, 0);
    501 
    502 	if (ib->length_dw != 0) {
    503 		amdgpu_vm_pad_ib(adev, ib);
    504 		amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
    505 		WARN_ON(ib->length_dw > ndw);
    506 		r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
    507 							 &amdgpu_vm_free_job,
    508 							 AMDGPU_FENCE_OWNER_VM,
    509 							 &fence);
    510 		if (r)
    511 			goto error_free;
    512 
    513 		amdgpu_bo_fence(pd, fence, true);
    514 		fence_put(vm->page_directory_fence);
    515 		vm->page_directory_fence = fence_get(fence);
    516 		fence_put(fence);
    517 	}
    518 
    519 	if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
    520 		amdgpu_ib_free(adev, ib);
    521 		kfree(ib);
    522 	}
    523 
    524 	return 0;
    525 
    526 error_free:
    527 	amdgpu_ib_free(adev, ib);
    528 	kfree(ib);
    529 	return r;
    530 }
    531 
    532 /**
    533  * amdgpu_vm_frag_ptes - add fragment information to PTEs
    534  *
    535  * @adev: amdgpu_device pointer
    536  * @ib: IB for the update
    537  * @pe_start: first PTE to handle
    538  * @pe_end: last PTE to handle
    539  * @addr: addr those PTEs should point to
    540  * @flags: hw mapping flags
    541  * @gtt_flags: GTT hw mapping flags
    542  *
    543  * Global and local mutex must be locked!
    544  */
    545 static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
    546 				struct amdgpu_ib *ib,
    547 				uint64_t pe_start, uint64_t pe_end,
    548 				uint64_t addr, uint32_t flags,
    549 				uint32_t gtt_flags)
    550 {
    551 	/**
    552 	 * The MC L1 TLB supports variable sized pages, based on a fragment
    553 	 * field in the PTE. When this field is set to a non-zero value, page
    554 	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
    555 	 * flags are considered valid for all PTEs within the fragment range
    556 	 * and corresponding mappings are assumed to be physically contiguous.
    557 	 *
    558 	 * The L1 TLB can store a single PTE for the whole fragment,
    559 	 * significantly increasing the space available for translation
    560 	 * caching. This leads to large improvements in throughput when the
    561 	 * TLB is under pressure.
    562 	 *
    563 	 * The L2 TLB distributes small and large fragments into two
    564 	 * asymmetric partitions. The large fragment cache is significantly
    565 	 * larger. Thus, we try to use large fragments wherever possible.
    566 	 * Userspace can support this by aligning virtual base address and
    567 	 * allocation size to the fragment size.
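        	 *
        	 * For example, AMDGPU_PTE_FRAG_64KB encodes a fragment value of 4,
        	 * i.e. (1 << (12 + 4)) = 64KB pages, and the matching frag_align of
        	 * 0x80 bytes of PTEs covers 0x80 / 8 = 16 entries, i.e. 64KB of
        	 * address space.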
    568 	 */
    569 
    570 	/* SI and newer are optimized for 64KB */
    571 	uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
    572 	uint64_t frag_align = 0x80;
    573 
    574 	uint64_t frag_start = ALIGN(pe_start, frag_align);
    575 	uint64_t frag_end = pe_end & ~(frag_align - 1);
    576 
    577 	unsigned count;
    578 
    579 	/* system pages are not contiguous */
    580 	if ((flags & AMDGPU_PTE_SYSTEM) || !(flags & AMDGPU_PTE_VALID) ||
    581 	    (frag_start >= frag_end)) {
    582 
    583 		count = (pe_end - pe_start) / 8;
    584 		amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
    585 				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
    586 		return;
    587 	}
    588 
    589 	/* handle the 4K area at the beginning */
    590 	if (pe_start != frag_start) {
    591 		count = (frag_start - pe_start) / 8;
    592 		amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
    593 				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
    594 		addr += AMDGPU_GPU_PAGE_SIZE * count;
    595 	}
    596 
    597 	/* handle the area in the middle */
    598 	count = (frag_end - frag_start) / 8;
    599 	amdgpu_vm_update_pages(adev, ib, frag_start, addr, count,
    600 			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags,
    601 			       gtt_flags);
    602 
    603 	/* handle the 4K area at the end */
    604 	if (frag_end != pe_end) {
    605 		addr += AMDGPU_GPU_PAGE_SIZE * count;
    606 		count = (pe_end - frag_end) / 8;
    607 		amdgpu_vm_update_pages(adev, ib, frag_end, addr, count,
    608 				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
    609 	}
    610 }
    611 
    612 /**
    613  * amdgpu_vm_update_ptes - make sure that page tables are valid
    614  *
    615  * @adev: amdgpu_device pointer
    616  * @vm: requested vm
    617  * @start: start of GPU address range
    618  * @end: end of GPU address range
    619  * @dst: destination address to map to
    620  * @flags: mapping flags
    621  *
    622  * Update the page tables in the range @start - @end (cayman+).
    623  *
    624  * Global and local mutex must be locked!
    625  */
    626 static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
    627 				 struct amdgpu_vm *vm,
    628 				 struct amdgpu_ib *ib,
    629 				 uint64_t start, uint64_t end,
    630 				 uint64_t dst, uint32_t flags,
    631 				 uint32_t gtt_flags)
    632 {
    633 	uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
    634 	uint64_t last_pte = ~0, last_dst = ~0;
    635 	void *owner = AMDGPU_FENCE_OWNER_VM;
    636 	unsigned count = 0;
    637 	uint64_t addr;
    638 
    639 	/* sync to everything on unmapping */
    640 	if (!(flags & AMDGPU_PTE_VALID))
    641 		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
    642 
    643 	/* walk over the address space and update the page tables */
    644 	for (addr = start; addr < end; ) {
    645 		uint64_t pt_idx = addr >> amdgpu_vm_block_size;
    646 		struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo;
    647 		unsigned nptes;
    648 		uint64_t pte;
    649 		int r;
    650 
    651 		amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner);
    652 		r = reservation_object_reserve_shared(pt->tbo.resv);
    653 		if (r)
    654 			return r;
    655 
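        		/*
        		 * Update either up to the end of the current page table or
        		 * up to the end of the requested range, whichever comes
        		 * first.
        		 */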
    656 		if ((addr & ~mask) == (end & ~mask))
    657 			nptes = end - addr;
    658 		else
    659 			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
    660 
    661 		pte = amdgpu_bo_gpu_offset(pt);
    662 		pte += (addr & mask) * 8;
    663 
    664 		if ((last_pte + 8 * count) != pte) {
    665 
    666 			if (count) {
    667 				amdgpu_vm_frag_ptes(adev, ib, last_pte,
    668 						    last_pte + 8 * count,
    669 						    last_dst, flags,
    670 						    gtt_flags);
    671 			}
    672 
    673 			count = nptes;
    674 			last_pte = pte;
    675 			last_dst = dst;
    676 		} else {
    677 			count += nptes;
    678 		}
    679 
    680 		addr += nptes;
    681 		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
    682 	}
    683 
    684 	if (count) {
    685 		amdgpu_vm_frag_ptes(adev, ib, last_pte,
    686 				    last_pte + 8 * count,
    687 				    last_dst, flags, gtt_flags);
    688 	}
    689 
    690 	return 0;
    691 }
    692 
    693 /**
    694  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
    695  *
    696  * @adev: amdgpu_device pointer
    697  * @vm: requested vm
    698  * @mapping: mapped range and flags to use for the update
    699  * @addr: addr to set the area to
    700  * @gtt_flags: flags as they are used for GTT
    701  * @fence: optional resulting fence
    702  *
    703  * Fill in the page table entries for @mapping.
    704  * Returns 0 for success, -EINVAL for failure.
    705  *
    706  * Object has to be reserved and mutex must be locked!
    707  */
    708 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
    709 				       struct amdgpu_vm *vm,
    710 				       struct amdgpu_bo_va_mapping *mapping,
    711 				       uint64_t addr, uint32_t gtt_flags,
    712 				       struct fence **fence)
    713 {
    714 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
    715 	unsigned nptes, ncmds, ndw;
    716 	uint32_t flags = gtt_flags;
    717 	struct amdgpu_ib *ib;
    718 	struct fence *f = NULL;
    719 	int r;
    720 
    721 	/* normally bo_va->flags only contains the READABLE and WRITEABLE bits,
    722 	 * but just in case we filter the flags here first
    723 	 */
    724 	if (!(mapping->flags & AMDGPU_PTE_READABLE))
    725 		flags &= ~AMDGPU_PTE_READABLE;
    726 	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
    727 		flags &= ~AMDGPU_PTE_WRITEABLE;
    728 
    729 	trace_amdgpu_vm_bo_update(mapping);
    730 
    731 	nptes = mapping->it.last - mapping->it.start + 1;
    732 
    733 	/*
    734 	 * reserve space for one command every (1 << BLOCK_SIZE)
    735 	 *  entries or 2k dwords (whatever is smaller)
    736 	 */
    737 	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
    738 
    739 	/* padding, etc. */
    740 	ndw = 64;
    741 
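        	/*
        	 * The dword estimate below mirrors the three submission paths in
        	 * amdgpu_vm_update_pages(): GART copy commands, direct PTE
        	 * writes, or set-page commands plus the extra begin/end fragment
        	 * commands.
        	 */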
    742 	if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
    743 		/* only copy commands needed */
    744 		ndw += ncmds * 7;
    745 
    746 	} else if (flags & AMDGPU_PTE_SYSTEM) {
    747 		/* header for write data commands */
    748 		ndw += ncmds * 4;
    749 
    750 		/* body of write data command */
    751 		ndw += nptes * 2;
    752 
    753 	} else {
    754 		/* set page commands needed */
    755 		ndw += ncmds * 10;
    756 
    757 		/* two extra commands for begin/end of fragment */
    758 		ndw += 2 * 10;
    759 	}
    760 
    761 	/* update too big for an IB */
    762 	if (ndw > 0xfffff)
    763 		return -ENOMEM;
    764 
    765 	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
    766 	if (!ib)
    767 		return -ENOMEM;
    768 
    769 	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
    770 	if (r) {
    771 		kfree(ib);
    772 		return r;
    773 	}
    774 
    775 	ib->length_dw = 0;
    776 
    777 	r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
    778 				  mapping->it.last + 1, addr + mapping->offset,
    779 				  flags, gtt_flags);
    780 
    781 	if (r) {
    782 		amdgpu_ib_free(adev, ib);
    783 		kfree(ib);
    784 		return r;
    785 	}
    786 
    787 	amdgpu_vm_pad_ib(adev, ib);
    788 	WARN_ON(ib->length_dw > ndw);
    789 	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
    790 						 &amdgpu_vm_free_job,
    791 						 AMDGPU_FENCE_OWNER_VM,
    792 						 &f);
    793 	if (r)
    794 		goto error_free;
    795 
    796 	amdgpu_bo_fence(vm->page_directory, f, true);
    797 	if (fence) {
    798 		fence_put(*fence);
    799 		*fence = fence_get(f);
    800 	}
    801 	fence_put(f);
    802 	if (!amdgpu_enable_scheduler) {
    803 		amdgpu_ib_free(adev, ib);
    804 		kfree(ib);
    805 	}
    806 	return 0;
    807 
    808 error_free:
    809 	amdgpu_ib_free(adev, ib);
    810 	kfree(ib);
    811 	return r;
    812 }
    813 
    814 /**
    815  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
    816  *
    817  * @adev: amdgpu_device pointer
    818  * @bo_va: requested BO and VM object
    819  * @mem: ttm mem
    820  *
    821  * Fill in the page table entries for @bo_va.
    822  * Returns 0 for success, -EINVAL for failure.
    823  *
    824  * Object has to be reserved and mutex must be locked!
    825  */
    826 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
    827 			struct amdgpu_bo_va *bo_va,
    828 			struct ttm_mem_reg *mem)
    829 {
    830 	struct amdgpu_vm *vm = bo_va->vm;
    831 	struct amdgpu_bo_va_mapping *mapping;
    832 	uint32_t flags;
    833 	uint64_t addr;
    834 	int r;
    835 
    836 	if (mem) {
    837 		addr = (u64)mem->start << PAGE_SHIFT;
    838 		if (mem->mem_type != TTM_PL_TT)
    839 			addr += adev->vm_manager.vram_base_offset;
    840 	} else {
    841 		addr = 0;
    842 	}
    843 
    844 	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
    845 
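        	/*
        	 * A non-empty vm_status means the BO moved or was cleared since
        	 * the last update, so previously valid mappings have to be
        	 * rebound as well; move them onto the invalid list so the loop
        	 * below rewrites everything.
        	 */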
    846 	spin_lock(&vm->status_lock);
    847 	if (!list_empty(&bo_va->vm_status))
    848 		list_splice_init(&bo_va->valids, &bo_va->invalids);
    849 	spin_unlock(&vm->status_lock);
    850 
    851 	list_for_each_entry(mapping, &bo_va->invalids, list) {
    852 		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
    853 						flags, &bo_va->last_pt_update);
    854 		if (r)
    855 			return r;
    856 	}
    857 
    858 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
    859 		list_for_each_entry(mapping, &bo_va->valids, list)
    860 			trace_amdgpu_vm_bo_mapping(mapping);
    861 
    862 		list_for_each_entry(mapping, &bo_va->invalids, list)
    863 			trace_amdgpu_vm_bo_mapping(mapping);
    864 	}
    865 
    866 	spin_lock(&vm->status_lock);
    867 	list_splice_init(&bo_va->invalids, &bo_va->valids);
    868 	list_del_init(&bo_va->vm_status);
    869 	if (!mem)
    870 		list_add(&bo_va->vm_status, &vm->cleared);
    871 	spin_unlock(&vm->status_lock);
    872 
    873 	return 0;
    874 }
    875 
    876 /**
    877  * amdgpu_vm_clear_freed - clear freed BOs in the PT
    878  *
    879  * @adev: amdgpu_device pointer
    880  * @vm: requested vm
    881  *
    882  * Make sure all freed BOs are cleared in the PT.
    883  * Returns 0 for success.
    884  *
    885  * PTs have to be reserved and mutex must be locked!
    886  */
    887 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
    888 			  struct amdgpu_vm *vm)
    889 {
    890 	struct amdgpu_bo_va_mapping *mapping;
    891 	int r;
    892 
    893 	spin_lock(&vm->freed_lock);
    894 	while (!list_empty(&vm->freed)) {
    895 		mapping = list_first_entry(&vm->freed,
    896 			struct amdgpu_bo_va_mapping, list);
    897 		list_del(&mapping->list);
    898 		spin_unlock(&vm->freed_lock);
    899 		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
    900 		kfree(mapping);
    901 		if (r)
    902 			return r;
    903 
    904 		spin_lock(&vm->freed_lock);
    905 	}
    906 	spin_unlock(&vm->freed_lock);
    907 
    908 	return 0;
    909 
    910 }
    911 
    912 /**
    913  * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
    914  *
    915  * @adev: amdgpu_device pointer
    916  * @vm: requested vm
    917  *
    918  * Make sure all invalidated BOs are cleared in the PT.
    919  * Returns 0 for success.
    920  *
    921  * PTs have to be reserved and mutex must be locked!
    922  */
    923 int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
    924 			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
    925 {
    926 	struct amdgpu_bo_va *bo_va = NULL;
    927 	int r = 0;
    928 
    929 	spin_lock(&vm->status_lock);
    930 	while (!list_empty(&vm->invalidated)) {
    931 		bo_va = list_first_entry(&vm->invalidated,
    932 			struct amdgpu_bo_va, vm_status);
    933 		spin_unlock(&vm->status_lock);
    934 		mutex_lock(&bo_va->mutex);
    935 		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
    936 		mutex_unlock(&bo_va->mutex);
    937 		if (r)
    938 			return r;
    939 
    940 		spin_lock(&vm->status_lock);
    941 	}
    942 	spin_unlock(&vm->status_lock);
    943 
    944 	if (bo_va)
    945 		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
    946 
    947 	return r;
    948 }
    949 
    950 /**
    951  * amdgpu_vm_bo_add - add a bo to a specific vm
    952  *
    953  * @adev: amdgpu_device pointer
    954  * @vm: requested vm
    955  * @bo: amdgpu buffer object
    956  *
    957  * Add @bo into the requested vm (cayman+).
    958  * Add @bo to the list of bos associated with the vm
    959  * Returns newly added bo_va or NULL for failure
    960  *
    961  * Object has to be reserved!
    962  */
    963 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
    964 				      struct amdgpu_vm *vm,
    965 				      struct amdgpu_bo *bo)
    966 {
    967 	struct amdgpu_bo_va *bo_va;
    968 
    969 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
    970 	if (bo_va == NULL) {
    971 		return NULL;
    972 	}
    973 	bo_va->vm = vm;
    974 	bo_va->bo = bo;
    975 	bo_va->ref_count = 1;
    976 	INIT_LIST_HEAD(&bo_va->bo_list);
    977 	INIT_LIST_HEAD(&bo_va->valids);
    978 	INIT_LIST_HEAD(&bo_va->invalids);
    979 	INIT_LIST_HEAD(&bo_va->vm_status);
    980 	mutex_init(&bo_va->mutex);
    981 	list_add_tail(&bo_va->bo_list, &bo->va);
    982 
    983 	return bo_va;
    984 }
    985 
    986 /**
    987  * amdgpu_vm_bo_map - map bo inside a vm
    988  *
    989  * @adev: amdgpu_device pointer
    990  * @bo_va: bo_va to store the address
    991  * @saddr: where to map the BO
    992  * @offset: requested offset in the BO
         * @size: size of the area to map in bytes
    993  * @flags: attributes of pages (read/write/valid/etc.)
    994  *
    995  * Add a mapping of the BO at the specified addr into the VM.
    996  * Returns 0 for success, error for failure.
    997  *
    998  * Object has to be reserved and unreserved outside!
    999  */
   1000 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
   1001 		     struct amdgpu_bo_va *bo_va,
   1002 		     uint64_t saddr, uint64_t offset,
   1003 		     uint64_t size, uint32_t flags)
   1004 {
   1005 	struct amdgpu_bo_va_mapping *mapping;
   1006 	struct amdgpu_vm *vm = bo_va->vm;
   1007 	struct interval_tree_node *it;
   1008 	unsigned last_pfn, pt_idx;
   1009 	uint64_t eaddr;
   1010 	int r;
   1011 
   1012 	/* validate the parameters */
   1013 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
   1014 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
   1015 		return -EINVAL;
   1016 
   1017 	/* make sure object fit at this offset */
   1018 	eaddr = saddr + size - 1;
   1019 	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
   1020 		return -EINVAL;
   1021 
   1022 	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
   1023 	if (last_pfn >= adev->vm_manager.max_pfn) {
   1024 		dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
   1025 			last_pfn, adev->vm_manager.max_pfn);
   1026 		return -EINVAL;
   1027 	}
   1028 
   1029 	saddr /= AMDGPU_GPU_PAGE_SIZE;
   1030 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
   1031 
   1032 	spin_lock(&vm->it_lock);
   1033 	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
   1034 	spin_unlock(&vm->it_lock);
   1035 	if (it) {
   1036 		struct amdgpu_bo_va_mapping *tmp;
   1037 		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
   1038 		/* bo and tmp overlap, invalid addr */
   1039 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
   1040 			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
   1041 			tmp->it.start, tmp->it.last + 1);
   1042 		r = -EINVAL;
   1043 		goto error;
   1044 	}
   1045 
   1046 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
   1047 	if (!mapping) {
   1048 		r = -ENOMEM;
   1049 		goto error;
   1050 	}
   1051 
   1052 	INIT_LIST_HEAD(&mapping->list);
   1053 	mapping->it.start = saddr;
   1054 	mapping->it.last = eaddr;
   1055 	mapping->offset = offset;
   1056 	mapping->flags = flags;
   1057 
   1058 	mutex_lock(&bo_va->mutex);
   1059 	list_add(&mapping->list, &bo_va->invalids);
   1060 	mutex_unlock(&bo_va->mutex);
   1061 	spin_lock(&vm->it_lock);
   1062 	interval_tree_insert(&mapping->it, &vm->va);
   1063 	spin_unlock(&vm->it_lock);
   1064 	trace_amdgpu_vm_bo_map(bo_va, mapping);
   1065 
   1066 	/* Make sure the page tables are allocated */
   1067 	saddr >>= amdgpu_vm_block_size;
   1068 	eaddr >>= amdgpu_vm_block_size;
   1069 
   1070 	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));
   1071 
   1072 	if (eaddr > vm->max_pde_used)
   1073 		vm->max_pde_used = eaddr;
   1074 
   1075 	/* walk over the address space and allocate the page tables */
   1076 	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
   1077 		struct reservation_object *resv = vm->page_directory->tbo.resv;
   1078 		struct amdgpu_bo *pt;
   1079 
   1080 		if (vm->page_tables[pt_idx].bo)
   1081 			continue;
   1082 
   1083 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
   1084 				     AMDGPU_GPU_PAGE_SIZE, true,
   1085 				     AMDGPU_GEM_DOMAIN_VRAM,
   1086 				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
   1087 				     NULL, resv, &pt);
   1088 		if (r)
   1089 			goto error_free;
   1090 
   1091 		/* Keep a reference to the page table to avoid freeing
   1092 		 * them up in the wrong order.
   1093 		 */
   1094 		pt->parent = amdgpu_bo_ref(vm->page_directory);
   1095 
   1096 		r = amdgpu_vm_clear_bo(adev, pt);
   1097 		if (r) {
   1098 			amdgpu_bo_unref(&pt);
   1099 			goto error_free;
   1100 		}
   1101 
   1102 		vm->page_tables[pt_idx].addr = 0;
   1103 		vm->page_tables[pt_idx].bo = pt;
   1104 	}
   1105 
   1106 	return 0;
   1107 
   1108 error_free:
   1109 	list_del(&mapping->list);
   1110 	spin_lock(&vm->it_lock);
   1111 	interval_tree_remove(&mapping->it, &vm->va);
   1112 	spin_unlock(&vm->it_lock);
   1113 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
   1114 	kfree(mapping);
   1115 
   1116 error:
   1117 	return r;
   1118 }
   1119 
   1120 /**
   1121  * amdgpu_vm_bo_unmap - remove bo mapping from vm
   1122  *
   1123  * @adev: amdgpu_device pointer
   1124  * @bo_va: bo_va to remove the address from
   1125  * @saddr: where the BO is mapped
   1126  *
   1127  * Remove a mapping of the BO at the specified addr from the VM.
   1128  * Returns 0 for success, error for failure.
   1129  *
   1130  * Object has to be reserved and unreserved outside!
   1131  */
   1132 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
   1133 		       struct amdgpu_bo_va *bo_va,
   1134 		       uint64_t saddr)
   1135 {
   1136 	struct amdgpu_bo_va_mapping *mapping;
   1137 	struct amdgpu_vm *vm = bo_va->vm;
   1138 	bool valid = true;
   1139 
   1140 	saddr /= AMDGPU_GPU_PAGE_SIZE;
   1141 	mutex_lock(&bo_va->mutex);
   1142 	list_for_each_entry(mapping, &bo_va->valids, list) {
   1143 		if (mapping->it.start == saddr)
   1144 			break;
   1145 	}
   1146 
   1147 	if (&mapping->list == &bo_va->valids) {
   1148 		valid = false;
   1149 
   1150 		list_for_each_entry(mapping, &bo_va->invalids, list) {
   1151 			if (mapping->it.start == saddr)
   1152 				break;
   1153 		}
   1154 
   1155 		if (&mapping->list == &bo_va->invalids) {
   1156 			mutex_unlock(&bo_va->mutex);
   1157 			return -ENOENT;
   1158 		}
   1159 	}
   1160 	mutex_unlock(&bo_va->mutex);
   1161 	list_del(&mapping->list);
   1162 	spin_lock(&vm->it_lock);
   1163 	interval_tree_remove(&mapping->it, &vm->va);
   1164 	spin_unlock(&vm->it_lock);
   1165 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
   1166 
   1167 	if (valid) {
   1168 		spin_lock(&vm->freed_lock);
   1169 		list_add(&mapping->list, &vm->freed);
   1170 		spin_unlock(&vm->freed_lock);
   1171 	} else {
   1172 		kfree(mapping);
   1173 	}
   1174 
   1175 	return 0;
   1176 }
   1177 
   1178 /**
   1179  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
   1180  *
   1181  * @adev: amdgpu_device pointer
   1182  * @bo_va: requested bo_va
   1183  *
   1184  * Remove @bo_va->bo from the requested vm (cayman+).
   1185  *
   1186  * Object has to be reserved!
   1187  */
   1188 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
   1189 		      struct amdgpu_bo_va *bo_va)
   1190 {
   1191 	struct amdgpu_bo_va_mapping *mapping, *next;
   1192 	struct amdgpu_vm *vm = bo_va->vm;
   1193 
   1194 	list_del(&bo_va->bo_list);
   1195 
   1196 	spin_lock(&vm->status_lock);
   1197 	list_del(&bo_va->vm_status);
   1198 	spin_unlock(&vm->status_lock);
   1199 
   1200 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
   1201 		list_del(&mapping->list);
   1202 		spin_lock(&vm->it_lock);
   1203 		interval_tree_remove(&mapping->it, &vm->va);
   1204 		spin_unlock(&vm->it_lock);
   1205 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
   1206 		spin_lock(&vm->freed_lock);
   1207 		list_add(&mapping->list, &vm->freed);
   1208 		spin_unlock(&vm->freed_lock);
   1209 	}
   1210 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
   1211 		list_del(&mapping->list);
   1212 		spin_lock(&vm->it_lock);
   1213 		interval_tree_remove(&mapping->it, &vm->va);
   1214 		spin_unlock(&vm->it_lock);
   1215 		kfree(mapping);
   1216 	}
   1217 	fence_put(bo_va->last_pt_update);
   1218 	mutex_destroy(&bo_va->mutex);
   1219 	kfree(bo_va);
   1220 }
   1221 
   1222 /**
   1223  * amdgpu_vm_bo_invalidate - mark the bo as invalid
   1224  *
   1225  * @adev: amdgpu_device pointer
   1227  * @bo: amdgpu buffer object
   1228  *
   1229  * Mark @bo as invalid (cayman+).
   1230  */
   1231 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
   1232 			     struct amdgpu_bo *bo)
   1233 {
   1234 	struct amdgpu_bo_va *bo_va;
   1235 
   1236 	list_for_each_entry(bo_va, &bo->va, bo_list) {
   1237 		spin_lock(&bo_va->vm->status_lock);
   1238 		if (list_empty(&bo_va->vm_status))
   1239 			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
   1240 		spin_unlock(&bo_va->vm->status_lock);
   1241 	}
   1242 }
   1243 
   1244 /**
   1245  * amdgpu_vm_init - initialize a vm instance
   1246  *
   1247  * @adev: amdgpu_device pointer
   1248  * @vm: requested vm
   1249  *
   1250  * Init @vm fields (cayman+).
   1251  */
   1252 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
   1253 {
   1254 	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
   1255 		AMDGPU_VM_PTE_COUNT * 8);
   1256 	unsigned pd_size, pd_entries;
   1257 	int i, r;
   1258 
   1259 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
   1260 		vm->ids[i].id = 0;
   1261 		vm->ids[i].flushed_updates = NULL;
   1262 	}
   1263 	vm->va = RB_ROOT;
   1264 	spin_lock_init(&vm->status_lock);
   1265 	INIT_LIST_HEAD(&vm->invalidated);
   1266 	INIT_LIST_HEAD(&vm->cleared);
   1267 	INIT_LIST_HEAD(&vm->freed);
   1268 	spin_lock_init(&vm->it_lock);
   1269 	spin_lock_init(&vm->freed_lock);
   1270 	pd_size = amdgpu_vm_directory_size(adev);
   1271 	pd_entries = amdgpu_vm_num_pdes(adev);
   1272 
   1273 	/* allocate page table array */
   1274 	vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
   1275 	if (vm->page_tables == NULL) {
   1276 		DRM_ERROR("Cannot allocate memory for page table array\n");
   1277 		return -ENOMEM;
   1278 	}
   1279 
   1280 	vm->page_directory_fence = NULL;
   1281 
   1282 	r = amdgpu_bo_create(adev, pd_size, align, true,
   1283 			     AMDGPU_GEM_DOMAIN_VRAM,
   1284 			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
   1285 			     NULL, NULL, &vm->page_directory);
   1286 	if (r)
   1287 		return r;
   1288 	r = amdgpu_bo_reserve(vm->page_directory, false);
   1289 	if (r) {
   1290 		amdgpu_bo_unref(&vm->page_directory);
   1291 		vm->page_directory = NULL;
   1292 		return r;
   1293 	}
   1294 	r = amdgpu_vm_clear_bo(adev, vm->page_directory);
   1295 	amdgpu_bo_unreserve(vm->page_directory);
   1296 	if (r) {
   1297 		amdgpu_bo_unref(&vm->page_directory);
   1298 		vm->page_directory = NULL;
   1299 		return r;
   1300 	}
   1301 
   1302 	return 0;
   1303 }
   1304 
   1305 /**
   1306  * amdgpu_vm_fini - tear down a vm instance
   1307  *
   1308  * @adev: amdgpu_device pointer
   1309  * @vm: requested vm
   1310  *
   1311  * Tear down @vm (cayman+).
   1312  * Unbind the VM and remove all bos from the vm bo list
   1313  */
   1314 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
   1315 {
   1316 	struct amdgpu_bo_va_mapping *mapping, *tmp;
   1317 	int i;
   1318 
   1319 	if (!RB_EMPTY_ROOT(&vm->va)) {
   1320 		dev_err(adev->dev, "still active bo inside vm\n");
   1321 	}
   1322 	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
   1323 		list_del(&mapping->list);
   1324 		interval_tree_remove(&mapping->it, &vm->va);
   1325 		kfree(mapping);
   1326 	}
   1327 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
   1328 		list_del(&mapping->list);
   1329 		kfree(mapping);
   1330 	}
   1331 
   1332 	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
   1333 		amdgpu_bo_unref(&vm->page_tables[i].bo);
   1334 	drm_free_large(vm->page_tables);
   1335 
   1336 	amdgpu_bo_unref(&vm->page_directory);
   1337 	fence_put(vm->page_directory_fence);
   1338 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
   1339 		unsigned id = vm->ids[i].id;
   1340 
   1341 		atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
   1342 				    (long)vm, 0);
   1343 		fence_put(vm->ids[i].flushed_updates);
   1344 	}
   1345 
   1346 }
   1347 
   1348 /**
   1349  * amdgpu_vm_manager_fini - cleanup VM manager
   1350  *
   1351  * @adev: amdgpu_device pointer
   1352  *
   1353  * Cleanup the VM manager and free resources.
   1354  */
   1355 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
   1356 {
   1357 	unsigned i;
   1358 
   1359 	for (i = 0; i < AMDGPU_NUM_VM; ++i)
   1360 		fence_put(adev->vm_manager.ids[i].active);
   1361 }
   1362