1 /* $NetBSD: amdgpu_vm.c,v 1.1.1.2 2021/12/18 20:11:13 riastradh Exp $ */
2
3 /*
4 * Copyright 2008 Advanced Micro Devices, Inc.
5 * Copyright 2008 Red Hat Inc.
6 * Copyright 2009 Jerome Glisse.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
24 * OTHER DEALINGS IN THE SOFTWARE.
25 *
26 * Authors: Dave Airlie
27 * Alex Deucher
28 * Jerome Glisse
29 */
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: amdgpu_vm.c,v 1.1.1.2 2021/12/18 20:11:13 riastradh Exp $");
32
33 #include <linux/dma-fence-array.h>
34 #include <linux/interval_tree_generic.h>
35 #include <linux/idr.h>
36
37 #include <drm/amdgpu_drm.h>
38 #include "amdgpu.h"
39 #include "amdgpu_trace.h"
40 #include "amdgpu_amdkfd.h"
41 #include "amdgpu_gmc.h"
42 #include "amdgpu_xgmi.h"
43
44 /**
45 * DOC: GPUVM
46 *
47 * GPUVM is similar to the legacy GART on older ASICs; however,
48 * rather than there being a single global gart table
49 * for the entire GPU, there are multiple VM page tables active
50 * at any given time. The VM page tables can contain a mix of
51 * VRAM pages and system memory pages, and system memory pages
52 * can be mapped as snooped (cached system pages) or unsnooped
53 * (uncached system pages).
54 * Each VM has an ID associated with it and there is a page table
55 * associated with each VMID. When executing a command buffer,
56 * the kernel tells the ring what VMID to use for that command
57 * buffer. VMIDs are allocated dynamically as commands are submitted.
58 * The userspace drivers maintain their own address space and the kernel
59 * sets up their page tables accordingly when they submit their
60 * command buffers and a VMID is assigned.
61 * Cayman/Trinity support up to 8 active VMs at any given time;
62 * SI supports 16.
63 */
64
65 #define START(node) ((node)->start)
66 #define LAST(node) ((node)->last)
67
68 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
69 START, LAST, static, amdgpu_vm_it)
70
71 #undef START
72 #undef LAST
73
74 /**
75 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
76 */
77 struct amdgpu_prt_cb {
78
79 /**
80 * @adev: amdgpu device
81 */
82 struct amdgpu_device *adev;
83
84 /**
85 * @cb: callback
86 */
87 struct dma_fence_cb cb;
88 };
89
90 /**
91 * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
92 * happens while holding this lock anywhere to prevent deadlocks when
93 * an MMU notifier runs in reclaim-FS context.
94 */
95 static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
96 {
97 mutex_lock(&vm->eviction_lock);
98 vm->saved_flags = memalloc_nofs_save();
99 }
100
101 static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
102 {
103 if (mutex_trylock(&vm->eviction_lock)) {
104 vm->saved_flags = memalloc_nofs_save();
105 return 1;
106 }
107 return 0;
108 }
109
110 static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
111 {
112 memalloc_nofs_restore(vm->saved_flags);
113 mutex_unlock(&vm->eviction_lock);
114 }
115
116 /**
117 * amdgpu_vm_level_shift - return the addr shift for each level
118 *
119 * @adev: amdgpu_device pointer
120 * @level: VMPT level
121 *
122 * Returns:
123 * The number of bits the pfn needs to be right shifted for a level.
124 */
125 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
126 unsigned level)
127 {
128 unsigned shift = 0xff;
129
130 switch (level) {
131 case AMDGPU_VM_PDB2:
132 case AMDGPU_VM_PDB1:
133 case AMDGPU_VM_PDB0:
134 shift = 9 * (AMDGPU_VM_PDB0 - level) +
135 adev->vm_manager.block_size;
136 break;
137 case AMDGPU_VM_PTB:
138 shift = 0;
139 break;
140 default:
141 dev_err(adev->dev, "the level%d isn't supported.\n", level);
142 }
143
144 return shift;
145 }
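/*
 * Worked example (illustrative only, assuming a hypothetical configuration
 * with root_level == AMDGPU_VM_PDB2 and a block_size of 9): the shifts
 * computed above come out as
 *
 *	AMDGPU_VM_PDB2: 9 * 2 + 9 = 27
 *	AMDGPU_VM_PDB1: 9 * 1 + 9 = 18
 *	AMDGPU_VM_PDB0: 9 * 0 + 9 =  9
 *	AMDGPU_VM_PTB:  0
 *
 * so a GPU pfn is decoded as a PDB2 index from bit 27 upwards, PDB1 from
 * bits 26..18, PDB0 from bits 17..9 and PTB from bits 8..0.
 */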
146
147 /**
148 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
149 *
150 * @adev: amdgpu_device pointer
151 * @level: VMPT level
152 *
153 * Returns:
154 * The number of entries in a page directory or page table.
155 */
156 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
157 unsigned level)
158 {
159 unsigned shift = amdgpu_vm_level_shift(adev,
160 adev->vm_manager.root_level);
161
162 if (level == adev->vm_manager.root_level)
163 /* For the root directory */
164 return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
165 >> shift;
166 else if (level != AMDGPU_VM_PTB)
167 /* Everything in between */
168 return 512;
169 else
170 /* For the page tables on the leaves */
171 return AMDGPU_VM_PTE_COUNT(adev);
172 }
173
174 /**
175 * amdgpu_vm_num_ats_entries - return the number of ATS entries in the root PD
176 *
177 * @adev: amdgpu_device pointer
178 *
179 * Returns:
180 * The number of entries in the root page directory which need the ATS setting.
181 */
182 static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device *adev)
183 {
184 unsigned shift;
185
186 shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level);
187 return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
188 }
189
190 /**
191 * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
192 *
193 * @adev: amdgpu_device pointer
194 * @level: VMPT level
195 *
196 * Returns:
197 * The mask to extract the entry number of a PD/PT from an address.
198 */
199 static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev,
200 unsigned int level)
201 {
202 if (level <= adev->vm_manager.root_level)
203 return 0xffffffff;
204 else if (level != AMDGPU_VM_PTB)
205 return 0x1ff;
206 else
207 return AMDGPU_VM_PTE_COUNT(adev) - 1;
208 }
209
210 /**
211 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
212 *
213 * @adev: amdgpu_device pointer
214 * @level: VMPT level
215 *
216 * Returns:
217 * The size of the BO for a page directory or page table in bytes.
218 */
219 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
220 {
221 return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
222 }
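/*
 * For example (illustrative): a level with 512 entries of 8 bytes each needs
 * a 4096 byte BO, which AMDGPU_GPU_PAGE_ALIGN leaves unchanged since it is
 * already GPU page aligned.
 */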
223
224 /**
225 * amdgpu_vm_bo_evicted - vm_bo is evicted
226 *
227 * @vm_bo: vm_bo which is evicted
228 *
229 * State for PDs/PTs and per VM BOs which are not at the location they should
230 * be.
231 */
232 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
233 {
234 struct amdgpu_vm *vm = vm_bo->vm;
235 struct amdgpu_bo *bo = vm_bo->bo;
236
237 vm_bo->moved = true;
238 if (bo->tbo.type == ttm_bo_type_kernel)
239 list_move(&vm_bo->vm_status, &vm->evicted);
240 else
241 list_move_tail(&vm_bo->vm_status, &vm->evicted);
242 }
243
244 /**
245 * amdgpu_vm_bo_relocated - vm_bo is relocated
246 *
247 * @vm_bo: vm_bo which is relocated
248 *
249 * State for PDs/PTs which need to update their parent PD.
250 */
251 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
252 {
253 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
254 }
255
256 /**
257 * amdgpu_vm_bo_moved - vm_bo is moved
258 *
259 * @vm_bo: vm_bo which is moved
260 *
261 * State for per VM BOs which are moved, but that change is not yet reflected
262 * in the page tables.
263 */
264 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
265 {
266 list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
267 }
268
269 /**
270 * amdgpu_vm_bo_idle - vm_bo is idle
271 *
272 * @vm_bo: vm_bo which is now idle
273 *
274 * State for PDs/PTs and per VM BOs which have gone through the state machine
275 * and are now idle.
276 */
277 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
278 {
279 list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
280 vm_bo->moved = false;
281 }
282
283 /**
284 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
285 *
286 * @vm_bo: vm_bo which is now invalidated
287 *
288 * State for normal BOs which are invalidated and that change is not yet reflected
289 * in the PTs.
290 */
291 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
292 {
293 spin_lock(&vm_bo->vm->invalidated_lock);
294 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
295 spin_unlock(&vm_bo->vm->invalidated_lock);
296 }
297
298 /**
299 * amdgpu_vm_bo_done - vm_bo is done
300 *
301 * @vm_bo: vm_bo which is now done
302 *
303 * State for normal BOs which are invalidated and that change has been updated
304 * in the PTs.
305 */
306 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
307 {
308 spin_lock(&vm_bo->vm->invalidated_lock);
309 list_del_init(&vm_bo->vm_status);
310 spin_unlock(&vm_bo->vm->invalidated_lock);
311 }
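/*
 * Informal summary of the per-BO states managed by the helpers above (a
 * sketch based on their documentation, not an authoritative diagram):
 *
 *	evicted     - PDs/PTs and per-VM BOs that are not where they should be
 *	relocated   - PDs/PTs whose parent PD still needs updating
 *	moved       - per-VM BOs moved, change not yet in the page tables
 *	idle        - BOs that have gone through the state machine
 *	invalidated - normal BOs invalidated, change not yet in the PTs
 *	done        - normal BOs whose change has been written to the PTs
 *
 * amdgpu_vm_validate_pt_bos(), amdgpu_vm_update_pdes() and
 * amdgpu_vm_bo_update() below move BOs between these states.
 */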
312
313 /**
314 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
315 *
316 * @base: base structure for tracking BO usage in a VM
317 * @vm: vm to which bo is to be added
318 * @bo: amdgpu buffer object
319 *
320 * Initialize a bo_va_base structure and add it to the appropriate lists
321 *
322 */
323 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
324 struct amdgpu_vm *vm,
325 struct amdgpu_bo *bo)
326 {
327 base->vm = vm;
328 base->bo = bo;
329 base->next = NULL;
330 INIT_LIST_HEAD(&base->vm_status);
331
332 if (!bo)
333 return;
334 base->next = bo->vm_bo;
335 bo->vm_bo = base;
336
337 if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
338 return;
339
340 vm->bulk_moveable = false;
341 if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
342 amdgpu_vm_bo_relocated(base);
343 else
344 amdgpu_vm_bo_idle(base);
345
346 if (bo->preferred_domains &
347 amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
348 return;
349
350 /*
351 * We checked all the prerequisites, but it looks like this per VM BO
352 * is currently evicted. Add the BO to the evicted list to make sure it
353 * is validated on next VM use to avoid a fault.
354 */
355 amdgpu_vm_bo_evicted(base);
356 }
357
358 /**
359 * amdgpu_vm_pt_parent - get the parent page directory
360 *
361 * @pt: child page table
362 *
363 * Helper to get the parent entry for the child page table. NULL if we are at
364 * the root page directory.
365 */
366 static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
367 {
368 struct amdgpu_bo *parent = pt->base.bo->parent;
369
370 if (!parent)
371 return NULL;
372
373 return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
374 }
375
376 /*
377 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
378 */
379 struct amdgpu_vm_pt_cursor {
380 uint64_t pfn;
381 struct amdgpu_vm_pt *parent;
382 struct amdgpu_vm_pt *entry;
383 unsigned level;
384 };
385
386 /**
387 * amdgpu_vm_pt_start - start PD/PT walk
388 *
389 * @adev: amdgpu_device pointer
390 * @vm: amdgpu_vm structure
391 * @start: start address of the walk
392 * @cursor: state to initialize
393 *
394 * Initialize an amdgpu_vm_pt_cursor to start a walk.
395 */
396 static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
397 struct amdgpu_vm *vm, uint64_t start,
398 struct amdgpu_vm_pt_cursor *cursor)
399 {
400 cursor->pfn = start;
401 cursor->parent = NULL;
402 cursor->entry = &vm->root;
403 cursor->level = adev->vm_manager.root_level;
404 }
405
406 /**
407 * amdgpu_vm_pt_descendant - go to child node
408 *
409 * @adev: amdgpu_device pointer
410 * @cursor: current state
411 *
412 * Walk to the child node of the current node.
413 * Returns:
414 * True if the walk was possible, false otherwise.
415 */
416 static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
417 struct amdgpu_vm_pt_cursor *cursor)
418 {
419 unsigned mask, shift, idx;
420
421 if (!cursor->entry->entries)
422 return false;
423
424 BUG_ON(!cursor->entry->base.bo);
425 mask = amdgpu_vm_entries_mask(adev, cursor->level);
426 shift = amdgpu_vm_level_shift(adev, cursor->level);
427
428 ++cursor->level;
429 idx = (cursor->pfn >> shift) & mask;
430 cursor->parent = cursor->entry;
431 cursor->entry = &cursor->entry->entries[idx];
432 return true;
433 }
434
435 /**
436 * amdgpu_vm_pt_sibling - go to sibling node
437 *
438 * @adev: amdgpu_device pointer
439 * @cursor: current state
440 *
441 * Walk to the sibling node of the current node.
442 * Returns:
443 * True if the walk was possible, false otherwise.
444 */
445 static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
446 struct amdgpu_vm_pt_cursor *cursor)
447 {
448 unsigned shift, num_entries;
449
450 /* Root doesn't have a sibling */
451 if (!cursor->parent)
452 return false;
453
454 /* Go to our parents and see if we got a sibling */
455 shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
456 num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);
457
458 if (cursor->entry == &cursor->parent->entries[num_entries - 1])
459 return false;
460
461 cursor->pfn += 1ULL << shift;
462 cursor->pfn &= ~((1ULL << shift) - 1);
463 ++cursor->entry;
464 return true;
465 }
466
467 /**
468 * amdgpu_vm_pt_ancestor - go to parent node
469 *
470 * @cursor: current state
471 *
472 * Walk to the parent node of the current node.
473 * Returns:
474 * True if the walk was possible, false otherwise.
475 */
476 static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
477 {
478 if (!cursor->parent)
479 return false;
480
481 --cursor->level;
482 cursor->entry = cursor->parent;
483 cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
484 return true;
485 }
486
487 /**
488 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
489 *
490 * @adev: amdgpu_device pointer
491 * @cursor: current state
492 *
493 * Walk the PD/PT tree to the next node.
494 */
495 static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
496 struct amdgpu_vm_pt_cursor *cursor)
497 {
498 /* First try a newborn child */
499 if (amdgpu_vm_pt_descendant(adev, cursor))
500 return;
501
502 /* If that didn't work, try to find a sibling */
503 while (!amdgpu_vm_pt_sibling(adev, cursor)) {
504 /* No sibling, go to our parents and grandparents */
505 if (!amdgpu_vm_pt_ancestor(cursor)) {
506 cursor->pfn = ~0ll;
507 return;
508 }
509 }
510 }
511
512 /**
513 * amdgpu_vm_pt_first_dfs - start a depth-first search
514 *
515 * @adev: amdgpu_device structure
516 * @vm: amdgpu_vm structure
517 * @start: optional cursor to start with
518 * @cursor: state to initialize
519 *
520 * Starts a depth-first traversal of the PD/PT tree.
521 */
522 static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
523 struct amdgpu_vm *vm,
524 struct amdgpu_vm_pt_cursor *start,
525 struct amdgpu_vm_pt_cursor *cursor)
526 {
527 if (start)
528 *cursor = *start;
529 else
530 amdgpu_vm_pt_start(adev, vm, 0, cursor);
531 while (amdgpu_vm_pt_descendant(adev, cursor));
532 }
533
534 /**
535 * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
536 *
537 * @start: starting point for the search
538 * @entry: current entry
539 *
540 * Returns:
541 * True when the search should continue, false otherwise.
542 */
543 static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
544 struct amdgpu_vm_pt *entry)
545 {
546 return entry && (!start || entry != start->entry);
547 }
548
549 /**
550 * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
551 *
552 * @adev: amdgpu_device structure
553 * @cursor: current state
554 *
555 * Move the cursor to the next node in a depth-first search.
556 */
557 static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
558 struct amdgpu_vm_pt_cursor *cursor)
559 {
560 if (!cursor->entry)
561 return;
562
563 if (!cursor->parent)
564 cursor->entry = NULL;
565 else if (amdgpu_vm_pt_sibling(adev, cursor))
566 while (amdgpu_vm_pt_descendant(adev, cursor));
567 else
568 amdgpu_vm_pt_ancestor(cursor);
569 }
570
571 /*
572 * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
573 */
574 #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \
575 for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)), \
576 (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
577 amdgpu_vm_pt_continue_dfs((start), (entry)); \
578 (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
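/*
 * Usage sketch (mirrors how the macro is used below, e.g. in
 * amdgpu_vm_free_pts() and amdgpu_vm_invalidate_pds()):
 *
 *	struct amdgpu_vm_pt_cursor cursor;
 *	struct amdgpu_vm_pt *entry;
 *
 *	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
 *		amdgpu_vm_free_table(entry);
 *
 * The cursor is advanced before the loop body runs, which is what makes it
 * safe to free @entry inside the body.
 */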
579
580 /**
581 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
582 *
583 * @vm: vm providing the BOs
584 * @validated: head of validation list
585 * @entry: entry to add
586 *
587 * Add the page directory to the list of BOs to
588 * validate for command submission.
589 */
590 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
591 struct list_head *validated,
592 struct amdgpu_bo_list_entry *entry)
593 {
594 entry->priority = 0;
595 entry->tv.bo = &vm->root.base.bo->tbo;
596 /* One for TTM and one for the CS job */
597 entry->tv.num_shared = 2;
598 entry->user_pages = NULL;
599 list_add(&entry->tv.head, validated);
600 }
601
602 /**
603 * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
604 *
605 * @bo: BO which was removed from the LRU
606 *
607 * Make sure the bulk_moveable flag is updated when a BO is removed from the
608 * LRU.
609 */
610 void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
611 {
612 struct amdgpu_bo *abo;
613 struct amdgpu_vm_bo_base *bo_base;
614
615 if (!amdgpu_bo_is_amdgpu_bo(bo))
616 return;
617
618 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
619 return;
620
621 abo = ttm_to_amdgpu_bo(bo);
622 if (!abo->parent)
623 return;
624 for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
625 struct amdgpu_vm *vm = bo_base->vm;
626
627 if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
628 vm->bulk_moveable = false;
629 }
630
631 }
632 /**
633 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
634 *
635 * @adev: amdgpu device pointer
636 * @vm: vm providing the BOs
637 *
638 * Move all BOs to the end of LRU and remember their positions to put them
639 * together.
640 */
641 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
642 struct amdgpu_vm *vm)
643 {
644 struct amdgpu_vm_bo_base *bo_base;
645
646 if (vm->bulk_moveable) {
647 spin_lock(&ttm_bo_glob.lru_lock);
648 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
649 spin_unlock(&ttm_bo_glob.lru_lock);
650 return;
651 }
652
653 memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
654
655 spin_lock(&ttm_bo_glob.lru_lock);
656 list_for_each_entry(bo_base, &vm->idle, vm_status) {
657 struct amdgpu_bo *bo = bo_base->bo;
658
659 if (!bo->parent)
660 continue;
661
662 ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
663 if (bo->shadow)
664 ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
665 &vm->lru_bulk_move);
666 }
667 spin_unlock(&ttm_bo_glob.lru_lock);
668
669 vm->bulk_moveable = true;
670 }
671
672 /**
673 * amdgpu_vm_validate_pt_bos - validate the page table BOs
674 *
675 * @adev: amdgpu device pointer
676 * @vm: vm providing the BOs
677 * @validate: callback to do the validation
678 * @param: parameter for the validation callback
679 *
680 * Validate the page table BOs on command submission if necessary.
681 *
682 * Returns:
683 * Validation result.
684 */
685 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
686 int (*validate)(void *p, struct amdgpu_bo *bo),
687 void *param)
688 {
689 struct amdgpu_vm_bo_base *bo_base, *tmp;
690 int r;
691
692 vm->bulk_moveable &= list_empty(&vm->evicted);
693
694 list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
695 struct amdgpu_bo *bo = bo_base->bo;
696
697 r = validate(param, bo);
698 if (r)
699 return r;
700
701 if (bo->tbo.type != ttm_bo_type_kernel) {
702 amdgpu_vm_bo_moved(bo_base);
703 } else {
704 vm->update_funcs->map_table(bo);
705 if (bo->parent)
706 amdgpu_vm_bo_relocated(bo_base);
707 else
708 amdgpu_vm_bo_idle(bo_base);
709 }
710 }
711
712 amdgpu_vm_eviction_lock(vm);
713 vm->evicting = false;
714 amdgpu_vm_eviction_unlock(vm);
715
716 return 0;
717 }
718
719 /**
720 * amdgpu_vm_ready - check VM is ready for updates
721 *
722 * @vm: VM to check
723 *
724 * Check if all VM PDs/PTs are ready for updates
725 *
726 * Returns:
727 * True if eviction list is empty.
728 */
729 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
730 {
731 return list_empty(&vm->evicted);
732 }
733
734 /**
735 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
736 *
737 * @adev: amdgpu_device pointer
738 * @vm: VM to clear BO from
739 * @bo: BO to clear
740 * @direct: use a direct update
741 *
742 * Root PD needs to be reserved when calling this.
743 *
744 * Returns:
745 * 0 on success, errno otherwise.
746 */
747 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
748 struct amdgpu_vm *vm,
749 struct amdgpu_bo *bo,
750 bool direct)
751 {
752 struct ttm_operation_ctx ctx = { true, false };
753 unsigned level = adev->vm_manager.root_level;
754 struct amdgpu_vm_update_params params;
755 struct amdgpu_bo *ancestor = bo;
756 unsigned entries, ats_entries;
757 uint64_t addr;
758 int r;
759
760 /* Figure out our place in the hierarchy */
761 if (ancestor->parent) {
762 ++level;
763 while (ancestor->parent->parent) {
764 ++level;
765 ancestor = ancestor->parent;
766 }
767 }
768
769 entries = amdgpu_bo_size(bo) / 8;
770 if (!vm->pte_support_ats) {
771 ats_entries = 0;
772
773 } else if (!bo->parent) {
774 ats_entries = amdgpu_vm_num_ats_entries(adev);
775 ats_entries = min(ats_entries, entries);
776 entries -= ats_entries;
777
778 } else {
779 struct amdgpu_vm_pt *pt;
780
781 pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base);
782 ats_entries = amdgpu_vm_num_ats_entries(adev);
783 if ((pt - vm->root.entries) >= ats_entries) {
784 ats_entries = 0;
785 } else {
786 ats_entries = entries;
787 entries = 0;
788 }
789 }
790
791 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
792 if (r)
793 return r;
794
795 if (bo->shadow) {
796 r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
797 &ctx);
798 if (r)
799 return r;
800 }
801
802 r = vm->update_funcs->map_table(bo);
803 if (r)
804 return r;
805
806 memset(&params, 0, sizeof(params));
807 params.adev = adev;
808 params.vm = vm;
809 params.direct = direct;
810
811 r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
812 if (r)
813 return r;
814
815 addr = 0;
816 if (ats_entries) {
817 uint64_t value = 0, flags;
818
819 flags = AMDGPU_PTE_DEFAULT_ATC;
820 if (level != AMDGPU_VM_PTB) {
821 /* Handle leaf PDEs as PTEs */
822 flags |= AMDGPU_PDE_PTE;
823 amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
824 }
825
826 r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
827 value, flags);
828 if (r)
829 return r;
830
831 addr += ats_entries * 8;
832 }
833
834 if (entries) {
835 uint64_t value = 0, flags = 0;
836
837 if (adev->asic_type >= CHIP_VEGA10) {
838 if (level != AMDGPU_VM_PTB) {
839 /* Handle leaf PDEs as PTEs */
840 flags |= AMDGPU_PDE_PTE;
841 amdgpu_gmc_get_vm_pde(adev, level,
842 &value, &flags);
843 } else {
844 /* Workaround for fault priority problem on GMC9 */
845 flags = AMDGPU_PTE_EXECUTABLE;
846 }
847 }
848
849 r = vm->update_funcs->update(&params, bo, addr, 0, entries,
850 value, flags);
851 if (r)
852 return r;
853 }
854
855 return vm->update_funcs->commit(&params, NULL);
856 }
857
858 /**
859 * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
860 *
861 * @adev: amdgpu_device pointer
862 * @vm: requesting vm
863 * @level: the page table level
864 * @direct: use a direct update
865 * @bp: resulting BO allocation parameters
866 */
867 static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
868 int level, bool direct,
869 struct amdgpu_bo_param *bp)
870 {
871 memset(bp, 0, sizeof(*bp));
872
873 bp->size = amdgpu_vm_bo_size(adev, level);
874 bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
875 bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
876 bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
877 bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
878 AMDGPU_GEM_CREATE_CPU_GTT_USWC;
879 if (vm->use_cpu_for_update)
880 bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
881 else if (!vm->root.base.bo || vm->root.base.bo->shadow)
882 bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
883 bp->type = ttm_bo_type_kernel;
884 bp->no_wait_gpu = direct;
885 if (vm->root.base.bo)
886 bp->resv = vm->root.base.bo->tbo.base.resv;
887 }
888
889 /**
890 * amdgpu_vm_alloc_pts - Allocate a specific page table
891 *
892 * @adev: amdgpu_device pointer
893 * @vm: VM to allocate page tables for
894 * @cursor: Which page table to allocate
895 * @direct: use a direct update
896 *
897 * Make sure a specific page table or directory is allocated.
898 *
899 * Returns:
900 * 0 if the page table was allocated or was already present, negative errno
901 * if an error occurred.
902 */
903 static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
904 struct amdgpu_vm *vm,
905 struct amdgpu_vm_pt_cursor *cursor,
906 bool direct)
907 {
908 struct amdgpu_vm_pt *entry = cursor->entry;
909 struct amdgpu_bo_param bp;
910 struct amdgpu_bo *pt;
911 int r;
912
913 if (cursor->level < AMDGPU_VM_PTB && !entry->entries) {
914 unsigned num_entries;
915
916 num_entries = amdgpu_vm_num_entries(adev, cursor->level);
917 entry->entries = kvmalloc_array(num_entries,
918 sizeof(*entry->entries),
919 GFP_KERNEL | __GFP_ZERO);
920 if (!entry->entries)
921 return -ENOMEM;
922 }
923
924 if (entry->base.bo)
925 return 0;
926
927 amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp);
928
929 r = amdgpu_bo_create(adev, &bp, &pt);
930 if (r)
931 return r;
932
933 /* Keep a reference to the root directory to avoid
934 * freeing them up in the wrong order.
935 */
936 pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
937 amdgpu_vm_bo_base_init(&entry->base, vm, pt);
938
939 r = amdgpu_vm_clear_bo(adev, vm, pt, direct);
940 if (r)
941 goto error_free_pt;
942
943 return 0;
944
945 error_free_pt:
946 amdgpu_bo_unref(&pt->shadow);
947 amdgpu_bo_unref(&pt);
948 return r;
949 }
950
951 /**
952 * amdgpu_vm_free_table - free one PD/PT
953 *
954 * @entry: PDE to free
955 */
956 static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
957 {
958 if (entry->base.bo) {
959 entry->base.bo->vm_bo = NULL;
960 list_del(&entry->base.vm_status);
961 amdgpu_bo_unref(&entry->base.bo->shadow);
962 amdgpu_bo_unref(&entry->base.bo);
963 }
964 kvfree(entry->entries);
965 entry->entries = NULL;
966 }
967
968 /**
969 * amdgpu_vm_free_pts - free PD/PT levels
970 *
971 * @adev: amdgpu device structure
972 * @vm: amdgpu vm structure
973 * @start: optional cursor where to start freeing PDs/PTs
974 *
975 * Free the page directory or page table level and all sub levels.
976 */
977 static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
978 struct amdgpu_vm *vm,
979 struct amdgpu_vm_pt_cursor *start)
980 {
981 struct amdgpu_vm_pt_cursor cursor;
982 struct amdgpu_vm_pt *entry;
983
984 vm->bulk_moveable = false;
985
986 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
987 amdgpu_vm_free_table(entry);
988
989 if (start)
990 amdgpu_vm_free_table(start->entry);
991 }
992
993 /**
994 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
995 *
996 * @adev: amdgpu_device pointer
997 */
998 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
999 {
1000 const struct amdgpu_ip_block *ip_block;
1001 bool has_compute_vm_bug;
1002 struct amdgpu_ring *ring;
1003 int i;
1004
1005 has_compute_vm_bug = false;
1006
1007 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
1008 if (ip_block) {
1009 /* Compute has a VM bug for GFX version < 7.
1010 Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
1011 if (ip_block->version->major <= 7)
1012 has_compute_vm_bug = true;
1013 else if (ip_block->version->major == 8)
1014 if (adev->gfx.mec_fw_version < 673)
1015 has_compute_vm_bug = true;
1016 }
1017
1018 for (i = 0; i < adev->num_rings; i++) {
1019 ring = adev->rings[i];
1020 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
1021 /* only compute rings */
1022 ring->has_compute_vm_bug = has_compute_vm_bug;
1023 else
1024 ring->has_compute_vm_bug = false;
1025 }
1026 }
1027
1028 /**
1029 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
1030 *
1031 * @ring: ring on which the job will be submitted
1032 * @job: job to submit
1033 *
1034 * Returns:
1035 * True if sync is needed.
1036 */
1037 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
1038 struct amdgpu_job *job)
1039 {
1040 struct amdgpu_device *adev = ring->adev;
1041 unsigned vmhub = ring->funcs->vmhub;
1042 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
1043 struct amdgpu_vmid *id;
1044 bool gds_switch_needed;
1045 bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
1046
1047 if (job->vmid == 0)
1048 return false;
1049 id = &id_mgr->ids[job->vmid];
1050 gds_switch_needed = ring->funcs->emit_gds_switch && (
1051 id->gds_base != job->gds_base ||
1052 id->gds_size != job->gds_size ||
1053 id->gws_base != job->gws_base ||
1054 id->gws_size != job->gws_size ||
1055 id->oa_base != job->oa_base ||
1056 id->oa_size != job->oa_size);
1057
1058 if (amdgpu_vmid_had_gpu_reset(adev, id))
1059 return true;
1060
1061 return vm_flush_needed || gds_switch_needed;
1062 }
1063
1064 /**
1065 * amdgpu_vm_flush - hardware flush the vm
1066 *
1067 * @ring: ring to use for flush
1068 * @job: related job
1069 * @need_pipe_sync: is pipe sync needed
1070 *
1071 * Emit a VM flush when it is necessary.
1072 *
1073 * Returns:
1074 * 0 on success, errno otherwise.
1075 */
1076 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
1077 bool need_pipe_sync)
1078 {
1079 struct amdgpu_device *adev = ring->adev;
1080 unsigned vmhub = ring->funcs->vmhub;
1081 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
1082 struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
1083 bool gds_switch_needed = ring->funcs->emit_gds_switch && (
1084 id->gds_base != job->gds_base ||
1085 id->gds_size != job->gds_size ||
1086 id->gws_base != job->gws_base ||
1087 id->gws_size != job->gws_size ||
1088 id->oa_base != job->oa_base ||
1089 id->oa_size != job->oa_size);
1090 bool vm_flush_needed = job->vm_needs_flush;
1091 struct dma_fence *fence = NULL;
1092 bool pasid_mapping_needed = false;
1093 unsigned patch_offset = 0;
1094 int r;
1095
1096 if (amdgpu_vmid_had_gpu_reset(adev, id)) {
1097 gds_switch_needed = true;
1098 vm_flush_needed = true;
1099 pasid_mapping_needed = true;
1100 }
1101
1102 mutex_lock(&id_mgr->lock);
1103 if (id->pasid != job->pasid || !id->pasid_mapping ||
1104 !dma_fence_is_signaled(id->pasid_mapping))
1105 pasid_mapping_needed = true;
1106 mutex_unlock(&id_mgr->lock);
1107
1108 gds_switch_needed &= !!ring->funcs->emit_gds_switch;
1109 vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
1110 job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
1111 pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
1112 ring->funcs->emit_wreg;
1113
1114 if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
1115 return 0;
1116
1117 if (ring->funcs->init_cond_exec)
1118 patch_offset = amdgpu_ring_init_cond_exec(ring);
1119
1120 if (need_pipe_sync)
1121 amdgpu_ring_emit_pipeline_sync(ring);
1122
1123 if (vm_flush_needed) {
1124 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
1125 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
1126 }
1127
1128 if (pasid_mapping_needed)
1129 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
1130
1131 if (vm_flush_needed || pasid_mapping_needed) {
1132 r = amdgpu_fence_emit(ring, &fence, 0);
1133 if (r)
1134 return r;
1135 }
1136
1137 if (vm_flush_needed) {
1138 mutex_lock(&id_mgr->lock);
1139 dma_fence_put(id->last_flush);
1140 id->last_flush = dma_fence_get(fence);
1141 id->current_gpu_reset_count =
1142 atomic_read(&adev->gpu_reset_counter);
1143 mutex_unlock(&id_mgr->lock);
1144 }
1145
1146 if (pasid_mapping_needed) {
1147 mutex_lock(&id_mgr->lock);
1148 id->pasid = job->pasid;
1149 dma_fence_put(id->pasid_mapping);
1150 id->pasid_mapping = dma_fence_get(fence);
1151 mutex_unlock(&id_mgr->lock);
1152 }
1153 dma_fence_put(fence);
1154
1155 if (ring->funcs->emit_gds_switch && gds_switch_needed) {
1156 id->gds_base = job->gds_base;
1157 id->gds_size = job->gds_size;
1158 id->gws_base = job->gws_base;
1159 id->gws_size = job->gws_size;
1160 id->oa_base = job->oa_base;
1161 id->oa_size = job->oa_size;
1162 amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
1163 job->gds_size, job->gws_base,
1164 job->gws_size, job->oa_base,
1165 job->oa_size);
1166 }
1167
1168 if (ring->funcs->patch_cond_exec)
1169 amdgpu_ring_patch_cond_exec(ring, patch_offset);
1170
1171 /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
1172 if (ring->funcs->emit_switch_buffer) {
1173 amdgpu_ring_emit_switch_buffer(ring);
1174 amdgpu_ring_emit_switch_buffer(ring);
1175 }
1176 return 0;
1177 }
1178
1179 /**
1180 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
1181 *
1182 * @vm: requested vm
1183 * @bo: requested buffer object
1184 *
1185 * Find @bo inside the requested vm.
1186 * Search inside the @bo's vm list for the requested vm
1187 * Returns the found bo_va or NULL if none is found
1188 *
1189 * Object has to be reserved!
1190 *
1191 * Returns:
1192 * Found bo_va or NULL.
1193 */
1194 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
1195 struct amdgpu_bo *bo)
1196 {
1197 struct amdgpu_vm_bo_base *base;
1198
1199 for (base = bo->vm_bo; base; base = base->next) {
1200 if (base->vm != vm)
1201 continue;
1202
1203 return container_of(base, struct amdgpu_bo_va, base);
1204 }
1205 return NULL;
1206 }
1207
1208 /**
1209 * amdgpu_vm_map_gart - Resolve gart mapping of addr
1210 *
1211 * @pages_addr: optional DMA address to use for lookup
1212 * @addr: the unmapped addr
1213 *
1214 * Look up the physical address of the page that the pte resolves
1215 * to.
1216 *
1217 * Returns:
1218 * The pointer for the page table entry.
1219 */
1220 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
1221 {
1222 uint64_t result;
1223
1224 /* page table offset */
1225 result = pages_addr[addr >> PAGE_SHIFT];
1226
1227 /* in case cpu page size != gpu page size*/
1228 result |= addr & (~PAGE_MASK);
1229
1230 result &= 0xFFFFFFFFFFFFF000ULL;
1231
1232 return result;
1233 }
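/*
 * Illustrative example (assuming a 64KiB CPU PAGE_SIZE, i.e. CPU pages bigger
 * than the 4KiB GPU pages): for addr == 0x12345 the code above looks up
 * pages_addr[0x1], ORs in the in-page offset 0x2345 and then masks off the
 * low 12 bits, yielding the 4KiB GPU page that starts 0x2000 bytes into that
 * CPU page. With a 4KiB CPU PAGE_SIZE the offset bits are masked away again
 * and the plain DMA address of the page is returned.
 */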
1234
1235 /**
1236 * amdgpu_vm_update_pde - update a single level in the hierarchy
1237 *
1238 * @params: parameters for the update
1239 * @vm: requested vm
1240 * @entry: entry to update
1241 *
1242 * Makes sure the requested entry in parent is up to date.
1243 */
1244 static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
1245 struct amdgpu_vm *vm,
1246 struct amdgpu_vm_pt *entry)
1247 {
1248 struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry);
1249 struct amdgpu_bo *bo = parent->base.bo, *pbo;
1250 uint64_t pde, pt, flags;
1251 unsigned level;
1252
1253 for (level = 0, pbo = bo->parent; pbo; ++level)
1254 pbo = pbo->parent;
1255
1256 level += params->adev->vm_manager.root_level;
1257 amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
1258 pde = (entry - parent->entries) * 8;
1259 return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
1260 }
1261
1262 /**
1263 * amdgpu_vm_invalidate_pds - mark all PDs as invalid
1264 *
1265 * @adev: amdgpu_device pointer
1266 * @vm: related vm
1267 *
1268 * Mark all PD levels as invalid after an error.
1269 */
1270 static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
1271 struct amdgpu_vm *vm)
1272 {
1273 struct amdgpu_vm_pt_cursor cursor;
1274 struct amdgpu_vm_pt *entry;
1275
1276 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
1277 if (entry->base.bo && !entry->base.moved)
1278 amdgpu_vm_bo_relocated(&entry->base);
1279 }
1280
1281 /**
1282 * amdgpu_vm_update_pdes - make sure that all directories are valid
1283 *
1284 * @adev: amdgpu_device pointer
1285 * @vm: requested vm
1286 * @direct: submit directly to the paging queue
1287 *
1288 * Makes sure all directories are up to date.
1289 *
1290 * Returns:
1291 * 0 for success, error for failure.
1292 */
1293 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
1294 struct amdgpu_vm *vm, bool direct)
1295 {
1296 struct amdgpu_vm_update_params params;
1297 int r;
1298
1299 if (list_empty(&vm->relocated))
1300 return 0;
1301
1302 memset(&params, 0, sizeof(params));
1303 params.adev = adev;
1304 params.vm = vm;
1305 params.direct = direct;
1306
1307 r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
1308 if (r)
1309 return r;
1310
1311 while (!list_empty(&vm->relocated)) {
1312 struct amdgpu_vm_pt *entry;
1313
1314 entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
1315 base.vm_status);
1316 amdgpu_vm_bo_idle(&entry->base);
1317
1318 r = amdgpu_vm_update_pde(&params, vm, entry);
1319 if (r)
1320 goto error;
1321 }
1322
1323 r = vm->update_funcs->commit(&params, &vm->last_update);
1324 if (r)
1325 goto error;
1326 return 0;
1327
1328 error:
1329 amdgpu_vm_invalidate_pds(adev, vm);
1330 return r;
1331 }
1332
1333 /*
1334 * amdgpu_vm_update_flags - figure out flags for PTE updates
1335 *
1336 * Make sure to set the right flags for the PTEs at the desired level.
1337 */
1338 static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
1339 struct amdgpu_bo *bo, unsigned level,
1340 uint64_t pe, uint64_t addr,
1341 unsigned count, uint32_t incr,
1342 uint64_t flags)
1343
1344 {
1345 if (level != AMDGPU_VM_PTB) {
1346 flags |= AMDGPU_PDE_PTE;
1347 amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);
1348
1349 } else if (params->adev->asic_type >= CHIP_VEGA10 &&
1350 !(flags & AMDGPU_PTE_VALID) &&
1351 !(flags & AMDGPU_PTE_PRT)) {
1352
1353 /* Workaround for fault priority problem on GMC9 */
1354 flags |= AMDGPU_PTE_EXECUTABLE;
1355 }
1356
1357 params->vm->update_funcs->update(params, bo, pe, addr, count, incr,
1358 flags);
1359 }
1360
1361 /**
1362 * amdgpu_vm_fragment - get fragment for PTEs
1363 *
1364 * @params: see amdgpu_vm_update_params definition
1365 * @start: first PTE to handle
1366 * @end: last PTE to handle
1367 * @flags: hw mapping flags
1368 * @frag: resulting fragment size
1369 * @frag_end: end of this fragment
1370 *
1371 * Returns the first possible fragment for the start and end address.
1372 */
1373 static void amdgpu_vm_fragment(struct amdgpu_vm_update_params *params,
1374 uint64_t start, uint64_t end, uint64_t flags,
1375 unsigned int *frag, uint64_t *frag_end)
1376 {
1377 /**
1378 * The MC L1 TLB supports variable sized pages, based on a fragment
1379 * field in the PTE. When this field is set to a non-zero value, page
1380 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1381 * flags are considered valid for all PTEs within the fragment range
1382 * and corresponding mappings are assumed to be physically contiguous.
1383 *
1384 * The L1 TLB can store a single PTE for the whole fragment,
1385 * significantly increasing the space available for translation
1386 * caching. This leads to large improvements in throughput when the
1387 * TLB is under pressure.
1388 *
1389 * The L2 TLB distributes small and large fragments into two
1390 * asymmetric partitions. The large fragment cache is significantly
1391 * larger. Thus, we try to use large fragments wherever possible.
1392 * Userspace can support this by aligning virtual base address and
1393 * allocation size to the fragment size.
1394 *
1395 * Starting with Vega10 the fragment size only controls the L1. The L2
1396 * is now directly fed with small/huge/giant pages from the walker.
1397 */
1398 unsigned max_frag;
1399
1400 if (params->adev->asic_type < CHIP_VEGA10)
1401 max_frag = params->adev->vm_manager.fragment_size;
1402 else
1403 max_frag = 31;
1404
1405 /* system pages are not contiguous */
1406 if (params->pages_addr) {
1407 *frag = 0;
1408 *frag_end = end;
1409 return;
1410 }
1411
1412 /* This intentionally wraps around if no bit is set */
1413 *frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1);
1414 if (*frag >= max_frag) {
1415 *frag = max_frag;
1416 *frag_end = end & ~((1ULL << max_frag) - 1);
1417 } else {
1418 *frag_end = start + (1 << *frag);
1419 }
1420 }
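/*
 * Worked example (illustrative, assuming no pages_addr and the Vega10+
 * max_frag of 31): for start == 0x2200 and end == 0x2a00 (GPU pfns),
 * ffs(start) - 1 == 9 and fls64(end - start) - 1 == 11, so *frag becomes 9
 * and *frag_end becomes start + (1 << 9) == 0x2400. The first 512 PTEs are
 * therefore written with AMDGPU_PTE_FRAG(9), i.e. a 1 << (12 + 9) == 2MiB
 * fragment, before amdgpu_vm_update_ptes() recomputes the fragment.
 */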
1421
1422 /**
1423 * amdgpu_vm_update_ptes - make sure that page tables are valid
1424 *
1425 * @params: see amdgpu_vm_update_params definition
1426 * @start: start of GPU address range
1427 * @end: end of GPU address range
1428 * @dst: destination address to map to, the next dst inside the function
1429 * @flags: mapping flags
1430 *
1431 * Update the page tables in the range @start - @end.
1432 *
1433 * Returns:
1434 * 0 for success, -EINVAL for failure.
1435 */
1436 static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
1437 uint64_t start, uint64_t end,
1438 uint64_t dst, uint64_t flags)
1439 {
1440 struct amdgpu_device *adev = params->adev;
1441 struct amdgpu_vm_pt_cursor cursor;
1442 uint64_t frag_start = start, frag_end;
1443 unsigned int frag;
1444 int r;
1445
1446 /* figure out the initial fragment */
1447 amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end);
1448
1449 /* walk over the address space and update the PTs */
1450 amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
1451 while (cursor.pfn < end) {
1452 unsigned shift, parent_shift, mask;
1453 uint64_t incr, entry_end, pe_start;
1454 struct amdgpu_bo *pt;
1455
1456 /* make sure that the page tables covering the address range are
1457 * actually allocated
1458 */
1459 r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor,
1460 params->direct);
1461 if (r)
1462 return r;
1463
1464 pt = cursor.entry->base.bo;
1465
1466 /* The root level can't be a huge page */
1467 if (cursor.level == adev->vm_manager.root_level) {
1468 if (!amdgpu_vm_pt_descendant(adev, &cursor))
1469 return -ENOENT;
1470 continue;
1471 }
1472
1473 shift = amdgpu_vm_level_shift(adev, cursor.level);
1474 parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
1475 if (adev->asic_type < CHIP_VEGA10 &&
1476 (flags & AMDGPU_PTE_VALID)) {
1477 /* No huge page support before GMC v9 */
1478 if (cursor.level != AMDGPU_VM_PTB) {
1479 if (!amdgpu_vm_pt_descendant(adev, &cursor))
1480 return -ENOENT;
1481 continue;
1482 }
1483 } else if (frag < shift) {
1484 /* We can't use this level when the fragment size is
1485 * smaller than the address shift. Go to the next
1486 * child entry and try again.
1487 */
1488 if (!amdgpu_vm_pt_descendant(adev, &cursor))
1489 return -ENOENT;
1490 continue;
1491 } else if (frag >= parent_shift &&
1492 cursor.level - 1 != adev->vm_manager.root_level) {
1493 /* If the fragment size is even larger than the parent
1494 * shift we should go up one level and check it again
1495 * unless one level up is the root level.
1496 */
1497 if (!amdgpu_vm_pt_ancestor(&cursor))
1498 return -ENOENT;
1499 continue;
1500 }
1501
1502 /* Looks good so far, calculate parameters for the update */
1503 incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
1504 mask = amdgpu_vm_entries_mask(adev, cursor.level);
1505 pe_start = ((cursor.pfn >> shift) & mask) * 8;
1506 entry_end = (uint64_t)(mask + 1) << shift;
1507 entry_end += cursor.pfn & ~(entry_end - 1);
1508 entry_end = min(entry_end, end);
1509
1510 do {
1511 uint64_t upd_end = min(entry_end, frag_end);
1512 unsigned nptes = (upd_end - frag_start) >> shift;
1513
1514 amdgpu_vm_update_flags(params, pt, cursor.level,
1515 pe_start, dst, nptes, incr,
1516 flags | AMDGPU_PTE_FRAG(frag));
1517
1518 pe_start += nptes * 8;
1519 dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
1520
1521 frag_start = upd_end;
1522 if (frag_start >= frag_end) {
1523 /* figure out the next fragment */
1524 amdgpu_vm_fragment(params, frag_start, end,
1525 flags, &frag, &frag_end);
1526 if (frag < shift)
1527 break;
1528 }
1529 } while (frag_start < entry_end);
1530
1531 if (amdgpu_vm_pt_descendant(adev, &cursor)) {
1532 /* Free all child entries.
1533 * Update the tables with the flags and addresses and free up subsequent
1534 * tables in the case of huge pages or freed up areas.
1535 * This is the maximum you can free, because all other page tables are not
1536 * completely covered by the range and so potentially still in use.
1537 */
1538 while (cursor.pfn < frag_start) {
1539 amdgpu_vm_free_pts(adev, params->vm, &cursor);
1540 amdgpu_vm_pt_next(adev, &cursor);
1541 }
1542
1543 } else if (frag >= shift) {
1544 /* or just move on to the next on the same level. */
1545 amdgpu_vm_pt_next(adev, &cursor);
1546 }
1547 }
1548
1549 return 0;
1550 }
1551
1552 /**
1553 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1554 *
1555 * @adev: amdgpu_device pointer
1556 * @vm: requested vm
1557 * @direct: direct submission in a page fault
1558 * @exclusive: fence we need to sync to
1559 * @start: start of mapped range
1560 * @last: last mapped entry
1561 * @flags: flags for the entries
1562 * @addr: addr to set the area to
1563 * @pages_addr: DMA addresses to use for mapping
1564 * @fence: optional resulting fence
1565 *
1566 * Fill in the page table entries between @start and @last.
1567 *
1568 * Returns:
1569 * 0 for success, -EINVAL for failure.
1570 */
1571 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1572 struct amdgpu_vm *vm, bool direct,
1573 struct dma_fence *exclusive,
1574 uint64_t start, uint64_t last,
1575 uint64_t flags, uint64_t addr,
1576 dma_addr_t *pages_addr,
1577 struct dma_fence **fence)
1578 {
1579 struct amdgpu_vm_update_params params;
1580 void *owner = AMDGPU_FENCE_OWNER_VM;
1581 int r;
1582
1583 memset(&params, 0, sizeof(params));
1584 params.adev = adev;
1585 params.vm = vm;
1586 params.direct = direct;
1587 params.pages_addr = pages_addr;
1588
1589 /* sync to everything except eviction fences on unmapping */
1590 if (!(flags & AMDGPU_PTE_VALID))
1591 owner = AMDGPU_FENCE_OWNER_KFD;
1592
1593 amdgpu_vm_eviction_lock(vm);
1594 if (vm->evicting) {
1595 r = -EBUSY;
1596 goto error_unlock;
1597 }
1598
1599 r = vm->update_funcs->prepare(&params, owner, exclusive);
1600 if (r)
1601 goto error_unlock;
1602
1603 r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
1604 if (r)
1605 goto error_unlock;
1606
1607 r = vm->update_funcs->commit(&params, fence);
1608
1609 error_unlock:
1610 amdgpu_vm_eviction_unlock(vm);
1611 return r;
1612 }
1613
1614 /**
1615 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1616 *
1617 * @adev: amdgpu_device pointer
1618 * @exclusive: fence we need to sync to
1619 * @pages_addr: DMA addresses to use for mapping
1620 * @vm: requested vm
1621 * @mapping: mapped range and flags to use for the update
1622 * @flags: HW flags for the mapping
1623 * @bo_adev: amdgpu_device pointer of the device the BO was actually allocated on
1624 * @nodes: array of drm_mm_nodes with the MC addresses
1625 * @fence: optional resulting fence
1626 *
1627 * Split the mapping into smaller chunks so that each update fits
1628 * into a SDMA IB.
1629 *
1630 * Returns:
1631 * 0 for success, -EINVAL for failure.
1632 */
1633 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1634 struct dma_fence *exclusive,
1635 dma_addr_t *pages_addr,
1636 struct amdgpu_vm *vm,
1637 struct amdgpu_bo_va_mapping *mapping,
1638 uint64_t flags,
1639 struct amdgpu_device *bo_adev,
1640 struct drm_mm_node *nodes,
1641 struct dma_fence **fence)
1642 {
1643 unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1644 uint64_t pfn, start = mapping->start;
1645 int r;
1646
1647 /* Normally, bo_va->flags only contains the READABLE and WRITEABLE bits,
1648 * but just in case, we filter the flags here first.
1649 */
1650 if (!(mapping->flags & AMDGPU_PTE_READABLE))
1651 flags &= ~AMDGPU_PTE_READABLE;
1652 if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1653 flags &= ~AMDGPU_PTE_WRITEABLE;
1654
1655 /* Apply ASIC specific mapping flags */
1656 amdgpu_gmc_get_vm_pte(adev, mapping, &flags);
1657
1658 trace_amdgpu_vm_bo_update(mapping);
1659
1660 pfn = mapping->offset >> PAGE_SHIFT;
1661 if (nodes) {
1662 while (pfn >= nodes->size) {
1663 pfn -= nodes->size;
1664 ++nodes;
1665 }
1666 }
1667
1668 do {
1669 dma_addr_t *dma_addr = NULL;
1670 uint64_t max_entries;
1671 uint64_t addr, last;
1672
1673 if (nodes) {
1674 addr = nodes->start << PAGE_SHIFT;
1675 max_entries = (nodes->size - pfn) *
1676 AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1677 } else {
1678 addr = 0;
1679 max_entries = S64_MAX;
1680 }
1681
1682 if (pages_addr) {
1683 uint64_t count;
1684
1685 for (count = 1;
1686 count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1687 ++count) {
1688 uint64_t idx = pfn + count;
1689
1690 if (pages_addr[idx] !=
1691 (pages_addr[idx - 1] + PAGE_SIZE))
1692 break;
1693 }
1694
1695 if (count < min_linear_pages) {
1696 addr = pfn << PAGE_SHIFT;
1697 dma_addr = pages_addr;
1698 } else {
1699 addr = pages_addr[pfn];
1700 max_entries = count *
1701 AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1702 }
1703
1704 } else if (flags & AMDGPU_PTE_VALID) {
1705 addr += bo_adev->vm_manager.vram_base_offset;
1706 addr += pfn << PAGE_SHIFT;
1707 }
1708
1709 last = min((uint64_t)mapping->last, start + max_entries - 1);
1710 r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
1711 start, last, flags, addr,
1712 dma_addr, fence);
1713 if (r)
1714 return r;
1715
1716 pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1717 if (nodes && nodes->size == pfn) {
1718 pfn = 0;
1719 ++nodes;
1720 }
1721 start = last + 1;
1722
1723 } while (unlikely(start != mapping->last + 1));
1724
1725 return 0;
1726 }
1727
1728 /**
1729 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1730 *
1731 * @adev: amdgpu_device pointer
1732 * @bo_va: requested BO and VM object
1733 * @clear: if true clear the entries
1734 *
1735 * Fill in the page table entries for @bo_va.
1736 *
1737 * Returns:
1738 * 0 for success, -EINVAL for failure.
1739 */
1740 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1741 bool clear)
1742 {
1743 struct amdgpu_bo *bo = bo_va->base.bo;
1744 struct amdgpu_vm *vm = bo_va->base.vm;
1745 struct amdgpu_bo_va_mapping *mapping;
1746 dma_addr_t *pages_addr = NULL;
1747 struct ttm_mem_reg *mem;
1748 struct drm_mm_node *nodes;
1749 struct dma_fence *exclusive, **last_update;
1750 uint64_t flags;
1751 struct amdgpu_device *bo_adev = adev;
1752 int r;
1753
1754 if (clear || !bo) {
1755 mem = NULL;
1756 nodes = NULL;
1757 exclusive = NULL;
1758 } else {
1759 struct ttm_dma_tt *ttm;
1760
1761 mem = &bo->tbo.mem;
1762 nodes = mem->mm_node;
1763 if (mem->mem_type == TTM_PL_TT) {
1764 ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
1765 pages_addr = ttm->dma_address;
1766 }
1767 exclusive = bo->tbo.moving;
1768 }
1769
1770 if (bo) {
1771 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1772 bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1773 } else {
1774 flags = 0x0;
1775 }
1776
1777 if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv))
1778 last_update = &vm->last_update;
1779 else
1780 last_update = &bo_va->last_pt_update;
1781
1782 if (!clear && bo_va->base.moved) {
1783 bo_va->base.moved = false;
1784 list_splice_init(&bo_va->valids, &bo_va->invalids);
1785
1786 } else if (bo_va->cleared != clear) {
1787 list_splice_init(&bo_va->valids, &bo_va->invalids);
1788 }
1789
1790 list_for_each_entry(mapping, &bo_va->invalids, list) {
1791 r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
1792 mapping, flags, bo_adev, nodes,
1793 last_update);
1794 if (r)
1795 return r;
1796 }
1797
1798 /* If the BO is not in its preferred location add it back to
1799 * the evicted list so that it gets validated again on the
1800 * next command submission.
1801 */
1802 if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
1803 uint32_t mem_type = bo->tbo.mem.mem_type;
1804
1805 if (!(bo->preferred_domains &
1806 amdgpu_mem_type_to_domain(mem_type)))
1807 amdgpu_vm_bo_evicted(&bo_va->base);
1808 else
1809 amdgpu_vm_bo_idle(&bo_va->base);
1810 } else {
1811 amdgpu_vm_bo_done(&bo_va->base);
1812 }
1813
1814 list_splice_init(&bo_va->invalids, &bo_va->valids);
1815 bo_va->cleared = clear;
1816
1817 if (trace_amdgpu_vm_bo_mapping_enabled()) {
1818 list_for_each_entry(mapping, &bo_va->valids, list)
1819 trace_amdgpu_vm_bo_mapping(mapping);
1820 }
1821
1822 return 0;
1823 }
1824
1825 /**
1826 * amdgpu_vm_update_prt_state - update the global PRT state
1827 *
1828 * @adev: amdgpu_device pointer
1829 */
1830 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1831 {
1832 unsigned long flags;
1833 bool enable;
1834
1835 spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1836 enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1837 adev->gmc.gmc_funcs->set_prt(adev, enable);
1838 spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1839 }
1840
1841 /**
1842 * amdgpu_vm_prt_get - add a PRT user
1843 *
1844 * @adev: amdgpu_device pointer
1845 */
1846 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1847 {
1848 if (!adev->gmc.gmc_funcs->set_prt)
1849 return;
1850
1851 if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1852 amdgpu_vm_update_prt_state(adev);
1853 }
1854
1855 /**
1856 * amdgpu_vm_prt_put - drop a PRT user
1857 *
1858 * @adev: amdgpu_device pointer
1859 */
1860 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1861 {
1862 if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1863 amdgpu_vm_update_prt_state(adev);
1864 }
1865
1866 /**
1867 * amdgpu_vm_prt_cb - callback for updating the PRT status
1868 *
1869 * @fence: fence for the callback
1870 * @_cb: the callback function
1871 */
1872 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1873 {
1874 struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1875
1876 amdgpu_vm_prt_put(cb->adev);
1877 kfree(cb);
1878 }
1879
1880 /**
1881 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1882 *
1883 * @adev: amdgpu_device pointer
1884 * @fence: fence for the callback
1885 */
1886 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1887 struct dma_fence *fence)
1888 {
1889 struct amdgpu_prt_cb *cb;
1890
1891 if (!adev->gmc.gmc_funcs->set_prt)
1892 return;
1893
1894 cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1895 if (!cb) {
1896 /* Last resort when we are OOM */
1897 if (fence)
1898 dma_fence_wait(fence, false);
1899
1900 amdgpu_vm_prt_put(adev);
1901 } else {
1902 cb->adev = adev;
1903 if (!fence || dma_fence_add_callback(fence, &cb->cb,
1904 amdgpu_vm_prt_cb))
1905 amdgpu_vm_prt_cb(fence, &cb->cb);
1906 }
1907 }
1908
1909 /**
1910 * amdgpu_vm_free_mapping - free a mapping
1911 *
1912 * @adev: amdgpu_device pointer
1913 * @vm: requested vm
1914 * @mapping: mapping to be freed
1915 * @fence: fence of the unmap operation
1916 *
1917 * Free a mapping and make sure we decrease the PRT usage count if applicable.
1918 */
1919 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1920 struct amdgpu_vm *vm,
1921 struct amdgpu_bo_va_mapping *mapping,
1922 struct dma_fence *fence)
1923 {
1924 if (mapping->flags & AMDGPU_PTE_PRT)
1925 amdgpu_vm_add_prt_cb(adev, fence);
1926 kfree(mapping);
1927 }
1928
1929 /**
1930 * amdgpu_vm_prt_fini - finish all prt mappings
1931 *
1932 * @adev: amdgpu_device pointer
1933 * @vm: requested vm
1934 *
1935 * Register a cleanup callback to disable PRT support after VM dies.
1936 */
1937 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1938 {
1939 struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
1940 struct dma_fence *excl, **shared;
1941 unsigned i, shared_count;
1942 int r;
1943
1944 r = dma_resv_get_fences_rcu(resv, &excl,
1945 &shared_count, &shared);
1946 if (r) {
1947 /* Not enough memory to grab the fence list, as last resort
1948 * block for all the fences to complete.
1949 */
1950 dma_resv_wait_timeout_rcu(resv, true, false,
1951 MAX_SCHEDULE_TIMEOUT);
1952 return;
1953 }
1954
1955 /* Add a callback for each fence in the reservation object */
1956 amdgpu_vm_prt_get(adev);
1957 amdgpu_vm_add_prt_cb(adev, excl);
1958
1959 for (i = 0; i < shared_count; ++i) {
1960 amdgpu_vm_prt_get(adev);
1961 amdgpu_vm_add_prt_cb(adev, shared[i]);
1962 }
1963
1964 kfree(shared);
1965 }
1966
1967 /**
1968 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1969 *
1970 * @adev: amdgpu_device pointer
1971 * @vm: requested vm
1972 * @fence: optional resulting fence (unchanged if no work needed to be done
1973 * or if an error occurred)
1974 *
1975 * Make sure all freed BOs are cleared in the PT.
1976 * PTs have to be reserved and mutex must be locked!
1977 *
1978 * Returns:
1979 * 0 for success.
1980 *
1981 */
1982 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1983 struct amdgpu_vm *vm,
1984 struct dma_fence **fence)
1985 {
1986 struct amdgpu_bo_va_mapping *mapping;
1987 uint64_t init_pte_value = 0;
1988 struct dma_fence *f = NULL;
1989 int r;
1990
1991 while (!list_empty(&vm->freed)) {
1992 mapping = list_first_entry(&vm->freed,
1993 struct amdgpu_bo_va_mapping, list);
1994 list_del(&mapping->list);
1995
1996 if (vm->pte_support_ats &&
1997 mapping->start < AMDGPU_GMC_HOLE_START)
1998 init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1999
2000 r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL,
2001 mapping->start, mapping->last,
2002 init_pte_value, 0, NULL, &f);
2003 amdgpu_vm_free_mapping(adev, vm, mapping, f);
2004 if (r) {
2005 dma_fence_put(f);
2006 return r;
2007 }
2008 }
2009
2010 if (fence && f) {
2011 dma_fence_put(*fence);
2012 *fence = f;
2013 } else {
2014 dma_fence_put(f);
2015 }
2016
2017 return 0;
2018
2019 }
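/*
 * Sketch of a typical caller flow, for illustration only (assumes the VM
 * page tables are already reserved, e.g. under a CS ticket; error handling
 * and use of the returned fence trimmed):
 *
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_vm_clear_freed(adev, vm, &fence);   // zap freed mappings
 *	if (!r)
 *		r = amdgpu_vm_handle_moved(adev, vm);  // update moved BOs below
 *	dma_fence_put(fence);
 */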
2020
2021 /**
2022 * amdgpu_vm_handle_moved - handle moved BOs in the PT
2023 *
2024 * @adev: amdgpu_device pointer
2025 * @vm: requested vm
2026 *
2027 * Make sure all BOs which are moved are updated in the PTs.
2028 *
2029 * Returns:
2030 * 0 for success.
2031 *
2032 * PTs have to be reserved!
2033 */
2034 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
2035 struct amdgpu_vm *vm)
2036 {
2037 struct amdgpu_bo_va *bo_va, *tmp;
2038 struct dma_resv *resv;
2039 bool clear;
2040 int r;
2041
2042 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2043 /* Per VM BOs never need to be cleared in the page tables */
2044 r = amdgpu_vm_bo_update(adev, bo_va, false);
2045 if (r)
2046 return r;
2047 }
2048
2049 spin_lock(&vm->invalidated_lock);
2050 while (!list_empty(&vm->invalidated)) {
2051 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
2052 base.vm_status);
2053 resv = bo_va->base.bo->tbo.base.resv;
2054 spin_unlock(&vm->invalidated_lock);
2055
2056 /* Try to reserve the BO to avoid clearing its ptes */
2057 if (!amdgpu_vm_debug && dma_resv_trylock(resv))
2058 clear = false;
2059 /* Somebody else is using the BO right now */
2060 else
2061 clear = true;
2062
2063 r = amdgpu_vm_bo_update(adev, bo_va, clear);
2064 if (r)
2065 return r;
2066
2067 if (!clear)
2068 dma_resv_unlock(resv);
2069 spin_lock(&vm->invalidated_lock);
2070 }
2071 spin_unlock(&vm->invalidated_lock);
2072
2073 return 0;
2074 }
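/*
 * Summary of the per-BO decision made above, for illustration:
 *
 *	dma_resv_trylock(resv) succeeds -> clear = false: write the BO's new
 *	                                   location into the page tables
 *	BO is locked by someone else    -> clear = true: only zap the stale
 *	                                   PTEs now; the real update happens
 *	                                   once the BO can be reserved again
 */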
2075
2076 /**
2077 * amdgpu_vm_bo_add - add a bo to a specific vm
2078 *
2079 * @adev: amdgpu_device pointer
2080 * @vm: requested vm
2081 * @bo: amdgpu buffer object
2082 *
2083 * Add @bo into the requested vm and add it to the list of bos
2084 * associated with the vm.
2085 *
2086 * Returns:
2087 * Newly added bo_va or NULL for failure
2088 *
2089 * Object has to be reserved!
2090 */
2091 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2092 struct amdgpu_vm *vm,
2093 struct amdgpu_bo *bo)
2094 {
2095 struct amdgpu_bo_va *bo_va;
2096
2097 bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
2098 if (bo_va == NULL) {
2099 return NULL;
2100 }
2101 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
2102
2103 bo_va->ref_count = 1;
2104 INIT_LIST_HEAD(&bo_va->valids);
2105 INIT_LIST_HEAD(&bo_va->invalids);
2106
2107 if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
2108 (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
2109 bo_va->is_xgmi = true;
2110 mutex_lock(&adev->vm_manager.lock_pstate);
2111 /* Power up XGMI if it can be potentially used */
2112 if (++adev->vm_manager.xgmi_map_counter == 1)
2113 amdgpu_xgmi_set_pstate(adev, 1);
2114 mutex_unlock(&adev->vm_manager.lock_pstate);
2115 }
2116
2117 return bo_va;
2118 }
2119
2120
2121 /**
2122 * amdgpu_vm_bo_insert_map - insert a new mapping
2123 *
2124 * @adev: amdgpu_device pointer
2125 * @bo_va: bo_va to store the address
2126 * @mapping: the mapping to insert
2127 *
2128 * Insert a new mapping into all structures.
2129 */
2130 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2131 struct amdgpu_bo_va *bo_va,
2132 struct amdgpu_bo_va_mapping *mapping)
2133 {
2134 struct amdgpu_vm *vm = bo_va->base.vm;
2135 struct amdgpu_bo *bo = bo_va->base.bo;
2136
2137 mapping->bo_va = bo_va;
2138 list_add(&mapping->list, &bo_va->invalids);
2139 amdgpu_vm_it_insert(mapping, &vm->va);
2140
2141 if (mapping->flags & AMDGPU_PTE_PRT)
2142 amdgpu_vm_prt_get(adev);
2143
2144 if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv &&
2145 !bo_va->base.moved) {
2146 list_move(&bo_va->base.vm_status, &vm->moved);
2147 }
2148 trace_amdgpu_vm_bo_map(bo_va, mapping);
2149 }
2150
2151 /**
2152 * amdgpu_vm_bo_map - map bo inside a vm
2153 *
2154 * @adev: amdgpu_device pointer
2155 * @bo_va: bo_va to store the address
2156 * @saddr: where to map the BO
2157 * @offset: requested offset in the BO
2158 * @size: BO size in bytes
2159 * @flags: attributes of pages (read/write/valid/etc.)
2160 *
2161 * Add a mapping of the BO at the specified addr into the VM.
2162 *
2163 * Returns:
2164 * 0 for success, error for failure.
2165 *
2166 * Object has to be reserved and unreserved outside!
2167 */
2168 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2169 struct amdgpu_bo_va *bo_va,
2170 uint64_t saddr, uint64_t offset,
2171 uint64_t size, uint64_t flags)
2172 {
2173 struct amdgpu_bo_va_mapping *mapping, *tmp;
2174 struct amdgpu_bo *bo = bo_va->base.bo;
2175 struct amdgpu_vm *vm = bo_va->base.vm;
2176 uint64_t eaddr;
2177
2178 /* validate the parameters */
2179 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2180 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2181 return -EINVAL;
2182
2183 /* make sure object fit at this offset */
2184 eaddr = saddr + size - 1;
2185 if (saddr >= eaddr ||
2186 (bo && offset + size > amdgpu_bo_size(bo)))
2187 return -EINVAL;
2188
2189 saddr /= AMDGPU_GPU_PAGE_SIZE;
2190 eaddr /= AMDGPU_GPU_PAGE_SIZE;
2191
2192 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2193 if (tmp) {
2194 /* bo and tmp overlap, invalid addr */
2195 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2196 "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2197 tmp->start, tmp->last + 1);
2198 return -EINVAL;
2199 }
2200
2201 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2202 if (!mapping)
2203 return -ENOMEM;
2204
2205 mapping->start = saddr;
2206 mapping->last = eaddr;
2207 mapping->offset = offset;
2208 mapping->flags = flags;
2209
2210 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2211
2212 return 0;
2213 }
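/*
 * Illustrative mapping sketch (va_address is a hypothetical, GPU-page
 * aligned virtual address chosen by the caller; the BO and VM must be
 * reserved, error handling trimmed, BO sizes are assumed GPU-page aligned):
 *
 *	struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	uint64_t size = amdgpu_bo_size(bo);
 *	int r;
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, va_address, 0, size,
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 *			     AMDGPU_PTE_EXECUTABLE);
 */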
2214
2215 /**
2216 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2217 *
2218 * @adev: amdgpu_device pointer
2219 * @bo_va: bo_va to store the address
2220 * @saddr: where to map the BO
2221 * @offset: requested offset in the BO
2222 * @size: BO size in bytes
2223 * @flags: attributes of pages (read/write/valid/etc.)
2224 *
2225 * Add a mapping of the BO at the specified addr into the VM. Replace existing
2226 * mappings as we do so.
2227 *
2228 * Returns:
2229 * 0 for success, error for failure.
2230 *
2231 * Object has to be reserved and unreserved outside!
2232 */
2233 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2234 struct amdgpu_bo_va *bo_va,
2235 uint64_t saddr, uint64_t offset,
2236 uint64_t size, uint64_t flags)
2237 {
2238 struct amdgpu_bo_va_mapping *mapping;
2239 struct amdgpu_bo *bo = bo_va->base.bo;
2240 uint64_t eaddr;
2241 int r;
2242
2243 /* validate the parameters */
2244 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2245 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2246 return -EINVAL;
2247
2248 /* make sure object fit at this offset */
2249 eaddr = saddr + size - 1;
2250 if (saddr >= eaddr ||
2251 (bo && offset + size > amdgpu_bo_size(bo)))
2252 return -EINVAL;
2253
2254 /* Allocate all the needed memory */
2255 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2256 if (!mapping)
2257 return -ENOMEM;
2258
2259 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2260 if (r) {
2261 kfree(mapping);
2262 return r;
2263 }
2264
2265 saddr /= AMDGPU_GPU_PAGE_SIZE;
2266 eaddr /= AMDGPU_GPU_PAGE_SIZE;
2267
2268 mapping->start = saddr;
2269 mapping->last = eaddr;
2270 mapping->offset = offset;
2271 mapping->flags = flags;
2272
2273 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2274
2275 return 0;
2276 }
2277
2278 /**
2279 * amdgpu_vm_bo_unmap - remove bo mapping from vm
2280 *
2281 * @adev: amdgpu_device pointer
2282 * @bo_va: bo_va to remove the address from
2283 * @saddr: where the BO is mapped
2284 *
2285 * Remove a mapping of the BO at the specified addr from the VM.
2286 *
2287 * Returns:
2288 * 0 for success, error for failure.
2289 *
2290 * Object has to be reserved and unreserved outside!
2291 */
2292 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2293 struct amdgpu_bo_va *bo_va,
2294 uint64_t saddr)
2295 {
2296 struct amdgpu_bo_va_mapping *mapping;
2297 struct amdgpu_vm *vm = bo_va->base.vm;
2298 bool valid = true;
2299
2300 saddr /= AMDGPU_GPU_PAGE_SIZE;
2301
2302 list_for_each_entry(mapping, &bo_va->valids, list) {
2303 if (mapping->start == saddr)
2304 break;
2305 }
2306
2307 if (&mapping->list == &bo_va->valids) {
2308 valid = false;
2309
2310 list_for_each_entry(mapping, &bo_va->invalids, list) {
2311 if (mapping->start == saddr)
2312 break;
2313 }
2314
2315 if (&mapping->list == &bo_va->invalids)
2316 return -ENOENT;
2317 }
2318
2319 list_del(&mapping->list);
2320 amdgpu_vm_it_remove(mapping, &vm->va);
2321 mapping->bo_va = NULL;
2322 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2323
2324 if (valid)
2325 list_add(&mapping->list, &vm->freed);
2326 else
2327 amdgpu_vm_free_mapping(adev, vm, mapping,
2328 bo_va->last_pt_update);
2329
2330 return 0;
2331 }
2332
2333 /**
2334 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2335 *
2336 * @adev: amdgpu_device pointer
2337 * @vm: VM structure to use
2338 * @saddr: start of the range
2339 * @size: size of the range
2340 *
2341 * Remove all mappings in a range, splitting them as appropriate.
2342 *
2343 * Returns:
2344 * 0 for success, error for failure.
2345 */
2346 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2347 struct amdgpu_vm *vm,
2348 uint64_t saddr, uint64_t size)
2349 {
2350 struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2351 LIST_HEAD(removed);
2352 uint64_t eaddr;
2353
2354 eaddr = saddr + size - 1;
2355 saddr /= AMDGPU_GPU_PAGE_SIZE;
2356 eaddr /= AMDGPU_GPU_PAGE_SIZE;
2357
2358 /* Allocate all the needed memory */
2359 before = kzalloc(sizeof(*before), GFP_KERNEL);
2360 if (!before)
2361 return -ENOMEM;
2362 INIT_LIST_HEAD(&before->list);
2363
2364 after = kzalloc(sizeof(*after), GFP_KERNEL);
2365 if (!after) {
2366 kfree(before);
2367 return -ENOMEM;
2368 }
2369 INIT_LIST_HEAD(&after->list);
2370
2371 /* Now gather all removed mappings */
2372 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2373 while (tmp) {
2374 /* Remember mapping split at the start */
2375 if (tmp->start < saddr) {
2376 before->start = tmp->start;
2377 before->last = saddr - 1;
2378 before->offset = tmp->offset;
2379 before->flags = tmp->flags;
2380 before->bo_va = tmp->bo_va;
2381 list_add(&before->list, &tmp->bo_va->invalids);
2382 }
2383
2384 /* Remember mapping split at the end */
2385 if (tmp->last > eaddr) {
2386 after->start = eaddr + 1;
2387 after->last = tmp->last;
2388 after->offset = tmp->offset;
2389 after->offset += after->start - tmp->start;
2390 after->flags = tmp->flags;
2391 after->bo_va = tmp->bo_va;
2392 list_add(&after->list, &tmp->bo_va->invalids);
2393 }
2394
2395 list_del(&tmp->list);
2396 list_add(&tmp->list, &removed);
2397
2398 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2399 }
2400
2401 /* And free them up */
2402 list_for_each_entry_safe(tmp, next, &removed, list) {
2403 amdgpu_vm_it_remove(tmp, &vm->va);
2404 list_del(&tmp->list);
2405
2406 if (tmp->start < saddr)
2407 tmp->start = saddr;
2408 if (tmp->last > eaddr)
2409 tmp->last = eaddr;
2410
2411 tmp->bo_va = NULL;
2412 list_add(&tmp->list, &vm->freed);
2413 trace_amdgpu_vm_bo_unmap(NULL, tmp);
2414 }
2415
2416 /* Insert partial mapping before the range */
2417 if (!list_empty(&before->list)) {
2418 amdgpu_vm_it_insert(before, &vm->va);
2419 if (before->flags & AMDGPU_PTE_PRT)
2420 amdgpu_vm_prt_get(adev);
2421 } else {
2422 kfree(before);
2423 }
2424
2425 /* Insert partial mapping after the range */
2426 if (!list_empty(&after->list)) {
2427 amdgpu_vm_it_insert(after, &vm->va);
2428 if (after->flags & AMDGPU_PTE_PRT)
2429 amdgpu_vm_prt_get(adev);
2430 } else {
2431 kfree(after);
2432 }
2433
2434 return 0;
2435 }
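/*
 * Worked example of the splitting above (page numbers purely illustrative):
 * an existing mapping covers pages [0x100, 0x1ff] and the caller clears
 * [0x140, 0x17f].  The original mapping is removed and replaced by:
 *
 *	before: start = 0x100, last = 0x13f, offset unchanged
 *	after:  start = 0x180, last = 0x1ff, offset advanced by
 *	        (after->start - old start)
 *
 * The cleared middle part [0x140, 0x17f] is moved to vm->freed so its PTEs
 * get zapped by the next amdgpu_vm_clear_freed() call.
 */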
2436
2437 /**
2438 * amdgpu_vm_bo_lookup_mapping - find mapping by address
2439 *
2440 * @vm: the requested VM
2441 * @addr: the address
2442 *
2443 * Find a mapping by its address.
2444 *
2445 * Returns:
2446 * The amdgpu_bo_va_mapping matching addr, or NULL
2447 *
2448 */
2449 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2450 uint64_t addr)
2451 {
2452 return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2453 }
2454
2455 /**
2456 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2457 *
2458 * @vm: the requested vm
2459 * @ticket: CS ticket
2460 *
2461 * Trace all mappings of BOs reserved during a command submission.
2462 */
2463 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2464 {
2465 struct amdgpu_bo_va_mapping *mapping;
2466
2467 if (!trace_amdgpu_vm_bo_cs_enabled())
2468 return;
2469
2470 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2471 mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2472 if (mapping->bo_va && mapping->bo_va->base.bo) {
2473 struct amdgpu_bo *bo;
2474
2475 bo = mapping->bo_va->base.bo;
2476 if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2477 ticket)
2478 continue;
2479 }
2480
2481 trace_amdgpu_vm_bo_cs(mapping);
2482 }
2483 }
2484
2485 /**
2486 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2487 *
2488 * @adev: amdgpu_device pointer
2489 * @bo_va: requested bo_va
2490 *
2491 * Remove @bo_va->bo from the requested vm.
2492 *
2493 * Object has to be reserved!
2494 */
2495 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2496 struct amdgpu_bo_va *bo_va)
2497 {
2498 struct amdgpu_bo_va_mapping *mapping, *next;
2499 struct amdgpu_bo *bo = bo_va->base.bo;
2500 struct amdgpu_vm *vm = bo_va->base.vm;
2501 struct amdgpu_vm_bo_base **base;
2502
2503 if (bo) {
2504 if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
2505 vm->bulk_moveable = false;
2506
2507 for (base = &bo_va->base.bo->vm_bo; *base;
2508 base = &(*base)->next) {
2509 if (*base != &bo_va->base)
2510 continue;
2511
2512 *base = bo_va->base.next;
2513 break;
2514 }
2515 }
2516
2517 spin_lock(&vm->invalidated_lock);
2518 list_del(&bo_va->base.vm_status);
2519 spin_unlock(&vm->invalidated_lock);
2520
2521 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2522 list_del(&mapping->list);
2523 amdgpu_vm_it_remove(mapping, &vm->va);
2524 mapping->bo_va = NULL;
2525 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2526 list_add(&mapping->list, &vm->freed);
2527 }
2528 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2529 list_del(&mapping->list);
2530 amdgpu_vm_it_remove(mapping, &vm->va);
2531 amdgpu_vm_free_mapping(adev, vm, mapping,
2532 bo_va->last_pt_update);
2533 }
2534
2535 dma_fence_put(bo_va->last_pt_update);
2536
2537 if (bo && bo_va->is_xgmi) {
2538 mutex_lock(&adev->vm_manager.lock_pstate);
2539 if (--adev->vm_manager.xgmi_map_counter == 0)
2540 amdgpu_xgmi_set_pstate(adev, 0);
2541 mutex_unlock(&adev->vm_manager.lock_pstate);
2542 }
2543
2544 kfree(bo_va);
2545 }
2546
2547 /**
2548 * amdgpu_vm_evictable - check if we can evict a VM
2549 *
2550 * @bo: A page table of the VM.
2551 *
2552 * Check if it is possible to evict a VM.
2553 */
2554 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2555 {
2556 struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2557
2558 /* Page tables of a destroyed VM can go away immediately */
2559 if (!bo_base || !bo_base->vm)
2560 return true;
2561
2562 /* Don't evict VM page tables while they are busy */
2563 if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
2564 return false;
2565
2566 /* Try to block ongoing updates */
2567 if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2568 return false;
2569
2570 /* Don't evict VM page tables while they are updated */
2571 if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
2572 !dma_fence_is_signaled(bo_base->vm->last_delayed)) {
2573 amdgpu_vm_eviction_unlock(bo_base->vm);
2574 return false;
2575 }
2576
2577 bo_base->vm->evicting = true;
2578 amdgpu_vm_eviction_unlock(bo_base->vm);
2579 return true;
2580 }
2581
2582 /**
2583 * amdgpu_vm_bo_invalidate - mark the bo as invalid
2584 *
2585 * @adev: amdgpu_device pointer
2586 * @bo: amdgpu buffer object
2587 * @evicted: is the BO evicted
2588 *
2589 * Mark @bo as invalid.
2590 */
2591 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2592 struct amdgpu_bo *bo, bool evicted)
2593 {
2594 struct amdgpu_vm_bo_base *bo_base;
2595
2596 /* shadow bo doesn't have a bo base, its validation needs its parent */
2597 if (bo->parent && bo->parent->shadow == bo)
2598 bo = bo->parent;
2599
2600 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2601 struct amdgpu_vm *vm = bo_base->vm;
2602
2603 if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
2604 amdgpu_vm_bo_evicted(bo_base);
2605 continue;
2606 }
2607
2608 if (bo_base->moved)
2609 continue;
2610 bo_base->moved = true;
2611
2612 if (bo->tbo.type == ttm_bo_type_kernel)
2613 amdgpu_vm_bo_relocated(bo_base);
2614 else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
2615 amdgpu_vm_bo_moved(bo_base);
2616 else
2617 amdgpu_vm_bo_invalidated(bo_base);
2618 }
2619 }
2620
2621 /**
2622 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2623 *
2624 * @vm_size: VM size
2625 *
2626 * Returns:
2627 * VM page table as power of two
2628 */
2629 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2630 {
2631 /* Total bits covered by PD + PTs */
2632 unsigned bits = ilog2(vm_size) + 18;
2633
2634 /* Make sure the PD is 4K in size up to 8GB address space.
2635 Above that, split equally between PD and PTs */
2636 if (vm_size <= 8)
2637 return (bits - 9);
2638 else
2639 return ((bits + 3) / 2);
2640 }
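/*
 * Worked examples for the formula above (purely illustrative):
 *
 *	vm_size =   4 GB -> bits = ilog2(4)   + 18 = 20 -> 20 -  9      = 11
 *	vm_size = 256 GB -> bits = ilog2(256) + 18 = 26 -> (26 + 3) / 2 = 14
 *
 * i.e. small address spaces keep a 4K (9 bit) PD, larger ones split the
 * remaining bits roughly evenly between PD and PTs.
 */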
2641
2642 /**
2643 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2644 *
2645 * @adev: amdgpu_device pointer
2646 * @min_vm_size: the minimum vm size in GB if it's set to auto
2647 * @fragment_size_default: Default PTE fragment size
2648 * @max_level: max VMPT level
2649 * @max_bits: max address space size in bits
2650 *
2651 */
2652 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2653 uint32_t fragment_size_default, unsigned max_level,
2654 unsigned max_bits)
2655 {
2656 unsigned int max_size = 1 << (max_bits - 30);
2657 unsigned int vm_size;
2658 uint64_t tmp;
2659
2660 /* adjust vm size first */
2661 if (amdgpu_vm_size != -1) {
2662 vm_size = amdgpu_vm_size;
2663 if (vm_size > max_size) {
2664 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2665 amdgpu_vm_size, max_size);
2666 vm_size = max_size;
2667 }
2668 } else {
2669 struct sysinfo si;
2670 unsigned int phys_ram_gb;
2671
2672 /* Optimal VM size depends on the amount of physical
2673 * RAM available. Underlying requirements and
2674 * assumptions:
2675 *
2676 * - Need to map system memory and VRAM from all GPUs
2677 * - VRAM from other GPUs not known here
2678 * - Assume VRAM <= system memory
2679 * - On GFX8 and older, VM space can be segmented for
2680 * different MTYPEs
2681 * - Need to allow room for fragmentation, guard pages etc.
2682 *
2683 * This adds up to a rough guess of system memory x3.
2684 * Round up to power of two to maximize the available
2685 * VM size with the given page table size.
2686 */
2687 si_meminfo(&si);
2688 phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2689 (1 << 30) - 1) >> 30;
2690 vm_size = roundup_pow_of_two(
2691 min(max(phys_ram_gb * 3, min_vm_size), max_size));
2692 }
2693
2694 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2695
2696 tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2697 if (amdgpu_vm_block_size != -1)
2698 tmp >>= amdgpu_vm_block_size - 9;
2699 tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2700 adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2701 switch (adev->vm_manager.num_level) {
2702 case 3:
2703 adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2704 break;
2705 case 2:
2706 adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2707 break;
2708 case 1:
2709 adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2710 break;
2711 default:
2712 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2713 }
2714 /* block size depends on vm size and hw setup */
2715 if (amdgpu_vm_block_size != -1)
2716 adev->vm_manager.block_size =
2717 min((unsigned)amdgpu_vm_block_size, max_bits
2718 - AMDGPU_GPU_PAGE_SHIFT
2719 - 9 * adev->vm_manager.num_level);
2720 else if (adev->vm_manager.num_level > 1)
2721 adev->vm_manager.block_size = 9;
2722 else
2723 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2724
2725 if (amdgpu_vm_fragment_size == -1)
2726 adev->vm_manager.fragment_size = fragment_size_default;
2727 else
2728 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2729
2730 DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2731 vm_size, adev->vm_manager.num_level + 1,
2732 adev->vm_manager.block_size,
2733 adev->vm_manager.fragment_size);
2734 }
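/*
 * Worked example for the sizing above, with purely illustrative numbers:
 * vm_size = 256 TB (0x40000 GB) gives max_pfn = 2^36 GPU pages.  With the
 * default amdgpu_vm_block_size (-1), fls64(2^36) - 1 = 36 and
 * DIV_ROUND_UP(36, 9) - 1 = 3, so num_level = min(max_level, 3); with
 * max_level = 3 this selects a 4-level layout rooted at AMDGPU_VM_PDB2
 * with the default 9-bit block size.
 */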
2735
2736 /**
2737 * amdgpu_vm_wait_idle - wait for the VM to become idle
2738 *
2739 * @vm: VM object to wait for
2740 * @timeout: timeout to wait for VM to become idle
2741 */
2742 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2743 {
2744 timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
2745 true, true, timeout);
2746 if (timeout <= 0)
2747 return timeout;
2748
2749 timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout);
2750 if (timeout <= 0)
2751 return timeout;
2752
2753 return dma_fence_wait_timeout(vm->last_delayed, true, timeout);
2754 }
2755
2756 /**
2757 * amdgpu_vm_init - initialize a vm instance
2758 *
2759 * @adev: amdgpu_device pointer
2760 * @vm: requested vm
2761 * @vm_context: Indicates if it is a GFX or Compute context
2762 * @pasid: Process address space identifier
2763 *
2764 * Init @vm fields.
2765 *
2766 * Returns:
2767 * 0 for success, error for failure.
2768 */
2769 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2770 int vm_context, unsigned int pasid)
2771 {
2772 struct amdgpu_bo_param bp;
2773 struct amdgpu_bo *root;
2774 int r, i;
2775
2776 vm->va = RB_ROOT_CACHED;
2777 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2778 vm->reserved_vmid[i] = NULL;
2779 INIT_LIST_HEAD(&vm->evicted);
2780 INIT_LIST_HEAD(&vm->relocated);
2781 INIT_LIST_HEAD(&vm->moved);
2782 INIT_LIST_HEAD(&vm->idle);
2783 INIT_LIST_HEAD(&vm->invalidated);
2784 spin_lock_init(&vm->invalidated_lock);
2785 INIT_LIST_HEAD(&vm->freed);
2786
2787
2788 /* create scheduler entities for page table updates */
2789 r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
2790 adev->vm_manager.vm_pte_scheds,
2791 adev->vm_manager.vm_pte_num_scheds, NULL);
2792 if (r)
2793 return r;
2794
2795 r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
2796 adev->vm_manager.vm_pte_scheds,
2797 adev->vm_manager.vm_pte_num_scheds, NULL);
2798 if (r)
2799 goto error_free_direct;
2800
2801 vm->pte_support_ats = false;
2802 vm->is_compute_context = false;
2803
2804 if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2805 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2806 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2807
2808 if (adev->asic_type == CHIP_RAVEN)
2809 vm->pte_support_ats = true;
2810 } else {
2811 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2812 AMDGPU_VM_USE_CPU_FOR_GFX);
2813 }
2814 DRM_DEBUG_DRIVER("VM update mode is %s\n",
2815 vm->use_cpu_for_update ? "CPU" : "SDMA");
2816 WARN_ONCE((vm->use_cpu_for_update &&
2817 !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2818 "CPU update of VM recommended only for large BAR system\n");
2819
2820 if (vm->use_cpu_for_update)
2821 vm->update_funcs = &amdgpu_vm_cpu_funcs;
2822 else
2823 vm->update_funcs = &amdgpu_vm_sdma_funcs;
2824 vm->last_update = NULL;
2825 vm->last_direct = dma_fence_get_stub();
2826 vm->last_delayed = dma_fence_get_stub();
2827
2828 mutex_init(&vm->eviction_lock);
2829 vm->evicting = false;
2830
2831 amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
2832 if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
2833 bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
2834 r = amdgpu_bo_create(adev, &bp, &root);
2835 if (r)
2836 goto error_free_delayed;
2837
2838 r = amdgpu_bo_reserve(root, true);
2839 if (r)
2840 goto error_free_root;
2841
2842 r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
2843 if (r)
2844 goto error_unreserve;
2845
2846 amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
2847
2848 r = amdgpu_vm_clear_bo(adev, vm, root, false);
2849 if (r)
2850 goto error_unreserve;
2851
2852 amdgpu_bo_unreserve(vm->root.base.bo);
2853
2854 if (pasid) {
2855 unsigned long flags;
2856
2857 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2858 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2859 GFP_ATOMIC);
2860 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2861 if (r < 0)
2862 goto error_free_root;
2863
2864 vm->pasid = pasid;
2865 }
2866
2867 INIT_KFIFO(vm->faults);
2868
2869 return 0;
2870
2871 error_unreserve:
2872 amdgpu_bo_unreserve(vm->root.base.bo);
2873
2874 error_free_root:
2875 amdgpu_bo_unref(&vm->root.base.bo->shadow);
2876 amdgpu_bo_unref(&vm->root.base.bo);
2877 vm->root.base.bo = NULL;
2878
2879 error_free_delayed:
2880 dma_fence_put(vm->last_direct);
2881 dma_fence_put(vm->last_delayed);
2882 drm_sched_entity_destroy(&vm->delayed);
2883
2884 error_free_direct:
2885 drm_sched_entity_destroy(&vm->direct);
2886
2887 return r;
2888 }
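/*
 * Lifetime sketch, for illustration ("fpriv" stands in for the caller's
 * per-file private data; error handling trimmed):
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
 *	...
 *	amdgpu_vm_fini(adev, &fpriv->vm);
 */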
2889
2890 /**
2891 * amdgpu_vm_check_clean_reserved - check if a VM is clean
2892 *
2893 * @adev: amdgpu_device pointer
2894 * @vm: the VM to check
2895 *
2896 * Check all entries of the root PD. If any subsequent PDs are allocated,
2897 * it means page tables are being created and filled, so this is not a
2898 * clean VM
2899 *
2900 * Returns:
2901 * 0 if this VM is clean
2902 */
2903 static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
2904 struct amdgpu_vm *vm)
2905 {
2906 enum amdgpu_vm_level root = adev->vm_manager.root_level;
2907 unsigned int entries = amdgpu_vm_num_entries(adev, root);
2908 unsigned int i = 0;
2909
2910 if (!(vm->root.entries))
2911 return 0;
2912
2913 for (i = 0; i < entries; i++) {
2914 if (vm->root.entries[i].base.bo)
2915 return -EINVAL;
2916 }
2917
2918 return 0;
2919 }
2920
2921 /**
2922 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2923 *
2924 * @adev: amdgpu_device pointer
2925 * @vm: requested vm
2926 * @pasid: pasid to use
2927 *
2928 * This only works on GFX VMs that don't have any BOs added and no
2929 * page tables allocated yet.
2930 *
2931 * Changes the following VM parameters:
2932 * - use_cpu_for_update
2933 * - pte_support_ats
2934 * - pasid (old PASID is released, because compute manages its own PASIDs)
2935 *
2936 * Reinitializes the page directory to reflect the changed ATS
2937 * setting.
2938 *
2939 * Returns:
2940 * 0 for success, -errno for errors.
2941 */
2942 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2943 unsigned int pasid)
2944 {
2945 bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2946 int r;
2947
2948 r = amdgpu_bo_reserve(vm->root.base.bo, true);
2949 if (r)
2950 return r;
2951
2952 /* Sanity checks */
2953 r = amdgpu_vm_check_clean_reserved(adev, vm);
2954 if (r)
2955 goto unreserve_bo;
2956
2957 if (pasid) {
2958 unsigned long flags;
2959
2960 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2961 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2962 GFP_ATOMIC);
2963 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2964
2965 if (r == -ENOSPC)
2966 goto unreserve_bo;
2967 r = 0;
2968 }
2969
2970 /* Check if PD needs to be reinitialized and do it before
2971 * changing any other state, in case it fails.
2972 */
2973 if (pte_support_ats != vm->pte_support_ats) {
2974 vm->pte_support_ats = pte_support_ats;
2975 r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
2976 if (r)
2977 goto free_idr;
2978 }
2979
2980 /* Update VM state */
2981 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2982 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2983 DRM_DEBUG_DRIVER("VM update mode is %s\n",
2984 vm->use_cpu_for_update ? "CPU" : "SDMA");
2985 WARN_ONCE((vm->use_cpu_for_update &&
2986 !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2987 "CPU update of VM recommended only for large BAR system\n");
2988
2989 if (vm->use_cpu_for_update)
2990 vm->update_funcs = &amdgpu_vm_cpu_funcs;
2991 else
2992 vm->update_funcs = &amdgpu_vm_sdma_funcs;
2993 dma_fence_put(vm->last_update);
2994 vm->last_update = NULL;
2995 vm->is_compute_context = true;
2996
2997 if (vm->pasid) {
2998 unsigned long flags;
2999
3000 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3001 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
3002 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3003
3004 /* Free the original amdgpu-allocated pasid;
3005 * it will be replaced with a kfd-allocated pasid.
3006 */
3007 amdgpu_pasid_free(vm->pasid);
3008 vm->pasid = 0;
3009 }
3010
3011 /* Free the shadow bo for compute VM */
3012 amdgpu_bo_unref(&vm->root.base.bo->shadow);
3013
3014 if (pasid)
3015 vm->pasid = pasid;
3016
3017 goto unreserve_bo;
3018
3019 free_idr:
3020 if (pasid) {
3021 unsigned long flags;
3022
3023 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3024 idr_remove(&adev->vm_manager.pasid_idr, pasid);
3025 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3026 }
3027 unreserve_bo:
3028 amdgpu_bo_unreserve(vm->root.base.bo);
3029 return r;
3030 }
3031
3032 /**
3033 * amdgpu_vm_release_compute - release a compute vm
3034 * @adev: amdgpu_device pointer
3035 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
3036 *
3037 * This is the counterpart of amdgpu_vm_make_compute. It decouples the
3038 * compute pasid from the vm. Compute should stop using the vm after this call.
3039 */
3040 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
3041 {
3042 if (vm->pasid) {
3043 unsigned long flags;
3044
3045 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3046 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
3047 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3048 }
3049 vm->pasid = 0;
3050 vm->is_compute_context = false;
3051 }
3052
3053 /**
3054 * amdgpu_vm_fini - tear down a vm instance
3055 *
3056 * @adev: amdgpu_device pointer
3057 * @vm: requested vm
3058 *
3059 * Tear down @vm.
3060 * Unbind the VM and remove all bos from the vm bo list
3061 */
3062 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
3063 {
3064 struct amdgpu_bo_va_mapping *mapping, *tmp;
3065 bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
3066 struct amdgpu_bo *root;
3067 int i;
3068
3069 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
3070
3071 root = amdgpu_bo_ref(vm->root.base.bo);
3072 amdgpu_bo_reserve(root, true);
3073 if (vm->pasid) {
3074 unsigned long flags;
3075
3076 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3077 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
3078 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3079 vm->pasid = 0;
3080 }
3081
3082 dma_fence_wait(vm->last_direct, false);
3083 dma_fence_put(vm->last_direct);
3084 dma_fence_wait(vm->last_delayed, false);
3085 dma_fence_put(vm->last_delayed);
3086
3087 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
3088 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
3089 amdgpu_vm_prt_fini(adev, vm);
3090 prt_fini_needed = false;
3091 }
3092
3093 list_del(&mapping->list);
3094 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
3095 }
3096
3097 amdgpu_vm_free_pts(adev, vm, NULL);
3098 amdgpu_bo_unreserve(root);
3099 amdgpu_bo_unref(&root);
3100 WARN_ON(vm->root.base.bo);
3101
3102 drm_sched_entity_destroy(&vm->direct);
3103 drm_sched_entity_destroy(&vm->delayed);
3104
3105 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
3106 dev_err(adev->dev, "still active bo inside vm\n");
3107 }
3108 rbtree_postorder_for_each_entry_safe(mapping, tmp,
3109 &vm->va.rb_root, rb) {
3110 /* Don't remove the mapping here, we don't want to trigger a
3111 * rebalance and the tree is about to be destroyed anyway.
3112 */
3113 list_del(&mapping->list);
3114 kfree(mapping);
3115 }
3116
3117 dma_fence_put(vm->last_update);
3118 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
3119 amdgpu_vmid_free_reserved(adev, vm, i);
3120 }
3121
3122 /**
3123 * amdgpu_vm_manager_init - init the VM manager
3124 *
3125 * @adev: amdgpu_device pointer
3126 *
3127 * Initialize the VM manager structures
3128 */
3129 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
3130 {
3131 unsigned i;
3132
3133 amdgpu_vmid_mgr_init(adev);
3134
3135 adev->vm_manager.fence_context =
3136 dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3137 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
3138 adev->vm_manager.seqno[i] = 0;
3139
3140 spin_lock_init(&adev->vm_manager.prt_lock);
3141 atomic_set(&adev->vm_manager.num_prt_users, 0);
3142
3143 /* If not overridden by the user, compute VM tables are updated by the
3144 * CPU only on large BAR systems by default.
3145 */
3146 #ifdef CONFIG_X86_64
3147 if (amdgpu_vm_update_mode == -1) {
3148 if (amdgpu_gmc_vram_full_visible(&adev->gmc))
3149 adev->vm_manager.vm_update_mode =
3150 AMDGPU_VM_USE_CPU_FOR_COMPUTE;
3151 else
3152 adev->vm_manager.vm_update_mode = 0;
3153 } else
3154 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
3155 #else
3156 adev->vm_manager.vm_update_mode = 0;
3157 #endif
3158
3159 idr_init(&adev->vm_manager.pasid_idr);
3160 spin_lock_init(&adev->vm_manager.pasid_lock);
3161
3162 adev->vm_manager.xgmi_map_counter = 0;
3163 mutex_init(&adev->vm_manager.lock_pstate);
3164 }
3165
3166 /**
3167 * amdgpu_vm_manager_fini - cleanup VM manager
3168 *
3169 * @adev: amdgpu_device pointer
3170 *
3171 * Cleanup the VM manager and free resources.
3172 */
3173 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
3174 {
3175 WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
3176 idr_destroy(&adev->vm_manager.pasid_idr);
3177
3178 amdgpu_vmid_mgr_fini(adev);
3179 }
3180
3181 /**
3182 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
3183 *
3184 * @dev: drm device pointer
3185 * @data: drm_amdgpu_vm
3186 * @filp: drm file pointer
3187 *
3188 * Returns:
3189 * 0 for success, -errno for errors.
3190 */
3191 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
3192 {
3193 union drm_amdgpu_vm *args = data;
3194 struct amdgpu_device *adev = dev->dev_private;
3195 struct amdgpu_fpriv *fpriv = filp->driver_priv;
3196 int r;
3197
3198 switch (args->in.op) {
3199 case AMDGPU_VM_OP_RESERVE_VMID:
3200 /* We only need to reserve a vmid from the gfxhub */
3201 r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
3202 AMDGPU_GFXHUB_0);
3203 if (r)
3204 return r;
3205 break;
3206 case AMDGPU_VM_OP_UNRESERVE_VMID:
3207 amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
3208 break;
3209 default:
3210 return -EINVAL;
3211 }
3212
3213 return 0;
3214 }
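/*
 * Userspace side, sketched for illustration (libdrm wraps this ioctl; shown
 * here as a raw submission through the usual drmCommandWriteRead() helper,
 * which is assumed available):
 *
 *	union drm_amdgpu_vm args = { 0 };
 *
 *	args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
 *	drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 */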
3215
3216 /**
3217 * amdgpu_vm_get_task_info - Extracts task info for a PASID.
3218 *
3219 * @adev: amdgpu device pointer
3220 * @pasid: PASID identifier for VM
3221 * @task_info: task_info to fill.
3222 */
3223 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
3224 struct amdgpu_task_info *task_info)
3225 {
3226 struct amdgpu_vm *vm;
3227 unsigned long flags;
3228
3229 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3230
3231 vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3232 if (vm)
3233 *task_info = vm->task_info;
3234
3235 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3236 }
3237
3238 /**
3239 * amdgpu_vm_set_task_info - Sets VMs task info.
3240 *
3241 * @vm: vm for which to set the info
3242 */
3243 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
3244 {
3245 if (vm->task_info.pid)
3246 return;
3247
3248 vm->task_info.pid = current->pid;
3249 get_task_comm(vm->task_info.task_name, current);
3250
3251 if (current->group_leader->mm != current->mm)
3252 return;
3253
3254 vm->task_info.tgid = current->group_leader->pid;
3255 get_task_comm(vm->task_info.process_name, current->group_leader);
3256 }
3257
3258 /**
3259 * amdgpu_vm_handle_fault - graceful handling of VM faults.
3260 * @adev: amdgpu device pointer
3261 * @pasid: PASID of the VM
3262 * @addr: Address of the fault
3263 *
3264 * Try to gracefully handle a VM fault. Return true if the fault was handled and
3265 * shouldn't be reported any more.
3266 */
3267 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
3268 uint64_t addr)
3269 {
3270 struct amdgpu_bo *root;
3271 uint64_t value, flags;
3272 struct amdgpu_vm *vm;
3273 long r;
3274
3275 spin_lock(&adev->vm_manager.pasid_lock);
3276 vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3277 if (vm)
3278 root = amdgpu_bo_ref(vm->root.base.bo);
3279 else
3280 root = NULL;
3281 spin_unlock(&adev->vm_manager.pasid_lock);
3282
3283 if (!root)
3284 return false;
3285
3286 r = amdgpu_bo_reserve(root, true);
3287 if (r)
3288 goto error_unref;
3289
3290 /* Double check that the VM still exists */
3291 spin_lock(&adev->vm_manager.pasid_lock);
3292 vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3293 if (vm && vm->root.base.bo != root)
3294 vm = NULL;
3295 spin_unlock(&adev->vm_manager.pasid_lock);
3296 if (!vm)
3297 goto error_unlock;
3298
3299 addr /= AMDGPU_GPU_PAGE_SIZE;
3300 flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
3301 AMDGPU_PTE_SYSTEM;
3302
3303 if (vm->is_compute_context) {
3304 /* Intentionally setting invalid PTE flag
3305 * combination to force a no-retry-fault
3306 */
3307 flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
3308 AMDGPU_PTE_TF;
3309 value = 0;
3310
3311 } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
3312 /* Redirect the access to the dummy page */
3313 value = adev->dummy_page_addr;
3314 flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
3315 AMDGPU_PTE_WRITEABLE;
3316
3317 } else {
3318 /* Let the hw retry silently on the PTE */
3319 value = 0;
3320 }
3321
3322 r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1,
3323 flags, value, NULL, NULL);
3324 if (r)
3325 goto error_unlock;
3326
3327 r = amdgpu_vm_update_pdes(adev, vm, true);
3328
3329 error_unlock:
3330 amdgpu_bo_unreserve(root);
3331 if (r < 0)
3332 DRM_ERROR("Can't handle page fault (%ld)\n", r);
3333
3334 error_unref:
3335 amdgpu_bo_unref(&root);
3336
3337 return false;
3338 }
3339