
Lines Matching refs:vma

58 void i915_vma_free(struct i915_vma *vma)
60 mutex_destroy(&vma->pages_mutex);
61 return kmem_cache_free(global.slab_vmas, vma);
68 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
74 if (!vma->node.stack) {
75 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
76 vma->node.start, vma->node.size, reason);
80 nr_entries = stack_depot_fetch(vma->node.stack, &entries);
82 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
83 vma->node.start, vma->node.size, reason, buf);
88 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
130 const struct i915_vma *vma = vn;
132 long cmp = i915_vma_compare(__UNCONST(vma), key->vm, key->view);
148 rb_tree_init(&obj->vma.tree.rbr_tree, &vma_tree_rb_ops);
150 obj->vma.tree = RB_ROOT;
159 struct i915_vma *vma;
165 vma = i915_vma_alloc();
166 if (vma == NULL)
169 kref_init(&vma->ref);
170 mutex_init(&vma->pages_mutex);
171 vma->vm = i915_vm_get(vm);
172 vma->ops = &vm->vma_ops;
173 vma->obj = obj;
174 vma->resv = obj->base.resv;
175 vma->size = obj->base.size;
176 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
178 i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
183 might_lock(&vma->active.mutex);
187 INIT_LIST_HEAD(&vma->closed_link);
190 vma->ggtt_view = *view;
196 vma->size = view->partial.size;
197 vma->size <<= PAGE_SHIFT;
198 GEM_BUG_ON(vma->size > obj->base.size);
200 vma->size = intel_rotation_info_size(&view->rotated);
201 vma->size <<= PAGE_SHIFT;
203 vma->size = intel_remapped_info_size(&view->remapped);
204 vma->size <<= PAGE_SHIFT;
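The view handling above takes the vma size from the requested GGTT view rather than from the whole object; partial.offset and partial.size are counted in pages, hence the PAGE_SHIFT shifts. A minimal, hypothetical sketch of a caller building a partial view (the ggtt pointer and the first_page/npages values are illustrative assumptions, not taken from this file):

        /* Map only a window of the object through the GGTT. */
        struct i915_ggtt_view view = {
                .type = I915_GGTT_VIEW_PARTIAL,
                .partial = {
                        .offset = first_page,   /* in pages */
                        .size = npages,         /* in pages */
                },
        };
        struct i915_vma *vma;

        vma = i915_vma_instance(obj, &ggtt->vm, &view);
        if (IS_ERR(vma))
                return PTR_ERR(vma);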
208 if (unlikely(vma->size > vm->total))
211 GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
214 if (unlikely(overflows_type(vma->size, u32)))
217 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
220 if (unlikely(vma->fence_size < vma->size || /* overflow */
221 vma->fence_size > vm->total))
224 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
226 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
229 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
231 __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
234 spin_lock(&obj->vma.lock);
240 collision = rb_tree_insert_node(&obj->vma.tree.rbr_tree, vma);
241 KASSERT(collision == vma);
244 p = &obj->vma.tree.rb_node;
254 * already created a matching vma, so return the older instance
259 spin_unlock(&obj->vma.lock);
260 i915_vma_free(vma);
269 rb_link_node(&vma->obj_node, rb, p);
270 rb_insert_color(&vma->obj_node, &obj->vma.tree);
273 if (i915_vma_is_ggtt(vma))
275 * We put the GGTT vma at the start of the vma-list, followed
276 by the ppGTT vma. This allows us to break early when
277 * iterating over only the GGTT vma for an object, see
280 list_add(&vma->obj_link, &obj->vma.list);
282 list_add_tail(&vma->obj_link, &obj->vma.list);
284 spin_unlock(&obj->vma.lock);
286 return vma;
289 i915_vma_free(vma);
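The list ordering established above (GGTT vmas at the head of obj->vma.list, ppGTT vmas after) is what lets callers stop walking at the first non-GGTT entry; the driver's for_each_ggtt_vma() helper expresses the same idea. A minimal sketch of such a walk under obj->vma.lock, purely illustrative:

        struct i915_vma *vma;

        spin_lock(&obj->vma.lock);
        list_for_each_entry(vma, &obj->vma.list, obj_link) {
                if (!i915_vma_is_ggtt(vma))
                        break;  /* only ppGTT vmas remain past this point */

                /* ... inspect or account the GGTT vma ... */
        }
        spin_unlock(&obj->vma.lock);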
301 return rb_tree_find_node(&obj->vma.tree.rbr_tree, &key);
305 rb = obj->vma.tree.rb_node;
307 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
310 cmp = i915_vma_compare(vma, vm, view);
312 return vma;
325 * i915_vma_instance - return the singleton instance of the VMA
330 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
332 * Once created, the VMA is kept until either the object is freed, or the
335 * Returns the vma, or an error pointer.
342 struct i915_vma *vma;
347 spin_lock(&obj->vma.lock);
348 vma = vma_lookup(obj, vm, view);
349 spin_unlock(&obj->vma.lock);
351 /* vma_create() will resolve the race if another creates the vma */
352 if (unlikely(!vma))
353 vma = vma_create(obj, vm, view);
355 GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
356 return vma;
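Together with i915_vma_pin(), the usual calling pattern is roughly the sketch below (error paths trimmed; the choice of PIN_USER here, versus PIN_GLOBAL for a GGTT binding, is an assumption about the caller):

        struct i915_vma *vma;
        int err;

        vma = i915_vma_instance(obj, vm, NULL); /* NULL view == normal view */
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                return err;

        /* ... vma->node.start is now a valid GPU address ... */

        i915_vma_unpin(vma);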
361 struct i915_vma *vma;
370 struct i915_vma *vma = vw->vma;
373 err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
375 atomic_or(I915_VMA_ERROR, &vma->flags);
409 i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
410 * @vma: VMA to map
416 * this VMA in case of non-default GGTT views) and PTE entries set up.
419 int i915_vma_bind(struct i915_vma *vma,
428 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
429 GEM_BUG_ON(vma->size > vma->node.size);
431 if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
432 vma->node.size,
433 vma->vm->total)))
442 vma_flags = atomic_read(&vma->flags);
451 GEM_BUG_ON(!vma->pages);
453 trace_i915_vma_bind(vma, bind_flags);
454 if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
455 work->vma = vma;
464 * Also note that we do not want to track the async vma as
468 GEM_BUG_ON(i915_active_has_exclusive(&vma->active));
469 i915_active_set_exclusive(&vma->active, &work->base.dma);
472 if (vma->obj) {
473 __i915_gem_object_pin_pages(vma->obj);
474 work->pinned = vma->obj;
477 GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
478 ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
483 atomic_or(bind_flags, &vma->flags);
491 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
496 if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
501 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
502 GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
504 ptr = READ_ONCE(vma->iomap);
506 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
507 vma->node.start,
508 vma->node.size);
514 if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
516 io_mapping_unmap(&i915_vm_to_ggtt(vma->vm)->iomap, ptr,
517 vma->node.size);
521 ptr = vma->iomap;
525 __i915_vma_pin(vma);
527 err = i915_vma_pin_fence(vma);
531 i915_vma_set_ggtt_write(vma);
537 __i915_vma_unpin(vma);
546 void i915_vma_flush_writes(struct i915_vma *vma)
548 if (i915_vma_unset_ggtt_write(vma))
549 intel_gt_flush_ggtt_writes(vma->vm->gt);
552 void i915_vma_unpin_iomap(struct i915_vma *vma)
554 GEM_BUG_ON(vma->iomap == NULL);
556 i915_vma_flush_writes(vma);
558 i915_vma_unpin_fence(vma);
559 i915_vma_unpin(vma);
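The iomap pair above is used for CPU access through the GGTT aperture: i915_vma_pin_iomap() returns a write-combined mapping and leaves the vma pinned, and i915_vma_unpin_iomap() flushes any GGTT writes, releases the fence and drops that pin. A short sketch, assuming the vma was pinned map-and-fenceable (e.g. with PIN_GLOBAL | PIN_MAPPABLE); value and offset are illustrative:

        void __iomem *ptr;

        ptr = i915_vma_pin_iomap(vma);
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);

        writel(value, ptr + offset);    /* write-combined access */

        i915_vma_unpin_iomap(vma);      /* flush, unfence, unpin */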
564 struct i915_vma *vma;
567 vma = fetch_and_zero(p_vma);
568 if (!vma)
571 obj = vma->obj;
574 i915_vma_unpin(vma);
575 i915_vma_close(vma);
583 bool i915_vma_misplaced(const struct i915_vma *vma,
586 if (!drm_mm_node_allocated(&vma->node))
589 if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags_const(vma)))
592 if (vma->node.size < size)
596 if (alignment && !IS_ALIGNED(vma->node.start, alignment))
599 if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
603 vma->node.start < (flags & PIN_OFFSET_MASK))
607 vma->node.start != (flags & PIN_OFFSET_MASK))
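When i915_vma_misplaced() reports that an existing binding no longer satisfies the request (size, alignment, offset or mappability), callers typically unbind and pin again. A minimal sketch of that pattern, with illustrative flags:

        if (i915_vma_misplaced(vma, size, alignment, flags)) {
                err = i915_vma_unbind(vma);
                if (err)
                        return err;
        }

        err = i915_vma_pin(vma, size, alignment, flags);
        if (err)
                return err;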
613 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
617 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
618 GEM_BUG_ON(!vma->fence_size);
620 fenceable = (vma->node.size >= vma->fence_size &&
621 IS_ALIGNED(vma->node.start, vma->fence_alignment));
623 mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
626 set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
628 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
631 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
633 struct drm_mm_node *node = &vma->node;
643 if (!i915_vm_has_cache_coloring(vma->vm))
646 vma */
676 * i915_vma_insert - finds a slot for the vma in its address space
677 * @vma: the vma
678 * @size: requested size in bytes (can be larger than the VMA)
683 the VMA. Failing that, if the flags permit, it will evict an old VMA,
684 preferably the oldest idle entry to make room for the new VMA.
690 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
696 GEM_BUG_ON(i915_vma_is_closed(vma));
697 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
698 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
700 size = max(size, vma->size);
701 alignment = max(alignment, vma->display_alignment);
703 size = max_t(typeof(size), size, vma->fence_size);
705 alignment, vma->fence_alignment);
715 end = vma->vm->total;
717 end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
734 if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
735 color = vma->obj->cache_level;
743 ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
758 vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
766 rounddown_pow_of_two(vma->page_sizes.sg |
774 GEM_BUG_ON(i915_vma_is_ggtt(vma));
778 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
782 ret = i915_gem_gtt_insert(vma->vm, &vma->node,
788 GEM_BUG_ON(vma->node.start < start);
789 GEM_BUG_ON(vma->node.start + vma->node.size > end);
791 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
792 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
794 if (vma->obj) {
795 struct drm_i915_gem_object *obj = vma->obj;
800 list_add_tail(&vma->vm_link, &vma->vm->bound_list);
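i915_vma_insert() is reached via i915_vma_pin(); the placement constraints arrive in the pin flags, with any requested offset carried in the flags word and recovered through PIN_OFFSET_MASK, as the misplaced() checks earlier suggest. A hypothetical sketch of pinning at a fixed, page-aligned GGTT offset (the PIN_OFFSET_FIXED encoding is an assumption, not shown in the lines above):

        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_OFFSET_FIXED | offset);
        if (err)
                return err;
        GEM_BUG_ON(vma->node.start != offset);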
806 i915_vma_detach(struct i915_vma *vma)
808 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
809 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
813 * vma, we can drop its hold on the backing storage and allow
816 list_del(&vma->vm_link);
817 if (vma->obj) {
818 struct drm_i915_gem_object *obj = vma->obj;
825 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
830 bound = atomic_read(&vma->flags);
842 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
851 mutex_lock(&vma->vm->mutex);
862 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
863 mutex_unlock(&vma->vm->mutex);
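try_qad_pin() ("quick and dirty") tries to take a pin without vm->mutex by bumping the combined flags/pin-count word with a compare-exchange, but only while the vma is already bound the way the caller needs; anything else falls back to the locked path. A stripped-down sketch of the idiom (the helper name and the simplified flag test are illustrative):

        static bool example_try_pin(struct i915_vma *vma, unsigned int required)
        {
                int bound = atomic_read(&vma->flags);

                do {
                        if ((bound & required) != required)
                                return false;   /* not bound as needed; use the slow path */
                } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

                return true;    /* pin count incremented in the low bits */
        }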
868 static int vma_get_pages(struct i915_vma *vma)
872 if (atomic_add_unless(&vma->pages_count, 1, 0))
876 if (mutex_lock_interruptible(&vma->pages_mutex))
879 if (!atomic_read(&vma->pages_count)) {
880 if (vma->obj) {
881 err = i915_gem_object_pin_pages(vma->obj);
886 err = vma->ops->set_pages(vma);
888 if (vma->obj)
889 i915_gem_object_unpin_pages(vma->obj);
893 atomic_inc(&vma->pages_count);
896 mutex_unlock(&vma->pages_mutex);
901 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
904 mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
905 GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
906 if (atomic_sub_return(count, &vma->pages_count) == 0) {
907 vma->ops->clear_pages(vma);
908 GEM_BUG_ON(vma->pages);
909 if (vma->obj)
910 i915_gem_object_unpin_pages(vma->obj);
912 mutex_unlock(&vma->pages_mutex);
915 static void vma_put_pages(struct i915_vma *vma)
917 if (atomic_add_unless(&vma->pages_count, -1, 1))
920 __vma_put_pages(vma, 1);
923 static void vma_unbind_pages(struct i915_vma *vma)
927 lockdep_assert_held(&vma->vm->mutex);
930 count = atomic_read(&vma->pages_count);
934 __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
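pages_count and pages_mutex together form a small refcount with a lockless fast path: atomic_add_unless() takes a reference only when the pages are already populated, while the first and last references serialise on pages_mutex so ops->set_pages()/clear_pages() run exactly once. A condensed sketch of the acquire side (the object-page pinning and error unwinding of the real vma_get_pages() are trimmed):

        static int example_get_pages(struct i915_vma *vma)
        {
                int err = 0;

                if (atomic_add_unless(&vma->pages_count, 1, 0))
                        return 0;               /* already populated, fast path */

                if (mutex_lock_interruptible(&vma->pages_mutex))
                        return -EINTR;

                if (!atomic_read(&vma->pages_count)) {
                        err = vma->ops->set_pages(vma); /* build vma->pages */
                        if (err)
                                goto unlock;
                }
                atomic_inc(&vma->pages_count);

        unlock:
                mutex_unlock(&vma->pages_mutex);
                return err;
        }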
937 int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
950 /* First try and grab the pin without rebinding the vma */
951 if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
954 err = vma_get_pages(vma);
958 if (flags & vma->vm->bind_async_flags) {
967 wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
970 err = mutex_lock_interruptible(&vma->vm->mutex);
974 bound = atomic_read(&vma->flags);
986 __i915_vma_pin(vma);
990 err = i915_active_acquire(&vma->active);
995 err = i915_vma_insert(vma, size, alignment, flags);
999 if (i915_is_ggtt(vma->vm))
1000 __i915_vma_set_map_and_fenceable(vma);
1003 GEM_BUG_ON(!vma->pages);
1004 err = i915_vma_bind(vma,
1005 vma->obj ? vma->obj->cache_level : 0,
1012 atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
1013 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1015 __i915_vma_pin(vma);
1016 GEM_BUG_ON(!i915_vma_is_pinned(vma));
1017 GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
1018 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
1021 if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
1022 i915_vma_detach(vma);
1023 drm_mm_remove_node(&vma->node);
1026 i915_active_release(&vma->active);
1028 mutex_unlock(&vma->vm->mutex);
1033 intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
1035 vma_put_pages(vma);
1050 int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
1052 struct i915_address_space *vm = vma->vm;
1055 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
1058 err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
1071 void i915_vma_close(struct i915_vma *vma)
1073 struct intel_gt *gt = vma->vm->gt;
1076 GEM_BUG_ON(i915_vma_is_closed(vma));
1079 * We defer actually closing, unbinding and destroying the VMA until
1082 * client, avoiding the work required to rebind the VMA. This is
1084 * between themselves, temporarily opening a local VMA to the
1087 * causing us to rebind the VMA once more. This ends up being a lot
1091 list_add(&vma->closed_link, &gt->closed_vma);
1095 static void __i915_vma_remove_closed(struct i915_vma *vma)
1097 struct intel_gt *gt = vma->vm->gt;
1100 list_del_init(&vma->closed_link);
1104 void i915_vma_reopen(struct i915_vma *vma)
1106 if (i915_vma_is_closed(vma))
1107 __i915_vma_remove_closed(vma);
1112 struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
1114 if (drm_mm_node_allocated(&vma->node)) {
1115 mutex_lock(&vma->vm->mutex);
1116 atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1117 WARN_ON(__i915_vma_unbind(vma));
1118 mutex_unlock(&vma->vm->mutex);
1119 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1121 GEM_BUG_ON(i915_vma_is_active(vma));
1123 if (vma->obj) {
1124 struct drm_i915_gem_object *obj = vma->obj;
1126 spin_lock(&obj->vma.lock);
1127 list_del(&vma->obj_link);
1128 rb_erase(&vma->obj_node, &obj->vma.tree);
1129 spin_unlock(&obj->vma.lock);
1132 __i915_vma_remove_closed(vma);
1133 i915_vm_put(vma->vm);
1135 i915_active_fini(&vma->active);
1136 i915_vma_free(vma);
1141 struct i915_vma *vma, *next;
1144 list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
1145 struct drm_i915_gem_object *obj = vma->obj;
1146 struct i915_address_space *vm = vma->vm;
1154 list_del_init(&vma->closed_link);
1163 __i915_vma_put(vma);
1177 static void __i915_vma_iounmap(struct i915_vma *vma)
1179 GEM_BUG_ON(i915_vma_is_pinned(vma));
1181 if (vma->iomap == NULL)
1185 io_mapping_unmap(&i915_vm_to_ggtt(vma->vm)->iomap, vma->iomap,
1186 vma->node.size);
1188 io_mapping_unmap(vma->iomap);
1190 vma->iomap = NULL;
1193 void i915_vma_revoke_mmap(struct i915_vma *vma)
1198 if (!i915_vma_has_userfault(vma))
1201 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
1202 GEM_BUG_ON(!vma->obj->userfault_count);
1207 struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
1208 paddr_t pa = i915->ggtt.gmadr.start + vma->node.start;
1209 vsize_t npgs = vma->size >> PAGE_SHIFT;
1213 node = &vma->mmo->vma_node;
1214 vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
1215 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
1217 vma->size,
1221 i915_vma_unset_userfault(vma);
1222 if (!--vma->obj->userfault_count)
1223 list_del(&vma->obj->userfault_link);
1226 int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
1230 GEM_BUG_ON(!i915_vma_is_pinned(vma));
1232 /* Wait for the vma to be bound before we start! */
1233 err = i915_request_await_active(rq, &vma->active);
1237 return i915_active_add_request(&vma->active, rq);
1240 int i915_vma_move_to_active(struct i915_vma *vma,
1244 struct drm_i915_gem_object *obj = vma->obj;
1249 err = __i915_vma_move_to_active(vma, rq);
1263 dma_resv_add_excl_fence(vma->resv, &rq->fence);
1267 err = dma_resv_reserve_shared(vma->resv, 1);
1271 dma_resv_add_shared_fence(vma->resv, &rq->fence);
1277 GEM_BUG_ON(!i915_vma_is_active(vma));
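i915_vma_move_to_active() is what keeps a pinned vma (and its object) alive for the lifetime of a request, recording the request's fence on the object's reservation as either the exclusive (write) or a shared (read) fence. A sketch of the request-building pattern; EXEC_OBJECT_WRITE is the uapi write flag, and the surrounding i915_vma_lock()/i915_vma_unlock() pairing is assumed rather than shown in the lines above:

        i915_vma_lock(vma);
        err = i915_vma_move_to_active(vma, rq, write ? EXEC_OBJECT_WRITE : 0);
        i915_vma_unlock(vma);
        if (err)
                return err;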
1281 vma)
1285 lockdep_assert_held(&vma->vm->mutex);
1289 * have side-effects such as unpinning or even unbinding this vma.
1295 ret = i915_vma_sync(vma);
1299 if (i915_vma_is_pinned(vma)) {
1300 vma_print_allocator(vma, "is pinned");
1305 * After confirming that no one else is pinning this vma, wait for
1309 ret = i915_vma_sync(vma);
1313 if (!drm_mm_node_allocated(&vma->node))
1316 GEM_BUG_ON(i915_vma_is_pinned(vma));
1317 GEM_BUG_ON(i915_vma_is_active(vma));
1319 if (i915_vma_is_map_and_fenceable(vma)) {
1326 i915_vma_flush_writes(vma);
1327 GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
1330 ret = i915_vma_revoke_fence(vma);
1335 i915_vma_revoke_mmap(vma);
1337 __i915_vma_iounmap(vma);
1338 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
1340 GEM_BUG_ON(vma->fence);
1341 GEM_BUG_ON(i915_vma_has_userfault(vma));
1343 if (likely(atomic_read(&vma->vm->open))) {
1344 trace_i915_vma_unbind(vma);
1345 vma->ops->unbind_vma(vma);
1347 atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags);
1349 i915_vma_detach(vma);
1350 vma_unbind_pages(vma);
1352 drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
1356 int i915_vma_unbind(struct i915_vma *vma)
1358 struct i915_address_space *vm = vma->vm;
1362 if (!drm_mm_node_allocated(&vma->node))
1365 if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
1373 err = __i915_vma_unbind(vma);
1382 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
1384 i915_gem_object_make_unshrinkable(vma->obj);
1385 return vma;
1388 void i915_vma_make_shrinkable(struct i915_vma *vma)
1390 i915_gem_object_make_shrinkable(vma->obj);
1393 void i915_vma_make_purgeable(struct i915_vma *vma)
1395 i915_gem_object_make_purgeable(vma->obj);