      1 /*	$NetBSD: i915_vma.c,v 1.10 2021/12/19 12:09:58 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright © 2016 Intel Corporation
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice (including the next
     14  * paragraph) shall be included in all copies or substantial portions of the
     15  * Software.
     16  *
     17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     22  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     23  * IN THE SOFTWARE.
     24  *
     25  */
     26 
     27 #include <sys/cdefs.h>
     28 __KERNEL_RCSID(0, "$NetBSD: i915_vma.c,v 1.10 2021/12/19 12:09:58 riastradh Exp $");
     29 
     30 #include <linux/sched/mm.h>
     31 #include <drm/drm_gem.h>
     32 
     33 #include "display/intel_frontbuffer.h"
     34 
     35 #include "gt/intel_engine.h"
     36 #include "gt/intel_engine_heartbeat.h"
     37 #include "gt/intel_gt.h"
     38 #include "gt/intel_gt_requests.h"
     39 
     40 #include "i915_drv.h"
     41 #include "i915_globals.h"
     42 #include "i915_sw_fence_work.h"
     43 #include "i915_trace.h"
     44 #include "i915_vma.h"
     45 
     46 #include <linux/nbsd-namespace.h>
     47 
     48 static struct i915_global_vma {
     49 	struct i915_global base;
     50 	struct kmem_cache *slab_vmas;
     51 } global;
     52 
     53 struct i915_vma *i915_vma_alloc(void)
     54 {
     55 	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
     56 }
     57 
     58 void i915_vma_free(struct i915_vma *vma)
     59 {
     60 	mutex_destroy(&vma->pages_mutex);
     61 	return kmem_cache_free(global.slab_vmas, vma);
     62 }
     63 
     64 #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
     65 
     66 #include <linux/stackdepot.h>
     67 
     68 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
     69 {
     70 	unsigned long *entries;
     71 	unsigned int nr_entries;
     72 	char buf[512];
     73 
     74 	if (!vma->node.stack) {
     75 		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
     76 				 vma->node.start, vma->node.size, reason);
     77 		return;
     78 	}
     79 
     80 	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
     81 	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
     82 	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
     83 			 vma->node.start, vma->node.size, reason, buf);
     84 }
     85 
     86 #else
     87 
     88 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
     89 {
     90 }
     91 
     92 #endif
     93 
     94 static inline struct i915_vma *active_to_vma(struct i915_active *ref)
     95 {
     96 	return container_of(ref, typeof(struct i915_vma), active);
     97 }
     98 
     99 static int __i915_vma_active(struct i915_active *ref)
    100 {
    101 	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
    102 }
    103 
    104 __i915_active_call
    105 static void __i915_vma_retire(struct i915_active *ref)
    106 {
    107 	i915_vma_put(active_to_vma(ref));
    108 }
    109 
    110 #ifdef __NetBSD__
    111 struct i915_vma_key {
    112 	struct i915_address_space *vm;
    113 	const struct i915_ggtt_view *view;
    114 };
    115 
    116 static int
    117 compare_vma(void *cookie, const void *va, const void *vb)
    118 {
    119 	const struct i915_vma *a = va;
    120 	const struct i915_vma *b = vb;
    121 	long cmp = i915_vma_compare(__UNCONST(a), b->vm,
    122 	    b->ggtt_view.type == I915_GGTT_VIEW_NORMAL ? NULL : &b->ggtt_view);
    123 
    124 	return (cmp < 0 ? -1 : cmp > 0 ? +1 : 0);
    125 }
    126 
    127 static int
    128 compare_vma_key(void *cookie, const void *vn, const void *vk)
    129 {
    130 	const struct i915_vma *vma = vn;
    131 	const struct i915_vma_key *key = vk;
    132 	long cmp = i915_vma_compare(__UNCONST(vma), key->vm, key->view);
    133 
    134 	return (cmp < 0 ? -1 : cmp > 0 ? +1 : 0);
    135 }
    136 
    137 static const rb_tree_ops_t vma_tree_rb_ops = {
    138 	.rbto_compare_nodes = compare_vma,
    139 	.rbto_compare_key = compare_vma_key,
    140 	.rbto_node_offset = offsetof(struct i915_vma, obj_node),
    141 };
    142 #endif
    143 
    144 void
    145 i915_vma_tree_init(struct drm_i915_gem_object *obj)
    146 {
    147 #ifdef __NetBSD__
    148 	rb_tree_init(&obj->vma.tree.rbr_tree, &vma_tree_rb_ops);
    149 #else
    150 	obj->vma.tree = RB_ROOT;
    151 #endif
    152 }
    153 
    154 static struct i915_vma *
    155 vma_create(struct drm_i915_gem_object *obj,
    156 	   struct i915_address_space *vm,
    157 	   const struct i915_ggtt_view *view)
    158 {
    159 	struct i915_vma *vma;
    160 	struct rb_node *rb, **p;
    161 
    162 	/* The aliasing_ppgtt should never be used directly! */
    163 	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
    164 
    165 	vma = i915_vma_alloc();
    166 	if (vma == NULL)
    167 		return ERR_PTR(-ENOMEM);
    168 
    169 	kref_init(&vma->ref);
    170 	mutex_init(&vma->pages_mutex);
    171 	vma->vm = i915_vm_get(vm);
    172 	vma->ops = &vm->vma_ops;
    173 	vma->obj = obj;
    174 	vma->resv = obj->base.resv;
    175 	vma->size = obj->base.size;
    176 	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
    177 
    178 	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
    179 
    180 	/* Declare ourselves safe for use inside shrinkers */
    181 	if (IS_ENABLED(CONFIG_LOCKDEP)) {
    182 		fs_reclaim_acquire(GFP_KERNEL);
    183 		might_lock(&vma->active.mutex);
    184 		fs_reclaim_release(GFP_KERNEL);
    185 	}
    186 
    187 	INIT_LIST_HEAD(&vma->closed_link);
    188 
    189 	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
    190 		vma->ggtt_view = *view;
    191 		if (view->type == I915_GGTT_VIEW_PARTIAL) {
    192 			GEM_BUG_ON(range_overflows_t(u64,
    193 						     view->partial.offset,
    194 						     view->partial.size,
    195 						     obj->base.size >> PAGE_SHIFT));
    196 			vma->size = view->partial.size;
    197 			vma->size <<= PAGE_SHIFT;
    198 			GEM_BUG_ON(vma->size > obj->base.size);
    199 		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
    200 			vma->size = intel_rotation_info_size(&view->rotated);
    201 			vma->size <<= PAGE_SHIFT;
    202 		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
    203 			vma->size = intel_remapped_info_size(&view->remapped);
    204 			vma->size <<= PAGE_SHIFT;
    205 		}
    206 	}
    207 
    208 	if (unlikely(vma->size > vm->total))
    209 		goto err_vma;
    210 
    211 	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
    212 
    213 	if (i915_is_ggtt(vm)) {
    214 		if (unlikely(overflows_type(vma->size, u32)))
    215 			goto err_vma;
    216 
    217 		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
    218 						      i915_gem_object_get_tiling(obj),
    219 						      i915_gem_object_get_stride(obj));
    220 		if (unlikely(vma->fence_size < vma->size || /* overflow */
    221 			     vma->fence_size > vm->total))
    222 			goto err_vma;
    223 
    224 		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
    225 
    226 		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
    227 								i915_gem_object_get_tiling(obj),
    228 								i915_gem_object_get_stride(obj));
    229 		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
    230 
    231 		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
    232 	}
    233 
    234 	spin_lock(&obj->vma.lock);
    235 
    236 #ifdef __NetBSD__
    237 	__USE(rb);
    238 	__USE(p);
    239 	struct i915_vma *collision __diagused;
    240 	collision = rb_tree_insert_node(&obj->vma.tree.rbr_tree, vma);
    241 	KASSERT(collision == vma);
    242 #else
    243 	rb = NULL;
    244 	p = &obj->vma.tree.rb_node;
    245 	while (*p) {
    246 		struct i915_vma *pos;
    247 		long cmp;
    248 
    249 		rb = *p;
    250 		pos = rb_entry(rb, struct i915_vma, obj_node);
    251 
    252 		/*
    253 		 * If the view already exists in the tree, another thread
    254 		 * already created a matching vma, so return the older instance
    255 		 * and dispose of ours.
    256 		 */
    257 		cmp = i915_vma_compare(pos, vm, view);
    258 		if (cmp == 0) {
    259 			spin_unlock(&obj->vma.lock);
    260 			i915_vma_free(vma);
    261 			return pos;
    262 		}
    263 
    264 		if (cmp < 0)
    265 			p = &rb->rb_right;
    266 		else
    267 			p = &rb->rb_left;
    268 	}
    269 	rb_link_node(&vma->obj_node, rb, p);
    270 	rb_insert_color(&vma->obj_node, &obj->vma.tree);
    271 #endif
    272 
    273 	if (i915_vma_is_ggtt(vma))
    274 		/*
    275 		 * We put the GGTT vma at the start of the vma-list, followed
    276 		 * by the ppGTT vma. This allows us to break early when
    277 		 * iterating over only the GGTT vma for an object, see
    278 		 * for_each_ggtt_vma()
    279 		 */
    280 		list_add(&vma->obj_link, &obj->vma.list);
    281 	else
    282 		list_add_tail(&vma->obj_link, &obj->vma.list);
    283 
    284 	spin_unlock(&obj->vma.lock);
    285 
    286 	return vma;
    287 
    288 err_vma:
    289 	i915_vma_free(vma);
    290 	return ERR_PTR(-E2BIG);
    291 }
    292 
    293 static struct i915_vma *
    294 vma_lookup(struct drm_i915_gem_object *obj,
    295 	   struct i915_address_space *vm,
    296 	   const struct i915_ggtt_view *view)
    297 {
    298 #ifdef __NetBSD__
    299 	const struct i915_vma_key key = { .vm = vm, .view = view };
    300 
    301 	return rb_tree_find_node(&obj->vma.tree.rbr_tree, &key);
    302 #else
    303 	struct rb_node *rb;
    304 
    305 	rb = obj->vma.tree.rb_node;
    306 	while (rb) {
    307 		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
    308 		long cmp;
    309 
    310 		cmp = i915_vma_compare(vma, vm, view);
    311 		if (cmp == 0)
    312 			return vma;
    313 
    314 		if (cmp < 0)
    315 			rb = rb->rb_right;
    316 		else
    317 			rb = rb->rb_left;
    318 	}
    319 
    320 	return NULL;
    321 #endif
    322 }
    323 
    324 /**
    325  * i915_vma_instance - return the singleton instance of the VMA
    326  * @obj: parent &struct drm_i915_gem_object to be mapped
    327  * @vm: address space in which the mapping is located
    328  * @view: additional mapping requirements
    329  *
    330  * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
    331  * the same @view characteristics. If a match is not found, one is created.
    332  * Once created, the VMA is kept until either the object is freed, or the
    333  * address space is closed.
    334  *
    335  * Returns the vma, or an error pointer.
    336  */
    337 struct i915_vma *
    338 i915_vma_instance(struct drm_i915_gem_object *obj,
    339 		  struct i915_address_space *vm,
    340 		  const struct i915_ggtt_view *view)
    341 {
    342 	struct i915_vma *vma;
    343 
    344 	GEM_BUG_ON(view && !i915_is_ggtt(vm));
    345 	GEM_BUG_ON(!atomic_read(&vm->open));
    346 
    347 	spin_lock(&obj->vma.lock);
    348 	vma = vma_lookup(obj, vm, view);
    349 	spin_unlock(&obj->vma.lock);
    350 
    351 	/* vma_create() will resolve the race if another creates the vma */
    352 	if (unlikely(!vma))
    353 		vma = vma_create(obj, vm, view);
    354 
    355 	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
    356 	return vma;
    357 }
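
        /*
         * Illustrative usage sketch (not part of the original file): a
         * typical caller looks up the singleton vma for the global GTT and
         * pins it.  The identifiers "obj" and "ggtt" stand for caller state
         * and are assumptions of the example only.
         *
         *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
         *	if (IS_ERR(vma))
         *		return PTR_ERR(vma);
         *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
         *	if (err)
         *		return err;
         *	...
         *	i915_vma_unpin(vma);
         */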
    358 
    359 struct i915_vma_work {
    360 	struct dma_fence_work base;
    361 	struct i915_vma *vma;
    362 	struct drm_i915_gem_object *pinned;
    363 	enum i915_cache_level cache_level;
    364 	unsigned int flags;
    365 };
    366 
    367 static int __vma_bind(struct dma_fence_work *work)
    368 {
    369 	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
    370 	struct i915_vma *vma = vw->vma;
    371 	int err;
    372 
    373 	err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
    374 	if (err)
    375 		atomic_or(I915_VMA_ERROR, &vma->flags);
    376 
    377 	return err;
    378 }
    379 
    380 static void __vma_release(struct dma_fence_work *work)
    381 {
    382 	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
    383 
    384 	if (vw->pinned)
    385 		__i915_gem_object_unpin_pages(vw->pinned);
    386 }
    387 
    388 static const struct dma_fence_work_ops bind_ops = {
    389 	.name = "bind",
    390 	.work = __vma_bind,
    391 	.release = __vma_release,
    392 };
    393 
    394 struct i915_vma_work *i915_vma_work(void)
    395 {
    396 	struct i915_vma_work *vw;
    397 
    398 	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
    399 	if (!vw)
    400 		return NULL;
    401 
    402 	dma_fence_work_init(&vw->base, &bind_ops);
    403 	vw->base.dma.error = -EAGAIN; /* disable the worker by default */
    404 
    405 	return vw;
    406 }
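
        /*
         * Lifecycle note (derived from the code in this file): the worker is
         * allocated before vm->mutex is taken in i915_vma_pin(), kept
         * disabled by the -EAGAIN sentinel above, re-enabled by
         * i915_vma_bind() when an asynchronous bind is queued, and handed
         * off with dma_fence_work_commit() at the end of i915_vma_pin(); if
         * it was never enabled, the preset error makes the commit a no-op.
         */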
    407 
    408 /**
    409  * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
    410  * @vma: VMA to map
    411  * @cache_level: mapping cache level
    412  * @flags: flags like global or local mapping
    413  * @work: preallocated worker for allocating and binding the PTE
    414  *
    415  * DMA addresses are taken from the scatter-gather table of this object (or of
    416  * this VMA in case of non-default GGTT views) and PTE entries set up.
    417  * Note that DMA addresses are also the only part of the SG table we care about.
    418  */
    419 int i915_vma_bind(struct i915_vma *vma,
    420 		  enum i915_cache_level cache_level,
    421 		  u32 flags,
    422 		  struct i915_vma_work *work)
    423 {
    424 	u32 bind_flags;
    425 	u32 vma_flags;
    426 	int ret;
    427 
    428 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
    429 	GEM_BUG_ON(vma->size > vma->node.size);
    430 
    431 	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
    432 					      vma->node.size,
    433 					      vma->vm->total)))
    434 		return -ENODEV;
    435 
    436 	if (GEM_DEBUG_WARN_ON(!flags))
    437 		return -EINVAL;
    438 
    439 	bind_flags = flags;
    440 	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
    441 
    442 	vma_flags = atomic_read(&vma->flags);
    443 	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
    444 	if (flags & PIN_UPDATE)
    445 		bind_flags |= vma_flags;
    446 	else
    447 		bind_flags &= ~vma_flags;
    448 	if (bind_flags == 0)
    449 		return 0;
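
        	/*
        	 * Worked example of the masking above (illustrative): if the vma
        	 * already carries its GGTT binding (vma_flags == I915_VMA_GLOBAL_BIND)
        	 * and the caller asks for I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND
        	 * without PIN_UPDATE, then bind_flags &= ~vma_flags leaves only
        	 * I915_VMA_LOCAL_BIND, so just the missing ppGTT binding is set up
        	 * below.
        	 */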
    450 
    451 	GEM_BUG_ON(!vma->pages);
    452 
    453 	trace_i915_vma_bind(vma, bind_flags);
    454 	if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
    455 		work->vma = vma;
    456 		work->cache_level = cache_level;
    457 		work->flags = bind_flags | I915_VMA_ALLOC;
    458 
    459 		/*
    460 		 * Note we only want to chain up to the migration fence on
    461 		 * the pages (not the object itself). As we don't track that
    462 		 * yet, we have to use the exclusive fence instead.
    463 		 *
    464 		 * Also note that we do not want to track the async vma as
    465 		 * part of the obj->resv->excl_fence as it only affects
    466 		 * execution and not content or object's backing store lifetime.
    467 		 */
    468 		GEM_BUG_ON(i915_active_has_exclusive(&vma->active));
    469 		i915_active_set_exclusive(&vma->active, &work->base.dma);
    470 		work->base.dma.error = 0; /* enable the queue_work() */
    471 
    472 		if (vma->obj) {
    473 			__i915_gem_object_pin_pages(vma->obj);
    474 			work->pinned = vma->obj;
    475 		}
    476 	} else {
    477 		GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
    478 		ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
    479 		if (ret)
    480 			return ret;
    481 	}
    482 
    483 	atomic_or(bind_flags, &vma->flags);
    484 	return 0;
    485 }
    486 
    487 #ifdef __NetBSD__
    488 #  define	__iomem		__i915_vma_iomem
    489 #endif
    490 
    491 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
    492 {
    493 	void __iomem *ptr;
    494 	int err;
    495 
    496 	if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
    497 		err = -ENODEV;
    498 		goto err;
    499 	}
    500 
    501 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
    502 	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
    503 
    504 	ptr = READ_ONCE(vma->iomap);
    505 	if (ptr == NULL) {
    506 		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
    507 					vma->node.start,
    508 					vma->node.size);
    509 		if (ptr == NULL) {
    510 			err = -ENOMEM;
    511 			goto err;
    512 		}
    513 
    514 		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
    515 #ifdef __NetBSD__
    516 			io_mapping_unmap(&i915_vm_to_ggtt(vma->vm)->iomap, ptr);
    517 #else
    518 			io_mapping_unmap(ptr);
    519 #endif
    520 			ptr = vma->iomap;
    521 		}
    522 	}
    523 
    524 	__i915_vma_pin(vma);
    525 
    526 	err = i915_vma_pin_fence(vma);
    527 	if (err)
    528 		goto err_unpin;
    529 
    530 	i915_vma_set_ggtt_write(vma);
    531 
    532 	/* NB Access through the GTT requires the device to be awake. */
    533 	return ptr;
    534 
    535 err_unpin:
    536 	__i915_vma_unpin(vma);
    537 err:
    538 	return IO_ERR_PTR(err);
    539 }
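
        /*
         * Illustrative pairing (assumptions of the example: "vma" is a
         * map-and-fenceable GGTT vma and the caller already holds the device
         * awake, e.g. via a runtime-pm wakeref):
         *
         *	ptr = i915_vma_pin_iomap(vma);
         *	if (IS_ERR(ptr))
         *		return PTR_ERR(ptr);
         *	... write through ptr ...
         *	i915_vma_unpin_iomap(vma);
         */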
    540 
    541 #ifdef __NetBSD__
    542 #  undef	__iomem
    543 #endif
    544 
    545 void i915_vma_flush_writes(struct i915_vma *vma)
    546 {
    547 	if (i915_vma_unset_ggtt_write(vma))
    548 		intel_gt_flush_ggtt_writes(vma->vm->gt);
    549 }
    550 
    551 void i915_vma_unpin_iomap(struct i915_vma *vma)
    552 {
    553 	GEM_BUG_ON(vma->iomap == NULL);
    554 
    555 	i915_vma_flush_writes(vma);
    556 
    557 	i915_vma_unpin_fence(vma);
    558 	i915_vma_unpin(vma);
    559 }
    560 
    561 void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
    562 {
    563 	struct i915_vma *vma;
    564 	struct drm_i915_gem_object *obj;
    565 
    566 	vma = fetch_and_zero(p_vma);
    567 	if (!vma)
    568 		return;
    569 
    570 	obj = vma->obj;
    571 	GEM_BUG_ON(!obj);
    572 
    573 	i915_vma_unpin(vma);
    574 	i915_vma_close(vma);
    575 
    576 	if (flags & I915_VMA_RELEASE_MAP)
    577 		i915_gem_object_unpin_map(obj);
    578 
    579 	i915_gem_object_put(obj);
    580 }
    581 
    582 bool i915_vma_misplaced(const struct i915_vma *vma,
    583 			u64 size, u64 alignment, u64 flags)
    584 {
    585 	if (!drm_mm_node_allocated(&vma->node))
    586 		return false;
    587 
    588 	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags_const(vma)))
    589 		return true;
    590 
    591 	if (vma->node.size < size)
    592 		return true;
    593 
    594 	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
    595 	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
    596 		return true;
    597 
    598 	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
    599 		return true;
    600 
    601 	if (flags & PIN_OFFSET_BIAS &&
    602 	    vma->node.start < (flags & PIN_OFFSET_MASK))
    603 		return true;
    604 
    605 	if (flags & PIN_OFFSET_FIXED &&
    606 	    vma->node.start != (flags & PIN_OFFSET_MASK))
    607 		return true;
    608 
    609 	return false;
    610 }
    611 
    612 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
    613 {
    614 	bool mappable, fenceable;
    615 
    616 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
    617 	GEM_BUG_ON(!vma->fence_size);
    618 
    619 	fenceable = (vma->node.size >= vma->fence_size &&
    620 		     IS_ALIGNED(vma->node.start, vma->fence_alignment));
    621 
    622 	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
    623 
    624 	if (mappable && fenceable)
    625 		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
    626 	else
    627 		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
    628 }
    629 
    630 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
    631 {
    632 	struct drm_mm_node *node = &vma->node;
    633 	struct drm_mm_node *other;
    634 
    635 	/*
    636 	 * On some machines we have to be careful when putting differing types
    637 	 * of snoopable memory together to avoid the prefetcher crossing memory
    638 	 * domains and dying. During vm initialisation, we decide whether or not
    639 	 * these constraints apply and set the drm_mm.color_adjust
    640 	 * appropriately.
    641 	 */
    642 	if (!i915_vm_has_cache_coloring(vma->vm))
    643 		return true;
    644 
    645 	/* Only valid to be called on an already inserted vma */
    646 	GEM_BUG_ON(!drm_mm_node_allocated(node));
    647 	GEM_BUG_ON(list_empty(&node->node_list));
    648 
    649 	other = list_prev_entry(node, node_list);
    650 	if (i915_node_color_differs(other, color) &&
    651 	    !drm_mm_hole_follows(other))
    652 		return false;
    653 
    654 	other = list_next_entry(node, node_list);
    655 	if (i915_node_color_differs(other, color) &&
    656 	    !drm_mm_hole_follows(node))
    657 		return false;
    658 
    659 	return true;
    660 }
    661 
    662 static void assert_bind_count(const struct drm_i915_gem_object *obj)
    663 {
    664 	/*
    665 	 * Combine the assertion that the object is bound and that we have
    666 	 * pinned its pages. But we should never have bound the object
    667 	 * more than we have pinned its pages. (For complete accuracy, we
    668 	 * assume that no one else is pinning the pages, but as a rough assertion
    669 	 * that we will not run into problems later, this will do!)
    670 	 */
    671 	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
    672 }
    673 
    674 /**
    675  * i915_vma_insert - finds a slot for the vma in its address space
    676  * @vma: the vma
    677  * @size: requested size in bytes (can be larger than the VMA)
    678  * @alignment: required alignment
    679  * @flags: mask of PIN_* flags to use
    680  *
    681  * First we try to allocate some free space that meets the requirements for
    682  * the VMA. Failing that, if the flags permit, it will evict an old VMA,
    683  * preferably the oldest idle entry, to make room for the new VMA.
    684  *
    685  * Returns:
    686  * 0 on success, negative error code otherwise.
    687  */
    688 static int
    689 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
    690 {
    691 	unsigned long color;
    692 	u64 start, end;
    693 	int ret;
    694 
    695 	GEM_BUG_ON(i915_vma_is_closed(vma));
    696 	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
    697 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
    698 
    699 	size = max(size, vma->size);
    700 	alignment = max(alignment, vma->display_alignment);
    701 	if (flags & PIN_MAPPABLE) {
    702 		size = max_t(typeof(size), size, vma->fence_size);
    703 		alignment = max_t(typeof(alignment),
    704 				  alignment, vma->fence_alignment);
    705 	}
    706 
    707 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
    708 	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
    709 	GEM_BUG_ON(!is_power_of_2(alignment));
    710 
    711 	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
    712 	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
    713 
    714 	end = vma->vm->total;
    715 	if (flags & PIN_MAPPABLE)
    716 		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
    717 	if (flags & PIN_ZONE_4G)
    718 		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
    719 	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
    720 
    721 	/* If binding the object/GGTT view requires more space than the entire
    722 	 * aperture has, reject it early before evicting everything in a vain
    723 	 * attempt to find space.
    724 	 */
    725 	if (size > end) {
    726 		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%"PRIu64" > %s aperture=%"PRIu64"\n",
    727 			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
    728 			  end);
    729 		return -ENOSPC;
    730 	}
    731 
    732 	color = 0;
    733 	if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
    734 		color = vma->obj->cache_level;
    735 
    736 	if (flags & PIN_OFFSET_FIXED) {
    737 		u64 offset = flags & PIN_OFFSET_MASK;
    738 		if (!IS_ALIGNED(offset, alignment) ||
    739 		    range_overflows(offset, size, end))
    740 			return -EINVAL;
    741 
    742 		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
    743 					   size, offset, color,
    744 					   flags);
    745 		if (ret)
    746 			return ret;
    747 	} else {
    748 		/*
    749 		 * We only support huge gtt pages through the 48b PPGTT,
    750 		 * however we also don't want to force any alignment for
    751 		 * objects which need to be tightly packed into the low 32bits.
    752 		 *
    753 		 * Note that we assume that the GGTT is limited to 4GiB for the
    754 		 * foreseeable future. See also i915_ggtt_offset().
    755 		 */
    756 		if (upper_32_bits(end - 1) &&
    757 		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
    758 			/*
    759 			 * We can't mix 64K and 4K PTEs in the same page-table
    760 			 * (2M block), and so to avoid the ugliness and
    761 			 * complexity of coloring we opt for just aligning 64K
    762 			 * objects to 2M.
    763 			 */
    764 			u64 page_alignment =
    765 				rounddown_pow_of_two(vma->page_sizes.sg |
    766 						     I915_GTT_PAGE_SIZE_2M);
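        			/*
        			 * Illustrative arithmetic: with page_sizes.sg
        			 * containing 64K (0x10000), this evaluates to
        			 * rounddown_pow_of_two(0x10000 | 0x200000) == 2M,
        			 * so such objects end up with at least 2M alignment.
        			 */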
    767 
    768 			/*
    769 			 * Check we don't expand for the limited Global GTT
    770 			 * (mappable aperture is even more precious!). This
    771 			 * also checks that we exclude the aliasing-ppgtt.
    772 			 */
    773 			GEM_BUG_ON(i915_vma_is_ggtt(vma));
    774 
    775 			alignment = max(alignment, page_alignment);
    776 
    777 			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
    778 				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
    779 		}
    780 
    781 		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
    782 					  size, alignment, color,
    783 					  start, end, flags);
    784 		if (ret)
    785 			return ret;
    786 
    787 		GEM_BUG_ON(vma->node.start < start);
    788 		GEM_BUG_ON(vma->node.start + vma->node.size > end);
    789 	}
    790 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
    791 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
    792 
    793 	if (vma->obj) {
    794 		struct drm_i915_gem_object *obj = vma->obj;
    795 
    796 		atomic_inc(&obj->bind_count);
    797 		assert_bind_count(obj);
    798 	}
    799 	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
    800 
    801 	return 0;
    802 }
    803 
    804 static void
    805 i915_vma_detach(struct i915_vma *vma)
    806 {
    807 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
    808 	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
    809 
    810 	/*
    811 	 * And finally now the object is completely decoupled from this
    812 	 * vma, we can drop its hold on the backing storage and allow
    813 	 * it to be reaped by the shrinker.
    814 	 */
    815 	list_del(&vma->vm_link);
    816 	if (vma->obj) {
    817 		struct drm_i915_gem_object *obj = vma->obj;
    818 
    819 		assert_bind_count(obj);
    820 		atomic_dec(&obj->bind_count);
    821 	}
    822 }
    823 
    824 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
    825 {
    826 	unsigned int bound;
    827 	bool pinned = true;
    828 
    829 	bound = atomic_read(&vma->flags);
    830 	do {
    831 		if (unlikely(flags & ~bound))
    832 			return false;
    833 
    834 		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
    835 			return false;
    836 
    837 		if (!(bound & I915_VMA_PIN_MASK))
    838 			goto unpinned;
    839 
    840 		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
    841 	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
    842 
    843 	return true;
    844 
    845 unpinned:
    846 	/*
    847 	 * If pin_count==0, but we are bound, check under the lock to avoid
    848 	 * racing with a concurrent i915_vma_unbind().
    849 	 */
    850 	mutex_lock(&vma->vm->mutex);
    851 	do {
    852 		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
    853 			pinned = false;
    854 			break;
    855 		}
    856 
    857 		if (unlikely(flags & ~bound)) {
    858 			pinned = false;
    859 			break;
    860 		}
    861 	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
    862 	mutex_unlock(&vma->vm->mutex);
    863 
    864 	return pinned;
    865 }
    866 
    867 static int vma_get_pages(struct i915_vma *vma)
    868 {
    869 	int err = 0;
    870 
    871 	if (atomic_add_unless(&vma->pages_count, 1, 0))
    872 		return 0;
    873 
    874 	/* Allocations ahoy! */
    875 	if (mutex_lock_interruptible(&vma->pages_mutex))
    876 		return -EINTR;
    877 
    878 	if (!atomic_read(&vma->pages_count)) {
    879 		if (vma->obj) {
    880 			err = i915_gem_object_pin_pages(vma->obj);
    881 			if (err)
    882 				goto unlock;
    883 		}
    884 
    885 		err = vma->ops->set_pages(vma);
    886 		if (err) {
    887 			if (vma->obj)
    888 				i915_gem_object_unpin_pages(vma->obj);
    889 			goto unlock;
    890 		}
    891 	}
    892 	atomic_inc(&vma->pages_count);
    893 
    894 unlock:
    895 	mutex_unlock(&vma->pages_mutex);
    896 
    897 	return err;
    898 }
    899 
    900 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
    901 {
    902 	/* We allocate under vma_get_pages, so beware the shrinker */
    903 	mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
    904 	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
    905 	if (atomic_sub_return(count, &vma->pages_count) == 0) {
    906 		vma->ops->clear_pages(vma);
    907 		GEM_BUG_ON(vma->pages);
    908 		if (vma->obj)
    909 			i915_gem_object_unpin_pages(vma->obj);
    910 	}
    911 	mutex_unlock(&vma->pages_mutex);
    912 }
    913 
    914 static void vma_put_pages(struct i915_vma *vma)
    915 {
    916 	if (atomic_add_unless(&vma->pages_count, -1, 1))
    917 		return;
    918 
    919 	__vma_put_pages(vma, 1);
    920 }
    921 
    922 static void vma_unbind_pages(struct i915_vma *vma)
    923 {
    924 	unsigned int count;
    925 
    926 	lockdep_assert_held(&vma->vm->mutex);
    927 
    928 	/* The upper portion of pages_count is the number of bindings */
    929 	count = atomic_read(&vma->pages_count);
    930 	count >>= I915_VMA_PAGES_BIAS;
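        	/*
        	 * Illustrative encoding: pages_count packs page references in the
        	 * low bits and bind counts above I915_VMA_PAGES_BIAS, so a value
        	 * of (2 << I915_VMA_PAGES_BIAS) | 3 yields count == 2 bindings
        	 * here, with three page references still outstanding below it.
        	 */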
    931 	GEM_BUG_ON(!count);
    932 
    933 	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
    934 }
    935 
    936 int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
    937 {
    938 	struct i915_vma_work *work = NULL;
    939 	intel_wakeref_t wakeref = 0;
    940 	unsigned int bound;
    941 	int err;
    942 
    943 	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
    944 	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
    945 
    946 	GEM_BUG_ON(flags & PIN_UPDATE);
    947 	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
    948 
    949 	/* First try and grab the pin without rebinding the vma */
    950 	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
    951 		return 0;
    952 
    953 	err = vma_get_pages(vma);
    954 	if (err)
    955 		return err;
    956 
    957 	if (flags & vma->vm->bind_async_flags) {
    958 		work = i915_vma_work();
    959 		if (!work) {
    960 			err = -ENOMEM;
    961 			goto err_pages;
    962 		}
    963 	}
    964 
    965 	if (flags & PIN_GLOBAL)
    966 		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
    967 
    968 	/* No more allocations allowed once we hold vm->mutex */
    969 	err = mutex_lock_interruptible(&vma->vm->mutex);
    970 	if (err)
    971 		goto err_fence;
    972 
    973 	bound = atomic_read(&vma->flags);
    974 	if (unlikely(bound & I915_VMA_ERROR)) {
    975 		err = -ENOMEM;
    976 		goto err_unlock;
    977 	}
    978 
    979 	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
    980 		err = -EAGAIN; /* pins are meant to be fairly temporary */
    981 		goto err_unlock;
    982 	}
    983 
    984 	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
    985 		__i915_vma_pin(vma);
    986 		goto err_unlock;
    987 	}
    988 
    989 	err = i915_active_acquire(&vma->active);
    990 	if (err)
    991 		goto err_unlock;
    992 
    993 	if (!(bound & I915_VMA_BIND_MASK)) {
    994 		err = i915_vma_insert(vma, size, alignment, flags);
    995 		if (err)
    996 			goto err_active;
    997 
    998 		if (i915_is_ggtt(vma->vm))
    999 			__i915_vma_set_map_and_fenceable(vma);
   1000 	}
   1001 
   1002 	GEM_BUG_ON(!vma->pages);
   1003 	err = i915_vma_bind(vma,
   1004 			    vma->obj ? vma->obj->cache_level : 0,
   1005 			    flags, work);
   1006 	if (err)
   1007 		goto err_remove;
   1008 
   1009 	/* There should only be at most 2 active bindings (user, global) */
   1010 	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
   1011 	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
   1012 	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
   1013 
   1014 	__i915_vma_pin(vma);
   1015 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
   1016 	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
   1017 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
   1018 
   1019 err_remove:
   1020 	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
   1021 		i915_vma_detach(vma);
   1022 		drm_mm_remove_node(&vma->node);
   1023 	}
   1024 err_active:
   1025 	i915_active_release(&vma->active);
   1026 err_unlock:
   1027 	mutex_unlock(&vma->vm->mutex);
   1028 err_fence:
   1029 	if (work)
   1030 		dma_fence_work_commit(&work->base);
   1031 	if (wakeref)
   1032 		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
   1033 err_pages:
   1034 	vma_put_pages(vma);
   1035 	return err;
   1036 }
   1037 
   1038 static void flush_idle_contexts(struct intel_gt *gt)
   1039 {
   1040 	struct intel_engine_cs *engine;
   1041 	enum intel_engine_id id;
   1042 
   1043 	for_each_engine(engine, gt, id)
   1044 		intel_engine_flush_barriers(engine);
   1045 
   1046 	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
   1047 }
   1048 
   1049 int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
   1050 {
   1051 	struct i915_address_space *vm = vma->vm;
   1052 	int err;
   1053 
   1054 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
   1055 
   1056 	do {
   1057 		err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
   1058 		if (err != -ENOSPC)
   1059 			return err;
   1060 
   1061 		/* Unlike i915_vma_pin, we don't take no for an answer! */
   1062 		flush_idle_contexts(vm->gt);
   1063 		if (mutex_lock_interruptible(&vm->mutex) == 0) {
   1064 			i915_gem_evict_vm(vm);
   1065 			mutex_unlock(&vm->mutex);
   1066 		}
   1067 	} while (1);
   1068 }
   1069 
   1070 void i915_vma_close(struct i915_vma *vma)
   1071 {
   1072 	struct intel_gt *gt = vma->vm->gt;
   1073 	unsigned long flags;
   1074 
   1075 	GEM_BUG_ON(i915_vma_is_closed(vma));
   1076 
   1077 	/*
   1078 	 * We defer actually closing, unbinding and destroying the VMA until
   1079 	 * the next idle point, or if the object is freed in the meantime. By
   1080 	 * postponing the unbind, we allow for it to be resurrected by the
   1081 	 * client, avoiding the work required to rebind the VMA. This is
   1082 	 * advantageous for DRI, where the client/server pass objects
   1083 	 * between themselves, temporarily opening a local VMA to the
   1084 	 * object, and then closing it again. The same object is then reused
   1085 	 * on the next frame (or two, depending on the depth of the swap queue)
   1086 	 * causing us to rebind the VMA once more. This ends up being a lot
   1087 	 * of wasted work for the steady state.
   1088 	 */
   1089 	spin_lock_irqsave(&gt->closed_lock, flags);
   1090 	list_add(&vma->closed_link, &gt->closed_vma);
   1091 	spin_unlock_irqrestore(&gt->closed_lock, flags);
   1092 }
   1093 
   1094 static void __i915_vma_remove_closed(struct i915_vma *vma)
   1095 {
   1096 	struct intel_gt *gt = vma->vm->gt;
   1097 
   1098 	spin_lock_irq(&gt->closed_lock);
   1099 	list_del_init(&vma->closed_link);
   1100 	spin_unlock_irq(&gt->closed_lock);
   1101 }
   1102 
   1103 void i915_vma_reopen(struct i915_vma *vma)
   1104 {
   1105 	if (i915_vma_is_closed(vma))
   1106 		__i915_vma_remove_closed(vma);
   1107 }
   1108 
   1109 void i915_vma_release(struct kref *ref)
   1110 {
   1111 	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
   1112 
   1113 	if (drm_mm_node_allocated(&vma->node)) {
   1114 		mutex_lock(&vma->vm->mutex);
   1115 		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
   1116 		WARN_ON(__i915_vma_unbind(vma));
   1117 		mutex_unlock(&vma->vm->mutex);
   1118 		GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
   1119 	}
   1120 	GEM_BUG_ON(i915_vma_is_active(vma));
   1121 
   1122 	if (vma->obj) {
   1123 		struct drm_i915_gem_object *obj = vma->obj;
   1124 
   1125 		spin_lock(&obj->vma.lock);
   1126 		list_del(&vma->obj_link);
   1127 		rb_erase(&vma->obj_node, &obj->vma.tree);
   1128 		spin_unlock(&obj->vma.lock);
   1129 	}
   1130 
   1131 	__i915_vma_remove_closed(vma);
   1132 	i915_vm_put(vma->vm);
   1133 
   1134 	i915_active_fini(&vma->active);
   1135 	i915_vma_free(vma);
   1136 }
   1137 
   1138 void i915_vma_parked(struct intel_gt *gt)
   1139 {
   1140 	struct i915_vma *vma, *next;
   1141 
   1142 	spin_lock_irq(&gt->closed_lock);
   1143 	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
   1144 		struct drm_i915_gem_object *obj = vma->obj;
   1145 		struct i915_address_space *vm = vma->vm;
   1146 
   1147 		/* XXX All to avoid keeping a reference on i915_vma itself */
   1148 
   1149 		if (!kref_get_unless_zero(&obj->base.refcount))
   1150 			continue;
   1151 
   1152 		if (i915_vm_tryopen(vm)) {
   1153 			list_del_init(&vma->closed_link);
   1154 		} else {
   1155 			i915_gem_object_put(obj);
   1156 			obj = NULL;
   1157 		}
   1158 
   1159 		spin_unlock_irq(&gt->closed_lock);
   1160 
   1161 		if (obj) {
   1162 			__i915_vma_put(vma);
   1163 			i915_gem_object_put(obj);
   1164 		}
   1165 
   1166 		i915_vm_close(vm);
   1167 
   1168 		/* Restart after dropping lock */
   1169 		spin_lock_irq(&gt->closed_lock);
   1170 		next = list_first_entry(&gt->closed_vma,
   1171 					typeof(*next), closed_link);
   1172 	}
   1173 	spin_unlock_irq(&gt->closed_lock);
   1174 }
   1175 
   1176 static void __i915_vma_iounmap(struct i915_vma *vma)
   1177 {
   1178 	GEM_BUG_ON(i915_vma_is_pinned(vma));
   1179 
   1180 	if (vma->iomap == NULL)
   1181 		return;
   1182 
   1183 #ifdef __NetBSD__
   1184 	io_mapping_unmap(&i915_vm_to_ggtt(vma->vm)->iomap, vma->iomap);
   1185 #else
   1186 	io_mapping_unmap(vma->iomap);
   1187 #endif
   1188 	vma->iomap = NULL;
   1189 }
   1190 
   1191 void i915_vma_revoke_mmap(struct i915_vma *vma)
   1192 {
   1193 	struct drm_vma_offset_node *node;
   1194 	u64 vma_offset;
   1195 
   1196 	if (!i915_vma_has_userfault(vma))
   1197 		return;
   1198 
   1199 	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
   1200 	GEM_BUG_ON(!vma->obj->userfault_count);
   1201 
   1202 #ifdef __NetBSD__
   1203 	__USE(vma_offset);
   1204 	__USE(node);
   1205 	struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
   1206 	paddr_t pa = i915->ggtt.gmadr.start + vma->node.start;
   1207 	vsize_t npgs = vma->size >> PAGE_SHIFT;
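        	/*
        	 * Revoke any user mappings of this range of the GGTT aperture by
        	 * dropping every mapping of each backing physical page to
        	 * VM_PROT_NONE; the NetBSD counterpart of the
        	 * unmap_mapping_range() call below.
        	 */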
   1208 	while (npgs --> 0)
   1209 		pmap_pv_protect(pa + (npgs << PAGE_SHIFT), VM_PROT_NONE);
   1210 #else
   1211 	node = &vma->mmo->vma_node;
   1212 	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
   1213 	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
   1214 			    drm_vma_node_offset_addr(node) + vma_offset,
   1215 			    vma->size,
   1216 			    1);
   1217 #endif
   1218 
   1219 	i915_vma_unset_userfault(vma);
   1220 	if (!--vma->obj->userfault_count)
   1221 		list_del(&vma->obj->userfault_link);
   1222 }
   1223 
   1224 int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
   1225 {
   1226 	int err;
   1227 
   1228 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
   1229 
   1230 	/* Wait for the vma to be bound before we start! */
   1231 	err = i915_request_await_active(rq, &vma->active);
   1232 	if (err)
   1233 		return err;
   1234 
   1235 	return i915_active_add_request(&vma->active, rq);
   1236 }
   1237 
   1238 int i915_vma_move_to_active(struct i915_vma *vma,
   1239 			    struct i915_request *rq,
   1240 			    unsigned int flags)
   1241 {
   1242 	struct drm_i915_gem_object *obj = vma->obj;
   1243 	int err;
   1244 
   1245 	assert_object_held(obj);
   1246 
   1247 	err = __i915_vma_move_to_active(vma, rq);
   1248 	if (unlikely(err))
   1249 		return err;
   1250 
   1251 	if (flags & EXEC_OBJECT_WRITE) {
   1252 		struct intel_frontbuffer *front;
   1253 
   1254 		front = __intel_frontbuffer_get(obj);
   1255 		if (unlikely(front)) {
   1256 			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
   1257 				i915_active_add_request(&front->write, rq);
   1258 			intel_frontbuffer_put(front);
   1259 		}
   1260 
   1261 		dma_resv_add_excl_fence(vma->resv, &rq->fence);
   1262 		obj->write_domain = I915_GEM_DOMAIN_RENDER;
   1263 		obj->read_domains = 0;
   1264 	} else {
   1265 		err = dma_resv_reserve_shared(vma->resv, 1);
   1266 		if (unlikely(err))
   1267 			return err;
   1268 
   1269 		dma_resv_add_shared_fence(vma->resv, &rq->fence);
   1270 		obj->write_domain = 0;
   1271 	}
   1272 	obj->read_domains |= I915_GEM_GPU_DOMAINS;
   1273 	obj->mm.dirty = true;
   1274 
   1275 	GEM_BUG_ON(!i915_vma_is_active(vma));
   1276 	return 0;
   1277 }
   1278 
   1279 int __i915_vma_unbind(struct i915_vma *vma)
   1280 {
   1281 	int ret;
   1282 
   1283 	lockdep_assert_held(&vma->vm->mutex);
   1284 
   1285 	/*
   1286 	 * First wait upon any activity as retiring the request may
   1287 	 * have side-effects such as unpinning or even unbinding this vma.
   1288 	 *
   1289 	 * XXX Actually waiting under the vm->mutex is a hindrance and
   1290 	 * should be pipelined wherever possible. In cases where that is
   1291 	 * unavoidable, we should lift the wait to before the mutex.
   1292 	 */
   1293 	ret = i915_vma_sync(vma);
   1294 	if (ret)
   1295 		return ret;
   1296 
   1297 	if (i915_vma_is_pinned(vma)) {
   1298 		vma_print_allocator(vma, "is pinned");
   1299 		return -EAGAIN;
   1300 	}
   1301 
   1302 	/*
   1303 	 * After confirming that no one else is pinning this vma, wait for
   1304 	 * any laggards who may have crept in during the wait (through
   1305 	 * a residual pin skipping the vm->mutex) to complete.
   1306 	 */
   1307 	ret = i915_vma_sync(vma);
   1308 	if (ret)
   1309 		return ret;
   1310 
   1311 	if (!drm_mm_node_allocated(&vma->node))
   1312 		return 0;
   1313 
   1314 	GEM_BUG_ON(i915_vma_is_pinned(vma));
   1315 	GEM_BUG_ON(i915_vma_is_active(vma));
   1316 
   1317 	if (i915_vma_is_map_and_fenceable(vma)) {
   1318 		/*
   1319 		 * Check that we have flushed all writes through the GGTT
   1320 		 * before the unbind; otherwise, due to the non-strict nature of
   1321 		 * those indirect writes, they may end up referencing the GGTT PTE
   1322 		 * after the unbind.
   1323 		 */
   1324 		i915_vma_flush_writes(vma);
   1325 		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
   1326 
   1327 		/* release the fence reg _after_ flushing */
   1328 		ret = i915_vma_revoke_fence(vma);
   1329 		if (ret)
   1330 			return ret;
   1331 
   1332 		/* Force a pagefault for domain tracking on next user access */
   1333 		i915_vma_revoke_mmap(vma);
   1334 
   1335 		__i915_vma_iounmap(vma);
   1336 		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
   1337 	}
   1338 	GEM_BUG_ON(vma->fence);
   1339 	GEM_BUG_ON(i915_vma_has_userfault(vma));
   1340 
   1341 	if (likely(atomic_read(&vma->vm->open))) {
   1342 		trace_i915_vma_unbind(vma);
   1343 		vma->ops->unbind_vma(vma);
   1344 	}
   1345 	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags);
   1346 
   1347 	i915_vma_detach(vma);
   1348 	vma_unbind_pages(vma);
   1349 
   1350 	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
   1351 	return 0;
   1352 }
   1353 
   1354 int i915_vma_unbind(struct i915_vma *vma)
   1355 {
   1356 	struct i915_address_space *vm = vma->vm;
   1357 	intel_wakeref_t wakeref = 0;
   1358 	int err;
   1359 
   1360 	if (!drm_mm_node_allocated(&vma->node))
   1361 		return 0;
   1362 
   1363 	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
   1364 		/* XXX not always required: nop_clear_range */
   1365 		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
   1366 
   1367 	err = mutex_lock_interruptible(&vm->mutex);
   1368 	if (err)
   1369 		return err;
   1370 
   1371 	err = __i915_vma_unbind(vma);
   1372 	mutex_unlock(&vm->mutex);
   1373 
   1374 	if (wakeref)
   1375 		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
   1376 
   1377 	return err;
   1378 }
   1379 
   1380 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
   1381 {
   1382 	i915_gem_object_make_unshrinkable(vma->obj);
   1383 	return vma;
   1384 }
   1385 
   1386 void i915_vma_make_shrinkable(struct i915_vma *vma)
   1387 {
   1388 	i915_gem_object_make_shrinkable(vma->obj);
   1389 }
   1390 
   1391 void i915_vma_make_purgeable(struct i915_vma *vma)
   1392 {
   1393 	i915_gem_object_make_purgeable(vma->obj);
   1394 }
   1395 
   1396 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
   1397 #include "selftests/i915_vma.c"
   1398 #endif
   1399 
   1400 static void i915_global_vma_shrink(void)
   1401 {
   1402 	kmem_cache_shrink(global.slab_vmas);
   1403 }
   1404 
   1405 static void i915_global_vma_exit(void)
   1406 {
   1407 	kmem_cache_destroy(global.slab_vmas);
   1408 }
   1409 
   1410 static struct i915_global_vma global = { {
   1411 	.shrink = i915_global_vma_shrink,
   1412 	.exit = i915_global_vma_exit,
   1413 } };
   1414 
   1415 int __init i915_global_vma_init(void)
   1416 {
   1417 	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
   1418 	if (!global.slab_vmas)
   1419 		return -ENOMEM;
   1420 
   1421 	i915_global_register(&global.base);
   1422 	return 0;
   1423 }
   1424