/*	$NetBSD: i915_vma.c,v 1.12 2021/12/19 12:27:49 riastradh Exp $	*/

/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_vma.c,v 1.12 2021/12/19 12:27:49 riastradh Exp $");

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"

#include <linux/nbsd-namespace.h>

static struct i915_global_vma {
	struct i915_global base;
	struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	mutex_destroy(&vma->pages_mutex);
	return kmem_cache_free(global.slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

__i915_active_call
static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}

#ifdef __NetBSD__
struct i915_vma_key {
	struct i915_address_space *vm;
	const struct i915_ggtt_view *view;
};

static int
compare_vma(void *cookie, const void *va, const void *vb)
{
	const struct i915_vma *a = va;
	const struct i915_vma *b = vb;
	long cmp = i915_vma_compare(__UNCONST(a), b->vm,
	    b->ggtt_view.type == I915_GGTT_VIEW_NORMAL ? NULL : &b->ggtt_view);

	return (cmp < 0 ? -1 : cmp > 0 ? +1 : 0);
}

static int
compare_vma_key(void *cookie, const void *vn, const void *vk)
{
	const struct i915_vma *vma = vn;
	const struct i915_vma_key *key = vk;
	long cmp = i915_vma_compare(__UNCONST(vma), key->vm, key->view);

	return (cmp < 0 ? -1 : cmp > 0 ? +1 : 0);
}

static const rb_tree_ops_t vma_tree_rb_ops = {
	.rbto_compare_nodes = compare_vma,
	.rbto_compare_key = compare_vma_key,
	.rbto_node_offset = offsetof(struct i915_vma, obj_node),
};
#endif

void
i915_vma_tree_init(struct drm_i915_gem_object *obj)
{
#ifdef __NetBSD__
	rb_tree_init(&obj->vma.tree.rbr_tree, &vma_tree_rb_ops);
#else
	obj->vma.tree = RB_ROOT;
#endif
}
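
/*
 * Note: on NetBSD the per-object vma tree is an rb_tree(9) keyed by
 * (vm, view) through the comparators above, while on Linux it is a
 * bare rb_root walked by hand in vma_create()/vma_lookup() below.
 * Both paths order nodes with i915_vma_compare(), so lookups agree
 * on either implementation.
 */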

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&vma->ref);
	mutex_init(&vma->pages_mutex);
	vma->vm = i915_vm_get(vm);
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->base.resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	spin_lock(&obj->vma.lock);

#ifdef __NetBSD__
	__USE(rb);
	__USE(p);
	struct i915_vma *collision __diagused;
	collision = rb_tree_insert_node(&obj->vma.tree.rbr_tree, vma);
	KASSERT(collision == vma);
#else
	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		struct i915_vma *pos;
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp == 0) {
			spin_unlock(&obj->vma.lock);
			i915_vma_free(vma);
			return pos;
		}

		if (cmp < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);
#endif

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	return vma;

err_vma:
	i915_vma_free(vma);
	return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
#ifdef __NetBSD__
	const struct i915_vma_key key = { .vm = vm, .view = view };

	return rb_tree_find_node(&obj->vma.tree.rbr_tree, &key);
#else
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
#endif
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(!atomic_read(&vm->open));

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}
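
/*
 * Illustrative sketch only (obj and ggtt stand in for caller state):
 * the default GGTT binding of an object is obtained with a NULL view,
 *
 *	struct i915_vma *vma;
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 * and repeated calls with the same (obj, vm, view) triple return the
 * same vma for the lifetime of the object or address space.
 */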

struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_vma *vma;
	struct drm_i915_gem_object *pinned;
	enum i915_cache_level cache_level;
	unsigned int flags;
};

static int __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma *vma = vw->vma;
	int err;

	err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
	if (err)
		atomic_or(I915_VMA_ERROR, &vma->flags);

	return err;
}

static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->pinned)
		__i915_gem_object_unpin_pages(vw->pinned);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
		work->vma = vma;
		work->cache_level = cache_level;
		work->flags = bind_flags | I915_VMA_ALLOC;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		GEM_BUG_ON(i915_active_has_exclusive(&vma->active));
		i915_active_set_exclusive(&vma->active, &work->base.dma);
		work->base.dma.error = 0; /* enable the queue_work() */

		if (vma->obj) {
			__i915_gem_object_pin_pages(vma->obj);
			work->pinned = vma->obj;
		}
	} else {
		GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
		ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
		if (ret)
			return ret;
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}
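
/*
 * Illustrative sketch only (not a driver call site): callers that know
 * the bind is synchronous may pass a NULL worker,
 *
 *	err = i915_vma_bind(vma, cache_level, PIN_USER, NULL);
 *
 * while callers that may hit vm->bind_async_flags preallocate one with
 * i915_vma_work() and later commit it with dma_fence_work_commit(), as
 * i915_vma_pin() below does.  Locking and cleanup are elided here.
 */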

#ifdef __NetBSD__
#  define	__iomem		__i915_vma_iomem
#endif

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
#ifdef __NetBSD__
			io_mapping_unmap(&i915_vm_to_ggtt(vma->vm)->iomap, ptr,
			    vma->node.size);
#else
			io_mapping_unmap(ptr);
#endif
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

#ifdef __NetBSD__
#  undef	__iomem
#endif

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}
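
/*
 * Sketch of the expected pairing (illustrative only; value and offset
 * stand in for caller-supplied data):
 *
 *	void __iomem *ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	writel(value, ptr + offset);
 *	i915_vma_unpin_iomap(vma);
 *
 * The mapping itself stays cached in vma->iomap and is only torn down
 * by __i915_vma_iounmap() when the vma is unbound.
 */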

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags_const(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}

static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
	/*
	 * Combine the assertion that the object is bound and that we have
	 * pinned its pages. But we should never have bound the object
	 * more than we have pinned its pages. (For complete accuracy, we
	 * assume that no one else is pinning the pages, but as a rough
	 * assertion that we will not run into problems later, this will do!)
	 */
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_closed(vma));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%"PRIu64" > %s aperture=%"PRIu64"\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	color = 0;
	if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			return ret;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume the GGTT is limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		atomic_inc(&obj->bind_count);
		assert_bind_count(obj);
	}
	list_add_tail(&vma->vm_link, &vma->vm->bound_list);

	return 0;
}

static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * And finally now the object is completely decoupled from this
	 * vma, we can drop its hold on the backing storage and allow
	 * it to be reaped by the shrinker.
	 */
	list_del(&vma->vm_link);
	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		assert_bind_count(obj);
		atomic_dec(&obj->bind_count);
	}
}

static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;
	bool pinned = true;

	bound = atomic_read(&vma->flags);
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		if (!(bound & I915_VMA_PIN_MASK))
			goto unpinned;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;

unpinned:
	/*
	 * If pin_count==0, but we are bound, check under the lock to avoid
	 * racing with a concurrent i915_vma_unbind().
	 */
	mutex_lock(&vma->vm->mutex);
	do {
		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
			pinned = false;
			break;
		}

		if (unlikely(flags & ~bound)) {
			pinned = false;
			break;
		}
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
	mutex_unlock(&vma->vm->mutex);

	return pinned;
}

static int vma_get_pages(struct i915_vma *vma)
{
	int err = 0;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	/* Allocations ahoy! */
	if (mutex_lock_interruptible(&vma->pages_mutex))
		return -EINTR;

	if (!atomic_read(&vma->pages_count)) {
		if (vma->obj) {
			err = i915_gem_object_pin_pages(vma->obj);
			if (err)
				goto unlock;
		}

		err = vma->ops->set_pages(vma);
		if (err) {
			if (vma->obj)
				i915_gem_object_unpin_pages(vma->obj);
			goto unlock;
		}
	}
	atomic_inc(&vma->pages_count);

unlock:
	mutex_unlock(&vma->pages_mutex);

	return err;
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	/* We allocate under vma_get_pages, so beware the shrinker */
	mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		vma->ops->clear_pages(vma);
		GEM_BUG_ON(vma->pages);
		if (vma->obj)
			i915_gem_object_unpin_pages(vma->obj);
	}
	mutex_unlock(&vma->pages_mutex);
}

static void vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}

static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}
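
/*
 * A sketch of the pages_count encoding used above (field split assumed
 * from I915_VMA_PAGES_BIAS; see i915_vma.h):
 *
 *	 high bits      BIAS          0
 *	+-------------+---------------+
 *	|  bindings   |   page pins   |
 *	+-------------+---------------+
 *
 * vma_get_pages()/vma_put_pages() move the low field by one, while each
 * successful bind adds I915_VMA_PAGES_ACTIVE, bumping both fields, so
 * vma_unbind_pages() can drop every binding together with the page pin
 * it implicitly holds.
 */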

int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	intel_wakeref_t wakeref = 0;
	unsigned int bound;
	int err;

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(flags & PIN_UPDATE);
	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
		return 0;

	err = vma_get_pages(vma);
	if (err)
		return err;

	if (flags & vma->vm->bind_async_flags) {
		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_pages;
		}
	}

	if (flags & PIN_GLOBAL)
		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	/* No more allocations allowed once we hold vm->mutex */
	err = mutex_lock_interruptible(&vma->vm->mutex);
	if (err)
		goto err_fence;

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj ? vma->obj->cache_level : 0,
			    flags, work);
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	__i915_vma_pin(vma);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_fence:
	if (work)
		dma_fence_work_commit(&work->base);
	if (wakeref)
		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
err_pages:
	vma_put_pages(vma);
	return err;
}
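
/*
 * Typical usage, as an illustrative sketch (the chosen flags are
 * assumed to come from the caller):
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 *	if (err)
 *		return err;
 *	... use vma->node.start in GPU commands ...
 *	i915_vma_unpin(vma);
 *
 * size == 0 and alignment == 0 fall back to vma->size and the
 * alignment recorded at creation time.
 */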
   1038   1.1  riastrad 
   1039   1.1  riastrad static void flush_idle_contexts(struct intel_gt *gt)
   1040   1.1  riastrad {
   1041   1.1  riastrad 	struct intel_engine_cs *engine;
   1042   1.1  riastrad 	enum intel_engine_id id;
   1043   1.1  riastrad 
   1044   1.1  riastrad 	for_each_engine(engine, gt, id)
   1045   1.1  riastrad 		intel_engine_flush_barriers(engine);
   1046   1.1  riastrad 
   1047   1.1  riastrad 	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
   1048   1.1  riastrad }
   1049   1.1  riastrad 
   1050   1.1  riastrad int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
   1051   1.1  riastrad {
   1052   1.1  riastrad 	struct i915_address_space *vm = vma->vm;
   1053   1.1  riastrad 	int err;
   1054   1.1  riastrad 
   1055   1.1  riastrad 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
   1056   1.1  riastrad 
   1057   1.1  riastrad 	do {
   1058   1.1  riastrad 		err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
   1059   1.1  riastrad 		if (err != -ENOSPC)
   1060   1.1  riastrad 			return err;
   1061   1.1  riastrad 
   1062   1.1  riastrad 		/* Unlike i915_vma_pin, we don't take no for an answer! */
   1063   1.1  riastrad 		flush_idle_contexts(vm->gt);
   1064   1.1  riastrad 		if (mutex_lock_interruptible(&vm->mutex) == 0) {
   1065   1.1  riastrad 			i915_gem_evict_vm(vm);
   1066   1.1  riastrad 			mutex_unlock(&vm->mutex);
   1067   1.1  riastrad 		}
   1068   1.1  riastrad 	} while (1);
   1069   1.1  riastrad }
   1070   1.1  riastrad 
   1071   1.1  riastrad void i915_vma_close(struct i915_vma *vma)
   1072   1.1  riastrad {
   1073   1.1  riastrad 	struct intel_gt *gt = vma->vm->gt;
   1074   1.1  riastrad 	unsigned long flags;
   1075   1.1  riastrad 
   1076   1.1  riastrad 	GEM_BUG_ON(i915_vma_is_closed(vma));
   1077   1.1  riastrad 
   1078   1.1  riastrad 	/*
   1079   1.1  riastrad 	 * We defer actually closing, unbinding and destroying the VMA until
   1080   1.1  riastrad 	 * the next idle point, or if the object is freed in the meantime. By
   1081   1.1  riastrad 	 * postponing the unbind, we allow for it to be resurrected by the
   1082   1.1  riastrad 	 * client, avoiding the work required to rebind the VMA. This is
   1083   1.1  riastrad 	 * advantageous for DRI, where the client/server pass objects
   1084   1.1  riastrad 	 * between themselves, temporarily opening a local VMA to the
   1085   1.1  riastrad 	 * object, and then closing it again. The same object is then reused
   1086   1.1  riastrad 	 * on the next frame (or two, depending on the depth of the swap queue)
   1087   1.1  riastrad 	 * causing us to rebind the VMA once more. This ends up being a lot
   1088   1.1  riastrad 	 * of wasted work for the steady state.
   1089   1.1  riastrad 	 */
   1090   1.1  riastrad 	spin_lock_irqsave(&gt->closed_lock, flags);
   1091   1.1  riastrad 	list_add(&vma->closed_link, &gt->closed_vma);
   1092   1.1  riastrad 	spin_unlock_irqrestore(&gt->closed_lock, flags);
   1093   1.1  riastrad }
   1094   1.1  riastrad 
   1095   1.1  riastrad static void __i915_vma_remove_closed(struct i915_vma *vma)
   1096   1.1  riastrad {
   1097   1.1  riastrad 	struct intel_gt *gt = vma->vm->gt;
   1098   1.1  riastrad 
   1099   1.1  riastrad 	spin_lock_irq(&gt->closed_lock);
   1100   1.1  riastrad 	list_del_init(&vma->closed_link);
   1101   1.1  riastrad 	spin_unlock_irq(&gt->closed_lock);
   1102   1.1  riastrad }
   1103   1.1  riastrad 
   1104   1.1  riastrad void i915_vma_reopen(struct i915_vma *vma)
   1105   1.1  riastrad {
   1106   1.1  riastrad 	if (i915_vma_is_closed(vma))
   1107   1.1  riastrad 		__i915_vma_remove_closed(vma);
   1108   1.1  riastrad }
   1109   1.1  riastrad 
   1110   1.1  riastrad void i915_vma_release(struct kref *ref)
   1111   1.1  riastrad {
   1112   1.1  riastrad 	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
   1113   1.1  riastrad 
   1114   1.1  riastrad 	if (drm_mm_node_allocated(&vma->node)) {
   1115   1.1  riastrad 		mutex_lock(&vma->vm->mutex);
   1116   1.1  riastrad 		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
   1117   1.1  riastrad 		WARN_ON(__i915_vma_unbind(vma));
   1118   1.1  riastrad 		mutex_unlock(&vma->vm->mutex);
   1119   1.1  riastrad 		GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
   1120   1.1  riastrad 	}
   1121   1.1  riastrad 	GEM_BUG_ON(i915_vma_is_active(vma));
   1122   1.1  riastrad 
   1123   1.1  riastrad 	if (vma->obj) {
   1124   1.1  riastrad 		struct drm_i915_gem_object *obj = vma->obj;
   1125   1.1  riastrad 
   1126   1.1  riastrad 		spin_lock(&obj->vma.lock);
   1127   1.1  riastrad 		list_del(&vma->obj_link);
   1128   1.1  riastrad 		rb_erase(&vma->obj_node, &obj->vma.tree);
   1129   1.1  riastrad 		spin_unlock(&obj->vma.lock);
   1130   1.1  riastrad 	}
   1131   1.1  riastrad 
   1132   1.1  riastrad 	__i915_vma_remove_closed(vma);
   1133   1.1  riastrad 	i915_vm_put(vma->vm);
   1134   1.1  riastrad 
   1135   1.1  riastrad 	i915_active_fini(&vma->active);
   1136   1.1  riastrad 	i915_vma_free(vma);
   1137   1.1  riastrad }
   1138   1.1  riastrad 
   1139   1.1  riastrad void i915_vma_parked(struct intel_gt *gt)
   1140   1.1  riastrad {
   1141   1.1  riastrad 	struct i915_vma *vma, *next;
   1142   1.1  riastrad 
   1143   1.1  riastrad 	spin_lock_irq(&gt->closed_lock);
   1144   1.1  riastrad 	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
   1145   1.1  riastrad 		struct drm_i915_gem_object *obj = vma->obj;
   1146   1.1  riastrad 		struct i915_address_space *vm = vma->vm;
   1147   1.1  riastrad 
   1148   1.1  riastrad 		/* XXX All to avoid keeping a reference on i915_vma itself */
   1149   1.1  riastrad 
   1150   1.1  riastrad 		if (!kref_get_unless_zero(&obj->base.refcount))
   1151   1.1  riastrad 			continue;
   1152   1.1  riastrad 
   1153   1.1  riastrad 		if (i915_vm_tryopen(vm)) {
   1154   1.1  riastrad 			list_del_init(&vma->closed_link);
   1155   1.1  riastrad 		} else {
   1156   1.1  riastrad 			i915_gem_object_put(obj);
   1157   1.1  riastrad 			obj = NULL;
   1158   1.1  riastrad 		}
   1159   1.1  riastrad 
   1160   1.1  riastrad 		spin_unlock_irq(&gt->closed_lock);
   1161   1.1  riastrad 
   1162   1.1  riastrad 		if (obj) {
   1163   1.1  riastrad 			__i915_vma_put(vma);
   1164   1.1  riastrad 			i915_gem_object_put(obj);
   1165   1.1  riastrad 		}
   1166   1.1  riastrad 
   1167   1.1  riastrad 		i915_vm_close(vm);
   1168   1.1  riastrad 
   1169   1.1  riastrad 		/* Restart after dropping lock */
   1170   1.1  riastrad 		spin_lock_irq(&gt->closed_lock);
   1171   1.1  riastrad 		next = list_first_entry(&gt->closed_vma,
   1172   1.1  riastrad 					typeof(*next), closed_link);
   1173   1.1  riastrad 	}
   1174   1.1  riastrad 	spin_unlock_irq(&gt->closed_lock);
   1175   1.1  riastrad }
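
                        /*
                         * A note on the loop above: the lock is dropped mid-walk, so the
                         * @next cursor saved by list_for_each_entry_safe() may be stale
                         * once the lock is retaken, and the walk must restart from the
                         * list head.  A minimal sketch of the pattern, with hypothetical
                         * names:
                         *
                         *	spin_lock(&lock);
                         *	list_for_each_entry_safe(pos, next, &head, link) {
                         *		list_del_init(&pos->link);
                         *		spin_unlock(&lock);
                         *		do_work(pos);		(may sleep, take other locks)
                         *		spin_lock(&lock);
                         *		next = list_first_entry(&head, typeof(*next), link);
                         *	}
                         *	spin_unlock(&lock);
                         */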
   1176   1.1  riastrad 
   1177   1.1  riastrad static void __i915_vma_iounmap(struct i915_vma *vma)
   1178   1.1  riastrad {
   1179   1.1  riastrad 	GEM_BUG_ON(i915_vma_is_pinned(vma));
   1180   1.1  riastrad 
   1181   1.1  riastrad 	if (vma->iomap == NULL)
   1182   1.1  riastrad 		return;
   1183   1.1  riastrad 
   1184   1.5  riastrad #ifdef __NetBSD__
   1185  1.12  riastrad 	io_mapping_unmap(&i915_vm_to_ggtt(vma->vm)->iomap, vma->iomap,
   1186  1.12  riastrad 	    vma->node.size);
   1187   1.5  riastrad #else
   1188   1.1  riastrad 	io_mapping_unmap(vma->iomap);
   1189   1.5  riastrad #endif
   1190   1.1  riastrad 	vma->iomap = NULL;
   1191   1.1  riastrad }
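
                        /*
                         * Note: the NetBSD io_mapping_unmap() takes the owning io_mapping
                         * and the size of the mapping being torn down, unlike the
                         * one-argument Linux form, hence the #ifdef split above.
                         */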
   1192   1.1  riastrad 
   1193   1.1  riastrad void i915_vma_revoke_mmap(struct i915_vma *vma)
   1194   1.1  riastrad {
   1195   1.1  riastrad 	struct drm_vma_offset_node *node;
   1196   1.1  riastrad 	u64 vma_offset;
   1197   1.1  riastrad 
   1198   1.1  riastrad 	if (!i915_vma_has_userfault(vma))
   1199   1.1  riastrad 		return;
   1200   1.1  riastrad 
   1201   1.1  riastrad 	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
   1202   1.1  riastrad 	GEM_BUG_ON(!vma->obj->userfault_count);
   1203   1.1  riastrad 
   1204   1.5  riastrad #ifdef __NetBSD__
   1205   1.5  riastrad 	__USE(vma_offset);
   1206   1.5  riastrad 	__USE(node);
   1207   1.5  riastrad 	struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
   1208   1.5  riastrad 	paddr_t pa = i915->ggtt.gmadr.start + vma->node.start;
   1209   1.5  riastrad 	vsize_t npgs = vma->size >> PAGE_SHIFT;
   1210   1.5  riastrad 	while (npgs-- > 0)
   1211  1.11  riastrad 		pmap_pv_protect(pa + (npgs << PAGE_SHIFT), VM_PROT_NONE);
   1212   1.5  riastrad #else
   1213   1.1  riastrad 	node = &vma->mmo->vma_node;
   1214   1.1  riastrad 	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
   1215   1.1  riastrad 	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
   1216   1.1  riastrad 			    drm_vma_node_offset_addr(node) + vma_offset,
   1217   1.1  riastrad 			    vma->size,
   1218   1.1  riastrad 			    1);
   1219   1.5  riastrad #endif
   1220   1.1  riastrad 
   1221   1.1  riastrad 	i915_vma_unset_userfault(vma);
   1222   1.1  riastrad 	if (!--vma->obj->userfault_count)
   1223   1.1  riastrad 		list_del(&vma->obj->userfault_link);
   1224   1.1  riastrad }
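
                        /*
                         * On NetBSD the revocation above walks the aperture pages backing
                         * the vma and strips every mapping of each physical page with
                         * pmap_pv_protect(..., VM_PROT_NONE); on Linux the same effect
                         * comes from unmap_mapping_range() on the vma's mmap offset node.
                         * Either way, the next user access faults back into the driver so
                         * the domain tracking can run.
                         */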
   1225   1.1  riastrad 
   1226   1.1  riastrad int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
   1227   1.1  riastrad {
   1228   1.1  riastrad 	int err;
   1229   1.1  riastrad 
   1230   1.1  riastrad 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
   1231   1.1  riastrad 
   1232   1.1  riastrad 	/* Wait for the vma to be bound before we start! */
   1233   1.1  riastrad 	err = i915_request_await_active(rq, &vma->active);
   1234   1.1  riastrad 	if (err)
   1235   1.1  riastrad 		return err;
   1236   1.1  riastrad 
   1237   1.1  riastrad 	return i915_active_add_request(&vma->active, rq);
   1238   1.1  riastrad }
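
                        /*
                         * The ordering above matters: the request first awaits any
                         * activity already tracked in vma->active (such as an asynchronous
                         * bind) and only then adds itself to the tracker, so it cannot
                         * execute on the GPU before the vma's binding has completed.
                         */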
   1239   1.1  riastrad 
   1240   1.1  riastrad int i915_vma_move_to_active(struct i915_vma *vma,
   1241   1.1  riastrad 			    struct i915_request *rq,
   1242   1.1  riastrad 			    unsigned int flags)
   1243   1.1  riastrad {
   1244   1.1  riastrad 	struct drm_i915_gem_object *obj = vma->obj;
   1245   1.1  riastrad 	int err;
   1246   1.1  riastrad 
   1247   1.1  riastrad 	assert_object_held(obj);
   1248   1.1  riastrad 
   1249   1.1  riastrad 	err = __i915_vma_move_to_active(vma, rq);
   1250   1.1  riastrad 	if (unlikely(err))
   1251   1.1  riastrad 		return err;
   1252   1.1  riastrad 
   1253   1.1  riastrad 	if (flags & EXEC_OBJECT_WRITE) {
   1254   1.1  riastrad 		struct intel_frontbuffer *front;
   1255   1.1  riastrad 
   1256   1.1  riastrad 		front = __intel_frontbuffer_get(obj);
   1257   1.1  riastrad 		if (unlikely(front)) {
   1258   1.1  riastrad 			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
   1259   1.1  riastrad 				i915_active_add_request(&front->write, rq);
   1260   1.1  riastrad 			intel_frontbuffer_put(front);
   1261   1.1  riastrad 		}
   1262   1.1  riastrad 
   1263   1.1  riastrad 		dma_resv_add_excl_fence(vma->resv, &rq->fence);
   1264   1.1  riastrad 		obj->write_domain = I915_GEM_DOMAIN_RENDER;
   1265   1.1  riastrad 		obj->read_domains = 0;
   1266   1.1  riastrad 	} else {
   1267   1.1  riastrad 		err = dma_resv_reserve_shared(vma->resv, 1);
   1268   1.1  riastrad 		if (unlikely(err))
   1269   1.1  riastrad 			return err;
   1270   1.1  riastrad 
   1271   1.1  riastrad 		dma_resv_add_shared_fence(vma->resv, &rq->fence);
   1272   1.1  riastrad 		obj->write_domain = 0;
   1273   1.1  riastrad 	}
   1274   1.1  riastrad 	obj->read_domains |= I915_GEM_GPU_DOMAINS;
   1275   1.1  riastrad 	obj->mm.dirty = true;
   1276   1.1  riastrad 
   1277   1.1  riastrad 	GEM_BUG_ON(!i915_vma_is_active(vma));
   1278   1.1  riastrad 	return 0;
   1279   1.1  riastrad }
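
                        /*
                         * Illustrative sketch, not part of the driver: a minimal caller,
                         * assuming @vma is already pinned and @rq is under construction.
                         * i915_vma_lock()/i915_vma_unlock() wrap the object's dma_resv
                         * lock and satisfy assert_object_held() above; the error label is
                         * hypothetical.
                         *
                         *	i915_vma_lock(vma);
                         *	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
                         *	i915_vma_unlock(vma);
                         *	if (err)
                         *		goto err_request;
                         */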
   1280   1.1  riastrad 
   1281   1.1  riastrad int __i915_vma_unbind(struct i915_vma *vma)
   1282   1.1  riastrad {
   1283   1.1  riastrad 	int ret;
   1284   1.1  riastrad 
   1285   1.1  riastrad 	lockdep_assert_held(&vma->vm->mutex);
   1286   1.1  riastrad 
   1287   1.1  riastrad 	/*
   1288   1.1  riastrad 	 * First wait upon any activity as retiring the request may
   1289   1.1  riastrad 	 * have side-effects such as unpinning or even unbinding this vma.
   1290   1.1  riastrad 	 *
   1291   1.1  riastrad 	 * XXX Actually waiting under the vm->mutex is a hindrance and
   1292   1.1  riastrad 	 * should be pipelined wherever possible. In cases where that is
   1293   1.1  riastrad 	 * unavoidable, we should lift the wait to before the mutex.
   1294   1.1  riastrad 	 */
   1295   1.1  riastrad 	ret = i915_vma_sync(vma);
   1296   1.1  riastrad 	if (ret)
   1297   1.1  riastrad 		return ret;
   1298   1.1  riastrad 
   1299   1.1  riastrad 	if (i915_vma_is_pinned(vma)) {
   1300   1.1  riastrad 		vma_print_allocator(vma, "is pinned");
   1301   1.1  riastrad 		return -EAGAIN;
   1302   1.1  riastrad 	}
   1303   1.1  riastrad 
   1304   1.1  riastrad 	/*
   1305   1.1  riastrad 	 * After confirming that no one else is pinning this vma, wait for
   1306   1.1  riastrad 	 * any laggards who may have crept in during the wait (through
   1307   1.1  riastrad 	 * a residual pin skipping the vm->mutex) to complete.
   1308   1.1  riastrad 	 */
   1309   1.1  riastrad 	ret = i915_vma_sync(vma);
   1310   1.1  riastrad 	if (ret)
   1311   1.1  riastrad 		return ret;
   1312   1.1  riastrad 
   1313   1.1  riastrad 	if (!drm_mm_node_allocated(&vma->node))
   1314   1.1  riastrad 		return 0;
   1315   1.1  riastrad 
   1316   1.1  riastrad 	GEM_BUG_ON(i915_vma_is_pinned(vma));
   1317   1.1  riastrad 	GEM_BUG_ON(i915_vma_is_active(vma));
   1318   1.1  riastrad 
   1319   1.1  riastrad 	if (i915_vma_is_map_and_fenceable(vma)) {
   1320   1.1  riastrad 		/*
   1321   1.1  riastrad 		 * Check that we have flushed all writes through the GGTT
   1322   1.1  riastrad 		 * before the unbind; otherwise, due to the non-strict
   1323   1.1  riastrad 		 * nature of those indirect writes, they may end up
   1324   1.1  riastrad 		 * referencing the GGTT PTE after the unbind.
   1325   1.1  riastrad 		 */
   1326   1.1  riastrad 		i915_vma_flush_writes(vma);
   1327   1.1  riastrad 		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
   1328   1.1  riastrad 
   1329   1.1  riastrad 		/* release the fence reg _after_ flushing */
   1330   1.1  riastrad 		ret = i915_vma_revoke_fence(vma);
   1331   1.1  riastrad 		if (ret)
   1332   1.1  riastrad 			return ret;
   1333   1.1  riastrad 
   1334   1.1  riastrad 		/* Force a pagefault for domain tracking on next user access */
   1335   1.1  riastrad 		i915_vma_revoke_mmap(vma);
   1336   1.1  riastrad 
   1337   1.1  riastrad 		__i915_vma_iounmap(vma);
   1338   1.1  riastrad 		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
   1339   1.1  riastrad 	}
   1340   1.1  riastrad 	GEM_BUG_ON(vma->fence);
   1341   1.1  riastrad 	GEM_BUG_ON(i915_vma_has_userfault(vma));
   1342   1.1  riastrad 
   1343   1.1  riastrad 	if (likely(atomic_read(&vma->vm->open))) {
   1344   1.1  riastrad 		trace_i915_vma_unbind(vma);
   1345   1.1  riastrad 		vma->ops->unbind_vma(vma);
   1346   1.1  riastrad 	}
   1347   1.1  riastrad 	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags);
   1348   1.1  riastrad 
   1349   1.1  riastrad 	i915_vma_detach(vma);
   1350   1.1  riastrad 	vma_unbind_pages(vma);
   1351   1.1  riastrad 
   1352   1.1  riastrad 	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
   1353   1.1  riastrad 	return 0;
   1354   1.1  riastrad }
   1355   1.1  riastrad 
   1356   1.1  riastrad int i915_vma_unbind(struct i915_vma *vma)
   1357   1.1  riastrad {
   1358   1.1  riastrad 	struct i915_address_space *vm = vma->vm;
   1359   1.1  riastrad 	intel_wakeref_t wakeref = 0;
   1360   1.1  riastrad 	int err;
   1361   1.1  riastrad 
   1362   1.1  riastrad 	if (!drm_mm_node_allocated(&vma->node))
   1363   1.1  riastrad 		return 0;
   1364   1.1  riastrad 
   1365   1.1  riastrad 	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
   1366   1.1  riastrad 		/* XXX not always required: nop_clear_range */
   1367   1.1  riastrad 		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
   1368   1.1  riastrad 
   1369   1.1  riastrad 	err = mutex_lock_interruptible(&vm->mutex);
   1370   1.1  riastrad 	if (err)
   1371   1.1  riastrad 		return err;
   1372   1.1  riastrad 
   1373   1.1  riastrad 	err = __i915_vma_unbind(vma);
   1374   1.1  riastrad 	mutex_unlock(&vm->mutex);
   1375   1.1  riastrad 
   1376   1.1  riastrad 	if (wakeref)
   1377   1.1  riastrad 		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
   1378   1.1  riastrad 
   1379   1.1  riastrad 	return err;
   1380   1.1  riastrad }
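
                        /*
                         * Note the split: __i915_vma_unbind() is the core for callers
                         * that already hold vm->mutex (e.g. eviction), while
                         * i915_vma_unbind() is the standalone form that takes the mutex
                         * itself and, for global GGTT bindings, holds a runtime-pm wakeref
                         * so the PTE clears reach awake hardware.
                         */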
   1381   1.1  riastrad 
   1382   1.1  riastrad struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
   1383   1.1  riastrad {
   1384   1.1  riastrad 	i915_gem_object_make_unshrinkable(vma->obj);
   1385   1.1  riastrad 	return vma;
   1386   1.1  riastrad }
   1387   1.1  riastrad 
   1388   1.1  riastrad void i915_vma_make_shrinkable(struct i915_vma *vma)
   1389   1.1  riastrad {
   1390   1.1  riastrad 	i915_gem_object_make_shrinkable(vma->obj);
   1391   1.1  riastrad }
   1392   1.1  riastrad 
   1393   1.1  riastrad void i915_vma_make_purgeable(struct i915_vma *vma)
   1394   1.1  riastrad {
   1395   1.1  riastrad 	i915_gem_object_make_purgeable(vma->obj);
   1396   1.1  riastrad }
   1397   1.1  riastrad 
   1398   1.1  riastrad #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
   1399   1.1  riastrad #include "selftests/i915_vma.c"
   1400   1.1  riastrad #endif
   1401   1.1  riastrad 
   1402   1.1  riastrad static void i915_global_vma_shrink(void)
   1403   1.1  riastrad {
   1404   1.1  riastrad 	kmem_cache_shrink(global.slab_vmas);
   1405   1.1  riastrad }
   1406   1.1  riastrad 
   1407   1.1  riastrad static void i915_global_vma_exit(void)
   1408   1.1  riastrad {
   1409   1.1  riastrad 	kmem_cache_destroy(global.slab_vmas);
   1410   1.1  riastrad }
   1411   1.1  riastrad 
   1412   1.1  riastrad static struct i915_global_vma global = { {
   1413   1.1  riastrad 	.shrink = i915_global_vma_shrink,
   1414   1.1  riastrad 	.exit = i915_global_vma_exit,
   1415   1.1  riastrad } };
   1416   1.1  riastrad 
   1417   1.1  riastrad int __init i915_global_vma_init(void)
   1418   1.1  riastrad {
   1419   1.1  riastrad 	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
   1420   1.1  riastrad 	if (!global.slab_vmas)
   1421   1.1  riastrad 		return -ENOMEM;
   1422   1.1  riastrad 
   1423   1.1  riastrad 	i915_global_register(&global.base);
   1424   1.1  riastrad 	return 0;
   1425   1.1  riastrad }
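
                        /*
                         * Illustrative sketch, not part of the driver: the slab created
                         * here backs i915_vma_alloc()/i915_vma_free() above, and the hooks
                         * registered with i915_global_register() let the common i915
                         * globals machinery shrink or destroy it, roughly:
                         *
                         *	i915_global_vma_init();		creates global.slab_vmas
                         *	...
                         *	i915_globals_exit();		runs i915_global_vma_exit()
                         */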