/*	i915_gem.c, revision 1.18.2.1	*/
      1 /*
      2  * Copyright © 2008 Intel Corporation
      3  *
      4  * Permission is hereby granted, free of charge, to any person obtaining a
      5  * copy of this software and associated documentation files (the "Software"),
      6  * to deal in the Software without restriction, including without limitation
      7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8  * and/or sell copies of the Software, and to permit persons to whom the
      9  * Software is furnished to do so, subject to the following conditions:
     10  *
     11  * The above copyright notice and this permission notice (including the next
     12  * paragraph) shall be included in all copies or substantial portions of the
     13  * Software.
     14  *
     15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     21  * IN THE SOFTWARE.
     22  *
     23  * Authors:
     24  *    Eric Anholt <eric@anholt.net>
     25  *
     26  */
     27 
     28 #ifdef __NetBSD__
     29 #if 0				/* XXX uvmhist option?  */
     30 #include "opt_uvmhist.h"
     31 #endif
     32 
     33 #include <sys/types.h>
     34 #include <sys/param.h>
     35 
     36 #include <uvm/uvm.h>
     37 #include <uvm/uvm_extern.h>
     38 #include <uvm/uvm_fault.h>
     39 #include <uvm/uvm_page.h>
     40 #include <uvm/uvm_pmap.h>
     41 #include <uvm/uvm_prot.h>
     42 
     43 #include <drm/bus_dma_hacks.h>
     44 #endif
     45 
     46 #include <drm/drmP.h>
     47 #include <drm/drm_vma_manager.h>
     48 #include <drm/i915_drm.h>
     49 #include "i915_drv.h"
     50 #include "i915_trace.h"
     51 #include "intel_drv.h"
     52 #include <linux/shmem_fs.h>
     53 #include <linux/slab.h>
     54 #include <linux/swap.h>
     55 #include <linux/pci.h>
     56 #include <linux/dma-buf.h>
     57 #include <linux/errno.h>
     58 #include <linux/time.h>
     59 #include <linux/err.h>
     60 #include <linux/bitops.h>
     61 #include <linux/printk.h>
     62 #include <asm/param.h>
     63 #include <asm/page.h>
     64 
     65 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
     66 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
     67 						   bool force);
     68 static __must_check int
     69 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
     70 			       bool readonly);
     71 
     72 static void i915_gem_write_fence(struct drm_device *dev, int reg,
     73 				 struct drm_i915_gem_object *obj);
     74 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
     75 					 struct drm_i915_fence_reg *fence,
     76 					 bool enable);
     77 
     78 static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
     79 					     struct shrink_control *sc);
     80 static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
     81 					    struct shrink_control *sc);
     82 static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
     83 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
     84 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
     85 static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
     86 
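        /*
         * The CPU cache is coherent with GPU access when the platform has a
         * shared last-level cache (LLC) or when the object uses a snooped
         * cache level, i.e. anything other than I915_CACHE_NONE.  Only in
         * the remaining cases do CPU reads/writes need an explicit clflush.
         */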
     87 static bool cpu_cache_is_coherent(struct drm_device *dev,
     88 				  enum i915_cache_level level)
     89 {
     90 	return HAS_LLC(dev) || level != I915_CACHE_NONE;
     91 }
     92 
     93 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
     94 {
     95 	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
     96 		return true;
     97 
     98 	return obj->pin_display;
     99 }
    100 
    101 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
    102 {
    103 	if (obj->tiling_mode)
    104 		i915_gem_release_mmap(obj);
    105 
    106 	/* As we do not have an associated fence register, we will force
    107 	 * a tiling change if we ever need to acquire one.
    108 	 */
    109 	obj->fence_dirty = false;
    110 	obj->fence_reg = I915_FENCE_REG_NONE;
    111 }
    112 
    113 /* some bookkeeping */
    114 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
    115 				  size_t size)
    116 {
    117 	spin_lock(&dev_priv->mm.object_stat_lock);
    118 	dev_priv->mm.object_count++;
    119 	dev_priv->mm.object_memory += size;
    120 	spin_unlock(&dev_priv->mm.object_stat_lock);
    121 }
    122 
    123 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
    124 				     size_t size)
    125 {
    126 	spin_lock(&dev_priv->mm.object_stat_lock);
    127 	dev_priv->mm.object_count--;
    128 	dev_priv->mm.object_memory -= size;
    129 	spin_unlock(&dev_priv->mm.object_stat_lock);
    130 }
    131 
    132 static int
    133 i915_gem_wait_for_error(struct i915_gpu_error *error)
    134 {
    135 	int ret;
    136 
    137 #define EXIT_COND (!i915_reset_in_progress(error) || \
    138 		   i915_terminally_wedged(error))
    139 	if (EXIT_COND)
    140 		return 0;
    141 
    142 	/*
    143 	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
     144 	 * userspace. If it takes that long, something really bad is going on and
    145 	 * we should simply try to bail out and fail as gracefully as possible.
    146 	 */
    147 #ifdef __NetBSD__
    148 	spin_lock(&error->reset_lock);
    149 	DRM_SPIN_TIMED_WAIT_UNTIL(ret, &error->reset_queue, &error->reset_lock,
    150 	    10*HZ, EXIT_COND);
    151 	spin_unlock(&error->reset_lock);
    152 #else
    153 	ret = wait_event_interruptible_timeout(error->reset_queue,
    154 					       EXIT_COND,
    155 					       10*HZ);
    156 #endif
    157 	if (ret == 0) {
    158 		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
    159 		return -EIO;
    160 	} else if (ret < 0) {
    161 		return ret;
    162 	}
    163 #undef EXIT_COND
    164 
    165 	return 0;
    166 }
    167 
    168 int i915_mutex_lock_interruptible(struct drm_device *dev)
    169 {
    170 	struct drm_i915_private *dev_priv = dev->dev_private;
    171 	int ret;
    172 
    173 	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
    174 	if (ret)
    175 		return ret;
    176 
    177 	ret = mutex_lock_interruptible(&dev->struct_mutex);
    178 	if (ret)
    179 		return ret;
    180 
    181 	WARN_ON(i915_verify_lists(dev));
    182 	return 0;
    183 }
    184 
    185 static inline bool
    186 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
    187 {
    188 	return i915_gem_obj_bound_any(obj) && !obj->active;
    189 }
    190 
    191 int
    192 i915_gem_init_ioctl(struct drm_device *dev, void *data,
    193 		    struct drm_file *file)
    194 {
    195 	struct drm_i915_private *dev_priv = dev->dev_private;
    196 	struct drm_i915_gem_init *args = data;
    197 
    198 	if (drm_core_check_feature(dev, DRIVER_MODESET))
    199 		return -ENODEV;
    200 
    201 	if (args->gtt_start >= args->gtt_end ||
    202 	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
    203 		return -EINVAL;
    204 
    205 	/* GEM with user mode setting was never supported on ilk and later. */
    206 	if (INTEL_INFO(dev)->gen >= 5)
    207 		return -ENODEV;
    208 
    209 	mutex_lock(&dev->struct_mutex);
    210 	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
    211 				  args->gtt_end);
    212 	dev_priv->gtt.mappable_end = args->gtt_end;
    213 	mutex_unlock(&dev->struct_mutex);
    214 
    215 	return 0;
    216 }
    217 
    218 int
    219 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
    220 			    struct drm_file *file)
    221 {
    222 	struct drm_i915_private *dev_priv = dev->dev_private;
    223 	struct drm_i915_gem_get_aperture *args = data;
    224 	struct drm_i915_gem_object *obj;
    225 	size_t pinned;
    226 
    227 	pinned = 0;
    228 	mutex_lock(&dev->struct_mutex);
    229 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
    230 		if (i915_gem_obj_is_pinned(obj))
    231 			pinned += i915_gem_obj_ggtt_size(obj);
    232 	mutex_unlock(&dev->struct_mutex);
    233 
    234 	args->aper_size = dev_priv->gtt.base.total;
    235 	args->aper_available_size = args->aper_size - pinned;
    236 
    237 	return 0;
    238 }
    239 
    240 static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
    241 {
    242 	drm_dma_handle_t *phys = obj->phys_handle;
    243 
    244 	if (!phys)
    245 		return;
    246 
    247 	if (obj->madv == I915_MADV_WILLNEED) {
    248 #ifdef __NetBSD__
    249 		const char *vaddr = phys->vaddr;
    250 		unsigned i;
    251 
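        		/*
        		 * Write the contents of the contiguous DMA buffer back
        		 * into the backing uvm object one page at a time: wire
        		 * the page, copy and clflush, then mark it dirty and
        		 * unwire it again.
        		 */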
    252 		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
    253 			struct pglist pages;
    254 			int error;
    255 
    256 			TAILQ_INIT(&pages);
    257 			error = uvm_obj_wirepages(obj->base.gemo_shm_uao,
    258 			    i*PAGE_SIZE, (i+1)*PAGE_SIZE, &pages);
    259 			if (error)
    260 				continue;
    261 
    262 			struct vm_page *const vm_page = TAILQ_FIRST(&pages);
    263 			struct page *const page = container_of(vm_page,
    264 			    struct page, p_vmp);
    265 			char *const dst = kmap_atomic(page);
    266 			(void)memcpy(dst, vaddr + (i*PAGE_SIZE), PAGE_SIZE);
    267 			drm_clflush_virt_range(dst, PAGE_SIZE);
    268 			kunmap_atomic(dst);
    269 
    270 			vm_page->flags &= ~PG_CLEAN;
    271 			/* XXX mark page accessed */
    272 			uvm_obj_unwirepages(obj->base.gemo_shm_uao,
    273 			    i*PAGE_SIZE, (i+1)*PAGE_SIZE);
    274 		}
    275 #else
    276 		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
    277 		char *vaddr = phys->vaddr;
    278 		int i;
    279 
    280 		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
    281 			struct page *page = shmem_read_mapping_page(mapping, i);
    282 			if (!IS_ERR(page)) {
    283 				char *dst = kmap_atomic(page);
    284 				memcpy(dst, vaddr, PAGE_SIZE);
    285 				drm_clflush_virt_range(dst, PAGE_SIZE);
    286 				kunmap_atomic(dst);
    287 
    288 				set_page_dirty(page);
    289 				mark_page_accessed(page);
    290 				page_cache_release(page);
    291 			}
    292 			vaddr += PAGE_SIZE;
    293 		}
    294 #endif
    295 		i915_gem_chipset_flush(obj->base.dev);
    296 	}
    297 
    298 #ifndef __NetBSD__
    299 #ifdef CONFIG_X86
    300 	set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
    301 #endif
    302 #endif
    303 	drm_pci_free(obj->base.dev, phys);
    304 	obj->phys_handle = NULL;
    305 }
    306 
    307 int
    308 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
    309 			    int align)
    310 {
    311 	drm_dma_handle_t *phys;
    312 #ifndef __NetBSD__
    313 	struct address_space *mapping;
    314 #endif
    315 	char *vaddr;
    316 	int i;
    317 
    318 	if (obj->phys_handle) {
    319 		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
    320 			return -EBUSY;
    321 
    322 		return 0;
    323 	}
    324 
    325 	if (obj->madv != I915_MADV_WILLNEED)
    326 		return -EFAULT;
    327 
    328 #ifdef __NetBSD__
    329 	if (obj->base.gemo_shm_uao == NULL)
    330 		return -EINVAL;
    331 #else
    332 	if (obj->base.filp == NULL)
    333 		return -EINVAL;
    334 #endif
    335 
    336 	/* create a new object */
    337 	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
    338 	if (!phys)
    339 		return -ENOMEM;
    340 
    341 	vaddr = phys->vaddr;
    342 #ifndef __NetBSD__
    343 #ifdef CONFIG_X86
    344 	set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
    345 #endif
    346 	mapping = file_inode(obj->base.filp)->i_mapping;
    347 #endif
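        	/*
        	 * Copy any existing contents of the backing object into the
        	 * newly allocated contiguous buffer, one page at a time.
        	 */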
    348 	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
    349 		struct page *page;
    350 		char *src;
    351 
    352 #ifdef __NetBSD__
    353 		struct pglist pages;
    354 		int ret;
    355 
    356 		TAILQ_INIT(&pages);
    357 
    358 		/* XXX errno NetBSD->Linux */
    359 		ret = -uvm_obj_wirepages(obj->base.gemo_shm_uao, i*PAGE_SIZE,
    360 		    (i+1)*PAGE_SIZE, &pages);
    361 		if (ret) {
    362 			drm_pci_free(obj->base.dev, phys);
    363 			return ret;
    364 		}
    365 		KASSERT(!TAILQ_EMPTY(&pages));
    366 		page = container_of(TAILQ_FIRST(&pages), struct page, p_vmp);
    367 #else
    368 		page = shmem_read_mapping_page(mapping, i);
    369 		if (IS_ERR(page)) {
    370 #ifdef CONFIG_X86
    371 			set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
    372 #endif
    373 			drm_pci_free(obj->base.dev, phys);
    374 			return PTR_ERR(page);
    375 		}
    376 #endif	/* defined(__NetBSD__) */
    377 
    378 		src = kmap_atomic(page);
    379 		memcpy(vaddr, src, PAGE_SIZE);
    380 		kunmap_atomic(src);
    381 
    382 #ifdef __NetBSD__
    383 		/* XXX mark page accessed */
    384 		uvm_obj_unwirepages(obj->base.gemo_shm_uao, i*PAGE_SIZE,
    385 		    (i + 1)*PAGE_SIZE);
    386 #else
    387 		mark_page_accessed(page);
    388 		page_cache_release(page);
    389 #endif
    390 
    391 		vaddr += PAGE_SIZE;
    392 	}
    393 
    394 	obj->phys_handle = phys;
    395 	return 0;
    396 }
    397 
    398 static int
    399 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
    400 		     struct drm_i915_gem_pwrite *args,
    401 		     struct drm_file *file_priv)
    402 {
    403 	struct drm_device *dev = obj->base.dev;
    404 	void *vaddr = (char *)obj->phys_handle->vaddr + args->offset;
    405 	char __user *user_data = to_user_ptr(args->data_ptr);
    406 
    407 	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
    408 		unsigned long unwritten;
    409 
    410 		/* The physical object once assigned is fixed for the lifetime
    411 		 * of the obj, so we can safely drop the lock and continue
    412 		 * to access vaddr.
    413 		 */
    414 		mutex_unlock(&dev->struct_mutex);
    415 		unwritten = copy_from_user(vaddr, user_data, args->size);
    416 		mutex_lock(&dev->struct_mutex);
    417 		if (unwritten)
    418 			return -EFAULT;
    419 	}
    420 
    421 	i915_gem_chipset_flush(dev);
    422 	return 0;
    423 }
    424 
    425 void *i915_gem_object_alloc(struct drm_device *dev)
    426 {
    427 	struct drm_i915_private *dev_priv = dev->dev_private;
    428 	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
    429 }
    430 
    431 void i915_gem_object_free(struct drm_i915_gem_object *obj)
    432 {
    433 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
    434 	kmem_cache_free(dev_priv->slab, obj);
    435 }
    436 
    437 static int
    438 i915_gem_create(struct drm_file *file,
    439 		struct drm_device *dev,
    440 		uint64_t size,
    441 		uint32_t *handle_p)
    442 {
    443 	struct drm_i915_gem_object *obj;
    444 	int ret;
    445 	u32 handle;
    446 
    447 	size = roundup(size, PAGE_SIZE);
    448 	if (size == 0)
    449 		return -EINVAL;
    450 
    451 	/* Allocate the new object */
    452 	obj = i915_gem_alloc_object(dev, size);
    453 	if (obj == NULL)
    454 		return -ENOMEM;
    455 
    456 	ret = drm_gem_handle_create(file, &obj->base, &handle);
    457 	/* drop reference from allocate - handle holds it now */
    458 	drm_gem_object_unreference_unlocked(&obj->base);
    459 	if (ret)
    460 		return ret;
    461 
    462 	*handle_p = handle;
    463 	return 0;
    464 }
    465 
    466 int
    467 i915_gem_dumb_create(struct drm_file *file,
    468 		     struct drm_device *dev,
    469 		     struct drm_mode_create_dumb *args)
    470 {
    471 	/* have to work out size/pitch and return them */
    472 #ifdef __NetBSD__		/* ALIGN means something else.  */
    473 	args->pitch = round_up(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
    474 #else
    475 	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
    476 #endif
    477 	args->size = args->pitch * args->height;
    478 	return i915_gem_create(file, dev,
    479 			       args->size, &args->handle);
    480 }
    481 
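        /*
         * Example (userspace sketch, not part of this file): a minimal GEM
         * allocation through the create ioctl below via libdrm.  Only the
         * size is supplied; the kernel rounds it up to a page and returns
         * the handle.
         *
         *	struct drm_i915_gem_create create = { .size = 4096 };
         *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
         *		... use create.handle ...
         */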
    482 /**
    483  * Creates a new mm object and returns a handle to it.
    484  */
    485 int
    486 i915_gem_create_ioctl(struct drm_device *dev, void *data,
    487 		      struct drm_file *file)
    488 {
    489 	struct drm_i915_gem_create *args = data;
    490 
    491 	return i915_gem_create(file, dev,
    492 			       args->size, &args->handle);
    493 }
    494 
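        /*
         * Copy helpers for objects whose backing pages are bit-17 swizzled:
         * on such machines the two 64-byte halves of every 128-byte span are
         * swapped, so the copy proceeds one 64-byte cacheline at a time with
         * the GPU-side offset XORed with 64 to undo the swizzle.
         */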
    495 static inline int
    496 __copy_to_user_swizzled(char __user *cpu_vaddr,
    497 			const char *gpu_vaddr, int gpu_offset,
    498 			int length)
    499 {
    500 	int ret, cpu_offset = 0;
    501 
    502 	while (length > 0) {
    503 #ifdef __NetBSD__		/* XXX ALIGN means something else.  */
    504 		int cacheline_end = round_up(gpu_offset + 1, 64);
    505 #else
    506 		int cacheline_end = ALIGN(gpu_offset + 1, 64);
    507 #endif
    508 		int this_length = min(cacheline_end - gpu_offset, length);
    509 		int swizzled_gpu_offset = gpu_offset ^ 64;
    510 
    511 		ret = __copy_to_user(cpu_vaddr + cpu_offset,
    512 				     gpu_vaddr + swizzled_gpu_offset,
    513 				     this_length);
    514 		if (ret)
    515 			return ret + length;
    516 
    517 		cpu_offset += this_length;
    518 		gpu_offset += this_length;
    519 		length -= this_length;
    520 	}
    521 
    522 	return 0;
    523 }
    524 
    525 static inline int
    526 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
    527 			  const char __user *cpu_vaddr,
    528 			  int length)
    529 {
    530 	int ret, cpu_offset = 0;
    531 
    532 	while (length > 0) {
    533 #ifdef __NetBSD__		/* XXX ALIGN means something else.  */
    534 		int cacheline_end = round_up(gpu_offset + 1, 64);
    535 #else
    536 		int cacheline_end = ALIGN(gpu_offset + 1, 64);
    537 #endif
    538 		int this_length = min(cacheline_end - gpu_offset, length);
    539 		int swizzled_gpu_offset = gpu_offset ^ 64;
    540 
    541 		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
    542 				       cpu_vaddr + cpu_offset,
    543 				       this_length);
    544 		if (ret)
    545 			return ret + length;
    546 
    547 		cpu_offset += this_length;
    548 		gpu_offset += this_length;
    549 		length -= this_length;
    550 	}
    551 
    552 	return 0;
    553 }
    554 
    555 /*
    556  * Pins the specified object's pages and synchronizes the object with
    557  * GPU accesses. Sets needs_clflush to non-zero if the caller should
    558  * flush the object from the CPU cache.
    559  */
    560 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
    561 				    int *needs_clflush)
    562 {
    563 	int ret;
    564 
    565 	*needs_clflush = 0;
    566 
    567 #ifdef __NetBSD__
    568 	if (obj->base.gemo_shm_uao == NULL)
    569 		return -EINVAL;
    570 #else
    571 	if (!obj->base.filp)
    572 		return -EINVAL;
    573 #endif
    574 
    575 	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
     576 		/* If we're not in the cpu read domain, set ourselves into the
     577 		 * gtt read domain and manually flush cachelines (if required).
     578 		 * This optimizes for the case when the gpu will dirty the data
     579 		 * again anyway before the next pread happens. */
    580 		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
    581 							obj->cache_level);
    582 		ret = i915_gem_object_wait_rendering(obj, true);
    583 		if (ret)
    584 			return ret;
    585 	}
    586 
    587 	ret = i915_gem_object_get_pages(obj);
    588 	if (ret)
    589 		return ret;
    590 
    591 	i915_gem_object_pin_pages(obj);
    592 
    593 	return ret;
    594 }
    595 
    596 /* Per-page copy function for the shmem pread fastpath.
    597  * Flushes invalid cachelines before reading the target if
    598  * needs_clflush is set. */
    599 static int
    600 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
    601 		 char __user *user_data,
    602 		 bool page_do_bit17_swizzling, bool needs_clflush)
    603 {
    604 #ifdef __NetBSD__		/* XXX atomic shmem fast path */
    605 	return -EFAULT;
    606 #else
    607 	char *vaddr;
    608 	int ret;
    609 
    610 	if (unlikely(page_do_bit17_swizzling))
    611 		return -EINVAL;
    612 
    613 	vaddr = kmap_atomic(page);
    614 	if (needs_clflush)
    615 		drm_clflush_virt_range(vaddr + shmem_page_offset,
    616 				       page_length);
    617 	ret = __copy_to_user_inatomic(user_data,
    618 				      vaddr + shmem_page_offset,
    619 				      page_length);
    620 	kunmap_atomic(vaddr);
    621 
    622 	return ret ? -EFAULT : 0;
    623 #endif
    624 }
    625 
    626 static void
    627 shmem_clflush_swizzled_range(char *addr, unsigned long length,
    628 			     bool swizzled)
    629 {
    630 	if (unlikely(swizzled)) {
    631 		unsigned long start = (unsigned long) addr;
    632 		unsigned long end = (unsigned long) addr + length;
    633 
    634 		/* For swizzling simply ensure that we always flush both
    635 		 * channels. Lame, but simple and it works. Swizzled
    636 		 * pwrite/pread is far from a hotpath - current userspace
    637 		 * doesn't use it at all. */
    638 		start = round_down(start, 128);
    639 		end = round_up(end, 128);
    640 
    641 		drm_clflush_virt_range((void *)start, end - start);
    642 	} else {
    643 		drm_clflush_virt_range(addr, length);
    644 	}
    645 
    646 }
    647 
    648 /* Only difference to the fast-path function is that this can handle bit17
    649  * and uses non-atomic copy and kmap functions. */
    650 static int
    651 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
    652 		 char __user *user_data,
    653 		 bool page_do_bit17_swizzling, bool needs_clflush)
    654 {
    655 	char *vaddr;
    656 	int ret;
    657 
    658 	vaddr = kmap(page);
    659 	if (needs_clflush)
    660 		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
    661 					     page_length,
    662 					     page_do_bit17_swizzling);
    663 
    664 	if (page_do_bit17_swizzling)
    665 		ret = __copy_to_user_swizzled(user_data,
    666 					      vaddr, shmem_page_offset,
    667 					      page_length);
    668 	else
    669 		ret = __copy_to_user(user_data,
    670 				     vaddr + shmem_page_offset,
    671 				     page_length);
    672 	kunmap(page);
    673 
     674 	return ret ? -EFAULT : 0;
    675 }
    676 
    677 static int
    678 i915_gem_shmem_pread(struct drm_device *dev,
    679 		     struct drm_i915_gem_object *obj,
    680 		     struct drm_i915_gem_pread *args,
    681 		     struct drm_file *file)
    682 {
    683 	char __user *user_data;
    684 	ssize_t remain;
    685 	loff_t offset;
    686 	int shmem_page_offset, page_length, ret = 0;
    687 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
    688 #ifndef __NetBSD__		/* XXX */
    689 	int prefaulted = 0;
    690 #endif
    691 	int needs_clflush = 0;
    692 #ifndef __NetBSD__
    693 	struct sg_page_iter sg_iter;
    694 #endif
    695 
    696 	user_data = to_user_ptr(args->data_ptr);
    697 	remain = args->size;
    698 
    699 	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
    700 
    701 	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
    702 	if (ret)
    703 		return ret;
    704 
    705 	offset = args->offset;
    706 
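        	/*
        	 * Copy out one page at a time.  Try the atomic kmap fast path
        	 * first; if it faults, drop struct_mutex and retry with the
        	 * sleepable slow path (on Linux, after prefaulting the user
        	 * buffer).
        	 */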
    707 #ifdef __NetBSD__
    708 	while (0 < remain)
    709 #else
    710 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
    711 			 offset >> PAGE_SHIFT)
    712 #endif
    713 	{
    714 #ifdef __NetBSD__
    715 		struct page *const page = i915_gem_object_get_page(obj,
    716 		    atop(offset));
    717 #else
    718 		struct page *page = sg_page_iter_page(&sg_iter);
    719 
    720 		if (remain <= 0)
    721 			break;
    722 #endif
    723 
    724 		/* Operation in this page
    725 		 *
    726 		 * shmem_page_offset = offset within page in shmem file
    727 		 * page_length = bytes to copy for this page
    728 		 */
    729 		shmem_page_offset = offset_in_page(offset);
    730 		page_length = remain;
    731 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
    732 			page_length = PAGE_SIZE - shmem_page_offset;
    733 
    734 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
    735 			(page_to_phys(page) & (1 << 17)) != 0;
    736 
    737 		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
    738 				       user_data, page_do_bit17_swizzling,
    739 				       needs_clflush);
    740 		if (ret == 0)
    741 			goto next_page;
    742 
    743 		mutex_unlock(&dev->struct_mutex);
    744 #ifndef __NetBSD__
    745 		if (likely(!i915.prefault_disable) && !prefaulted) {
    746 			ret = fault_in_multipages_writeable(user_data, remain);
    747 			/* Userspace is tricking us, but we've already clobbered
    748 			 * its pages with the prefault and promised to write the
    749 			 * data up to the first fault. Hence ignore any errors
    750 			 * and just continue. */
    751 			(void)ret;
    752 			prefaulted = 1;
    753 		}
    754 #endif
    755 		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
    756 				       user_data, page_do_bit17_swizzling,
    757 				       needs_clflush);
    758 
    759 		mutex_lock(&dev->struct_mutex);
    760 
    761 		if (ret)
    762 			goto out;
    763 
    764 next_page:
    765 		remain -= page_length;
    766 		user_data += page_length;
    767 		offset += page_length;
    768 	}
    769 
    770 out:
    771 	i915_gem_object_unpin_pages(obj);
    772 
    773 	return ret;
    774 }
    775 
    776 /**
    777  * Reads data from the object referenced by handle.
    778  *
    779  * On error, the contents of *data are undefined.
    780  */
    781 int
    782 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
    783 		     struct drm_file *file)
    784 {
    785 	struct drm_i915_gem_pread *args = data;
    786 	struct drm_gem_object *gobj;
    787 	struct drm_i915_gem_object *obj;
    788 	int ret = 0;
    789 
    790 	if (args->size == 0)
    791 		return 0;
    792 
    793 	if (!access_ok(VERIFY_WRITE,
    794 		       to_user_ptr(args->data_ptr),
    795 		       args->size))
    796 		return -EFAULT;
    797 
    798 	ret = i915_mutex_lock_interruptible(dev);
    799 	if (ret)
    800 		return ret;
    801 
    802 	gobj = drm_gem_object_lookup(dev, file, args->handle);
    803 	if (gobj == NULL) {
    804 		ret = -ENOENT;
    805 		goto unlock;
    806 	}
    807 	obj = to_intel_bo(gobj);
    808 
    809 	/* Bounds check source.  */
    810 	if (args->offset > obj->base.size ||
    811 	    args->size > obj->base.size - args->offset) {
    812 		ret = -EINVAL;
    813 		goto out;
    814 	}
    815 
    816 	/* prime objects have no backing filp to GEM pread/pwrite
    817 	 * pages from.
    818 	 */
    819 #ifdef __NetBSD__
    820 	/* Also stolen objects.  */
    821 	if (obj->base.gemo_shm_uao == NULL) {
    822 		ret = -EINVAL;
    823 		goto out;
    824 	}
    825 #else
    826 	if (!obj->base.filp) {
    827 		ret = -EINVAL;
    828 		goto out;
    829 	}
    830 #endif
    831 
    832 	trace_i915_gem_object_pread(obj, args->offset, args->size);
    833 
    834 	ret = i915_gem_shmem_pread(dev, obj, args, file);
    835 
    836 out:
    837 	drm_gem_object_unreference(&obj->base);
    838 unlock:
    839 	mutex_unlock(&dev->struct_mutex);
    840 	return ret;
    841 }
    842 
    843 /* This is the fast write path which cannot handle
    844  * page faults in the source data
    845  */
    846 
    847 static inline int
    848 fast_user_write(struct io_mapping *mapping,
    849 		loff_t page_base, int page_offset,
    850 		char __user *user_data,
    851 		int length)
    852 {
    853 #ifdef __NetBSD__		/* XXX atomic shmem fast path */
    854 	return -EFAULT;
    855 #else
    856 	void __iomem *vaddr_atomic;
    857 	void *vaddr;
    858 	unsigned long unwritten;
    859 
    860 	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
    861 	/* We can use the cpu mem copy function because this is X86. */
    862 	vaddr = (void __force*)vaddr_atomic + page_offset;
    863 	unwritten = __copy_from_user_inatomic_nocache(vaddr,
    864 						      user_data, length);
    865 	io_mapping_unmap_atomic(vaddr_atomic);
    866 	return unwritten;
    867 #endif
    868 }
    869 
    870 /**
    871  * This is the fast pwrite path, where we copy the data directly from the
    872  * user into the GTT, uncached.
    873  */
    874 static int
    875 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
    876 			 struct drm_i915_gem_object *obj,
    877 			 struct drm_i915_gem_pwrite *args,
    878 			 struct drm_file *file)
    879 {
    880 	struct drm_i915_private *dev_priv = dev->dev_private;
    881 	ssize_t remain;
    882 	loff_t offset, page_base;
    883 	char __user *user_data;
    884 	int page_offset, page_length, ret;
    885 
    886 	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
    887 	if (ret)
    888 		goto out;
    889 
    890 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
    891 	if (ret)
    892 		goto out_unpin;
    893 
    894 	ret = i915_gem_object_put_fence(obj);
    895 	if (ret)
    896 		goto out_unpin;
    897 
    898 	user_data = to_user_ptr(args->data_ptr);
    899 	remain = args->size;
    900 
    901 	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
    902 
    903 	while (remain > 0) {
    904 		/* Operation in this page
    905 		 *
    906 		 * page_base = page offset within aperture
    907 		 * page_offset = offset within page
    908 		 * page_length = bytes to copy for this page
    909 		 */
    910 		page_base = offset & PAGE_MASK;
    911 		page_offset = offset_in_page(offset);
    912 		page_length = remain;
    913 		if ((page_offset + remain) > PAGE_SIZE)
    914 			page_length = PAGE_SIZE - page_offset;
    915 
    916 		/* If we get a fault while copying data, then (presumably) our
    917 		 * source page isn't available.  Return the error and we'll
    918 		 * retry in the slow path.
    919 		 */
    920 		if (fast_user_write(dev_priv->gtt.mappable, page_base,
    921 				    page_offset, user_data, page_length)) {
    922 			ret = -EFAULT;
    923 			goto out_unpin;
    924 		}
    925 
    926 		remain -= page_length;
    927 		user_data += page_length;
    928 		offset += page_length;
    929 	}
    930 
    931 out_unpin:
    932 	i915_gem_object_ggtt_unpin(obj);
    933 out:
    934 	return ret;
    935 }
    936 
    937 /* Per-page copy function for the shmem pwrite fastpath.
    938  * Flushes invalid cachelines before writing to the target if
    939  * needs_clflush_before is set and flushes out any written cachelines after
    940  * writing if needs_clflush is set. */
    941 static int
    942 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
    943 		  char __user *user_data,
    944 		  bool page_do_bit17_swizzling,
    945 		  bool needs_clflush_before,
    946 		  bool needs_clflush_after)
    947 {
    948 #ifdef __NetBSD__
    949 	return -EFAULT;
    950 #else
    951 	char *vaddr;
    952 	int ret;
    953 
    954 	if (unlikely(page_do_bit17_swizzling))
    955 		return -EINVAL;
    956 
    957 	vaddr = kmap_atomic(page);
    958 	if (needs_clflush_before)
    959 		drm_clflush_virt_range(vaddr + shmem_page_offset,
    960 				       page_length);
    961 	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
    962 					user_data, page_length);
    963 	if (needs_clflush_after)
    964 		drm_clflush_virt_range(vaddr + shmem_page_offset,
    965 				       page_length);
    966 	kunmap_atomic(vaddr);
    967 
    968 	return ret ? -EFAULT : 0;
    969 #endif
    970 }
    971 
    972 /* Only difference to the fast-path function is that this can handle bit17
    973  * and uses non-atomic copy and kmap functions. */
    974 static int
    975 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
    976 		  char __user *user_data,
    977 		  bool page_do_bit17_swizzling,
    978 		  bool needs_clflush_before,
    979 		  bool needs_clflush_after)
    980 {
    981 	char *vaddr;
    982 	int ret;
    983 
    984 	vaddr = kmap(page);
    985 	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
    986 		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
    987 					     page_length,
    988 					     page_do_bit17_swizzling);
    989 	if (page_do_bit17_swizzling)
    990 		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
    991 						user_data,
    992 						page_length);
    993 	else
    994 		ret = __copy_from_user(vaddr + shmem_page_offset,
    995 				       user_data,
    996 				       page_length);
    997 	if (needs_clflush_after)
    998 		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
    999 					     page_length,
   1000 					     page_do_bit17_swizzling);
   1001 	kunmap(page);
   1002 
   1003 	return ret ? -EFAULT : 0;
   1004 }
   1005 
   1006 static int
   1007 i915_gem_shmem_pwrite(struct drm_device *dev,
   1008 		      struct drm_i915_gem_object *obj,
   1009 		      struct drm_i915_gem_pwrite *args,
   1010 		      struct drm_file *file)
   1011 {
   1012 	ssize_t remain;
   1013 	loff_t offset;
   1014 	char __user *user_data;
   1015 	int shmem_page_offset, page_length, ret = 0;
   1016 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
   1017 	int hit_slowpath = 0;
   1018 	int needs_clflush_after = 0;
   1019 	int needs_clflush_before = 0;
   1020 #ifndef __NetBSD__
   1021 	struct sg_page_iter sg_iter;
   1022 	int flush_mask = boot_cpu_data.x86_clflush_size - 1;
   1023 #else
   1024 	int flush_mask = cpu_info_primary.ci_cflush_lsize - 1;
   1025 #endif
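        	/*
        	 * flush_mask covers a single CPU cache line; a write that does
        	 * not begin and end on cache-line boundaries must clflush the
        	 * stale line first (see partial_cacheline_write below).
        	 */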
   1026 
   1027 	user_data = to_user_ptr(args->data_ptr);
   1028 	remain = args->size;
   1029 
   1030 	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
   1031 
   1032 	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
    1033 		/* If we're not in the cpu write domain, set ourselves into the
    1034 		 * gtt write domain and manually flush cachelines (if required).
    1035 		 * This optimizes for the case when the gpu will use the data
    1036 		 * right away and we therefore have to clflush anyway. */
   1037 		needs_clflush_after = cpu_write_needs_clflush(obj);
   1038 		ret = i915_gem_object_wait_rendering(obj, false);
   1039 		if (ret)
   1040 			return ret;
   1041 	}
   1042 	/* Same trick applies to invalidate partially written cachelines read
   1043 	 * before writing. */
   1044 	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
   1045 		needs_clflush_before =
   1046 			!cpu_cache_is_coherent(dev, obj->cache_level);
   1047 
   1048 	ret = i915_gem_object_get_pages(obj);
   1049 	if (ret)
   1050 		return ret;
   1051 
   1052 	i915_gem_object_pin_pages(obj);
   1053 
   1054 	offset = args->offset;
   1055 	obj->dirty = 1;
   1056 
   1057 #ifdef __NetBSD__
   1058 	while (0 < remain)
   1059 #else
   1060 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
   1061 			 offset >> PAGE_SHIFT)
   1062 #endif
   1063 	{
   1064 #ifdef __NetBSD__
   1065 		struct page *const page = i915_gem_object_get_page(obj,
   1066 		    atop(offset));
   1067 #else
   1068 		struct page *page = sg_page_iter_page(&sg_iter);
   1069 #endif
   1070 
   1071 		if (remain <= 0)
   1072 			break;
   1073 
   1074 		/* Operation in this page
   1075 		 *
   1076 		 * shmem_page_offset = offset within page in shmem file
   1077 		 * page_length = bytes to copy for this page
   1078 		 */
   1079 		shmem_page_offset = offset_in_page(offset);
   1080 
   1081 		page_length = remain;
   1082 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
   1083 			page_length = PAGE_SIZE - shmem_page_offset;
   1084 
   1085 		/* If we don't overwrite a cacheline completely we need to be
   1086 		 * careful to have up-to-date data by first clflushing. Don't
   1087 		 * overcomplicate things and flush the entire patch. */
   1088 		const int partial_cacheline_write = needs_clflush_before &&
   1089 			((shmem_page_offset | page_length) & flush_mask);
   1090 
   1091 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
   1092 			(page_to_phys(page) & (1 << 17)) != 0;
   1093 
   1094 		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
   1095 					user_data, page_do_bit17_swizzling,
   1096 					partial_cacheline_write,
   1097 					needs_clflush_after);
   1098 		if (ret == 0)
   1099 			goto next_page;
   1100 
   1101 		hit_slowpath = 1;
   1102 		mutex_unlock(&dev->struct_mutex);
   1103 		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
   1104 					user_data, page_do_bit17_swizzling,
   1105 					partial_cacheline_write,
   1106 					needs_clflush_after);
   1107 
   1108 		mutex_lock(&dev->struct_mutex);
   1109 
   1110 		if (ret)
   1111 			goto out;
   1112 
   1113 next_page:
   1114 		remain -= page_length;
   1115 		user_data += page_length;
   1116 		offset += page_length;
   1117 	}
   1118 
   1119 out:
   1120 	i915_gem_object_unpin_pages(obj);
   1121 
   1122 	if (hit_slowpath) {
   1123 		/*
   1124 		 * Fixup: Flush cpu caches in case we didn't flush the dirty
   1125 		 * cachelines in-line while writing and the object moved
   1126 		 * out of the cpu write domain while we've dropped the lock.
   1127 		 */
   1128 		if (!needs_clflush_after &&
   1129 		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
   1130 			if (i915_gem_clflush_object(obj, obj->pin_display))
   1131 				i915_gem_chipset_flush(dev);
   1132 		}
   1133 	}
   1134 
   1135 	if (needs_clflush_after)
   1136 		i915_gem_chipset_flush(dev);
   1137 
   1138 	return ret;
   1139 }
   1140 
   1141 /**
   1142  * Writes data to the object referenced by handle.
   1143  *
   1144  * On error, the contents of the buffer that were to be modified are undefined.
   1145  */
   1146 int
   1147 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
   1148 		      struct drm_file *file)
   1149 {
   1150 	struct drm_i915_gem_pwrite *args = data;
   1151 	struct drm_gem_object *gobj;
   1152 	struct drm_i915_gem_object *obj;
   1153 	int ret;
   1154 
   1155 	if (args->size == 0)
   1156 		return 0;
   1157 
   1158 	if (!access_ok(VERIFY_READ,
   1159 		       to_user_ptr(args->data_ptr),
   1160 		       args->size))
   1161 		return -EFAULT;
   1162 
   1163 #ifndef __NetBSD__		/* XXX prefault */
   1164 	if (likely(!i915.prefault_disable)) {
   1165 		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
   1166 						   args->size);
   1167 		if (ret)
   1168 			return -EFAULT;
   1169 	}
   1170 #endif
   1171 
   1172 	ret = i915_mutex_lock_interruptible(dev);
   1173 	if (ret)
   1174 		return ret;
   1175 
   1176 	gobj = drm_gem_object_lookup(dev, file, args->handle);
   1177 	if (gobj == NULL) {
   1178 		ret = -ENOENT;
   1179 		goto unlock;
   1180 	}
   1181 	obj = to_intel_bo(gobj);
   1182 
   1183 	/* Bounds check destination. */
   1184 	if (args->offset > obj->base.size ||
   1185 	    args->size > obj->base.size - args->offset) {
   1186 		ret = -EINVAL;
   1187 		goto out;
   1188 	}
   1189 
   1190 	/* prime objects have no backing filp to GEM pread/pwrite
   1191 	 * pages from.
   1192 	 */
   1193 #ifdef __NetBSD__
   1194 	/* Also stolen objects.  */
   1195 	if (obj->base.gemo_shm_uao == NULL) {
   1196 		ret = -EINVAL;
   1197 		goto out;
   1198 	}
   1199 #else
   1200 	if (!obj->base.filp) {
   1201 		ret = -EINVAL;
   1202 		goto out;
   1203 	}
   1204 #endif
   1205 
   1206 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
   1207 
   1208 	ret = -EFAULT;
   1209 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
   1210 	 * it would end up going through the fenced access, and we'll get
   1211 	 * different detiling behavior between reading and writing.
   1212 	 * pread/pwrite currently are reading and writing from the CPU
   1213 	 * perspective, requiring manual detiling by the client.
   1214 	 */
   1215 	if (obj->phys_handle) {
   1216 		ret = i915_gem_phys_pwrite(obj, args, file);
   1217 		goto out;
   1218 	}
   1219 
   1220 	if (obj->tiling_mode == I915_TILING_NONE &&
   1221 	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
   1222 	    cpu_write_needs_clflush(obj)) {
   1223 		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
   1224 		/* Note that the gtt paths might fail with non-page-backed user
   1225 		 * pointers (e.g. gtt mappings when moving data between
   1226 		 * textures). Fallback to the shmem path in that case. */
   1227 	}
   1228 
   1229 	if (ret == -EFAULT || ret == -ENOSPC)
   1230 		ret = i915_gem_shmem_pwrite(dev, obj, args, file);
   1231 
   1232 out:
   1233 	drm_gem_object_unreference(&obj->base);
   1234 unlock:
   1235 	mutex_unlock(&dev->struct_mutex);
   1236 	return ret;
   1237 }
   1238 
   1239 int
   1240 i915_gem_check_wedge(struct i915_gpu_error *error,
   1241 		     bool interruptible)
   1242 {
   1243 	if (i915_reset_in_progress(error)) {
   1244 		/* Non-interruptible callers can't handle -EAGAIN, hence return
   1245 		 * -EIO unconditionally for these. */
   1246 		if (!interruptible)
   1247 			return -EIO;
   1248 
   1249 		/* Recovery complete, but the reset failed ... */
   1250 		if (i915_terminally_wedged(error))
   1251 			return -EIO;
   1252 
   1253 		return -EAGAIN;
   1254 	}
   1255 
   1256 	return 0;
   1257 }
   1258 
   1259 /*
   1260  * Compare seqno against outstanding lazy request. Emit a request if they are
   1261  * equal.
   1262  */
   1263 static int
   1264 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
   1265 {
   1266 	int ret;
   1267 
   1268 	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
   1269 
   1270 	ret = 0;
   1271 	if (seqno == ring->outstanding_lazy_seqno)
   1272 		ret = i915_add_request(ring, NULL);
   1273 
   1274 	return ret;
   1275 }
   1276 
   1277 #ifndef __NetBSD__
   1278 static void fake_irq(unsigned long data)
   1279 {
   1280 	wake_up_process((struct task_struct *)data);
   1281 }
   1282 #endif
   1283 
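        /*
         * Has this ring been flagged (by the hangcheck code) as missing
         * interrupts?  If so, waiters poll with a one-tick timeout rather
         * than relying on the ring's IRQ alone.
         */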
   1284 static bool missed_irq(struct drm_i915_private *dev_priv,
   1285 		       struct intel_ring_buffer *ring)
   1286 {
   1287 	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
   1288 }
   1289 
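        /*
         * Allow at most one outstanding RPS wait-boost per file: atomically
         * set rps_wait_boost and permit the boost only if it was previously
         * clear.  Kernel-internal waits (NULL file_priv) may always boost.
         */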
   1290 static bool can_wait_boost(struct drm_i915_file_private *file_priv)
   1291 {
   1292 	if (file_priv == NULL)
   1293 		return true;
   1294 
   1295 	return !atomic_xchg(&file_priv->rps_wait_boost, true);
   1296 }
   1297 
   1298 /**
   1299  * __wait_seqno - wait until execution of seqno has finished
   1300  * @ring: the ring expected to report seqno
   1301  * @seqno: duh!
   1302  * @reset_counter: reset sequence associated with the given seqno
   1303  * @interruptible: do an interruptible wait (normally yes)
   1304  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
   1305  *
   1306  * Note: It is of utmost importance that the passed in seqno and reset_counter
   1307  * values have been read by the caller in an smp safe manner. Where read-side
   1308  * locks are involved, it is sufficient to read the reset_counter before
   1309  * unlocking the lock that protects the seqno. For lockless tricks, the
   1310  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
   1311  * inserted.
   1312  *
    1313  * Returns 0 if the seqno was found within the allotted time. Else returns the
   1314  * errno with remaining time filled in timeout argument.
   1315  */
   1316 #ifdef __NetBSD__
   1317 static int
   1318 __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, unsigned reset_counter,
   1319     bool interruptible, struct timespec *timeout,
   1320     struct drm_i915_file_private *file_priv)
   1321 {
   1322 	struct drm_device *dev = ring->dev;
   1323 	struct drm_i915_private *dev_priv = dev->dev_private;
   1324 	bool irq_test_in_progress;
   1325 	struct timespec before, after;
   1326 	int ticks;
   1327 	bool wedged;
   1328 	int ret;
   1329 
   1330 	irq_test_in_progress = (dev_priv->gpu_error.test_irq_rings &
   1331 	    intel_ring_flag(ring));
   1332 	__insn_barrier();
   1333 
   1334 	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
   1335 		return 0;
   1336 
   1337 	if (timeout)
   1338 		ticks = mstohz(timespec_to_ns(timeout) / 1000000);
   1339 	else
   1340 		ticks = 1;
   1341 
   1342 	if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
   1343 		gen6_rps_boost(dev_priv);
   1344 		if (file_priv)
   1345 			mod_delayed_work(dev_priv->wq,
   1346 					 &file_priv->mm.idle_work,
   1347 					 msecs_to_jiffies(100));
   1348 	}
   1349 
   1350 	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
   1351 		return -ENODEV;
   1352 
   1353 	nanotime(&before);
   1354 	spin_lock(&dev_priv->irq_lock);
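        	/*
        	 * The wait is over either when a GPU reset has begun since the
        	 * caller sampled reset_counter (wedged) or when the ring's
        	 * seqno has caught up with the one we are waiting for.
        	 */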
   1355 #define	EXIT_COND							      \
   1356 	((wedged = (reset_counter !=					      \
   1357 		atomic_read(&dev_priv->gpu_error.reset_counter))) ||	      \
   1358 	    i915_seqno_passed(ring->get_seqno(ring, false),		      \
   1359 		seqno))
   1360 
   1361 	if (timeout) {
   1362 		/*
   1363 		 * XXX This missed_irq business smells like unlocked
   1364 		 * Linux waitqueue nonsense.
   1365 		 */
   1366 		if (missed_irq(dev_priv, ring))
   1367 			ticks = 1;
   1368 		if (interruptible)
   1369 			DRM_SPIN_TIMED_WAIT_UNTIL(ret, &ring->irq_queue,
   1370 			    &dev_priv->irq_lock, ticks, EXIT_COND);
   1371 		else
   1372 			DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret, &ring->irq_queue,
   1373 			    &dev_priv->irq_lock, ticks, EXIT_COND);
   1374 		if (ret < 0)	/* Failure: return negative error as is.  */
   1375 			;
   1376 		else if (ret == 0) /* Timed out: return -ETIME.  */
   1377 			ret = -ETIME;
   1378 		else		/* Succeeded (ret > 0): return 0.  */
   1379 			ret = 0;
   1380 	} else {
   1381 		if (interruptible)
   1382 			DRM_SPIN_WAIT_UNTIL(ret, &ring->irq_queue,
   1383 			    &dev_priv->irq_lock, EXIT_COND);
   1384 		else
   1385 			DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &ring->irq_queue,
   1386 			    &dev_priv->irq_lock, EXIT_COND);
   1387 		/* ret is negative on failure or zero on success.  */
   1388 	}
   1389 #undef	EXIT_COND
   1390 	spin_unlock(&dev_priv->irq_lock);
   1391 	nanotime(&after);
   1392 
   1393 	if (!irq_test_in_progress)
   1394 		ring->irq_put(ring);
   1395 	if (timeout) {
   1396 		struct timespec slept;
   1397 
   1398 		/* Compute slept = after - before.  */
   1399 		timespecsub(&after, &before, &slept);
   1400 
   1401 		/*
   1402 		 * Return the time remaining, timeout - slept, if we
   1403 		 * slept for less time than the timeout; or zero if we
   1404 		 * timed out.
   1405 		 */
   1406 		if (timespeccmp(&slept, timeout, <))
   1407 			timespecsub(timeout, &slept, timeout);
   1408 		else
   1409 			timespecclear(timeout);
   1410 	}
   1411 	if (wedged) {		/* GPU reset while we were waiting.  */
   1412 		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
   1413 		    interruptible);
   1414 		if (ret == 0)
   1415 			ret = -EAGAIN;
   1416 	}
   1417 	return ret;
   1418 }
   1419 #else
   1420 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
   1421 			unsigned reset_counter,
   1422 			bool interruptible,
   1423 			struct timespec *timeout,
   1424 			struct drm_i915_file_private *file_priv)
   1425 {
   1426 	struct drm_device *dev = ring->dev;
   1427 	struct drm_i915_private *dev_priv = dev->dev_private;
   1428 	const bool irq_test_in_progress =
   1429 		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
   1430 	struct timespec before, now;
   1431 	DEFINE_WAIT(wait);
   1432 	unsigned long timeout_expire;
   1433 	int ret;
   1434 
   1435 	WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
   1436 
   1437 	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
   1438 		return 0;
   1439 
   1440 	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
   1441 
   1442 	if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
   1443 		gen6_rps_boost(dev_priv);
   1444 		if (file_priv)
   1445 			mod_delayed_work(dev_priv->wq,
   1446 					 &file_priv->mm.idle_work,
   1447 					 msecs_to_jiffies(100));
   1448 	}
   1449 
   1450 	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
   1451 		return -ENODEV;
   1452 
   1453 	/* Record current time in case interrupted by signal, or wedged */
   1454 	trace_i915_gem_request_wait_begin(ring, seqno);
   1455 	getrawmonotonic(&before);
   1456 	for (;;) {
   1457 		struct timer_list timer;
   1458 
   1459 		prepare_to_wait(&ring->irq_queue, &wait,
   1460 				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
   1461 
   1462 		/* We need to check whether any gpu reset happened in between
   1463 		 * the caller grabbing the seqno and now ... */
   1464 		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
   1465 			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
    1466 			 * is truly gone. */
   1467 			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
   1468 			if (ret == 0)
   1469 				ret = -EAGAIN;
   1470 			break;
   1471 		}
   1472 
   1473 		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
   1474 			ret = 0;
   1475 			break;
   1476 		}
   1477 
   1478 		if (interruptible && signal_pending(current)) {
   1479 			ret = -ERESTARTSYS;
   1480 			break;
   1481 		}
   1482 
   1483 		if (timeout && time_after_eq(jiffies, timeout_expire)) {
   1484 			ret = -ETIME;
   1485 			break;
   1486 		}
   1487 
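        		/*
        		 * Arm a backstop timer so the sleep below is bounded:
        		 * either at the caller's timeout or, if this ring has
        		 * missed interrupts before, one jiffy from now.  fake_irq
        		 * simply wakes this task when the timer fires.
        		 */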
   1488 		timer.function = NULL;
   1489 		if (timeout || missed_irq(dev_priv, ring)) {
   1490 			unsigned long expire;
   1491 
   1492 			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
   1493 			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
   1494 			mod_timer(&timer, expire);
   1495 		}
   1496 
   1497 		io_schedule();
   1498 
   1499 		if (timer.function) {
   1500 			del_singleshot_timer_sync(&timer);
   1501 			destroy_timer_on_stack(&timer);
   1502 		}
   1503 	}
   1504 	getrawmonotonic(&now);
   1505 	trace_i915_gem_request_wait_end(ring, seqno);
   1506 
   1507 	if (!irq_test_in_progress)
   1508 		ring->irq_put(ring);
   1509 
   1510 	finish_wait(&ring->irq_queue, &wait);
   1511 
   1512 	if (timeout) {
   1513 		struct timespec sleep_time = timespec_sub(now, before);
   1514 		*timeout = timespec_sub(*timeout, sleep_time);
   1515 		if (!timespec_valid(timeout)) /* i.e. negative time remains */
   1516 			set_normalized_timespec(timeout, 0, 0);
   1517 	}
   1518 
   1519 	return ret;
   1520 }
   1521 #endif
   1522 
   1523 /**
   1524  * Waits for a sequence number to be signaled, and cleans up the
   1525  * request and object lists appropriately for that event.
   1526  */
   1527 int
   1528 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
   1529 {
   1530 	struct drm_device *dev = ring->dev;
   1531 	struct drm_i915_private *dev_priv = dev->dev_private;
   1532 	bool interruptible = dev_priv->mm.interruptible;
   1533 	int ret;
   1534 
   1535 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
   1536 	BUG_ON(seqno == 0);
   1537 
   1538 	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
   1539 	if (ret)
   1540 		return ret;
   1541 
   1542 	ret = i915_gem_check_olr(ring, seqno);
   1543 	if (ret)
   1544 		return ret;
   1545 
   1546 	return __wait_seqno(ring, seqno,
   1547 			    atomic_read(&dev_priv->gpu_error.reset_counter),
   1548 			    interruptible, NULL, NULL);
   1549 }
   1550 
   1551 static int
   1552 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
   1553 				     struct intel_ring_buffer *ring)
   1554 {
   1555 	i915_gem_retire_requests_ring(ring);
   1556 
   1557 	/* Manually manage the write flush as we may have not yet
   1558 	 * retired the buffer.
   1559 	 *
   1560 	 * Note that the last_write_seqno is always the earlier of
    1561 	 * the two (read/write) seqno, so if we have successfully waited,
   1562 	 * we know we have passed the last write.
   1563 	 */
   1564 	obj->last_write_seqno = 0;
   1565 	obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
   1566 
   1567 	return 0;
   1568 }
   1569 
   1570 /**
   1571  * Ensures that all rendering to the object has completed and the object is
   1572  * safe to unbind from the GTT or access from the CPU.
   1573  */
   1574 static __must_check int
   1575 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
   1576 			       bool readonly)
   1577 {
   1578 	struct intel_ring_buffer *ring = obj->ring;
   1579 	u32 seqno;
   1580 	int ret;
   1581 
   1582 	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
   1583 	if (seqno == 0)
   1584 		return 0;
   1585 
   1586 	ret = i915_wait_seqno(ring, seqno);
   1587 	if (ret)
   1588 		return ret;
   1589 
   1590 	return i915_gem_object_wait_rendering__tail(obj, ring);
   1591 }
   1592 
   1593 /* A nonblocking variant of the above wait. This is a highly dangerous routine
   1594  * as the object state may change during this call.
   1595  */
   1596 static __must_check int
   1597 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
   1598 					    struct drm_i915_file_private *file_priv,
   1599 					    bool readonly)
   1600 {
   1601 	struct drm_device *dev = obj->base.dev;
   1602 	struct drm_i915_private *dev_priv = dev->dev_private;
   1603 	struct intel_ring_buffer *ring = obj->ring;
   1604 	unsigned reset_counter;
   1605 	u32 seqno;
   1606 	int ret;
   1607 
   1608 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
   1609 	BUG_ON(!dev_priv->mm.interruptible);
   1610 
   1611 	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
   1612 	if (seqno == 0)
   1613 		return 0;
   1614 
   1615 	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
   1616 	if (ret)
   1617 		return ret;
   1618 
   1619 	ret = i915_gem_check_olr(ring, seqno);
   1620 	if (ret)
   1621 		return ret;
   1622 
   1623 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
   1624 	mutex_unlock(&dev->struct_mutex);
   1625 	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
   1626 	mutex_lock(&dev->struct_mutex);
   1627 	if (ret)
   1628 		return ret;
   1629 
   1630 	return i915_gem_object_wait_rendering__tail(obj, ring);
   1631 }
   1632 
   1633 /**
   1634  * Called when user space prepares to use an object with the CPU, either
   1635  * through the mmap ioctl's mapping or a GTT mapping.
   1636  */
   1637 int
   1638 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
   1639 			  struct drm_file *file)
   1640 {
   1641 	struct drm_i915_gem_set_domain *args = data;
   1642 	struct drm_gem_object *gobj;
   1643 	struct drm_i915_gem_object *obj;
   1644 	uint32_t read_domains = args->read_domains;
   1645 	uint32_t write_domain = args->write_domain;
   1646 	int ret;
   1647 
   1648 	/* Only handle setting domains to types used by the CPU. */
   1649 	if (write_domain & I915_GEM_GPU_DOMAINS)
   1650 		return -EINVAL;
   1651 
   1652 	if (read_domains & I915_GEM_GPU_DOMAINS)
   1653 		return -EINVAL;
   1654 
   1655 	/* Having something in the write domain implies it's in the read
   1656 	 * domain, and only that read domain.  Enforce that in the request.
   1657 	 */
   1658 	if (write_domain != 0 && read_domains != write_domain)
   1659 		return -EINVAL;
   1660 
   1661 	ret = i915_mutex_lock_interruptible(dev);
   1662 	if (ret)
   1663 		return ret;
   1664 
   1665 	gobj = drm_gem_object_lookup(dev, file, args->handle);
   1666 	if (gobj == NULL) {
   1667 		ret = -ENOENT;
   1668 		goto unlock;
   1669 	}
   1670 	obj = to_intel_bo(gobj);
   1671 
   1672 	/* Try to flush the object off the GPU without holding the lock.
   1673 	 * We will repeat the flush holding the lock in the normal manner
   1674 	 * to catch cases where we are gazumped.
   1675 	 */
   1676 	ret = i915_gem_object_wait_rendering__nonblocking(obj,
   1677 							  file->driver_priv,
   1678 							  !write_domain);
   1679 	if (ret)
   1680 		goto unref;
   1681 
   1682 	if (read_domains & I915_GEM_DOMAIN_GTT) {
   1683 		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
   1684 
   1685 		/* Silently promote "you're not bound, there was nothing to do"
   1686 		 * to success, since the client was just asking us to
   1687 		 * make sure everything was done.
   1688 		 */
   1689 		if (ret == -EINVAL)
   1690 			ret = 0;
   1691 	} else {
   1692 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
   1693 	}
   1694 
   1695 unref:
   1696 	drm_gem_object_unreference(&obj->base);
   1697 unlock:
   1698 	mutex_unlock(&dev->struct_mutex);
   1699 	return ret;
   1700 }
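
/*
 * Illustrative userspace sketch (not part of this driver): a caller typically
 * prepares a buffer for CPU access through DRM_IOCTL_I915_GEM_SET_DOMAIN
 * roughly as follows, assuming libdrm's drmIoctl() and an open DRM fd; the
 * bo_handle value is hypothetical.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = bo_handle,
 *		.read_domains = I915_GEM_DOMAIN_GTT,
 *		.write_domain = I915_GEM_DOMAIN_GTT,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
 *		err(1, "set_domain");
 *
 * Passing a GPU domain in either field is rejected with -EINVAL above, and a
 * nonzero write_domain must match read_domains exactly.
 */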
   1701 
   1702 /**
   1703  * Called when user space has done writes to this buffer
   1704  */
   1705 int
   1706 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
   1707 			 struct drm_file *file)
   1708 {
   1709 	struct drm_i915_gem_sw_finish *args = data;
   1710 	struct drm_gem_object *gobj;
   1711 	struct drm_i915_gem_object *obj;
   1712 	int ret = 0;
   1713 
   1714 	ret = i915_mutex_lock_interruptible(dev);
   1715 	if (ret)
   1716 		return ret;
   1717 
   1718 	gobj = drm_gem_object_lookup(dev, file, args->handle);
   1719 	if (gobj == NULL) {
   1720 		ret = -ENOENT;
   1721 		goto unlock;
   1722 	}
   1723 	obj = to_intel_bo(gobj);
   1724 
   1725 	/* Pinned buffers may be scanout, so flush the cache */
   1726 	if (obj->pin_display)
   1727 		i915_gem_object_flush_cpu_write_domain(obj, true);
   1728 
   1729 	drm_gem_object_unreference(&obj->base);
   1730 unlock:
   1731 	mutex_unlock(&dev->struct_mutex);
   1732 	return ret;
   1733 }
   1734 
   1735 /**
   1736  * Maps the contents of an object, returning the address it is mapped
   1737  * into.
   1738  *
   1739  * While the mapping holds a reference on the contents of the object, it doesn't
   1740  * imply a ref on the object itself.
   1741  */
   1742 int
   1743 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
   1744 		    struct drm_file *file)
   1745 {
   1746 	struct drm_i915_gem_mmap *args = data;
   1747 	struct drm_gem_object *obj;
   1748 	unsigned long addr;
   1749 #ifdef __NetBSD__
   1750 	int ret;
   1751 #endif
   1752 
   1753 	obj = drm_gem_object_lookup(dev, file, args->handle);
   1754 	if (obj == NULL)
   1755 		return -ENOENT;
   1756 
   1757 	/* prime objects have no backing filp to GEM mmap
   1758 	 * pages from.
   1759 	 */
   1760 #ifdef __NetBSD__
   1761 	/* Also stolen objects (XXX can we get them here?)  */
   1762 	if (obj->gemo_shm_uao == NULL) {
   1763 		drm_gem_object_unreference_unlocked(obj);
   1764 		return -EINVAL;
   1765 	}
   1766 #else
   1767 	if (!obj->filp) {
   1768 		drm_gem_object_unreference_unlocked(obj);
   1769 		return -EINVAL;
   1770 	}
   1771 #endif
   1772 
   1773 #ifdef __NetBSD__
   1774 	addr = (*curproc->p_emul->e_vm_default_addr)(curproc,
   1775 	    (vaddr_t)curproc->p_vmspace->vm_daddr, args->size);
   1776 	/* XXX errno NetBSD->Linux */
   1777 	ret = -uvm_map(&curproc->p_vmspace->vm_map, &addr, args->size,
   1778 	    obj->gemo_shm_uao, args->offset, 0,
   1779 	    UVM_MAPFLAG((VM_PROT_READ | VM_PROT_WRITE),
   1780 		(VM_PROT_READ | VM_PROT_WRITE), UVM_INH_COPY, UVM_ADV_NORMAL,
   1781 		0));
   1782 	if (ret) {
   1783 		drm_gem_object_unreference_unlocked(obj);
   1784 		return ret;
   1785 	}
   1786 	uao_reference(obj->gemo_shm_uao);
   1787 	drm_gem_object_unreference_unlocked(obj);
   1788 #else
   1789 	addr = vm_mmap(obj->filp, 0, args->size,
   1790 		       PROT_READ | PROT_WRITE, MAP_SHARED,
   1791 		       args->offset);
   1792 	drm_gem_object_unreference_unlocked(obj);
   1793 	if (IS_ERR((void *)addr))
   1794 		return addr;
   1795 #endif
   1796 
   1797 	args->addr_ptr = (uint64_t) addr;
   1798 
   1799 	return 0;
   1800 }
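
/*
 * Illustrative userspace sketch (not part of this driver): the mmap ioctl
 * returns a CPU virtual address directly in addr_ptr, so the caller does not
 * mmap(2) the DRM fd itself.  A rough example, assuming libdrm and a
 * hypothetical bo_handle/bo_size:
 *
 *	struct drm_i915_gem_mmap m = {
 *		.handle = bo_handle,
 *		.offset = 0,
 *		.size = bo_size,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &m))
 *		err(1, "mmap ioctl");
 *	void *ptr = (void *)(uintptr_t)m.addr_ptr;
 */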
   1801 
   1802 #ifdef __NetBSD__		/* XXX gem gtt fault */
   1803 static int	i915_udv_fault(struct uvm_faultinfo *, vaddr_t,
   1804 		    struct vm_page **, int, int, vm_prot_t, int, paddr_t);
   1805 
   1806 int
   1807 i915_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
   1808     int npages, int centeridx, vm_prot_t access_type, int flags)
   1809 {
   1810 	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
   1811 	struct drm_gem_object *gem_obj =
   1812 	    container_of(uobj, struct drm_gem_object, gemo_uvmobj);
   1813 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
   1814 	struct drm_device *dev = obj->base.dev;
   1815 	struct drm_i915_private *dev_priv = dev->dev_private;
   1816 	voff_t byte_offset;
   1817 	pgoff_t page_offset;
   1818 	int ret = 0;
    1819 	bool write = ISSET(access_type, VM_PROT_WRITE) ? 1 : 0;
   1820 
   1821 	byte_offset = (ufi->entry->offset + (vaddr - ufi->entry->start));
   1822 	KASSERT(byte_offset <= obj->base.size);
   1823 	page_offset = (byte_offset >> PAGE_SHIFT);
   1824 
   1825 	intel_runtime_pm_get(dev_priv);
   1826 
   1827 	/* Thanks, uvm, but we don't need this lock.  */
   1828 	mutex_exit(uobj->vmobjlock);
   1829 
   1830 	ret = i915_mutex_lock_interruptible(dev);
   1831 	if (ret)
   1832 		goto out;
   1833 
   1834 	trace_i915_gem_object_fault(obj, page_offset, true, write);
   1835 
   1836 	ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
   1837 	if (ret)
   1838 		goto unlock;
   1839 
   1840 	if ((obj->cache_level != I915_CACHE_NONE) && !HAS_LLC(dev)) {
   1841 		ret = -EINVAL;
   1842 		goto unlock;
   1843 	}
   1844 
   1845 	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
   1846 	if (ret)
   1847 		goto unlock;
   1848 
   1849 	ret = i915_gem_object_set_to_gtt_domain(obj, write);
   1850 	if (ret)
   1851 		goto unpin;
   1852 
   1853 	ret = i915_gem_object_get_fence(obj);
   1854 	if (ret)
   1855 		goto unpin;
   1856 
   1857 	obj->fault_mappable = true;
   1858 
   1859 	/* XXX errno NetBSD->Linux */
   1860 	ret = -i915_udv_fault(ufi, vaddr, pps, npages, centeridx, access_type,
   1861 	    flags,
   1862 	    (dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj)));
   1863 unpin:
   1864 	i915_gem_object_ggtt_unpin(obj);
   1865 unlock:
   1866 	mutex_unlock(&dev->struct_mutex);
   1867 out:
   1868 	mutex_enter(uobj->vmobjlock);
   1869 	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
   1870 	if (ret == -ERESTART)
   1871 		uvm_wait("i915flt");
   1872 	/* XXX Deal with GPU hangs here...  */
   1873 	intel_runtime_pm_put(dev_priv);
   1874 	/* XXX errno Linux->NetBSD */
   1875 	return -ret;
   1876 }
   1877 
   1878 /*
   1879  * XXX i915_udv_fault is copypasta of udv_fault from uvm_device.c.
   1880  *
   1881  * XXX pmap_enter_default instead of pmap_enter because of a problem
   1882  * with using weak aliases in kernel modules or something.
   1883  */
   1884 int	pmap_enter_default(pmap_t, vaddr_t, paddr_t, vm_prot_t, unsigned);
   1885 
   1886 static int
   1887 i915_udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
   1888     int npages, int centeridx, vm_prot_t access_type, int flags,
   1889     paddr_t gtt_paddr)
   1890 {
   1891 	struct vm_map_entry *entry = ufi->entry;
   1892 	vaddr_t curr_va;
   1893 	off_t curr_offset;
   1894 	paddr_t paddr;
   1895 	u_int mmapflags;
   1896 	int lcv, retval;
   1897 	vm_prot_t mapprot;
   1898 	UVMHIST_FUNC("i915_udv_fault"); UVMHIST_CALLED(maphist);
    1899 	UVMHIST_LOG(maphist, "  flags=%d", flags, 0, 0, 0);
   1900 
   1901 	/*
   1902 	 * we do not allow device mappings to be mapped copy-on-write
   1903 	 * so we kill any attempt to do so here.
   1904 	 */
   1905 
   1906 	if (UVM_ET_ISCOPYONWRITE(entry)) {
   1907 		UVMHIST_LOG(maphist, "<- failed -- COW entry (etype=0x%x)",
   1908 		entry->etype, 0,0,0);
   1909 		return(EIO);
   1910 	}
   1911 
   1912 	/*
   1913 	 * now we must determine the offset in udv to use and the VA to
   1914 	 * use for pmap_enter.  note that we always use orig_map's pmap
   1915 	 * for pmap_enter (even if we have a submap).   since virtual
   1916 	 * addresses in a submap must match the main map, this is ok.
   1917 	 */
   1918 
   1919 	/* udv offset = (offset from start of entry) + entry's offset */
   1920 	curr_offset = entry->offset + (vaddr - entry->start);
   1921 	/* pmap va = vaddr (virtual address of pps[0]) */
   1922 	curr_va = vaddr;
   1923 
   1924 	/*
   1925 	 * loop over the page range entering in as needed
   1926 	 */
   1927 
   1928 	retval = 0;
   1929 	for (lcv = 0 ; lcv < npages ; lcv++, curr_offset += PAGE_SIZE,
   1930 	    curr_va += PAGE_SIZE) {
   1931 		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
   1932 			continue;
   1933 
   1934 		if (pps[lcv] == PGO_DONTCARE)
   1935 			continue;
   1936 
   1937 		paddr = (gtt_paddr + curr_offset);
   1938 		mmapflags = 0;
   1939 		mapprot = ufi->entry->protection;
   1940 		UVMHIST_LOG(maphist,
   1941 		    "  MAPPING: device: pm=0x%x, va=0x%x, pa=0x%lx, at=%d",
   1942 		    ufi->orig_map->pmap, curr_va, paddr, mapprot);
   1943 		if (pmap_enter_default(ufi->orig_map->pmap, curr_va, paddr, mapprot,
   1944 		    PMAP_CANFAIL | mapprot | mmapflags) != 0) {
   1945 			/*
   1946 			 * pmap_enter() didn't have the resource to
   1947 			 * enter this mapping.  Unlock everything,
   1948 			 * wait for the pagedaemon to free up some
   1949 			 * pages, and then tell uvm_fault() to start
   1950 			 * the fault again.
   1951 			 *
   1952 			 * XXX Needs some rethinking for the PGO_ALLPAGES
   1953 			 * XXX case.
   1954 			 */
   1955 			pmap_update(ufi->orig_map->pmap);	/* sync what we have so far */
   1956 			return (ERESTART);
   1957 		}
   1958 	}
   1959 
   1960 	pmap_update(ufi->orig_map->pmap);
   1961 	return (retval);
   1962 }
   1963 #else
   1964 /**
   1965  * i915_gem_fault - fault a page into the GTT
    1966  * @vma: VMA in question
    1967  * @vmf: fault info
    1968  *
    1969  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
   1970  * from userspace.  The fault handler takes care of binding the object to
   1971  * the GTT (if needed), allocating and programming a fence register (again,
   1972  * only if needed based on whether the old reg is still valid or the object
   1973  * is tiled) and inserting a new PTE into the faulting process.
   1974  *
   1975  * Note that the faulting process may involve evicting existing objects
   1976  * from the GTT and/or fence registers to make room.  So performance may
   1977  * suffer if the GTT working set is large or there are few fence registers
   1978  * left.
   1979  */
   1980 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
   1981 {
   1982 	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
   1983 	struct drm_device *dev = obj->base.dev;
   1984 	struct drm_i915_private *dev_priv = dev->dev_private;
   1985 	pgoff_t page_offset;
   1986 	unsigned long pfn;
   1987 	int ret = 0;
   1988 	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
   1989 
   1990 	intel_runtime_pm_get(dev_priv);
   1991 
   1992 	/* We don't use vmf->pgoff since that has the fake offset */
   1993 	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
   1994 		PAGE_SHIFT;
   1995 
   1996 	ret = i915_mutex_lock_interruptible(dev);
   1997 	if (ret)
   1998 		goto out;
   1999 
   2000 	trace_i915_gem_object_fault(obj, page_offset, true, write);
   2001 
   2002 	/* Try to flush the object off the GPU first without holding the lock.
   2003 	 * Upon reacquiring the lock, we will perform our sanity checks and then
   2004 	 * repeat the flush holding the lock in the normal manner to catch cases
   2005 	 * where we are gazumped.
   2006 	 */
   2007 	ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
   2008 	if (ret)
   2009 		goto unlock;
   2010 
   2011 	/* Access to snoopable pages through the GTT is incoherent. */
   2012 	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
   2013 		ret = -EINVAL;
   2014 		goto unlock;
   2015 	}
   2016 
   2017 	/* Now bind it into the GTT if needed */
   2018 	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
   2019 	if (ret)
   2020 		goto unlock;
   2021 
   2022 	ret = i915_gem_object_set_to_gtt_domain(obj, write);
   2023 	if (ret)
   2024 		goto unpin;
   2025 
   2026 	ret = i915_gem_object_get_fence(obj);
   2027 	if (ret)
   2028 		goto unpin;
   2029 
   2030 	obj->fault_mappable = true;
   2031 
   2032 	pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
   2033 	pfn >>= PAGE_SHIFT;
   2034 	pfn += page_offset;
   2035 
   2036 	/* Finally, remap it using the new GTT offset */
   2037 	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
   2038 unpin:
   2039 	i915_gem_object_ggtt_unpin(obj);
   2040 unlock:
   2041 	mutex_unlock(&dev->struct_mutex);
   2042 out:
   2043 	switch (ret) {
   2044 	case -EIO:
   2045 		/* If this -EIO is due to a gpu hang, give the reset code a
   2046 		 * chance to clean up the mess. Otherwise return the proper
   2047 		 * SIGBUS. */
   2048 		if (i915_terminally_wedged(&dev_priv->gpu_error)) {
   2049 			ret = VM_FAULT_SIGBUS;
   2050 			break;
   2051 		}
   2052 	case -EAGAIN:
   2053 		/*
   2054 		 * EAGAIN means the gpu is hung and we'll wait for the error
   2055 		 * handler to reset everything when re-faulting in
   2056 		 * i915_mutex_lock_interruptible.
   2057 		 */
   2058 	case 0:
   2059 	case -ERESTARTSYS:
   2060 	case -EINTR:
   2061 	case -EBUSY:
   2062 		/*
   2063 		 * EBUSY is ok: this just means that another thread
   2064 		 * already did the job.
   2065 		 */
   2066 		ret = VM_FAULT_NOPAGE;
   2067 		break;
   2068 	case -ENOMEM:
   2069 		ret = VM_FAULT_OOM;
   2070 		break;
   2071 	case -ENOSPC:
   2072 	case -EFAULT:
   2073 		ret = VM_FAULT_SIGBUS;
   2074 		break;
   2075 	default:
   2076 		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
   2077 		ret = VM_FAULT_SIGBUS;
   2078 		break;
   2079 	}
   2080 
   2081 	intel_runtime_pm_put(dev_priv);
   2082 	return ret;
   2083 }
   2084 
   2085 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
   2086 {
   2087 	struct i915_vma *vma;
   2088 
   2089 	/*
   2090 	 * Only the global gtt is relevant for gtt memory mappings, so restrict
   2091 	 * list traversal to objects bound into the global address space. Note
   2092 	 * that the active list should be empty, but better safe than sorry.
   2093 	 */
   2094 	WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
   2095 	list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
   2096 		i915_gem_release_mmap(vma->obj);
   2097 	list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
   2098 		i915_gem_release_mmap(vma->obj);
   2099 }
   2100 #endif
   2101 
   2102 /**
   2103  * i915_gem_release_mmap - remove physical page mappings
   2104  * @obj: obj in question
   2105  *
    2106  * Preserve the reservation of the mmap offset with the DRM core code, but
   2107  * relinquish ownership of the pages back to the system.
   2108  *
   2109  * It is vital that we remove the page mapping if we have mapped a tiled
   2110  * object through the GTT and then lose the fence register due to
   2111  * resource pressure. Similarly if the object has been moved out of the
    2112  * aperture, then pages mapped into userspace must be revoked. Removing the
   2113  * mapping will then trigger a page fault on the next user access, allowing
   2114  * fixup by i915_gem_fault().
   2115  */
   2116 void
   2117 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
   2118 {
   2119 	if (!obj->fault_mappable)
   2120 		return;
   2121 
   2122 #ifdef __NetBSD__		/* XXX gem gtt fault */
   2123 	{
   2124 		struct drm_device *const dev = obj->base.dev;
   2125 		struct drm_i915_private *const dev_priv = dev->dev_private;
   2126 		const paddr_t start = dev_priv->gtt.mappable_base +
   2127 		    i915_gem_obj_ggtt_offset(obj);
   2128 		const size_t size = obj->base.size;
   2129 		const paddr_t end = start + size;
   2130 		paddr_t pa;
   2131 
   2132 		KASSERT((start & (PAGE_SIZE - 1)) == 0);
   2133 		KASSERT((size & (PAGE_SIZE - 1)) == 0);
   2134 
   2135 		for (pa = start; pa < end; pa += PAGE_SIZE)
   2136 			pmap_pv_protect(pa, VM_PROT_NONE);
   2137 	}
   2138 #else
   2139 	drm_vma_node_unmap(&obj->base.vma_node,
   2140 			   obj->base.dev->anon_inode->i_mapping);
   2141 #endif
   2142 	obj->fault_mappable = false;
   2143 }
   2144 
   2145 uint32_t
   2146 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
   2147 {
   2148 	uint32_t gtt_size;
   2149 
   2150 	if (INTEL_INFO(dev)->gen >= 4 ||
   2151 	    tiling_mode == I915_TILING_NONE)
   2152 		return size;
   2153 
   2154 	/* Previous chips need a power-of-two fence region when tiling */
   2155 	if (INTEL_INFO(dev)->gen == 3)
   2156 		gtt_size = 1024*1024;
   2157 	else
   2158 		gtt_size = 512*1024;
   2159 
   2160 	while (gtt_size < size)
   2161 		gtt_size <<= 1;
   2162 
   2163 	return gtt_size;
   2164 }
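
/*
 * Worked example (informal): on gen3, a 1.5 MiB tiled object starts from a
 * 1 MiB fence region and doubles once, so i915_gem_get_gtt_size() returns
 * 2 MiB; the same object on gen2 starts from 512 KiB and doubles twice to the
 * same 2 MiB.  On gen4+ or for untiled objects the size is returned unchanged.
 */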
   2165 
   2166 /**
   2167  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
   2168  * @obj: object to check
   2169  *
   2170  * Return the required GTT alignment for an object, taking into account
   2171  * potential fence register mapping.
   2172  */
   2173 uint32_t
   2174 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
   2175 			   int tiling_mode, bool fenced)
   2176 {
   2177 	/*
   2178 	 * Minimum alignment is 4k (GTT page size), but might be greater
   2179 	 * if a fence register is needed for the object.
   2180 	 */
   2181 	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
   2182 	    tiling_mode == I915_TILING_NONE)
   2183 		return 4096;
   2184 
   2185 	/*
   2186 	 * Previous chips need to be aligned to the size of the smallest
   2187 	 * fence register that can contain the object.
   2188 	 */
   2189 	return i915_gem_get_gtt_size(dev, size, tiling_mode);
   2190 }
   2191 
   2192 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
   2193 {
   2194 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
   2195 	int ret;
   2196 
   2197 	if (drm_vma_node_has_offset(&obj->base.vma_node))
   2198 		return 0;
   2199 
   2200 	dev_priv->mm.shrinker_no_lock_stealing = true;
   2201 
   2202 	ret = drm_gem_create_mmap_offset(&obj->base);
   2203 	if (ret != -ENOSPC)
   2204 		goto out;
   2205 
   2206 	/* Badly fragmented mmap space? The only way we can recover
   2207 	 * space is by destroying unwanted objects. We can't randomly release
   2208 	 * mmap_offsets as userspace expects them to be persistent for the
   2209 	 * lifetime of the objects. The closest we can is to release the
    2210 	 * lifetime of the objects. The closest we can do is to release the
    2211 	 * offsets on purgeable objects by truncating them and marking them purged,
   2212 	 */
   2213 	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
   2214 	ret = drm_gem_create_mmap_offset(&obj->base);
   2215 	if (ret != -ENOSPC)
   2216 		goto out;
   2217 
   2218 	i915_gem_shrink_all(dev_priv);
   2219 	ret = drm_gem_create_mmap_offset(&obj->base);
   2220 out:
   2221 	dev_priv->mm.shrinker_no_lock_stealing = false;
   2222 
   2223 	return ret;
   2224 }
   2225 
   2226 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
   2227 {
   2228 	drm_gem_free_mmap_offset(&obj->base);
   2229 }
   2230 
   2231 int
   2232 i915_gem_mmap_gtt(struct drm_file *file,
   2233 		  struct drm_device *dev,
   2234 		  uint32_t handle,
   2235 		  uint64_t *offset)
   2236 {
   2237 	struct drm_i915_private *dev_priv = dev->dev_private;
   2238 	struct drm_gem_object *gobj;
   2239 	struct drm_i915_gem_object *obj;
   2240 	int ret;
   2241 
   2242 	ret = i915_mutex_lock_interruptible(dev);
   2243 	if (ret)
   2244 		return ret;
   2245 
   2246 	gobj = drm_gem_object_lookup(dev, file, handle);
   2247 	if (gobj == NULL) {
   2248 		ret = -ENOENT;
   2249 		goto unlock;
   2250 	}
   2251 	obj = to_intel_bo(gobj);
   2252 
   2253 	if (obj->base.size > dev_priv->gtt.mappable_end) {
   2254 		ret = -E2BIG;
   2255 		goto out;
   2256 	}
   2257 
   2258 	if (obj->madv != I915_MADV_WILLNEED) {
   2259 		DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
   2260 		ret = -EFAULT;
   2261 		goto out;
   2262 	}
   2263 
   2264 	ret = i915_gem_object_create_mmap_offset(obj);
   2265 	if (ret)
   2266 		goto out;
   2267 
   2268 	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
   2269 
   2270 out:
   2271 	drm_gem_object_unreference(&obj->base);
   2272 unlock:
   2273 	mutex_unlock(&dev->struct_mutex);
   2274 	return ret;
   2275 }
   2276 
   2277 /**
   2278  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
   2279  * @dev: DRM device
   2280  * @data: GTT mapping ioctl data
   2281  * @file: GEM object info
   2282  *
   2283  * Simply returns the fake offset to userspace so it can mmap it.
   2284  * The mmap call will end up in drm_gem_mmap(), which will set things
   2285  * up so we can get faults in the handler above.
   2286  *
   2287  * The fault handler will take care of binding the object into the GTT
   2288  * (since it may have been evicted to make room for something), allocating
   2289  * a fence register, and mapping the appropriate aperture address into
   2290  * userspace.
   2291  */
   2292 int
   2293 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
   2294 			struct drm_file *file)
   2295 {
   2296 	struct drm_i915_gem_mmap_gtt *args = data;
   2297 
   2298 	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
   2299 }
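
/*
 * Illustrative userspace sketch (not part of this driver): unlike the plain
 * mmap ioctl above, GTT mapping is a two-step dance -- fetch the fake offset,
 * then mmap(2) the DRM fd at that offset so the fault handler above can bind
 * the object and insert GTT mappings on demand.  Rough example, assuming
 * libdrm and a hypothetical bo_handle/bo_size:
 *
 *	struct drm_i915_gem_mmap_gtt mg = { .handle = bo_handle };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mg))
 *		err(1, "mmap_gtt");
 *	void *ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, mg.offset);
 */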
   2300 
   2301 /* Immediately discard the backing storage */
   2302 static void
   2303 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
   2304 {
   2305 #ifndef __NetBSD__
   2306 	struct inode *inode;
   2307 #endif
   2308 
   2309 	i915_gem_object_free_mmap_offset(obj);
   2310 
   2311 #ifdef __NetBSD__
   2312 	if (obj->base.gemo_shm_uao == NULL)
   2313 		return;
   2314 
   2315 	{
   2316 		struct uvm_object *const uobj = obj->base.gemo_shm_uao;
   2317 
   2318 		if (uobj != NULL) {
   2319 			/* XXX Calling pgo_put like this is bogus.  */
   2320 			mutex_enter(uobj->vmobjlock);
   2321 			(*uobj->pgops->pgo_put)(uobj, 0, obj->base.size,
   2322 			    (PGO_ALLPAGES | PGO_FREE));
   2323 		}
   2324 	}
   2325 #else
   2326 	if (obj->base.filp == NULL)
   2327 		return;
   2328 
   2329 	/* Our goal here is to return as much of the memory as
   2330 	 * is possible back to the system as we are called from OOM.
   2331 	 * To do this we must instruct the shmfs to drop all of its
   2332 	 * backing pages, *now*.
   2333 	 */
   2334 	inode = file_inode(obj->base.filp);
   2335 	shmem_truncate_range(inode, 0, (loff_t)-1);
   2336 #endif
   2337 
   2338 	obj->madv = __I915_MADV_PURGED;
   2339 }
   2340 
   2341 static inline int
   2342 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
   2343 {
   2344 	return obj->madv == I915_MADV_DONTNEED;
   2345 }
   2346 
   2347 #ifdef __NetBSD__
   2348 static void
   2349 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
   2350 {
   2351 	struct drm_device *const dev = obj->base.dev;
   2352 	struct vm_page *page;
   2353 	int ret;
   2354 
   2355 	/* XXX Cargo-culted from the Linux code.  */
   2356 	BUG_ON(obj->madv == __I915_MADV_PURGED);
   2357 
   2358 	ret = i915_gem_object_set_to_cpu_domain(obj, true);
   2359 	if (ret) {
   2360 		WARN_ON(ret != -EIO);
   2361 		i915_gem_clflush_object(obj, true);
   2362 		obj->base.read_domains = obj->base.write_domain =
   2363 		    I915_GEM_DOMAIN_CPU;
   2364 	}
   2365 
   2366 	if (i915_gem_object_needs_bit17_swizzle(obj))
   2367 		i915_gem_object_save_bit_17_swizzle(obj);
   2368 
   2369 	if (obj->madv == I915_MADV_DONTNEED)
   2370 		obj->dirty = 0;
   2371 
   2372 	if (obj->dirty) {
   2373 		TAILQ_FOREACH(page, &obj->igo_pageq, pageq.queue) {
   2374 			page->flags &= ~PG_CLEAN;
   2375 			/* XXX mark page accessed */
   2376 		}
   2377 	}
   2378 
   2379 	bus_dmamap_destroy(dev->dmat, obj->igo_dmamap);
   2380 	bus_dmamem_unwire_uvm_object(dev->dmat, obj->base.gemo_shm_uao, 0,
   2381 	    obj->base.size, obj->pages, obj->igo_nsegs);
   2382 
   2383 	kfree(obj->pages);
   2384 }
   2385 #else
   2386 static void
   2387 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
   2388 {
   2389 	struct sg_page_iter sg_iter;
   2390 	int ret;
   2391 
   2392 	BUG_ON(obj->madv == __I915_MADV_PURGED);
   2393 
   2394 	ret = i915_gem_object_set_to_cpu_domain(obj, true);
   2395 	if (ret) {
   2396 		/* In the event of a disaster, abandon all caches and
   2397 		 * hope for the best.
   2398 		 */
   2399 		WARN_ON(ret != -EIO);
   2400 		i915_gem_clflush_object(obj, true);
   2401 		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
   2402 	}
   2403 
   2404 	if (i915_gem_object_needs_bit17_swizzle(obj))
   2405 		i915_gem_object_save_bit_17_swizzle(obj);
   2406 
   2407 	if (obj->madv == I915_MADV_DONTNEED)
   2408 		obj->dirty = 0;
   2409 
   2410 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
   2411 		struct page *page = sg_page_iter_page(&sg_iter);
   2412 
   2413 		if (obj->dirty)
   2414 			set_page_dirty(page);
   2415 
   2416 		if (obj->madv == I915_MADV_WILLNEED)
   2417 			mark_page_accessed(page);
   2418 
   2419 		page_cache_release(page);
   2420 	}
   2421 	obj->dirty = 0;
   2422 
   2423 	sg_free_table(obj->pages);
   2424 	kfree(obj->pages);
   2425 }
   2426 #endif
   2427 
   2428 int
   2429 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
   2430 {
   2431 	const struct drm_i915_gem_object_ops *ops = obj->ops;
   2432 
   2433 	if (obj->pages == NULL)
   2434 		return 0;
   2435 
   2436 	if (obj->pages_pin_count)
   2437 		return -EBUSY;
   2438 
   2439 	BUG_ON(i915_gem_obj_bound_any(obj));
   2440 
   2441 	/* ->put_pages might need to allocate memory for the bit17 swizzle
   2442 	 * array, hence protect them from being reaped by removing them from gtt
   2443 	 * lists early. */
   2444 	list_del(&obj->global_list);
   2445 
   2446 	ops->put_pages(obj);
   2447 	obj->pages = NULL;
   2448 
   2449 	if (i915_gem_object_is_purgeable(obj))
   2450 		i915_gem_object_truncate(obj);
   2451 
   2452 	return 0;
   2453 }
   2454 
   2455 static unsigned long
   2456 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
   2457 		  bool purgeable_only)
   2458 {
   2459 	struct list_head still_bound_list;
   2460 	struct drm_i915_gem_object *obj, *next;
   2461 	unsigned long count = 0;
   2462 
   2463 	list_for_each_entry_safe(obj, next,
   2464 				 &dev_priv->mm.unbound_list,
   2465 				 global_list) {
   2466 		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
   2467 		    i915_gem_object_put_pages(obj) == 0) {
   2468 			count += obj->base.size >> PAGE_SHIFT;
   2469 			if (count >= target)
   2470 				return count;
   2471 		}
   2472 	}
   2473 
   2474 	/*
   2475 	 * As we may completely rewrite the bound list whilst unbinding
   2476 	 * (due to retiring requests) we have to strictly process only
   2477 	 * one element of the list at the time, and recheck the list
   2478 	 * on every iteration.
   2479 	 */
   2480 	INIT_LIST_HEAD(&still_bound_list);
   2481 	while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
   2482 		struct i915_vma *vma, *v;
   2483 
   2484 		obj = list_first_entry(&dev_priv->mm.bound_list,
   2485 				       typeof(*obj), global_list);
   2486 		list_move_tail(&obj->global_list, &still_bound_list);
   2487 
   2488 		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
   2489 			continue;
   2490 
   2491 		/*
   2492 		 * Hold a reference whilst we unbind this object, as we may
   2493 		 * end up waiting for and retiring requests. This might
   2494 		 * release the final reference (held by the active list)
    2495 		 * and result in the object being freed from under us.
   2497 		 *
   2498 		 * Note 1: Shrinking the bound list is special since only active
   2499 		 * (and hence bound objects) can contain such limbo objects, so
    2500 		 * (and hence bound) objects can contain such limbo objects, so
   2501 		 * The only other place where we have to be careful with active
   2502 		 * objects suddenly disappearing due to retiring requests is the
   2503 		 * eviction code.
   2504 		 *
   2505 		 * Note 2: Even though the bound list doesn't hold a reference
   2506 		 * to the object we can safely grab one here: The final object
   2507 		 * unreferencing and the bound_list are both protected by the
   2508 		 * dev->struct_mutex and so we won't ever be able to observe an
    2509 		 * object on the bound_list with a reference count of 0.
   2510 		 */
   2511 		drm_gem_object_reference(&obj->base);
   2512 
   2513 		list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
   2514 			if (i915_vma_unbind(vma))
   2515 				break;
   2516 
   2517 		if (i915_gem_object_put_pages(obj) == 0)
   2518 			count += obj->base.size >> PAGE_SHIFT;
   2519 
   2520 		drm_gem_object_unreference(&obj->base);
   2521 	}
   2522 	list_splice(&still_bound_list, &dev_priv->mm.bound_list);
   2523 
   2524 	return count;
   2525 }
   2526 
   2527 static unsigned long
   2528 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
   2529 {
   2530 	return __i915_gem_shrink(dev_priv, target, true);
   2531 }
   2532 
   2533 static unsigned long
   2534 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
   2535 {
   2536 	struct drm_i915_gem_object *obj, *next;
   2537 	long freed = 0;
   2538 
   2539 	i915_gem_evict_everything(dev_priv->dev);
   2540 
   2541 	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
   2542 				 global_list) {
   2543 		if (i915_gem_object_put_pages(obj) == 0)
   2544 			freed += obj->base.size >> PAGE_SHIFT;
   2545 	}
   2546 	return freed;
   2547 }
   2548 
   2549 #ifdef __NetBSD__
   2550 static int
   2551 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
   2552 {
   2553 	struct drm_device *const dev = obj->base.dev;
   2554 	struct vm_page *page;
   2555 	int error;
   2556 
   2557 	/* XXX Cargo-culted from the Linux code.  */
   2558 	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
   2559 	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
   2560 
   2561 	KASSERT(obj->pages == NULL);
   2562 	TAILQ_INIT(&obj->igo_pageq);
   2563 	obj->pages = kcalloc((obj->base.size / PAGE_SIZE),
   2564 	    sizeof(obj->pages[0]), GFP_KERNEL);
   2565 	if (obj->pages == NULL) {
   2566 		error = -ENOMEM;
   2567 		goto fail0;
   2568 	}
   2569 
   2570 	/* XXX errno NetBSD->Linux */
   2571 	error = -bus_dmamem_wire_uvm_object(dev->dmat, obj->base.gemo_shm_uao,
   2572 	    0, obj->base.size, &obj->igo_pageq, PAGE_SIZE, 0, obj->pages,
   2573 	    (obj->base.size / PAGE_SIZE), &obj->igo_nsegs, BUS_DMA_NOWAIT);
   2574 	if (error)
   2575 		/* XXX Try i915_gem_purge, i915_gem_shrink_all.  */
   2576 		goto fail1;
   2577 	KASSERT(0 < obj->igo_nsegs);
   2578 	KASSERT(obj->igo_nsegs <= (obj->base.size / PAGE_SIZE));
   2579 
   2580 	/*
   2581 	 * Check that the paddrs will fit in 40 bits, or 32 bits on i965.
   2582 	 *
   2583 	 * XXX This is wrong; we ought to pass this constraint to
   2584 	 * bus_dmamem_wire_uvm_object instead.
   2585 	 */
   2586 	TAILQ_FOREACH(page, &obj->igo_pageq, pageq.queue) {
   2587 		const uint64_t mask =
   2588 		    (IS_BROADWATER(dev) || IS_CRESTLINE(dev)?
   2589 			0xffffffffULL : 0xffffffffffULL);
   2590 		if (VM_PAGE_TO_PHYS(page) & ~mask) {
   2591 			DRM_ERROR("GEM physical address exceeds %u bits"
   2592 			    ": %"PRIxMAX"\n",
   2593 			    popcount64(mask),
   2594 			    (uintmax_t)VM_PAGE_TO_PHYS(page));
   2595 			error = -EIO;
   2596 			goto fail2;
   2597 		}
   2598 	}
   2599 
   2600 	/* XXX Should create the DMA map when creating the object.  */
   2601 
   2602 	/* XXX errno NetBSD->Linux */
   2603 	error = -bus_dmamap_create(dev->dmat, obj->base.size, obj->igo_nsegs,
   2604 	    PAGE_SIZE, 0, BUS_DMA_NOWAIT, &obj->igo_dmamap);
   2605 	if (error)
   2606 		goto fail2;
   2607 
   2608 	/* XXX Cargo-culted from the Linux code.  */
   2609 	if (i915_gem_object_needs_bit17_swizzle(obj))
   2610 		i915_gem_object_do_bit_17_swizzle(obj);
   2611 
   2612 	/* Success!  */
   2613 	return 0;
   2614 
   2615 fail2:	bus_dmamem_unwire_uvm_object(dev->dmat, obj->base.gemo_shm_uao, 0,
   2616 	    obj->base.size, obj->pages, (obj->base.size / PAGE_SIZE));
   2617 fail1:	kfree(obj->pages);
   2618 	obj->pages = NULL;
   2619 fail0:	KASSERT(error);
   2620 	return error;
   2621 }
   2622 #else
   2623 static int
   2624 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
   2625 {
   2626 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
   2627 	int page_count, i;
   2628 	struct address_space *mapping;
   2629 	struct sg_table *st;
   2630 	struct scatterlist *sg;
   2631 	struct sg_page_iter sg_iter;
   2632 	struct page *page;
   2633 	unsigned long last_pfn = 0;	/* suppress gcc warning */
   2634 	gfp_t gfp;
   2635 
   2636 	/* Assert that the object is not currently in any GPU domain. As it
   2637 	 * wasn't in the GTT, there shouldn't be any way it could have been in
   2638 	 * a GPU cache
   2639 	 */
   2640 	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
   2641 	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
   2642 
   2643 	st = kmalloc(sizeof(*st), GFP_KERNEL);
   2644 	if (st == NULL)
   2645 		return -ENOMEM;
   2646 
   2647 	page_count = obj->base.size / PAGE_SIZE;
   2648 	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
   2649 		kfree(st);
   2650 		return -ENOMEM;
   2651 	}
   2652 
   2653 	/* Get the list of pages out of our struct file.  They'll be pinned
   2654 	 * at this point until we release them.
   2655 	 *
   2656 	 * Fail silently without starting the shrinker
   2657 	 */
   2658 	mapping = file_inode(obj->base.filp)->i_mapping;
   2659 	gfp = mapping_gfp_mask(mapping);
   2660 	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
   2661 	gfp &= ~(__GFP_IO | __GFP_WAIT);
   2662 	sg = st->sgl;
   2663 	st->nents = 0;
   2664 	for (i = 0; i < page_count; i++) {
   2665 		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
   2666 		if (IS_ERR(page)) {
   2667 			i915_gem_purge(dev_priv, page_count);
   2668 			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
   2669 		}
   2670 		if (IS_ERR(page)) {
   2671 			/* We've tried hard to allocate the memory by reaping
   2672 			 * our own buffer, now let the real VM do its job and
   2673 			 * go down in flames if truly OOM.
   2674 			 */
   2675 			gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
   2676 			gfp |= __GFP_IO | __GFP_WAIT;
   2677 
   2678 			i915_gem_shrink_all(dev_priv);
   2679 			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
   2680 			if (IS_ERR(page))
   2681 				goto err_pages;
   2682 
   2683 			gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
   2684 			gfp &= ~(__GFP_IO | __GFP_WAIT);
   2685 		}
   2686 #ifdef CONFIG_SWIOTLB
   2687 		if (swiotlb_nr_tbl()) {
   2688 			st->nents++;
   2689 			sg_set_page(sg, page, PAGE_SIZE, 0);
   2690 			sg = sg_next(sg);
   2691 			continue;
   2692 		}
   2693 #endif
   2694 		if (!i || page_to_pfn(page) != last_pfn + 1) {
   2695 			if (i)
   2696 				sg = sg_next(sg);
   2697 			st->nents++;
   2698 			sg_set_page(sg, page, PAGE_SIZE, 0);
   2699 		} else {
   2700 			sg->length += PAGE_SIZE;
   2701 		}
   2702 		last_pfn = page_to_pfn(page);
   2703 
   2704 		/* Check that the i965g/gm workaround works. */
   2705 		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
   2706 	}
   2707 #ifdef CONFIG_SWIOTLB
   2708 	if (!swiotlb_nr_tbl())
   2709 #endif
   2710 		sg_mark_end(sg);
   2711 	obj->pages = st;
   2712 
   2713 	if (i915_gem_object_needs_bit17_swizzle(obj))
   2714 		i915_gem_object_do_bit_17_swizzle(obj);
   2715 
   2716 	return 0;
   2717 
   2718 err_pages:
   2719 	sg_mark_end(sg);
   2720 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
   2721 		page_cache_release(sg_page_iter_page(&sg_iter));
   2722 	sg_free_table(st);
   2723 	kfree(st);
   2724 	return PTR_ERR(page);
   2725 }
   2726 #endif
   2727 
   2728 /* Ensure that the associated pages are gathered from the backing storage
   2729  * and pinned into our object. i915_gem_object_get_pages() may be called
   2730  * multiple times before they are released by a single call to
   2731  * i915_gem_object_put_pages() - once the pages are no longer referenced
   2732  * either as a result of memory pressure (reaping pages under the shrinker)
   2733  * or as the object is itself released.
   2734  */
   2735 int
   2736 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
   2737 {
   2738 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
   2739 	const struct drm_i915_gem_object_ops *ops = obj->ops;
   2740 	int ret;
   2741 
   2742 	if (obj->pages)
   2743 		return 0;
   2744 
   2745 	if (obj->madv != I915_MADV_WILLNEED) {
   2746 		DRM_DEBUG("Attempting to obtain a purgeable object\n");
   2747 		return -EFAULT;
   2748 	}
   2749 
   2750 	BUG_ON(obj->pages_pin_count);
   2751 
   2752 	ret = ops->get_pages(obj);
   2753 	if (ret)
   2754 		return ret;
   2755 
   2756 	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
   2757 	return 0;
   2758 }
   2759 
   2760 static void
   2761 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
   2762 			       struct intel_ring_buffer *ring)
   2763 {
   2764 	struct drm_device *dev = obj->base.dev;
   2765 	struct drm_i915_private *dev_priv = dev->dev_private;
   2766 	u32 seqno = intel_ring_get_seqno(ring);
   2767 
   2768 	BUG_ON(ring == NULL);
   2769 	if (obj->ring != ring && obj->last_write_seqno) {
   2770 		/* Keep the seqno relative to the current ring */
   2771 		obj->last_write_seqno = seqno;
   2772 	}
   2773 	obj->ring = ring;
   2774 
   2775 	/* Add a reference if we're newly entering the active list. */
   2776 	if (!obj->active) {
   2777 		drm_gem_object_reference(&obj->base);
   2778 		obj->active = 1;
   2779 	}
   2780 
   2781 	list_move_tail(&obj->ring_list, &ring->active_list);
   2782 
   2783 	obj->last_read_seqno = seqno;
   2784 
   2785 	if (obj->fenced_gpu_access) {
   2786 		obj->last_fenced_seqno = seqno;
   2787 
   2788 		/* Bump MRU to take account of the delayed flush */
   2789 		if (obj->fence_reg != I915_FENCE_REG_NONE) {
   2790 			struct drm_i915_fence_reg *reg;
   2791 
   2792 			reg = &dev_priv->fence_regs[obj->fence_reg];
   2793 			list_move_tail(&reg->lru_list,
   2794 				       &dev_priv->mm.fence_list);
   2795 		}
   2796 	}
   2797 }
   2798 
   2799 void i915_vma_move_to_active(struct i915_vma *vma,
   2800 			     struct intel_ring_buffer *ring)
   2801 {
   2802 	list_move_tail(&vma->mm_list, &vma->vm->active_list);
   2803 	return i915_gem_object_move_to_active(vma->obj, ring);
   2804 }
   2805 
   2806 static void
   2807 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
   2808 {
   2809 	struct drm_device *dev = obj->base.dev;
   2810 	struct drm_i915_private *dev_priv = dev->dev_private;
   2811 	struct i915_address_space *vm;
   2812 	struct i915_vma *vma;
   2813 
   2814 	if ((obj->base.write_domain & I915_GEM_DOMAIN_GTT) != 0) {
   2815 		printk(KERN_ERR "%s: %p 0x%x flushing gtt\n", __func__, obj,
   2816 			obj->base.write_domain);
   2817 		i915_gem_object_flush_gtt_write_domain(obj);
   2818 	}
   2819 	if ((obj->base.write_domain & I915_GEM_DOMAIN_CPU) != 0) {
   2820 		printk(KERN_ERR "%s: %p 0x%x flushing cpu\n", __func__, obj,
   2821 			obj->base.write_domain);
   2822 		i915_gem_object_flush_cpu_write_domain(obj, false);
   2823 	}
   2824 	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
   2825 	BUG_ON(!obj->active);
   2826 
   2827 	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
   2828 		vma = i915_gem_obj_to_vma(obj, vm);
   2829 		if (vma && !list_empty(&vma->mm_list))
   2830 			list_move_tail(&vma->mm_list, &vm->inactive_list);
   2831 	}
   2832 
   2833 	list_del_init(&obj->ring_list);
   2834 	obj->ring = NULL;
   2835 
   2836 	obj->last_read_seqno = 0;
   2837 	obj->last_write_seqno = 0;
   2838 	obj->base.write_domain = 0;
   2839 
   2840 	obj->last_fenced_seqno = 0;
   2841 	obj->fenced_gpu_access = false;
   2842 
   2843 	obj->active = 0;
   2844 	drm_gem_object_unreference(&obj->base);
   2845 
   2846 	WARN_ON(i915_verify_lists(dev));
   2847 }
   2848 
   2849 static int
   2850 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
   2851 {
   2852 	struct drm_i915_private *dev_priv = dev->dev_private;
   2853 	struct intel_ring_buffer *ring;
   2854 	int ret, i, j;
   2855 
   2856 	/* Carefully retire all requests without writing to the rings */
   2857 	for_each_ring(ring, dev_priv, i) {
   2858 		ret = intel_ring_idle(ring);
   2859 		if (ret)
   2860 			return ret;
   2861 	}
   2862 	i915_gem_retire_requests(dev);
   2863 
   2864 	/* Finally reset hw state */
   2865 	for_each_ring(ring, dev_priv, i) {
   2866 		intel_ring_init_seqno(ring, seqno);
   2867 
   2868 		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
   2869 			ring->sync_seqno[j] = 0;
   2870 	}
   2871 
   2872 	return 0;
   2873 }
   2874 
   2875 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
   2876 {
   2877 	struct drm_i915_private *dev_priv = dev->dev_private;
   2878 	int ret;
   2879 
   2880 	if (seqno == 0)
   2881 		return -EINVAL;
   2882 
    2883 	/* The HWS page needs to be set to a seqno less than the one we
    2884 	 * will inject into the ring
   2885 	 */
   2886 	ret = i915_gem_init_seqno(dev, seqno - 1);
   2887 	if (ret)
   2888 		return ret;
   2889 
   2890 	/* Carefully set the last_seqno value so that wrap
   2891 	 * detection still works
   2892 	 */
   2893 	dev_priv->next_seqno = seqno;
   2894 	dev_priv->last_seqno = seqno - 1;
   2895 	if (dev_priv->last_seqno == 0)
   2896 		dev_priv->last_seqno--;
   2897 
   2898 	return 0;
   2899 }
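
/*
 * Informal example of the wrap handling above: i915_gem_set_seqno(dev, 1)
 * leaves next_seqno == 1 and last_seqno == 0, which the code then decrements
 * to 0xffffffff, so the wrap-aware seqno comparison still orders the pre-wrap
 * and post-wrap values correctly.
 */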
   2900 
   2901 int
   2902 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
   2903 {
   2904 	struct drm_i915_private *dev_priv = dev->dev_private;
   2905 
   2906 	/* reserve 0 for non-seqno */
   2907 	if (dev_priv->next_seqno == 0) {
   2908 		int ret = i915_gem_init_seqno(dev, 0);
   2909 		if (ret)
   2910 			return ret;
   2911 
   2912 		dev_priv->next_seqno = 1;
   2913 	}
   2914 
   2915 	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
   2916 	return 0;
   2917 }
   2918 
   2919 int __i915_add_request(struct intel_ring_buffer *ring,
   2920 		       struct drm_file *file,
   2921 		       struct drm_i915_gem_object *obj,
   2922 		       u32 *out_seqno)
   2923 {
   2924 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
   2925 	struct drm_i915_gem_request *request;
   2926 	u32 request_ring_position, request_start;
   2927 	int ret;
   2928 
   2929 	request_start = intel_ring_get_tail(ring);
   2930 	/*
   2931 	 * Emit any outstanding flushes - execbuf can fail to emit the flush
   2932 	 * after having emitted the batchbuffer command. Hence we need to fix
   2933 	 * things up similar to emitting the lazy request. The difference here
   2934 	 * is that the flush _must_ happen before the next request, no matter
   2935 	 * what.
   2936 	 */
   2937 	ret = intel_ring_flush_all_caches(ring);
   2938 	if (ret)
   2939 		return ret;
   2940 
   2941 	request = ring->preallocated_lazy_request;
   2942 	if (WARN_ON(request == NULL))
   2943 		return -ENOMEM;
   2944 
   2945 	/* Record the position of the start of the request so that
   2946 	 * should we detect the updated seqno part-way through the
   2947 	 * GPU processing the request, we never over-estimate the
   2948 	 * position of the head.
   2949 	 */
   2950 	request_ring_position = intel_ring_get_tail(ring);
   2951 
   2952 	ret = ring->add_request(ring);
   2953 	if (ret)
   2954 		return ret;
   2955 
   2956 	request->seqno = intel_ring_get_seqno(ring);
   2957 	request->ring = ring;
   2958 	request->head = request_start;
   2959 	request->tail = request_ring_position;
   2960 
   2961 	/* Whilst this request exists, batch_obj will be on the
   2962 	 * active_list, and so will hold the active reference. Only when this
    2963 	 * request is retired will the batch_obj be moved onto the
   2964 	 * inactive_list and lose its active reference. Hence we do not need
   2965 	 * to explicitly hold another reference here.
   2966 	 */
   2967 	request->batch_obj = obj;
   2968 
   2969 	/* Hold a reference to the current context so that we can inspect
   2970 	 * it later in case a hangcheck error event fires.
   2971 	 */
   2972 	request->ctx = ring->last_context;
   2973 	if (request->ctx)
   2974 		i915_gem_context_reference(request->ctx);
   2975 
   2976 	request->emitted_jiffies = jiffies;
   2977 	list_add_tail(&request->list, &ring->request_list);
   2978 	request->file_priv = NULL;
   2979 
   2980 	if (file) {
   2981 		struct drm_i915_file_private *file_priv = file->driver_priv;
   2982 
   2983 		spin_lock(&file_priv->mm.lock);
   2984 		request->file_priv = file_priv;
   2985 		list_add_tail(&request->client_list,
   2986 			      &file_priv->mm.request_list);
   2987 		spin_unlock(&file_priv->mm.lock);
   2988 	}
   2989 
   2990 	trace_i915_gem_request_add(ring, request->seqno);
   2991 	ring->outstanding_lazy_seqno = 0;
   2992 	ring->preallocated_lazy_request = NULL;
   2993 
   2994 	if (!dev_priv->ums.mm_suspended) {
   2995 		i915_queue_hangcheck(ring->dev);
   2996 
   2997 		cancel_delayed_work_sync(&dev_priv->mm.idle_work);
   2998 		queue_delayed_work(dev_priv->wq,
   2999 				   &dev_priv->mm.retire_work,
   3000 				   round_jiffies_up_relative(HZ));
   3001 		intel_mark_busy(dev_priv->dev);
   3002 	}
   3003 
   3004 	if (out_seqno)
   3005 		*out_seqno = request->seqno;
   3006 	return 0;
   3007 }
   3008 
   3009 static inline void
   3010 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
   3011 {
   3012 	struct drm_i915_file_private *file_priv = request->file_priv;
   3013 
   3014 	if (!file_priv)
   3015 		return;
   3016 
   3017 	spin_lock(&file_priv->mm.lock);
   3018 	list_del(&request->client_list);
   3019 	request->file_priv = NULL;
   3020 	spin_unlock(&file_priv->mm.lock);
   3021 }
   3022 
   3023 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
   3024 				   const struct i915_hw_context *ctx)
   3025 {
   3026 	unsigned long elapsed;
   3027 
   3028 	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
   3029 
   3030 	if (ctx->hang_stats.banned)
   3031 		return true;
   3032 
   3033 	if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
   3034 		if (!i915_gem_context_is_default(ctx)) {
   3035 			DRM_DEBUG("context hanging too fast, banning!\n");
   3036 			return true;
   3037 		} else if (dev_priv->gpu_error.stop_rings == 0) {
   3038 			DRM_ERROR("gpu hanging too fast, banning!\n");
   3039 			return true;
   3040 		}
   3041 	}
   3042 
   3043 	return false;
   3044 }
   3045 
   3046 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
   3047 				  struct i915_hw_context *ctx,
   3048 				  const bool guilty)
   3049 {
   3050 	struct i915_ctx_hang_stats *hs;
   3051 
   3052 	if (WARN_ON(!ctx))
   3053 		return;
   3054 
   3055 	hs = &ctx->hang_stats;
   3056 
   3057 	if (guilty) {
   3058 		hs->banned = i915_context_is_banned(dev_priv, ctx);
   3059 		hs->batch_active++;
   3060 		hs->guilty_ts = get_seconds();
   3061 	} else {
   3062 		hs->batch_pending++;
   3063 	}
   3064 }
   3065 
   3066 static void i915_gem_free_request(struct drm_i915_gem_request *request)
   3067 {
   3068 	list_del(&request->list);
   3069 	i915_gem_request_remove_from_client(request);
   3070 
   3071 	if (request->ctx)
   3072 		i915_gem_context_unreference(request->ctx);
   3073 
   3074 	kfree(request);
   3075 }
   3076 
   3077 struct drm_i915_gem_request *
   3078 i915_gem_find_active_request(struct intel_ring_buffer *ring)
   3079 {
   3080 	struct drm_i915_gem_request *request;
   3081 	u32 completed_seqno;
   3082 
   3083 	completed_seqno = ring->get_seqno(ring, false);
   3084 
   3085 	list_for_each_entry(request, &ring->request_list, list) {
   3086 		if (i915_seqno_passed(completed_seqno, request->seqno))
   3087 			continue;
   3088 
   3089 		return request;
   3090 	}
   3091 
   3092 	return NULL;
   3093 }
   3094 
   3095 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
   3096 				       struct intel_ring_buffer *ring)
   3097 {
   3098 	struct drm_i915_gem_request *request;
   3099 	bool ring_hung;
   3100 
   3101 	request = i915_gem_find_active_request(ring);
   3102 
   3103 	if (request == NULL)
   3104 		return;
   3105 
   3106 	ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
   3107 
   3108 	i915_set_reset_status(dev_priv, request->ctx, ring_hung);
   3109 
   3110 	list_for_each_entry_continue(request, &ring->request_list, list)
   3111 		i915_set_reset_status(dev_priv, request->ctx, false);
   3112 }
   3113 
   3114 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
   3115 					struct intel_ring_buffer *ring)
   3116 {
   3117 	while (!list_empty(&ring->active_list)) {
   3118 		struct drm_i915_gem_object *obj;
   3119 
   3120 		obj = list_first_entry(&ring->active_list,
   3121 				       struct drm_i915_gem_object,
   3122 				       ring_list);
   3123 
   3124 		i915_gem_object_move_to_inactive(obj);
   3125 	}
   3126 
   3127 	/*
   3128 	 * We must free the requests after all the corresponding objects have
    3129 	 * been moved off the active lists, in the same order as the normal
    3130 	 * retire_requests path. This is important if objects hold
    3131 	 * implicit references on things such as ppgtt address spaces through
   3132 	 * the request.
   3133 	 */
   3134 	while (!list_empty(&ring->request_list)) {
   3135 		struct drm_i915_gem_request *request;
   3136 
   3137 		request = list_first_entry(&ring->request_list,
   3138 					   struct drm_i915_gem_request,
   3139 					   list);
   3140 
   3141 		i915_gem_free_request(request);
   3142 	}
   3143 }
   3144 
   3145 void i915_gem_restore_fences(struct drm_device *dev)
   3146 {
   3147 	struct drm_i915_private *dev_priv = dev->dev_private;
   3148 	int i;
   3149 
   3150 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
   3151 		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
   3152 
   3153 		/*
   3154 		 * Commit delayed tiling changes if we have an object still
   3155 		 * attached to the fence, otherwise just clear the fence.
   3156 		 */
   3157 		if (reg->obj) {
   3158 			i915_gem_object_update_fence(reg->obj, reg,
   3159 						     reg->obj->tiling_mode);
   3160 		} else {
   3161 			i915_gem_write_fence(dev, i, NULL);
   3162 		}
   3163 	}
   3164 }
   3165 
   3166 void i915_gem_reset(struct drm_device *dev)
   3167 {
   3168 	struct drm_i915_private *dev_priv = dev->dev_private;
   3169 	struct intel_ring_buffer *ring;
   3170 	int i;
   3171 
   3172 	/*
   3173 	 * Before we free the objects from the requests, we need to inspect
   3174 	 * them for finding the guilty party. As the requests only borrow
   3175 	 * their reference to the objects, the inspection must be done first.
   3176 	 */
   3177 	for_each_ring(ring, dev_priv, i)
   3178 		i915_gem_reset_ring_status(dev_priv, ring);
   3179 
   3180 	for_each_ring(ring, dev_priv, i)
   3181 		i915_gem_reset_ring_cleanup(dev_priv, ring);
   3182 
   3183 	i915_gem_cleanup_ringbuffer(dev);
   3184 
   3185 	i915_gem_context_reset(dev);
   3186 
   3187 	i915_gem_restore_fences(dev);
   3188 }
   3189 
   3190 /**
   3191  * This function clears the request list as sequence numbers are passed.
   3192  */
   3193 static void
   3194 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
   3195 {
   3196 	uint32_t seqno;
   3197 
   3198 	if (list_empty(&ring->request_list))
   3199 		return;
   3200 
   3201 	WARN_ON(i915_verify_lists(ring->dev));
   3202 
   3203 	seqno = ring->get_seqno(ring, true);
   3204 
   3205 	/* Move any buffers on the active list that are no longer referenced
   3206 	 * by the ringbuffer to the flushing/inactive lists as appropriate,
   3207 	 * before we free the context associated with the requests.
   3208 	 */
   3209 	while (!list_empty(&ring->active_list)) {
   3210 		struct drm_i915_gem_object *obj;
   3211 
   3212 		obj = list_first_entry(&ring->active_list,
   3213 				      struct drm_i915_gem_object,
   3214 				      ring_list);
   3215 
   3216 		if (!i915_seqno_passed(seqno, obj->last_read_seqno))
   3217 			break;
   3218 
   3219 		i915_gem_object_move_to_inactive(obj);
   3220 	}
   3221 
   3222 
   3223 	while (!list_empty(&ring->request_list)) {
   3224 		struct drm_i915_gem_request *request;
   3225 
   3226 		request = list_first_entry(&ring->request_list,
   3227 					   struct drm_i915_gem_request,
   3228 					   list);
   3229 
   3230 		if (!i915_seqno_passed(seqno, request->seqno))
   3231 			break;
   3232 
   3233 		trace_i915_gem_request_retire(ring, request->seqno);
   3234 		/* We know the GPU must have read the request to have
   3235 		 * sent us the seqno + interrupt, so use the position
   3236 		 * of tail of the request to update the last known position
    3237 	 * of the tail of the request to update the last known position
   3238 		 */
   3239 		ring->last_retired_head = request->tail;
   3240 
   3241 		i915_gem_free_request(request);
   3242 	}
   3243 
   3244 	if (unlikely(ring->trace_irq_seqno &&
   3245 		     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
   3246 		ring->irq_put(ring);
   3247 		ring->trace_irq_seqno = 0;
   3248 	}
   3249 
   3250 	WARN_ON(i915_verify_lists(ring->dev));
   3251 }
   3252 
   3253 bool
   3254 i915_gem_retire_requests(struct drm_device *dev)
   3255 {
   3256 	struct drm_i915_private *dev_priv = dev->dev_private;
   3257 	struct intel_ring_buffer *ring;
   3258 	bool idle = true;
   3259 	int i;
   3260 
   3261 	for_each_ring(ring, dev_priv, i) {
   3262 		i915_gem_retire_requests_ring(ring);
   3263 		idle &= list_empty(&ring->request_list);
   3264 	}
   3265 
   3266 	if (idle)
   3267 		mod_delayed_work(dev_priv->wq,
   3268 				   &dev_priv->mm.idle_work,
   3269 				   msecs_to_jiffies(100));
   3270 
   3271 	return idle;
   3272 }
   3273 
   3274 static void
   3275 i915_gem_retire_work_handler(struct work_struct *work)
   3276 {
   3277 	struct drm_i915_private *dev_priv =
   3278 		container_of(work, typeof(*dev_priv), mm.retire_work.work);
   3279 	struct drm_device *dev = dev_priv->dev;
   3280 	bool idle;
   3281 
   3282 	/* Come back later if the device is busy... */
   3283 	idle = false;
   3284 	if (mutex_trylock(&dev->struct_mutex)) {
   3285 		idle = i915_gem_retire_requests(dev);
   3286 		mutex_unlock(&dev->struct_mutex);
   3287 	}
   3288 	if (!idle)
   3289 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
   3290 				   round_jiffies_up_relative(HZ));
   3291 }
   3292 
   3293 static void
   3294 i915_gem_idle_work_handler(struct work_struct *work)
   3295 {
   3296 	struct drm_i915_private *dev_priv =
   3297 		container_of(work, typeof(*dev_priv), mm.idle_work.work);
   3298 
   3299 	intel_mark_idle(dev_priv->dev);
   3300 }
   3301 
   3302 /**
   3303  * Ensures that an object will eventually get non-busy by flushing any required
    3304  * write domains, emitting any outstanding lazy request and retiring any
   3305  * completed requests.
   3306  */
   3307 static int
   3308 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
   3309 {
   3310 	int ret;
   3311 
   3312 	if (obj->active) {
   3313 		ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
   3314 		if (ret)
   3315 			return ret;
   3316 
   3317 		i915_gem_retire_requests_ring(obj->ring);
   3318 	}
   3319 
   3320 	return 0;
   3321 }
   3322 
   3323 /**
   3324  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
   3325  * @DRM_IOCTL_ARGS: standard ioctl arguments
   3326  *
   3327  * Returns 0 if successful, else an error is returned with the remaining time in
   3328  * the timeout parameter.
   3329  *  -ETIME: object is still busy after timeout
   3330  *  -ERESTARTSYS: signal interrupted the wait
    3331  *  -ENOENT: object doesn't exist
   3332  * Also possible, but rare:
   3333  *  -EAGAIN: GPU wedged
   3334  *  -ENOMEM: damn
   3335  *  -ENODEV: Internal IRQ fail
   3336  *  -E?: The add request failed
   3337  *
   3338  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
   3339  * non-zero timeout parameter the wait ioctl will wait for the given number of
   3340  * nanoseconds on an object becoming unbusy. Since the wait itself does so
   3341  * without holding struct_mutex the object may become re-busied before this
    3342  * function completes. A similar but shorter race condition exists in the busy
    3343  * ioctl.
   3344  */
   3345 int
   3346 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
   3347 {
   3348 	struct drm_i915_private *dev_priv = dev->dev_private;
   3349 	struct drm_i915_gem_wait *args = data;
   3350 	struct drm_gem_object *gobj;
   3351 	struct drm_i915_gem_object *obj;
   3352 	struct intel_ring_buffer *ring = NULL;
   3353 	struct timespec timeout_stack, *timeout = NULL;
   3354 	unsigned reset_counter;
   3355 	u32 seqno = 0;
   3356 	int ret = 0;
   3357 
   3358 	if (args->timeout_ns >= 0) {
   3359 		timeout_stack = ns_to_timespec(args->timeout_ns);
   3360 		timeout = &timeout_stack;
   3361 	}
   3362 
   3363 	ret = i915_mutex_lock_interruptible(dev);
   3364 	if (ret)
   3365 		return ret;
   3366 
   3367 	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
   3368 	if (gobj == NULL) {
   3369 		mutex_unlock(&dev->struct_mutex);
   3370 		return -ENOENT;
   3371 	}
   3372 	obj = to_intel_bo(gobj);
   3373 
   3374 	/* Need to make sure the object gets inactive eventually. */
   3375 	ret = i915_gem_object_flush_active(obj);
   3376 	if (ret)
   3377 		goto out;
   3378 
   3379 	if (obj->active) {
   3380 		seqno = obj->last_read_seqno;
   3381 		ring = obj->ring;
   3382 	}
   3383 
   3384 	if (seqno == 0)
   3385 		 goto out;
   3386 
   3387 	/* Do this after OLR check to make sure we make forward progress polling
   3388 	 * on this IOCTL with a 0 timeout (like busy ioctl)
   3389 	 */
   3390 	if (!args->timeout_ns) {
   3391 		ret = -ETIME;
   3392 		goto out;
   3393 	}
   3394 
   3395 	drm_gem_object_unreference(&obj->base);
   3396 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
   3397 	mutex_unlock(&dev->struct_mutex);
   3398 
   3399 	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
   3400 	if (timeout)
   3401 		args->timeout_ns = timespec_to_ns(timeout);
   3402 	return ret;
   3403 
   3404 out:
   3405 	drm_gem_object_unreference(&obj->base);
   3406 	mutex_unlock(&dev->struct_mutex);
   3407 	return ret;
   3408 }
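/*
 * Illustrative sketch, not part of the driver: a userspace client could
 * drive this ioctl roughly as below.  It assumes libdrm's drmIoctl() and
 * the drm_i915_gem_wait structure from i915_drm.h; the fd and handle are
 * hypothetical, and error handling is abbreviated.
 */
#if 0
static int
example_wait_for_bo(int fd, uint32_t handle)
{
	struct drm_i915_gem_wait wait;

	memset(&wait, 0, sizeof(wait));
	wait.bo_handle = handle;
	wait.timeout_ns = 10 * 1000 * 1000;	/* wait at most 10 ms */

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) != 0) {
		if (errno == ETIME)
			return -ETIME;	/* still busy; timeout_ns holds the remaining time */
		return -errno;
	}
	return 0;	/* object is idle */
}
#endif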
   3409 
   3410 /**
   3411  * i915_gem_object_sync - sync an object to a ring.
   3412  *
   3413  * @obj: object which may be in use on another ring.
   3414  * @to: ring we wish to use the object on. May be NULL.
   3415  *
   3416  * This code is meant to abstract object synchronization with the GPU.
   3417  * Calling with NULL implies synchronizing the object with the CPU
   3418  * rather than a particular GPU ring.
   3419  *
   3420  * Returns 0 if successful, else propagates up the lower layer error.
   3421  */
   3422 int
   3423 i915_gem_object_sync(struct drm_i915_gem_object *obj,
   3424 		     struct intel_ring_buffer *to)
   3425 {
   3426 	struct intel_ring_buffer *from = obj->ring;
   3427 	u32 seqno;
   3428 	int ret, idx;
   3429 
   3430 	if (from == NULL || to == from)
   3431 		return 0;
   3432 
   3433 	if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
   3434 		return i915_gem_object_wait_rendering(obj, false);
   3435 
   3436 	idx = intel_ring_sync_index(from, to);
   3437 
   3438 	seqno = obj->last_read_seqno;
   3439 	if (seqno <= from->sync_seqno[idx])
   3440 		return 0;
   3441 
   3442 	ret = i915_gem_check_olr(obj->ring, seqno);
   3443 	if (ret)
   3444 		return ret;
   3445 
   3446 	trace_i915_gem_ring_sync_to(from, to, seqno);
   3447 	ret = to->sync_to(to, from, seqno);
   3448 	if (!ret)
   3449 		/* We use last_read_seqno because sync_to()
   3450 		 * might have just caused seqno wrap under
   3451 		 * the radar.
   3452 		 */
   3453 		from->sync_seqno[idx] = obj->last_read_seqno;
   3454 
   3455 	return ret;
   3456 }
   3457 
   3458 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
   3459 {
   3460 	u32 old_write_domain, old_read_domains;
   3461 
   3462 	/* Force a pagefault for domain tracking on next user access */
   3463 	i915_gem_release_mmap(obj);
   3464 
   3465 	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
   3466 		return;
   3467 
   3468 	/* Wait for any direct GTT access to complete */
   3469 	mb();
   3470 
   3471 	old_read_domains = obj->base.read_domains;
   3472 	old_write_domain = obj->base.write_domain;
   3473 
   3474 	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
   3475 	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
   3476 
   3477 	trace_i915_gem_object_change_domain(obj,
   3478 					    old_read_domains,
   3479 					    old_write_domain);
   3480 }
   3481 
   3482 int i915_vma_unbind(struct i915_vma *vma)
   3483 {
   3484 	struct drm_i915_gem_object *obj = vma->obj;
   3485 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
   3486 	int ret;
   3487 
   3488 	if (list_empty(&vma->vma_link))
   3489 		return 0;
   3490 
   3491 	if (!drm_mm_node_allocated(&vma->node)) {
   3492 		i915_gem_vma_destroy(vma);
   3493 		return 0;
   3494 	}
   3495 
   3496 	if (vma->pin_count)
   3497 		return -EBUSY;
   3498 
   3499 	BUG_ON(obj->pages == NULL);
   3500 
   3501 	ret = i915_gem_object_finish_gpu(obj);
   3502 	if (ret)
   3503 		return ret;
    3504 	/* Continue on if we fail due to EIO; the GPU is hung, so we
    3505 	 * should be safe, and we need to clean up or else we might
   3506 	 * cause memory corruption through use-after-free.
   3507 	 */
   3508 
   3509 	i915_gem_object_finish_gtt(obj);
   3510 
   3511 	/* release the fence reg _after_ flushing */
   3512 	ret = i915_gem_object_put_fence(obj);
   3513 	if (ret)
   3514 		return ret;
   3515 
   3516 	trace_i915_vma_unbind(vma);
   3517 
   3518 	vma->unbind_vma(vma);
   3519 
   3520 	i915_gem_gtt_finish_object(obj);
   3521 
   3522 	list_del_init(&vma->mm_list);
   3523 	/* Avoid an unnecessary call to unbind on rebind. */
   3524 	if (i915_is_ggtt(vma->vm))
   3525 		obj->map_and_fenceable = true;
   3526 
   3527 	drm_mm_remove_node(&vma->node);
   3528 	i915_gem_vma_destroy(vma);
   3529 
   3530 	/* Since the unbound list is global, only move to that list if
   3531 	 * no more VMAs exist. */
   3532 	if (list_empty(&obj->vma_list))
   3533 		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
   3534 
   3535 	/* And finally now the object is completely decoupled from this vma,
   3536 	 * we can drop its hold on the backing storage and allow it to be
   3537 	 * reaped by the shrinker.
   3538 	 */
   3539 	i915_gem_object_unpin_pages(obj);
   3540 
   3541 	return 0;
   3542 }
   3543 
   3544 int i915_gpu_idle(struct drm_device *dev)
   3545 {
   3546 	struct drm_i915_private *dev_priv = dev->dev_private;
   3547 	struct intel_ring_buffer *ring;
   3548 	int ret, i;
   3549 
   3550 	/* Flush everything onto the inactive list. */
   3551 	for_each_ring(ring, dev_priv, i) {
   3552 		ret = i915_switch_context(ring, ring->default_context);
   3553 		if (ret)
   3554 			return ret;
   3555 
   3556 		ret = intel_ring_idle(ring);
   3557 		if (ret)
   3558 			return ret;
   3559 	}
   3560 
   3561 	return 0;
   3562 }
   3563 
   3564 static void i965_write_fence_reg(struct drm_device *dev, int reg,
   3565 				 struct drm_i915_gem_object *obj)
   3566 {
   3567 	struct drm_i915_private *dev_priv = dev->dev_private;
   3568 	int fence_reg;
   3569 	int fence_pitch_shift;
   3570 
   3571 	if (INTEL_INFO(dev)->gen >= 6) {
   3572 		fence_reg = FENCE_REG_SANDYBRIDGE_0;
   3573 		fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
   3574 	} else {
   3575 		fence_reg = FENCE_REG_965_0;
   3576 		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
   3577 	}
   3578 
   3579 	fence_reg += reg * 8;
   3580 
    3581 	/* To work around incoherency with non-atomic 64-bit register updates,
   3582 	 * we split the 64-bit update into two 32-bit writes. In order
   3583 	 * for a partial fence not to be evaluated between writes, we
   3584 	 * precede the update with write to turn off the fence register,
   3585 	 * and only enable the fence as the last step.
   3586 	 *
   3587 	 * For extra levels of paranoia, we make sure each step lands
   3588 	 * before applying the next step.
   3589 	 */
   3590 	I915_WRITE(fence_reg, 0);
   3591 	POSTING_READ(fence_reg);
   3592 
   3593 	if (obj) {
   3594 		u32 size = i915_gem_obj_ggtt_size(obj);
   3595 		uint64_t val;
   3596 
   3597 		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
   3598 				 0xfffff000) << 32;
   3599 		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
   3600 		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
   3601 		if (obj->tiling_mode == I915_TILING_Y)
   3602 			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
   3603 		val |= I965_FENCE_REG_VALID;
   3604 
   3605 		I915_WRITE(fence_reg + 4, val >> 32);
   3606 		POSTING_READ(fence_reg + 4);
   3607 
   3608 		I915_WRITE(fence_reg + 0, val);
   3609 		POSTING_READ(fence_reg);
   3610 	} else {
   3611 		I915_WRITE(fence_reg + 4, 0);
   3612 		POSTING_READ(fence_reg + 4);
   3613 	}
   3614 }
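/*
 * Worked example (illustrative only): with a Y-tiled object at GGTT offset
 * 0x00100000, 1MiB in size, with a 4096-byte stride, and assuming the gen4
 * field definitions (I965_FENCE_PITCH_SHIFT == 2, I965_FENCE_TILING_Y_SHIFT
 * == 1, valid bit == bit 0), the code above composes
 *
 *	upper dword = (0x00100000 + 0x00100000 - 4096) & 0xfffff000
 *		    = 0x001ff000				(fence end)
 *	pitch field = (4096 / 128 - 1) << 2 = 0x7c
 *	lower dword = 0x00100000 | 0x7c | (1 << 1) | 1 = 0x0010007f
 *
 * and writes the two halves with the disable/enable dance described in the
 * comment above.  On gen6+ the composition is the same except that the
 * pitch field is placed at SANDYBRIDGE_FENCE_PITCH_SHIFT.
 */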
   3615 
   3616 static void i915_write_fence_reg(struct drm_device *dev, int reg,
   3617 				 struct drm_i915_gem_object *obj)
   3618 {
   3619 	struct drm_i915_private *dev_priv = dev->dev_private;
   3620 	u32 val;
   3621 
   3622 	if (obj) {
   3623 		u32 size = i915_gem_obj_ggtt_size(obj);
   3624 		int pitch_val;
   3625 		int tile_width;
   3626 
   3627 		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
   3628 		     (size & -size) != size ||
   3629 		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
   3630 		     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
   3631 		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
   3632 
   3633 		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
   3634 			tile_width = 128;
   3635 		else
   3636 			tile_width = 512;
   3637 
   3638 		/* Note: pitch better be a power of two tile widths */
   3639 		pitch_val = obj->stride / tile_width;
   3640 		pitch_val = ffs(pitch_val) - 1;
   3641 
   3642 		val = i915_gem_obj_ggtt_offset(obj);
   3643 		if (obj->tiling_mode == I915_TILING_Y)
   3644 			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
   3645 		val |= I915_FENCE_SIZE_BITS(size);
   3646 		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
   3647 		val |= I830_FENCE_REG_VALID;
   3648 	} else
   3649 		val = 0;
   3650 
   3651 	if (reg < 8)
   3652 		reg = FENCE_REG_830_0 + reg * 4;
   3653 	else
   3654 		reg = FENCE_REG_945_8 + (reg - 8) * 4;
   3655 
   3656 	I915_WRITE(reg, val);
   3657 	POSTING_READ(reg);
   3658 }
   3659 
   3660 static void i830_write_fence_reg(struct drm_device *dev, int reg,
   3661 				struct drm_i915_gem_object *obj)
   3662 {
   3663 	struct drm_i915_private *dev_priv = dev->dev_private;
   3664 	uint32_t val;
   3665 
   3666 	if (obj) {
   3667 		u32 size = i915_gem_obj_ggtt_size(obj);
   3668 		uint32_t pitch_val;
   3669 
   3670 		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
   3671 		     (size & -size) != size ||
   3672 		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
   3673 		     "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
   3674 		     i915_gem_obj_ggtt_offset(obj), size);
   3675 
   3676 		pitch_val = obj->stride / 128;
   3677 		pitch_val = ffs(pitch_val) - 1;
   3678 
   3679 		val = i915_gem_obj_ggtt_offset(obj);
   3680 		if (obj->tiling_mode == I915_TILING_Y)
   3681 			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
   3682 		val |= I830_FENCE_SIZE_BITS(size);
   3683 		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
   3684 		val |= I830_FENCE_REG_VALID;
   3685 	} else
   3686 		val = 0;
   3687 
   3688 	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
   3689 	POSTING_READ(FENCE_REG_830_0 + reg * 4);
   3690 }
   3691 
   3692 inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
   3693 {
   3694 	return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
   3695 }
   3696 
   3697 static void i915_gem_write_fence(struct drm_device *dev, int reg,
   3698 				 struct drm_i915_gem_object *obj)
   3699 {
   3700 	struct drm_i915_private *dev_priv = dev->dev_private;
   3701 
   3702 	/* Ensure that all CPU reads are completed before installing a fence
   3703 	 * and all writes before removing the fence.
   3704 	 */
   3705 	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
   3706 		mb();
   3707 
   3708 	WARN(obj && (!obj->stride || !obj->tiling_mode),
   3709 	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
   3710 	     obj->stride, obj->tiling_mode);
   3711 
   3712 	switch (INTEL_INFO(dev)->gen) {
   3713 	case 8:
   3714 	case 7:
   3715 	case 6:
   3716 	case 5:
   3717 	case 4: i965_write_fence_reg(dev, reg, obj); break;
   3718 	case 3: i915_write_fence_reg(dev, reg, obj); break;
   3719 	case 2: i830_write_fence_reg(dev, reg, obj); break;
   3720 	default: BUG();
   3721 	}
   3722 
   3723 	/* And similarly be paranoid that no direct access to this region
   3724 	 * is reordered to before the fence is installed.
   3725 	 */
   3726 	if (i915_gem_object_needs_mb(obj))
   3727 		mb();
   3728 }
   3729 
   3730 static inline int fence_number(struct drm_i915_private *dev_priv,
   3731 			       struct drm_i915_fence_reg *fence)
   3732 {
   3733 	return fence - dev_priv->fence_regs;
   3734 }
   3735 
   3736 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
   3737 					 struct drm_i915_fence_reg *fence,
   3738 					 bool enable)
   3739 {
   3740 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
   3741 	int reg = fence_number(dev_priv, fence);
   3742 
   3743 	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
   3744 
   3745 	if (enable) {
   3746 		obj->fence_reg = reg;
   3747 		fence->obj = obj;
   3748 		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
   3749 	} else {
   3750 		obj->fence_reg = I915_FENCE_REG_NONE;
   3751 		fence->obj = NULL;
   3752 		list_del_init(&fence->lru_list);
   3753 	}
   3754 	obj->fence_dirty = false;
   3755 }
   3756 
   3757 static int
   3758 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
   3759 {
   3760 	if (obj->last_fenced_seqno) {
   3761 		int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
   3762 		if (ret)
   3763 			return ret;
   3764 
   3765 		obj->last_fenced_seqno = 0;
   3766 	}
   3767 
   3768 	obj->fenced_gpu_access = false;
   3769 	return 0;
   3770 }
   3771 
   3772 int
   3773 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
   3774 {
   3775 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
   3776 	struct drm_i915_fence_reg *fence;
   3777 	int ret;
   3778 
   3779 	ret = i915_gem_object_wait_fence(obj);
   3780 	if (ret)
   3781 		return ret;
   3782 
   3783 	if (obj->fence_reg == I915_FENCE_REG_NONE)
   3784 		return 0;
   3785 
   3786 	fence = &dev_priv->fence_regs[obj->fence_reg];
   3787 
   3788 	i915_gem_object_fence_lost(obj);
   3789 	i915_gem_object_update_fence(obj, fence, false);
   3790 
   3791 	return 0;
   3792 }
   3793 
   3794 static struct drm_i915_fence_reg *
   3795 i915_find_fence_reg(struct drm_device *dev)
   3796 {
   3797 	struct drm_i915_private *dev_priv = dev->dev_private;
   3798 	struct drm_i915_fence_reg *reg, *avail;
   3799 	int i;
   3800 
   3801 	/* First try to find a free reg */
   3802 	avail = NULL;
   3803 	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
   3804 		reg = &dev_priv->fence_regs[i];
   3805 		if (!reg->obj)
   3806 			return reg;
   3807 
   3808 		if (!reg->pin_count)
   3809 			avail = reg;
   3810 	}
   3811 
   3812 	if (avail == NULL)
   3813 		goto deadlock;
   3814 
   3815 	/* None available, try to steal one or wait for a user to finish */
   3816 	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
   3817 		if (reg->pin_count)
   3818 			continue;
   3819 
   3820 		return reg;
   3821 	}
   3822 
   3823 deadlock:
   3824 	/* Wait for completion of pending flips which consume fences */
   3825 	if (intel_has_pending_fb_unpin(dev))
   3826 		return ERR_PTR(-EAGAIN);
   3827 
   3828 	return ERR_PTR(-EDEADLK);
   3829 }
   3830 
   3831 /**
   3832  * i915_gem_object_get_fence - set up fencing for an object
   3833  * @obj: object to map through a fence reg
   3834  *
   3835  * When mapping objects through the GTT, userspace wants to be able to write
   3836  * to them without having to worry about swizzling if the object is tiled.
   3837  * This function walks the fence regs looking for a free one for @obj,
   3838  * stealing one if it can't find any.
   3839  *
   3840  * It then sets up the reg based on the object's properties: address, pitch
   3841  * and tiling format.
   3842  *
   3843  * For an untiled surface, this removes any existing fence.
   3844  */
   3845 int
   3846 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
   3847 {
   3848 	struct drm_device *dev = obj->base.dev;
   3849 	struct drm_i915_private *dev_priv = dev->dev_private;
   3850 	bool enable = obj->tiling_mode != I915_TILING_NONE;
   3851 	struct drm_i915_fence_reg *reg;
   3852 	int ret;
   3853 
   3854 	/* Have we updated the tiling parameters upon the object and so
   3855 	 * will need to serialise the write to the associated fence register?
   3856 	 */
   3857 	if (obj->fence_dirty) {
   3858 		ret = i915_gem_object_wait_fence(obj);
   3859 		if (ret)
   3860 			return ret;
   3861 	}
   3862 
   3863 	/* Just update our place in the LRU if our fence is getting reused. */
   3864 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
   3865 		reg = &dev_priv->fence_regs[obj->fence_reg];
   3866 		if (!obj->fence_dirty) {
   3867 			list_move_tail(&reg->lru_list,
   3868 				       &dev_priv->mm.fence_list);
   3869 			return 0;
   3870 		}
   3871 	} else if (enable) {
   3872 		reg = i915_find_fence_reg(dev);
   3873 		if (IS_ERR(reg))
   3874 			return PTR_ERR(reg);
   3875 
   3876 		if (reg->obj) {
   3877 			struct drm_i915_gem_object *old = reg->obj;
   3878 
   3879 			ret = i915_gem_object_wait_fence(old);
   3880 			if (ret)
   3881 				return ret;
   3882 
   3883 			i915_gem_object_fence_lost(old);
   3884 		}
   3885 	} else
   3886 		return 0;
   3887 
   3888 	i915_gem_object_update_fence(obj, reg, enable);
   3889 
   3890 	return 0;
   3891 }
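/*
 * Illustrative sketch, not driver code: the typical in-kernel pattern for
 * fenced access to a tiled object is to pin it into the mappable GGTT,
 * take a fence, perform the access, and drop the fence again.  Error
 * handling is abbreviated and the access itself is elided.
 */
#if 0
static int
example_fenced_access(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
	if (ret)
		return ret;

	ret = i915_gem_object_get_fence(obj);	/* finds or steals a fence reg */
	if (ret)
		goto unpin;

	/* ... access the object through its fenced GTT mapping ... */

	ret = i915_gem_object_put_fence(obj);
unpin:
	i915_gem_object_ggtt_unpin(obj);
	return ret;
}
#endif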
   3892 
   3893 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
   3894 				     struct drm_mm_node *gtt_space,
   3895 				     unsigned long cache_level)
   3896 {
   3897 	struct drm_mm_node *other;
   3898 
   3899 	/* On non-LLC machines we have to be careful when putting differing
   3900 	 * types of snoopable memory together to avoid the prefetcher
   3901 	 * crossing memory domains and dying.
   3902 	 */
   3903 	if (HAS_LLC(dev))
   3904 		return true;
   3905 
   3906 	if (!drm_mm_node_allocated(gtt_space))
   3907 		return true;
   3908 
   3909 	if (list_empty(&gtt_space->node_list))
   3910 		return true;
   3911 
   3912 	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
   3913 	if (other->allocated && !other->hole_follows && other->color != cache_level)
   3914 		return false;
   3915 
   3916 	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
   3917 	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
   3918 		return false;
   3919 
   3920 	return true;
   3921 }
   3922 
   3923 static void i915_gem_verify_gtt(struct drm_device *dev)
   3924 {
   3925 #if WATCH_GTT
   3926 	struct drm_i915_private *dev_priv = dev->dev_private;
   3927 	struct drm_i915_gem_object *obj;
   3928 	int err = 0;
   3929 
   3930 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
   3931 		if (obj->gtt_space == NULL) {
   3932 			printk(KERN_ERR "object found on GTT list with no space reserved\n");
   3933 			err++;
   3934 			continue;
   3935 		}
   3936 
   3937 		if (obj->cache_level != obj->gtt_space->color) {
   3938 			printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
   3939 			       i915_gem_obj_ggtt_offset(obj),
   3940 			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
   3941 			       obj->cache_level,
   3942 			       obj->gtt_space->color);
   3943 			err++;
   3944 			continue;
   3945 		}
   3946 
   3947 		if (!i915_gem_valid_gtt_space(dev,
   3948 					      obj->gtt_space,
   3949 					      obj->cache_level)) {
   3950 			printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
   3951 			       i915_gem_obj_ggtt_offset(obj),
   3952 			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
   3953 			       obj->cache_level);
   3954 			err++;
   3955 			continue;
   3956 		}
   3957 	}
   3958 
   3959 	WARN_ON(err);
   3960 #endif
   3961 }
   3962 
   3963 /**
   3964  * Finds free space in the GTT aperture and binds the object there.
   3965  */
   3966 static struct i915_vma *
   3967 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
   3968 			   struct i915_address_space *vm,
   3969 			   unsigned alignment,
   3970 			   uint64_t flags)
   3971 {
   3972 	struct drm_device *dev = obj->base.dev;
   3973 	struct drm_i915_private *dev_priv = dev->dev_private;
   3974 	u32 size, fence_size, fence_alignment, unfenced_alignment;
   3975 	unsigned long start =
   3976 		flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
   3977 	unsigned long end =
   3978 		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
   3979 	struct i915_vma *vma;
   3980 	int ret;
   3981 
   3982 	fence_size = i915_gem_get_gtt_size(dev,
   3983 					   obj->base.size,
   3984 					   obj->tiling_mode);
   3985 	fence_alignment = i915_gem_get_gtt_alignment(dev,
   3986 						     obj->base.size,
   3987 						     obj->tiling_mode, true);
   3988 	unfenced_alignment =
   3989 		i915_gem_get_gtt_alignment(dev,
   3990 					   obj->base.size,
   3991 					   obj->tiling_mode, false);
   3992 
   3993 	if (alignment == 0)
   3994 		alignment = flags & PIN_MAPPABLE ? fence_alignment :
   3995 						unfenced_alignment;
   3996 	if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
   3997 		DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
   3998 		return ERR_PTR(-EINVAL);
   3999 	}
   4000 
   4001 	size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
   4002 
   4003 	/* If the object is bigger than the entire aperture, reject it early
   4004 	 * before evicting everything in a vain attempt to find space.
   4005 	 */
   4006 	if (obj->base.size > end) {
   4007 		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
   4008 			  obj->base.size,
   4009 			  flags & PIN_MAPPABLE ? "mappable" : "total",
   4010 			  end);
   4011 		return ERR_PTR(-E2BIG);
   4012 	}
   4013 
   4014 	ret = i915_gem_object_get_pages(obj);
   4015 	if (ret)
   4016 		return ERR_PTR(ret);
   4017 
   4018 	i915_gem_object_pin_pages(obj);
   4019 
   4020 	vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
   4021 	if (IS_ERR(vma))
   4022 		goto err_unpin;
   4023 
   4024 search_free:
   4025 	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
   4026 						  size, alignment,
   4027 						  obj->cache_level,
   4028 						  start, end,
   4029 						  DRM_MM_SEARCH_DEFAULT,
   4030 						  DRM_MM_CREATE_DEFAULT);
   4031 	if (ret) {
   4032 		ret = i915_gem_evict_something(dev, vm, size, alignment,
   4033 					       obj->cache_level,
   4034 					       start, end,
   4035 					       flags);
   4036 		if (ret == 0)
   4037 			goto search_free;
   4038 
   4039 		goto err_free_vma;
   4040 	}
   4041 	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
   4042 					      obj->cache_level))) {
   4043 		ret = -EINVAL;
   4044 		goto err_remove_node;
   4045 	}
   4046 
   4047 	ret = i915_gem_gtt_prepare_object(obj);
   4048 	if (ret)
   4049 		goto err_remove_node;
   4050 
   4051 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
   4052 	list_add_tail(&vma->mm_list, &vm->inactive_list);
   4053 
   4054 	if (i915_is_ggtt(vm)) {
   4055 		bool mappable, fenceable;
   4056 
   4057 		fenceable = (vma->node.size == fence_size &&
   4058 			     (vma->node.start & (fence_alignment - 1)) == 0);
   4059 
   4060 		mappable = (vma->node.start + obj->base.size <=
   4061 			    dev_priv->gtt.mappable_end);
   4062 
   4063 		obj->map_and_fenceable = mappable && fenceable;
   4064 	}
   4065 
   4066 	WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
   4067 
   4068 	trace_i915_vma_bind(vma, flags);
   4069 	vma->bind_vma(vma, obj->cache_level,
   4070 		      flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
   4071 
   4072 	i915_gem_verify_gtt(dev);
   4073 	return vma;
   4074 
   4075 err_remove_node:
   4076 	drm_mm_remove_node(&vma->node);
   4077 err_free_vma:
   4078 	i915_gem_vma_destroy(vma);
   4079 	vma = ERR_PTR(ret);
   4080 err_unpin:
   4081 	i915_gem_object_unpin_pages(obj);
   4082 	return vma;
   4083 }
   4084 
   4085 bool
   4086 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
   4087 			bool force)
   4088 {
   4089 	/* If we don't have a page list set up, then we're not pinned
   4090 	 * to GPU, and we can ignore the cache flush because it'll happen
   4091 	 * again at bind time.
   4092 	 */
   4093 	if (obj->pages == NULL)
   4094 		return false;
   4095 
   4096 	/*
   4097 	 * Stolen memory is always coherent with the GPU as it is explicitly
    4098 	 * marked as write-combining by the system, or the system is cache-coherent.
   4099 	 */
   4100 	if (obj->stolen)
   4101 		return false;
   4102 
   4103 	/* If the GPU is snooping the contents of the CPU cache,
   4104 	 * we do not need to manually clear the CPU cache lines.  However,
   4105 	 * the caches are only snooped when the render cache is
   4106 	 * flushed/invalidated.  As we always have to emit invalidations
   4107 	 * and flushes when moving into and out of the RENDER domain, correct
   4108 	 * snooping behaviour occurs naturally as the result of our domain
   4109 	 * tracking.
   4110 	 */
   4111 	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
   4112 		return false;
   4113 
   4114 	trace_i915_gem_object_clflush(obj);
   4115 #ifdef __NetBSD__
   4116 	drm_clflush_pglist(&obj->igo_pageq);
   4117 #else
   4118 	drm_clflush_sg(obj->pages);
   4119 #endif
   4120 
   4121 	return true;
   4122 }
   4123 
   4124 /** Flushes the GTT write domain for the object if it's dirty. */
   4125 static void
   4126 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
   4127 {
   4128 	uint32_t old_write_domain;
   4129 
   4130 	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
   4131 		return;
   4132 
   4133 	/* No actual flushing is required for the GTT write domain.  Writes
   4134 	 * to it immediately go to main memory as far as we know, so there's
   4135 	 * no chipset flush.  It also doesn't land in render cache.
   4136 	 *
   4137 	 * However, we do have to enforce the order so that all writes through
   4138 	 * the GTT land before any writes to the device, such as updates to
   4139 	 * the GATT itself.
   4140 	 */
   4141 	wmb();
   4142 
   4143 	old_write_domain = obj->base.write_domain;
   4144 	obj->base.write_domain = 0;
   4145 
   4146 	trace_i915_gem_object_change_domain(obj,
   4147 					    obj->base.read_domains,
   4148 					    old_write_domain);
   4149 }
   4150 
   4151 /** Flushes the CPU write domain for the object if it's dirty. */
   4152 static void
   4153 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
   4154 				       bool force)
   4155 {
   4156 	uint32_t old_write_domain;
   4157 
   4158 	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
   4159 		return;
   4160 
   4161 	if (i915_gem_clflush_object(obj, force))
   4162 		i915_gem_chipset_flush(obj->base.dev);
   4163 
   4164 	old_write_domain = obj->base.write_domain;
   4165 	obj->base.write_domain = 0;
   4166 
   4167 	trace_i915_gem_object_change_domain(obj,
   4168 					    obj->base.read_domains,
   4169 					    old_write_domain);
   4170 }
   4171 
   4172 /**
   4173  * Moves a single object to the GTT read, and possibly write domain.
   4174  *
   4175  * This function returns when the move is complete, including waiting on
   4176  * flushes to occur.
   4177  */
   4178 int
   4179 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
   4180 {
   4181 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
   4182 	uint32_t old_write_domain, old_read_domains;
   4183 	int ret;
   4184 
   4185 	/* Not valid to be called on unbound objects. */
   4186 	if (!i915_gem_obj_bound_any(obj))
   4187 		return -EINVAL;
   4188 
   4189 	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
   4190 		return 0;
   4191 
   4192 	ret = i915_gem_object_wait_rendering(obj, !write);
   4193 	if (ret)
   4194 		return ret;
   4195 
   4196 	i915_gem_object_flush_cpu_write_domain(obj, false);
   4197 
   4198 	/* Serialise direct access to this object with the barriers for
   4199 	 * coherent writes from the GPU, by effectively invalidating the
   4200 	 * GTT domain upon first access.
   4201 	 */
   4202 	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
   4203 		mb();
   4204 
   4205 	old_write_domain = obj->base.write_domain;
   4206 	old_read_domains = obj->base.read_domains;
   4207 
   4208 	/* It should now be out of any other write domains, and we can update
   4209 	 * the domain values for our changes.
   4210 	 */
   4211 	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
   4212 	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
   4213 	if (write) {
   4214 		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
   4215 		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
   4216 		obj->dirty = 1;
   4217 	}
   4218 
   4219 	trace_i915_gem_object_change_domain(obj,
   4220 					    old_read_domains,
   4221 					    old_write_domain);
   4222 
   4223 	/* And bump the LRU for this access */
   4224 	if (i915_gem_object_is_inactive(obj)) {
   4225 		struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
   4226 		if (vma)
   4227 			list_move_tail(&vma->mm_list,
   4228 				       &dev_priv->gtt.base.inactive_list);
   4229 
   4230 	}
   4231 
   4232 	return 0;
   4233 }
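/*
 * Illustrative sketch, not driver code: a caller that wants the CPU to
 * write through the GTT aperture first moves the bound object into the
 * GTT write domain so that stale CPU cachelines are flushed and the
 * domain tracking above stays correct.  The mapping step itself is elided.
 */
#if 0
static int
example_prepare_gtt_write(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	/* ... the caller now writes through a WC mapping of the object's
	 * GGTT range; obj->dirty has already been set above ... */

	return 0;
}
#endif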
   4234 
   4235 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
   4236 				    enum i915_cache_level cache_level)
   4237 {
   4238 	struct drm_device *dev = obj->base.dev;
   4239 	struct i915_vma *vma, *next;
   4240 	int ret;
   4241 
   4242 	if (obj->cache_level == cache_level)
   4243 		return 0;
   4244 
   4245 	if (i915_gem_obj_is_pinned(obj)) {
   4246 		DRM_DEBUG("can not change the cache level of pinned objects\n");
   4247 		return -EBUSY;
   4248 	}
   4249 
   4250 	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
   4251 		if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
   4252 			ret = i915_vma_unbind(vma);
   4253 			if (ret)
   4254 				return ret;
   4255 		}
   4256 	}
   4257 
   4258 	if (i915_gem_obj_bound_any(obj)) {
   4259 		ret = i915_gem_object_finish_gpu(obj);
   4260 		if (ret)
   4261 			return ret;
   4262 
   4263 		i915_gem_object_finish_gtt(obj);
   4264 
   4265 		/* Before SandyBridge, you could not use tiling or fence
   4266 		 * registers with snooped memory, so relinquish any fences
   4267 		 * currently pointing to our region in the aperture.
   4268 		 */
   4269 		if (INTEL_INFO(dev)->gen < 6) {
   4270 			ret = i915_gem_object_put_fence(obj);
   4271 			if (ret)
   4272 				return ret;
   4273 		}
   4274 
   4275 		list_for_each_entry(vma, &obj->vma_list, vma_link)
   4276 			if (drm_mm_node_allocated(&vma->node))
   4277 				vma->bind_vma(vma, cache_level,
   4278 					      obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
   4279 	}
   4280 
   4281 	list_for_each_entry(vma, &obj->vma_list, vma_link)
   4282 		vma->node.color = cache_level;
   4283 	obj->cache_level = cache_level;
   4284 
   4285 	if (cpu_write_needs_clflush(obj)) {
   4286 		u32 old_read_domains, old_write_domain;
   4287 
   4288 		/* If we're coming from LLC cached, then we haven't
   4289 		 * actually been tracking whether the data is in the
   4290 		 * CPU cache or not, since we only allow one bit set
   4291 		 * in obj->write_domain and have been skipping the clflushes.
   4292 		 * Just set it to the CPU cache for now.
   4293 		 */
   4294 		WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
   4295 
   4296 		old_read_domains = obj->base.read_domains;
   4297 		old_write_domain = obj->base.write_domain;
   4298 
   4299 		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
   4300 		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
   4301 
   4302 		trace_i915_gem_object_change_domain(obj,
   4303 						    old_read_domains,
   4304 						    old_write_domain);
   4305 	}
   4306 
   4307 	i915_gem_verify_gtt(dev);
   4308 	return 0;
   4309 }
   4310 
   4311 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
   4312 			       struct drm_file *file)
   4313 {
   4314 	struct drm_i915_gem_caching *args = data;
   4315 	struct drm_gem_object *gobj;
   4316 	struct drm_i915_gem_object *obj;
   4317 	int ret;
   4318 
   4319 	ret = i915_mutex_lock_interruptible(dev);
   4320 	if (ret)
   4321 		return ret;
   4322 
   4323 	gobj = drm_gem_object_lookup(dev, file, args->handle);
   4324 	if (gobj == NULL) {
   4325 		ret = -ENOENT;
   4326 		goto unlock;
   4327 	}
   4328 	obj = to_intel_bo(gobj);
   4329 
   4330 	switch (obj->cache_level) {
   4331 	case I915_CACHE_LLC:
   4332 	case I915_CACHE_L3_LLC:
   4333 		args->caching = I915_CACHING_CACHED;
   4334 		break;
   4335 
   4336 	case I915_CACHE_WT:
   4337 		args->caching = I915_CACHING_DISPLAY;
   4338 		break;
   4339 
   4340 	default:
   4341 		args->caching = I915_CACHING_NONE;
   4342 		break;
   4343 	}
   4344 
   4345 	drm_gem_object_unreference(&obj->base);
   4346 unlock:
   4347 	mutex_unlock(&dev->struct_mutex);
   4348 	return ret;
   4349 }
   4350 
   4351 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
   4352 			       struct drm_file *file)
   4353 {
   4354 	struct drm_i915_gem_caching *args = data;
   4355 	struct drm_gem_object *gobj;
   4356 	struct drm_i915_gem_object *obj;
   4357 	enum i915_cache_level level;
   4358 	int ret;
   4359 
   4360 	switch (args->caching) {
   4361 	case I915_CACHING_NONE:
   4362 		level = I915_CACHE_NONE;
   4363 		break;
   4364 	case I915_CACHING_CACHED:
   4365 		level = I915_CACHE_LLC;
   4366 		break;
   4367 	case I915_CACHING_DISPLAY:
   4368 		level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
   4369 		break;
   4370 	default:
   4371 		return -EINVAL;
   4372 	}
   4373 
   4374 	ret = i915_mutex_lock_interruptible(dev);
   4375 	if (ret)
   4376 		return ret;
   4377 
   4378 	gobj = drm_gem_object_lookup(dev, file, args->handle);
   4379 	if (gobj == NULL) {
   4380 		ret = -ENOENT;
   4381 		goto unlock;
   4382 	}
   4383 	obj = to_intel_bo(gobj);
   4384 
   4385 	ret = i915_gem_object_set_cache_level(obj, level);
   4386 
   4387 	drm_gem_object_unreference(&obj->base);
   4388 unlock:
   4389 	mutex_unlock(&dev->struct_mutex);
   4390 	return ret;
   4391 }
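/*
 * Illustrative sketch, not driver code: userspace picks a caching mode
 * with the set_caching ioctl.  This assumes libdrm's drmIoctl() and the
 * drm_i915_gem_caching structure from i915_drm.h; fd and handle are
 * hypothetical.
 */
#if 0
static int
example_set_cached(int fd, uint32_t handle)
{
	struct drm_i915_gem_caching arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.caching = I915_CACHING_CACHED;	/* mapped to I915_CACHE_LLC above */

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) != 0)
		return -errno;
	return 0;
}
#endif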
   4392 
   4393 static bool is_pin_display(struct drm_i915_gem_object *obj)
   4394 {
   4395 	/* There are 3 sources that pin objects:
   4396 	 *   1. The display engine (scanouts, sprites, cursors);
   4397 	 *   2. Reservations for execbuffer;
   4398 	 *   3. The user.
   4399 	 *
   4400 	 * We can ignore reservations as we hold the struct_mutex and
   4401 	 * are only called outside of the reservation path.  The user
   4402 	 * can only increment pin_count once, and so if after
   4403 	 * subtracting the potential reference by the user, any pin_count
   4404 	 * remains, it must be due to another use by the display engine.
   4405 	 */
   4406 	return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
   4407 }
   4408 
   4409 /*
   4410  * Prepare buffer for display plane (scanout, cursors, etc).
   4411  * Can be called from an uninterruptible phase (modesetting) and allows
   4412  * any flushes to be pipelined (for pageflips).
   4413  */
   4414 int
   4415 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
   4416 				     u32 alignment,
   4417 				     struct intel_ring_buffer *pipelined)
   4418 {
   4419 	u32 old_read_domains, old_write_domain;
   4420 	int ret;
   4421 
   4422 	if (pipelined != obj->ring) {
   4423 		ret = i915_gem_object_sync(obj, pipelined);
   4424 		if (ret)
   4425 			return ret;
   4426 	}
   4427 
   4428 	/* Mark the pin_display early so that we account for the
   4429 	 * display coherency whilst setting up the cache domains.
   4430 	 */
   4431 	obj->pin_display = true;
   4432 
   4433 	/* The display engine is not coherent with the LLC cache on gen6.  As
   4434 	 * a result, we make sure that the pinning that is about to occur is
    4435 	 * done with uncached PTEs. This is the lowest common denominator for all
   4436 	 * chipsets.
   4437 	 *
   4438 	 * However for gen6+, we could do better by using the GFDT bit instead
   4439 	 * of uncaching, which would allow us to flush all the LLC-cached data
   4440 	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
   4441 	 */
   4442 	ret = i915_gem_object_set_cache_level(obj,
   4443 					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
   4444 	if (ret)
   4445 		goto err_unpin_display;
   4446 
   4447 	/* As the user may map the buffer once pinned in the display plane
   4448 	 * (e.g. libkms for the bootup splash), we have to ensure that we
   4449 	 * always use map_and_fenceable for all scanout buffers.
   4450 	 */
   4451 	ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
   4452 	if (ret)
   4453 		goto err_unpin_display;
   4454 
   4455 	i915_gem_object_flush_cpu_write_domain(obj, true);
   4456 
   4457 	old_write_domain = obj->base.write_domain;
   4458 	old_read_domains = obj->base.read_domains;
   4459 
   4460 	/* It should now be out of any other write domains, and we can update
   4461 	 * the domain values for our changes.
   4462 	 */
   4463 	obj->base.write_domain = 0;
   4464 	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
   4465 
   4466 	trace_i915_gem_object_change_domain(obj,
   4467 					    old_read_domains,
   4468 					    old_write_domain);
   4469 
   4470 	return 0;
   4471 
   4472 err_unpin_display:
   4473 	obj->pin_display = is_pin_display(obj);
   4474 	return ret;
   4475 }
   4476 
   4477 void
   4478 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
   4479 {
   4480 	i915_gem_object_ggtt_unpin(obj);
   4481 	obj->pin_display = is_pin_display(obj);
   4482 }
   4483 
   4484 int
   4485 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
   4486 {
   4487 	int ret;
   4488 
   4489 	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
   4490 		return 0;
   4491 
   4492 	ret = i915_gem_object_wait_rendering(obj, false);
   4493 	if (ret)
   4494 		return ret;
   4495 
   4496 	/* Ensure that we invalidate the GPU's caches and TLBs. */
   4497 	obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
   4498 	return 0;
   4499 }
   4500 
   4501 /**
   4502  * Moves a single object to the CPU read, and possibly write domain.
   4503  *
   4504  * This function returns when the move is complete, including waiting on
   4505  * flushes to occur.
   4506  */
   4507 int
   4508 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
   4509 {
   4510 	uint32_t old_write_domain, old_read_domains;
   4511 	int ret;
   4512 
   4513 	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
   4514 		return 0;
   4515 
   4516 	ret = i915_gem_object_wait_rendering(obj, !write);
   4517 	if (ret)
   4518 		return ret;
   4519 
   4520 	i915_gem_object_flush_gtt_write_domain(obj);
   4521 
   4522 	old_write_domain = obj->base.write_domain;
   4523 	old_read_domains = obj->base.read_domains;
   4524 
   4525 	/* Flush the CPU cache if it's still invalid. */
   4526 	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
   4527 		i915_gem_clflush_object(obj, false);
   4528 
   4529 		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
   4530 	}
   4531 
   4532 	/* It should now be out of any other write domains, and we can update
   4533 	 * the domain values for our changes.
   4534 	 */
   4535 	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
   4536 
   4537 	/* If we're writing through the CPU, then the GPU read domains will
   4538 	 * need to be invalidated at next use.
   4539 	 */
   4540 	if (write) {
   4541 		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
   4542 		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
   4543 	}
   4544 
   4545 	trace_i915_gem_object_change_domain(obj,
   4546 					    old_read_domains,
   4547 					    old_write_domain);
   4548 
   4549 	return 0;
   4550 }
   4551 
   4552 /* Throttle our rendering by waiting until the ring has completed our requests
   4553  * emitted over 20 msec ago.
   4554  *
   4555  * Note that if we were to use the current jiffies each time around the loop,
   4556  * we wouldn't escape the function with any frames outstanding if the time to
   4557  * render a frame was over 20ms.
   4558  *
   4559  * This should get us reasonable parallelism between CPU and GPU but also
   4560  * relatively low latency when blocking on a particular request to finish.
   4561  */
   4562 static int
   4563 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
   4564 {
   4565 	struct drm_i915_private *dev_priv = dev->dev_private;
   4566 	struct drm_i915_file_private *file_priv = file->driver_priv;
   4567 	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
   4568 	struct drm_i915_gem_request *request;
   4569 	struct intel_ring_buffer *ring = NULL;
   4570 	unsigned reset_counter;
   4571 	u32 seqno = 0;
   4572 	int ret;
   4573 
   4574 	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
   4575 	if (ret)
   4576 		return ret;
   4577 
   4578 	ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
   4579 	if (ret)
   4580 		return ret;
   4581 
   4582 	spin_lock(&file_priv->mm.lock);
   4583 	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
   4584 		if (time_after_eq(request->emitted_jiffies, recent_enough))
   4585 			break;
   4586 
   4587 		ring = request->ring;
   4588 		seqno = request->seqno;
   4589 	}
   4590 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
   4591 	spin_unlock(&file_priv->mm.lock);
   4592 
   4593 	if (seqno == 0)
   4594 		return 0;
   4595 
   4596 	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
   4597 	if (ret == 0)
   4598 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
   4599 
   4600 	return ret;
   4601 }
   4602 
   4603 static bool
   4604 i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
   4605 {
   4606 	struct drm_i915_gem_object *obj = vma->obj;
   4607 
   4608 	if (alignment &&
   4609 	    vma->node.start & (alignment - 1))
   4610 		return true;
   4611 
   4612 	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
   4613 		return true;
   4614 
   4615 	if (flags & PIN_OFFSET_BIAS &&
   4616 	    vma->node.start < (flags & PIN_OFFSET_MASK))
   4617 		return true;
   4618 
   4619 	return false;
   4620 }
   4621 
   4622 int
   4623 i915_gem_object_pin(struct drm_i915_gem_object *obj,
   4624 		    struct i915_address_space *vm,
   4625 		    uint32_t alignment,
   4626 		    uint64_t flags)
   4627 {
   4628 	struct i915_vma *vma;
   4629 	int ret;
   4630 
   4631 	if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
   4632 		return -EINVAL;
   4633 
   4634 	vma = i915_gem_obj_to_vma(obj, vm);
   4635 	if (vma) {
   4636 		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
   4637 			return -EBUSY;
   4638 
   4639 		if (i915_vma_misplaced(vma, alignment, flags)) {
   4640 			WARN(vma->pin_count,
   4641 			     "bo is already pinned with incorrect alignment:"
   4642 			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
   4643 			     " obj->map_and_fenceable=%d\n",
   4644 			     i915_gem_obj_offset(obj, vm), alignment,
   4645 			     !!(flags & PIN_MAPPABLE),
   4646 			     obj->map_and_fenceable);
   4647 			ret = i915_vma_unbind(vma);
   4648 			if (ret)
   4649 				return ret;
   4650 
   4651 			vma = NULL;
   4652 		}
   4653 	}
   4654 
   4655 	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
   4656 		vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
   4657 		if (IS_ERR(vma))
   4658 			return PTR_ERR(vma);
   4659 	}
   4660 
   4661 	if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
   4662 		vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
   4663 
   4664 	vma->pin_count++;
   4665 	if (flags & PIN_MAPPABLE)
   4666 		obj->pin_mappable |= true;
   4667 
   4668 	return 0;
   4669 }
   4670 
   4671 void
   4672 i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
   4673 {
   4674 	struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
   4675 
   4676 	BUG_ON(!vma);
   4677 	BUG_ON(vma->pin_count == 0);
   4678 	BUG_ON(!i915_gem_obj_ggtt_bound(obj));
   4679 
   4680 	if (--vma->pin_count == 0)
   4681 		obj->pin_mappable = false;
   4682 }
   4683 
   4684 int
   4685 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
   4686 		   struct drm_file *file)
   4687 {
   4688 	struct drm_i915_gem_pin *args = data;
   4689 	struct drm_gem_object *gobj;
   4690 	struct drm_i915_gem_object *obj;
   4691 	int ret;
   4692 
   4693 	if (INTEL_INFO(dev)->gen >= 6)
   4694 		return -ENODEV;
   4695 
   4696 	ret = i915_mutex_lock_interruptible(dev);
   4697 	if (ret)
   4698 		return ret;
   4699 
   4700 	gobj = drm_gem_object_lookup(dev, file, args->handle);
   4701 	if (gobj == NULL) {
   4702 		ret = -ENOENT;
   4703 		goto unlock;
   4704 	}
   4705 	obj = to_intel_bo(gobj);
   4706 
   4707 	if (obj->madv != I915_MADV_WILLNEED) {
   4708 		DRM_DEBUG("Attempting to pin a purgeable buffer\n");
   4709 		ret = -EFAULT;
   4710 		goto out;
   4711 	}
   4712 
   4713 	if (obj->pin_filp != NULL && obj->pin_filp != file) {
   4714 		DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
   4715 			  args->handle);
   4716 		ret = -EINVAL;
   4717 		goto out;
   4718 	}
   4719 
   4720 	if (obj->user_pin_count == ULONG_MAX) {
   4721 		ret = -EBUSY;
   4722 		goto out;
   4723 	}
   4724 
   4725 	if (obj->user_pin_count == 0) {
   4726 		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
   4727 		if (ret)
   4728 			goto out;
   4729 	}
   4730 
   4731 	obj->user_pin_count++;
   4732 	obj->pin_filp = file;
   4733 
   4734 	args->offset = i915_gem_obj_ggtt_offset(obj);
   4735 out:
   4736 	drm_gem_object_unreference(&obj->base);
   4737 unlock:
   4738 	mutex_unlock(&dev->struct_mutex);
   4739 	return ret;
   4740 }
   4741 
   4742 int
   4743 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
   4744 		     struct drm_file *file)
   4745 {
   4746 	struct drm_i915_gem_pin *args = data;
   4747 	struct drm_gem_object *gobj;
   4748 	struct drm_i915_gem_object *obj;
   4749 	int ret;
   4750 
   4751 	ret = i915_mutex_lock_interruptible(dev);
   4752 	if (ret)
   4753 		return ret;
   4754 
   4755 	gobj = drm_gem_object_lookup(dev, file, args->handle);
   4756 	if (gobj == NULL) {
   4757 		ret = -ENOENT;
   4758 		goto unlock;
   4759 	}
   4760 	obj = to_intel_bo(gobj);
   4761 
   4762 	if (obj->pin_filp != file) {
   4763 		DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
   4764 			  args->handle);
   4765 		ret = -EINVAL;
   4766 		goto out;
   4767 	}
   4768 	obj->user_pin_count--;
   4769 	if (obj->user_pin_count == 0) {
   4770 		obj->pin_filp = NULL;
   4771 		i915_gem_object_ggtt_unpin(obj);
   4772 	}
   4773 
   4774 out:
   4775 	drm_gem_object_unreference(&obj->base);
   4776 unlock:
   4777 	mutex_unlock(&dev->struct_mutex);
   4778 	return ret;
   4779 }
   4780 
   4781 int
   4782 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
   4783 		    struct drm_file *file)
   4784 {
   4785 	struct drm_i915_gem_busy *args = data;
   4786 	struct drm_gem_object *gobj;
   4787 	struct drm_i915_gem_object *obj;
   4788 	int ret;
   4789 
   4790 	ret = i915_mutex_lock_interruptible(dev);
   4791 	if (ret)
   4792 		return ret;
   4793 
   4794 	gobj = drm_gem_object_lookup(dev, file, args->handle);
   4795 	if (gobj == NULL) {
   4796 		ret = -ENOENT;
   4797 		goto unlock;
   4798 	}
   4799 	obj = to_intel_bo(gobj);
   4800 
   4801 	/* Count all active objects as busy, even if they are currently not used
   4802 	 * by the gpu. Users of this interface expect objects to eventually
   4803 	 * become non-busy without any further actions, therefore emit any
   4804 	 * necessary flushes here.
   4805 	 */
   4806 	ret = i915_gem_object_flush_active(obj);
   4807 
   4808 	args->busy = obj->active;
   4809 	if (obj->ring) {
   4810 		BUILD_BUG_ON(I915_NUM_RINGS > 16);
   4811 		args->busy |= intel_ring_flag(obj->ring) << 16;
   4812 	}
   4813 
   4814 	drm_gem_object_unreference(&obj->base);
   4815 unlock:
   4816 	mutex_unlock(&dev->struct_mutex);
   4817 	return ret;
   4818 }
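/*
 * Illustrative sketch, not driver code: userspace decodes the busy ioctl
 * result exactly as it is packed above (bit 0 = still active, bits 16 and
 * up = ring flag of the last ring to use the object).  Assumes libdrm's
 * drmIoctl(); fd and handle are hypothetical.
 */
#if 0
static bool
example_bo_is_busy(int fd, uint32_t handle)
{
	struct drm_i915_gem_busy busy;

	memset(&busy, 0, sizeof(busy));
	busy.handle = handle;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) != 0)
		return true;	/* conservatively treat errors as busy */

	return (busy.busy & 1) != 0;
}
#endif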
   4819 
   4820 int
   4821 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
   4822 			struct drm_file *file_priv)
   4823 {
   4824 	return i915_gem_ring_throttle(dev, file_priv);
   4825 }
   4826 
   4827 int
   4828 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
   4829 		       struct drm_file *file_priv)
   4830 {
   4831 	struct drm_i915_gem_madvise *args = data;
   4832 	struct drm_gem_object *gobj;
   4833 	struct drm_i915_gem_object *obj;
   4834 	int ret;
   4835 
   4836 	switch (args->madv) {
   4837 	case I915_MADV_DONTNEED:
   4838 	case I915_MADV_WILLNEED:
   4839 	    break;
   4840 	default:
   4841 	    return -EINVAL;
   4842 	}
   4843 
   4844 	ret = i915_mutex_lock_interruptible(dev);
   4845 	if (ret)
   4846 		return ret;
   4847 
   4848 	gobj = drm_gem_object_lookup(dev, file_priv, args->handle);
   4849 	if (gobj == NULL) {
   4850 		ret = -ENOENT;
   4851 		goto unlock;
   4852 	}
   4853 	obj = to_intel_bo(gobj);
   4854 
   4855 	if (i915_gem_obj_is_pinned(obj)) {
   4856 		ret = -EINVAL;
   4857 		goto out;
   4858 	}
   4859 
   4860 	if (obj->madv != __I915_MADV_PURGED)
   4861 		obj->madv = args->madv;
   4862 
   4863 	/* if the object is no longer attached, discard its backing storage */
   4864 	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
   4865 		i915_gem_object_truncate(obj);
   4866 
   4867 	args->retained = obj->madv != __I915_MADV_PURGED;
   4868 
   4869 out:
   4870 	drm_gem_object_unreference(&obj->base);
   4871 unlock:
   4872 	mutex_unlock(&dev->struct_mutex);
   4873 	return ret;
   4874 }
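/*
 * Illustrative sketch, not driver code: userspace marks an idle cached
 * buffer purgeable and later checks `retained' to learn whether the
 * backing pages survived a shrinker pass.  Assumes libdrm's drmIoctl();
 * fd and handle are hypothetical.
 */
#if 0
static int
example_mark_purgeable(int fd, uint32_t handle, bool *retained)
{
	struct drm_i915_gem_madvise madv;

	memset(&madv, 0, sizeof(madv));
	madv.handle = handle;
	madv.madv = I915_MADV_DONTNEED;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) != 0)
		return -errno;

	*retained = madv.retained != 0;
	return 0;
}
#endif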
   4875 
   4876 void i915_gem_object_init(struct drm_i915_gem_object *obj,
   4877 			  const struct drm_i915_gem_object_ops *ops)
   4878 {
   4879 	INIT_LIST_HEAD(&obj->global_list);
   4880 	INIT_LIST_HEAD(&obj->ring_list);
   4881 	INIT_LIST_HEAD(&obj->obj_exec_link);
   4882 	INIT_LIST_HEAD(&obj->vma_list);
   4883 
   4884 	obj->ops = ops;
   4885 
   4886 	obj->fence_reg = I915_FENCE_REG_NONE;
   4887 	obj->madv = I915_MADV_WILLNEED;
   4888 	/* Avoid an unnecessary call to unbind on the first bind. */
   4889 	obj->map_and_fenceable = true;
   4890 
   4891 	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
   4892 }
   4893 
   4894 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
   4895 	.get_pages = i915_gem_object_get_pages_gtt,
   4896 	.put_pages = i915_gem_object_put_pages_gtt,
   4897 };
   4898 
   4899 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
   4900 						  size_t size)
   4901 {
   4902 #ifdef __NetBSD__
   4903 	struct drm_i915_private *const dev_priv = dev->dev_private;
   4904 #endif
   4905 	struct drm_i915_gem_object *obj;
   4906 #ifndef __NetBSD__
   4907 	struct address_space *mapping;
   4908 	gfp_t mask;
   4909 #endif
   4910 
   4911 	obj = i915_gem_object_alloc(dev);
   4912 	if (obj == NULL)
   4913 		return NULL;
   4914 
   4915 	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
   4916 		i915_gem_object_free(obj);
   4917 		return NULL;
   4918 	}
   4919 
   4920 #ifdef __NetBSD__
   4921 	uao_set_pgfl(obj->base.gemo_shm_uao, dev_priv->gtt.pgfl);
   4922 #else
   4923 	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
   4924 	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
   4925 		/* 965gm cannot relocate objects above 4GiB. */
   4926 		mask &= ~__GFP_HIGHMEM;
   4927 		mask |= __GFP_DMA32;
   4928 	}
   4929 
   4930 	mapping = file_inode(obj->base.filp)->i_mapping;
   4931 	mapping_set_gfp_mask(mapping, mask);
   4932 #endif
   4933 
   4934 	i915_gem_object_init(obj, &i915_gem_object_ops);
   4935 
   4936 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
   4937 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
   4938 
   4939 	if (HAS_LLC(dev)) {
   4940 		/* On some devices, we can have the GPU use the LLC (the CPU
   4941 		 * cache) for about a 10% performance improvement
   4942 		 * compared to uncached.  Graphics requests other than
   4943 		 * display scanout are coherent with the CPU in
   4944 		 * accessing this cache.  This means in this mode we
   4945 		 * don't need to clflush on the CPU side, and on the
   4946 		 * GPU side we only need to flush internal caches to
   4947 		 * get data visible to the CPU.
   4948 		 *
   4949 		 * However, we maintain the display planes as UC, and so
   4950 		 * need to rebind when first used as such.
   4951 		 */
   4952 		obj->cache_level = I915_CACHE_LLC;
   4953 	} else
   4954 		obj->cache_level = I915_CACHE_NONE;
   4955 
   4956 	trace_i915_gem_object_create(obj);
   4957 
   4958 	return obj;
   4959 }
   4960 
   4961 void i915_gem_free_object(struct drm_gem_object *gem_obj)
   4962 {
   4963 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
   4964 	struct drm_device *dev = obj->base.dev;
   4965 	struct drm_i915_private *dev_priv = dev->dev_private;
   4966 	struct i915_vma *vma, *next;
   4967 
   4968 	intel_runtime_pm_get(dev_priv);
   4969 
   4970 	trace_i915_gem_object_destroy(obj);
   4971 
   4972 	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
   4973 		int ret;
   4974 
   4975 		vma->pin_count = 0;
   4976 		ret = i915_vma_unbind(vma);
   4977 		if (WARN_ON(ret == -ERESTARTSYS)) {
   4978 			bool was_interruptible;
   4979 
   4980 			was_interruptible = dev_priv->mm.interruptible;
   4981 			dev_priv->mm.interruptible = false;
   4982 
   4983 			WARN_ON(i915_vma_unbind(vma));
   4984 
   4985 			dev_priv->mm.interruptible = was_interruptible;
   4986 		}
   4987 	}
   4988 
   4989 	i915_gem_object_detach_phys(obj);
   4990 
   4991 	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
   4992 	 * before progressing. */
   4993 	if (obj->stolen)
   4994 		i915_gem_object_unpin_pages(obj);
   4995 
   4996 	if (WARN_ON(obj->pages_pin_count))
   4997 		obj->pages_pin_count = 0;
   4998 	i915_gem_object_put_pages(obj);
   4999 	i915_gem_object_free_mmap_offset(obj);
   5000 	i915_gem_object_release_stolen(obj);
   5001 
   5002 	BUG_ON(obj->pages);
   5003 
   5004 #ifndef __NetBSD__		/* XXX drm prime */
   5005 	if (obj->base.import_attach)
   5006 		drm_prime_gem_destroy(&obj->base, NULL);
   5007 #endif
   5008 
   5009 	drm_gem_object_release(&obj->base);
   5010 	i915_gem_info_remove_obj(dev_priv, obj->base.size);
   5011 
   5012 	kfree(obj->bit_17);
   5013 	i915_gem_object_free(obj);
   5014 
   5015 	intel_runtime_pm_put(dev_priv);
   5016 }
   5017 
   5018 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
   5019 				     struct i915_address_space *vm)
   5020 {
   5021 	struct i915_vma *vma;
   5022 	list_for_each_entry(vma, &obj->vma_list, vma_link)
   5023 		if (vma->vm == vm)
   5024 			return vma;
   5025 
   5026 	return NULL;
   5027 }
   5028 
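         /*
          * Free a VMA whose drm_mm node is no longer allocated.  If the VMA is
          * still on an execbuffer reservation list it is kept as a placeholder
          * and freed later.
          */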
   5029 void i915_gem_vma_destroy(struct i915_vma *vma)
   5030 {
   5031 	WARN_ON(vma->node.allocated);
   5032 
   5033 	/* Keep the vma as a placeholder in the execbuffer reservation lists */
   5034 	if (!list_empty(&vma->exec_list))
   5035 		return;
   5036 
   5037 	list_del(&vma->vma_link);
   5038 
   5039 	kfree(vma);
   5040 }
   5041 
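         /*
          * Quiesce GEM for suspend: idle the GPU, retire outstanding requests,
          * tear down the ring buffers and cancel the hangcheck, retire and idle
          * work before the hardware is powered down.
          */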
   5042 int
   5043 i915_gem_suspend(struct drm_device *dev)
   5044 {
   5045 	struct drm_i915_private *dev_priv = dev->dev_private;
   5046 	int ret = 0;
   5047 
   5048 	mutex_lock(&dev->struct_mutex);
   5049 	if (dev_priv->ums.mm_suspended)
   5050 		goto err;
   5051 
   5052 	ret = i915_gpu_idle(dev);
   5053 	if (ret)
   5054 		goto err;
   5055 
   5056 	i915_gem_retire_requests(dev);
   5057 
   5058 	/* Under UMS, be paranoid and evict. */
   5059 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
   5060 		i915_gem_evict_everything(dev);
   5061 
   5062 	i915_kernel_lost_context(dev);
   5063 	i915_gem_cleanup_ringbuffer(dev);
   5064 
    5065 	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
    5066 	 * We need to replace this with a semaphore, or something,
    5067 	 * and stop conflating it with ums.mm_suspended!
    5068 	 */
   5069 	dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
   5070 							     DRIVER_MODESET);
   5071 	mutex_unlock(&dev->struct_mutex);
   5072 
   5073 	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
   5074 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
   5075 	cancel_delayed_work_sync(&dev_priv->mm.idle_work);
   5076 
   5077 	return 0;
   5078 
   5079 err:
   5080 	mutex_unlock(&dev->struct_mutex);
   5081 	return ret;
   5082 }
   5083 
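         /*
          * Replay the saved L3 remapping registers for @slice by emitting
          * MI_LOAD_REGISTER_IMM commands on @ring; a no-op if the device has no
          * L3 DPF support or no remapping information was recorded.
          */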
   5084 int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
   5085 {
   5086 	struct drm_device *dev = ring->dev;
   5087 	struct drm_i915_private *dev_priv = dev->dev_private;
   5088 	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
   5089 	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
   5090 	int i, ret;
   5091 
   5092 	if (!HAS_L3_DPF(dev) || !remap_info)
   5093 		return 0;
   5094 
   5095 	ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
   5096 	if (ret)
   5097 		return ret;
   5098 
   5099 	/*
   5100 	 * Note: We do not worry about the concurrent register cacheline hang
   5101 	 * here because no other code should access these registers other than
   5102 	 * at initialization time.
   5103 	 */
   5104 	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
   5105 		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
   5106 		intel_ring_emit(ring, reg_base + i);
   5107 		intel_ring_emit(ring, remap_info[i/4]);
   5108 	}
   5109 
   5110 	intel_ring_advance(ring);
   5111 
   5112 	return ret;
   5113 }
   5114 
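         /*
          * Enable tiled-surface swizzling in the display arbiter (gen5+) and in
          * the GT arbiter (gen6+) whenever bit-6 swizzling is in use.
          */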
   5115 void i915_gem_init_swizzling(struct drm_device *dev)
   5116 {
   5117 	struct drm_i915_private *dev_priv = dev->dev_private;
   5118 
   5119 	if (INTEL_INFO(dev)->gen < 5 ||
   5120 	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
   5121 		return;
   5122 
   5123 	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
   5124 				 DISP_TILE_SURFACE_SWIZZLING);
   5125 
   5126 	if (IS_GEN5(dev))
   5127 		return;
   5128 
   5129 	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
   5130 	if (IS_GEN6(dev))
   5131 		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
   5132 	else if (IS_GEN7(dev))
   5133 		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
   5134 	else if (IS_GEN8(dev))
   5135 		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
   5136 	else
   5137 		BUG();
   5138 }
   5139 
   5140 static bool
   5141 intel_enable_blt(struct drm_device *dev)
   5142 {
   5143 	if (!HAS_BLT(dev))
   5144 		return false;
   5145 
   5146 	/* The blitter was dysfunctional on early prototypes */
   5147 	if (IS_GEN6(dev) && dev->pdev->revision < 8) {
   5148 		DRM_INFO("BLT not supported on this pre-production hardware;"
   5149 			 " graphics performance will be degraded.\n");
   5150 		return false;
   5151 	}
   5152 
   5153 	return true;
   5154 }
   5155 
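         /*
          * Initialize the render ring plus whichever of the BSD, BLT and VEBOX
          * rings the hardware supports, unwinding everything already set up on
          * failure.
          */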
   5156 static int i915_gem_init_rings(struct drm_device *dev)
   5157 {
   5158 	struct drm_i915_private *dev_priv = dev->dev_private;
   5159 	int ret;
   5160 
   5161 	ret = intel_init_render_ring_buffer(dev);
   5162 	if (ret)
   5163 		return ret;
   5164 
   5165 	if (HAS_BSD(dev)) {
   5166 		ret = intel_init_bsd_ring_buffer(dev);
   5167 		if (ret)
   5168 			goto cleanup_render_ring;
   5169 	}
   5170 
   5171 	if (intel_enable_blt(dev)) {
   5172 		ret = intel_init_blt_ring_buffer(dev);
   5173 		if (ret)
   5174 			goto cleanup_bsd_ring;
   5175 	}
   5176 
   5177 	if (HAS_VEBOX(dev)) {
   5178 		ret = intel_init_vebox_ring_buffer(dev);
   5179 		if (ret)
   5180 			goto cleanup_blt_ring;
   5181 	}
    5182 
   5184 	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
   5185 	if (ret)
   5186 		goto cleanup_vebox_ring;
   5187 
   5188 	return 0;
   5189 
   5190 cleanup_vebox_ring:
   5191 	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
   5192 cleanup_blt_ring:
   5193 	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
   5194 cleanup_bsd_ring:
   5195 	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
   5196 cleanup_render_ring:
   5197 	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
   5198 
   5199 	return ret;
   5200 }
   5201 
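         /*
          * Bring the GPU up after load, reset or resume: apply workaround
          * register settings, set up swizzling and the rings, replay the L3
          * remapping and re-enable hardware contexts.
          */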
   5202 int
   5203 i915_gem_init_hw(struct drm_device *dev)
   5204 {
   5205 	struct drm_i915_private *dev_priv = dev->dev_private;
   5206 	int ret, i;
   5207 
   5208 	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
   5209 		return -EIO;
   5210 
   5211 	if (dev_priv->ellc_size)
   5212 		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
   5213 
   5214 	if (IS_HASWELL(dev))
   5215 		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
   5216 			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
   5217 
   5218 	if (HAS_PCH_NOP(dev)) {
   5219 		if (IS_IVYBRIDGE(dev)) {
   5220 			u32 temp = I915_READ(GEN7_MSG_CTL);
   5221 			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
   5222 			I915_WRITE(GEN7_MSG_CTL, temp);
   5223 		} else if (INTEL_INFO(dev)->gen >= 7) {
   5224 			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
   5225 			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
   5226 			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
   5227 		}
   5228 	}
   5229 
   5230 	i915_gem_init_swizzling(dev);
   5231 
   5232 	ret = i915_gem_init_rings(dev);
   5233 	if (ret)
   5234 		return ret;
   5235 
   5236 	for (i = 0; i < NUM_L3_SLICES(dev); i++)
   5237 		i915_gem_l3_remap(&dev_priv->ring[RCS], i);
   5238 
    5239 	/*
    5240 	 * XXX: Contexts should only be initialized once.  A switch to the
    5241 	 * default context, however, is something we'd like to do after reset
    5242 	 * or thaw (the latter may not actually be necessary for HW, but it
    5243 	 * fits our code better).  Context switching requires rings (for the
    5244 	 * do_switch) but must happen before PPGTT is enabled, so don't move this.
    5245 	 */
   5246 	ret = i915_gem_context_enable(dev_priv);
   5247 	if (ret) {
   5248 		DRM_ERROR("Context enable failed %d\n", ret);
   5249 		goto err_out;
   5250 	}
   5251 
   5252 	return 0;
   5253 
   5254 err_out:
   5255 	i915_gem_cleanup_ringbuffer(dev);
   5256 	return ret;
   5257 }
   5258 
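         /*
          * One-time GEM initialization: set up the global GTT and the default
          * contexts, then call i915_gem_init_hw() for the hardware-dependent
          * parts.
          */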
   5259 int i915_gem_init(struct drm_device *dev)
   5260 {
   5261 	struct drm_i915_private *dev_priv = dev->dev_private;
   5262 	int ret;
   5263 
   5264 	mutex_lock(&dev->struct_mutex);
   5265 
   5266 	if (IS_VALLEYVIEW(dev)) {
    5267 		/* VLV A0 (potential hack): the BIOS isn't actually waking us */
   5268 		I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
   5269 		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
   5270 			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
   5271 	}
   5272 	i915_gem_init_global_gtt(dev);
   5273 
   5274 	ret = i915_gem_context_init(dev);
   5275 	if (ret) {
   5276 		mutex_unlock(&dev->struct_mutex);
   5277 		return ret;
   5278 	}
   5279 
   5280 	ret = i915_gem_init_hw(dev);
   5281 	mutex_unlock(&dev->struct_mutex);
   5282 	if (ret) {
   5283 		WARN_ON(dev_priv->mm.aliasing_ppgtt);
   5284 		i915_gem_context_fini(dev);
   5285 		drm_mm_takedown(&dev_priv->gtt.base.mm);
   5286 		return ret;
   5287 	}
   5288 
   5289 	/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
   5290 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
   5291 		dev_priv->dri1.allow_batchbuffer = 1;
   5292 	return 0;
   5293 }
   5294 
   5295 void
   5296 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
   5297 {
   5298 	struct drm_i915_private *dev_priv = dev->dev_private;
   5299 	struct intel_ring_buffer *ring;
   5300 	int i;
   5301 
   5302 	for_each_ring(ring, dev_priv, i)
   5303 		intel_cleanup_ring_buffer(ring);
   5304 }
   5305 
   5306 int
   5307 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
   5308 		       struct drm_file *file_priv)
   5309 {
   5310 	struct drm_i915_private *dev_priv = dev->dev_private;
   5311 	int ret;
   5312 
   5313 	if (drm_core_check_feature(dev, DRIVER_MODESET))
   5314 		return 0;
   5315 
   5316 	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
   5317 		DRM_ERROR("Reenabling wedged hardware, good luck\n");
   5318 		atomic_set(&dev_priv->gpu_error.reset_counter, 0);
   5319 	}
   5320 
   5321 	mutex_lock(&dev->struct_mutex);
   5322 	dev_priv->ums.mm_suspended = 0;
   5323 
   5324 	ret = i915_gem_init_hw(dev);
   5325 	if (ret != 0) {
   5326 		mutex_unlock(&dev->struct_mutex);
   5327 		return ret;
   5328 	}
   5329 
   5330 	BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
   5331 	mutex_unlock(&dev->struct_mutex);
   5332 
   5333 	ret = drm_irq_install(dev);
   5334 	if (ret)
   5335 		goto cleanup_ringbuffer;
   5336 
   5337 	return 0;
   5338 
   5339 cleanup_ringbuffer:
   5340 	mutex_lock(&dev->struct_mutex);
   5341 	i915_gem_cleanup_ringbuffer(dev);
   5342 	dev_priv->ums.mm_suspended = 1;
   5343 	mutex_unlock(&dev->struct_mutex);
   5344 
   5345 	return ret;
   5346 }
   5347 
   5348 int
   5349 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
   5350 		       struct drm_file *file_priv)
   5351 {
   5352 	if (drm_core_check_feature(dev, DRIVER_MODESET))
   5353 		return 0;
   5354 
   5355 	drm_irq_uninstall(dev);
   5356 
   5357 	return i915_gem_suspend(dev);
   5358 }
   5359 
   5360 void
   5361 i915_gem_lastclose(struct drm_device *dev)
   5362 {
   5363 	int ret;
   5364 
   5365 	if (drm_core_check_feature(dev, DRIVER_MODESET))
   5366 		return;
   5367 
   5368 	ret = i915_gem_suspend(dev);
   5369 	if (ret)
   5370 		DRM_ERROR("failed to idle hardware: %d\n", ret);
   5371 }
   5372 
   5373 static void
   5374 init_ring_lists(struct intel_ring_buffer *ring)
   5375 {
   5376 	INIT_LIST_HEAD(&ring->active_list);
   5377 	INIT_LIST_HEAD(&ring->request_list);
   5378 }
   5379 
   5380 void i915_init_vm(struct drm_i915_private *dev_priv,
   5381 		  struct i915_address_space *vm)
   5382 {
   5383 	if (!i915_is_ggtt(vm))
   5384 		drm_mm_init(&vm->mm, vm->start, vm->total);
   5385 	vm->dev = dev_priv->dev;
   5386 	INIT_LIST_HEAD(&vm->active_list);
   5387 	INIT_LIST_HEAD(&vm->inactive_list);
   5388 	INIT_LIST_HEAD(&vm->global_link);
   5389 	list_add_tail(&vm->global_link, &dev_priv->vm_list);
   5390 }
   5391 
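         /*
          * Driver-load-time GEM setup: create the object slab, initialize the
          * VM, object and fence lists, the deferred work items and wait queues,
          * size the fence register file and register the memory shrinker.
          */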
   5392 void
   5393 i915_gem_load(struct drm_device *dev)
   5394 {
   5395 	struct drm_i915_private *dev_priv = dev->dev_private;
   5396 	int i;
   5397 
   5398 	dev_priv->slab =
   5399 		kmem_cache_create("i915_gem_object",
   5400 				  sizeof(struct drm_i915_gem_object), 0,
   5401 				  SLAB_HWCACHE_ALIGN,
   5402 				  NULL);
   5403 
   5404 	INIT_LIST_HEAD(&dev_priv->vm_list);
   5405 	i915_init_vm(dev_priv, &dev_priv->gtt.base);
   5406 
   5407 	INIT_LIST_HEAD(&dev_priv->context_list);
   5408 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
   5409 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
   5410 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
   5411 	for (i = 0; i < I915_NUM_RINGS; i++)
   5412 		init_ring_lists(&dev_priv->ring[i]);
   5413 	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
   5414 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
   5415 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
   5416 			  i915_gem_retire_work_handler);
   5417 	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
   5418 			  i915_gem_idle_work_handler);
   5419 #ifdef __NetBSD__
   5420 	spin_lock_init(&dev_priv->gpu_error.reset_lock);
   5421 	DRM_INIT_WAITQUEUE(&dev_priv->gpu_error.reset_queue, "i915errst");
   5422 #else
   5423 	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
   5424 #endif
   5425 
   5426 	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
   5427 	if (IS_GEN3(dev)) {
   5428 		I915_WRITE(MI_ARB_STATE,
   5429 			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
   5430 	}
   5431 
   5432 	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
   5433 
   5434 	/* Old X drivers will take 0-2 for front, back, depth buffers */
   5435 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
   5436 		dev_priv->fence_reg_start = 3;
   5437 
   5438 	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
   5439 		dev_priv->num_fence_regs = 32;
   5440 	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
   5441 		dev_priv->num_fence_regs = 16;
   5442 	else
   5443 		dev_priv->num_fence_regs = 8;
   5444 
   5445 	/* Initialize fence registers to zero */
   5446 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
   5447 	i915_gem_restore_fences(dev);
   5448 
   5449 	i915_gem_detect_bit_6_swizzle(dev);
   5450 #ifdef __NetBSD__
   5451 	DRM_INIT_WAITQUEUE(&dev_priv->pending_flip_queue, "i915flip");
   5452 	spin_lock_init(&dev_priv->pending_flip_lock);
   5453 #else
   5454 	init_waitqueue_head(&dev_priv->pending_flip_queue);
   5455 #endif
   5456 
   5457 	dev_priv->mm.interruptible = true;
   5458 
   5459 	dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
   5460 	dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
   5461 	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
   5462 	register_shrinker(&dev_priv->mm.inactive_shrinker);
   5463 }
   5464 
   5465 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
   5466 {
   5467 	struct drm_i915_file_private *file_priv = file->driver_priv;
   5468 
   5469 	cancel_delayed_work_sync(&file_priv->mm.idle_work);
   5470 
   5471 	/* Clean up our request list when the client is going away, so that
   5472 	 * later retire_requests won't dereference our soon-to-be-gone
   5473 	 * file_priv.
   5474 	 */
   5475 	spin_lock(&file_priv->mm.lock);
   5476 	while (!list_empty(&file_priv->mm.request_list)) {
   5477 		struct drm_i915_gem_request *request;
   5478 
   5479 		request = list_first_entry(&file_priv->mm.request_list,
   5480 					   struct drm_i915_gem_request,
   5481 					   client_list);
   5482 		list_del(&request->client_list);
   5483 		request->file_priv = NULL;
   5484 	}
   5485 	spin_unlock(&file_priv->mm.lock);
   5486 }
   5487 
   5488 static void
   5489 i915_gem_file_idle_work_handler(struct work_struct *work)
   5490 {
   5491 	struct drm_i915_file_private *file_priv =
   5492 		container_of(work, typeof(*file_priv), mm.idle_work.work);
   5493 
   5494 	atomic_set(&file_priv->rps_wait_boost, false);
   5495 }
   5496 
   5497 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
   5498 {
   5499 	struct drm_i915_file_private *file_priv;
   5500 	int ret;
   5501 
   5502 	DRM_DEBUG_DRIVER("\n");
   5503 
   5504 	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
   5505 	if (!file_priv)
   5506 		return -ENOMEM;
   5507 
   5508 	file->driver_priv = file_priv;
   5509 	file_priv->dev_priv = dev->dev_private;
   5510 	file_priv->file = file;
   5511 
   5512 	spin_lock_init(&file_priv->mm.lock);
   5513 	INIT_LIST_HEAD(&file_priv->mm.request_list);
   5514 	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
   5515 			  i915_gem_file_idle_work_handler);
   5516 
   5517 	ret = i915_gem_context_open(dev, file);
   5518 	if (ret)
   5519 		kfree(file_priv);
   5520 
   5521 	return ret;
   5522 }
   5523 
   5524 #ifndef __NetBSD__
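         /*
          * Best-effort check whether @task currently owns @mutex; used by the
          * shrinker callbacks below to decide whether they may reuse a
          * struct_mutex already held by the calling context.
          */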
   5525 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
   5526 {
   5527 	if (!mutex_is_locked(mutex))
   5528 		return false;
   5529 
   5530 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
   5531 	return mutex->owner == task;
   5532 #else
   5533 	/* Since UP may be pre-empted, we cannot assume that we own the lock */
   5534 	return false;
   5535 #endif
   5536 }
   5537 #endif
   5538 
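         /*
          * Shrinker "count" callback: report how many pages could be reclaimed
          * from unpinned, inactive objects.  Stubbed out to 0 on NetBSD.
          */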
   5539 static unsigned long
   5540 i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
   5541 {
   5542 #ifdef __NetBSD__		/* XXX shrinkers */
   5543 	return 0;
   5544 #else
   5545 	struct drm_i915_private *dev_priv =
   5546 		container_of(shrinker,
   5547 			     struct drm_i915_private,
   5548 			     mm.inactive_shrinker);
   5549 	struct drm_device *dev = dev_priv->dev;
   5550 	struct drm_i915_gem_object *obj;
   5551 	bool unlock = true;
   5552 	unsigned long count;
   5553 
   5554 	if (!mutex_trylock(&dev->struct_mutex)) {
   5555 		if (!mutex_is_locked_by(&dev->struct_mutex, current))
   5556 			return 0;
   5557 
   5558 		if (dev_priv->mm.shrinker_no_lock_stealing)
   5559 			return 0;
   5560 
   5561 		unlock = false;
   5562 	}
   5563 
   5564 	count = 0;
   5565 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
   5566 		if (obj->pages_pin_count == 0)
   5567 			count += obj->base.size >> PAGE_SHIFT;
   5568 
   5569 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
   5570 		if (obj->active)
   5571 			continue;
   5572 
   5573 		if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
   5574 			count += obj->base.size >> PAGE_SHIFT;
   5575 	}
   5576 
   5577 	if (unlock)
   5578 		mutex_unlock(&dev->struct_mutex);
   5579 
   5580 	return count;
   5581 #endif
   5582 }
   5583 
   5584 /* All the new VM stuff */
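         /*
          * Return the offset at which @o is bound in @vm, treating the aliasing
          * PPGTT as the global GTT.  Returns -1 (as an unsigned long) when the
          * object is not bound in that address space.
          */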
   5585 unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
   5586 				  struct i915_address_space *vm)
   5587 {
   5588 	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
   5589 	struct i915_vma *vma;
   5590 
   5591 	if (!dev_priv->mm.aliasing_ppgtt ||
   5592 	    vm == &dev_priv->mm.aliasing_ppgtt->base)
   5593 		vm = &dev_priv->gtt.base;
   5594 
   5595 	BUG_ON(list_empty(&o->vma_list));
    5596 	list_for_each_entry(vma, &o->vma_list, vma_link)
    5597 		if (vma->vm == vm)
    5598 			return vma->node.start;
    5599 
   5601 	return -1;
   5602 }
   5603 
   5604 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
   5605 			struct i915_address_space *vm)
   5606 {
   5607 	struct i915_vma *vma;
   5608 
   5609 	list_for_each_entry(vma, &o->vma_list, vma_link)
   5610 		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
   5611 			return true;
   5612 
   5613 	return false;
   5614 }
   5615 
   5616 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
   5617 {
   5618 	struct i915_vma *vma;
   5619 
   5620 	list_for_each_entry(vma, &o->vma_list, vma_link)
   5621 		if (drm_mm_node_allocated(&vma->node))
   5622 			return true;
   5623 
   5624 	return false;
   5625 }
   5626 
   5627 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
   5628 				struct i915_address_space *vm)
   5629 {
   5630 	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
   5631 	struct i915_vma *vma;
   5632 
   5633 	if (!dev_priv->mm.aliasing_ppgtt ||
   5634 	    vm == &dev_priv->mm.aliasing_ppgtt->base)
   5635 		vm = &dev_priv->gtt.base;
   5636 
   5637 	BUG_ON(list_empty(&o->vma_list));
   5638 
   5639 	list_for_each_entry(vma, &o->vma_list, vma_link)
   5640 		if (vma->vm == vm)
   5641 			return vma->node.size;
   5642 
   5643 	return 0;
   5644 }
   5645 
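         /*
          * Shrinker "scan" callback: purge and then progressively shrink
          * objects until sc->nr_to_scan pages have been freed.  Stubbed out to
          * 0 on NetBSD.
          */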
   5646 static unsigned long
   5647 i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
   5648 {
   5649 #ifdef __NetBSD__		/* XXX shrinkers */
   5650 	return 0;
   5651 #else
   5652 	struct drm_i915_private *dev_priv =
   5653 		container_of(shrinker,
   5654 			     struct drm_i915_private,
   5655 			     mm.inactive_shrinker);
   5656 	struct drm_device *dev = dev_priv->dev;
   5657 	unsigned long freed;
   5658 	bool unlock = true;
   5659 
   5660 	if (!mutex_trylock(&dev->struct_mutex)) {
   5661 		if (!mutex_is_locked_by(&dev->struct_mutex, current))
   5662 			return SHRINK_STOP;
   5663 
   5664 		if (dev_priv->mm.shrinker_no_lock_stealing)
   5665 			return SHRINK_STOP;
   5666 
   5667 		unlock = false;
   5668 	}
   5669 
   5670 	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
   5671 	if (freed < sc->nr_to_scan)
   5672 		freed += __i915_gem_shrink(dev_priv,
   5673 					   sc->nr_to_scan - freed,
   5674 					   false);
   5675 	if (freed < sc->nr_to_scan)
   5676 		freed += i915_gem_shrink_all(dev_priv);
   5677 
   5678 	if (unlock)
   5679 		mutex_unlock(&dev->struct_mutex);
   5680 
   5681 	return freed;
   5682 #endif
   5683 }
   5684 
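         /*
          * Return the global-GTT VMA of @obj, relying on the convention that
          * the GGTT VMA, if any, is the first entry on the object's vma_list.
          */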
   5685 struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
   5686 {
   5687 	struct i915_vma *vma;
   5688 
   5689 	if (WARN_ON(list_empty(&obj->vma_list)))
   5690 		return NULL;
   5691 
   5692 	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
   5693 	if (vma->vm != obj_to_ggtt(obj))
   5694 		return NULL;
   5695 
   5696 	return vma;
   5697 }
   5698