i915_gem.c revision 1.1
      1 /*
       2  * Copyright © 2008 Intel Corporation
      3  *
      4  * Permission is hereby granted, free of charge, to any person obtaining a
      5  * copy of this software and associated documentation files (the "Software"),
      6  * to deal in the Software without restriction, including without limitation
      7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8  * and/or sell copies of the Software, and to permit persons to whom the
      9  * Software is furnished to do so, subject to the following conditions:
     10  *
     11  * The above copyright notice and this permission notice (including the next
     12  * paragraph) shall be included in all copies or substantial portions of the
     13  * Software.
     14  *
     15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     21  * IN THE SOFTWARE.
     22  *
     23  * Authors:
      24  *    Eric Anholt <eric@anholt.net>
     25  *
     26  */
     27 
     28 #include <drm/drmP.h>
     29 #include <drm/i915_drm.h>
     30 #include "i915_drv.h"
     31 #include "i915_trace.h"
     32 #include "intel_drv.h"
     33 #include <linux/shmem_fs.h>
     34 #include <linux/slab.h>
     35 #include <linux/swap.h>
     36 #include <linux/pci.h>
     37 #include <linux/dma-buf.h>
     38 
     39 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
     40 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
     41 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
     42 						    unsigned alignment,
     43 						    bool map_and_fenceable,
     44 						    bool nonblocking);
     45 static int i915_gem_phys_pwrite(struct drm_device *dev,
     46 				struct drm_i915_gem_object *obj,
     47 				struct drm_i915_gem_pwrite *args,
     48 				struct drm_file *file);
     49 
     50 static void i915_gem_write_fence(struct drm_device *dev, int reg,
     51 				 struct drm_i915_gem_object *obj);
     52 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
     53 					 struct drm_i915_fence_reg *fence,
     54 					 bool enable);
     55 
     56 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
     57 				    struct shrink_control *sc);
     58 static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
     59 static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
     60 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
     61 
     62 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
     63 {
     64 	if (obj->tiling_mode)
     65 		i915_gem_release_mmap(obj);
     66 
     67 	/* As we do not have an associated fence register, we will force
     68 	 * a tiling change if we ever need to acquire one.
     69 	 */
     70 	obj->fence_dirty = false;
     71 	obj->fence_reg = I915_FENCE_REG_NONE;
     72 }
     73 
     74 /* some bookkeeping */
     75 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
     76 				  size_t size)
     77 {
     78 	dev_priv->mm.object_count++;
     79 	dev_priv->mm.object_memory += size;
     80 }
     81 
     82 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
     83 				     size_t size)
     84 {
     85 	dev_priv->mm.object_count--;
     86 	dev_priv->mm.object_memory -= size;
     87 }
     88 
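         /* Wait for a pending GPU reset to complete before touching the GPU.
          * Returns 0 immediately if the GPU is not wedged; otherwise wait up
          * to 10 seconds on the error completion, returning -EIO on timeout or
          * the error from an interrupted wait.  If the GPU is still wedged
          * afterwards, bump the completion count back up so the token we just
          * consumed does not starve a later waiter.
          */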
     89 static int
     90 i915_gem_wait_for_error(struct drm_device *dev)
     91 {
     92 	struct drm_i915_private *dev_priv = dev->dev_private;
     93 	struct completion *x = &dev_priv->error_completion;
     94 	unsigned long flags;
     95 	int ret;
     96 
     97 	if (!atomic_read(&dev_priv->mm.wedged))
     98 		return 0;
     99 
    100 	/*
    101 	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
    102 	 * userspace. If it takes that long something really bad is going on and
    103 	 * we should simply try to bail out and fail as gracefully as possible.
    104 	 */
    105 	ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
    106 	if (ret == 0) {
    107 		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
    108 		return -EIO;
    109 	} else if (ret < 0) {
    110 		return ret;
    111 	}
    112 
    113 	if (atomic_read(&dev_priv->mm.wedged)) {
    114 		/* GPU is hung, bump the completion count to account for
    115 		 * the token we just consumed so that we never hit zero and
    116 		 * end up waiting upon a subsequent completion event that
    117 		 * will never happen.
    118 		 */
    119 		spin_lock_irqsave(&x->wait.lock, flags);
    120 		x->done++;
    121 		spin_unlock_irqrestore(&x->wait.lock, flags);
    122 	}
    123 	return 0;
    124 }
    125 
    126 int i915_mutex_lock_interruptible(struct drm_device *dev)
    127 {
    128 	int ret;
    129 
    130 	ret = i915_gem_wait_for_error(dev);
    131 	if (ret)
    132 		return ret;
    133 
    134 	ret = mutex_lock_interruptible(&dev->struct_mutex);
    135 	if (ret)
    136 		return ret;
    137 
    138 	WARN_ON(i915_verify_lists(dev));
    139 	return 0;
    140 }
    141 
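         /* An object is "inactive" when it is bound into the GTT but no longer
          * being read or written by the GPU.
          */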
    142 static inline bool
    143 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
    144 {
    145 	return obj->gtt_space && !obj->active;
    146 }
    147 
    148 int
    149 i915_gem_init_ioctl(struct drm_device *dev, void *data,
    150 		    struct drm_file *file)
    151 {
    152 	struct drm_i915_gem_init *args = data;
    153 
    154 	if (drm_core_check_feature(dev, DRIVER_MODESET))
    155 		return -ENODEV;
    156 
    157 	if (args->gtt_start >= args->gtt_end ||
    158 	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
    159 		return -EINVAL;
    160 
    161 	/* GEM with user mode setting was never supported on ilk and later. */
    162 	if (INTEL_INFO(dev)->gen >= 5)
    163 		return -ENODEV;
    164 
    165 	mutex_lock(&dev->struct_mutex);
    166 	i915_gem_init_global_gtt(dev, args->gtt_start,
    167 				 args->gtt_end, args->gtt_end);
    168 	mutex_unlock(&dev->struct_mutex);
    169 
    170 	return 0;
    171 }
    172 
    173 int
    174 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
    175 			    struct drm_file *file)
    176 {
    177 	struct drm_i915_private *dev_priv = dev->dev_private;
    178 	struct drm_i915_gem_get_aperture *args = data;
    179 	struct drm_i915_gem_object *obj;
    180 	size_t pinned;
    181 
    182 	pinned = 0;
    183 	mutex_lock(&dev->struct_mutex);
    184 	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
    185 		if (obj->pin_count)
    186 			pinned += obj->gtt_space->size;
    187 	mutex_unlock(&dev->struct_mutex);
    188 
    189 	args->aper_size = dev_priv->mm.gtt_total;
    190 	args->aper_available_size = args->aper_size - pinned;
    191 
    192 	return 0;
    193 }
    194 
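         /* Common backend for the create and dumb-create ioctls: round the
          * requested size up to whole pages, allocate a GEM object of that
          * size and return a new handle to it through @handle_p.
          */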
    195 static int
    196 i915_gem_create(struct drm_file *file,
    197 		struct drm_device *dev,
    198 		uint64_t size,
    199 		uint32_t *handle_p)
    200 {
    201 	struct drm_i915_gem_object *obj;
    202 	int ret;
    203 	u32 handle;
    204 
    205 	size = roundup(size, PAGE_SIZE);
    206 	if (size == 0)
    207 		return -EINVAL;
    208 
    209 	/* Allocate the new object */
    210 	obj = i915_gem_alloc_object(dev, size);
    211 	if (obj == NULL)
    212 		return -ENOMEM;
    213 
    214 	ret = drm_gem_handle_create(file, &obj->base, &handle);
    215 	if (ret) {
    216 		drm_gem_object_release(&obj->base);
    217 		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
    218 		kfree(obj);
    219 		return ret;
    220 	}
    221 
    222 	/* drop reference from allocate - handle holds it now */
    223 	drm_gem_object_unreference(&obj->base);
    224 	trace_i915_gem_object_create(obj);
    225 
    226 	*handle_p = handle;
    227 	return 0;
    228 }
    229 
    230 int
    231 i915_gem_dumb_create(struct drm_file *file,
    232 		     struct drm_device *dev,
    233 		     struct drm_mode_create_dumb *args)
    234 {
    235 	/* have to work out size/pitch and return them */
    236 	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
    237 	args->size = args->pitch * args->height;
    238 	return i915_gem_create(file, dev,
    239 			       args->size, &args->handle);
    240 }
    241 
    242 int i915_gem_dumb_destroy(struct drm_file *file,
    243 			  struct drm_device *dev,
    244 			  uint32_t handle)
    245 {
    246 	return drm_gem_handle_delete(file, handle);
    247 }
    248 
    249 /**
    250  * Creates a new mm object and returns a handle to it.
    251  */
    252 int
    253 i915_gem_create_ioctl(struct drm_device *dev, void *data,
    254 		      struct drm_file *file)
    255 {
    256 	struct drm_i915_gem_create *args = data;
    257 
    258 	return i915_gem_create(file, dev,
    259 			       args->size, &args->handle);
    260 }
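
         /* Example (illustrative userspace sketch, not part of this driver):
          * allocating a 4KiB buffer object with the create ioctl, assuming
          * libdrm's drmIoctl() and an open DRM file descriptor named drm_fd.
          *
          *	struct drm_i915_gem_create create = { .size = 4096 };
          *
          *	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE, &create))
          *		err(1, "create");
          *	// create.handle now names the new buffer object
          */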
    261 
    262 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
    263 {
    264 	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
    265 
    266 	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
    267 		obj->tiling_mode != I915_TILING_NONE;
    268 }
    269 
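         /* Copy helpers for objects whose X-tiling layout depends on bit 17 of
          * the page's physical address (bit 6/bit 17 swizzling).  Data is copied
          * in chunks of at most one cacheline (64 bytes), with bit 6 of the
          * object offset flipped (offset ^ 64) to compensate for the swizzle.
          */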
    270 static inline int
    271 __copy_to_user_swizzled(char __user *cpu_vaddr,
    272 			const char *gpu_vaddr, int gpu_offset,
    273 			int length)
    274 {
    275 	int ret, cpu_offset = 0;
    276 
    277 	while (length > 0) {
    278 		int cacheline_end = ALIGN(gpu_offset + 1, 64);
    279 		int this_length = min(cacheline_end - gpu_offset, length);
    280 		int swizzled_gpu_offset = gpu_offset ^ 64;
    281 
    282 		ret = __copy_to_user(cpu_vaddr + cpu_offset,
    283 				     gpu_vaddr + swizzled_gpu_offset,
    284 				     this_length);
    285 		if (ret)
    286 			return ret + length;
    287 
    288 		cpu_offset += this_length;
    289 		gpu_offset += this_length;
    290 		length -= this_length;
    291 	}
    292 
    293 	return 0;
    294 }
    295 
    296 static inline int
    297 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
    298 			  const char __user *cpu_vaddr,
    299 			  int length)
    300 {
    301 	int ret, cpu_offset = 0;
    302 
    303 	while (length > 0) {
    304 		int cacheline_end = ALIGN(gpu_offset + 1, 64);
    305 		int this_length = min(cacheline_end - gpu_offset, length);
    306 		int swizzled_gpu_offset = gpu_offset ^ 64;
    307 
    308 		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
    309 				       cpu_vaddr + cpu_offset,
    310 				       this_length);
    311 		if (ret)
    312 			return ret + length;
    313 
    314 		cpu_offset += this_length;
    315 		gpu_offset += this_length;
    316 		length -= this_length;
    317 	}
    318 
    319 	return 0;
    320 }
    321 
    322 /* Per-page copy function for the shmem pread fastpath.
    323  * Flushes invalid cachelines before reading the target if
    324  * needs_clflush is set. */
    325 static int
    326 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
    327 		 char __user *user_data,
    328 		 bool page_do_bit17_swizzling, bool needs_clflush)
    329 {
    330 	char *vaddr;
    331 	int ret;
    332 
    333 	if (unlikely(page_do_bit17_swizzling))
    334 		return -EINVAL;
    335 
    336 	vaddr = kmap_atomic(page);
    337 	if (needs_clflush)
    338 		drm_clflush_virt_range(vaddr + shmem_page_offset,
    339 				       page_length);
    340 	ret = __copy_to_user_inatomic(user_data,
    341 				      vaddr + shmem_page_offset,
    342 				      page_length);
    343 	kunmap_atomic(vaddr);
    344 
    345 	return ret ? -EFAULT : 0;
    346 }
    347 
    348 static void
    349 shmem_clflush_swizzled_range(char *addr, unsigned long length,
    350 			     bool swizzled)
    351 {
    352 	if (unlikely(swizzled)) {
    353 		unsigned long start = (unsigned long) addr;
    354 		unsigned long end = (unsigned long) addr + length;
    355 
    356 		/* For swizzling simply ensure that we always flush both
    357 		 * channels. Lame, but simple and it works. Swizzled
    358 		 * pwrite/pread is far from a hotpath - current userspace
    359 		 * doesn't use it at all. */
    360 		start = round_down(start, 128);
    361 		end = round_up(end, 128);
    362 
    363 		drm_clflush_virt_range((void *)start, end - start);
    364 	} else {
    365 		drm_clflush_virt_range(addr, length);
    366 	}
    367 
    368 }
    369 
    370 /* Only difference to the fast-path function is that this can handle bit17
    371  * and uses non-atomic copy and kmap functions. */
    372 static int
    373 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
    374 		 char __user *user_data,
    375 		 bool page_do_bit17_swizzling, bool needs_clflush)
    376 {
    377 	char *vaddr;
    378 	int ret;
    379 
    380 	vaddr = kmap(page);
    381 	if (needs_clflush)
    382 		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
    383 					     page_length,
    384 					     page_do_bit17_swizzling);
    385 
    386 	if (page_do_bit17_swizzling)
    387 		ret = __copy_to_user_swizzled(user_data,
    388 					      vaddr, shmem_page_offset,
    389 					      page_length);
    390 	else
    391 		ret = __copy_to_user(user_data,
    392 				     vaddr + shmem_page_offset,
    393 				     page_length);
    394 	kunmap(page);
    395 
     396 	return ret ? -EFAULT : 0;
    397 }
    398 
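         /* Read an object's shmem backing pages into the user buffer.  Each page
          * is first attempted with the atomic kmap fastpath; when that cannot be
          * used (bit-17 swizzled pages, or a fault on the user buffer) we drop
          * struct_mutex, prefault the destination once and fall back to the
          * sleeping slowpath before retaking the lock.
          */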
    399 static int
    400 i915_gem_shmem_pread(struct drm_device *dev,
    401 		     struct drm_i915_gem_object *obj,
    402 		     struct drm_i915_gem_pread *args,
    403 		     struct drm_file *file)
    404 {
    405 	char __user *user_data;
    406 	ssize_t remain;
    407 	loff_t offset;
    408 	int shmem_page_offset, page_length, ret = 0;
    409 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
    410 	int hit_slowpath = 0;
    411 	int prefaulted = 0;
    412 	int needs_clflush = 0;
    413 	struct scatterlist *sg;
    414 	int i;
    415 
    416 	user_data = (char __user *) (uintptr_t) args->data_ptr;
    417 	remain = args->size;
    418 
    419 	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
    420 
    421 	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
     422 		/* If we're not in the cpu read domain, set ourselves into the gtt
    423 		 * read domain and manually flush cachelines (if required). This
    424 		 * optimizes for the case when the gpu will dirty the data
    425 		 * anyway again before the next pread happens. */
    426 		if (obj->cache_level == I915_CACHE_NONE)
    427 			needs_clflush = 1;
    428 		if (obj->gtt_space) {
    429 			ret = i915_gem_object_set_to_gtt_domain(obj, false);
    430 			if (ret)
    431 				return ret;
    432 		}
    433 	}
    434 
    435 	ret = i915_gem_object_get_pages(obj);
    436 	if (ret)
    437 		return ret;
    438 
    439 	i915_gem_object_pin_pages(obj);
    440 
    441 	offset = args->offset;
    442 
    443 	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
    444 		struct page *page;
    445 
    446 		if (i < offset >> PAGE_SHIFT)
    447 			continue;
    448 
    449 		if (remain <= 0)
    450 			break;
    451 
    452 		/* Operation in this page
    453 		 *
    454 		 * shmem_page_offset = offset within page in shmem file
    455 		 * page_length = bytes to copy for this page
    456 		 */
    457 		shmem_page_offset = offset_in_page(offset);
    458 		page_length = remain;
    459 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
    460 			page_length = PAGE_SIZE - shmem_page_offset;
    461 
    462 		page = sg_page(sg);
    463 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
    464 			(page_to_phys(page) & (1 << 17)) != 0;
    465 
    466 		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
    467 				       user_data, page_do_bit17_swizzling,
    468 				       needs_clflush);
    469 		if (ret == 0)
    470 			goto next_page;
    471 
    472 		hit_slowpath = 1;
    473 		mutex_unlock(&dev->struct_mutex);
    474 
    475 		if (!prefaulted) {
    476 			ret = fault_in_multipages_writeable(user_data, remain);
    477 			/* Userspace is tricking us, but we've already clobbered
    478 			 * its pages with the prefault and promised to write the
    479 			 * data up to the first fault. Hence ignore any errors
    480 			 * and just continue. */
    481 			(void)ret;
    482 			prefaulted = 1;
    483 		}
    484 
    485 		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
    486 				       user_data, page_do_bit17_swizzling,
    487 				       needs_clflush);
    488 
    489 		mutex_lock(&dev->struct_mutex);
    490 
    491 next_page:
    492 		mark_page_accessed(page);
    493 
    494 		if (ret)
    495 			goto out;
    496 
    497 		remain -= page_length;
    498 		user_data += page_length;
    499 		offset += page_length;
    500 	}
    501 
    502 out:
    503 	i915_gem_object_unpin_pages(obj);
    504 
    505 	if (hit_slowpath) {
    506 		/* Fixup: Kill any reinstated backing storage pages */
    507 		if (obj->madv == __I915_MADV_PURGED)
    508 			i915_gem_object_truncate(obj);
    509 	}
    510 
    511 	return ret;
    512 }
    513 
    514 /**
    515  * Reads data from the object referenced by handle.
    516  *
    517  * On error, the contents of *data are undefined.
    518  */
    519 int
    520 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
    521 		     struct drm_file *file)
    522 {
    523 	struct drm_i915_gem_pread *args = data;
    524 	struct drm_i915_gem_object *obj;
    525 	int ret = 0;
    526 
    527 	if (args->size == 0)
    528 		return 0;
    529 
    530 	if (!access_ok(VERIFY_WRITE,
    531 		       (char __user *)(uintptr_t)args->data_ptr,
    532 		       args->size))
    533 		return -EFAULT;
    534 
    535 	ret = i915_mutex_lock_interruptible(dev);
    536 	if (ret)
    537 		return ret;
    538 
    539 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
    540 	if (&obj->base == NULL) {
    541 		ret = -ENOENT;
    542 		goto unlock;
    543 	}
    544 
    545 	/* Bounds check source.  */
    546 	if (args->offset > obj->base.size ||
    547 	    args->size > obj->base.size - args->offset) {
    548 		ret = -EINVAL;
    549 		goto out;
    550 	}
    551 
    552 	/* prime objects have no backing filp to GEM pread/pwrite
    553 	 * pages from.
    554 	 */
    555 	if (!obj->base.filp) {
    556 		ret = -EINVAL;
    557 		goto out;
    558 	}
    559 
    560 	trace_i915_gem_object_pread(obj, args->offset, args->size);
    561 
    562 	ret = i915_gem_shmem_pread(dev, obj, args, file);
    563 
    564 out:
    565 	drm_gem_object_unreference(&obj->base);
    566 unlock:
    567 	mutex_unlock(&dev->struct_mutex);
    568 	return ret;
    569 }
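
         /* Example (illustrative userspace sketch, assuming libdrm's drmIoctl(),
          * <drm/i915_drm.h> and a handle obtained from the create ioctl): read
          * the first 4KiB of an object into a local buffer.
          *
          *	char buf[4096];
          *	struct drm_i915_gem_pread pread = {
          *		.handle   = handle,
          *		.offset   = 0,
          *		.size     = sizeof(buf),
          *		.data_ptr = (uintptr_t)buf,
          *	};
          *
          *	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_PREAD, &pread))
          *		err(1, "pread");
          */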
    570 
    571 /* This is the fast write path which cannot handle
    572  * page faults in the source data
    573  */
    574 
    575 static inline int
    576 fast_user_write(struct io_mapping *mapping,
    577 		loff_t page_base, int page_offset,
    578 		char __user *user_data,
    579 		int length)
    580 {
    581 	void __iomem *vaddr_atomic;
    582 	void *vaddr;
    583 	unsigned long unwritten;
    584 
    585 	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
    586 	/* We can use the cpu mem copy function because this is X86. */
    587 	vaddr = (void __force*)vaddr_atomic + page_offset;
    588 	unwritten = __copy_from_user_inatomic_nocache(vaddr,
    589 						      user_data, length);
    590 	io_mapping_unmap_atomic(vaddr_atomic);
    591 	return unwritten;
    592 }
    593 
    594 /**
    595  * This is the fast pwrite path, where we copy the data directly from the
    596  * user into the GTT, uncached.
    597  */
    598 static int
    599 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
    600 			 struct drm_i915_gem_object *obj,
    601 			 struct drm_i915_gem_pwrite *args,
    602 			 struct drm_file *file)
    603 {
    604 	drm_i915_private_t *dev_priv = dev->dev_private;
    605 	ssize_t remain;
    606 	loff_t offset, page_base;
    607 	char __user *user_data;
    608 	int page_offset, page_length, ret;
    609 
    610 	ret = i915_gem_object_pin(obj, 0, true, true);
    611 	if (ret)
    612 		goto out;
    613 
    614 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
    615 	if (ret)
    616 		goto out_unpin;
    617 
    618 	ret = i915_gem_object_put_fence(obj);
    619 	if (ret)
    620 		goto out_unpin;
    621 
    622 	user_data = (char __user *) (uintptr_t) args->data_ptr;
    623 	remain = args->size;
    624 
    625 	offset = obj->gtt_offset + args->offset;
    626 
    627 	while (remain > 0) {
    628 		/* Operation in this page
    629 		 *
    630 		 * page_base = page offset within aperture
    631 		 * page_offset = offset within page
    632 		 * page_length = bytes to copy for this page
    633 		 */
    634 		page_base = offset & PAGE_MASK;
    635 		page_offset = offset_in_page(offset);
    636 		page_length = remain;
    637 		if ((page_offset + remain) > PAGE_SIZE)
    638 			page_length = PAGE_SIZE - page_offset;
    639 
    640 		/* If we get a fault while copying data, then (presumably) our
    641 		 * source page isn't available.  Return the error and we'll
    642 		 * retry in the slow path.
    643 		 */
    644 		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
    645 				    page_offset, user_data, page_length)) {
    646 			ret = -EFAULT;
    647 			goto out_unpin;
    648 		}
    649 
    650 		remain -= page_length;
    651 		user_data += page_length;
    652 		offset += page_length;
    653 	}
    654 
    655 out_unpin:
    656 	i915_gem_object_unpin(obj);
    657 out:
    658 	return ret;
    659 }
    660 
    661 /* Per-page copy function for the shmem pwrite fastpath.
    662  * Flushes invalid cachelines before writing to the target if
    663  * needs_clflush_before is set and flushes out any written cachelines after
    664  * writing if needs_clflush is set. */
    665 static int
    666 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
    667 		  char __user *user_data,
    668 		  bool page_do_bit17_swizzling,
    669 		  bool needs_clflush_before,
    670 		  bool needs_clflush_after)
    671 {
    672 	char *vaddr;
    673 	int ret;
    674 
    675 	if (unlikely(page_do_bit17_swizzling))
    676 		return -EINVAL;
    677 
    678 	vaddr = kmap_atomic(page);
    679 	if (needs_clflush_before)
    680 		drm_clflush_virt_range(vaddr + shmem_page_offset,
    681 				       page_length);
    682 	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
    683 						user_data,
    684 						page_length);
    685 	if (needs_clflush_after)
    686 		drm_clflush_virt_range(vaddr + shmem_page_offset,
    687 				       page_length);
    688 	kunmap_atomic(vaddr);
    689 
    690 	return ret ? -EFAULT : 0;
    691 }
    692 
    693 /* Only difference to the fast-path function is that this can handle bit17
    694  * and uses non-atomic copy and kmap functions. */
    695 static int
    696 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
    697 		  char __user *user_data,
    698 		  bool page_do_bit17_swizzling,
    699 		  bool needs_clflush_before,
    700 		  bool needs_clflush_after)
    701 {
    702 	char *vaddr;
    703 	int ret;
    704 
    705 	vaddr = kmap(page);
    706 	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
    707 		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
    708 					     page_length,
    709 					     page_do_bit17_swizzling);
    710 	if (page_do_bit17_swizzling)
    711 		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
    712 						user_data,
    713 						page_length);
    714 	else
    715 		ret = __copy_from_user(vaddr + shmem_page_offset,
    716 				       user_data,
    717 				       page_length);
    718 	if (needs_clflush_after)
    719 		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
    720 					     page_length,
    721 					     page_do_bit17_swizzling);
    722 	kunmap(page);
    723 
    724 	return ret ? -EFAULT : 0;
    725 }
    726 
    727 static int
    728 i915_gem_shmem_pwrite(struct drm_device *dev,
    729 		      struct drm_i915_gem_object *obj,
    730 		      struct drm_i915_gem_pwrite *args,
    731 		      struct drm_file *file)
    732 {
    733 	ssize_t remain;
    734 	loff_t offset;
    735 	char __user *user_data;
    736 	int shmem_page_offset, page_length, ret = 0;
    737 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
    738 	int hit_slowpath = 0;
    739 	int needs_clflush_after = 0;
    740 	int needs_clflush_before = 0;
    741 	int i;
    742 	struct scatterlist *sg;
    743 
    744 	user_data = (char __user *) (uintptr_t) args->data_ptr;
    745 	remain = args->size;
    746 
    747 	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
    748 
    749 	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
     750 		/* If we're not in the cpu write domain, set ourselves into the gtt
    751 		 * write domain and manually flush cachelines (if required). This
    752 		 * optimizes for the case when the gpu will use the data
    753 		 * right away and we therefore have to clflush anyway. */
    754 		if (obj->cache_level == I915_CACHE_NONE)
    755 			needs_clflush_after = 1;
    756 		if (obj->gtt_space) {
    757 			ret = i915_gem_object_set_to_gtt_domain(obj, true);
    758 			if (ret)
    759 				return ret;
    760 		}
    761 	}
     762 	/* The same trick applies to invalidating partially written cachelines
     763 	 * before writing.  */
    764 	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
    765 	    && obj->cache_level == I915_CACHE_NONE)
    766 		needs_clflush_before = 1;
    767 
    768 	ret = i915_gem_object_get_pages(obj);
    769 	if (ret)
    770 		return ret;
    771 
    772 	i915_gem_object_pin_pages(obj);
    773 
    774 	offset = args->offset;
    775 	obj->dirty = 1;
    776 
    777 	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
    778 		struct page *page;
    779 		int partial_cacheline_write;
    780 
    781 		if (i < offset >> PAGE_SHIFT)
    782 			continue;
    783 
    784 		if (remain <= 0)
    785 			break;
    786 
    787 		/* Operation in this page
    788 		 *
    789 		 * shmem_page_offset = offset within page in shmem file
    790 		 * page_length = bytes to copy for this page
    791 		 */
    792 		shmem_page_offset = offset_in_page(offset);
    793 
    794 		page_length = remain;
    795 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
    796 			page_length = PAGE_SIZE - shmem_page_offset;
    797 
    798 		/* If we don't overwrite a cacheline completely we need to be
    799 		 * careful to have up-to-date data by first clflushing. Don't
     800 		 * overcomplicate things and flush the entire cacheline. */
    801 		partial_cacheline_write = needs_clflush_before &&
    802 			((shmem_page_offset | page_length)
    803 				& (boot_cpu_data.x86_clflush_size - 1));
    804 
    805 		page = sg_page(sg);
    806 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
    807 			(page_to_phys(page) & (1 << 17)) != 0;
    808 
    809 		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
    810 					user_data, page_do_bit17_swizzling,
    811 					partial_cacheline_write,
    812 					needs_clflush_after);
    813 		if (ret == 0)
    814 			goto next_page;
    815 
    816 		hit_slowpath = 1;
    817 		mutex_unlock(&dev->struct_mutex);
    818 		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
    819 					user_data, page_do_bit17_swizzling,
    820 					partial_cacheline_write,
    821 					needs_clflush_after);
    822 
    823 		mutex_lock(&dev->struct_mutex);
    824 
    825 next_page:
    826 		set_page_dirty(page);
    827 		mark_page_accessed(page);
    828 
    829 		if (ret)
    830 			goto out;
    831 
    832 		remain -= page_length;
    833 		user_data += page_length;
    834 		offset += page_length;
    835 	}
    836 
    837 out:
    838 	i915_gem_object_unpin_pages(obj);
    839 
    840 	if (hit_slowpath) {
    841 		/* Fixup: Kill any reinstated backing storage pages */
    842 		if (obj->madv == __I915_MADV_PURGED)
    843 			i915_gem_object_truncate(obj);
    844 		/* and flush dirty cachelines in case the object isn't in the cpu write
    845 		 * domain anymore. */
    846 		if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
    847 			i915_gem_clflush_object(obj);
    848 			i915_gem_chipset_flush(dev);
    849 		}
    850 	}
    851 
    852 	if (needs_clflush_after)
    853 		i915_gem_chipset_flush(dev);
    854 
    855 	return ret;
    856 }
    857 
    858 /**
    859  * Writes data to the object referenced by handle.
    860  *
    861  * On error, the contents of the buffer that were to be modified are undefined.
    862  */
    863 int
    864 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
    865 		      struct drm_file *file)
    866 {
    867 	struct drm_i915_gem_pwrite *args = data;
    868 	struct drm_i915_gem_object *obj;
    869 	int ret;
    870 
    871 	if (args->size == 0)
    872 		return 0;
    873 
    874 	if (!access_ok(VERIFY_READ,
    875 		       (char __user *)(uintptr_t)args->data_ptr,
    876 		       args->size))
    877 		return -EFAULT;
    878 
    879 	ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
    880 					   args->size);
    881 	if (ret)
    882 		return -EFAULT;
    883 
    884 	ret = i915_mutex_lock_interruptible(dev);
    885 	if (ret)
    886 		return ret;
    887 
    888 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
    889 	if (&obj->base == NULL) {
    890 		ret = -ENOENT;
    891 		goto unlock;
    892 	}
    893 
    894 	/* Bounds check destination. */
    895 	if (args->offset > obj->base.size ||
    896 	    args->size > obj->base.size - args->offset) {
    897 		ret = -EINVAL;
    898 		goto out;
    899 	}
    900 
    901 	/* prime objects have no backing filp to GEM pread/pwrite
    902 	 * pages from.
    903 	 */
    904 	if (!obj->base.filp) {
    905 		ret = -EINVAL;
    906 		goto out;
    907 	}
    908 
    909 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
    910 
    911 	ret = -EFAULT;
    912 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
    913 	 * it would end up going through the fenced access, and we'll get
    914 	 * different detiling behavior between reading and writing.
    915 	 * pread/pwrite currently are reading and writing from the CPU
    916 	 * perspective, requiring manual detiling by the client.
    917 	 */
    918 	if (obj->phys_obj) {
    919 		ret = i915_gem_phys_pwrite(dev, obj, args, file);
    920 		goto out;
    921 	}
    922 
    923 	if (obj->cache_level == I915_CACHE_NONE &&
    924 	    obj->tiling_mode == I915_TILING_NONE &&
    925 	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
    926 		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
    927 		/* Note that the gtt paths might fail with non-page-backed user
    928 		 * pointers (e.g. gtt mappings when moving data between
    929 		 * textures). Fallback to the shmem path in that case. */
    930 	}
    931 
    932 	if (ret == -EFAULT || ret == -ENOSPC)
    933 		ret = i915_gem_shmem_pwrite(dev, obj, args, file);
    934 
    935 out:
    936 	drm_gem_object_unreference(&obj->base);
    937 unlock:
    938 	mutex_unlock(&dev->struct_mutex);
    939 	return ret;
    940 }
    941 
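         /* Check whether the GPU is wedged.  Returns 0 when it is usable,
          * -EAGAIN while a reset is still in progress (interruptible callers
          * should back off and retry), and -EIO when the caller cannot handle
          * -EAGAIN or the reset completed without unwedging the GPU.
          */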
    942 int
    943 i915_gem_check_wedge(struct drm_i915_private *dev_priv,
    944 		     bool interruptible)
    945 {
    946 	if (atomic_read(&dev_priv->mm.wedged)) {
    947 		struct completion *x = &dev_priv->error_completion;
    948 		bool recovery_complete;
    949 		unsigned long flags;
    950 
    951 		/* Give the error handler a chance to run. */
    952 		spin_lock_irqsave(&x->wait.lock, flags);
    953 		recovery_complete = x->done > 0;
    954 		spin_unlock_irqrestore(&x->wait.lock, flags);
    955 
    956 		/* Non-interruptible callers can't handle -EAGAIN, hence return
    957 		 * -EIO unconditionally for these. */
    958 		if (!interruptible)
    959 			return -EIO;
    960 
    961 		/* Recovery complete, but still wedged means reset failure. */
    962 		if (recovery_complete)
    963 			return -EIO;
    964 
    965 		return -EAGAIN;
    966 	}
    967 
    968 	return 0;
    969 }
    970 
    971 /*
    972  * Compare seqno against outstanding lazy request. Emit a request if they are
    973  * equal.
    974  */
    975 static int
    976 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
    977 {
    978 	int ret;
    979 
    980 	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
    981 
    982 	ret = 0;
    983 	if (seqno == ring->outstanding_lazy_request)
    984 		ret = i915_add_request(ring, NULL, NULL);
    985 
    986 	return ret;
    987 }
    988 
    989 /**
    990  * __wait_seqno - wait until execution of seqno has finished
    991  * @ring: the ring expected to report seqno
     992  * @seqno: the sequence number we are waiting for
    993  * @interruptible: do an interruptible wait (normally yes)
    994  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
    995  *
     996  * Returns 0 if the seqno was found within the allotted time. Else returns the
    997  * errno with remaining time filled in timeout argument.
    998  */
    999 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
   1000 			bool interruptible, struct timespec *timeout)
   1001 {
   1002 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
   1003 	struct timespec before, now, wait_time={1,0};
   1004 	unsigned long timeout_jiffies;
   1005 	long end;
   1006 	bool wait_forever = true;
   1007 	int ret;
   1008 
   1009 	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
   1010 		return 0;
   1011 
   1012 	trace_i915_gem_request_wait_begin(ring, seqno);
   1013 
   1014 	if (timeout != NULL) {
   1015 		wait_time = *timeout;
   1016 		wait_forever = false;
   1017 	}
   1018 
   1019 	timeout_jiffies = timespec_to_jiffies(&wait_time);
   1020 
   1021 	if (WARN_ON(!ring->irq_get(ring)))
   1022 		return -ENODEV;
   1023 
    1024 	/* Record current time in case we are interrupted by a signal, or wedged */
   1025 	getrawmonotonic(&before);
   1026 
   1027 #define EXIT_COND \
   1028 	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
   1029 	atomic_read(&dev_priv->mm.wedged))
   1030 	do {
   1031 		if (interruptible)
   1032 			end = wait_event_interruptible_timeout(ring->irq_queue,
   1033 							       EXIT_COND,
   1034 							       timeout_jiffies);
   1035 		else
   1036 			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
   1037 						 timeout_jiffies);
   1038 
   1039 		ret = i915_gem_check_wedge(dev_priv, interruptible);
   1040 		if (ret)
   1041 			end = ret;
   1042 	} while (end == 0 && wait_forever);
   1043 
   1044 	getrawmonotonic(&now);
   1045 
   1046 	ring->irq_put(ring);
   1047 	trace_i915_gem_request_wait_end(ring, seqno);
   1048 #undef EXIT_COND
   1049 
   1050 	if (timeout) {
   1051 		struct timespec sleep_time = timespec_sub(now, before);
   1052 		*timeout = timespec_sub(*timeout, sleep_time);
   1053 	}
   1054 
   1055 	switch (end) {
   1056 	case -EIO:
   1057 	case -EAGAIN: /* Wedged */
   1058 	case -ERESTARTSYS: /* Signal */
   1059 		return (int)end;
   1060 	case 0: /* Timeout */
   1061 		if (timeout)
   1062 			set_normalized_timespec(timeout, 0, 0);
   1063 		return -ETIME;
   1064 	default: /* Completed */
   1065 		WARN_ON(end < 0); /* We're not aware of other errors */
   1066 		return 0;
   1067 	}
   1068 }
   1069 
   1070 /**
   1071  * Waits for a sequence number to be signaled, and cleans up the
   1072  * request and object lists appropriately for that event.
   1073  */
   1074 int
   1075 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
   1076 {
   1077 	struct drm_device *dev = ring->dev;
   1078 	struct drm_i915_private *dev_priv = dev->dev_private;
   1079 	bool interruptible = dev_priv->mm.interruptible;
   1080 	int ret;
   1081 
   1082 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
   1083 	BUG_ON(seqno == 0);
   1084 
   1085 	ret = i915_gem_check_wedge(dev_priv, interruptible);
   1086 	if (ret)
   1087 		return ret;
   1088 
   1089 	ret = i915_gem_check_olr(ring, seqno);
   1090 	if (ret)
   1091 		return ret;
   1092 
   1093 	return __wait_seqno(ring, seqno, interruptible, NULL);
   1094 }
   1095 
   1096 /**
   1097  * Ensures that all rendering to the object has completed and the object is
   1098  * safe to unbind from the GTT or access from the CPU.
   1099  */
   1100 static __must_check int
   1101 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
   1102 			       bool readonly)
   1103 {
   1104 	struct intel_ring_buffer *ring = obj->ring;
   1105 	u32 seqno;
   1106 	int ret;
   1107 
   1108 	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
   1109 	if (seqno == 0)
   1110 		return 0;
   1111 
   1112 	ret = i915_wait_seqno(ring, seqno);
   1113 	if (ret)
   1114 		return ret;
   1115 
   1116 	i915_gem_retire_requests_ring(ring);
   1117 
   1118 	/* Manually manage the write flush as we may have not yet
   1119 	 * retired the buffer.
   1120 	 */
   1121 	if (obj->last_write_seqno &&
   1122 	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
   1123 		obj->last_write_seqno = 0;
   1124 		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
   1125 	}
   1126 
   1127 	return 0;
   1128 }
   1129 
   1130 /* A nonblocking variant of the above wait. This is a highly dangerous routine
   1131  * as the object state may change during this call.
   1132  */
   1133 static __must_check int
   1134 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
   1135 					    bool readonly)
   1136 {
   1137 	struct drm_device *dev = obj->base.dev;
   1138 	struct drm_i915_private *dev_priv = dev->dev_private;
   1139 	struct intel_ring_buffer *ring = obj->ring;
   1140 	u32 seqno;
   1141 	int ret;
   1142 
   1143 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
   1144 	BUG_ON(!dev_priv->mm.interruptible);
   1145 
   1146 	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
   1147 	if (seqno == 0)
   1148 		return 0;
   1149 
   1150 	ret = i915_gem_check_wedge(dev_priv, true);
   1151 	if (ret)
   1152 		return ret;
   1153 
   1154 	ret = i915_gem_check_olr(ring, seqno);
   1155 	if (ret)
   1156 		return ret;
   1157 
   1158 	mutex_unlock(&dev->struct_mutex);
   1159 	ret = __wait_seqno(ring, seqno, true, NULL);
   1160 	mutex_lock(&dev->struct_mutex);
   1161 
   1162 	i915_gem_retire_requests_ring(ring);
   1163 
   1164 	/* Manually manage the write flush as we may have not yet
   1165 	 * retired the buffer.
   1166 	 */
   1167 	if (obj->last_write_seqno &&
   1168 	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
   1169 		obj->last_write_seqno = 0;
   1170 		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
   1171 	}
   1172 
   1173 	return ret;
   1174 }
   1175 
   1176 /**
   1177  * Called when user space prepares to use an object with the CPU, either
   1178  * through the mmap ioctl's mapping or a GTT mapping.
   1179  */
   1180 int
   1181 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
   1182 			  struct drm_file *file)
   1183 {
   1184 	struct drm_i915_gem_set_domain *args = data;
   1185 	struct drm_i915_gem_object *obj;
   1186 	uint32_t read_domains = args->read_domains;
   1187 	uint32_t write_domain = args->write_domain;
   1188 	int ret;
   1189 
   1190 	/* Only handle setting domains to types used by the CPU. */
   1191 	if (write_domain & I915_GEM_GPU_DOMAINS)
   1192 		return -EINVAL;
   1193 
   1194 	if (read_domains & I915_GEM_GPU_DOMAINS)
   1195 		return -EINVAL;
   1196 
   1197 	/* Having something in the write domain implies it's in the read
   1198 	 * domain, and only that read domain.  Enforce that in the request.
   1199 	 */
   1200 	if (write_domain != 0 && read_domains != write_domain)
   1201 		return -EINVAL;
   1202 
   1203 	ret = i915_mutex_lock_interruptible(dev);
   1204 	if (ret)
   1205 		return ret;
   1206 
   1207 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
   1208 	if (&obj->base == NULL) {
   1209 		ret = -ENOENT;
   1210 		goto unlock;
   1211 	}
   1212 
   1213 	/* Try to flush the object off the GPU without holding the lock.
   1214 	 * We will repeat the flush holding the lock in the normal manner
   1215 	 * to catch cases where we are gazumped.
   1216 	 */
   1217 	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
   1218 	if (ret)
   1219 		goto unref;
   1220 
   1221 	if (read_domains & I915_GEM_DOMAIN_GTT) {
   1222 		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
   1223 
   1224 		/* Silently promote "you're not bound, there was nothing to do"
   1225 		 * to success, since the client was just asking us to
   1226 		 * make sure everything was done.
   1227 		 */
   1228 		if (ret == -EINVAL)
   1229 			ret = 0;
   1230 	} else {
   1231 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
   1232 	}
   1233 
   1234 unref:
   1235 	drm_gem_object_unreference(&obj->base);
   1236 unlock:
   1237 	mutex_unlock(&dev->struct_mutex);
   1238 	return ret;
   1239 }
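
         /* Example (illustrative userspace sketch, assuming libdrm's drmIoctl()):
          * moving an object into the GTT domain for writing before accessing it
          * through a GTT mmap.
          *
          *	struct drm_i915_gem_set_domain sd = {
          *		.handle       = handle,
          *		.read_domains = I915_GEM_DOMAIN_GTT,
          *		.write_domain = I915_GEM_DOMAIN_GTT,
          *	};
          *
          *	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
          *		err(1, "set_domain");
          */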
   1240 
   1241 /**
   1242  * Called when user space has done writes to this buffer
   1243  */
   1244 int
   1245 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
   1246 			 struct drm_file *file)
   1247 {
   1248 	struct drm_i915_gem_sw_finish *args = data;
   1249 	struct drm_i915_gem_object *obj;
   1250 	int ret = 0;
   1251 
   1252 	ret = i915_mutex_lock_interruptible(dev);
   1253 	if (ret)
   1254 		return ret;
   1255 
   1256 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
   1257 	if (&obj->base == NULL) {
   1258 		ret = -ENOENT;
   1259 		goto unlock;
   1260 	}
   1261 
   1262 	/* Pinned buffers may be scanout, so flush the cache */
   1263 	if (obj->pin_count)
   1264 		i915_gem_object_flush_cpu_write_domain(obj);
   1265 
   1266 	drm_gem_object_unreference(&obj->base);
   1267 unlock:
   1268 	mutex_unlock(&dev->struct_mutex);
   1269 	return ret;
   1270 }
   1271 
   1272 /**
   1273  * Maps the contents of an object, returning the address it is mapped
   1274  * into.
   1275  *
   1276  * While the mapping holds a reference on the contents of the object, it doesn't
   1277  * imply a ref on the object itself.
   1278  */
   1279 int
   1280 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
   1281 		    struct drm_file *file)
   1282 {
   1283 	struct drm_i915_gem_mmap *args = data;
   1284 	struct drm_gem_object *obj;
   1285 	unsigned long addr;
   1286 
   1287 	obj = drm_gem_object_lookup(dev, file, args->handle);
   1288 	if (obj == NULL)
   1289 		return -ENOENT;
   1290 
   1291 	/* prime objects have no backing filp to GEM mmap
   1292 	 * pages from.
   1293 	 */
   1294 	if (!obj->filp) {
   1295 		drm_gem_object_unreference_unlocked(obj);
   1296 		return -EINVAL;
   1297 	}
   1298 
   1299 	addr = vm_mmap(obj->filp, 0, args->size,
   1300 		       PROT_READ | PROT_WRITE, MAP_SHARED,
   1301 		       args->offset);
   1302 	drm_gem_object_unreference_unlocked(obj);
   1303 	if (IS_ERR((void *)addr))
   1304 		return addr;
   1305 
   1306 	args->addr_ptr = (uint64_t) addr;
   1307 
   1308 	return 0;
   1309 }
   1310 
   1311 /**
   1312  * i915_gem_fault - fault a page into the GTT
    1313  * @vma: VMA in question
    1314  * @vmf: fault info
   1315  *
    1316  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
   1317  * from userspace.  The fault handler takes care of binding the object to
   1318  * the GTT (if needed), allocating and programming a fence register (again,
   1319  * only if needed based on whether the old reg is still valid or the object
   1320  * is tiled) and inserting a new PTE into the faulting process.
   1321  *
   1322  * Note that the faulting process may involve evicting existing objects
   1323  * from the GTT and/or fence registers to make room.  So performance may
   1324  * suffer if the GTT working set is large or there are few fence registers
   1325  * left.
   1326  */
   1327 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
   1328 {
   1329 	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
   1330 	struct drm_device *dev = obj->base.dev;
   1331 	drm_i915_private_t *dev_priv = dev->dev_private;
   1332 	pgoff_t page_offset;
   1333 	unsigned long pfn;
   1334 	int ret = 0;
   1335 	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
   1336 
   1337 	/* We don't use vmf->pgoff since that has the fake offset */
   1338 	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
   1339 		PAGE_SHIFT;
   1340 
   1341 	ret = i915_mutex_lock_interruptible(dev);
   1342 	if (ret)
   1343 		goto out;
   1344 
   1345 	trace_i915_gem_object_fault(obj, page_offset, true, write);
   1346 
   1347 	/* Now bind it into the GTT if needed */
   1348 	ret = i915_gem_object_pin(obj, 0, true, false);
   1349 	if (ret)
   1350 		goto unlock;
   1351 
   1352 	ret = i915_gem_object_set_to_gtt_domain(obj, write);
   1353 	if (ret)
   1354 		goto unpin;
   1355 
   1356 	ret = i915_gem_object_get_fence(obj);
   1357 	if (ret)
   1358 		goto unpin;
   1359 
   1360 	obj->fault_mappable = true;
   1361 
   1362 	pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) +
   1363 		page_offset;
   1364 
   1365 	/* Finally, remap it using the new GTT offset */
   1366 	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
   1367 unpin:
   1368 	i915_gem_object_unpin(obj);
   1369 unlock:
   1370 	mutex_unlock(&dev->struct_mutex);
   1371 out:
   1372 	switch (ret) {
   1373 	case -EIO:
   1374 		/* If this -EIO is due to a gpu hang, give the reset code a
   1375 		 * chance to clean up the mess. Otherwise return the proper
   1376 		 * SIGBUS. */
   1377 		if (!atomic_read(&dev_priv->mm.wedged))
   1378 			return VM_FAULT_SIGBUS;
   1379 	case -EAGAIN:
   1380 		/* Give the error handler a chance to run and move the
   1381 		 * objects off the GPU active list. Next time we service the
   1382 		 * fault, we should be able to transition the page into the
   1383 		 * GTT without touching the GPU (and so avoid further
    1384 		 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
   1385 		 * with coherency, just lost writes.
   1386 		 */
   1387 		set_need_resched();
   1388 	case 0:
   1389 	case -ERESTARTSYS:
   1390 	case -EINTR:
   1391 	case -EBUSY:
   1392 		/*
   1393 		 * EBUSY is ok: this just means that another thread
   1394 		 * already did the job.
   1395 		 */
   1396 		return VM_FAULT_NOPAGE;
   1397 	case -ENOMEM:
   1398 		return VM_FAULT_OOM;
   1399 	case -ENOSPC:
   1400 		return VM_FAULT_SIGBUS;
   1401 	default:
   1402 		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
   1403 		return VM_FAULT_SIGBUS;
   1404 	}
   1405 }
   1406 
   1407 /**
   1408  * i915_gem_release_mmap - remove physical page mappings
   1409  * @obj: obj in question
   1410  *
   1411  * Preserve the reservation of the mmapping with the DRM core code, but
   1412  * relinquish ownership of the pages back to the system.
   1413  *
   1414  * It is vital that we remove the page mapping if we have mapped a tiled
   1415  * object through the GTT and then lose the fence register due to
   1416  * resource pressure. Similarly if the object has been moved out of the
    1417  * aperture, then pages mapped into userspace must be revoked. Removing the
   1418  * mapping will then trigger a page fault on the next user access, allowing
   1419  * fixup by i915_gem_fault().
   1420  */
   1421 void
   1422 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
   1423 {
   1424 	if (!obj->fault_mappable)
   1425 		return;
   1426 
   1427 	if (obj->base.dev->dev_mapping)
   1428 		unmap_mapping_range(obj->base.dev->dev_mapping,
   1429 				    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
   1430 				    obj->base.size, 1);
   1431 
   1432 	obj->fault_mappable = false;
   1433 }
   1434 
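         /* Size of the GTT range needed to map an object with a fence: the object
          * size on gen4+ or for untiled objects, otherwise rounded up to a power
          * of two of at least 1MB (gen3) or 512KB (gen2).
          */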
   1435 static uint32_t
   1436 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
   1437 {
   1438 	uint32_t gtt_size;
   1439 
   1440 	if (INTEL_INFO(dev)->gen >= 4 ||
   1441 	    tiling_mode == I915_TILING_NONE)
   1442 		return size;
   1443 
   1444 	/* Previous chips need a power-of-two fence region when tiling */
   1445 	if (INTEL_INFO(dev)->gen == 3)
   1446 		gtt_size = 1024*1024;
   1447 	else
   1448 		gtt_size = 512*1024;
   1449 
   1450 	while (gtt_size < size)
   1451 		gtt_size <<= 1;
   1452 
   1453 	return gtt_size;
   1454 }
   1455 
   1456 /**
   1457  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
   1458  * @obj: object to check
   1459  *
   1460  * Return the required GTT alignment for an object, taking into account
   1461  * potential fence register mapping.
   1462  */
   1463 static uint32_t
   1464 i915_gem_get_gtt_alignment(struct drm_device *dev,
   1465 			   uint32_t size,
   1466 			   int tiling_mode)
   1467 {
   1468 	/*
   1469 	 * Minimum alignment is 4k (GTT page size), but might be greater
   1470 	 * if a fence register is needed for the object.
   1471 	 */
   1472 	if (INTEL_INFO(dev)->gen >= 4 ||
   1473 	    tiling_mode == I915_TILING_NONE)
   1474 		return 4096;
   1475 
   1476 	/*
   1477 	 * Previous chips need to be aligned to the size of the smallest
   1478 	 * fence register that can contain the object.
   1479 	 */
   1480 	return i915_gem_get_gtt_size(dev, size, tiling_mode);
   1481 }
   1482 
   1483 /**
   1484  * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
   1485  *					 unfenced object
   1486  * @dev: the device
   1487  * @size: size of the object
   1488  * @tiling_mode: tiling mode of the object
   1489  *
   1490  * Return the required GTT alignment for an object, only taking into account
   1491  * unfenced tiled surface requirements.
   1492  */
   1493 uint32_t
   1494 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
   1495 				    uint32_t size,
   1496 				    int tiling_mode)
   1497 {
   1498 	/*
   1499 	 * Minimum alignment is 4k (GTT page size) for sane hw.
   1500 	 */
   1501 	if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
   1502 	    tiling_mode == I915_TILING_NONE)
   1503 		return 4096;
   1504 
   1505 	/* Previous hardware however needs to be aligned to a power-of-two
    1506 	 * tile height. The simplest method for determining this is to reuse
    1507 	 * the power-of-two fence size calculated in i915_gem_get_gtt_size().
   1508 	 */
   1509 	return i915_gem_get_gtt_size(dev, size, tiling_mode);
   1510 }
   1511 
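         /* Allocate the fake mmap offset used to map the object through the GTT.
          * If the mmap address space is exhausted (-ENOSPC), purge and then shrink
          * our caches and retry; the shrinker is prevented from stealing our lock
          * for the duration.
          */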
   1512 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
   1513 {
   1514 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
   1515 	int ret;
   1516 
   1517 	if (obj->base.map_list.map)
   1518 		return 0;
   1519 
   1520 	dev_priv->mm.shrinker_no_lock_stealing = true;
   1521 
   1522 	ret = drm_gem_create_mmap_offset(&obj->base);
   1523 	if (ret != -ENOSPC)
   1524 		goto out;
   1525 
   1526 	/* Badly fragmented mmap space? The only way we can recover
   1527 	 * space is by destroying unwanted objects. We can't randomly release
   1528 	 * mmap_offsets as userspace expects them to be persistent for the
    1529 	 * lifetime of the objects. The closest we can do is to release the
    1530 	 * offsets on purgeable objects by truncating them and marking them purged,
   1531 	 * which prevents userspace from ever using that object again.
   1532 	 */
   1533 	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
   1534 	ret = drm_gem_create_mmap_offset(&obj->base);
   1535 	if (ret != -ENOSPC)
   1536 		goto out;
   1537 
   1538 	i915_gem_shrink_all(dev_priv);
   1539 	ret = drm_gem_create_mmap_offset(&obj->base);
   1540 out:
   1541 	dev_priv->mm.shrinker_no_lock_stealing = false;
   1542 
   1543 	return ret;
   1544 }
   1545 
   1546 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
   1547 {
   1548 	if (!obj->base.map_list.map)
   1549 		return;
   1550 
   1551 	drm_gem_free_mmap_offset(&obj->base);
   1552 }
   1553 
   1554 int
   1555 i915_gem_mmap_gtt(struct drm_file *file,
   1556 		  struct drm_device *dev,
   1557 		  uint32_t handle,
   1558 		  uint64_t *offset)
   1559 {
   1560 	struct drm_i915_private *dev_priv = dev->dev_private;
   1561 	struct drm_i915_gem_object *obj;
   1562 	int ret;
   1563 
   1564 	ret = i915_mutex_lock_interruptible(dev);
   1565 	if (ret)
   1566 		return ret;
   1567 
   1568 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
   1569 	if (&obj->base == NULL) {
   1570 		ret = -ENOENT;
   1571 		goto unlock;
   1572 	}
   1573 
   1574 	if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
   1575 		ret = -E2BIG;
   1576 		goto out;
   1577 	}
   1578 
   1579 	if (obj->madv != I915_MADV_WILLNEED) {
   1580 		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
   1581 		ret = -EINVAL;
   1582 		goto out;
   1583 	}
   1584 
   1585 	ret = i915_gem_object_create_mmap_offset(obj);
   1586 	if (ret)
   1587 		goto out;
   1588 
   1589 	*offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
   1590 
   1591 out:
   1592 	drm_gem_object_unreference(&obj->base);
   1593 unlock:
   1594 	mutex_unlock(&dev->struct_mutex);
   1595 	return ret;
   1596 }
   1597 
   1598 /**
   1599  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
   1600  * @dev: DRM device
   1601  * @data: GTT mapping ioctl data
   1602  * @file: GEM object info
   1603  *
   1604  * Simply returns the fake offset to userspace so it can mmap it.
   1605  * The mmap call will end up in drm_gem_mmap(), which will set things
   1606  * up so we can get faults in the handler above.
   1607  *
   1608  * The fault handler will take care of binding the object into the GTT
   1609  * (since it may have been evicted to make room for something), allocating
   1610  * a fence register, and mapping the appropriate aperture address into
   1611  * userspace.
   1612  */
   1613 int
   1614 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
   1615 			struct drm_file *file)
   1616 {
   1617 	struct drm_i915_gem_mmap_gtt *args = data;
   1618 
   1619 	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
   1620 }
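
         /* Example (illustrative userspace sketch, assuming libdrm; "size" is the
          * object size passed to the create ioctl): fetch the fake offset and then
          * mmap the object through the GTT aperture on the DRM fd.
          *
          *	struct drm_i915_gem_mmap_gtt mg = { .handle = handle };
          *	void *ptr;
          *
          *	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mg))
          *		err(1, "mmap_gtt");
          *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
          *		   drm_fd, mg.offset);
          *	if (ptr == MAP_FAILED)
          *		err(1, "mmap");
          */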
   1621 
   1622 /* Immediately discard the backing storage */
   1623 static void
   1624 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
   1625 {
   1626 	struct inode *inode;
   1627 
   1628 	i915_gem_object_free_mmap_offset(obj);
   1629 
   1630 	if (obj->base.filp == NULL)
   1631 		return;
   1632 
   1633 	/* Our goal here is to return as much of the memory as
    1634 	 * possible back to the system, as we are called from the OOM path.
   1635 	 * To do this we must instruct the shmfs to drop all of its
   1636 	 * backing pages, *now*.
   1637 	 */
   1638 	inode = obj->base.filp->f_path.dentry->d_inode;
   1639 	shmem_truncate_range(inode, 0, (loff_t)-1);
   1640 
   1641 	obj->madv = __I915_MADV_PURGED;
   1642 }
   1643 
   1644 static inline int
   1645 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
   1646 {
   1647 	return obj->madv == I915_MADV_DONTNEED;
   1648 }
   1649 
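         /* Release the shmem backing pages of an object: move it back to the CPU
          * domain (flushing caches even if that fails), save bit-17 swizzle state
          * if necessary, mark the pages dirty/accessed as appropriate, and drop
          * our page references before freeing the scatterlist.
          */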
   1650 static void
   1651 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
   1652 {
   1653 	int page_count = obj->base.size / PAGE_SIZE;
   1654 	struct scatterlist *sg;
   1655 	int ret, i;
   1656 
   1657 	BUG_ON(obj->madv == __I915_MADV_PURGED);
   1658 
   1659 	ret = i915_gem_object_set_to_cpu_domain(obj, true);
   1660 	if (ret) {
   1661 		/* In the event of a disaster, abandon all caches and
   1662 		 * hope for the best.
   1663 		 */
   1664 		WARN_ON(ret != -EIO);
   1665 		i915_gem_clflush_object(obj);
   1666 		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
   1667 	}
   1668 
   1669 	if (i915_gem_object_needs_bit17_swizzle(obj))
   1670 		i915_gem_object_save_bit_17_swizzle(obj);
   1671 
   1672 	if (obj->madv == I915_MADV_DONTNEED)
   1673 		obj->dirty = 0;
   1674 
   1675 	for_each_sg(obj->pages->sgl, sg, page_count, i) {
   1676 		struct page *page = sg_page(sg);
   1677 
   1678 		if (obj->dirty)
   1679 			set_page_dirty(page);
   1680 
   1681 		if (obj->madv == I915_MADV_WILLNEED)
   1682 			mark_page_accessed(page);
   1683 
   1684 		page_cache_release(page);
   1685 	}
   1686 	obj->dirty = 0;
   1687 
   1688 	sg_free_table(obj->pages);
   1689 	kfree(obj->pages);
   1690 }
   1691 
   1692 static int
   1693 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
   1694 {
   1695 	const struct drm_i915_gem_object_ops *ops = obj->ops;
   1696 
   1697 	if (obj->pages == NULL)
   1698 		return 0;
   1699 
   1700 	BUG_ON(obj->gtt_space);
   1701 
   1702 	if (obj->pages_pin_count)
   1703 		return -EBUSY;
   1704 
   1705 	/* ->put_pages might need to allocate memory for the bit17 swizzle
   1706 	 * array, hence protect the pages from being reaped by removing them
   1707 	 * from the GTT lists early. */
   1708 	list_del(&obj->gtt_list);
   1709 
   1710 	ops->put_pages(obj);
   1711 	obj->pages = NULL;
   1712 
   1713 	if (i915_gem_object_is_purgeable(obj))
   1714 		i915_gem_object_truncate(obj);
   1715 
   1716 	return 0;
   1717 }
   1718 
   1719 static long
   1720 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
   1721 		  bool purgeable_only)
   1722 {
   1723 	struct drm_i915_gem_object *obj, *next;
   1724 	long count = 0;
   1725 
   1726 	list_for_each_entry_safe(obj, next,
   1727 				 &dev_priv->mm.unbound_list,
   1728 				 gtt_list) {
   1729 		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
   1730 		    i915_gem_object_put_pages(obj) == 0) {
   1731 			count += obj->base.size >> PAGE_SHIFT;
   1732 			if (count >= target)
   1733 				return count;
   1734 		}
   1735 	}
   1736 
   1737 	list_for_each_entry_safe(obj, next,
   1738 				 &dev_priv->mm.inactive_list,
   1739 				 mm_list) {
   1740 		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
   1741 		    i915_gem_object_unbind(obj) == 0 &&
   1742 		    i915_gem_object_put_pages(obj) == 0) {
   1743 			count += obj->base.size >> PAGE_SHIFT;
   1744 			if (count >= target)
   1745 				return count;
   1746 		}
   1747 	}
   1748 
   1749 	return count;
   1750 }
   1751 
   1752 static long
   1753 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
   1754 {
   1755 	return __i915_gem_shrink(dev_priv, target, true);
   1756 }
   1757 
   1758 static void
   1759 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
   1760 {
   1761 	struct drm_i915_gem_object *obj, *next;
   1762 
   1763 	i915_gem_evict_everything(dev_priv->dev);
   1764 
   1765 	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
   1766 		i915_gem_object_put_pages(obj);
   1767 }
   1768 
   1769 static int
   1770 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
   1771 {
   1772 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
   1773 	int page_count, i;
   1774 	struct address_space *mapping;
   1775 	struct sg_table *st;
   1776 	struct scatterlist *sg;
   1777 	struct page *page;
   1778 	gfp_t gfp;
   1779 
   1780 	/* Assert that the object is not currently in any GPU domain. As it
   1781 	 * wasn't in the GTT, there shouldn't be any way it could have been in
   1782 	 * a GPU cache
   1783 	 */
   1784 	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
   1785 	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
   1786 
   1787 	st = kmalloc(sizeof(*st), GFP_KERNEL);
   1788 	if (st == NULL)
   1789 		return -ENOMEM;
   1790 
   1791 	page_count = obj->base.size / PAGE_SIZE;
   1792 	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
   1793 		sg_free_table(st);
   1794 		kfree(st);
   1795 		return -ENOMEM;
   1796 	}
   1797 
   1798 	/* Get the list of pages out of our struct file.  They'll be pinned
   1799 	 * at this point until we release them.
   1800 	 *
   1801 	 * Fail silently without starting the shrinker
   1802 	 */
   1803 	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
   1804 	gfp = mapping_gfp_mask(mapping);
   1805 	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
   1806 	gfp &= ~(__GFP_IO | __GFP_WAIT);
   1807 	for_each_sg(st->sgl, sg, page_count, i) {
   1808 		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
   1809 		if (IS_ERR(page)) {
   1810 			i915_gem_purge(dev_priv, page_count);
   1811 			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
   1812 		}
   1813 		if (IS_ERR(page)) {
   1814 			/* We've tried hard to allocate the memory by reaping
   1815 			 * our own buffer, now let the real VM do its job and
   1816 			 * go down in flames if truly OOM.
   1817 			 */
   1818 			gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
   1819 			gfp |= __GFP_IO | __GFP_WAIT;
   1820 
   1821 			i915_gem_shrink_all(dev_priv);
   1822 			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
   1823 			if (IS_ERR(page))
   1824 				goto err_pages;
   1825 
   1826 			gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
   1827 			gfp &= ~(__GFP_IO | __GFP_WAIT);
   1828 		}
   1829 
   1830 		sg_set_page(sg, page, PAGE_SIZE, 0);
   1831 	}
   1832 
   1833 	obj->pages = st;
   1834 
   1835 	if (i915_gem_object_needs_bit17_swizzle(obj))
   1836 		i915_gem_object_do_bit_17_swizzle(obj);
   1837 
   1838 	return 0;
   1839 
   1840 err_pages:
   1841 	for_each_sg(st->sgl, sg, i, page_count)
   1842 		page_cache_release(sg_page(sg));
   1843 	sg_free_table(st);
   1844 	kfree(st);
   1845 	return PTR_ERR(page);
   1846 }
   1847 
   1848 /* Ensure that the associated pages are gathered from the backing storage
   1849  * and pinned into our object. i915_gem_object_get_pages() may be called
   1850  * multiple times before the pages are released by a single call to
   1851  * i915_gem_object_put_pages(); the release only happens once the pages are
   1852  * no longer referenced, either as a result of memory pressure (reaping
   1853  * pages under the shrinker) or because the object itself is released.
   1854  */
   1855 int
   1856 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
   1857 {
   1858 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
   1859 	const struct drm_i915_gem_object_ops *ops = obj->ops;
   1860 	int ret;
   1861 
   1862 	if (obj->pages)
   1863 		return 0;
   1864 
   1865 	BUG_ON(obj->pages_pin_count);
   1866 
   1867 	ret = ops->get_pages(obj);
   1868 	if (ret)
   1869 		return ret;
   1870 
   1871 	list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
   1872 	return 0;
   1873 }
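
        /*
         * A sketch of the expected calling pattern (illustrative; the real
         * callers, e.g. the bind and pwrite/pread paths in this file, follow
         * the same shape): acquire the pages, pin them for the duration of
         * the access so the shrinker cannot reap them, then drop the pin.
         *
         *	ret = i915_gem_object_get_pages(obj);
         *	if (ret)
         *		return ret;
         *
         *	i915_gem_object_pin_pages(obj);
         *	... access obj->pages->sgl ...
         *	i915_gem_object_unpin_pages(obj);
         */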
   1874 
   1875 void
   1876 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
   1877 			       struct intel_ring_buffer *ring)
   1878 {
   1879 	struct drm_device *dev = obj->base.dev;
   1880 	struct drm_i915_private *dev_priv = dev->dev_private;
   1881 	u32 seqno = intel_ring_get_seqno(ring);
   1882 
   1883 	BUG_ON(ring == NULL);
   1884 	obj->ring = ring;
   1885 
   1886 	/* Add a reference if we're newly entering the active list. */
   1887 	if (!obj->active) {
   1888 		drm_gem_object_reference(&obj->base);
   1889 		obj->active = 1;
   1890 	}
   1891 
   1892 	/* Move from whatever list we were on to the tail of execution. */
   1893 	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
   1894 	list_move_tail(&obj->ring_list, &ring->active_list);
   1895 
   1896 	obj->last_read_seqno = seqno;
   1897 
   1898 	if (obj->fenced_gpu_access) {
   1899 		obj->last_fenced_seqno = seqno;
   1900 
   1901 		/* Bump MRU to take account of the delayed flush */
   1902 		if (obj->fence_reg != I915_FENCE_REG_NONE) {
   1903 			struct drm_i915_fence_reg *reg;
   1904 
   1905 			reg = &dev_priv->fence_regs[obj->fence_reg];
   1906 			list_move_tail(&reg->lru_list,
   1907 				       &dev_priv->mm.fence_list);
   1908 		}
   1909 	}
   1910 }
   1911 
   1912 static void
   1913 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
   1914 {
   1915 	struct drm_device *dev = obj->base.dev;
   1916 	struct drm_i915_private *dev_priv = dev->dev_private;
   1917 
   1918 	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
   1919 	BUG_ON(!obj->active);
   1920 
   1921 	if (obj->pin_count) /* are we a framebuffer? */
   1922 		intel_mark_fb_idle(obj);
   1923 
   1924 	list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
   1925 
   1926 	list_del_init(&obj->ring_list);
   1927 	obj->ring = NULL;
   1928 
   1929 	obj->last_read_seqno = 0;
   1930 	obj->last_write_seqno = 0;
   1931 	obj->base.write_domain = 0;
   1932 
   1933 	obj->last_fenced_seqno = 0;
   1934 	obj->fenced_gpu_access = false;
   1935 
   1936 	obj->active = 0;
   1937 	drm_gem_object_unreference(&obj->base);
   1938 
   1939 	WARN_ON(i915_verify_lists(dev));
   1940 }
   1941 
   1942 static int
   1943 i915_gem_handle_seqno_wrap(struct drm_device *dev)
   1944 {
   1945 	struct drm_i915_private *dev_priv = dev->dev_private;
   1946 	struct intel_ring_buffer *ring;
   1947 	int ret, i, j;
   1948 
   1949 	/* The hardware uses various monotonic 32-bit counters, if we
   1950 	 * detect that they will wraparound we need to idle the GPU
   1951 	 * and reset those counters.
   1952 	 */
   1953 	ret = 0;
   1954 	for_each_ring(ring, dev_priv, i) {
   1955 		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
   1956 			ret |= ring->sync_seqno[j] != 0;
   1957 	}
   1958 	if (ret == 0)
   1959 		return ret;
   1960 
   1961 	ret = i915_gpu_idle(dev);
   1962 	if (ret)
   1963 		return ret;
   1964 
   1965 	i915_gem_retire_requests(dev);
   1966 	for_each_ring(ring, dev_priv, i) {
   1967 		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
   1968 			ring->sync_seqno[j] = 0;
   1969 	}
   1970 
   1971 	return 0;
   1972 }
   1973 
   1974 int
   1975 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
   1976 {
   1977 	struct drm_i915_private *dev_priv = dev->dev_private;
   1978 
   1979 	/* reserve 0 for non-seqno */
   1980 	if (dev_priv->next_seqno == 0) {
   1981 		int ret = i915_gem_handle_seqno_wrap(dev);
   1982 		if (ret)
   1983 			return ret;
   1984 
   1985 		dev_priv->next_seqno = 1;
   1986 	}
   1987 
   1988 	*seqno = dev_priv->next_seqno++;
   1989 	return 0;
   1990 }
   1991 
   1992 int
   1993 i915_add_request(struct intel_ring_buffer *ring,
   1994 		 struct drm_file *file,
   1995 		 u32 *out_seqno)
   1996 {
   1997 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
   1998 	struct drm_i915_gem_request *request;
   1999 	u32 request_ring_position;
   2000 	int was_empty;
   2001 	int ret;
   2002 
   2003 	/*
   2004 	 * Emit any outstanding flushes - execbuf can fail to emit the flush
   2005 	 * after having emitted the batchbuffer command. Hence we need to fix
   2006 	 * things up similar to emitting the lazy request. The difference here
   2007 	 * is that the flush _must_ happen before the next request, no matter
   2008 	 * what.
   2009 	 */
   2010 	ret = intel_ring_flush_all_caches(ring);
   2011 	if (ret)
   2012 		return ret;
   2013 
   2014 	request = kmalloc(sizeof(*request), GFP_KERNEL);
   2015 	if (request == NULL)
   2016 		return -ENOMEM;
   2017 
   2018 
   2019 	/* Record the position of the start of the request so that
   2020 	 * should we detect the updated seqno part-way through the
   2021 	 * GPU processing the request, we never over-estimate the
   2022 	 * position of the head.
   2023 	 */
   2024 	request_ring_position = intel_ring_get_tail(ring);
   2025 
   2026 	ret = ring->add_request(ring);
   2027 	if (ret) {
   2028 		kfree(request);
   2029 		return ret;
   2030 	}
   2031 
   2032 	request->seqno = intel_ring_get_seqno(ring);
   2033 	request->ring = ring;
   2034 	request->tail = request_ring_position;
   2035 	request->emitted_jiffies = jiffies;
   2036 	was_empty = list_empty(&ring->request_list);
   2037 	list_add_tail(&request->list, &ring->request_list);
   2038 	request->file_priv = NULL;
   2039 
   2040 	if (file) {
   2041 		struct drm_i915_file_private *file_priv = file->driver_priv;
   2042 
   2043 		spin_lock(&file_priv->mm.lock);
   2044 		request->file_priv = file_priv;
   2045 		list_add_tail(&request->client_list,
   2046 			      &file_priv->mm.request_list);
   2047 		spin_unlock(&file_priv->mm.lock);
   2048 	}
   2049 
   2050 	trace_i915_gem_request_add(ring, request->seqno);
   2051 	ring->outstanding_lazy_request = 0;
   2052 
   2053 	if (!dev_priv->mm.suspended) {
   2054 		if (i915_enable_hangcheck) {
   2055 			mod_timer(&dev_priv->hangcheck_timer,
   2056 				  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
   2057 		}
   2058 		if (was_empty) {
   2059 			queue_delayed_work(dev_priv->wq,
   2060 					   &dev_priv->mm.retire_work,
   2061 					   round_jiffies_up_relative(HZ));
   2062 			intel_mark_busy(dev_priv->dev);
   2063 		}
   2064 	}
   2065 
   2066 	if (out_seqno)
   2067 		*out_seqno = request->seqno;
   2068 	return 0;
   2069 }
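
        /*
         * A sketch of how a caller pairs request emission with a wait
         * (illustrative only): after queueing commands on 'ring', book-keep
         * them with a request, then block on the returned seqno if the CPU
         * must observe the results.
         *
         *	u32 seqno;
         *
         *	ret = i915_add_request(ring, NULL, &seqno);
         *	if (ret)
         *		return ret;
         *
         *	ret = i915_wait_seqno(ring, seqno);	// optional CPU-side wait
         */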
   2070 
   2071 static inline void
   2072 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
   2073 {
   2074 	struct drm_i915_file_private *file_priv = request->file_priv;
   2075 
   2076 	if (!file_priv)
   2077 		return;
   2078 
   2079 	spin_lock(&file_priv->mm.lock);
   2080 	if (request->file_priv) {
   2081 		list_del(&request->client_list);
   2082 		request->file_priv = NULL;
   2083 	}
   2084 	spin_unlock(&file_priv->mm.lock);
   2085 }
   2086 
   2087 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
   2088 				      struct intel_ring_buffer *ring)
   2089 {
   2090 	while (!list_empty(&ring->request_list)) {
   2091 		struct drm_i915_gem_request *request;
   2092 
   2093 		request = list_first_entry(&ring->request_list,
   2094 					   struct drm_i915_gem_request,
   2095 					   list);
   2096 
   2097 		list_del(&request->list);
   2098 		i915_gem_request_remove_from_client(request);
   2099 		kfree(request);
   2100 	}
   2101 
   2102 	while (!list_empty(&ring->active_list)) {
   2103 		struct drm_i915_gem_object *obj;
   2104 
   2105 		obj = list_first_entry(&ring->active_list,
   2106 				       struct drm_i915_gem_object,
   2107 				       ring_list);
   2108 
   2109 		i915_gem_object_move_to_inactive(obj);
   2110 	}
   2111 }
   2112 
   2113 static void i915_gem_reset_fences(struct drm_device *dev)
   2114 {
   2115 	struct drm_i915_private *dev_priv = dev->dev_private;
   2116 	int i;
   2117 
   2118 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
   2119 		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
   2120 
   2121 		i915_gem_write_fence(dev, i, NULL);
   2122 
   2123 		if (reg->obj)
   2124 			i915_gem_object_fence_lost(reg->obj);
   2125 
   2126 		reg->pin_count = 0;
   2127 		reg->obj = NULL;
   2128 		INIT_LIST_HEAD(&reg->lru_list);
   2129 	}
   2130 
   2131 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
   2132 }
   2133 
   2134 void i915_gem_reset(struct drm_device *dev)
   2135 {
   2136 	struct drm_i915_private *dev_priv = dev->dev_private;
   2137 	struct drm_i915_gem_object *obj;
   2138 	struct intel_ring_buffer *ring;
   2139 	int i;
   2140 
   2141 	for_each_ring(ring, dev_priv, i)
   2142 		i915_gem_reset_ring_lists(dev_priv, ring);
   2143 
   2144 	/* Move everything out of the GPU domains to ensure we do any
   2145 	 * necessary invalidation upon reuse.
   2146 	 */
   2147 	list_for_each_entry(obj,
   2148 			    &dev_priv->mm.inactive_list,
   2149 			    mm_list)
   2150 	{
   2151 		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
   2152 	}
   2153 
   2154 	/* The fence registers are invalidated so clear them out */
   2155 	i915_gem_reset_fences(dev);
   2156 }
   2157 
   2158 /**
   2159  * This function clears the request list as sequence numbers are passed.
   2160  */
   2161 void
   2162 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
   2163 {
   2164 	uint32_t seqno;
   2165 
   2166 	if (list_empty(&ring->request_list))
   2167 		return;
   2168 
   2169 	WARN_ON(i915_verify_lists(ring->dev));
   2170 
   2171 	seqno = ring->get_seqno(ring, true);
   2172 
   2173 	while (!list_empty(&ring->request_list)) {
   2174 		struct drm_i915_gem_request *request;
   2175 
   2176 		request = list_first_entry(&ring->request_list,
   2177 					   struct drm_i915_gem_request,
   2178 					   list);
   2179 
   2180 		if (!i915_seqno_passed(seqno, request->seqno))
   2181 			break;
   2182 
   2183 		trace_i915_gem_request_retire(ring, request->seqno);
   2184 		/* We know the GPU must have read the request to have
   2185 		 * sent us the seqno + interrupt, so use the position
   2186 		 * of tail of the request to update the last known position
   2187 		 * of the tail of the request to update the last known position
   2188 		 */
   2189 		ring->last_retired_head = request->tail;
   2190 
   2191 		list_del(&request->list);
   2192 		i915_gem_request_remove_from_client(request);
   2193 		kfree(request);
   2194 	}
   2195 
   2196 	/* Move any buffers on the active list that are no longer referenced
   2197 	 * by the ringbuffer to the inactive list.
   2198 	 */
   2199 	while (!list_empty(&ring->active_list)) {
   2200 		struct drm_i915_gem_object *obj;
   2201 
   2202 		obj = list_first_entry(&ring->active_list,
   2203 				      struct drm_i915_gem_object,
   2204 				      ring_list);
   2205 
   2206 		if (!i915_seqno_passed(seqno, obj->last_read_seqno))
   2207 			break;
   2208 
   2209 		i915_gem_object_move_to_inactive(obj);
   2210 	}
   2211 
   2212 	if (unlikely(ring->trace_irq_seqno &&
   2213 		     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
   2214 		ring->irq_put(ring);
   2215 		ring->trace_irq_seqno = 0;
   2216 	}
   2217 
   2218 	WARN_ON(i915_verify_lists(ring->dev));
   2219 }
   2220 
   2221 void
   2222 i915_gem_retire_requests(struct drm_device *dev)
   2223 {
   2224 	drm_i915_private_t *dev_priv = dev->dev_private;
   2225 	struct intel_ring_buffer *ring;
   2226 	int i;
   2227 
   2228 	for_each_ring(ring, dev_priv, i)
   2229 		i915_gem_retire_requests_ring(ring);
   2230 }
   2231 
   2232 static void
   2233 i915_gem_retire_work_handler(struct work_struct *work)
   2234 {
   2235 	drm_i915_private_t *dev_priv;
   2236 	struct drm_device *dev;
   2237 	struct intel_ring_buffer *ring;
   2238 	bool idle;
   2239 	int i;
   2240 
   2241 	dev_priv = container_of(work, drm_i915_private_t,
   2242 				mm.retire_work.work);
   2243 	dev = dev_priv->dev;
   2244 
   2245 	/* Come back later if the device is busy... */
   2246 	if (!mutex_trylock(&dev->struct_mutex)) {
   2247 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
   2248 				   round_jiffies_up_relative(HZ));
   2249 		return;
   2250 	}
   2251 
   2252 	i915_gem_retire_requests(dev);
   2253 
   2254 	/* Send a periodic flush down the ring so we don't hold onto GEM
   2255 	 * objects indefinitely.
   2256 	 */
   2257 	idle = true;
   2258 	for_each_ring(ring, dev_priv, i) {
   2259 		if (ring->gpu_caches_dirty)
   2260 			i915_add_request(ring, NULL, NULL);
   2261 
   2262 		idle &= list_empty(&ring->request_list);
   2263 	}
   2264 
   2265 	if (!dev_priv->mm.suspended && !idle)
   2266 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
   2267 				   round_jiffies_up_relative(HZ));
   2268 	if (idle)
   2269 		intel_mark_idle(dev);
   2270 
   2271 	mutex_unlock(&dev->struct_mutex);
   2272 }
   2273 
   2274 /**
   2275  * Ensures that an object will eventually get non-busy by flushing any required
   2276  * write domains, emitting any outstanding lazy request and retiring and
   2277  * write domains, emitting any outstanding lazy request and retiring any
   2278  */
   2279 static int
   2280 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
   2281 {
   2282 	int ret;
   2283 
   2284 	if (obj->active) {
   2285 		ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
   2286 		if (ret)
   2287 			return ret;
   2288 
   2289 		i915_gem_retire_requests_ring(obj->ring);
   2290 	}
   2291 
   2292 	return 0;
   2293 }
   2294 
   2295 /**
   2296  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
   2297  * @DRM_IOCTL_ARGS: standard ioctl arguments
   2298  *
   2299  * Returns 0 if successful, else an error is returned with the remaining time in
   2300  * the timeout parameter.
   2301  *  -ETIME: object is still busy after timeout
   2302  *  -ERESTARTSYS: signal interrupted the wait
   2303  *  -ENOENT: object doesn't exist
   2304  * Also possible, but rare:
   2305  *  -EAGAIN: GPU wedged
   2306  *  -ENOMEM: damn
   2307  *  -ENODEV: Internal IRQ fail
   2308  *  -E?: The add request failed
   2309  *
   2310  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
   2311  * non-zero timeout parameter the wait ioctl will wait for the given number of
   2312  * nanoseconds on an object becoming unbusy. Since the wait itself does so
   2313  * without holding struct_mutex the object may become re-busied before this
   2314  * function completes. A similar but shorter race condition exists in the busy
   2315  * ioctl.
   2316  */
   2317 int
   2318 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
   2319 {
   2320 	struct drm_i915_gem_wait *args = data;
   2321 	struct drm_i915_gem_object *obj;
   2322 	struct intel_ring_buffer *ring = NULL;
   2323 	struct timespec timeout_stack, *timeout = NULL;
   2324 	u32 seqno = 0;
   2325 	int ret = 0;
   2326 
   2327 	if (args->timeout_ns >= 0) {
   2328 		timeout_stack = ns_to_timespec(args->timeout_ns);
   2329 		timeout = &timeout_stack;
   2330 	}
   2331 
   2332 	ret = i915_mutex_lock_interruptible(dev);
   2333 	if (ret)
   2334 		return ret;
   2335 
   2336 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
   2337 	if (&obj->base == NULL) {
   2338 		mutex_unlock(&dev->struct_mutex);
   2339 		return -ENOENT;
   2340 	}
   2341 
   2342 	/* Need to make sure the object becomes inactive eventually. */
   2343 	ret = i915_gem_object_flush_active(obj);
   2344 	if (ret)
   2345 		goto out;
   2346 
   2347 	if (obj->active) {
   2348 		seqno = obj->last_read_seqno;
   2349 		ring = obj->ring;
   2350 	}
   2351 
   2352 	if (seqno == 0)
   2353 		goto out;
   2354 
   2355 	/* Do this after OLR check to make sure we make forward progress polling
   2356 	 * on this IOCTL with a 0 timeout (like busy ioctl)
   2357 	 */
   2358 	if (!args->timeout_ns) {
   2359 		ret = -ETIME;
   2360 		goto out;
   2361 	}
   2362 
   2363 	drm_gem_object_unreference(&obj->base);
   2364 	mutex_unlock(&dev->struct_mutex);
   2365 
   2366 	ret = __wait_seqno(ring, seqno, true, timeout);
   2367 	if (timeout) {
   2368 		WARN_ON(!timespec_valid(timeout));
   2369 		args->timeout_ns = timespec_to_ns(timeout);
   2370 	}
   2371 	return ret;
   2372 
   2373 out:
   2374 	drm_gem_object_unreference(&obj->base);
   2375 	mutex_unlock(&dev->struct_mutex);
   2376 	return ret;
   2377 }
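
        /*
         * A minimal userspace sketch of the wait ioctl above (illustrative
         * only; assumes libdrm's drmIoctl(); 'fd' is an open DRM device and
         * 'handle' an existing GEM handle; the helper name is hypothetical):
         *
         *	#include <errno.h>
         *	#include <stdint.h>
         *	#include <xf86drm.h>
         *	#include <i915_drm.h>
         *
         *	// Returns 0 once the BO is idle, -ETIME if it is still busy when
         *	// the timeout expires. The kernel writes the remaining time back
         *	// into wait.timeout_ns.
         *	static int wait_bo(int fd, uint32_t handle, int64_t timeout_ns)
         *	{
         *		struct drm_i915_gem_wait wait = {
         *			.bo_handle = handle,
         *			.timeout_ns = timeout_ns,
         *		};
         *
         *		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait))
         *			return -errno;
         *		return 0;
         *	}
         */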
   2378 
   2379 /**
   2380  * i915_gem_object_sync - sync an object to a ring.
   2381  *
   2382  * @obj: object which may be in use on another ring.
   2383  * @to: ring we wish to use the object on. May be NULL.
   2384  *
   2385  * This code is meant to abstract object synchronization with the GPU.
   2386  * Calling with NULL implies synchronizing the object with the CPU
   2387  * rather than a particular GPU ring.
   2388  *
   2389  * Returns 0 if successful, else propagates up the lower layer error.
   2390  */
   2391 int
   2392 i915_gem_object_sync(struct drm_i915_gem_object *obj,
   2393 		     struct intel_ring_buffer *to)
   2394 {
   2395 	struct intel_ring_buffer *from = obj->ring;
   2396 	u32 seqno;
   2397 	int ret, idx;
   2398 
   2399 	if (from == NULL || to == from)
   2400 		return 0;
   2401 
   2402 	if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
   2403 		return i915_gem_object_wait_rendering(obj, false);
   2404 
   2405 	idx = intel_ring_sync_index(from, to);
   2406 
   2407 	seqno = obj->last_read_seqno;
   2408 	if (seqno <= from->sync_seqno[idx])
   2409 		return 0;
   2410 
   2411 	ret = i915_gem_check_olr(obj->ring, seqno);
   2412 	if (ret)
   2413 		return ret;
   2414 
   2415 	ret = to->sync_to(to, from, seqno);
   2416 	if (!ret)
   2417 		/* We use last_read_seqno because sync_to()
   2418 		 * might have just caused seqno wrap under
   2419 		 * the radar.
   2420 		 */
   2421 		from->sync_seqno[idx] = obj->last_read_seqno;
   2422 
   2423 	return ret;
   2424 }
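
        /*
         * A sketch of the typical caller pattern (as in execbuffer
         * reservation; illustrative only): before emitting commands on 'ring'
         * that use 'obj', order them after whatever ring last touched the
         * object.
         *
         *	ret = i915_gem_object_sync(obj, ring);
         *	if (ret)
         *		return ret;
         *	// Either the CPU waited for the other ring, or a semaphore was
         *	// emitted so that 'ring' stalls in hardware until obj's previous
         *	// work completes.
         */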
   2425 
   2426 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
   2427 {
   2428 	u32 old_write_domain, old_read_domains;
   2429 
   2430 	/* Act as a barrier for all accesses through the GTT */
   2431 	mb();
   2432 
   2433 	/* Force a pagefault for domain tracking on next user access */
   2434 	i915_gem_release_mmap(obj);
   2435 
   2436 	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
   2437 		return;
   2438 
   2439 	old_read_domains = obj->base.read_domains;
   2440 	old_write_domain = obj->base.write_domain;
   2441 
   2442 	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
   2443 	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
   2444 
   2445 	trace_i915_gem_object_change_domain(obj,
   2446 					    old_read_domains,
   2447 					    old_write_domain);
   2448 }
   2449 
   2450 /**
   2451  * Unbinds an object from the GTT aperture.
   2452  */
   2453 int
   2454 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
   2455 {
   2456 	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
   2457 	int ret = 0;
   2458 
   2459 	if (obj->gtt_space == NULL)
   2460 		return 0;
   2461 
   2462 	if (obj->pin_count)
   2463 		return -EBUSY;
   2464 
   2465 	BUG_ON(obj->pages == NULL);
   2466 
   2467 	ret = i915_gem_object_finish_gpu(obj);
   2468 	if (ret)
   2469 		return ret;
   2470 	/* Continue on if we fail due to EIO: the GPU is hung, so we
   2471 	 * should be safe, and we need to clean up or else we might
   2472 	 * cause memory corruption through use-after-free.
   2473 	 */
   2474 
   2475 	i915_gem_object_finish_gtt(obj);
   2476 
   2477 	/* release the fence reg _after_ flushing */
   2478 	ret = i915_gem_object_put_fence(obj);
   2479 	if (ret)
   2480 		return ret;
   2481 
   2482 	trace_i915_gem_object_unbind(obj);
   2483 
   2484 	if (obj->has_global_gtt_mapping)
   2485 		i915_gem_gtt_unbind_object(obj);
   2486 	if (obj->has_aliasing_ppgtt_mapping) {
   2487 		i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
   2488 		obj->has_aliasing_ppgtt_mapping = 0;
   2489 	}
   2490 	i915_gem_gtt_finish_object(obj);
   2491 
   2492 	list_del(&obj->mm_list);
   2493 	list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
   2494 	/* Avoid an unnecessary call to unbind on rebind. */
   2495 	obj->map_and_fenceable = true;
   2496 
   2497 	drm_mm_put_block(obj->gtt_space);
   2498 	obj->gtt_space = NULL;
   2499 	obj->gtt_offset = 0;
   2500 
   2501 	return 0;
   2502 }
   2503 
   2504 int i915_gpu_idle(struct drm_device *dev)
   2505 {
   2506 	drm_i915_private_t *dev_priv = dev->dev_private;
   2507 	struct intel_ring_buffer *ring;
   2508 	int ret, i;
   2509 
   2510 	/* Flush everything onto the inactive list. */
   2511 	for_each_ring(ring, dev_priv, i) {
   2512 		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
   2513 		if (ret)
   2514 			return ret;
   2515 
   2516 		ret = intel_ring_idle(ring);
   2517 		if (ret)
   2518 			return ret;
   2519 	}
   2520 
   2521 	return 0;
   2522 }
   2523 
   2524 static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
   2525 					struct drm_i915_gem_object *obj)
   2526 {
   2527 	drm_i915_private_t *dev_priv = dev->dev_private;
   2528 	uint64_t val;
   2529 
   2530 	if (obj) {
   2531 		u32 size = obj->gtt_space->size;
   2532 
   2533 		val = (uint64_t)((obj->gtt_offset + size - 4096) &
   2534 				 0xfffff000) << 32;
   2535 		val |= obj->gtt_offset & 0xfffff000;
   2536 		val |= (uint64_t)((obj->stride / 128) - 1) <<
   2537 			SANDYBRIDGE_FENCE_PITCH_SHIFT;
   2538 
   2539 		if (obj->tiling_mode == I915_TILING_Y)
   2540 			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
   2541 		val |= I965_FENCE_REG_VALID;
   2542 	} else
   2543 		val = 0;
   2544 
   2545 	I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
   2546 	POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
   2547 }
   2548 
   2549 static void i965_write_fence_reg(struct drm_device *dev, int reg,
   2550 				 struct drm_i915_gem_object *obj)
   2551 {
   2552 	drm_i915_private_t *dev_priv = dev->dev_private;
   2553 	uint64_t val;
   2554 
   2555 	if (obj) {
   2556 		u32 size = obj->gtt_space->size;
   2557 
   2558 		val = (uint64_t)((obj->gtt_offset + size - 4096) &
   2559 				 0xfffff000) << 32;
   2560 		val |= obj->gtt_offset & 0xfffff000;
   2561 		val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
   2562 		if (obj->tiling_mode == I915_TILING_Y)
   2563 			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
   2564 		val |= I965_FENCE_REG_VALID;
   2565 	} else
   2566 		val = 0;
   2567 
   2568 	I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
   2569 	POSTING_READ(FENCE_REG_965_0 + reg * 8);
   2570 }
   2571 
   2572 static void i915_write_fence_reg(struct drm_device *dev, int reg,
   2573 				 struct drm_i915_gem_object *obj)
   2574 {
   2575 	drm_i915_private_t *dev_priv = dev->dev_private;
   2576 	u32 val;
   2577 
   2578 	if (obj) {
   2579 		u32 size = obj->gtt_space->size;
   2580 		int pitch_val;
   2581 		int tile_width;
   2582 
   2583 		WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
   2584 		     (size & -size) != size ||
   2585 		     (obj->gtt_offset & (size - 1)),
   2586 		     "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
   2587 		     obj->gtt_offset, obj->map_and_fenceable, size);
   2588 
   2589 		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
   2590 			tile_width = 128;
   2591 		else
   2592 			tile_width = 512;
   2593 
   2594 		/* Note: pitch better be a power of two tile widths */
   2595 		pitch_val = obj->stride / tile_width;
   2596 		pitch_val = ffs(pitch_val) - 1;
   2597 
   2598 		val = obj->gtt_offset;
   2599 		if (obj->tiling_mode == I915_TILING_Y)
   2600 			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
   2601 		val |= I915_FENCE_SIZE_BITS(size);
   2602 		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
   2603 		val |= I830_FENCE_REG_VALID;
   2604 	} else
   2605 		val = 0;
   2606 
   2607 	if (reg < 8)
   2608 		reg = FENCE_REG_830_0 + reg * 4;
   2609 	else
   2610 		reg = FENCE_REG_945_8 + (reg - 8) * 4;
   2611 
   2612 	I915_WRITE(reg, val);
   2613 	POSTING_READ(reg);
   2614 }
   2615 
   2616 static void i830_write_fence_reg(struct drm_device *dev, int reg,
   2617 				struct drm_i915_gem_object *obj)
   2618 {
   2619 	drm_i915_private_t *dev_priv = dev->dev_private;
   2620 	uint32_t val;
   2621 
   2622 	if (obj) {
   2623 		u32 size = obj->gtt_space->size;
   2624 		uint32_t pitch_val;
   2625 
   2626 		WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
   2627 		     (size & -size) != size ||
   2628 		     (obj->gtt_offset & (size - 1)),
   2629 		     "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
   2630 		     obj->gtt_offset, size);
   2631 
   2632 		pitch_val = obj->stride / 128;
   2633 		pitch_val = ffs(pitch_val) - 1;
   2634 
   2635 		val = obj->gtt_offset;
   2636 		if (obj->tiling_mode == I915_TILING_Y)
   2637 			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
   2638 		val |= I830_FENCE_SIZE_BITS(size);
   2639 		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
   2640 		val |= I830_FENCE_REG_VALID;
   2641 	} else
   2642 		val = 0;
   2643 
   2644 	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
   2645 	POSTING_READ(FENCE_REG_830_0 + reg * 4);
   2646 }
   2647 
   2648 static void i915_gem_write_fence(struct drm_device *dev, int reg,
   2649 				 struct drm_i915_gem_object *obj)
   2650 {
   2651 	switch (INTEL_INFO(dev)->gen) {
   2652 	case 7:
   2653 	case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
   2654 	case 5:
   2655 	case 4: i965_write_fence_reg(dev, reg, obj); break;
   2656 	case 3: i915_write_fence_reg(dev, reg, obj); break;
   2657 	case 2: i830_write_fence_reg(dev, reg, obj); break;
   2658 	default: break;
   2659 	}
   2660 }
   2661 
   2662 static inline int fence_number(struct drm_i915_private *dev_priv,
   2663 			       struct drm_i915_fence_reg *fence)
   2664 {
   2665 	return fence - dev_priv->fence_regs;
   2666 }
   2667 
   2668 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
   2669 					 struct drm_i915_fence_reg *fence,
   2670 					 bool enable)
   2671 {
   2672 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
   2673 	int reg = fence_number(dev_priv, fence);
   2674 
   2675 	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
   2676 
   2677 	if (enable) {
   2678 		obj->fence_reg = reg;
   2679 		fence->obj = obj;
   2680 		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
   2681 	} else {
   2682 		obj->fence_reg = I915_FENCE_REG_NONE;
   2683 		fence->obj = NULL;
   2684 		list_del_init(&fence->lru_list);
   2685 	}
   2686 }
   2687 
   2688 static int
   2689 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
   2690 {
   2691 	if (obj->last_fenced_seqno) {
   2692 		int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
   2693 		if (ret)
   2694 			return ret;
   2695 
   2696 		obj->last_fenced_seqno = 0;
   2697 	}
   2698 
   2699 	/* Ensure that all CPU reads are completed before installing a fence
   2700 	 * and all writes before removing the fence.
   2701 	 */
   2702 	if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
   2703 		mb();
   2704 
   2705 	obj->fenced_gpu_access = false;
   2706 	return 0;
   2707 }
   2708 
   2709 int
   2710 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
   2711 {
   2712 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
   2713 	int ret;
   2714 
   2715 	ret = i915_gem_object_flush_fence(obj);
   2716 	if (ret)
   2717 		return ret;
   2718 
   2719 	if (obj->fence_reg == I915_FENCE_REG_NONE)
   2720 		return 0;
   2721 
   2722 	i915_gem_object_update_fence(obj,
   2723 				     &dev_priv->fence_regs[obj->fence_reg],
   2724 				     false);
   2725 	i915_gem_object_fence_lost(obj);
   2726 
   2727 	return 0;
   2728 }
   2729 
   2730 static struct drm_i915_fence_reg *
   2731 i915_find_fence_reg(struct drm_device *dev)
   2732 {
   2733 	struct drm_i915_private *dev_priv = dev->dev_private;
   2734 	struct drm_i915_fence_reg *reg, *avail;
   2735 	int i;
   2736 
   2737 	/* First try to find a free reg */
   2738 	avail = NULL;
   2739 	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
   2740 		reg = &dev_priv->fence_regs[i];
   2741 		if (!reg->obj)
   2742 			return reg;
   2743 
   2744 		if (!reg->pin_count)
   2745 			avail = reg;
   2746 	}
   2747 
   2748 	if (avail == NULL)
   2749 		return NULL;
   2750 
   2751 	/* None available, try to steal one or wait for a user to finish */
   2752 	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
   2753 		if (reg->pin_count)
   2754 			continue;
   2755 
   2756 		return reg;
   2757 	}
   2758 
   2759 	return NULL;
   2760 }
   2761 
   2762 /**
   2763  * i915_gem_object_get_fence - set up fencing for an object
   2764  * @obj: object to map through a fence reg
   2765  *
   2766  * When mapping objects through the GTT, userspace wants to be able to write
   2767  * to them without having to worry about swizzling if the object is tiled.
   2768  * This function walks the fence regs looking for a free one for @obj,
   2769  * stealing one if it can't find any.
   2770  *
   2771  * It then sets up the reg based on the object's properties: address, pitch
   2772  * and tiling format.
   2773  *
   2774  * For an untiled surface, this removes any existing fence.
   2775  */
   2776 int
   2777 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
   2778 {
   2779 	struct drm_device *dev = obj->base.dev;
   2780 	struct drm_i915_private *dev_priv = dev->dev_private;
   2781 	bool enable = obj->tiling_mode != I915_TILING_NONE;
   2782 	struct drm_i915_fence_reg *reg;
   2783 	int ret;
   2784 
   2785 	/* Have we updated the tiling parameters upon the object and so
   2786 	 * will need to serialise the write to the associated fence register?
   2787 	 */
   2788 	if (obj->fence_dirty) {
   2789 		ret = i915_gem_object_flush_fence(obj);
   2790 		if (ret)
   2791 			return ret;
   2792 	}
   2793 
   2794 	/* Just update our place in the LRU if our fence is getting reused. */
   2795 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
   2796 		reg = &dev_priv->fence_regs[obj->fence_reg];
   2797 		if (!obj->fence_dirty) {
   2798 			list_move_tail(&reg->lru_list,
   2799 				       &dev_priv->mm.fence_list);
   2800 			return 0;
   2801 		}
   2802 	} else if (enable) {
   2803 		reg = i915_find_fence_reg(dev);
   2804 		if (reg == NULL)
   2805 			return -EDEADLK;
   2806 
   2807 		if (reg->obj) {
   2808 			struct drm_i915_gem_object *old = reg->obj;
   2809 
   2810 			ret = i915_gem_object_flush_fence(old);
   2811 			if (ret)
   2812 				return ret;
   2813 
   2814 			i915_gem_object_fence_lost(old);
   2815 		}
   2816 	} else
   2817 		return 0;
   2818 
   2819 	i915_gem_object_update_fence(obj, reg, enable);
   2820 	obj->fence_dirty = false;
   2821 
   2822 	return 0;
   2823 }
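
        /*
         * A sketch of how binding and fencing are combined before accessing a
         * tiled object through the aperture (illustrative only; roughly
         * mirrors the GTT fault and tiled pwrite paths):
         *
         *	ret = i915_gem_object_pin(obj, 0, true, false);	// map_and_fenceable
         *	if (ret)
         *		return ret;
         *
         *	ret = i915_gem_object_get_fence(obj);
         *	if (ret == 0) {
         *		... read/write through the aperture at obj->gtt_offset ...
         *	}
         *
         *	i915_gem_object_unpin(obj);
         */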
   2824 
   2825 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
   2826 				     struct drm_mm_node *gtt_space,
   2827 				     unsigned long cache_level)
   2828 {
   2829 	struct drm_mm_node *other;
   2830 
   2831 	/* On non-LLC machines we have to be careful when putting differing
   2832 	 * types of snoopable memory together to avoid the prefetcher
   2833 	 * crossing memory domains and dying.
   2834 	 */
   2835 	if (HAS_LLC(dev))
   2836 		return true;
   2837 
   2838 	if (gtt_space == NULL)
   2839 		return true;
   2840 
   2841 	if (list_empty(&gtt_space->node_list))
   2842 		return true;
   2843 
   2844 	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
   2845 	if (other->allocated && !other->hole_follows && other->color != cache_level)
   2846 		return false;
   2847 
   2848 	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
   2849 	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
   2850 		return false;
   2851 
   2852 	return true;
   2853 }
   2854 
   2855 static void i915_gem_verify_gtt(struct drm_device *dev)
   2856 {
   2857 #if WATCH_GTT
   2858 	struct drm_i915_private *dev_priv = dev->dev_private;
   2859 	struct drm_i915_gem_object *obj;
   2860 	int err = 0;
   2861 
   2862 	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
   2863 		if (obj->gtt_space == NULL) {
   2864 			printk(KERN_ERR "object found on GTT list with no space reserved\n");
   2865 			err++;
   2866 			continue;
   2867 		}
   2868 
   2869 		if (obj->cache_level != obj->gtt_space->color) {
   2870 			printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
   2871 			       obj->gtt_space->start,
   2872 			       obj->gtt_space->start + obj->gtt_space->size,
   2873 			       obj->cache_level,
   2874 			       obj->gtt_space->color);
   2875 			err++;
   2876 			continue;
   2877 		}
   2878 
   2879 		if (!i915_gem_valid_gtt_space(dev,
   2880 					      obj->gtt_space,
   2881 					      obj->cache_level)) {
   2882 			printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
   2883 			       obj->gtt_space->start,
   2884 			       obj->gtt_space->start + obj->gtt_space->size,
   2885 			       obj->cache_level);
   2886 			err++;
   2887 			continue;
   2888 		}
   2889 	}
   2890 
   2891 	WARN_ON(err);
   2892 #endif
   2893 }
   2894 
   2895 /**
   2896  * Finds free space in the GTT aperture and binds the object there.
   2897  */
   2898 static int
   2899 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
   2900 			    unsigned alignment,
   2901 			    bool map_and_fenceable,
   2902 			    bool nonblocking)
   2903 {
   2904 	struct drm_device *dev = obj->base.dev;
   2905 	drm_i915_private_t *dev_priv = dev->dev_private;
   2906 	struct drm_mm_node *node;
   2907 	u32 size, fence_size, fence_alignment, unfenced_alignment;
   2908 	bool mappable, fenceable;
   2909 	int ret;
   2910 
   2911 	if (obj->madv != I915_MADV_WILLNEED) {
   2912 		DRM_ERROR("Attempting to bind a purgeable object\n");
   2913 		return -EINVAL;
   2914 	}
   2915 
   2916 	fence_size = i915_gem_get_gtt_size(dev,
   2917 					   obj->base.size,
   2918 					   obj->tiling_mode);
   2919 	fence_alignment = i915_gem_get_gtt_alignment(dev,
   2920 						     obj->base.size,
   2921 						     obj->tiling_mode);
   2922 	unfenced_alignment =
   2923 		i915_gem_get_unfenced_gtt_alignment(dev,
   2924 						    obj->base.size,
   2925 						    obj->tiling_mode);
   2926 
   2927 	if (alignment == 0)
   2928 		alignment = map_and_fenceable ? fence_alignment :
   2929 						unfenced_alignment;
   2930 	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
   2931 		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
   2932 		return -EINVAL;
   2933 	}
   2934 
   2935 	size = map_and_fenceable ? fence_size : obj->base.size;
   2936 
   2937 	/* If the object is bigger than the entire aperture, reject it early
   2938 	 * before evicting everything in a vain attempt to find space.
   2939 	 */
   2940 	if (obj->base.size >
   2941 	    (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
   2942 		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
   2943 		return -E2BIG;
   2944 	}
   2945 
   2946 	ret = i915_gem_object_get_pages(obj);
   2947 	if (ret)
   2948 		return ret;
   2949 
   2950 	i915_gem_object_pin_pages(obj);
   2951 
   2952 	node = kzalloc(sizeof(*node), GFP_KERNEL);
   2953 	if (node == NULL) {
   2954 		i915_gem_object_unpin_pages(obj);
   2955 		return -ENOMEM;
   2956 	}
   2957 
   2958  search_free:
   2959 	if (map_and_fenceable)
   2960 		ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
   2961 							  size, alignment, obj->cache_level,
   2962 							  0, dev_priv->mm.gtt_mappable_end);
   2963 	else
   2964 		ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
   2965 						 size, alignment, obj->cache_level);
   2966 	if (ret) {
   2967 		ret = i915_gem_evict_something(dev, size, alignment,
   2968 					       obj->cache_level,
   2969 					       map_and_fenceable,
   2970 					       nonblocking);
   2971 		if (ret == 0)
   2972 			goto search_free;
   2973 
   2974 		i915_gem_object_unpin_pages(obj);
   2975 		kfree(node);
   2976 		return ret;
   2977 	}
   2978 	if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
   2979 		i915_gem_object_unpin_pages(obj);
   2980 		drm_mm_put_block(node);
   2981 		return -EINVAL;
   2982 	}
   2983 
   2984 	ret = i915_gem_gtt_prepare_object(obj);
   2985 	if (ret) {
   2986 		i915_gem_object_unpin_pages(obj);
   2987 		drm_mm_put_block(node);
   2988 		return ret;
   2989 	}
   2990 
   2991 	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
   2992 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
   2993 
   2994 	obj->gtt_space = node;
   2995 	obj->gtt_offset = node->start;
   2996 
   2997 	fenceable =
   2998 		node->size == fence_size &&
   2999 		(node->start & (fence_alignment - 1)) == 0;
   3000 
   3001 	mappable =
   3002 		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
   3003 
   3004 	obj->map_and_fenceable = mappable && fenceable;
   3005 
   3006 	i915_gem_object_unpin_pages(obj);
   3007 	trace_i915_gem_object_bind(obj, map_and_fenceable);
   3008 	i915_gem_verify_gtt(dev);
   3009 	return 0;
   3010 }
   3011 
   3012 void
   3013 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
   3014 {
   3015 	/* If we don't have a page list set up, then we're not pinned
   3016 	 * to the GPU, and we can ignore the cache flush because it'll happen
   3017 	 * again at bind time.
   3018 	 */
   3019 	if (obj->pages == NULL)
   3020 		return;
   3021 
   3022 	/* If the GPU is snooping the contents of the CPU cache,
   3023 	 * we do not need to manually clear the CPU cache lines.  However,
   3024 	 * the caches are only snooped when the render cache is
   3025 	 * flushed/invalidated.  As we always have to emit invalidations
   3026 	 * and flushes when moving into and out of the RENDER domain, correct
   3027 	 * snooping behaviour occurs naturally as the result of our domain
   3028 	 * tracking.
   3029 	 */
   3030 	if (obj->cache_level != I915_CACHE_NONE)
   3031 		return;
   3032 
   3033 	trace_i915_gem_object_clflush(obj);
   3034 
   3035 	drm_clflush_sg(obj->pages);
   3036 }
   3037 
   3038 /** Flushes the GTT write domain for the object if it's dirty. */
   3039 static void
   3040 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
   3041 {
   3042 	uint32_t old_write_domain;
   3043 
   3044 	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
   3045 		return;
   3046 
   3047 	/* No actual flushing is required for the GTT write domain.  Writes
   3048 	 * to it immediately go to main memory as far as we know, so there's
   3049 	 * no chipset flush.  It also doesn't land in render cache.
   3050 	 *
   3051 	 * However, we do have to enforce the order so that all writes through
   3052 	 * the GTT land before any writes to the device, such as updates to
   3053 	 * the GATT itself.
   3054 	 */
   3055 	wmb();
   3056 
   3057 	old_write_domain = obj->base.write_domain;
   3058 	obj->base.write_domain = 0;
   3059 
   3060 	trace_i915_gem_object_change_domain(obj,
   3061 					    obj->base.read_domains,
   3062 					    old_write_domain);
   3063 }
   3064 
   3065 /** Flushes the CPU write domain for the object if it's dirty. */
   3066 static void
   3067 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
   3068 {
   3069 	uint32_t old_write_domain;
   3070 
   3071 	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
   3072 		return;
   3073 
   3074 	i915_gem_clflush_object(obj);
   3075 	i915_gem_chipset_flush(obj->base.dev);
   3076 	old_write_domain = obj->base.write_domain;
   3077 	obj->base.write_domain = 0;
   3078 
   3079 	trace_i915_gem_object_change_domain(obj,
   3080 					    obj->base.read_domains,
   3081 					    old_write_domain);
   3082 }
   3083 
   3084 /**
   3085  * Moves a single object to the GTT read, and possibly write domain.
   3086  *
   3087  * This function returns when the move is complete, including waiting on
   3088  * flushes to occur.
   3089  */
   3090 int
   3091 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
   3092 {
   3093 	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
   3094 	uint32_t old_write_domain, old_read_domains;
   3095 	int ret;
   3096 
   3097 	/* Not valid to be called on unbound objects. */
   3098 	if (obj->gtt_space == NULL)
   3099 		return -EINVAL;
   3100 
   3101 	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
   3102 		return 0;
   3103 
   3104 	ret = i915_gem_object_wait_rendering(obj, !write);
   3105 	if (ret)
   3106 		return ret;
   3107 
   3108 	i915_gem_object_flush_cpu_write_domain(obj);
   3109 
   3110 	old_write_domain = obj->base.write_domain;
   3111 	old_read_domains = obj->base.read_domains;
   3112 
   3113 	/* It should now be out of any other write domains, and we can update
   3114 	 * the domain values for our changes.
   3115 	 */
   3116 	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
   3117 	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
   3118 	if (write) {
   3119 		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
   3120 		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
   3121 		obj->dirty = 1;
   3122 	}
   3123 
   3124 	trace_i915_gem_object_change_domain(obj,
   3125 					    old_read_domains,
   3126 					    old_write_domain);
   3127 
   3128 	/* And bump the LRU for this access */
   3129 	if (i915_gem_object_is_inactive(obj))
   3130 		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
   3131 
   3132 	return 0;
   3133 }
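
        /*
         * A minimal userspace sketch of the set-domain ioctl that reaches this
         * function for the GTT case (the ioctl handler lives earlier in this
         * file; illustrative only, assumes libdrm's drmIoctl()): move the
         * object to the GTT domain before writing it through a GTT mmap.
         *
         *	struct drm_i915_gem_set_domain sd = {
         *		.handle = handle,
         *		.read_domains = I915_GEM_DOMAIN_GTT,
         *		.write_domain = I915_GEM_DOMAIN_GTT,
         *	};
         *
         *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
         *		perror("set_domain");
         */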
   3134 
   3135 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
   3136 				    enum i915_cache_level cache_level)
   3137 {
   3138 	struct drm_device *dev = obj->base.dev;
   3139 	drm_i915_private_t *dev_priv = dev->dev_private;
   3140 	int ret;
   3141 
   3142 	if (obj->cache_level == cache_level)
   3143 		return 0;
   3144 
   3145 	if (obj->pin_count) {
   3146 		DRM_DEBUG("can not change the cache level of pinned objects\n");
   3147 		return -EBUSY;
   3148 	}
   3149 
   3150 	if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
   3151 		ret = i915_gem_object_unbind(obj);
   3152 		if (ret)
   3153 			return ret;
   3154 	}
   3155 
   3156 	if (obj->gtt_space) {
   3157 		ret = i915_gem_object_finish_gpu(obj);
   3158 		if (ret)
   3159 			return ret;
   3160 
   3161 		i915_gem_object_finish_gtt(obj);
   3162 
   3163 		/* Before SandyBridge, you could not use tiling or fence
   3164 		 * registers with snooped memory, so relinquish any fences
   3165 		 * currently pointing to our region in the aperture.
   3166 		 */
   3167 		if (INTEL_INFO(dev)->gen < 6) {
   3168 			ret = i915_gem_object_put_fence(obj);
   3169 			if (ret)
   3170 				return ret;
   3171 		}
   3172 
   3173 		if (obj->has_global_gtt_mapping)
   3174 			i915_gem_gtt_bind_object(obj, cache_level);
   3175 		if (obj->has_aliasing_ppgtt_mapping)
   3176 			i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
   3177 					       obj, cache_level);
   3178 
   3179 		obj->gtt_space->color = cache_level;
   3180 	}
   3181 
   3182 	if (cache_level == I915_CACHE_NONE) {
   3183 		u32 old_read_domains, old_write_domain;
   3184 
   3185 		/* If we're coming from LLC cached, then we haven't
   3186 		 * actually been tracking whether the data is in the
   3187 		 * CPU cache or not, since we only allow one bit set
   3188 		 * in obj->write_domain and have been skipping the clflushes.
   3189 		 * Just set it to the CPU cache for now.
   3190 		 */
   3191 		WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
   3192 		WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
   3193 
   3194 		old_read_domains = obj->base.read_domains;
   3195 		old_write_domain = obj->base.write_domain;
   3196 
   3197 		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
   3198 		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
   3199 
   3200 		trace_i915_gem_object_change_domain(obj,
   3201 						    old_read_domains,
   3202 						    old_write_domain);
   3203 	}
   3204 
   3205 	obj->cache_level = cache_level;
   3206 	i915_gem_verify_gtt(dev);
   3207 	return 0;
   3208 }
   3209 
   3210 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
   3211 			       struct drm_file *file)
   3212 {
   3213 	struct drm_i915_gem_caching *args = data;
   3214 	struct drm_i915_gem_object *obj;
   3215 	int ret;
   3216 
   3217 	ret = i915_mutex_lock_interruptible(dev);
   3218 	if (ret)
   3219 		return ret;
   3220 
   3221 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
   3222 	if (&obj->base == NULL) {
   3223 		ret = -ENOENT;
   3224 		goto unlock;
   3225 	}
   3226 
   3227 	args->caching = obj->cache_level != I915_CACHE_NONE;
   3228 
   3229 	drm_gem_object_unreference(&obj->base);
   3230 unlock:
   3231 	mutex_unlock(&dev->struct_mutex);
   3232 	return ret;
   3233 }
   3234 
   3235 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
   3236 			       struct drm_file *file)
   3237 {
   3238 	struct drm_i915_gem_caching *args = data;
   3239 	struct drm_i915_gem_object *obj;
   3240 	enum i915_cache_level level;
   3241 	int ret;
   3242 
   3243 	switch (args->caching) {
   3244 	case I915_CACHING_NONE:
   3245 		level = I915_CACHE_NONE;
   3246 		break;
   3247 	case I915_CACHING_CACHED:
   3248 		level = I915_CACHE_LLC;
   3249 		break;
   3250 	default:
   3251 		return -EINVAL;
   3252 	}
   3253 
   3254 	ret = i915_mutex_lock_interruptible(dev);
   3255 	if (ret)
   3256 		return ret;
   3257 
   3258 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
   3259 	if (&obj->base == NULL) {
   3260 		ret = -ENOENT;
   3261 		goto unlock;
   3262 	}
   3263 
   3264 	ret = i915_gem_object_set_cache_level(obj, level);
   3265 
   3266 	drm_gem_object_unreference(&obj->base);
   3267 unlock:
   3268 	mutex_unlock(&dev->struct_mutex);
   3269 	return ret;
   3270 }
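
        /*
         * A minimal userspace sketch of the caching ioctl above (illustrative
         * only; assumes libdrm's drmIoctl()):
         *
         *	struct drm_i915_gem_caching arg = {
         *		.handle = handle,
         *		.caching = I915_CACHING_CACHED,	// or I915_CACHING_NONE
         *	};
         *
         *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg))
         *		perror("set_caching");	// e.g. EBUSY if the object is pinned
         */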
   3271 
   3272 /*
   3273  * Prepare buffer for display plane (scanout, cursors, etc).
   3274  * Can be called from an uninterruptible phase (modesetting) and allows
   3275  * any flushes to be pipelined (for pageflips).
   3276  */
   3277 int
   3278 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
   3279 				     u32 alignment,
   3280 				     struct intel_ring_buffer *pipelined)
   3281 {
   3282 	u32 old_read_domains, old_write_domain;
   3283 	int ret;
   3284 
   3285 	if (pipelined != obj->ring) {
   3286 		ret = i915_gem_object_sync(obj, pipelined);
   3287 		if (ret)
   3288 			return ret;
   3289 	}
   3290 
   3291 	/* The display engine is not coherent with the LLC cache on gen6.  As
   3292 	 * a result, we make sure that the pinning that is about to occur is
   3293 	 * done with uncached PTEs. This is lowest common denominator for all
   3294 	 * done with uncached PTEs. This is the lowest common denominator for all
   3295 	 *
   3296 	 * However for gen6+, we could do better by using the GFDT bit instead
   3297 	 * of uncaching, which would allow us to flush all the LLC-cached data
   3298 	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
   3299 	 */
   3300 	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
   3301 	if (ret)
   3302 		return ret;
   3303 
   3304 	/* As the user may map the buffer once pinned in the display plane
   3305 	 * (e.g. libkms for the bootup splash), we have to ensure that we
   3306 	 * always use map_and_fenceable for all scanout buffers.
   3307 	 */
   3308 	ret = i915_gem_object_pin(obj, alignment, true, false);
   3309 	if (ret)
   3310 		return ret;
   3311 
   3312 	i915_gem_object_flush_cpu_write_domain(obj);
   3313 
   3314 	old_write_domain = obj->base.write_domain;
   3315 	old_read_domains = obj->base.read_domains;
   3316 
   3317 	/* It should now be out of any other write domains, and we can update
   3318 	 * the domain values for our changes.
   3319 	 */
   3320 	obj->base.write_domain = 0;
   3321 	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
   3322 
   3323 	trace_i915_gem_object_change_domain(obj,
   3324 					    old_read_domains,
   3325 					    old_write_domain);
   3326 
   3327 	return 0;
   3328 }
   3329 
   3330 int
   3331 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
   3332 {
   3333 	int ret;
   3334 
   3335 	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
   3336 		return 0;
   3337 
   3338 	ret = i915_gem_object_wait_rendering(obj, false);
   3339 	if (ret)
   3340 		return ret;
   3341 
   3342 	/* Ensure that we invalidate the GPU's caches and TLBs. */
   3343 	obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
   3344 	return 0;
   3345 }
   3346 
   3347 /**
   3348  * Moves a single object to the CPU read, and possibly write domain.
   3349  *
   3350  * This function returns when the move is complete, including waiting on
   3351  * flushes to occur.
   3352  */
   3353 int
   3354 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
   3355 {
   3356 	uint32_t old_write_domain, old_read_domains;
   3357 	int ret;
   3358 
   3359 	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
   3360 		return 0;
   3361 
   3362 	ret = i915_gem_object_wait_rendering(obj, !write);
   3363 	if (ret)
   3364 		return ret;
   3365 
   3366 	i915_gem_object_flush_gtt_write_domain(obj);
   3367 
   3368 	old_write_domain = obj->base.write_domain;
   3369 	old_read_domains = obj->base.read_domains;
   3370 
   3371 	/* Flush the CPU cache if it's still invalid. */
   3372 	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
   3373 		i915_gem_clflush_object(obj);
   3374 
   3375 		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
   3376 	}
   3377 
   3378 	/* It should now be out of any other write domains, and we can update
   3379 	 * the domain values for our changes.
   3380 	 */
   3381 	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
   3382 
   3383 	/* If we're writing through the CPU, then the GPU read domains will
   3384 	 * need to be invalidated at next use.
   3385 	 */
   3386 	if (write) {
   3387 		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
   3388 		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
   3389 	}
   3390 
   3391 	trace_i915_gem_object_change_domain(obj,
   3392 					    old_read_domains,
   3393 					    old_write_domain);
   3394 
   3395 	return 0;
   3396 }
   3397 
   3398 /* Throttle our rendering by waiting until the ring has completed our requests
   3399  * emitted over 20 msec ago.
   3400  *
   3401  * Note that if we were to use the current jiffies each time around the loop,
   3402  * we wouldn't escape the function with any frames outstanding if the time to
   3403  * render a frame was over 20ms.
   3404  *
   3405  * This should get us reasonable parallelism between CPU and GPU but also
   3406  * relatively low latency when blocking on a particular request to finish.
   3407  */
   3408 static int
   3409 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
   3410 {
   3411 	struct drm_i915_private *dev_priv = dev->dev_private;
   3412 	struct drm_i915_file_private *file_priv = file->driver_priv;
   3413 	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
   3414 	struct drm_i915_gem_request *request;
   3415 	struct intel_ring_buffer *ring = NULL;
   3416 	u32 seqno = 0;
   3417 	int ret;
   3418 
   3419 	if (atomic_read(&dev_priv->mm.wedged))
   3420 		return -EIO;
   3421 
   3422 	spin_lock(&file_priv->mm.lock);
   3423 	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
   3424 		if (time_after_eq(request->emitted_jiffies, recent_enough))
   3425 			break;
   3426 
   3427 		ring = request->ring;
   3428 		seqno = request->seqno;
   3429 	}
   3430 	spin_unlock(&file_priv->mm.lock);
   3431 
   3432 	if (seqno == 0)
   3433 		return 0;
   3434 
   3435 	ret = __wait_seqno(ring, seqno, true, NULL);
   3436 	if (ret == 0)
   3437 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
   3438 
   3439 	return ret;
   3440 }
   3441 
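/**
 * Pin an object into the GTT at the requested alignment.
 *
 * If the object is already bound but violates the requested alignment or
 * mappability, it is unbound and rebound.  @map_and_fenceable asks for a
 * binding in the mappable aperture; @nonblocking asks the eviction path
 * not to wait on the GPU when making room.  Each successful call takes a
 * pin reference that is dropped by i915_gem_object_unpin().
 */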
   3442 int
   3443 i915_gem_object_pin(struct drm_i915_gem_object *obj,
   3444 		    uint32_t alignment,
   3445 		    bool map_and_fenceable,
   3446 		    bool nonblocking)
   3447 {
   3448 	int ret;
   3449 
   3450 	if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
   3451 		return -EBUSY;
   3452 
   3453 	if (obj->gtt_space != NULL) {
   3454 		if ((alignment && obj->gtt_offset & (alignment - 1)) ||
   3455 		    (map_and_fenceable && !obj->map_and_fenceable)) {
   3456 			WARN(obj->pin_count,
   3457 			     "bo is already pinned with incorrect alignment:"
   3458 			     " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
   3459 			     " obj->map_and_fenceable=%d\n",
   3460 			     obj->gtt_offset, alignment,
   3461 			     map_and_fenceable,
   3462 			     obj->map_and_fenceable);
   3463 			ret = i915_gem_object_unbind(obj);
   3464 			if (ret)
   3465 				return ret;
   3466 		}
   3467 	}
   3468 
   3469 	if (obj->gtt_space == NULL) {
   3470 		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
   3471 
   3472 		ret = i915_gem_object_bind_to_gtt(obj, alignment,
   3473 						  map_and_fenceable,
   3474 						  nonblocking);
   3475 		if (ret)
   3476 			return ret;
   3477 
   3478 		if (!dev_priv->mm.aliasing_ppgtt)
   3479 			i915_gem_gtt_bind_object(obj, obj->cache_level);
   3480 	}
   3481 
   3482 	if (!obj->has_global_gtt_mapping && map_and_fenceable)
   3483 		i915_gem_gtt_bind_object(obj, obj->cache_level);
   3484 
   3485 	obj->pin_count++;
   3486 	obj->pin_mappable |= map_and_fenceable;
   3487 
   3488 	return 0;
   3489 }
   3490 
   3491 void
   3492 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
   3493 {
   3494 	BUG_ON(obj->pin_count == 0);
   3495 	BUG_ON(obj->gtt_space == NULL);
   3496 
   3497 	if (--obj->pin_count == 0)
   3498 		obj->pin_mappable = false;
   3499 }
   3500 
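/*
 * Legacy ioctl that pins an object on behalf of userspace (historically
 * the X server).  A per-object user_pin_count and pin_filp ensure that
 * only the file that pinned the object may pin it again or unpin it.
 */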
   3501 int
   3502 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
   3503 		   struct drm_file *file)
   3504 {
   3505 	struct drm_i915_gem_pin *args = data;
   3506 	struct drm_i915_gem_object *obj;
   3507 	int ret;
   3508 
   3509 	ret = i915_mutex_lock_interruptible(dev);
   3510 	if (ret)
   3511 		return ret;
   3512 
   3513 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
   3514 	if (&obj->base == NULL) {
   3515 		ret = -ENOENT;
   3516 		goto unlock;
   3517 	}
   3518 
   3519 	if (obj->madv != I915_MADV_WILLNEED) {
   3520 		DRM_ERROR("Attempting to pin a purgeable buffer\n");
   3521 		ret = -EINVAL;
   3522 		goto out;
   3523 	}
   3524 
   3525 	if (obj->pin_filp != NULL && obj->pin_filp != file) {
   3526 		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
   3527 			  args->handle);
   3528 		ret = -EINVAL;
   3529 		goto out;
   3530 	}
   3531 
   3532 	if (obj->user_pin_count == 0) {
   3533 		ret = i915_gem_object_pin(obj, args->alignment, true, false);
   3534 		if (ret)
   3535 			goto out;
   3536 	}
   3537 
   3538 	obj->user_pin_count++;
   3539 	obj->pin_filp = file;
   3540 
   3541 	/* XXX - flush the CPU caches for pinned objects
   3542 	 * as the X server doesn't manage domains yet
   3543 	 */
   3544 	i915_gem_object_flush_cpu_write_domain(obj);
   3545 	args->offset = obj->gtt_offset;
   3546 out:
   3547 	drm_gem_object_unreference(&obj->base);
   3548 unlock:
   3549 	mutex_unlock(&dev->struct_mutex);
   3550 	return ret;
   3551 }
   3552 
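/*
 * Counterpart to i915_gem_pin_ioctl(): drops one userspace pin reference
 * and unpins the object once the last reference from this file is gone.
 */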
   3553 int
   3554 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
   3555 		     struct drm_file *file)
   3556 {
   3557 	struct drm_i915_gem_pin *args = data;
   3558 	struct drm_i915_gem_object *obj;
   3559 	int ret;
   3560 
   3561 	ret = i915_mutex_lock_interruptible(dev);
   3562 	if (ret)
   3563 		return ret;
   3564 
   3565 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
   3566 	if (&obj->base == NULL) {
   3567 		ret = -ENOENT;
   3568 		goto unlock;
   3569 	}
   3570 
   3571 	if (obj->pin_filp != file) {
    3572 		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
   3573 			  args->handle);
   3574 		ret = -EINVAL;
   3575 		goto out;
   3576 	}
   3577 	obj->user_pin_count--;
   3578 	if (obj->user_pin_count == 0) {
   3579 		obj->pin_filp = NULL;
   3580 		i915_gem_object_unpin(obj);
   3581 	}
   3582 
   3583 out:
   3584 	drm_gem_object_unreference(&obj->base);
   3585 unlock:
   3586 	mutex_unlock(&dev->struct_mutex);
   3587 	return ret;
   3588 }
   3589 
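/*
 * Report whether an object is still busy on the GPU.  Bit 0 of args->busy
 * is the active flag, and the ring flag of the last ring the object ran
 * on is placed in the upper 16 bits.  A caller could decode the result
 * roughly as follows (illustrative sketch only):
 *
 *	int still_busy = args.busy & 1;
 *	int ring_flag  = args.busy >> 16;
 */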
   3590 int
   3591 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
   3592 		    struct drm_file *file)
   3593 {
   3594 	struct drm_i915_gem_busy *args = data;
   3595 	struct drm_i915_gem_object *obj;
   3596 	int ret;
   3597 
   3598 	ret = i915_mutex_lock_interruptible(dev);
   3599 	if (ret)
   3600 		return ret;
   3601 
   3602 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
   3603 	if (&obj->base == NULL) {
   3604 		ret = -ENOENT;
   3605 		goto unlock;
   3606 	}
   3607 
   3608 	/* Count all active objects as busy, even if they are currently not used
   3609 	 * by the gpu. Users of this interface expect objects to eventually
   3610 	 * become non-busy without any further actions, therefore emit any
    3611 	 * become non-busy without any further actions; therefore emit any
   3612 	 */
   3613 	ret = i915_gem_object_flush_active(obj);
   3614 
   3615 	args->busy = obj->active;
   3616 	if (obj->ring) {
   3617 		BUILD_BUG_ON(I915_NUM_RINGS > 16);
   3618 		args->busy |= intel_ring_flag(obj->ring) << 16;
   3619 	}
   3620 
   3621 	drm_gem_object_unreference(&obj->base);
   3622 unlock:
   3623 	mutex_unlock(&dev->struct_mutex);
   3624 	return ret;
   3625 }
   3626 
   3627 int
   3628 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
   3629 			struct drm_file *file_priv)
   3630 {
   3631 	return i915_gem_ring_throttle(dev, file_priv);
   3632 }
   3633 
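/*
 * Let userspace mark an object's backing storage as DONTNEED (purgeable
 * under memory pressure) or WILLNEED.  Pinned objects cannot be marked,
 * and storage that has already been purged is never resurrected;
 * args->retained reports whether the pages survived.
 */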
   3634 int
   3635 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
   3636 		       struct drm_file *file_priv)
   3637 {
   3638 	struct drm_i915_gem_madvise *args = data;
   3639 	struct drm_i915_gem_object *obj;
   3640 	int ret;
   3641 
   3642 	switch (args->madv) {
   3643 	case I915_MADV_DONTNEED:
   3644 	case I915_MADV_WILLNEED:
   3645 	    break;
   3646 	default:
   3647 	    return -EINVAL;
   3648 	}
   3649 
   3650 	ret = i915_mutex_lock_interruptible(dev);
   3651 	if (ret)
   3652 		return ret;
   3653 
   3654 	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
   3655 	if (&obj->base == NULL) {
   3656 		ret = -ENOENT;
   3657 		goto unlock;
   3658 	}
   3659 
   3660 	if (obj->pin_count) {
   3661 		ret = -EINVAL;
   3662 		goto out;
   3663 	}
   3664 
   3665 	if (obj->madv != __I915_MADV_PURGED)
   3666 		obj->madv = args->madv;
   3667 
   3668 	/* if the object is no longer attached, discard its backing storage */
   3669 	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
   3670 		i915_gem_object_truncate(obj);
   3671 
   3672 	args->retained = obj->madv != __I915_MADV_PURGED;
   3673 
   3674 out:
   3675 	drm_gem_object_unreference(&obj->base);
   3676 unlock:
   3677 	mutex_unlock(&dev->struct_mutex);
   3678 	return ret;
   3679 }
   3680 
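/*
 * Common initialisation shared by all GEM object constructors: list
 * heads, the backing-store ops table and the default fence and madvise
 * state.
 */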
   3681 void i915_gem_object_init(struct drm_i915_gem_object *obj,
   3682 			  const struct drm_i915_gem_object_ops *ops)
   3683 {
   3684 	INIT_LIST_HEAD(&obj->mm_list);
   3685 	INIT_LIST_HEAD(&obj->gtt_list);
   3686 	INIT_LIST_HEAD(&obj->ring_list);
   3687 	INIT_LIST_HEAD(&obj->exec_list);
   3688 
   3689 	obj->ops = ops;
   3690 
   3691 	obj->fence_reg = I915_FENCE_REG_NONE;
   3692 	obj->madv = I915_MADV_WILLNEED;
   3693 	/* Avoid an unnecessary call to unbind on the first bind. */
   3694 	obj->map_and_fenceable = true;
   3695 
   3696 	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
   3697 }
   3698 
   3699 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
   3700 	.get_pages = i915_gem_object_get_pages_gtt,
   3701 	.put_pages = i915_gem_object_put_pages_gtt,
   3702 };
   3703 
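/*
 * Allocate a shmem-backed GEM object of the given size, restricting the
 * backing pages to 32-bit addresses on Broadwater/Crestline and
 * defaulting to the LLC cache level where the hardware has an LLC.
 */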
   3704 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
   3705 						  size_t size)
   3706 {
   3707 	struct drm_i915_gem_object *obj;
   3708 	struct address_space *mapping;
   3709 	u32 mask;
   3710 
   3711 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
   3712 	if (obj == NULL)
   3713 		return NULL;
   3714 
   3715 	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
   3716 		kfree(obj);
   3717 		return NULL;
   3718 	}
   3719 
   3720 	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
   3721 	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
   3722 		/* 965gm cannot relocate objects above 4GiB. */
   3723 		mask &= ~__GFP_HIGHMEM;
   3724 		mask |= __GFP_DMA32;
   3725 	}
   3726 
   3727 	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
   3728 	mapping_set_gfp_mask(mapping, mask);
   3729 
   3730 	i915_gem_object_init(obj, &i915_gem_object_ops);
   3731 
   3732 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
   3733 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
   3734 
   3735 	if (HAS_LLC(dev)) {
   3736 		/* On some devices, we can have the GPU use the LLC (the CPU
   3737 		 * cache) for about a 10% performance improvement
   3738 		 * compared to uncached.  Graphics requests other than
   3739 		 * display scanout are coherent with the CPU in
   3740 		 * accessing this cache.  This means in this mode we
   3741 		 * don't need to clflush on the CPU side, and on the
   3742 		 * GPU side we only need to flush internal caches to
   3743 		 * get data visible to the CPU.
   3744 		 *
   3745 		 * However, we maintain the display planes as UC, and so
   3746 		 * need to rebind when first used as such.
   3747 		 */
   3748 		obj->cache_level = I915_CACHE_LLC;
   3749 	} else
   3750 		obj->cache_level = I915_CACHE_NONE;
   3751 
   3752 	return obj;
   3753 }
   3754 
   3755 int i915_gem_init_object(struct drm_gem_object *obj)
   3756 {
   3757 	BUG();
   3758 
   3759 	return 0;
   3760 }
   3761 
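/*
 * Final unreference path for a GEM object: detach any phys backing,
 * force an unbind (uninterruptibly if necessary), release the backing
 * pages and mmap offset, and finally free the object itself.
 */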
   3762 void i915_gem_free_object(struct drm_gem_object *gem_obj)
   3763 {
   3764 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
   3765 	struct drm_device *dev = obj->base.dev;
   3766 	drm_i915_private_t *dev_priv = dev->dev_private;
   3767 
   3768 	trace_i915_gem_object_destroy(obj);
   3769 
   3770 	if (obj->phys_obj)
   3771 		i915_gem_detach_phys_object(dev, obj);
   3772 
   3773 	obj->pin_count = 0;
   3774 	if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
   3775 		bool was_interruptible;
   3776 
   3777 		was_interruptible = dev_priv->mm.interruptible;
   3778 		dev_priv->mm.interruptible = false;
   3779 
   3780 		WARN_ON(i915_gem_object_unbind(obj));
   3781 
   3782 		dev_priv->mm.interruptible = was_interruptible;
   3783 	}
   3784 
   3785 	obj->pages_pin_count = 0;
   3786 	i915_gem_object_put_pages(obj);
   3787 	i915_gem_object_free_mmap_offset(obj);
   3788 
   3789 	BUG_ON(obj->pages);
   3790 
   3791 	if (obj->base.import_attach)
   3792 		drm_prime_gem_destroy(&obj->base, NULL);
   3793 
   3794 	drm_gem_object_release(&obj->base);
   3795 	i915_gem_info_remove_obj(dev_priv, obj->base.size);
   3796 
   3797 	kfree(obj->bit_17);
   3798 	kfree(obj);
   3799 }
   3800 
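/*
 * Quiesce GEM for suspend or unload: wait for the GPU to go idle, retire
 * outstanding requests, drop the fence registers and tear down the ring
 * buffers.  On return the retire work handler has been cancelled.
 */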
   3801 int
   3802 i915_gem_idle(struct drm_device *dev)
   3803 {
   3804 	drm_i915_private_t *dev_priv = dev->dev_private;
   3805 	int ret;
   3806 
   3807 	mutex_lock(&dev->struct_mutex);
   3808 
   3809 	if (dev_priv->mm.suspended) {
   3810 		mutex_unlock(&dev->struct_mutex);
   3811 		return 0;
   3812 	}
   3813 
   3814 	ret = i915_gpu_idle(dev);
   3815 	if (ret) {
   3816 		mutex_unlock(&dev->struct_mutex);
   3817 		return ret;
   3818 	}
   3819 	i915_gem_retire_requests(dev);
   3820 
   3821 	/* Under UMS, be paranoid and evict. */
   3822 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
   3823 		i915_gem_evict_everything(dev);
   3824 
   3825 	i915_gem_reset_fences(dev);
   3826 
   3827 	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
   3828 	 * We need to replace this with a semaphore, or something.
   3829 	 * And not confound mm.suspended!
   3830 	 */
   3831 	dev_priv->mm.suspended = 1;
   3832 	del_timer_sync(&dev_priv->hangcheck_timer);
   3833 
   3834 	i915_kernel_lost_context(dev);
   3835 	i915_gem_cleanup_ringbuffer(dev);
   3836 
   3837 	mutex_unlock(&dev->struct_mutex);
   3838 
   3839 	/* Cancel the retire work handler, which should be idle now. */
   3840 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
   3841 
   3842 	return 0;
   3843 }
   3844 
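/*
 * On Ivybridge, rewrite the GEN7_L3LOG registers from the remap
 * information saved in dev_priv->l3_parity, with DOP clock gating
 * temporarily disabled around the update.
 */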
   3845 void i915_gem_l3_remap(struct drm_device *dev)
   3846 {
   3847 	drm_i915_private_t *dev_priv = dev->dev_private;
   3848 	u32 misccpctl;
   3849 	int i;
   3850 
   3851 	if (!IS_IVYBRIDGE(dev))
   3852 		return;
   3853 
   3854 	if (!dev_priv->l3_parity.remap_info)
   3855 		return;
   3856 
   3857 	misccpctl = I915_READ(GEN7_MISCCPCTL);
   3858 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
   3859 	POSTING_READ(GEN7_MISCCPCTL);
   3860 
   3861 	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
   3862 		u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
   3863 		if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
   3864 			DRM_DEBUG("0x%x was already programmed to %x\n",
   3865 				  GEN7_L3LOG_BASE + i, remap);
   3866 		if (remap && !dev_priv->l3_parity.remap_info[i/4])
   3867 			DRM_DEBUG_DRIVER("Clearing remapped register\n");
   3868 		I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
   3869 	}
   3870 
    3871 	/* Make sure all the writes land before re-enabling DOP clock gating */
   3872 	POSTING_READ(GEN7_L3LOG_BASE);
   3873 
   3874 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
   3875 }
   3876 
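/*
 * Enable surface swizzling in the display and memory arbiters on gen5+
 * when bit-6 swizzling is in use; otherwise this is a no-op.
 */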
   3877 void i915_gem_init_swizzling(struct drm_device *dev)
   3878 {
   3879 	drm_i915_private_t *dev_priv = dev->dev_private;
   3880 
   3881 	if (INTEL_INFO(dev)->gen < 5 ||
   3882 	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
   3883 		return;
   3884 
   3885 	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
   3886 				 DISP_TILE_SURFACE_SWIZZLING);
   3887 
   3888 	if (IS_GEN5(dev))
   3889 		return;
   3890 
   3891 	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
   3892 	if (IS_GEN6(dev))
   3893 		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
   3894 	else
   3895 		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
   3896 }
   3897 
   3898 static bool
   3899 intel_enable_blt(struct drm_device *dev)
   3900 {
   3901 	if (!HAS_BLT(dev))
   3902 		return false;
   3903 
   3904 	/* The blitter was dysfunctional on early prototypes */
   3905 	if (IS_GEN6(dev) && dev->pdev->revision < 8) {
   3906 		DRM_INFO("BLT not supported on this pre-production hardware;"
   3907 			 " graphics performance will be degraded.\n");
   3908 		return false;
   3909 	}
   3910 
   3911 	return true;
   3912 }
   3913 
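/*
 * Bring up the GPU side of GEM: apply the L3 remap and swizzling setup,
 * initialise the render ring plus the BSD and blitter rings where
 * available, then set up hardware contexts and the aliasing PPGTT.
 */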
   3914 int
   3915 i915_gem_init_hw(struct drm_device *dev)
   3916 {
   3917 	drm_i915_private_t *dev_priv = dev->dev_private;
   3918 	int ret;
   3919 
   3920 	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
   3921 		return -EIO;
   3922 
   3923 	if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
   3924 		I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
   3925 
   3926 	i915_gem_l3_remap(dev);
   3927 
   3928 	i915_gem_init_swizzling(dev);
   3929 
   3930 	ret = intel_init_render_ring_buffer(dev);
   3931 	if (ret)
   3932 		return ret;
   3933 
   3934 	if (HAS_BSD(dev)) {
   3935 		ret = intel_init_bsd_ring_buffer(dev);
   3936 		if (ret)
   3937 			goto cleanup_render_ring;
   3938 	}
   3939 
   3940 	if (intel_enable_blt(dev)) {
   3941 		ret = intel_init_blt_ring_buffer(dev);
   3942 		if (ret)
   3943 			goto cleanup_bsd_ring;
   3944 	}
   3945 
   3946 	dev_priv->next_seqno = 1;
   3947 
   3948 	/*
   3949 	 * XXX: There was some w/a described somewhere suggesting loading
   3950 	 * contexts before PPGTT.
   3951 	 */
   3952 	i915_gem_context_init(dev);
   3953 	i915_gem_init_ppgtt(dev);
   3954 
   3955 	return 0;
   3956 
   3957 cleanup_bsd_ring:
   3958 	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
   3959 cleanup_render_ring:
   3960 	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
   3961 	return ret;
   3962 }
   3963 
   3964 static bool
   3965 intel_enable_ppgtt(struct drm_device *dev)
   3966 {
   3967 	if (i915_enable_ppgtt >= 0)
   3968 		return i915_enable_ppgtt;
   3969 
   3970 #ifdef CONFIG_INTEL_IOMMU
   3971 	/* Disable ppgtt on SNB if VT-d is on. */
   3972 	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
   3973 		return false;
   3974 #endif
   3975 
   3976 	return true;
   3977 }
   3978 
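/*
 * Top-level GEM initialisation at driver load: carve up the global GTT,
 * reserving space for the PPGTT page directories when aliasing PPGTT is
 * enabled, and then initialise the hardware via i915_gem_init_hw().
 */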
   3979 int i915_gem_init(struct drm_device *dev)
   3980 {
   3981 	struct drm_i915_private *dev_priv = dev->dev_private;
   3982 	unsigned long gtt_size, mappable_size;
   3983 	int ret;
   3984 
   3985 	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
   3986 	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
   3987 
   3988 	mutex_lock(&dev->struct_mutex);
   3989 	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
   3990 		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
   3991 		 * aperture accordingly when using aliasing ppgtt. */
   3992 		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
   3993 
   3994 		i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
   3995 
   3996 		ret = i915_gem_init_aliasing_ppgtt(dev);
   3997 		if (ret) {
   3998 			mutex_unlock(&dev->struct_mutex);
   3999 			return ret;
   4000 		}
   4001 	} else {
    4002 		/* Let GEM manage all of the aperture.
   4003 		 *
   4004 		 * However, leave one page at the end still bound to the scratch
   4005 		 * page.  There are a number of places where the hardware
   4006 		 * apparently prefetches past the end of the object, and we've
   4007 		 * seen multiple hangs with the GPU head pointer stuck in a
   4008 		 * batchbuffer bound at the last page of the aperture.  One page
   4009 		 * should be enough to keep any prefetching inside of the
   4010 		 * aperture.
   4011 		 */
   4012 		i915_gem_init_global_gtt(dev, 0, mappable_size,
   4013 					 gtt_size);
   4014 	}
   4015 
   4016 	ret = i915_gem_init_hw(dev);
   4017 	mutex_unlock(&dev->struct_mutex);
   4018 	if (ret) {
   4019 		i915_gem_cleanup_aliasing_ppgtt(dev);
   4020 		return ret;
   4021 	}
   4022 
   4023 	/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
   4024 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
   4025 		dev_priv->dri1.allow_batchbuffer = 1;
   4026 	return 0;
   4027 }
   4028 
   4029 void
   4030 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
   4031 {
   4032 	drm_i915_private_t *dev_priv = dev->dev_private;
   4033 	struct intel_ring_buffer *ring;
   4034 	int i;
   4035 
   4036 	for_each_ring(ring, dev_priv, i)
   4037 		intel_cleanup_ring_buffer(ring);
   4038 }
   4039 
   4040 int
   4041 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
   4042 		       struct drm_file *file_priv)
   4043 {
   4044 	drm_i915_private_t *dev_priv = dev->dev_private;
   4045 	int ret;
   4046 
   4047 	if (drm_core_check_feature(dev, DRIVER_MODESET))
   4048 		return 0;
   4049 
   4050 	if (atomic_read(&dev_priv->mm.wedged)) {
   4051 		DRM_ERROR("Reenabling wedged hardware, good luck\n");
   4052 		atomic_set(&dev_priv->mm.wedged, 0);
   4053 	}
   4054 
   4055 	mutex_lock(&dev->struct_mutex);
   4056 	dev_priv->mm.suspended = 0;
   4057 
   4058 	ret = i915_gem_init_hw(dev);
   4059 	if (ret != 0) {
   4060 		mutex_unlock(&dev->struct_mutex);
   4061 		return ret;
   4062 	}
   4063 
   4064 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
   4065 	mutex_unlock(&dev->struct_mutex);
   4066 
   4067 	ret = drm_irq_install(dev);
   4068 	if (ret)
   4069 		goto cleanup_ringbuffer;
   4070 
   4071 	return 0;
   4072 
   4073 cleanup_ringbuffer:
   4074 	mutex_lock(&dev->struct_mutex);
   4075 	i915_gem_cleanup_ringbuffer(dev);
   4076 	dev_priv->mm.suspended = 1;
   4077 	mutex_unlock(&dev->struct_mutex);
   4078 
   4079 	return ret;
   4080 }
   4081 
   4082 int
   4083 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
   4084 		       struct drm_file *file_priv)
   4085 {
   4086 	if (drm_core_check_feature(dev, DRIVER_MODESET))
   4087 		return 0;
   4088 
   4089 	drm_irq_uninstall(dev);
   4090 	return i915_gem_idle(dev);
   4091 }
   4092 
   4093 void
   4094 i915_gem_lastclose(struct drm_device *dev)
   4095 {
   4096 	int ret;
   4097 
   4098 	if (drm_core_check_feature(dev, DRIVER_MODESET))
   4099 		return;
   4100 
   4101 	ret = i915_gem_idle(dev);
   4102 	if (ret)
   4103 		DRM_ERROR("failed to idle hardware: %d\n", ret);
   4104 }
   4105 
   4106 static void
   4107 init_ring_lists(struct intel_ring_buffer *ring)
   4108 {
   4109 	INIT_LIST_HEAD(&ring->active_list);
   4110 	INIT_LIST_HEAD(&ring->request_list);
   4111 }
   4112 
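/*
 * One-time software initialisation of GEM state at driver load: LRU and
 * request lists, the retire work handler, fence register bookkeeping,
 * swizzle detection and the inactive-object shrinker.
 */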
   4113 void
   4114 i915_gem_load(struct drm_device *dev)
   4115 {
   4116 	int i;
   4117 	drm_i915_private_t *dev_priv = dev->dev_private;
   4118 
   4119 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
   4120 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
   4121 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
   4122 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
   4123 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
   4124 	for (i = 0; i < I915_NUM_RINGS; i++)
   4125 		init_ring_lists(&dev_priv->ring[i]);
   4126 	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
   4127 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
   4128 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
   4129 			  i915_gem_retire_work_handler);
   4130 	init_completion(&dev_priv->error_completion);
   4131 
   4132 	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
   4133 	if (IS_GEN3(dev)) {
   4134 		I915_WRITE(MI_ARB_STATE,
   4135 			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
   4136 	}
   4137 
   4138 	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
   4139 
   4140 	/* Old X drivers will take 0-2 for front, back, depth buffers */
   4141 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
   4142 		dev_priv->fence_reg_start = 3;
   4143 
   4144 	if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
   4145 		dev_priv->num_fence_regs = 16;
   4146 	else
   4147 		dev_priv->num_fence_regs = 8;
   4148 
   4149 	/* Initialize fence registers to zero */
   4150 	i915_gem_reset_fences(dev);
   4151 
   4152 	i915_gem_detect_bit_6_swizzle(dev);
   4153 	init_waitqueue_head(&dev_priv->pending_flip_queue);
   4154 
   4155 	dev_priv->mm.interruptible = true;
   4156 
   4157 	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
   4158 	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
   4159 	register_shrinker(&dev_priv->mm.inactive_shrinker);
   4160 }
   4161 
   4162 /*
    4163  * Create a physically contiguous memory object for this object,
    4164  * e.g. for cursor + overlay regs.
   4165  */
   4166 static int i915_gem_init_phys_object(struct drm_device *dev,
   4167 				     int id, int size, int align)
   4168 {
   4169 	drm_i915_private_t *dev_priv = dev->dev_private;
   4170 	struct drm_i915_gem_phys_object *phys_obj;
   4171 	int ret;
   4172 
   4173 	if (dev_priv->mm.phys_objs[id - 1] || !size)
   4174 		return 0;
   4175 
   4176 	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
   4177 	if (!phys_obj)
   4178 		return -ENOMEM;
   4179 
   4180 	phys_obj->id = id;
   4181 
   4182 	phys_obj->handle = drm_pci_alloc(dev, size, align);
   4183 	if (!phys_obj->handle) {
   4184 		ret = -ENOMEM;
   4185 		goto kfree_obj;
   4186 	}
   4187 #ifdef CONFIG_X86
   4188 	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
   4189 #endif
   4190 
   4191 	dev_priv->mm.phys_objs[id - 1] = phys_obj;
   4192 
   4193 	return 0;
   4194 kfree_obj:
   4195 	kfree(phys_obj);
   4196 	return ret;
   4197 }
   4198 
   4199 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
   4200 {
   4201 	drm_i915_private_t *dev_priv = dev->dev_private;
   4202 	struct drm_i915_gem_phys_object *phys_obj;
   4203 
   4204 	if (!dev_priv->mm.phys_objs[id - 1])
   4205 		return;
   4206 
   4207 	phys_obj = dev_priv->mm.phys_objs[id - 1];
   4208 	if (phys_obj->cur_obj) {
   4209 		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
   4210 	}
   4211 
   4212 #ifdef CONFIG_X86
   4213 	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
   4214 #endif
   4215 	drm_pci_free(dev, phys_obj->handle);
   4216 	kfree(phys_obj);
   4217 	dev_priv->mm.phys_objs[id - 1] = NULL;
   4218 }
   4219 
   4220 void i915_gem_free_all_phys_object(struct drm_device *dev)
   4221 {
   4222 	int i;
   4223 
   4224 	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
   4225 		i915_gem_free_phys_object(dev, i);
   4226 }
   4227 
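/*
 * Copy the contents of a physically contiguous backing store back into
 * the object's shmem pages and drop the association with the phys object.
 */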
   4228 void i915_gem_detach_phys_object(struct drm_device *dev,
   4229 				 struct drm_i915_gem_object *obj)
   4230 {
   4231 	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
   4232 	char *vaddr;
   4233 	int i;
   4234 	int page_count;
   4235 
   4236 	if (!obj->phys_obj)
   4237 		return;
   4238 	vaddr = obj->phys_obj->handle->vaddr;
   4239 
   4240 	page_count = obj->base.size / PAGE_SIZE;
   4241 	for (i = 0; i < page_count; i++) {
   4242 		struct page *page = shmem_read_mapping_page(mapping, i);
   4243 		if (!IS_ERR(page)) {
   4244 			char *dst = kmap_atomic(page);
   4245 			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
   4246 			kunmap_atomic(dst);
   4247 
   4248 			drm_clflush_pages(&page, 1);
   4249 
   4250 			set_page_dirty(page);
   4251 			mark_page_accessed(page);
   4252 			page_cache_release(page);
   4253 		}
   4254 	}
   4255 	i915_gem_chipset_flush(dev);
   4256 
   4257 	obj->phys_obj->cur_obj = NULL;
   4258 	obj->phys_obj = NULL;
   4259 }
   4260 
   4261 int
   4262 i915_gem_attach_phys_object(struct drm_device *dev,
   4263 			    struct drm_i915_gem_object *obj,
   4264 			    int id,
   4265 			    int align)
   4266 {
   4267 	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
   4268 	drm_i915_private_t *dev_priv = dev->dev_private;
   4269 	int ret = 0;
   4270 	int page_count;
   4271 	int i;
   4272 
   4273 	if (id > I915_MAX_PHYS_OBJECT)
   4274 		return -EINVAL;
   4275 
   4276 	if (obj->phys_obj) {
   4277 		if (obj->phys_obj->id == id)
   4278 			return 0;
   4279 		i915_gem_detach_phys_object(dev, obj);
   4280 	}
   4281 
   4282 	/* create a new object */
   4283 	if (!dev_priv->mm.phys_objs[id - 1]) {
   4284 		ret = i915_gem_init_phys_object(dev, id,
   4285 						obj->base.size, align);
   4286 		if (ret) {
   4287 			DRM_ERROR("failed to init phys object %d size: %zu\n",
   4288 				  id, obj->base.size);
   4289 			return ret;
   4290 		}
   4291 	}
   4292 
   4293 	/* bind to the object */
   4294 	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
   4295 	obj->phys_obj->cur_obj = obj;
   4296 
   4297 	page_count = obj->base.size / PAGE_SIZE;
   4298 
   4299 	for (i = 0; i < page_count; i++) {
   4300 		struct page *page;
   4301 		char *dst, *src;
   4302 
   4303 		page = shmem_read_mapping_page(mapping, i);
   4304 		if (IS_ERR(page))
   4305 			return PTR_ERR(page);
   4306 
   4307 		src = kmap_atomic(page);
   4308 		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
   4309 		memcpy(dst, src, PAGE_SIZE);
   4310 		kunmap_atomic(src);
   4311 
   4312 		mark_page_accessed(page);
   4313 		page_cache_release(page);
   4314 	}
   4315 
   4316 	return 0;
   4317 }
   4318 
   4319 static int
   4320 i915_gem_phys_pwrite(struct drm_device *dev,
   4321 		     struct drm_i915_gem_object *obj,
   4322 		     struct drm_i915_gem_pwrite *args,
   4323 		     struct drm_file *file_priv)
   4324 {
   4325 	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
   4326 	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
   4327 
   4328 	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
   4329 		unsigned long unwritten;
   4330 
   4331 		/* The physical object once assigned is fixed for the lifetime
   4332 		 * of the obj, so we can safely drop the lock and continue
   4333 		 * to access vaddr.
   4334 		 */
   4335 		mutex_unlock(&dev->struct_mutex);
   4336 		unwritten = copy_from_user(vaddr, user_data, args->size);
   4337 		mutex_lock(&dev->struct_mutex);
   4338 		if (unwritten)
   4339 			return -EFAULT;
   4340 	}
   4341 
   4342 	i915_gem_chipset_flush(dev);
   4343 	return 0;
   4344 }
   4345 
   4346 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
   4347 {
   4348 	struct drm_i915_file_private *file_priv = file->driver_priv;
   4349 
   4350 	/* Clean up our request list when the client is going away, so that
   4351 	 * later retire_requests won't dereference our soon-to-be-gone
   4352 	 * file_priv.
   4353 	 */
   4354 	spin_lock(&file_priv->mm.lock);
   4355 	while (!list_empty(&file_priv->mm.request_list)) {
   4356 		struct drm_i915_gem_request *request;
   4357 
   4358 		request = list_first_entry(&file_priv->mm.request_list,
   4359 					   struct drm_i915_gem_request,
   4360 					   client_list);
   4361 		list_del(&request->client_list);
   4362 		request->file_priv = NULL;
   4363 	}
   4364 	spin_unlock(&file_priv->mm.lock);
   4365 }
   4366 
   4367 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
   4368 {
   4369 	if (!mutex_is_locked(mutex))
   4370 		return false;
   4371 
   4372 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
   4373 	return mutex->owner == task;
   4374 #else
   4375 	/* Since UP may be pre-empted, we cannot assume that we own the lock */
   4376 	return false;
   4377 #endif
   4378 }
   4379 
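/*
 * Shrinker callback.  When asked to scan, purgeable objects are dropped
 * first, then unbound and inactive objects; the return value is the
 * number of pages that could still be reclaimed.  struct_mutex is only
 * trylocked (or detected as already held by us) so the shrinker cannot
 * deadlock against GEM itself.
 */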
   4380 static int
   4381 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
   4382 {
   4383 	struct drm_i915_private *dev_priv =
   4384 		container_of(shrinker,
   4385 			     struct drm_i915_private,
   4386 			     mm.inactive_shrinker);
   4387 	struct drm_device *dev = dev_priv->dev;
   4388 	struct drm_i915_gem_object *obj;
   4389 	int nr_to_scan = sc->nr_to_scan;
   4390 	bool unlock = true;
   4391 	int cnt;
   4392 
   4393 	if (!mutex_trylock(&dev->struct_mutex)) {
   4394 		if (!mutex_is_locked_by(&dev->struct_mutex, current))
   4395 			return 0;
   4396 
   4397 		if (dev_priv->mm.shrinker_no_lock_stealing)
   4398 			return 0;
   4399 
   4400 		unlock = false;
   4401 	}
   4402 
   4403 	if (nr_to_scan) {
   4404 		nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
   4405 		if (nr_to_scan > 0)
   4406 			nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
   4407 							false);
   4408 		if (nr_to_scan > 0)
   4409 			i915_gem_shrink_all(dev_priv);
   4410 	}
   4411 
   4412 	cnt = 0;
   4413 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
   4414 		if (obj->pages_pin_count == 0)
   4415 			cnt += obj->base.size >> PAGE_SHIFT;
   4416 	list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
   4417 		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
   4418 			cnt += obj->base.size >> PAGE_SHIFT;
   4419 
   4420 	if (unlock)
   4421 		mutex_unlock(&dev->struct_mutex);
   4422 	return cnt;
   4423 }
   4424