      1  1.75  riastrad /*	$NetBSD: i915_gem.c,v 1.75 2021/12/19 12:32:15 riastradh Exp $	*/
      2  1.36  riastrad 
      3   1.1  riastrad /*
      4  1.36  riastrad  * Copyright © 2008-2015 Intel Corporation
      5   1.1  riastrad  *
      6   1.1  riastrad  * Permission is hereby granted, free of charge, to any person obtaining a
      7   1.1  riastrad  * copy of this software and associated documentation files (the "Software"),
      8   1.1  riastrad  * to deal in the Software without restriction, including without limitation
      9   1.1  riastrad  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10   1.1  riastrad  * and/or sell copies of the Software, and to permit persons to whom the
     11   1.1  riastrad  * Software is furnished to do so, subject to the following conditions:
     12   1.1  riastrad  *
     13   1.1  riastrad  * The above copyright notice and this permission notice (including the next
     14   1.1  riastrad  * paragraph) shall be included in all copies or substantial portions of the
     15   1.1  riastrad  * Software.
     16   1.1  riastrad  *
     17   1.1  riastrad  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     18   1.1  riastrad  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     19   1.1  riastrad  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     20   1.1  riastrad  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     21   1.1  riastrad  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     22   1.1  riastrad  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     23   1.1  riastrad  * IN THE SOFTWARE.
     24   1.1  riastrad  *
     25   1.1  riastrad  * Authors:
     26   1.1  riastrad  *    Eric Anholt <eric@anholt.net>
     27   1.1  riastrad  *
     28   1.1  riastrad  */
     29   1.1  riastrad 
     30  1.36  riastrad #include <sys/cdefs.h>
     31  1.75  riastrad __KERNEL_RCSID(0, "$NetBSD: i915_gem.c,v 1.75 2021/12/19 12:32:15 riastradh Exp $");
     32  1.36  riastrad 
     33   1.2  riastrad #ifdef __NetBSD__
     34  1.13  riastrad #include <drm/bus_dma_hacks.h>
     35   1.2  riastrad #endif
     36   1.2  riastrad 
     37  1.12  riastrad #include <drm/drm_vma_manager.h>
     38   1.1  riastrad #include <drm/i915_drm.h>
     39  1.62  riastrad #include <linux/dma-fence-array.h>
     40  1.62  riastrad #include <linux/kthread.h>
     41  1.62  riastrad #include <linux/dma-resv.h>
     42   1.1  riastrad #include <linux/shmem_fs.h>
     43   1.1  riastrad #include <linux/slab.h>
     44  1.62  riastrad #include <linux/stop_machine.h>
     45   1.1  riastrad #include <linux/swap.h>
     46   1.1  riastrad #include <linux/pci.h>
     47   1.1  riastrad #include <linux/dma-buf.h>
     48  1.62  riastrad #include <linux/mman.h>
     49  1.69  riastrad #include <linux/uaccess.h>
     50  1.58  riastrad 
     51  1.62  riastrad #include "display/intel_display.h"
     52  1.62  riastrad #include "display/intel_frontbuffer.h"
     53  1.36  riastrad 
     54  1.62  riastrad #include "gem/i915_gem_clflush.h"
     55  1.62  riastrad #include "gem/i915_gem_context.h"
     56  1.62  riastrad #include "gem/i915_gem_ioctls.h"
     57  1.62  riastrad #include "gem/i915_gem_mman.h"
     58  1.62  riastrad #include "gem/i915_gem_region.h"
     59  1.62  riastrad #include "gt/intel_engine_user.h"
     60  1.62  riastrad #include "gt/intel_gt.h"
     61  1.62  riastrad #include "gt/intel_gt_pm.h"
     62  1.62  riastrad #include "gt/intel_workarounds.h"
     63  1.12  riastrad 
     64  1.62  riastrad #include "i915_drv.h"
     65  1.62  riastrad #include "i915_trace.h"
     66  1.62  riastrad #include "i915_vgpu.h"
     67  1.12  riastrad 
     68  1.62  riastrad #include "intel_pm.h"
     69   1.1  riastrad 
     70  1.67  riastrad #include <linux/nbsd-namespace.h>
     71  1.67  riastrad 
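                         /*
                          * insert_mappable_node: reserve a scratch drm_mm node of the given
                          * size in the CPU-mappable range of the global GTT, under the GGTT
                          * vm mutex.  The pread/pwrite aperture paths below use this as a
                          * fallback window when the object cannot be pinned whole.
                          */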
     72  1.62  riastrad static int
     73  1.62  riastrad insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
     74   1.1  riastrad {
     75  1.62  riastrad 	int err;
     76   1.1  riastrad 
     77  1.62  riastrad 	err = mutex_lock_interruptible(&ggtt->vm.mutex);
     78  1.62  riastrad 	if (err)
     79  1.62  riastrad 		return err;
     80   1.1  riastrad 
     81  1.62  riastrad 	memset(node, 0, sizeof(*node));
     82  1.62  riastrad 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
     83  1.62  riastrad 					  size, 0, I915_COLOR_UNEVICTABLE,
     84  1.62  riastrad 					  0, ggtt->mappable_end,
     85  1.62  riastrad 					  DRM_MM_INSERT_LOW);
     86   1.1  riastrad 
     87  1.62  riastrad 	mutex_unlock(&ggtt->vm.mutex);
     88   1.1  riastrad 
     89  1.62  riastrad 	return err;
     90   1.1  riastrad }
     91   1.1  riastrad 
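                         /* Drop a scratch node previously reserved by insert_mappable_node(). */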
     92  1.62  riastrad static void
     93  1.62  riastrad remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
     94   1.1  riastrad {
     95  1.62  riastrad 	mutex_lock(&ggtt->vm.mutex);
     96  1.62  riastrad 	drm_mm_remove_node(node);
     97  1.62  riastrad 	mutex_unlock(&ggtt->vm.mutex);
     98   1.1  riastrad }
     99   1.1  riastrad 
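                         /*
                          * i915_gem_get_aperture_ioctl: report the total size of the global
                          * GTT and an estimate of what is still available, i.e. the total
                          * minus the reserved space and all currently pinned VMAs.
                          */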
    100   1.1  riastrad int
    101   1.1  riastrad i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
    102   1.1  riastrad 			    struct drm_file *file)
    103   1.1  riastrad {
    104  1.62  riastrad 	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
    105   1.1  riastrad 	struct drm_i915_gem_get_aperture *args = data;
    106  1.36  riastrad 	struct i915_vma *vma;
    107  1.62  riastrad 	u64 pinned;
    108  1.62  riastrad 
    109  1.62  riastrad 	if (mutex_lock_interruptible(&ggtt->vm.mutex))
    110  1.62  riastrad 		return -EINTR;
    111   1.1  riastrad 
    112  1.62  riastrad 	pinned = ggtt->vm.reserved;
    113  1.62  riastrad 	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
    114  1.62  riastrad 		if (i915_vma_is_pinned(vma))
    115  1.36  riastrad 			pinned += vma->node.size;
    116   1.1  riastrad 
    117  1.62  riastrad 	mutex_unlock(&ggtt->vm.mutex);
    118  1.62  riastrad 
    119  1.62  riastrad 	args->aper_size = ggtt->vm.total;
    120   1.1  riastrad 	args->aper_available_size = args->aper_size - pinned;
    121   1.1  riastrad 
    122   1.1  riastrad 	return 0;
    123   1.1  riastrad }
    124   1.1  riastrad 
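                         /*
                          * i915_gem_object_unbind: unbind all VMAs of an object from their
                          * address spaces.  A runtime-PM wakeref is taken up front (see the
                          * comment below), the VMA list is walked under obj->vma.lock, and
                          * the walk is retried after an RCU barrier if a vm was being closed
                          * and I915_GEM_OBJECT_UNBIND_BARRIER is set.  Active VMAs make this
                          * fail with -EBUSY unless I915_GEM_OBJECT_UNBIND_ACTIVE is passed.
                          */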
    125  1.62  riastrad int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
    126  1.62  riastrad 			   unsigned long flags)
    127  1.12  riastrad {
    128  1.62  riastrad 	struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
    129  1.62  riastrad 	LIST_HEAD(still_in_list);
    130  1.62  riastrad 	intel_wakeref_t wakeref;
    131  1.62  riastrad 	struct i915_vma *vma;
    132  1.62  riastrad 	int ret;
    133  1.36  riastrad 
    134  1.62  riastrad 	if (!atomic_read(&obj->bind_count))
    135  1.62  riastrad 		return 0;
    136  1.36  riastrad 
    137  1.62  riastrad 	/*
    138  1.62  riastrad 	 * As some machines use ACPI to handle runtime-resume callbacks, and
    139  1.62  riastrad 	 * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
     140  1.62  riastrad 	 * as it is required by the shrinker. Ergo, we wake the device up
    141  1.62  riastrad 	 * first just in case.
    142  1.62  riastrad 	 */
    143  1.62  riastrad 	wakeref = intel_runtime_pm_get(rpm);
    144  1.62  riastrad 
    145  1.62  riastrad try_again:
    146  1.62  riastrad 	ret = 0;
    147  1.62  riastrad 	spin_lock(&obj->vma.lock);
    148  1.62  riastrad 	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
    149  1.62  riastrad 						       struct i915_vma,
    150  1.62  riastrad 						       obj_link))) {
    151  1.62  riastrad 		struct i915_address_space *vm = vma->vm;
    152  1.36  riastrad 
    153  1.62  riastrad 		list_move_tail(&vma->obj_link, &still_in_list);
    154  1.62  riastrad 		if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
    155  1.62  riastrad 			continue;
    156  1.36  riastrad 
    157  1.62  riastrad 		ret = -EAGAIN;
    158  1.62  riastrad 		if (!i915_vm_tryopen(vm))
    159  1.62  riastrad 			break;
    160  1.36  riastrad 
    161  1.62  riastrad 		/* Prevent vma being freed by i915_vma_parked as we unbind */
    162  1.62  riastrad 		vma = __i915_vma_get(vma);
    163  1.62  riastrad 		spin_unlock(&obj->vma.lock);
    164  1.12  riastrad 
    165  1.62  riastrad 		if (vma) {
    166  1.62  riastrad 			ret = -EBUSY;
    167  1.62  riastrad 			if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
    168  1.62  riastrad 			    !i915_vma_is_active(vma))
    169  1.62  riastrad 				ret = i915_vma_unbind(vma);
    170  1.12  riastrad 
    171  1.62  riastrad 			__i915_vma_put(vma);
    172  1.62  riastrad 		}
    173  1.36  riastrad 
    174  1.62  riastrad 		i915_vm_close(vm);
    175  1.62  riastrad 		spin_lock(&obj->vma.lock);
    176  1.36  riastrad 	}
    177  1.62  riastrad 	list_splice_init(&still_in_list, &obj->vma.list);
    178  1.62  riastrad 	spin_unlock(&obj->vma.lock);
    179  1.36  riastrad 
    180  1.62  riastrad 	if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
    181  1.62  riastrad 		rcu_barrier(); /* flush the i915_vm_release() */
    182  1.62  riastrad 		goto try_again;
    183  1.12  riastrad 	}
    184  1.12  riastrad 
    185  1.62  riastrad 	intel_runtime_pm_put(rpm, wakeref);
    186  1.36  riastrad 
    187  1.36  riastrad 	return ret;
    188  1.12  riastrad }
    189  1.12  riastrad 
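                         /*
                          * i915_gem_phys_pwrite: pwrite fallback for objects backed by
                          * contiguous physical memory rather than shmem pages.  The user data
                          * is copied straight into the kernel mapping of the backing store,
                          * the range is clflushed and the chipset flushed, and the write is
                          * bracketed by frontbuffer invalidate/flush notifications.
                          */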
    190  1.12  riastrad static int
    191  1.12  riastrad i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
    192  1.12  riastrad 		     struct drm_i915_gem_pwrite *args,
    193  1.62  riastrad 		     struct drm_file *file)
    194  1.12  riastrad {
    195  1.68  riastrad #ifdef __NetBSD__
    196  1.73  riastrad 	void *vaddr = obj->mm.u.phys.kva + args->offset;
    197  1.68  riastrad #else
    198  1.62  riastrad 	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
    199  1.73  riastrad #endif
    200  1.62  riastrad 	char __user *user_data = u64_to_user_ptr(args->data_ptr);
    201  1.36  riastrad 
    202  1.62  riastrad 	/*
    203  1.62  riastrad 	 * We manually control the domain here and pretend that it
    204  1.36  riastrad 	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
    205  1.36  riastrad 	 */
    206  1.62  riastrad 	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
    207  1.12  riastrad 
    208  1.62  riastrad 	if (copy_from_user(vaddr, user_data, args->size))
    209  1.62  riastrad 		return -EFAULT;
    210  1.12  riastrad 
    211  1.36  riastrad 	drm_clflush_virt_range(vaddr, args->size);
    212  1.62  riastrad 	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
    213  1.36  riastrad 
    214  1.62  riastrad 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
    215  1.62  riastrad 	return 0;
    216  1.12  riastrad }
    217  1.12  riastrad 
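                         /*
                          * i915_gem_create: common worker for the create and dumb_create
                          * ioctls.  Rounds the requested size up to the memory region's
                          * minimum page size, allocates an object in that region and returns
                          * a new handle for it, dropping the allocation reference once the
                          * handle owns the object.
                          */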
    218   1.1  riastrad static int
    219   1.1  riastrad i915_gem_create(struct drm_file *file,
    220  1.62  riastrad 		struct intel_memory_region *mr,
    221  1.62  riastrad 		u64 *size_p,
    222  1.62  riastrad 		u32 *handle_p)
    223   1.1  riastrad {
    224   1.1  riastrad 	struct drm_i915_gem_object *obj;
    225  1.62  riastrad 	u32 handle;
    226  1.62  riastrad 	u64 size;
    227   1.1  riastrad 	int ret;
    228   1.1  riastrad 
    229  1.62  riastrad 	GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
    230  1.72  riastrad 	size = ALIGN(*size_p, mr->min_page_size);
    231   1.1  riastrad 	if (size == 0)
    232   1.1  riastrad 		return -EINVAL;
    233   1.1  riastrad 
    234  1.62  riastrad 	/* For most of the ABI (e.g. mmap) we think in system pages */
    235  1.62  riastrad 	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
    236  1.62  riastrad 
    237   1.1  riastrad 	/* Allocate the new object */
    238  1.62  riastrad 	obj = i915_gem_object_create_region(mr, size, 0);
    239  1.62  riastrad 	if (IS_ERR(obj))
    240  1.62  riastrad 		return PTR_ERR(obj);
    241   1.1  riastrad 
    242   1.1  riastrad 	ret = drm_gem_handle_create(file, &obj->base, &handle);
    243  1.12  riastrad 	/* drop reference from allocate - handle holds it now */
    244  1.62  riastrad 	i915_gem_object_put(obj);
    245  1.12  riastrad 	if (ret)
    246   1.1  riastrad 		return ret;
    247   1.1  riastrad 
    248   1.1  riastrad 	*handle_p = handle;
    249  1.62  riastrad 	*size_p = size;
    250   1.1  riastrad 	return 0;
    251   1.1  riastrad }
    252   1.1  riastrad 
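                         /*
                          * i915_gem_dumb_create: allocate a dumb scanout buffer.  The pitch
                          * is derived from the width and bpp, aligned to 64 bytes (and to a
                          * page when it exceeds the maximum linear stride, so the buffer can
                          * be remapped), and the object is placed in local memory when the
                          * device has it.
                          */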
    253   1.1  riastrad int
    254   1.1  riastrad i915_gem_dumb_create(struct drm_file *file,
    255   1.1  riastrad 		     struct drm_device *dev,
    256   1.1  riastrad 		     struct drm_mode_create_dumb *args)
    257   1.1  riastrad {
    258  1.62  riastrad 	enum intel_memory_type mem_type;
    259  1.62  riastrad 	int cpp = DIV_ROUND_UP(args->bpp, 8);
    260  1.62  riastrad 	u32 format;
    261  1.62  riastrad 
    262  1.62  riastrad 	switch (cpp) {
    263  1.62  riastrad 	case 1:
    264  1.62  riastrad 		format = DRM_FORMAT_C8;
    265  1.62  riastrad 		break;
    266  1.62  riastrad 	case 2:
    267  1.62  riastrad 		format = DRM_FORMAT_RGB565;
    268  1.62  riastrad 		break;
    269  1.62  riastrad 	case 4:
    270  1.62  riastrad 		format = DRM_FORMAT_XRGB8888;
    271  1.62  riastrad 		break;
    272  1.62  riastrad 	default:
    273  1.62  riastrad 		return -EINVAL;
    274  1.62  riastrad 	}
    275  1.62  riastrad 
    276   1.1  riastrad 	/* have to work out size/pitch and return them */
    277  1.72  riastrad 	args->pitch = ALIGN(args->width * cpp, 64);
    278  1.62  riastrad 
    279  1.62  riastrad 	/* align stride to page size so that we can remap */
    280  1.62  riastrad 	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
    281  1.62  riastrad 						    DRM_FORMAT_MOD_LINEAR))
    282  1.72  riastrad 		args->pitch = ALIGN(args->pitch, 4096);
    283  1.62  riastrad 
    284  1.62  riastrad 	if (args->pitch < args->width)
    285  1.62  riastrad 		return -EINVAL;
    286  1.62  riastrad 
    287  1.62  riastrad 	args->size = mul_u32_u32(args->pitch, args->height);
    288  1.62  riastrad 
    289  1.62  riastrad 	mem_type = INTEL_MEMORY_SYSTEM;
    290  1.62  riastrad 	if (HAS_LMEM(to_i915(dev)))
    291  1.62  riastrad 		mem_type = INTEL_MEMORY_LOCAL;
    292  1.62  riastrad 
    293  1.62  riastrad 	return i915_gem_create(file,
    294  1.62  riastrad 			       intel_memory_region_by_type(to_i915(dev),
    295  1.62  riastrad 							   mem_type),
    296  1.62  riastrad 			       &args->size, &args->handle);
    297   1.1  riastrad }
    298   1.1  riastrad 
    299   1.1  riastrad /**
    300   1.1  riastrad  * Creates a new mm object and returns a handle to it.
    301  1.62  riastrad  * @dev: drm device pointer
    302  1.62  riastrad  * @data: ioctl data blob
    303  1.62  riastrad  * @file: drm file pointer
    304   1.1  riastrad  */
    305   1.1  riastrad int
    306   1.1  riastrad i915_gem_create_ioctl(struct drm_device *dev, void *data,
    307   1.1  riastrad 		      struct drm_file *file)
    308   1.1  riastrad {
    309  1.62  riastrad 	struct drm_i915_private *i915 = to_i915(dev);
    310   1.1  riastrad 	struct drm_i915_gem_create *args = data;
    311   1.1  riastrad 
    312  1.62  riastrad 	i915_gem_flush_free_objects(i915);
    313  1.62  riastrad 
    314  1.62  riastrad 	return i915_gem_create(file,
    315  1.62  riastrad 			       intel_memory_region_by_type(i915,
    316  1.62  riastrad 							   INTEL_MEMORY_SYSTEM),
    317  1.62  riastrad 			       &args->size, &args->handle);
    318   1.1  riastrad }
    319   1.1  riastrad 
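                         /* Per-page copy for the shmem pread path: kmap the page, clflush the
                          * source range if required, and copy it out to user space. */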
    320  1.62  riastrad static int
    321  1.62  riastrad shmem_pread(struct page *page, int offset, int len, char __user *user_data,
    322  1.62  riastrad 	    bool needs_clflush)
    323   1.1  riastrad {
    324  1.62  riastrad 	char *vaddr;
    325  1.62  riastrad 	int ret;
    326   1.1  riastrad 
    327  1.62  riastrad 	vaddr = kmap(page);
    328   1.1  riastrad 
    329  1.62  riastrad 	if (needs_clflush)
    330  1.62  riastrad 		drm_clflush_virt_range(vaddr + offset, len);
    331   1.1  riastrad 
    332  1.62  riastrad 	ret = __copy_to_user(user_data, vaddr + offset, len);
    333   1.1  riastrad 
    334  1.62  riastrad 	kunmap(page);
    335   1.1  riastrad 
    336  1.62  riastrad 	return ret ? -EFAULT : 0;
    337   1.1  riastrad }
    338   1.1  riastrad 
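                         /*
                          * i915_gem_shmem_pread: pread through the object's backing pages.
                          * The pages are prepared for CPU access (which also decides whether
                          * a clflush is needed), a lock fence is taken to keep them stable
                          * for the duration, and the requested range is copied out one page
                          * at a time.
                          */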
    339  1.62  riastrad static int
    340  1.62  riastrad i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
    341  1.62  riastrad 		     struct drm_i915_gem_pread *args)
    342  1.12  riastrad {
    343  1.62  riastrad 	unsigned int needs_clflush;
    344  1.62  riastrad 	unsigned int idx, offset;
    345  1.62  riastrad 	struct dma_fence *fence;
    346  1.62  riastrad 	char __user *user_data;
    347  1.62  riastrad 	u64 remain;
    348  1.12  riastrad 	int ret;
    349  1.12  riastrad 
    350  1.62  riastrad 	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
    351  1.12  riastrad 	if (ret)
    352  1.12  riastrad 		return ret;
    353  1.12  riastrad 
    354  1.62  riastrad 	fence = i915_gem_object_lock_fence(obj);
    355  1.62  riastrad 	i915_gem_object_finish_access(obj);
    356  1.62  riastrad 	if (!fence)
    357  1.62  riastrad 		return -ENOMEM;
    358  1.12  riastrad 
    359  1.62  riastrad 	remain = args->size;
    360  1.62  riastrad 	user_data = u64_to_user_ptr(args->data_ptr);
    361  1.62  riastrad 	offset = offset_in_page(args->offset);
    362  1.62  riastrad 	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
    363  1.62  riastrad 		struct page *page = i915_gem_object_get_page(obj, idx);
    364  1.62  riastrad 		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
    365  1.12  riastrad 
    366  1.62  riastrad 		ret = shmem_pread(page, offset, length, user_data,
    367  1.62  riastrad 				  needs_clflush);
    368  1.62  riastrad 		if (ret)
    369  1.62  riastrad 			break;
    370   1.1  riastrad 
    371  1.62  riastrad 		remain -= length;
    372  1.62  riastrad 		user_data += length;
    373  1.62  riastrad 		offset = 0;
    374  1.62  riastrad 	}
    375   1.1  riastrad 
    376  1.62  riastrad 	i915_gem_object_unlock_fence(obj, fence);
    377  1.62  riastrad 	return ret;
    378   1.1  riastrad }
    379   1.1  riastrad 
    380  1.69  riastrad #ifdef __NetBSD__
    381  1.69  riastrad #define __iomem
    382  1.69  riastrad #endif
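                         /* Read length bytes at (base + offset) in the GGTT aperture into user
                          * space.  On Linux this tries a fast atomic WC mapping first; in the
                          * NetBSD port we go straight to the non-atomic fallback mapping. */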
    383  1.62  riastrad static inline bool
    384  1.62  riastrad gtt_user_read(struct io_mapping *mapping,
    385  1.62  riastrad 	      loff_t base, int offset,
    386  1.62  riastrad 	      char __user *user_data, int length)
    387   1.1  riastrad {
    388  1.62  riastrad 	void __iomem *vaddr;
    389  1.62  riastrad 	unsigned long unwritten;
    390   1.1  riastrad 
    391  1.68  riastrad #ifdef __NetBSD__
    392  1.68  riastrad 	// No fast path for us.
    393  1.68  riastrad 	unwritten = -EFAULT;
    394  1.68  riastrad #else
    395  1.62  riastrad 	/* We can use the cpu mem copy function because this is X86. */
    396  1.62  riastrad 	vaddr = io_mapping_map_atomic_wc(mapping, base);
    397  1.62  riastrad 	unwritten = __copy_to_user_inatomic(user_data,
    398  1.62  riastrad 					    (void __force *)vaddr + offset,
    399  1.62  riastrad 					    length);
    400  1.62  riastrad 	io_mapping_unmap_atomic(vaddr);
    401  1.68  riastrad #endif
    402  1.62  riastrad 	if (unwritten) {
    403  1.62  riastrad 		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
    404  1.62  riastrad 		unwritten = copy_to_user(user_data,
    405  1.62  riastrad 					 (void __force *)vaddr + offset,
    406  1.62  riastrad 					 length);
    407  1.68  riastrad #ifdef __NetBSD__
    408  1.74  riastrad 		io_mapping_unmap(mapping, vaddr, PAGE_SIZE);
    409  1.68  riastrad #else
    410  1.62  riastrad 		io_mapping_unmap(vaddr);
    411  1.68  riastrad #endif
    412   1.1  riastrad 	}
    413  1.62  riastrad 	return unwritten;
    414   1.1  riastrad }
    415  1.69  riastrad #ifdef __NetBSD__
    416  1.69  riastrad #undef __iomem
    417  1.69  riastrad #endif
    418   1.1  riastrad 
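                         /*
                          * i915_gem_gtt_pread: pread slow path through the mappable aperture,
                          * used when the shmem path fails with -EFAULT or -ENODEV.  The object
                          * is pinned into the mappable GGTT when possible; otherwise a single
                          * scratch page is remapped into the aperture for each page of the
                          * transfer.
                          */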
    419   1.1  riastrad static int
    420  1.62  riastrad i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
    421  1.62  riastrad 		   const struct drm_i915_gem_pread *args)
    422   1.1  riastrad {
    423  1.62  riastrad 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
    424  1.62  riastrad 	struct i915_ggtt *ggtt = &i915->ggtt;
    425  1.62  riastrad 	intel_wakeref_t wakeref;
    426  1.62  riastrad 	struct drm_mm_node node;
    427  1.62  riastrad 	struct dma_fence *fence;
    428  1.62  riastrad 	void __user *user_data;
    429  1.62  riastrad 	struct i915_vma *vma;
    430  1.62  riastrad 	u64 remain, offset;
    431   1.1  riastrad 	int ret;
    432   1.1  riastrad 
    433  1.62  riastrad 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
    434  1.62  riastrad 	vma = ERR_PTR(-ENODEV);
    435  1.62  riastrad 	if (!i915_gem_object_is_tiled(obj))
    436  1.62  riastrad 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
    437  1.62  riastrad 					       PIN_MAPPABLE |
    438  1.62  riastrad 					       PIN_NONBLOCK /* NOWARN */ |
    439  1.62  riastrad 					       PIN_NOEVICT);
    440  1.62  riastrad 	if (!IS_ERR(vma)) {
    441  1.62  riastrad 		node.start = i915_ggtt_offset(vma);
    442  1.62  riastrad 		node.flags = 0;
    443  1.62  riastrad 	} else {
    444  1.62  riastrad 		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
    445  1.62  riastrad 		if (ret)
    446  1.62  riastrad 			goto out_rpm;
    447  1.62  riastrad 		GEM_BUG_ON(!drm_mm_node_allocated(&node));
    448  1.62  riastrad 	}
    449  1.62  riastrad 
    450  1.62  riastrad 	ret = i915_gem_object_lock_interruptible(obj);
    451  1.62  riastrad 	if (ret)
    452  1.62  riastrad 		goto out_unpin;
    453   1.1  riastrad 
    454  1.62  riastrad 	ret = i915_gem_object_set_to_gtt_domain(obj, false);
    455  1.62  riastrad 	if (ret) {
    456  1.62  riastrad 		i915_gem_object_unlock(obj);
    457  1.62  riastrad 		goto out_unpin;
    458  1.62  riastrad 	}
    459   1.1  riastrad 
    460  1.62  riastrad 	fence = i915_gem_object_lock_fence(obj);
    461  1.62  riastrad 	i915_gem_object_unlock(obj);
    462  1.62  riastrad 	if (!fence) {
    463  1.62  riastrad 		ret = -ENOMEM;
    464  1.62  riastrad 		goto out_unpin;
    465  1.62  riastrad 	}
    466   1.1  riastrad 
    467  1.62  riastrad 	user_data = u64_to_user_ptr(args->data_ptr);
    468   1.1  riastrad 	remain = args->size;
    469  1.62  riastrad 	offset = args->offset;
    470   1.1  riastrad 
    471  1.62  riastrad 	while (remain > 0) {
    472  1.62  riastrad 		/* Operation in this page
    473  1.62  riastrad 		 *
    474  1.62  riastrad 		 * page_base = page offset within aperture
    475  1.62  riastrad 		 * page_offset = offset within page
    476  1.62  riastrad 		 * page_length = bytes to copy for this page
    477  1.62  riastrad 		 */
    478  1.62  riastrad 		u32 page_base = node.start;
    479  1.62  riastrad 		unsigned page_offset = offset_in_page(offset);
    480  1.62  riastrad 		unsigned page_length = PAGE_SIZE - page_offset;
    481  1.62  riastrad 		page_length = remain < page_length ? remain : page_length;
    482  1.62  riastrad 		if (drm_mm_node_allocated(&node)) {
    483  1.62  riastrad 			ggtt->vm.insert_page(&ggtt->vm,
    484  1.62  riastrad 					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
    485  1.62  riastrad 					     node.start, I915_CACHE_NONE, 0);
    486  1.62  riastrad 		} else {
    487  1.62  riastrad 			page_base += offset & PAGE_MASK;
    488  1.62  riastrad 		}
    489   1.1  riastrad 
    490  1.62  riastrad 		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
    491  1.62  riastrad 				  user_data, page_length)) {
    492  1.62  riastrad 			ret = -EFAULT;
    493  1.62  riastrad 			break;
    494  1.62  riastrad 		}
    495   1.1  riastrad 
    496   1.1  riastrad 		remain -= page_length;
    497   1.1  riastrad 		user_data += page_length;
    498   1.1  riastrad 		offset += page_length;
    499   1.1  riastrad 	}
    500   1.1  riastrad 
    501  1.62  riastrad 	i915_gem_object_unlock_fence(obj, fence);
    502  1.62  riastrad out_unpin:
    503  1.62  riastrad 	if (drm_mm_node_allocated(&node)) {
    504  1.62  riastrad 		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
    505  1.62  riastrad 		remove_mappable_node(ggtt, &node);
    506  1.62  riastrad 	} else {
    507  1.62  riastrad 		i915_vma_unpin(vma);
    508  1.62  riastrad 	}
    509  1.62  riastrad out_rpm:
    510  1.62  riastrad 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
    511   1.1  riastrad 	return ret;
    512   1.1  riastrad }
    513   1.1  riastrad 
    514   1.1  riastrad /**
    515   1.1  riastrad  * Reads data from the object referenced by handle.
    516  1.62  riastrad  * @dev: drm device pointer
    517  1.62  riastrad  * @data: ioctl data blob
    518  1.62  riastrad  * @file: drm file pointer
    519   1.1  riastrad  *
    520   1.1  riastrad  * On error, the contents of *data are undefined.
    521   1.1  riastrad  */
    522   1.1  riastrad int
    523   1.1  riastrad i915_gem_pread_ioctl(struct drm_device *dev, void *data,
    524   1.1  riastrad 		     struct drm_file *file)
    525   1.1  riastrad {
    526   1.1  riastrad 	struct drm_i915_gem_pread *args = data;
    527   1.1  riastrad 	struct drm_i915_gem_object *obj;
    528  1.62  riastrad 	int ret;
    529   1.1  riastrad 
    530   1.1  riastrad 	if (args->size == 0)
    531   1.1  riastrad 		return 0;
    532   1.1  riastrad 
    533  1.62  riastrad 	if (!access_ok(u64_to_user_ptr(args->data_ptr),
    534   1.1  riastrad 		       args->size))
    535   1.1  riastrad 		return -EFAULT;
    536   1.1  riastrad 
    537  1.62  riastrad 	obj = i915_gem_object_lookup(file, args->handle);
    538  1.62  riastrad 	if (!obj)
    539  1.62  riastrad 		return -ENOENT;
    540   1.1  riastrad 
    541   1.1  riastrad 	/* Bounds check source.  */
    542  1.62  riastrad 	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
    543   1.1  riastrad 		ret = -EINVAL;
    544   1.1  riastrad 		goto out;
    545   1.1  riastrad 	}
    546   1.1  riastrad 
    547  1.62  riastrad 	trace_i915_gem_object_pread(obj, args->offset, args->size);
    548  1.62  riastrad 
    549  1.62  riastrad 	ret = i915_gem_object_wait(obj,
    550  1.62  riastrad 				   I915_WAIT_INTERRUPTIBLE,
    551  1.62  riastrad 				   MAX_SCHEDULE_TIMEOUT);
    552  1.62  riastrad 	if (ret)
    553   1.1  riastrad 		goto out;
    554   1.1  riastrad 
    555  1.62  riastrad 	ret = i915_gem_object_pin_pages(obj);
    556  1.62  riastrad 	if (ret)
    557  1.62  riastrad 		goto out;
    558   1.1  riastrad 
    559  1.62  riastrad 	ret = i915_gem_shmem_pread(obj, args);
    560  1.62  riastrad 	if (ret == -EFAULT || ret == -ENODEV)
    561  1.62  riastrad 		ret = i915_gem_gtt_pread(obj, args);
    562   1.1  riastrad 
    563  1.62  riastrad 	i915_gem_object_unpin_pages(obj);
    564   1.1  riastrad out:
    565  1.62  riastrad 	i915_gem_object_put(obj);
    566   1.1  riastrad 	return ret;
    567   1.1  riastrad }
    568   1.1  riastrad 
    569   1.1  riastrad /* This is the fast write path which cannot handle
    570   1.1  riastrad  * page faults in the source data
    571   1.1  riastrad  */
    572   1.1  riastrad 
    573  1.62  riastrad static inline bool
    574  1.62  riastrad ggtt_write(struct io_mapping *mapping,
    575  1.62  riastrad 	   loff_t base, int offset,
    576  1.62  riastrad 	   char __user *user_data, int length)
    577   1.1  riastrad {
    578  1.68  riastrad #ifdef __NetBSD__
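                         	/*
                         	 * No fast GGTT write path in the NetBSD port: report the whole
                         	 * range as unwritten so the caller falls back to the shmem or
                         	 * phys pwrite paths.
                         	 */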
    579  1.68  riastrad 	return length;
    580  1.68  riastrad #else
    581  1.62  riastrad 	void __iomem *vaddr;
    582   1.1  riastrad 	unsigned long unwritten;
    583   1.1  riastrad 
    584   1.1  riastrad 	/* We can use the cpu mem copy function because this is X86. */
    585  1.62  riastrad 	vaddr = io_mapping_map_atomic_wc(mapping, base);
    586  1.62  riastrad 	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
    587   1.1  riastrad 						      user_data, length);
    588  1.62  riastrad 	io_mapping_unmap_atomic(vaddr);
    589  1.62  riastrad 	if (unwritten) {
    590  1.62  riastrad 		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
    591  1.62  riastrad 		unwritten = copy_from_user((void __force *)vaddr + offset,
    592  1.62  riastrad 					   user_data, length);
    593  1.62  riastrad 		io_mapping_unmap(vaddr);
    594  1.62  riastrad 	}
    595  1.62  riastrad 
    596   1.1  riastrad 	return unwritten;
    597  1.68  riastrad #endif
    598   1.1  riastrad }
    599   1.1  riastrad 
    600   1.1  riastrad /**
    601   1.1  riastrad  * This is the fast pwrite path, where we copy the data directly from the
    602   1.1  riastrad  * user into the GTT, uncached.
    603  1.62  riastrad  * @obj: i915 GEM object
    604  1.62  riastrad  * @args: pwrite arguments structure
    605   1.1  riastrad  */
    606   1.1  riastrad static int
    607  1.62  riastrad i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
    608  1.62  riastrad 			 const struct drm_i915_gem_pwrite *args)
    609   1.1  riastrad {
    610  1.62  riastrad 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
    611  1.62  riastrad 	struct i915_ggtt *ggtt = &i915->ggtt;
    612  1.62  riastrad 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
    613  1.62  riastrad 	intel_wakeref_t wakeref;
    614  1.62  riastrad 	struct drm_mm_node node;
    615  1.62  riastrad 	struct dma_fence *fence;
    616  1.62  riastrad 	struct i915_vma *vma;
    617  1.62  riastrad 	u64 remain, offset;
    618  1.62  riastrad 	void __user *user_data;
    619  1.62  riastrad 	int ret;
    620  1.62  riastrad 
    621  1.62  riastrad 	if (i915_gem_object_has_struct_page(obj)) {
    622  1.62  riastrad 		/*
     623  1.62  riastrad 		 * Avoid waking the device up if we can fall back, as
    624  1.62  riastrad 		 * waking/resuming is very slow (worst-case 10-100 ms
    625  1.62  riastrad 		 * depending on PCI sleeps and our own resume time).
    626  1.62  riastrad 		 * This easily dwarfs any performance advantage from
    627  1.62  riastrad 		 * using the cache bypass of indirect GGTT access.
    628  1.62  riastrad 		 */
    629  1.62  riastrad 		wakeref = intel_runtime_pm_get_if_in_use(rpm);
    630  1.62  riastrad 		if (!wakeref)
    631  1.62  riastrad 			return -EFAULT;
    632  1.62  riastrad 	} else {
    633  1.62  riastrad 		/* No backing pages, no fallback, we must force GGTT access */
    634  1.62  riastrad 		wakeref = intel_runtime_pm_get(rpm);
    635  1.62  riastrad 	}
    636  1.62  riastrad 
    637  1.62  riastrad 	vma = ERR_PTR(-ENODEV);
    638  1.62  riastrad 	if (!i915_gem_object_is_tiled(obj))
    639  1.62  riastrad 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
    640  1.62  riastrad 					       PIN_MAPPABLE |
    641  1.62  riastrad 					       PIN_NONBLOCK /* NOWARN */ |
    642  1.62  riastrad 					       PIN_NOEVICT);
    643  1.62  riastrad 	if (!IS_ERR(vma)) {
    644  1.62  riastrad 		node.start = i915_ggtt_offset(vma);
    645  1.62  riastrad 		node.flags = 0;
    646  1.62  riastrad 	} else {
    647  1.62  riastrad 		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
    648  1.62  riastrad 		if (ret)
    649  1.62  riastrad 			goto out_rpm;
    650  1.62  riastrad 		GEM_BUG_ON(!drm_mm_node_allocated(&node));
    651  1.62  riastrad 	}
    652   1.1  riastrad 
    653  1.62  riastrad 	ret = i915_gem_object_lock_interruptible(obj);
    654   1.1  riastrad 	if (ret)
    655  1.62  riastrad 		goto out_unpin;
    656   1.1  riastrad 
    657   1.1  riastrad 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
    658  1.62  riastrad 	if (ret) {
    659  1.62  riastrad 		i915_gem_object_unlock(obj);
    660   1.1  riastrad 		goto out_unpin;
    661  1.62  riastrad 	}
    662   1.1  riastrad 
    663  1.62  riastrad 	fence = i915_gem_object_lock_fence(obj);
    664  1.62  riastrad 	i915_gem_object_unlock(obj);
    665  1.62  riastrad 	if (!fence) {
    666  1.62  riastrad 		ret = -ENOMEM;
    667   1.1  riastrad 		goto out_unpin;
    668  1.62  riastrad 	}
    669   1.1  riastrad 
    670  1.62  riastrad 	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
    671  1.62  riastrad 
    672  1.62  riastrad 	user_data = u64_to_user_ptr(args->data_ptr);
    673  1.62  riastrad 	offset = args->offset;
    674   1.1  riastrad 	remain = args->size;
    675  1.62  riastrad 	while (remain) {
    676   1.1  riastrad 		/* Operation in this page
    677   1.1  riastrad 		 *
    678   1.1  riastrad 		 * page_base = page offset within aperture
    679   1.1  riastrad 		 * page_offset = offset within page
    680   1.1  riastrad 		 * page_length = bytes to copy for this page
    681   1.1  riastrad 		 */
    682  1.62  riastrad 		u32 page_base = node.start;
    683  1.62  riastrad 		unsigned int page_offset = offset_in_page(offset);
    684  1.62  riastrad 		unsigned int page_length = PAGE_SIZE - page_offset;
    685  1.62  riastrad 		page_length = remain < page_length ? remain : page_length;
    686  1.62  riastrad 		if (drm_mm_node_allocated(&node)) {
    687  1.62  riastrad 			/* flush the write before we modify the GGTT */
    688  1.62  riastrad 			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
    689  1.62  riastrad 			ggtt->vm.insert_page(&ggtt->vm,
    690  1.62  riastrad 					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
    691  1.62  riastrad 					     node.start, I915_CACHE_NONE, 0);
    692  1.62  riastrad 			wmb(); /* flush modifications to the GGTT (insert_page) */
    693  1.62  riastrad 		} else {
    694  1.62  riastrad 			page_base += offset & PAGE_MASK;
    695  1.62  riastrad 		}
    696   1.1  riastrad 		/* If we get a fault while copying data, then (presumably) our
    697   1.1  riastrad 		 * source page isn't available.  Return the error and we'll
    698   1.1  riastrad 		 * retry in the slow path.
     699  1.62  riastrad 		 * If the object is non-shmem backed, we retry with the
     700  1.62  riastrad 		 * path that handles page faults.
    701   1.1  riastrad 		 */
    702  1.62  riastrad 		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
    703  1.62  riastrad 			       user_data, page_length)) {
    704   1.1  riastrad 			ret = -EFAULT;
    705  1.62  riastrad 			break;
    706   1.1  riastrad 		}
    707   1.1  riastrad 
    708   1.1  riastrad 		remain -= page_length;
    709   1.1  riastrad 		user_data += page_length;
    710   1.1  riastrad 		offset += page_length;
    711   1.1  riastrad 	}
    712   1.1  riastrad 
    713  1.62  riastrad 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
    714  1.62  riastrad 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
    715  1.62  riastrad 
    716  1.62  riastrad 	i915_gem_object_unlock_fence(obj, fence);
    717   1.1  riastrad out_unpin:
    718  1.62  riastrad 	if (drm_mm_node_allocated(&node)) {
    719  1.62  riastrad 		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
    720  1.62  riastrad 		remove_mappable_node(ggtt, &node);
    721  1.62  riastrad 	} else {
    722  1.62  riastrad 		i915_vma_unpin(vma);
    723  1.62  riastrad 	}
    724  1.62  riastrad out_rpm:
    725  1.62  riastrad 	intel_runtime_pm_put(rpm, wakeref);
    726   1.1  riastrad 	return ret;
    727   1.1  riastrad }
    728   1.1  riastrad 
    729   1.1  riastrad /* Per-page copy function for the shmem pwrite fastpath.
    730   1.1  riastrad  * Flushes invalid cachelines before writing to the target if
    731   1.1  riastrad  * needs_clflush_before is set and flushes out any written cachelines after
    732  1.62  riastrad  * writing if needs_clflush is set.
    733  1.62  riastrad  */
    734   1.1  riastrad static int
    735  1.62  riastrad shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
    736  1.62  riastrad 	     bool needs_clflush_before,
    737  1.62  riastrad 	     bool needs_clflush_after)
    738   1.1  riastrad {
    739   1.1  riastrad 	char *vaddr;
    740   1.1  riastrad 	int ret;
    741   1.1  riastrad 
    742  1.62  riastrad 	vaddr = kmap(page);
    743   1.1  riastrad 
    744   1.1  riastrad 	if (needs_clflush_before)
    745  1.62  riastrad 		drm_clflush_virt_range(vaddr + offset, len);
    746   1.1  riastrad 
    747  1.62  riastrad 	ret = __copy_from_user(vaddr + offset, user_data, len);
    748  1.62  riastrad 	if (!ret && needs_clflush_after)
    749  1.62  riastrad 		drm_clflush_virt_range(vaddr + offset, len);
    750   1.1  riastrad 
    751   1.1  riastrad 	kunmap(page);
    752   1.1  riastrad 
    753   1.1  riastrad 	return ret ? -EFAULT : 0;
    754   1.1  riastrad }
    755   1.1  riastrad 
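                         /*
                          * i915_gem_shmem_pwrite: pwrite through the object's backing pages,
                          * mirroring i915_gem_shmem_pread.  Partial cachelines are flushed
                          * before the copy when CLFLUSH_BEFORE is required, and the written
                          * range is flushed afterwards when CLFLUSH_AFTER is set.
                          */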
    756   1.1  riastrad static int
    757  1.62  riastrad i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
    758  1.62  riastrad 		      const struct drm_i915_gem_pwrite *args)
    759   1.1  riastrad {
    760  1.62  riastrad 	unsigned int partial_cacheline_write;
    761  1.62  riastrad 	unsigned int needs_clflush;
    762  1.62  riastrad 	unsigned int offset, idx;
    763  1.62  riastrad 	struct dma_fence *fence;
    764  1.62  riastrad 	void __user *user_data;
    765  1.62  riastrad 	u64 remain;
    766  1.62  riastrad 	int ret;
    767   1.1  riastrad 
    768  1.62  riastrad 	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
    769   1.1  riastrad 	if (ret)
    770   1.1  riastrad 		return ret;
    771   1.1  riastrad 
    772  1.62  riastrad 	fence = i915_gem_object_lock_fence(obj);
    773  1.62  riastrad 	i915_gem_object_finish_access(obj);
    774  1.62  riastrad 	if (!fence)
    775  1.62  riastrad 		return -ENOMEM;
    776  1.36  riastrad 
    777  1.62  riastrad 	/* If we don't overwrite a cacheline completely we need to be
    778  1.62  riastrad 	 * careful to have up-to-date data by first clflushing. Don't
    779  1.62  riastrad 	 * overcomplicate things and flush the entire patch.
    780  1.62  riastrad 	 */
    781  1.62  riastrad 	partial_cacheline_write = 0;
    782  1.62  riastrad 	if (needs_clflush & CLFLUSH_BEFORE)
    783  1.69  riastrad #ifdef __NetBSD__
    784  1.69  riastrad 		partial_cacheline_write = cpu_info_primary.ci_cflush_lsize - 1;
    785  1.69  riastrad #else
    786  1.62  riastrad 		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
    787  1.69  riastrad #endif
    788   1.1  riastrad 
    789  1.62  riastrad 	user_data = u64_to_user_ptr(args->data_ptr);
    790  1.62  riastrad 	remain = args->size;
    791  1.62  riastrad 	offset = offset_in_page(args->offset);
    792  1.62  riastrad 	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
    793  1.62  riastrad 		struct page *page = i915_gem_object_get_page(obj, idx);
    794  1.62  riastrad 		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
    795  1.62  riastrad 
    796  1.62  riastrad 		ret = shmem_pwrite(page, offset, length, user_data,
    797  1.62  riastrad 				   (offset | length) & partial_cacheline_write,
    798  1.62  riastrad 				   needs_clflush & CLFLUSH_AFTER);
    799  1.62  riastrad 		if (ret)
    800   1.1  riastrad 			break;
    801   1.1  riastrad 
    802  1.62  riastrad 		remain -= length;
    803  1.62  riastrad 		user_data += length;
    804  1.62  riastrad 		offset = 0;
    805   1.1  riastrad 	}
    806   1.1  riastrad 
    807  1.62  riastrad 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
    808  1.62  riastrad 	i915_gem_object_unlock_fence(obj, fence);
    809   1.1  riastrad 
    810   1.1  riastrad 	return ret;
    811   1.1  riastrad }
    812   1.1  riastrad 
    813   1.1  riastrad /**
    814   1.1  riastrad  * Writes data to the object referenced by handle.
    815  1.62  riastrad  * @dev: drm device
    816  1.62  riastrad  * @data: ioctl data blob
    817  1.62  riastrad  * @file: drm file
    818   1.1  riastrad  *
    819   1.1  riastrad  * On error, the contents of the buffer that were to be modified are undefined.
    820   1.1  riastrad  */
    821   1.1  riastrad int
    822   1.1  riastrad i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
    823   1.1  riastrad 		      struct drm_file *file)
    824   1.1  riastrad {
    825   1.1  riastrad 	struct drm_i915_gem_pwrite *args = data;
    826   1.1  riastrad 	struct drm_i915_gem_object *obj;
    827   1.1  riastrad 	int ret;
    828   1.1  riastrad 
    829   1.1  riastrad 	if (args->size == 0)
    830   1.1  riastrad 		return 0;
    831   1.1  riastrad 
    832  1.62  riastrad 	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
    833   1.1  riastrad 		return -EFAULT;
    834   1.1  riastrad 
    835  1.62  riastrad 	obj = i915_gem_object_lookup(file, args->handle);
    836  1.62  riastrad 	if (!obj)
    837  1.62  riastrad 		return -ENOENT;
    838   1.1  riastrad 
    839   1.1  riastrad 	/* Bounds check destination. */
    840  1.62  riastrad 	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
    841   1.1  riastrad 		ret = -EINVAL;
    842  1.62  riastrad 		goto err;
    843   1.1  riastrad 	}
    844   1.1  riastrad 
    845  1.62  riastrad 	/* Writes not allowed into this read-only object */
    846  1.62  riastrad 	if (i915_gem_object_is_readonly(obj)) {
    847   1.1  riastrad 		ret = -EINVAL;
    848  1.62  riastrad 		goto err;
    849  1.36  riastrad 	}
    850  1.36  riastrad 
    851  1.62  riastrad 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
    852   1.1  riastrad 
    853  1.62  riastrad 	ret = -ENODEV;
    854  1.62  riastrad 	if (obj->ops->pwrite)
    855  1.62  riastrad 		ret = obj->ops->pwrite(obj, args);
    856  1.62  riastrad 	if (ret != -ENODEV)
    857  1.62  riastrad 		goto err;
    858   1.1  riastrad 
    859  1.62  riastrad 	ret = i915_gem_object_wait(obj,
    860  1.62  riastrad 				   I915_WAIT_INTERRUPTIBLE |
    861  1.62  riastrad 				   I915_WAIT_ALL,
    862  1.62  riastrad 				   MAX_SCHEDULE_TIMEOUT);
    863  1.12  riastrad 	if (ret)
    864  1.12  riastrad 		goto err;
    865   1.1  riastrad 
    866  1.62  riastrad 	ret = i915_gem_object_pin_pages(obj);
    867  1.62  riastrad 	if (ret)
    868  1.62  riastrad 		goto err;
    869   1.1  riastrad 
    870  1.62  riastrad 	ret = -EFAULT;
    871  1.62  riastrad 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
    872  1.62  riastrad 	 * it would end up going through the fenced access, and we'll get
    873  1.62  riastrad 	 * different detiling behavior between reading and writing.
    874  1.62  riastrad 	 * pread/pwrite currently are reading and writing from the CPU
    875  1.62  riastrad 	 * perspective, requiring manual detiling by the client.
    876  1.36  riastrad 	 */
    877  1.62  riastrad 	if (!i915_gem_object_has_struct_page(obj) ||
    878  1.62  riastrad 	    cpu_write_needs_clflush(obj))
    879  1.62  riastrad 		/* Note that the gtt paths might fail with non-page-backed user
    880  1.62  riastrad 		 * pointers (e.g. gtt mappings when moving data between
    881  1.62  riastrad 		 * textures). Fallback to the shmem path in that case.
    882  1.62  riastrad 		 */
    883  1.62  riastrad 		ret = i915_gem_gtt_pwrite_fast(obj, args);
    884   1.1  riastrad 
    885  1.62  riastrad 	if (ret == -EFAULT || ret == -ENOSPC) {
    886  1.62  riastrad 		if (i915_gem_object_has_struct_page(obj))
    887  1.62  riastrad 			ret = i915_gem_shmem_pwrite(obj, args);
    888  1.62  riastrad 		else
    889  1.62  riastrad 			ret = i915_gem_phys_pwrite(obj, args, file);
    890  1.62  riastrad 	}
    891  1.12  riastrad 
    892  1.62  riastrad 	i915_gem_object_unpin_pages(obj);
    893  1.12  riastrad err:
    894  1.62  riastrad 	i915_gem_object_put(obj);
    895  1.12  riastrad 	return ret;
    896   1.1  riastrad }
    897   1.1  riastrad 
    898  1.62  riastrad /**
    899  1.62  riastrad  * Called when user space has done writes to this buffer
    900  1.62  riastrad  * @dev: drm device
    901  1.62  riastrad  * @data: ioctl data blob
    902  1.62  riastrad  * @file: drm file
    903  1.62  riastrad  */
    904  1.62  riastrad int
    905  1.62  riastrad i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
    906  1.62  riastrad 			 struct drm_file *file)
    907   1.1  riastrad {
    908  1.62  riastrad 	struct drm_i915_gem_sw_finish *args = data;
    909  1.62  riastrad 	struct drm_i915_gem_object *obj;
    910   1.1  riastrad 
    911  1.62  riastrad 	obj = i915_gem_object_lookup(file, args->handle);
    912  1.62  riastrad 	if (!obj)
    913  1.62  riastrad 		return -ENOENT;
    914   1.1  riastrad 
    915  1.12  riastrad 	/*
    916  1.62  riastrad 	 * Proxy objects are barred from CPU access, so there is no
    917  1.62  riastrad 	 * need to ban sw_finish as it is a nop.
    918  1.12  riastrad 	 */
    919   1.1  riastrad 
    920  1.62  riastrad 	/* Pinned buffers may be scanout, so flush the cache */
    921  1.62  riastrad 	i915_gem_object_flush_if_display(obj);
    922  1.62  riastrad 	i915_gem_object_put(obj);
    923   1.1  riastrad 
    924  1.62  riastrad 	return 0;
    925   1.1  riastrad }
    926   1.1  riastrad 
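                         /*
                          * i915_gem_runtime_suspend: called on runtime suspend.  Revoke all
                          * GTT mmaps so that later userspace faults reacquire a wakeref, and
                          * mark every fence register with a bound VMA dirty so that it is
                          * rewritten when the device wakes up.
                          */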
    927  1.62  riastrad void i915_gem_runtime_suspend(struct drm_i915_private *i915)
    928   1.1  riastrad {
    929  1.62  riastrad 	struct drm_i915_gem_object *obj, *on;
    930  1.62  riastrad 	int i;
    931  1.62  riastrad 
    932  1.62  riastrad 	/*
    933  1.62  riastrad 	 * Only called during RPM suspend. All users of the userfault_list
    934  1.62  riastrad 	 * must be holding an RPM wakeref to ensure that this can not
    935  1.62  riastrad 	 * run concurrently with themselves (and use the struct_mutex for
    936  1.62  riastrad 	 * protection between themselves).
    937  1.62  riastrad 	 */
    938  1.62  riastrad 
    939  1.62  riastrad 	list_for_each_entry_safe(obj, on,
    940  1.62  riastrad 				 &i915->ggtt.userfault_list, userfault_link)
    941  1.62  riastrad 		__i915_gem_object_release_mmap_gtt(obj);
    942   1.1  riastrad 
    943  1.62  riastrad 	/*
    944  1.62  riastrad 	 * The fence will be lost when the device powers down. If any were
    945  1.62  riastrad 	 * in use by hardware (i.e. they are pinned), we should not be powering
    946  1.62  riastrad 	 * down! All other fences will be reacquired by the user upon waking.
    947  1.62  riastrad 	 */
    948  1.62  riastrad 	for (i = 0; i < i915->ggtt.num_fences; i++) {
    949  1.62  riastrad 		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
    950   1.1  riastrad 
    951  1.62  riastrad 		/*
    952  1.62  riastrad 		 * Ideally we want to assert that the fence register is not
    953  1.62  riastrad 		 * live at this point (i.e. that no piece of code will be
    954  1.62  riastrad 		 * trying to write through fence + GTT, as that both violates
    955  1.62  riastrad 		 * our tracking of activity and associated locking/barriers,
    956  1.62  riastrad 		 * but also is illegal given that the hw is powered down).
    957  1.62  riastrad 		 *
    958  1.62  riastrad 		 * Previously we used reg->pin_count as a "liveness" indicator.
    959  1.62  riastrad 		 * That is not sufficient, and we need a more fine-grained
    960  1.62  riastrad 		 * tool if we want to have a sanity check here.
    961  1.62  riastrad 		 */
    962   1.1  riastrad 
    963  1.62  riastrad 		if (!reg->vma)
    964  1.62  riastrad 			continue;
    965  1.36  riastrad 
    966  1.62  riastrad 		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
    967  1.62  riastrad 		reg->dirty = true;
    968   1.1  riastrad 	}
    969   1.1  riastrad }
    970   1.1  riastrad 
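                         /*
                          * i915_gem_object_ggtt_pin: find or create a VMA for the object in
                          * the global GTT and pin it there, first unbinding a misplaced VMA
                          * or revoking a stale fence when the existing placement does not
                          * satisfy the requested size, alignment and flags.
                          */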
    971  1.62  riastrad struct i915_vma *
    972  1.62  riastrad i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
    973  1.62  riastrad 			 const struct i915_ggtt_view *view,
    974  1.62  riastrad 			 u64 size,
    975  1.62  riastrad 			 u64 alignment,
    976  1.62  riastrad 			 u64 flags)
    977   1.1  riastrad {
    978  1.62  riastrad 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
    979  1.62  riastrad 	struct i915_ggtt *ggtt = &i915->ggtt;
    980  1.62  riastrad 	struct i915_vma *vma;
    981   1.1  riastrad 	int ret;
    982   1.1  riastrad 
    983  1.62  riastrad 	if (i915_gem_object_never_bind_ggtt(obj))
    984  1.62  riastrad 		return ERR_PTR(-ENODEV);
    985  1.62  riastrad 
    986  1.62  riastrad 	if (flags & PIN_MAPPABLE &&
    987  1.62  riastrad 	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
    988  1.62  riastrad 		/*
    989  1.62  riastrad 		 * If the required space is larger than the available
    990  1.62  riastrad 		 * aperture, we will not able to find a slot for the
     991  1.62  riastrad 		 * aperture, we will not be able to find a slot for the
    992  1.62  riastrad 		 * vain. Worse, doing so may cause us to ping-pong
    993  1.62  riastrad 		 * the object in and out of the Global GTT and
    994  1.62  riastrad 		 * waste a lot of cycles under the mutex.
    995  1.62  riastrad 		 */
    996  1.62  riastrad 		if (obj->base.size > ggtt->mappable_end)
    997  1.62  riastrad 			return ERR_PTR(-E2BIG);
    998   1.1  riastrad 
    999  1.62  riastrad 		/*
   1000  1.62  riastrad 		 * If NONBLOCK is set the caller is optimistically
   1001  1.62  riastrad 		 * trying to cache the full object within the mappable
   1002  1.62  riastrad 		 * aperture, and *must* have a fallback in place for
   1003  1.62  riastrad 		 * situations where we cannot bind the object. We
   1004  1.62  riastrad 		 * can be a little more lax here and use the fallback
   1005  1.62  riastrad 		 * more often to avoid costly migrations of ourselves
   1006  1.62  riastrad 		 * and other objects within the aperture.
   1007  1.62  riastrad 		 *
   1008  1.62  riastrad 		 * Half-the-aperture is used as a simple heuristic.
    1009  1.62  riastrad 		 * More interesting would be to do a search for a free
   1010  1.62  riastrad 		 * block prior to making the commitment to unbind.
   1011  1.62  riastrad 		 * That caters for the self-harm case, and with a
   1012  1.62  riastrad 		 * little more heuristics (e.g. NOFAULT, NOEVICT)
   1013  1.62  riastrad 		 * we could try to minimise harm to others.
   1014  1.62  riastrad 		 */
   1015  1.62  riastrad 		if (flags & PIN_NONBLOCK &&
   1016  1.62  riastrad 		    obj->base.size > ggtt->mappable_end / 2)
   1017  1.62  riastrad 			return ERR_PTR(-ENOSPC);
   1018   1.1  riastrad 	}
   1019   1.1  riastrad 
   1020  1.62  riastrad 	vma = i915_vma_instance(obj, &ggtt->vm, view);
   1021  1.62  riastrad 	if (IS_ERR(vma))
   1022  1.62  riastrad 		return vma;
   1023  1.62  riastrad 
   1024  1.62  riastrad 	if (i915_vma_misplaced(vma, size, alignment, flags)) {
   1025  1.62  riastrad 		if (flags & PIN_NONBLOCK) {
   1026  1.62  riastrad 			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
   1027  1.62  riastrad 				return ERR_PTR(-ENOSPC);
   1028  1.62  riastrad 
   1029  1.62  riastrad 			if (flags & PIN_MAPPABLE &&
   1030  1.62  riastrad 			    vma->fence_size > ggtt->mappable_end / 2)
   1031  1.62  riastrad 				return ERR_PTR(-ENOSPC);
   1032  1.62  riastrad 		}
   1033   1.1  riastrad 
   1034  1.62  riastrad 		ret = i915_vma_unbind(vma);
   1035  1.12  riastrad 		if (ret)
   1036  1.62  riastrad 			return ERR_PTR(ret);
   1037  1.12  riastrad 	}
   1038  1.12  riastrad 
   1039  1.62  riastrad 	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
   1040  1.62  riastrad 		mutex_lock(&ggtt->vm.mutex);
   1041  1.62  riastrad 		ret = i915_vma_revoke_fence(vma);
   1042  1.62  riastrad 		mutex_unlock(&ggtt->vm.mutex);
   1043  1.36  riastrad 		if (ret)
   1044  1.62  riastrad 			return ERR_PTR(ret);
   1045  1.36  riastrad 	}
   1046   1.1  riastrad 
   1047  1.62  riastrad 	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
   1048  1.62  riastrad 	if (ret)
   1049  1.62  riastrad 		return ERR_PTR(ret);
   1050   1.1  riastrad 
   1051  1.62  riastrad 	return vma;
   1052   1.1  riastrad }
   1053   1.1  riastrad 
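                         /*
                          * i915_gem_madvise_ioctl: let userspace mark an object's backing
                          * storage as needed (I915_MADV_WILLNEED) or discardable
                          * (I915_MADV_DONTNEED).  The shrinker lists are updated accordingly,
                          * and a DONTNEED object with no pages attached has its backing
                          * storage truncated immediately.
                          */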
   1054  1.12  riastrad int
   1055  1.62  riastrad i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
   1056  1.62  riastrad 		       struct drm_file *file_priv)
   1057   1.1  riastrad {
   1058  1.62  riastrad 	struct drm_i915_private *i915 = to_i915(dev);
   1059  1.62  riastrad 	struct drm_i915_gem_madvise *args = data;
   1060  1.62  riastrad 	struct drm_i915_gem_object *obj;
   1061  1.62  riastrad 	int err;
   1062  1.62  riastrad 
   1063  1.62  riastrad 	switch (args->madv) {
   1064  1.62  riastrad 	case I915_MADV_DONTNEED:
   1065  1.62  riastrad 	case I915_MADV_WILLNEED:
   1066  1.62  riastrad 	    break;
   1067  1.62  riastrad 	default:
   1068  1.62  riastrad 	    return -EINVAL;
   1069  1.12  riastrad 	}
   1070  1.12  riastrad 
   1071  1.62  riastrad 	obj = i915_gem_object_lookup(file_priv, args->handle);
   1072  1.62  riastrad 	if (!obj)
   1073  1.62  riastrad 		return -ENOENT;
   1074  1.12  riastrad 
   1075  1.62  riastrad 	err = mutex_lock_interruptible(&obj->mm.lock);
   1076  1.62  riastrad 	if (err)
   1077  1.62  riastrad 		goto out;
   1078  1.36  riastrad 
   1079  1.62  riastrad 	if (i915_gem_object_has_pages(obj) &&
   1080  1.62  riastrad 	    i915_gem_object_is_tiled(obj) &&
   1081  1.62  riastrad 	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
   1082  1.62  riastrad 		if (obj->mm.madv == I915_MADV_WILLNEED) {
   1083  1.62  riastrad 			GEM_BUG_ON(!obj->mm.quirked);
   1084  1.62  riastrad 			__i915_gem_object_unpin_pages(obj);
   1085  1.62  riastrad 			obj->mm.quirked = false;
   1086  1.62  riastrad 		}
   1087  1.62  riastrad 		if (args->madv == I915_MADV_WILLNEED) {
   1088  1.62  riastrad 			GEM_BUG_ON(obj->mm.quirked);
   1089  1.62  riastrad 			__i915_gem_object_pin_pages(obj);
   1090  1.62  riastrad 			obj->mm.quirked = true;
   1091  1.62  riastrad 		}
   1092  1.36  riastrad 	}
   1093  1.36  riastrad 
   1094  1.62  riastrad 	if (obj->mm.madv != __I915_MADV_PURGED)
   1095  1.62  riastrad 		obj->mm.madv = args->madv;
   1096  1.12  riastrad 
   1097  1.62  riastrad 	if (i915_gem_object_has_pages(obj)) {
   1098  1.62  riastrad 		struct list_head *list;
   1099  1.12  riastrad 
   1100  1.62  riastrad 		if (i915_gem_object_is_shrinkable(obj)) {
   1101  1.62  riastrad 			unsigned long flags;
   1102  1.36  riastrad 
   1103  1.62  riastrad 			spin_lock_irqsave(&i915->mm.obj_lock, flags);
   1104  1.36  riastrad 
   1105  1.62  riastrad 			if (obj->mm.madv != I915_MADV_WILLNEED)
   1106  1.62  riastrad 				list = &i915->mm.purge_list;
   1107  1.62  riastrad 			else
   1108  1.62  riastrad 				list = &i915->mm.shrink_list;
   1109  1.62  riastrad 			list_move_tail(&obj->mm.link, list);
   1110  1.36  riastrad 
   1111  1.62  riastrad 			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
   1112  1.36  riastrad 		}
   1113  1.62  riastrad 	}
   1114  1.36  riastrad 
   1115  1.62  riastrad 	/* if the object is no longer attached, discard its backing storage */
   1116  1.62  riastrad 	if (obj->mm.madv == I915_MADV_DONTNEED &&
   1117  1.62  riastrad 	    !i915_gem_object_has_pages(obj))
   1118  1.62  riastrad 		i915_gem_object_truncate(obj);
   1119  1.36  riastrad 
   1120  1.62  riastrad 	args->retained = obj->mm.madv != __I915_MADV_PURGED;
   1121  1.62  riastrad 	mutex_unlock(&obj->mm.lock);
   1122   1.1  riastrad 
   1123  1.36  riastrad out:
   1124  1.62  riastrad 	i915_gem_object_put(obj);
   1125  1.62  riastrad 	return err;
   1126   1.1  riastrad }
   1127   1.1  riastrad 
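                         /*
                          * i915_gem_init: one-time GEM initialisation during driver load:
                          * userptr support, firmware fetching, GGTT setup, clock gating
                          * workarounds and GT init.  On -EIO the GPU is marked wedged and a
                          * minimal KMS-only setup is kept alive instead of failing the load.
                          */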
   1128  1.62  riastrad int i915_gem_init(struct drm_i915_private *dev_priv)
   1129   1.1  riastrad {
   1130   1.1  riastrad 	int ret;
   1131   1.1  riastrad 
    1132  1.62  riastrad 	/* We need to fall back to 4K pages if the host doesn't support huge GTT. */
   1133  1.62  riastrad 	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
   1134  1.62  riastrad 		mkwrite_device_info(dev_priv)->page_sizes =
   1135  1.62  riastrad 			I915_GTT_PAGE_SIZE_4K;
   1136  1.36  riastrad 
   1137  1.62  riastrad 	ret = i915_gem_init_userptr(dev_priv);
   1138  1.62  riastrad 	if (ret)
   1139  1.62  riastrad 		return ret;
   1140   1.1  riastrad 
   1141  1.62  riastrad 	intel_uc_fetch_firmwares(&dev_priv->gt.uc);
   1142  1.62  riastrad 	intel_wopcm_init(&dev_priv->wopcm);
   1143   1.1  riastrad 
   1144  1.62  riastrad 	ret = i915_init_ggtt(dev_priv);
   1145  1.62  riastrad 	if (ret) {
   1146  1.62  riastrad 		GEM_BUG_ON(ret == -EIO);
   1147  1.62  riastrad 		goto err_unlock;
   1148   1.1  riastrad 	}
   1149   1.1  riastrad 
   1150  1.62  riastrad 	/*
    1151  1.62  riastrad 	 * Despite its name, intel_init_clock_gating applies display clock
    1152  1.62  riastrad 	 * gating workarounds, GT mmio workarounds, and the occasional GT
    1153  1.62  riastrad 	 * power context workaround. Worse, sometimes it includes a context
   1154  1.62  riastrad 	 * register workaround which we need to apply before we record the
   1155  1.62  riastrad 	 * default HW state for all contexts.
   1156  1.62  riastrad 	 *
   1157  1.62  riastrad 	 * FIXME: break up the workarounds and apply them at the right time!
   1158  1.36  riastrad 	 */
   1159  1.62  riastrad 	intel_init_clock_gating(dev_priv);
   1160   1.1  riastrad 
   1161  1.62  riastrad 	ret = intel_gt_init(&dev_priv->gt);
   1162  1.36  riastrad 	if (ret)
   1163  1.62  riastrad 		goto err_unlock;
   1164   1.1  riastrad 
   1165  1.62  riastrad 	return 0;
   1166   1.1  riastrad 
   1167  1.62  riastrad 	/*
    1168  1.62  riastrad 	 * Unwinding is complicated by the fact that we want to handle -EIO
    1169  1.62  riastrad 	 * to mean disable GPU submission but keep KMS alive. We want to mark
    1170  1.62  riastrad 	 * the HW as irreversibly wedged, but keep enough state around that the
   1171  1.62  riastrad 	 * driver doesn't explode during runtime.
   1172  1.62  riastrad 	 */
   1173  1.62  riastrad err_unlock:
   1174  1.62  riastrad 	i915_gem_drain_workqueue(dev_priv);
   1175  1.62  riastrad 
   1176  1.62  riastrad 	if (ret != -EIO) {
   1177  1.62  riastrad 		intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
   1178  1.62  riastrad 		i915_gem_cleanup_userptr(dev_priv);
   1179  1.62  riastrad 	}
   1180   1.1  riastrad 
   1181  1.36  riastrad 	if (ret == -EIO) {
   1182  1.62  riastrad 		/*
   1183  1.62  riastrad 		 * Allow engines or uC initialisation to fail by marking the GPU
    1184  1.62  riastrad 		 * as wedged. But we only want to do this when the GPU is angry;
    1185  1.36  riastrad 		 * for any other failure, such as an allocation failure, we bail.
   1186  1.36  riastrad 		 */
   1187  1.62  riastrad 		if (!intel_gt_is_wedged(&dev_priv->gt)) {
   1188  1.62  riastrad 			i915_probe_error(dev_priv,
   1189  1.62  riastrad 					 "Failed to initialize GPU, declaring it wedged!\n");
   1190  1.62  riastrad 			intel_gt_set_wedged(&dev_priv->gt);
   1191  1.62  riastrad 		}
   1192  1.62  riastrad 
   1193  1.62  riastrad 		/* Minimal basic recovery for KMS */
   1194  1.62  riastrad 		ret = i915_ggtt_enable_hw(dev_priv);
   1195  1.62  riastrad 		i915_gem_restore_gtt_mappings(dev_priv);
   1196  1.62  riastrad 		i915_gem_restore_fences(&dev_priv->ggtt);
   1197  1.62  riastrad 		intel_init_clock_gating(dev_priv);
   1198   1.1  riastrad 	}
   1199   1.1  riastrad 
   1200  1.62  riastrad 	i915_gem_drain_freed_objects(dev_priv);
   1201   1.1  riastrad 	return ret;
   1202   1.1  riastrad }
   1203   1.1  riastrad 
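                         /*
                          * Expose GEM to the rest of the system once initialisation has
                          * succeeded: register the shrinker and make the engines visible
                          * to userspace.
                          */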
   1204  1.62  riastrad void i915_gem_driver_register(struct drm_i915_private *i915)
   1205   1.1  riastrad {
   1206  1.62  riastrad 	i915_gem_driver_register__shrinker(i915);
   1207   1.1  riastrad 
   1208  1.62  riastrad 	intel_engines_driver_register(i915);
   1209   1.1  riastrad }
   1210   1.1  riastrad 
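                         /* Undo i915_gem_driver_register(): unregister the shrinker. */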
   1211  1.62  riastrad void i915_gem_driver_unregister(struct drm_i915_private *i915)
   1212   1.1  riastrad {
   1213  1.62  riastrad 	i915_gem_driver_unregister__shrinker(i915);
   1214   1.1  riastrad }
   1215   1.1  riastrad 
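                         /*
                          * Quiesce GEM for device removal: suspend the GPU, pull down the
                          * GT, and flush any outstanding work and freed objects.
                          */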
   1216  1.62  riastrad void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
   1217   1.1  riastrad {
   1218  1.62  riastrad 	intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
   1219   1.1  riastrad 
   1220  1.62  riastrad 	i915_gem_suspend_late(dev_priv);
   1221  1.62  riastrad 	intel_gt_driver_remove(&dev_priv->gt);
   1222  1.70  riastrad #ifndef __NetBSD__		/* XXX uabi_engines */
   1223  1.62  riastrad 	dev_priv->uabi_engines = RB_ROOT;
   1224  1.70  riastrad #endif
   1225   1.1  riastrad 
   1226  1.62  riastrad 	/* Flush any outstanding unpin_work. */
   1227  1.62  riastrad 	i915_gem_drain_workqueue(dev_priv);
   1228   1.1  riastrad 
   1229  1.62  riastrad 	i915_gem_drain_freed_objects(dev_priv);
   1230   1.1  riastrad }
   1231   1.1  riastrad 
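                         /*
                          * Final teardown after removal: release the contexts and the GT,
                          * free the workaround lists, uC firmware and userptr state, and
                          * warn if any contexts have leaked.
                          */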
   1232  1.62  riastrad void i915_gem_driver_release(struct drm_i915_private *dev_priv)
   1233   1.1  riastrad {
   1234  1.62  riastrad 	i915_gem_driver_release__contexts(dev_priv);
   1235  1.12  riastrad 
   1236  1.62  riastrad 	intel_gt_driver_release(&dev_priv->gt);
   1237   1.1  riastrad 
   1238  1.62  riastrad 	intel_wa_list_free(&dev_priv->gt_wa_list);
   1239   1.1  riastrad 
   1240  1.62  riastrad 	intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
   1241  1.62  riastrad 	i915_gem_cleanup_userptr(dev_priv);
   1242   1.1  riastrad 
   1243  1.62  riastrad 	i915_gem_drain_freed_objects(dev_priv);
   1244   1.1  riastrad 
   1245  1.62  riastrad 	WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
   1246   1.1  riastrad }
   1247   1.1  riastrad 
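                         /*
                          * Set up the object bookkeeping: the deferred free list and the
                          * shrinker's purge/shrink lists, with the lock protecting them.
                          */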
   1248  1.62  riastrad static void i915_gem_init__mm(struct drm_i915_private *i915)
   1249   1.1  riastrad {
   1250  1.62  riastrad 	spin_lock_init(&i915->mm.obj_lock);
   1251   1.1  riastrad 
   1252  1.62  riastrad 	init_llist_head(&i915->mm.free_list);
   1253   1.1  riastrad 
   1254  1.62  riastrad 	INIT_LIST_HEAD(&i915->mm.purge_list);
   1255  1.62  riastrad 	INIT_LIST_HEAD(&i915->mm.shrink_list);
   1256   1.2  riastrad 
   1257  1.62  riastrad 	i915_gem_init__objects(i915);
   1258  1.12  riastrad }
   1259   1.2  riastrad 
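                         /*
                          * Early, software-only GEM initialisation; no hardware is
                          * touched here.
                          */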
   1260  1.62  riastrad void i915_gem_init_early(struct drm_i915_private *dev_priv)
   1261  1.12  riastrad {
   1262  1.62  riastrad 	i915_gem_init__mm(dev_priv);
   1263  1.62  riastrad 	i915_gem_init__contexts(dev_priv);
   1264   1.1  riastrad 
   1265  1.62  riastrad 	spin_lock_init(&dev_priv->fb_tracking.lock);
   1266   1.1  riastrad }
   1267   1.1  riastrad 
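                         /*
                          * Counterpart to i915_gem_init_early(): assert that every object
                          * has been freed, then destroy the locks (the NetBSD port
                          * destroys them explicitly).
                          */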
   1268  1.62  riastrad void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
   1269   1.1  riastrad {
   1270  1.62  riastrad 	i915_gem_drain_freed_objects(dev_priv);
   1271  1.62  riastrad 	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
   1272  1.62  riastrad 	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
   1273  1.62  riastrad 	WARN_ON(dev_priv->mm.shrink_count);
   1274  1.66  riastrad 	spin_lock_destroy(&dev_priv->fb_tracking.lock);
   1275  1.75  riastrad 	spin_lock_destroy(&dev_priv->mm.obj_lock);
   1276  1.36  riastrad }
   1277  1.36  riastrad 
   1278  1.62  riastrad int i915_gem_freeze(struct drm_i915_private *dev_priv)
   1279  1.36  riastrad {
    1280  1.62  riastrad 	/* Discard all purgeable objects and let userspace recover them as
   1281  1.62  riastrad 	 * required after resuming.
   1282  1.62  riastrad 	 */
   1283  1.62  riastrad 	i915_gem_shrink_all(dev_priv);
   1284  1.36  riastrad 
   1285  1.62  riastrad 	return 0;
   1286   1.1  riastrad }
   1287   1.1  riastrad 
   1288  1.62  riastrad int i915_gem_freeze_late(struct drm_i915_private *i915)
   1289   1.1  riastrad {
   1290  1.62  riastrad 	struct drm_i915_gem_object *obj;
   1291  1.62  riastrad 	intel_wakeref_t wakeref;
   1292   1.1  riastrad 
   1293  1.62  riastrad 	/*
   1294  1.62  riastrad 	 * Called just before we write the hibernation image.
   1295  1.62  riastrad 	 *
   1296  1.62  riastrad 	 * We need to update the domain tracking to reflect that the CPU
   1297  1.62  riastrad 	 * will be accessing all the pages to create and restore from the
   1298  1.62  riastrad 	 * hibernation, and so upon restoration those pages will be in the
   1299  1.62  riastrad 	 * CPU domain.
   1300  1.62  riastrad 	 *
   1301  1.62  riastrad 	 * To make sure the hibernation image contains the latest state,
   1302  1.62  riastrad 	 * we update that state just before writing out the image.
   1303  1.62  riastrad 	 *
    1304  1.62  riastrad 	 * To try to reduce the size of the hibernation image, we manually
    1305  1.62  riastrad 	 * shrink the objects as well; see i915_gem_freeze().
   1306  1.62  riastrad 	 */
   1307   1.1  riastrad 
   1308  1.62  riastrad 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
   1309   1.1  riastrad 
   1310  1.62  riastrad 	i915_gem_shrink(i915, -1UL, NULL, ~0);
   1311  1.62  riastrad 	i915_gem_drain_freed_objects(i915);
   1312  1.12  riastrad 
   1313  1.62  riastrad 	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
   1314  1.62  riastrad 		i915_gem_object_lock(obj);
   1315  1.62  riastrad 		WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
   1316  1.62  riastrad 		i915_gem_object_unlock(obj);
   1317  1.62  riastrad 	}
   1318  1.12  riastrad 
   1319  1.62  riastrad 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
   1320  1.12  riastrad 
   1321  1.12  riastrad 	return 0;
   1322   1.1  riastrad }
   1323   1.1  riastrad 
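                         /* Per-open-file cleanup when a DRM file is closed. */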
   1324  1.62  riastrad void i915_gem_release(struct drm_device *dev, struct drm_file *file)
   1325   1.1  riastrad {
   1326  1.62  riastrad 	struct drm_i915_file_private *file_priv = file->driver_priv;
   1327  1.62  riastrad 	struct i915_request *request;
   1328   1.1  riastrad 
   1329  1.62  riastrad 	/* Clean up our request list when the client is going away, so that
   1330  1.62  riastrad 	 * later retire_requests won't dereference our soon-to-be-gone
   1331  1.62  riastrad 	 * file_priv.
   1332  1.62  riastrad 	 */
   1333  1.62  riastrad 	spin_lock(&file_priv->mm.lock);
   1334  1.62  riastrad 	list_for_each_entry(request, &file_priv->mm.request_list, client_link)
   1335  1.62  riastrad 		request->file_priv = NULL;
   1336  1.62  riastrad 	spin_unlock(&file_priv->mm.lock);
   1337  1.66  riastrad 
   1338  1.66  riastrad 	/*
   1339  1.66  riastrad 	 * XXX This is probably too early -- need to defer with
   1340  1.66  riastrad 	 * callrcu; caller already defers free with kfree_rcu.
   1341  1.66  riastrad 	 */
   1342  1.66  riastrad 	spin_lock_destroy(&file_priv->mm.lock);
   1343  1.36  riastrad }
   1344   1.1  riastrad 
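                         /*
                          * Per-open-file setup when a DRM file is opened: allocate the
                          * file_priv and create the client's default context.
                          */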
   1345  1.62  riastrad int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
   1346  1.36  riastrad {
   1347  1.62  riastrad 	struct drm_i915_file_private *file_priv;
   1348  1.36  riastrad 	int ret;
   1349   1.1  riastrad 
   1350  1.62  riastrad 	DRM_DEBUG("\n");
   1351   1.1  riastrad 
   1352  1.62  riastrad 	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
   1353  1.62  riastrad 	if (!file_priv)
   1354  1.62  riastrad 		return -ENOMEM;
   1355   1.1  riastrad 
   1356  1.62  riastrad 	file->driver_priv = file_priv;
   1357  1.62  riastrad 	file_priv->dev_priv = i915;
   1358  1.62  riastrad 	file_priv->file = file;
   1359  1.12  riastrad 
   1360  1.62  riastrad 	spin_lock_init(&file_priv->mm.lock);
   1361  1.62  riastrad 	INIT_LIST_HEAD(&file_priv->mm.request_list);
   1362  1.12  riastrad 
   1363  1.62  riastrad 	file_priv->bsd_engine = -1;
   1364  1.62  riastrad 	file_priv->hang_timestamp = jiffies;
   1365  1.12  riastrad 
   1366  1.62  riastrad 	ret = i915_gem_context_open(i915, file);
   1367  1.62  riastrad 	if (ret)
   1368  1.62  riastrad 		kfree(file_priv);
   1369  1.12  riastrad 
   1370  1.62  riastrad 	return ret;
   1371   1.1  riastrad }
   1372  1.62  riastrad 
   1373  1.62  riastrad #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
   1374  1.62  riastrad #include "selftests/mock_gem_device.c"
   1375  1.62  riastrad #include "selftests/i915_gem.c"
   1376  1.62  riastrad #endif
   1377