      1 /*	$NetBSD: i915_gem_mman.c,v 1.28 2025/01/26 20:49:22 riastradh Exp $	*/
      2 
      3 /*
      4  * SPDX-License-Identifier: MIT
      5  *
      6  * Copyright © 2014-2016 Intel Corporation
      7  */
      8 
      9 #include <sys/cdefs.h>
     10 __KERNEL_RCSID(0, "$NetBSD: i915_gem_mman.c,v 1.28 2025/01/26 20:49:22 riastradh Exp $");
     11 
     12 #include <linux/anon_inodes.h>
     13 #include <linux/mman.h>
     14 #include <linux/pfn_t.h>
     15 #include <linux/sizes.h>
     16 
     17 #include "drm/drm_gem.h"
     18 
     19 #include "gt/intel_gt.h"
     20 #include "gt/intel_gt_requests.h"
     21 
     22 #include "i915_drv.h"
     23 #include "i915_gem_gtt.h"
     24 #include "i915_gem_ioctls.h"
     25 #include "i915_gem_object.h"
     26 #include "i915_gem_mman.h"
     27 #include "i915_trace.h"
     28 #include "i915_user_extensions.h"
     29 #include "i915_vma.h"
     30 
     31 #ifdef __NetBSD__
     32 static const struct uvm_pagerops i915_mmo_gem_uvm_ops;
     33 #else
     34 static inline bool
     35 __vma_matches(struct vm_area_struct *vma, struct file *filp,
     36 	      unsigned long addr, unsigned long size)
     37 {
     38 	if (vma->vm_file != filp)
     39 		return false;
     40 
     41 	return vma->vm_start == addr &&
     42 	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
     43 }
     44 #endif
     45 
     46 /**
     47  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
     48  *			 it is mapped to.
     49  * @dev: drm device
     50  * @data: ioctl data blob
     51  * @file: drm file
     52  *
     53  * While the mapping holds a reference on the contents of the object, it doesn't
     54  * imply a ref on the object itself.
     55  *
     56  * IMPORTANT:
     57  *
     58  * DRM driver writers who look at this function as an example for how to do GEM
     59  * mmap support, please don't implement mmap support like this. The modern way
     60  * to implement DRM mmap support is with an mmap offset ioctl (like
     61  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
     62  * That way debug tooling like valgrind will understand what's going on; hiding
     63  * the mmap call in a driver-private ioctl will break that. The i915 driver only
     64  * does cpu mmaps this way because we didn't know better.
     65  */
     66 int
     67 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
     68 		    struct drm_file *file)
     69 {
     70 	struct drm_i915_gem_mmap *args = data;
     71 	struct drm_i915_gem_object *obj;
     72 	unsigned long addr;
     73 
     74 	if (args->flags & ~(I915_MMAP_WC))
     75 		return -EINVAL;
     76 
     77 	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
     78 		return -ENODEV;
     79 
     80 	obj = i915_gem_object_lookup(file, args->handle);
     81 	if (!obj)
     82 		return -ENOENT;
     83 
     84 #ifdef __NetBSD__
     85 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
     86 	if ((i915->quirks & QUIRK_NETBSD_VERSION_CALLED) == 0)
     87 		args->flags = 0;
     88 #endif
     89 
     90 	/* prime objects have no backing filp to GEM mmap
     91 	 * pages from.
     92 	 */
     93 	if (!obj->base.filp) {
     94 		addr = -ENXIO;
     95 		goto err;
     96 	}
     97 
     98 	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
     99 		addr = -EINVAL;
    100 		goto err;
    101 	}
    102 
    103 #ifdef __NetBSD__
    104 	int error;
    105 
    106 	/* Acquire a reference for uvm_map to consume.  */
    107 	uao_reference(obj->base.filp);
    108 	addr = (*curproc->p_emul->e_vm_default_addr)(curproc,
    109 	    (vaddr_t)curproc->p_vmspace->vm_daddr, args->size,
    110 	    curproc->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
    111 	error = uvm_map(&curproc->p_vmspace->vm_map, &addr, args->size,
    112 	    obj->base.filp, args->offset, 0,
    113 	    UVM_MAPFLAG(VM_PROT_READ|VM_PROT_WRITE,
    114 		VM_PROT_READ|VM_PROT_WRITE, UVM_INH_COPY, UVM_ADV_NORMAL,
    115 		0));
    116 	if (error) {
    117 		uao_detach(obj->base.filp);
    118 		/* XXX errno NetBSD->Linux */
    119 		addr = -error;
    120 		goto err;
    121 	}
    122 #else
    123 	addr = vm_mmap(obj->base.filp, 0, args->size,
    124 		       PROT_READ | PROT_WRITE, MAP_SHARED,
    125 		       args->offset);
    126 	if (IS_ERR_VALUE(addr))
    127 		goto err;
    128 
    129 	if (args->flags & I915_MMAP_WC) {
    130 		struct mm_struct *mm = current->mm;
    131 		struct vm_area_struct *vma;
    132 
    133 		if (down_write_killable(&mm->mmap_sem)) {
    134 			addr = -EINTR;
    135 			goto err;
    136 		}
    137 		vma = find_vma(mm, addr);
    138 		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
    139 			vma->vm_page_prot =
    140 				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
    141 		else
    142 			addr = -ENOMEM;
    143 		up_write(&mm->mmap_sem);
    144 		if (IS_ERR_VALUE(addr))
    145 			goto err;
    146 	}
    147 #endif
    148 	i915_gem_object_put(obj);
    149 
    150 	args->addr_ptr = (u64)addr;
    151 	return 0;
    152 
    153 err:
    154 	i915_gem_object_put(obj);
    155 	return addr;
    156 }
    157 
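        /*
         * Illustrative sketch, not part of the driver: how userspace would
         * typically drive the legacy mmap ioctl serviced above.  The field
         * and flag names (handle, offset, size, flags, addr_ptr,
         * I915_MMAP_WC) are the drm_i915_gem_mmap uapi names used by this
         * function; "fd", "handle" and "obj_size" are assumed to be an open
         * DRM fd, an existing GEM handle and the object size, and drmIoctl
         * comes from libdrm.
         *
         *	struct drm_i915_gem_mmap arg;
         *	void *ptr = NULL;
         *
         *	memset(&arg, 0, sizeof(arg));
         *	arg.handle = handle;
         *	arg.offset = 0;
         *	arg.size = obj_size;
         *	arg.flags = I915_MMAP_WC;
         *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
         *		ptr = (void *)(uintptr_t)arg.addr_ptr;
         */
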
    158 static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
    159 {
    160 	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
    161 }
    162 
    163 /**
    164  * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
    165  *
    166  * A history of the GTT mmap interface:
    167  *
    168  * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
    169  *     be aligned and suitable for fencing, and still fit into the available
    170  *     mappable space left by the pinned display objects. A classic problem,
    171  *     which we called the page-fault-of-doom, was that we would ping-pong
    172  *     between two objects that could not fit inside the GTT, so the memcpy
    173  *     would page one object in at the expense of the other between every
    174  *     single byte.
    175  *
    176  * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
    177  *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
    178  *     object is too large for the available space (or simply too large
    179  *     for the mappable aperture!), a view is created instead and faulted
    180  *     into userspace. (This view is aligned and sized appropriately for
    181  *     fenced access.)
    182  *
    183  * 2 - Recognise WC as a separate cache domain so that we can flush the
    184  *     delayed writes via GTT before performing direct access via WC.
    185  *
    186  * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
    187  *     pagefault; swapin remains transparent.
    188  *
    189  * 4 - Support multiple fault handlers per object depending on object's
    190  *     backing storage (a.k.a. MMAP_OFFSET).
    191  *
    192  * Restrictions:
    193  *
    194  *  * snoopable objects cannot be accessed via the GTT; doing so can cause
    195  *    machine hangs on some architectures, corruption on others. An attempt to
    196  *    service a GTT page fault from a snoopable object will generate a SIGBUS.
    197  *
    198  *  * the object must be able to fit into RAM (physical memory, though not
    199  *    limited to the mappable aperture).
    200  *
    201  *
    202  * Caveats:
    203  *
    204  *  * a new GTT page fault will synchronize rendering from the GPU and flush
    205  *    all data to system memory. Subsequent access will not be synchronized.
    206  *
    207  *  * all mappings are revoked on runtime device suspend.
    208  *
    209  *  * there are only 8, 16 or 32 fence registers to share between all users
    210  *    (older machines require a fence register for display and blitter access
    211  *    as well). Contention for the fence registers will cause the previous users
    212  *    to be unmapped and any new access will generate new page faults.
    213  *
    214  *  * running out of memory while servicing a fault may generate a SIGBUS,
    215  *    rather than the expected SIGSEGV.
    216  */
    217 int i915_gem_mmap_gtt_version(void)
    218 {
    219 	return 4;
    220 }
    221 
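        /*
         * The version number above is reported to userspace via the i915
         * getparam ioctl (I915_PARAM_MMAP_GTT_VERSION), handled elsewhere in
         * the driver.  A hedged sketch of how a client might query it, with
         * "fd" assumed to be an open DRM fd and drmIoctl coming from libdrm:
         *
         *	struct drm_i915_getparam gp;
         *	int value = 0;
         *
         *	memset(&gp, 0, sizeof(gp));
         *	gp.param = I915_PARAM_MMAP_GTT_VERSION;
         *	gp.value = &value;
         *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
         *		printf("GTT mmap version %d\n", value);
         */
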
    222 static inline struct i915_ggtt_view
    223 compute_partial_view(const struct drm_i915_gem_object *obj,
    224 		     pgoff_t page_offset,
    225 		     unsigned int chunk)
    226 {
    227 	struct i915_ggtt_view view;
    228 
    229 	if (i915_gem_object_is_tiled(obj))
    230 		chunk = roundup(chunk, tile_row_pages(obj));
    231 
    232 	view.type = I915_GGTT_VIEW_PARTIAL;
    233 	view.partial.offset = rounddown(page_offset, chunk);
    234 	view.partial.size =
    235 		min_t(unsigned int, chunk,
    236 		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
    237 
    238 	/* If the partial covers the entire object, just create a normal VMA. */
    239 	if (chunk >= obj->base.size >> PAGE_SHIFT)
    240 		view.type = I915_GGTT_VIEW_NORMAL;
    241 
    242 	return view;
    243 }
    244 
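        /*
         * Worked example for compute_partial_view() above, assuming 4 KiB
         * pages so that the MIN_CHUNK_PAGES chunk passed in by vm_fault_gtt()
         * is 256 pages: a fault at page_offset 1000 of an untiled 4096-page
         * (16 MiB) object yields a partial view with partial.offset ==
         * rounddown(1000, 256) == 768 and partial.size == min(256,
         * 4096 - 768) == 256.  For an object of 256 pages or fewer the chunk
         * covers the whole object and a normal view is returned instead.
         */
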
    245 #ifdef __NetBSD__
    246 /*
    247  * XXX pmap_enter_default instead of pmap_enter because of a problem
    248  * with using weak aliases in kernel modules.
    249  *
    250  * XXX This probably won't work in a Xen kernel!  Maybe this should be
    251  * #ifdef _MODULE?
    252  */
    253 int	pmap_enter_default(pmap_t, vaddr_t, paddr_t, vm_prot_t, unsigned);
    254 #define	pmap_enter	pmap_enter_default
    255 #endif
    256 
    257 #ifdef __NetBSD__
    258 static int
    259 i915_error_to_vmf_fault(int err)
    260 #else
    261 static vm_fault_t i915_error_to_vmf_fault(int err)
    262 #endif
    263 {
    264 	switch (err) {
    265 	default:
    266 		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
    267 		/* fallthrough */
    268 	case -EIO: /* shmemfs failure from swap device */
    269 	case -EFAULT: /* purged object */
    270 	case -ENODEV: /* bad object, how did you get here! */
    271 	case -ENXIO: /* unable to access backing store (on device) */
    272 #ifdef __NetBSD__
    273 		return EINVAL;	/* SIGBUS */
    274 #else
    275 		return VM_FAULT_SIGBUS;
    276 #endif
    277 
    278 	case -ENOSPC: /* shmemfs allocation failure */
    279 	case -ENOMEM: /* our allocation failure */
    280 #ifdef __NetBSD__
    281 		return ENOMEM;
    282 #else
    283 		return VM_FAULT_OOM;
    284 #endif
    285 
    286 	case 0:
    287 	case -EAGAIN:
    288 	case -ERESTARTSYS:
    289 	case -EINTR:
    290 	case -EBUSY:
    291 		/*
    292 		 * EBUSY is ok: this just means that another thread
    293 		 * already did the job.
    294 		 */
    295 #ifdef __NetBSD__
    296 		return 0;	/* retry access in userland */
    297 #else
    298 		return VM_FAULT_NOPAGE;
    299 #endif
    300 	}
    301 }
    302 
    303 #ifdef __NetBSD__
    304 static int
    305 vm_fault_cpu(struct uvm_faultinfo *ufi, struct i915_mmap_offset *mmo,
    306     vaddr_t vaddr, struct vm_page **pps, int npages, int centeridx, int flags)
    307 #else
    308 static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
    309 #endif
    310 {
    311 #ifndef __NetBSD__
    312 	struct vm_area_struct *area = vmf->vma;
    313 	struct i915_mmap_offset *mmo = area->vm_private_data;
    314 #endif
    315 	struct drm_i915_gem_object *obj = mmo->obj;
    316 #ifdef __NetBSD__
    317 	bool write = ufi->entry->protection & VM_PROT_WRITE;
    318 #else
    319 	bool write = area->vm_flags & VM_WRITE;
    320 #endif
    321 	resource_size_t iomap;
    322 	int err;
    323 
    324 	/* Sanity check that we allow writing into this object */
    325 	if (unlikely(i915_gem_object_is_readonly(obj) && write))
    326 #ifdef __NetBSD__
    327 		return EINVAL;	/* SIGBUS */
    328 #else
    329 		return VM_FAULT_SIGBUS;
    330 #endif
    331 
    332 	err = i915_gem_object_pin_pages(obj);
    333 	if (err)
    334 		goto out;
    335 
    336 	iomap = -1;
    337 	if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) {
    338 		iomap = obj->mm.region->iomap.base;
    339 		iomap -= obj->mm.region->region.start;
    340 	}
    341 
    342 	/* PTEs are revoked in obj->ops->put_pages() */
    343 #ifdef __NetBSD__
    344 	/* XXX No lmem supported yet.  */
    345 	KASSERT(i915_gem_object_type_has(obj,
    346 		I915_GEM_OBJECT_HAS_STRUCT_PAGE));
    347 
    348 	int pmapflags;
    349 	switch (mmo->mmap_type) {
    350 	case I915_MMAP_TYPE_WC:
    351 		pmapflags = PMAP_WRITE_COMBINE;
    352 		break;
    353 	case I915_MMAP_TYPE_WB:
    354 		pmapflags = 0;	/* default */
    355 		break;
    356 	case I915_MMAP_TYPE_UC:
    357 		pmapflags = PMAP_NOCACHE;
    358 		break;
    359 	case I915_MMAP_TYPE_GTT: /* handled by vm_fault_gtt */
    360 	default:
    361 		panic("invalid i915 gem mmap offset type: %d",
    362 		    mmo->mmap_type);
    363 	}
    364 
    365 	struct scatterlist *sg = obj->mm.pages->sgl;
    366 	unsigned startpage = (ufi->entry->offset + (vaddr - ufi->entry->start))
    367 	    >> PAGE_SHIFT;
    368 	paddr_t paddr;
    369 	int i;
    370 
    371 	for (i = 0; i < npages; i++) {
    372 		if ((flags & PGO_ALLPAGES) == 0 && i != centeridx)
    373 			continue;
    374 		if (pps[i] == PGO_DONTCARE)
    375 			continue;
    376 		paddr = page_to_phys(sg->sg_pgs[startpage + i]);
    377 		/* XXX errno NetBSD->Linux */
    378 		err = -pmap_enter(ufi->orig_map->pmap,
    379 		    vaddr + i*PAGE_SIZE, paddr, ufi->entry->protection,
    380 		    PMAP_CANFAIL | ufi->entry->protection | pmapflags);
    381 		if (err)
    382 			break;
    383 	}
    384 	pmap_update(ufi->orig_map->pmap);
    385 #else
    386 	err = remap_io_sg(area,
    387 			  area->vm_start, area->vm_end - area->vm_start,
    388 			  obj->mm.pages->sgl, iomap);
    389 #endif
    390 
    391 	if (write) {
    392 		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
    393 		obj->mm.dirty = true;
    394 	}
    395 
    396 	i915_gem_object_unpin_pages(obj);
    397 
    398 out:
    399 	return i915_error_to_vmf_fault(err);
    400 }
    401 
    402 #ifdef __NetBSD__
    403 static int
    404 vm_fault_gtt(struct uvm_faultinfo *ufi, struct i915_mmap_offset *mmo,
    405     vaddr_t vaddr, struct vm_page **pps, int npages, int centeridx, int flags)
    406 #else
    407 static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
    408 #endif
    409 {
    410 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
    411 #ifndef __NetBSD__
    412 	struct vm_area_struct *area = vmf->vma;
    413 	struct i915_mmap_offset *mmo = area->vm_private_data;
    414 #endif
    415 	struct drm_i915_gem_object *obj = mmo->obj;
    416 	struct drm_device *dev = obj->base.dev;
    417 	struct drm_i915_private *i915 = to_i915(dev);
    418 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
    419 	struct i915_ggtt *ggtt = &i915->ggtt;
    420 #ifdef __NetBSD__
    421 	bool write = ufi->entry->protection & VM_PROT_WRITE;
    422 #else
    423 	bool write = area->vm_flags & VM_WRITE;
    424 #endif
    425 	intel_wakeref_t wakeref;
    426 	struct i915_vma *vma;
    427 	pgoff_t page_offset;
    428 	int srcu;
    429 	int ret;
    430 
    431 	/* Sanity check that we allow writing into this object */
    432 	if (i915_gem_object_is_readonly(obj) && write)
    433 #ifdef __NetBSD__
    434 		return EINVAL;	/* SIGBUS */
    435 #else
    436 		return VM_FAULT_SIGBUS;
    437 #endif
    438 
    439 #ifdef __NetBSD__
    440 	page_offset = (ufi->entry->offset + (vaddr - ufi->entry->start))
    441 	    >> PAGE_SHIFT;
    442 #else
    443 	/* We don't use vmf->pgoff since that has the fake offset */
    444 	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
    445 #endif
    446 
    447 	trace_i915_gem_object_fault(obj, page_offset, true, write);
    448 
    449 	ret = i915_gem_object_pin_pages(obj);
    450 	if (ret)
    451 		goto err;
    452 
    453 	wakeref = intel_runtime_pm_get(rpm);
    454 
    455 	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
    456 	if (ret)
    457 		goto err_rpm;
    458 
    459 	/* Now pin it into the GTT as needed */
    460 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
    461 				       PIN_MAPPABLE |
    462 				       PIN_NONBLOCK /* NOWARN */ |
    463 				       PIN_NOEVICT);
    464 	if (IS_ERR(vma)) {
    465 		/* Use a partial view if it is bigger than available space */
    466 		struct i915_ggtt_view view =
    467 			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
    468 		unsigned int flags;
    469 
    470 		flags = PIN_MAPPABLE | PIN_NOSEARCH;
    471 		if (view.type == I915_GGTT_VIEW_NORMAL)
    472 			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
    473 
    474 		/*
    475 		 * Userspace is now writing through an untracked VMA, abandon
    476 		 * all hope that the hardware is able to track future writes.
    477 		 */
    478 
    479 		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
    480 		if (IS_ERR(vma)) {
    481 			flags = PIN_MAPPABLE;
    482 			view.type = I915_GGTT_VIEW_PARTIAL;
    483 			vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
    484 		}
    485 
    486 		/* The entire mappable GGTT is pinned? Unexpected! */
    487 		GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
    488 	}
    489 	if (IS_ERR(vma)) {
    490 		ret = PTR_ERR(vma);
    491 		goto err_reset;
    492 	}
    493 
    494 	/* Access to snoopable pages through the GTT is incoherent. */
    495 	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
    496 		ret = -EFAULT;
    497 		goto err_unpin;
    498 	}
    499 
    500 	ret = i915_vma_pin_fence(vma);
    501 	if (ret)
    502 		goto err_unpin;
    503 
    504 	/* Finally, remap it using the new GTT offset */
    505 #ifdef __NetBSD__
    506 	unsigned startpage = page_offset;
    507 	paddr_t paddr;
    508 	int i;
    509 
    510 	for (i = 0; i < npages; i++) {
    511 		if ((flags & PGO_ALLPAGES) == 0 && i != centeridx)
    512 			continue;
    513 		if (pps[i] == PGO_DONTCARE)
    514 			continue;
    515 		paddr = ggtt->gmadr.start + vma->node.start
    516 		    + (startpage + i)*PAGE_SIZE;
    517 		/* XXX errno NetBSD->Linux */
    518 		ret = -pmap_enter(ufi->orig_map->pmap,
    519 		    vaddr + i*PAGE_SIZE, paddr, ufi->entry->protection,
    520 		    PMAP_CANFAIL|PMAP_WRITE_COMBINE | ufi->entry->protection);
    521 		if (ret)
    522 			break;
    523 	}
    524 	pmap_update(ufi->orig_map->pmap);
    525 #else
    526 	ret = remap_io_mapping(area,
    527 			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
    528 			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
    529 			       min_t(u64, vma->size, area->vm_end - area->vm_start),
    530 			       &ggtt->iomap);
    531 #endif
    532 	if (ret)
    533 		goto err_fence;
    534 
    535 	assert_rpm_wakelock_held(rpm);
    536 
    537 	/* Mark as being mmapped into userspace for later revocation */
    538 	mutex_lock(&i915->ggtt.vm.mutex);
    539 	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
    540 		list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
    541 	mutex_unlock(&i915->ggtt.vm.mutex);
    542 
    543 	/* Track the mmo associated with the fenced vma */
    544 	vma->mmo = mmo;
    545 
    546 	if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
    547 		intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
    548 				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
    549 
    550 	if (write) {
    551 		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
    552 		i915_vma_set_ggtt_write(vma);
    553 		obj->mm.dirty = true;
    554 	}
    555 
    556 err_fence:
    557 	i915_vma_unpin_fence(vma);
    558 err_unpin:
    559 	__i915_vma_unpin(vma);
    560 err_reset:
    561 	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
    562 err_rpm:
    563 	intel_runtime_pm_put(rpm, wakeref);
    564 	i915_gem_object_unpin_pages(obj);
    565 err:
    566 	return i915_error_to_vmf_fault(ret);
    567 }
    568 
    569 #ifdef __NetBSD__
    570 
    571 static int
    572 i915_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
    573     int npages, int centeridx, vm_prot_t access_type, int flags)
    574 {
    575 	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
    576 	struct i915_mmap_offset *mmo =
    577 	    container_of(uobj, struct i915_mmap_offset, uobj);
    578 	struct drm_i915_gem_object *obj = mmo->obj;
    579 	int error;
    580 
    581 	KASSERT(rw_lock_held(obj->base.filp->vmobjlock));
    582 	KASSERT(!i915_gem_object_is_readonly(obj) ||
    583 	    (access_type & VM_PROT_WRITE) == 0);
    584 	KASSERT(i915_gem_object_type_has(obj,
    585 		I915_GEM_OBJECT_HAS_STRUCT_PAGE|I915_GEM_OBJECT_HAS_IOMEM));
    586 
    587 	/* Actually we don't support iomem right now!  */
    588 	KASSERT(i915_gem_object_type_has(obj,
    589 		I915_GEM_OBJECT_HAS_STRUCT_PAGE));
    590 
    591 	/*
    592 	 * The lock isn't actually helpful for us, and the caller in
    593 	 * uvm_fault only just acquired it anyway, so no important
    594 	 * invariants are implied by it.
    595 	 */
    596 	rw_exit(obj->base.filp->vmobjlock);
    597 
    598 	switch (mmo->mmap_type) {
    599 	case I915_MMAP_TYPE_WC:
    600 	case I915_MMAP_TYPE_WB:
    601 	case I915_MMAP_TYPE_UC:
    602 		error = vm_fault_cpu(ufi, mmo, vaddr, pps, npages, centeridx,
    603 		    flags);
    604 		break;
    605 	case I915_MMAP_TYPE_GTT:
    606 		error = vm_fault_gtt(ufi, mmo, vaddr, pps, npages, centeridx,
    607 		    flags);
    608 		break;
    609 	default:
    610 		panic("invalid i915 gem mmap offset type: %d",
    611 		    mmo->mmap_type);
    612 	}
    613 
    614 	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
    615 	KASSERT(error != EINTR);
    616 	KASSERT(error != ERESTART);
    617 	return error;
    618 }
    619 
    620 #endif
    621 
    622 void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
    623 {
    624 	struct i915_vma *vma;
    625 
    626 	GEM_BUG_ON(!obj->userfault_count);
    627 
    628 	for_each_ggtt_vma(vma, obj)
    629 		i915_vma_revoke_mmap(vma);
    630 
    631 	GEM_BUG_ON(obj->userfault_count);
    632 }
    633 
    634 /*
    635  * It is vital that we remove the page mapping if we have mapped a tiled
    636  * object through the GTT and then lose the fence register due to
    637  * resource pressure. Similarly, if the object has been moved out of the
    638  * aperture, then pages mapped into userspace must be revoked. Removing the
    639  * mapping will then trigger a page fault on the next user access, allowing
    640  * fixup by vm_fault_gtt().
    641  */
    642 static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
    643 {
    644 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
    645 	intel_wakeref_t wakeref;
    646 
    647 	/*
    648 	 * Serialisation between user GTT access and our code depends upon
    649 	 * revoking the CPU's PTE whilst the mutex is held. The next user
    650 	 * pagefault then has to wait until we release the mutex.
    651 	 *
    652 	 * Note that RPM complicates this somewhat by adding an additional
    653 	 * requirement that operations to the GGTT be made holding the RPM
    654 	 * wakeref.
    655 	 */
    656 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
    657 	mutex_lock(&i915->ggtt.vm.mutex);
    658 
    659 	if (!obj->userfault_count)
    660 		goto out;
    661 
    662 	__i915_gem_object_release_mmap_gtt(obj);
    663 
    664 	/*
    665 	 * Ensure that the CPU's PTEs are revoked and there are no outstanding
    666 	 * memory transactions from userspace before we return. The TLB
    667 	 * flushing implied by changing the PTEs above *should* be
    668 	 * sufficient; an extra barrier here just provides us with a bit
    669 	 * of paranoid documentation about our requirement to serialise
    670 	 * memory writes before touching registers / GSM.
    671 	 */
    672 	wmb();
    673 
    674 out:
    675 	mutex_unlock(&i915->ggtt.vm.mutex);
    676 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
    677 }
    678 
    679 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
    680 {
    681 #ifdef __NetBSD__
    682 	struct page *page;
    683 	struct vm_page *vm_page;
    684 	unsigned i;
    685 
    686 	if (!i915_gem_object_has_pages(obj))
    687 		return;
    688 	for (i = 0; i < obj->mm.pages->sgl->sg_npgs; i++) {
    689 		page = obj->mm.pages->sgl->sg_pgs[i];
    690 		vm_page = &page->p_vmp;
    691 		pmap_page_protect(vm_page, VM_PROT_NONE);
    692 	}
    693 #else
    694 	struct i915_mmap_offset *mmo, *mn;
    695 
    696 	spin_lock(&obj->mmo.lock);
    697 	rbtree_postorder_for_each_entry_safe(mmo, mn,
    698 					     &obj->mmo.offsets, offset) {
    699 		/*
    700 		 * vma_node_unmap for GTT mmaps handled already in
    701 		 * __i915_gem_object_release_mmap_gtt
    702 		 */
    703 		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
    704 			continue;
    705 
    706 		spin_unlock(&obj->mmo.lock);
    707 		drm_vma_node_unmap(&mmo->vma_node,
    708 				   obj->base.dev->anon_inode->i_mapping);
    709 		spin_lock(&obj->mmo.lock);
    710 	}
    711 	spin_unlock(&obj->mmo.lock);
    712 #endif
    713 }
    714 
    715 /**
    716  * i915_gem_object_release_mmap - remove physical page mappings
    717  * @obj: obj in question
    718  *
    719  * Preserve the reservation of the mmapping with the DRM core code, but
    720  * relinquish ownership of the pages back to the system.
    721  */
    722 void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
    723 {
    724 	i915_gem_object_release_mmap_gtt(obj);
    725 	i915_gem_object_release_mmap_offset(obj);
    726 }
    727 
    728 static struct i915_mmap_offset *
    729 lookup_mmo(struct drm_i915_gem_object *obj,
    730 	   enum i915_mmap_type mmap_type)
    731 {
    732 #ifdef __NetBSD__
    733 	struct i915_mmap_offset *mmo;
    734 
    735 	spin_lock(&obj->mmo.lock);
    736 	mmo = obj->mmo.offsets[mmap_type];
    737 	spin_unlock(&obj->mmo.lock);
    738 
    739 	return mmo;
    740 #else
    741 	struct rb_node *rb;
    742 
    743 	spin_lock(&obj->mmo.lock);
    744 	rb = obj->mmo.offsets.rb_node;
    745 	while (rb) {
    746 		struct i915_mmap_offset *mmo =
    747 			rb_entry(rb, typeof(*mmo), offset);
    748 
    749 		if (mmo->mmap_type == mmap_type) {
    750 			spin_unlock(&obj->mmo.lock);
    751 			return mmo;
    752 		}
    753 
    754 		if (mmo->mmap_type < mmap_type)
    755 			rb = rb->rb_right;
    756 		else
    757 			rb = rb->rb_left;
    758 	}
    759 	spin_unlock(&obj->mmo.lock);
    760 
    761 	return NULL;
    762 #endif
    763 }
    764 
    765 static struct i915_mmap_offset *
    766 insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
    767 {
    768 #ifdef __NetBSD__
    769 	struct i915_mmap_offset *to_free = NULL;
    770 
    771 	spin_lock(&obj->mmo.lock);
    772 	if (obj->mmo.offsets[mmo->mmap_type]) {
    773 		to_free = mmo;
    774 		mmo = obj->mmo.offsets[mmo->mmap_type];
    775 	} else {
    776 		obj->mmo.offsets[mmo->mmap_type] = mmo;
    777 	}
    778 	spin_unlock(&obj->mmo.lock);
    779 
    780 	if (to_free) {
    781 		drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
    782 		    &to_free->vma_node);
    783 		uvm_obj_destroy(&to_free->uobj, /*free lock*/true);
    784 		drm_vma_node_destroy(&to_free->vma_node);
    785 		kfree(to_free);
    786 	}
    787 
    788 	return mmo;
    789 #else
    790 	struct rb_node *rb, **p;
    791 
    792 	spin_lock(&obj->mmo.lock);
    793 	rb = NULL;
    794 	p = &obj->mmo.offsets.rb_node;
    795 	while (*p) {
    796 		struct i915_mmap_offset *pos;
    797 
    798 		rb = *p;
    799 		pos = rb_entry(rb, typeof(*pos), offset);
    800 
    801 		if (pos->mmap_type == mmo->mmap_type) {
    802 			spin_unlock(&obj->mmo.lock);
    803 			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
    804 					      &mmo->vma_node);
    805 			kfree(mmo);
    806 			return pos;
    807 		}
    808 
    809 		if (pos->mmap_type < mmo->mmap_type)
    810 			p = &rb->rb_right;
    811 		else
    812 			p = &rb->rb_left;
    813 	}
    814 	rb_link_node(&mmo->offset, rb, p);
    815 	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
    816 	spin_unlock(&obj->mmo.lock);
    817 
    818 	return mmo;
    819 #endif
    820 }
    821 
    822 static struct i915_mmap_offset *
    823 mmap_offset_attach(struct drm_i915_gem_object *obj,
    824 		   enum i915_mmap_type mmap_type,
    825 		   struct drm_file *file)
    826 {
    827 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
    828 	struct i915_mmap_offset *mmo;
    829 	int err;
    830 
    831 	mmo = lookup_mmo(obj, mmap_type);
    832 	if (mmo)
    833 		goto out;
    834 
    835 	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
    836 	if (!mmo)
    837 		return ERR_PTR(-ENOMEM);
    838 
    839 	mmo->obj = obj;
    840 	mmo->mmap_type = mmap_type;
    841 #ifdef __NetBSD__
    842 	drm_vma_node_init(&mmo->vma_node);
    843 	uvm_obj_init(&mmo->uobj, &i915_mmo_gem_uvm_ops, /*allocate lock*/false,
    844 	    /*nrefs*/1);
    845 	uvm_obj_setlock(&mmo->uobj, obj->base.filp->vmobjlock);
    846 #else
    847 	drm_vma_node_reset(&mmo->vma_node);
    848 #endif
    849 
    850 	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
    851 				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
    852 	if (likely(!err))
    853 		goto insert;
    854 
    855 	/* Attempt to reap some mmap space from dead objects */
    856 	err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
    857 	if (err)
    858 		goto err;
    859 
    860 	i915_gem_drain_freed_objects(i915);
    861 	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
    862 				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
    863 	if (err)
    864 		goto err;
    865 
    866 insert:
    867 	mmo = insert_mmo(obj, mmo);
    868 	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
    869 out:
    870 	if (file)
    871 		drm_vma_node_allow(&mmo->vma_node, file);
    872 	return mmo;
    873 
    874 err:
    875 #ifdef __NetBSD__
    876 	uvm_obj_destroy(&mmo->uobj, /*free lock*/true);
    877 #endif
    878 	drm_vma_node_destroy(&mmo->vma_node);
    879 	kfree(mmo);
    880 	return ERR_PTR(err);
    881 }
    882 
    883 static int
    884 __assign_mmap_offset(struct drm_file *file,
    885 		     u32 handle,
    886 		     enum i915_mmap_type mmap_type,
    887 		     u64 *offset)
    888 {
    889 	struct drm_i915_gem_object *obj;
    890 	struct i915_mmap_offset *mmo;
    891 	int err;
    892 
    893 	obj = i915_gem_object_lookup(file, handle);
    894 	if (!obj)
    895 		return -ENOENT;
    896 
    897 	if (mmap_type == I915_MMAP_TYPE_GTT &&
    898 	    i915_gem_object_never_bind_ggtt(obj)) {
    899 		err = -ENODEV;
    900 		goto out;
    901 	}
    902 
    903 	if (mmap_type != I915_MMAP_TYPE_GTT &&
    904 	    !i915_gem_object_type_has(obj,
    905 				      I915_GEM_OBJECT_HAS_STRUCT_PAGE |
    906 				      I915_GEM_OBJECT_HAS_IOMEM)) {
    907 		err = -ENODEV;
    908 		goto out;
    909 	}
    910 
    911 	mmo = mmap_offset_attach(obj, mmap_type, file);
    912 	if (IS_ERR(mmo)) {
    913 		err = PTR_ERR(mmo);
    914 		goto out;
    915 	}
    916 
    917 	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
    918 	err = 0;
    919 out:
    920 	i915_gem_object_put(obj);
    921 	return err;
    922 }
    923 
    924 int
    925 i915_gem_dumb_mmap_offset(struct drm_file *file,
    926 			  struct drm_device *dev,
    927 			  u32 handle,
    928 			  u64 *offset)
    929 {
    930 	enum i915_mmap_type mmap_type;
    931 
    932 	if (boot_cpu_has(X86_FEATURE_PAT))
    933 		mmap_type = I915_MMAP_TYPE_WC;
    934 	else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
    935 		return -ENODEV;
    936 	else
    937 		mmap_type = I915_MMAP_TYPE_GTT;
    938 
    939 	return __assign_mmap_offset(file, handle, mmap_type, offset);
    940 }
    941 
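        /*
         * For dumb buffers the fake offset produced here reaches userspace
         * through the generic DRM_IOCTL_MODE_MAP_DUMB path rather than an
         * i915-specific ioctl.  A hedged sketch of that flow ("fd", "handle"
         * and "size" are assumed, drmIoctl comes from libdrm):
         *
         *	struct drm_mode_map_dumb map;
         *	void *ptr;
         *
         *	memset(&map, 0, sizeof(map));
         *	map.handle = handle;
         *	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
         *		return -1;
         *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
         *	    fd, map.offset);
         */
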
    942 /**
    943  * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
    944  * @dev: DRM device
    945  * @data: GTT mapping ioctl data
    946  * @file: GEM object info
    947  *
    948  * Simply returns the fake offset to userspace so it can mmap it.
    949  * The mmap call will end up in drm_gem_mmap(), which will set things
    950  * up so we can get faults in the handler above.
    951  *
    952  * The fault handler will take care of binding the object into the GTT
    953  * (since it may have been evicted to make room for something), allocating
    954  * a fence register, and mapping the appropriate aperture address into
    955  * userspace.
    956  */
    957 int
    958 i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
    959 			   struct drm_file *file)
    960 {
    961 	struct drm_i915_private *i915 = to_i915(dev);
    962 	struct drm_i915_gem_mmap_offset *args = data;
    963 	enum i915_mmap_type type;
    964 	int err;
    965 
    966 	/*
    967 	 * Historically we failed to check args.pad and args.offset
    968 	 * and so we cannot use those fields for user input and we cannot
    969 	 * add -EINVAL for them as the ABI is fixed, i.e. old userspace
    970 	 * may be feeding in garbage in those fields.
    971 	 *
    972 	 * if (args->pad) return -EINVAL; is verboten!
    973 	 */
    974 
    975 	err = i915_user_extensions(u64_to_user_ptr(args->extensions),
    976 				   NULL, 0, NULL);
    977 	if (err)
    978 		return err;
    979 
    980 	switch (args->flags) {
    981 	case I915_MMAP_OFFSET_GTT:
    982 		if (!i915_ggtt_has_aperture(&i915->ggtt))
    983 			return -ENODEV;
    984 		type = I915_MMAP_TYPE_GTT;
    985 		break;
    986 
    987 	case I915_MMAP_OFFSET_WC:
    988 		if (!boot_cpu_has(X86_FEATURE_PAT))
    989 			return -ENODEV;
    990 		type = I915_MMAP_TYPE_WC;
    991 		break;
    992 
    993 	case I915_MMAP_OFFSET_WB:
    994 		type = I915_MMAP_TYPE_WB;
    995 		break;
    996 
    997 	case I915_MMAP_OFFSET_UC:
    998 		if (!boot_cpu_has(X86_FEATURE_PAT))
    999 			return -ENODEV;
   1000 		type = I915_MMAP_TYPE_UC;
   1001 		break;
   1002 
   1003 	default:
   1004 		return -EINVAL;
   1005 	}
   1006 
   1007 	return __assign_mmap_offset(file, args->handle, type, &args->offset);
   1008 }
   1009 
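        /*
         * Illustrative sketch of the two-step flow described above, from the
         * userspace side.  The field and flag names (handle, flags, offset,
         * I915_MMAP_OFFSET_WC) are the drm_i915_gem_mmap_offset uapi names
         * used by this ioctl; "fd", "handle" and "obj_size" are assumed, and
         * drmIoctl comes from libdrm.
         *
         *	struct drm_i915_gem_mmap_offset arg;
         *	void *ptr;
         *
         *	memset(&arg, 0, sizeof(arg));
         *	arg.handle = handle;
         *	arg.flags = I915_MMAP_OFFSET_WC;
         *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
         *		return -1;
         *	ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE, MAP_SHARED,
         *	    fd, arg.offset);
         */
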
   1010 #ifdef __NetBSD__
   1011 
   1012 static int
   1013 i915_gem_nofault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
   1014     struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
   1015     int flags)
   1016 {
   1017 	panic("i915 main gem object should not be mmapped directly");
   1018 }
   1019 
   1020 const struct uvm_pagerops i915_gem_uvm_ops = {
   1021 	.pgo_reference = drm_gem_pager_reference,
   1022 	.pgo_detach = drm_gem_pager_detach,
   1023 	.pgo_fault = i915_gem_nofault,
   1024 };
   1025 
   1026 static void
   1027 i915_mmo_reference(struct uvm_object *uobj)
   1028 {
   1029 	struct i915_mmap_offset *mmo =
   1030 	    container_of(uobj, struct i915_mmap_offset, uobj);
   1031 	struct drm_i915_gem_object *obj = mmo->obj;
   1032 
   1033 	drm_gem_object_get(&obj->base);
   1034 }
   1035 
   1036 static void
   1037 i915_mmo_detach(struct uvm_object *uobj)
   1038 {
   1039 	struct i915_mmap_offset *mmo =
   1040 	    container_of(uobj, struct i915_mmap_offset, uobj);
   1041 	struct drm_i915_gem_object *obj = mmo->obj;
   1042 
   1043 	drm_gem_object_put_unlocked(&obj->base);
   1044 }
   1045 
   1046 static const struct uvm_pagerops i915_mmo_gem_uvm_ops = {
   1047 	.pgo_reference = i915_mmo_reference,
   1048 	.pgo_detach = i915_mmo_detach,
   1049 	.pgo_fault = i915_gem_fault,
   1050 };
   1051 
   1052 int
   1053 i915_gem_mmap_object(struct drm_device *dev, off_t byte_offset, size_t nbytes,
   1054     int prot, struct uvm_object **uobjp, voff_t *uoffsetp, struct file *fp)
   1055 {
   1056 	const unsigned long startpage = byte_offset >> PAGE_SHIFT;
   1057 	const unsigned long npages = nbytes >> PAGE_SHIFT;
   1058 	struct drm_file *file = fp->f_data;
   1059 	struct drm_vma_offset_node *node;
   1060 	struct drm_i915_gem_object *obj = NULL;
   1061 	struct i915_mmap_offset *mmo = NULL;
   1062 
   1063 	if (drm_dev_is_unplugged(dev))
   1064 		return -ENODEV;
   1065 
   1066 	rcu_read_lock();
   1067 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
   1068 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
   1069 	    startpage, npages);
   1070 	if (node && drm_vma_node_is_allowed(node, file)) {
   1071 		/*
   1072 		 * Skip 0-refcnted objects as they are in the process of being
   1073 		 * destroyed and will be invalid when the vma manager lock
   1074 		 * is released.
   1075 		 */
   1076 		mmo = container_of(node, struct i915_mmap_offset, vma_node);
   1077 		obj = i915_gem_object_get_rcu(mmo->obj);
   1078 	}
   1079 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
   1080 	rcu_read_unlock();
   1081 	if (!obj)
   1082 		return node ? -EACCES : -EINVAL;
   1083 
   1084 	if (i915_gem_object_is_readonly(obj)) {
   1085 		if (prot & VM_PROT_WRITE) {
   1086 			i915_gem_object_put(obj);
   1087 			return -EINVAL;
   1088 		}
   1089 	}
   1090 
   1091 	/* Success!  */
   1092 	*uobjp = &mmo->uobj;
   1093 	*uoffsetp = 0;
   1094 	return 0;
   1095 }
   1096 
   1097 #else
   1098 
   1099 static void vm_open(struct vm_area_struct *vma)
   1100 {
   1101 	struct i915_mmap_offset *mmo = vma->vm_private_data;
   1102 	struct drm_i915_gem_object *obj = mmo->obj;
   1103 
   1104 	GEM_BUG_ON(!obj);
   1105 	i915_gem_object_get(obj);
   1106 }
   1107 
   1108 static void vm_close(struct vm_area_struct *vma)
   1109 {
   1110 	struct i915_mmap_offset *mmo = vma->vm_private_data;
   1111 	struct drm_i915_gem_object *obj = mmo->obj;
   1112 
   1113 	GEM_BUG_ON(!obj);
   1114 	i915_gem_object_put(obj);
   1115 }
   1116 
   1117 static const struct vm_operations_struct vm_ops_gtt = {
   1118 	.fault = vm_fault_gtt,
   1119 	.open = vm_open,
   1120 	.close = vm_close,
   1121 };
   1122 
   1123 static const struct vm_operations_struct vm_ops_cpu = {
   1124 	.fault = vm_fault_cpu,
   1125 	.open = vm_open,
   1126 	.close = vm_close,
   1127 };
   1128 
   1129 static int singleton_release(struct inode *inode, struct file *file)
   1130 {
   1131 	struct drm_i915_private *i915 = file->private_data;
   1132 
   1133 	cmpxchg(&i915->gem.mmap_singleton, file, NULL);
   1134 	drm_dev_put(&i915->drm);
   1135 
   1136 	return 0;
   1137 }
   1138 
   1139 static const struct file_operations singleton_fops = {
   1140 	.owner = THIS_MODULE,
   1141 	.release = singleton_release,
   1142 };
   1143 
   1144 static struct file *mmap_singleton(struct drm_i915_private *i915)
   1145 {
   1146 	struct file *file;
   1147 
   1148 	rcu_read_lock();
   1149 	file = i915->gem.mmap_singleton;
   1150 	if (file && !get_file_rcu(file))
   1151 		file = NULL;
   1152 	rcu_read_unlock();
   1153 	if (file)
   1154 		return file;
   1155 
   1156 	file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
   1157 	if (IS_ERR(file))
   1158 		return file;
   1159 
   1160 	/* Everyone shares a single global address space */
   1161 	file->f_mapping = i915->drm.anon_inode->i_mapping;
   1162 
   1163 	smp_store_mb(i915->gem.mmap_singleton, file);
   1164 	drm_dev_get(&i915->drm);
   1165 
   1166 	return file;
   1167 }
   1168 
   1169 /*
   1170  * This overcomes the limitation in drm_gem_mmap's assignment of a
   1171  * drm_gem_object as the vma->vm_private_data. Since we need to
   1172  * be able to resolve multiple mmap offsets which could be tied
   1173  * to a single gem object.
   1174  */
   1175 int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
   1176 {
   1177 	struct drm_vma_offset_node *node;
   1178 	struct drm_file *priv = filp->private_data;
   1179 	struct drm_device *dev = priv->minor->dev;
   1180 	struct drm_i915_gem_object *obj = NULL;
   1181 	struct i915_mmap_offset *mmo = NULL;
   1182 	struct file *anon;
   1183 
   1184 	if (drm_dev_is_unplugged(dev))
   1185 		return -ENODEV;
   1186 
   1187 	rcu_read_lock();
   1188 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
   1189 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
   1190 						  vma->vm_pgoff,
   1191 						  vma_pages(vma));
   1192 	if (node && drm_vma_node_is_allowed(node, priv)) {
   1193 		/*
   1194 		 * Skip 0-refcnted objects as they are in the process of being
   1195 		 * destroyed and will be invalid when the vma manager lock
   1196 		 * is released.
   1197 		 */
   1198 		mmo = container_of(node, struct i915_mmap_offset, vma_node);
   1199 		obj = i915_gem_object_get_rcu(mmo->obj);
   1200 	}
   1201 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
   1202 	rcu_read_unlock();
   1203 	if (!obj)
   1204 		return node ? -EACCES : -EINVAL;
   1205 
   1206 	if (i915_gem_object_is_readonly(obj)) {
   1207 		if (vma->vm_flags & VM_WRITE) {
   1208 			i915_gem_object_put(obj);
   1209 			return -EINVAL;
   1210 		}
   1211 		vma->vm_flags &= ~VM_MAYWRITE;
   1212 	}
   1213 
   1214 	anon = mmap_singleton(to_i915(dev));
   1215 	if (IS_ERR(anon)) {
   1216 		i915_gem_object_put(obj);
   1217 		return PTR_ERR(anon);
   1218 	}
   1219 
   1220 	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
   1221 	vma->vm_private_data = mmo;
   1222 
   1223 	/*
   1224 	 * We keep the ref on mmo->obj, not vm_file, but we require
   1225 	 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
   1226 	 * Our userspace is accustomed to having per-file resource cleanup
   1227 	 * (i.e. contexts, objects and requests) on their close(fd), which
   1228 	 * requires avoiding extraneous references to their filp, hence
   1229 	 * we prefer to use an anonymous file for their mmaps.
   1230 	 */
   1231 	fput(vma->vm_file);
   1232 	vma->vm_file = anon;
   1233 
   1234 	switch (mmo->mmap_type) {
   1235 	case I915_MMAP_TYPE_WC:
   1236 		vma->vm_page_prot =
   1237 			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
   1238 		vma->vm_ops = &vm_ops_cpu;
   1239 		break;
   1240 
   1241 	case I915_MMAP_TYPE_WB:
   1242 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
   1243 		vma->vm_ops = &vm_ops_cpu;
   1244 		break;
   1245 
   1246 	case I915_MMAP_TYPE_UC:
   1247 		vma->vm_page_prot =
   1248 			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
   1249 		vma->vm_ops = &vm_ops_cpu;
   1250 		break;
   1251 
   1252 	case I915_MMAP_TYPE_GTT:
   1253 		vma->vm_page_prot =
   1254 			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
   1255 		vma->vm_ops = &vm_ops_gtt;
   1256 		break;
   1257 	}
   1258 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
   1259 
   1260 	return 0;
   1261 }
   1262 
   1263 #endif	/* __NetBSD__ */
   1264 
   1265 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
   1266 #include "selftests/i915_gem_mman.c"
   1267 #endif
   1268