/*	$NetBSD: ttm_bo_vm.c,v 1.22.4.1 2024/10/04 11:40:53 martin Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_vm.c,v 1.22.4.1 2024/10/04 11:40:53 martin Exp $");

#include <sys/types.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_fault.h>

#include <linux/bitops.h>

#include <drm/drm_vma_manager.h>

#include <ttm/ttm_bo_driver.h>

static int	ttm_bo_uvm_lookup(struct ttm_bo_device *, unsigned long,
		    unsigned long, struct ttm_buffer_object **);

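/*
 * ttm_bo_uvm_reference --
 *
 *	uvm_object reference hook: a uvm reference is backed directly
 *	by a reference on the TTM buffer object.
 */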
void
ttm_bo_uvm_reference(struct uvm_object *uobj)
{
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);

	(void)ttm_bo_get(bo);
}

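/*
 * ttm_bo_uvm_detach --
 *
 *	uvm_object detach hook: release the reference taken in
 *	ttm_bo_uvm_reference.
 */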
void
ttm_bo_uvm_detach(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);

	ttm_bo_put(bo);
}

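/*
 * ttm_bo_vm_fault_idle --
 *
 *	Wait for any pipelined move of bo to complete before a fault
 *	may proceed.  Returns 0 if the buffer is idle, or ERESTART
 *	after dropping the fault locks and waiting, so that the fault
 *	is retried from scratch.
 */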
static int
ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, struct uvm_faultinfo *vmf)
{
	int err, ret = 0;

	if (__predict_true(!bo->moving))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_fence_is_signaled(bo->moving))
		goto out_clear;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (1) {		/* always retriable in NetBSD */
		ret = ERESTART;

		ttm_bo_get(bo);
		uvmfault_unlockall(vmf, vmf->entry->aref.ar_amap, NULL);
		(void) dma_fence_wait(bo->moving, true);
		dma_resv_unlock(bo->base.resv);
		ttm_bo_put(bo);
		goto out_unlock;
	}

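	/*
	 * Not reached on NetBSD: the unconditional branch above
	 * always retries.  The ordinary wait below is kept for
	 * reference.
	 */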
	/*
	 * Ordinary wait.
	 */
	err = dma_fence_wait(bo->moving, true);
	if (__predict_false(err != 0)) {
		ret = (err != -ERESTARTSYS) ? EINVAL/*SIGBUS*/ :
		    0/*retry access in userland*/;
		goto out_unlock;
	}

out_clear:
	dma_fence_put(bo->moving);
	bo->moving = NULL;

out_unlock:
	return ret;
}

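/*
 * ttm_bo_vm_reserve --
 *
 *	Try to reserve bo for fault processing.  On contention, drop
 *	the fault locks, wait for the reservation, and return ERESTART
 *	so that the fault is retried from scratch.
 */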
static int
ttm_bo_vm_reserve(struct ttm_buffer_object *bo, struct uvm_faultinfo *vmf)
{

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	if (__predict_false(!dma_resv_trylock(bo->base.resv))) {
		ttm_bo_get(bo);
		uvmfault_unlockall(vmf, vmf->entry->aref.ar_amap, NULL);
		if (!dma_resv_lock_interruptible(bo->base.resv, NULL))
			dma_resv_unlock(bo->base.resv);
		ttm_bo_put(bo);
		return ERESTART;
	}

	return 0;
}

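/*
 * ttm_bo_uvm_fault_reserved --
 *
 *	Fault handler for a buffer object whose reservation is already
 *	held.  Waits for any pipelined move, reserves I/O memory if
 *	needed, and enters pmap mappings for the faulting pages.
 */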
static int
ttm_bo_uvm_fault_reserved(struct uvm_faultinfo *vmf, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *const uobj = vmf->entry->object.uvm_obj;
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);
	struct ttm_bo_device *const bdev = bo->bdev;
	struct ttm_mem_type_manager *man =
	    &bdev->man[bo->mem.mem_type];
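	/* Backing store: bus address space if iomem, else the ttm pages. */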
	union {
		bus_addr_t base;
		struct ttm_tt *ttm;
	} u;
	size_t size __diagused;
	voff_t uoffset;		/* offset in bytes into bo */
	unsigned startpage;	/* offset in pages into bo */
	unsigned i;
	vm_prot_t vm_prot = vmf->entry->protection; /* VM_PROT_* */
	pgprot_t prot = vm_prot; /* VM_PROT_* | PMAP_* cacheability flags */
	int err, ret;

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
		return EINVAL;	/* SIGBUS */

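	/*
	 * Give the driver a chance to prepare for the fault, e.g. by
	 * migrating the buffer into a CPU-mappable placement.
	 */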
	if (bdev->driver->fault_reserve_notify) {
		struct dma_fence *moving = dma_fence_get(bo->moving);

		err = bdev->driver->fault_reserve_notify(bo);
		switch (err) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			return 0;	/* retry access in userland */
		default:
			return EINVAL;	/* SIGBUS */
		}

		if (bo->moving != moving) {
			spin_lock(&ttm_bo_glob.lru_lock);
			ttm_bo_move_to_lru_tail(bo, NULL);
			spin_unlock(&ttm_bo_glob.lru_lock);
		}
		dma_fence_put(moving);
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (__predict_false(ret != 0))
		return ret;

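	/*
	 * Lock the memory type's I/O space and reserve a mappable
	 * I/O region for the buffer if it lives in I/O memory.
	 */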
	err = ttm_mem_io_lock(man, true);
	if (__predict_false(err != 0))
		return 0;	/* retry access in userland */
	err = ttm_mem_io_reserve_vm(bo);
	if (__predict_false(err != 0)) {
		ret = EINVAL;	/* SIGBUS */
		goto out_io_unlock;
	}

	prot = ttm_io_prot(bo->mem.placement, prot);
	if (!bo->mem.bus.is_iomem) {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC
		};

		u.ttm = bo->ttm;
		size = (size_t)bo->ttm->num_pages << PAGE_SHIFT;
		if (ttm_tt_populate(bo->ttm, &ctx)) {
			ret = ENOMEM;
			goto out_io_unlock;
		}
	} else {
		u.base = (bo->mem.bus.base + bo->mem.bus.offset);
		size = bo->mem.bus.size;
	}

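	/* The fault range must lie within the object's backing store. */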
	KASSERT(vmf->entry->start <= vaddr);
	KASSERT((vmf->entry->offset & (PAGE_SIZE - 1)) == 0);
	KASSERT(vmf->entry->offset <= size);
	KASSERT((vaddr - vmf->entry->start) <= (size - vmf->entry->offset));
	KASSERTMSG(((size_t)npages << PAGE_SHIFT <=
		((size - vmf->entry->offset) - (vaddr - vmf->entry->start))),
	    "vaddr=%jx npages=%d bo=%p is_iomem=%d size=%zu"
	    " start=%jx offset=%jx",
	    (uintmax_t)vaddr, npages, bo, (int)bo->mem.bus.is_iomem, size,
	    (uintmax_t)vmf->entry->start, (uintmax_t)vmf->entry->offset);
	uoffset = (vmf->entry->offset + (vaddr - vmf->entry->start));
	startpage = (uoffset >> PAGE_SHIFT);
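	/*
	 * Enter a mapping for each requested page.  PMAP_CANFAIL
	 * makes pmap_enter return an error rather than panic if it
	 * runs out of resources, so the fault can be retried.
	 */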
	for (i = 0; i < npages; i++) {
		paddr_t paddr;

		if ((flags & PGO_ALLPAGES) == 0 && i != centeridx)
			continue;
		if (pps[i] == PGO_DONTCARE)
			continue;
		if (!bo->mem.bus.is_iomem) {
			paddr = page_to_phys(u.ttm->pages[startpage + i]);
		} else if (bdev->driver->io_mem_pfn) {
			paddr = (paddr_t)(*bdev->driver->io_mem_pfn)(bo,
			    startpage + i) << PAGE_SHIFT;
		} else {
			const paddr_t cookie = bus_space_mmap(bdev->memt,
			    u.base, (off_t)(startpage + i) << PAGE_SHIFT,
			    vm_prot, 0);

			paddr = pmap_phys_address(cookie);
#if 0				/* XXX Why no PMAP_* flags added here? */
			mmapflags = pmap_mmap_flags(cookie);
#endif
		}
		ret = pmap_enter(vmf->orig_map->pmap, vaddr + i*PAGE_SIZE,
		    paddr, vm_prot, PMAP_CANFAIL | prot);
		if (ret) {
			/*
			 * XXX Continue with ret=0 if i != centeridx,
			 * so we don't fail if only readahead pages
			 * fail?
			 */
			KASSERT(ret != ERESTART);
			break;
		}
	}
	pmap_update(vmf->orig_map->pmap);
	ret = 0;		/* retry access in userland */
out_io_unlock:
	ttm_mem_io_unlock(man);
	KASSERT(ret != ERESTART);
	return ret;
}

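/*
 * ttm_bo_uvm_fault --
 *
 *	uvm_object pager fault entry point: reserve the buffer object,
 *	handle the fault with the reservation held, and drop all locks
 *	before returning.  Returns 0 on success or to retry from
 *	userland, ERESTART to rerun the fault, or EINVAL for SIGBUS.
 */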
int
ttm_bo_uvm_fault(struct uvm_faultinfo *vmf, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *const uobj = vmf->entry->object.uvm_obj;
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);
	int ret;

	/* Thanks, uvm, but we don't need this lock.  */
	rw_exit(uobj->vmobjlock);

	/* Copy-on-write mappings make no sense for the graphics aperture.  */
	if (UVM_ET_ISCOPYONWRITE(vmf->entry)) {
		ret = EINVAL;	/* SIGBUS */
		goto out;
	}

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret) {
		/* ttm_bo_vm_reserve already unlocked on ERESTART */
		KASSERTMSG(ret == ERESTART, "ret=%d", ret);
		return ret;
	}

	ret = ttm_bo_uvm_fault_reserved(vmf, vaddr, pps, npages, centeridx,
	    access_type, flags);
	if (ret == ERESTART)	/* already unlocked on ERESTART */
		return ret;

	dma_resv_unlock(bo->base.resv);

out:	uvmfault_unlockall(vmf, vmf->entry->aref.ar_amap, NULL);
	return ret;
}

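/*
 * ttm_bo_mmap_object --
 *
 *	Look up the buffer object covering [offset, offset + size) in
 *	bdev's mmap space, verify access with the driver, and return
 *	its uvm object in *uobjp and the offset into it in *uoffsetp.
 */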
int
ttm_bo_mmap_object(struct ttm_bo_device *bdev, off_t offset, size_t size,
    vm_prot_t prot, struct uvm_object **uobjp, voff_t *uoffsetp,
    struct file *file)
{
	const unsigned long startpage = (offset >> PAGE_SHIFT);
	const unsigned long npages = (size >> PAGE_SHIFT);
	struct ttm_buffer_object *bo;
	int ret;

	KASSERT(0 == (offset & (PAGE_SIZE - 1)));
	KASSERT(0 == (size & (PAGE_SIZE - 1)));

	ret = ttm_bo_uvm_lookup(bdev, startpage, npages, &bo);
	if (ret)
		goto fail0;
	KASSERTMSG((drm_vma_node_start(&bo->base.vma_node) <= startpage),
	    "mapping npages=0x%jx @ pfn=0x%jx"
	    " from vma npages=0x%jx @ pfn=0x%jx",
	    (uintmax_t)npages,
	    (uintmax_t)startpage,
	    (uintmax_t)drm_vma_node_size(&bo->base.vma_node),
	    (uintmax_t)drm_vma_node_start(&bo->base.vma_node));
	KASSERTMSG((npages <= drm_vma_node_size(&bo->base.vma_node)),
	    "mapping npages=0x%jx @ pfn=0x%jx"
	    " from vma npages=0x%jx @ pfn=0x%jx",
	    (uintmax_t)npages,
	    (uintmax_t)startpage,
	    (uintmax_t)drm_vma_node_size(&bo->base.vma_node),
	    (uintmax_t)drm_vma_node_start(&bo->base.vma_node));
	KASSERTMSG(((startpage - drm_vma_node_start(&bo->base.vma_node))
		<= (drm_vma_node_size(&bo->base.vma_node) - npages)),
	    "mapping npages=0x%jx @ pfn=0x%jx"
	    " from vma npages=0x%jx @ pfn=0x%jx",
	    (uintmax_t)npages,
	    (uintmax_t)startpage,
	    (uintmax_t)drm_vma_node_size(&bo->base.vma_node),
	    (uintmax_t)drm_vma_node_start(&bo->base.vma_node));

	/* XXX Just assert this?  */
	if (__predict_false(bdev->driver->verify_access == NULL)) {
		ret = -EPERM;
		goto fail1;
	}
	ret = (*bdev->driver->verify_access)(bo, file);
	if (ret)
		goto fail1;

	/* Success!  */
	*uobjp = &bo->uvmobj;
	*uoffsetp = (offset -
	    ((off_t)drm_vma_node_start(&bo->base.vma_node) << PAGE_SHIFT));
	return 0;

fail1:	ttm_bo_put(bo);
fail0:	KASSERT(ret);
	return ret;
}

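/*
 * ttm_bo_uvm_lookup --
 *
 *	Find the buffer object mapped at pages [startpage, startpage +
 *	npages) in bdev's vma manager and acquire a reference to it,
 *	unless it is already on its way to destruction.
 */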
static int
ttm_bo_uvm_lookup(struct ttm_bo_device *bdev, unsigned long startpage,
    unsigned long npages, struct ttm_buffer_object **bop)
{
	struct ttm_buffer_object *bo = NULL;
	struct drm_vma_offset_node *node;

	drm_vma_offset_lock_lookup(bdev->vma_manager);
	node = drm_vma_offset_lookup_locked(bdev->vma_manager, startpage,
	    npages);
	if (node != NULL) {
		bo = container_of(node, struct ttm_buffer_object,
		    base.vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}
	drm_vma_offset_unlock_lookup(bdev->vma_manager);

	if (bo == NULL)
		return -ENOENT;

	*bop = bo;
	return 0;
}
    430