/*	$NetBSD: ttm_bo_vm.c,v 1.26 2024/06/23 00:49:06 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_vm.c,v 1.26 2024/06/23 00:49:06 riastradh Exp $");

#include <sys/types.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_fault.h>

#include <linux/bitops.h>

#include <drm/drm_vma_manager.h>

#include <ttm/ttm_bo_driver.h>

static int	ttm_bo_uvm_lookup(struct ttm_bo_device *, unsigned long,
		    unsigned long, struct ttm_buffer_object **);

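/*
 * ttm_bo_uvm_reference(uobj)
 *
 *	Reference hook for the bo's uvm_object: take another reference
 *	to the containing ttm_buffer_object, e.g. when a mapping of it
 *	is duplicated.
 */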
void
ttm_bo_uvm_reference(struct uvm_object *uobj)
{
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);

	(void)ttm_bo_get(bo);
}

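/*
 * ttm_bo_uvm_detach(uobj)
 *
 *	Detach hook for the bo's uvm_object: release one reference to
 *	the containing ttm_buffer_object, possibly freeing it.
 */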
void
ttm_bo_uvm_detach(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);

	ttm_bo_put(bo);
}

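/*
 * ttm_bo_vm_fault_idle(bo, vmf)
 *
 *	If a pipelined move of bo's backing storage is still in flight,
 *	unlock everything, wait for the move to finish, and return
 *	ERESTART so the fault is retried from the top; in that case the
 *	fault state and bo's reservation have both been released.
 *	Return 0, with all locks still held, if bo is already idle.
 */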
static int
ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, struct uvm_faultinfo *vmf)
{
	int err, ret = 0;

	if (__predict_true(!bo->moving))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_fence_is_signaled(bo->moving))
		goto out_clear;

	/*
	 * If possible, avoid waiting for the GPU with the fault state
	 * locked (what Linux would call holding mmap_sem).  In NetBSD
	 * the fault is always retriable, so take this path
	 * unconditionally: unlock everything, wait for the move to
	 * finish, and have UVM restart the fault.
	 */
	if (1) {		/* always retriable in NetBSD */
		ret = ERESTART;

		ttm_bo_get(bo);
		uvmfault_unlockall(vmf, vmf->entry->aref.ar_amap, NULL);
		(void) dma_fence_wait(bo->moving, true);
		dma_resv_unlock(bo->base.resv);
		ttm_bo_put(bo);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.  (Unreachable here; retained for reference
	 * from the Linux original.)
	 */
	err = dma_fence_wait(bo->moving, true);
	if (__predict_false(err != 0)) {
		ret = (err != -ERESTARTSYS) ? EINVAL/*SIGBUS*/ :
		    0/*retry access in userland*/;
		goto out_unlock;
	}

out_clear:
	dma_fence_put(bo->moving);
	bo->moving = NULL;

out_unlock:
	return ret;
}

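/*
 * ttm_bo_vm_reserve(bo, vmf)
 *
 *	Reserve bo for the duration of the fault.  Return 0 with the
 *	reservation held on success.  On contention, release the fault
 *	state, wait for the reservation to come free, and return
 *	ERESTART so the fault is retried from the top.
 */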
static int
ttm_bo_vm_reserve(struct ttm_buffer_object *bo, struct uvm_faultinfo *vmf)
{

	/*
	 * Work around the locking order reversal in the fault path
	 * (Linux's fault/nopfn, UVM's pgo_fault) between the fault
	 * state's locks (Linux's mmap_sem) and bo_reserve: perform a
	 * trylock operation for reserve, and if it fails, retry the
	 * fault after waiting for the buffer to become unreserved.
	 */
	if (__predict_false(!dma_resv_trylock(bo->base.resv))) {
		ttm_bo_get(bo);
		uvmfault_unlockall(vmf, vmf->entry->aref.ar_amap, NULL);
		/*
		 * Lock interruptibly just to wait until the current
		 * holder is done, then drop the lock again -- the
		 * fault will be retried from scratch anyway.
		 */
		if (!dma_resv_lock_interruptible(bo->base.resv, NULL))
			dma_resv_unlock(bo->base.resv);
		ttm_bo_put(bo);
		return ERESTART;
	}

	return 0;
}

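/*
 * ttm_bo_uvm_fault_reserved(vmf, vaddr, pps, npages, centeridx,
 *     access_type, flags)
 *
 *	Fault handler body, called with bo's reservation held: wait for
 *	any pipelined move to finish, find the physical pages backing
 *	the bo (system RAM or device memory), and enter mappings for
 *	them with pmap_enter.  Returns 0 to let userland retry the
 *	access, EINVAL to deliver SIGBUS, ENOMEM on allocation failure,
 *	or ERESTART (only via ttm_bo_vm_fault_idle, which has then
 *	already released all locks) to restart the fault.
 */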
static int
ttm_bo_uvm_fault_reserved(struct uvm_faultinfo *vmf, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *const uobj = vmf->entry->object.uvm_obj;
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);
	struct ttm_bo_device *const bdev = bo->bdev;
	struct ttm_mem_type_manager *man =
	    &bdev->man[bo->mem.mem_type];
	union {
		bus_addr_t base;
		struct ttm_tt *ttm;
	} u;
	size_t size __diagused;
	voff_t uoffset;		/* offset in bytes into bo */
	unsigned startpage;	/* offset in pages into bo */
	unsigned i;
	vm_prot_t vm_prot;	/* VM_PROT_* */
	pgprot_t pgprot;	/* VM_PROT_* | PMAP_* cacheability flags */
	int err, ret;

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
		return EINVAL;	/* SIGBUS */

	if (bdev->driver->fault_reserve_notify) {
		struct dma_fence *moving = dma_fence_get(bo->moving);

		err = bdev->driver->fault_reserve_notify(bo);
		switch (err) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			return 0;	/* retry access in userland */
		default:
			return EINVAL;	/* SIGBUS */
		}

		if (bo->moving != moving) {
			spin_lock(&ttm_bo_glob.lru_lock);
			ttm_bo_move_to_lru_tail(bo, NULL);
			spin_unlock(&ttm_bo_glob.lru_lock);
		}
		dma_fence_put(moving);
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (__predict_false(ret != 0))
		return ret;

	err = ttm_mem_io_lock(man, true);
	if (__predict_false(err != 0))
		return 0;	/* retry access in userland */
	err = ttm_mem_io_reserve_vm(bo);
	if (__predict_false(err != 0)) {
		ret = EINVAL;	/* SIGBUS */
		goto out_io_unlock;
	}

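	/*
	 * Determine the backing storage and mapping protection.  For
	 * system RAM, make sure the ttm_tt page array is populated;
	 * for device memory, compute the base bus address.  Non-cached
	 * placements go through ttm_io_prot to fold the appropriate
	 * PMAP_* cacheability flags into pgprot.
	 */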
	vm_prot = vmf->entry->protection;
	if (!bo->mem.bus.is_iomem) {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC
		};

		u.ttm = bo->ttm;
		size = (size_t)bo->ttm->num_pages << PAGE_SHIFT;
		if (ISSET(bo->mem.placement, TTM_PL_FLAG_CACHED))
			pgprot = vm_prot;
		else
			pgprot = ttm_io_prot(bo->mem.placement, vm_prot);
		if (ttm_tt_populate(bo->ttm, &ctx)) {
			ret = ENOMEM;
			goto out_io_unlock;
		}
	} else {
		u.base = (bo->mem.bus.base + bo->mem.bus.offset);
		size = bo->mem.bus.size;
		pgprot = ttm_io_prot(bo->mem.placement, vm_prot);
	}

	KASSERT(vmf->entry->start <= vaddr);
	KASSERT((vmf->entry->offset & (PAGE_SIZE - 1)) == 0);
	KASSERT(vmf->entry->offset <= size);
	KASSERT((vaddr - vmf->entry->start) <= (size - vmf->entry->offset));
	KASSERTMSG(((size_t)npages << PAGE_SHIFT <=
		((size - vmf->entry->offset) - (vaddr - vmf->entry->start))),
	    "vaddr=%jx npages=%d bo=%p is_iomem=%d size=%zu"
	    " start=%jx offset=%jx",
	    (uintmax_t)vaddr, npages, bo, (int)bo->mem.bus.is_iomem, size,
	    (uintmax_t)vmf->entry->start, (uintmax_t)vmf->entry->offset);
	uoffset = (vmf->entry->offset + (vaddr - vmf->entry->start));
	startpage = (uoffset >> PAGE_SHIFT);
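	/*
	 * Enter a mapping for each page we were asked for.  The
	 * physical address comes from the ttm_tt page array for system
	 * RAM, from the driver's io_mem_pfn callback if it has one, or
	 * else from bus_space_mmap on the bo's bus address.
	 */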
	for (i = 0; i < npages; i++) {
		paddr_t paddr;

		if ((flags & PGO_ALLPAGES) == 0 && i != centeridx)
			continue;
		if (pps[i] == PGO_DONTCARE)
			continue;
		if (!bo->mem.bus.is_iomem) {
			paddr = page_to_phys(u.ttm->pages[startpage + i]);
		} else if (bdev->driver->io_mem_pfn) {
			paddr = (paddr_t)(*bdev->driver->io_mem_pfn)(bo,
			    startpage + i) << PAGE_SHIFT;
		} else {
			const paddr_t cookie = bus_space_mmap(bdev->memt,
			    u.base, (off_t)(startpage + i) << PAGE_SHIFT,
			    vm_prot, 0);

			paddr = pmap_phys_address(cookie);
		}
		ret = pmap_enter(vmf->orig_map->pmap, vaddr + i*PAGE_SIZE,
		    paddr, vm_prot, (PMAP_CANFAIL | pgprot));
		if (ret) {
			KASSERT(ret != ERESTART);
			break;
		}
	}
	pmap_update(vmf->orig_map->pmap);
	ret = 0;		/* retry access in userland */
out_io_unlock:
	ttm_mem_io_unlock(man);
	KASSERT(ret != ERESTART);
	return ret;
}

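/*
 * ttm_bo_uvm_fault(vmf, vaddr, pps, npages, centeridx, access_type,
 *     flags)
 *
 *	Fault hook for the bo's uvm_object: reject copy-on-write
 *	mappings, reserve the bo, and hand the real work to
 *	ttm_bo_uvm_fault_reserved, making sure all locks are released
 *	by the time we return.
 */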
int
ttm_bo_uvm_fault(struct uvm_faultinfo *vmf, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *const uobj = vmf->entry->object.uvm_obj;
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);
	int ret;

	/* Thanks, uvm, but we don't need this lock.  */
	rw_exit(uobj->vmobjlock);

	/* Copy-on-write mappings make no sense for the graphics aperture.  */
	if (UVM_ET_ISCOPYONWRITE(vmf->entry)) {
		ret = EINVAL;	/* SIGBUS */
		goto out;
	}

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret) {
		/* ttm_bo_vm_reserve already unlocked on ERESTART */
		KASSERTMSG(ret == ERESTART, "ret=%d", ret);
		return ret;
	}

	ret = ttm_bo_uvm_fault_reserved(vmf, vaddr, pps, npages, centeridx,
	    access_type, flags);
	if (ret == ERESTART)	/* already unlocked on ERESTART */
		return ret;

	dma_resv_unlock(bo->base.resv);

out:	uvmfault_unlockall(vmf, vmf->entry->aref.ar_amap, NULL);
	return ret;
}

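/*
 * ttm_bo_mmap_object(bdev, offset, size, prot, uobjp, uoffsetp, file)
 *
 *	Look up the buffer object of bdev at offset, verify that file
 *	may access it, and return its uvm_object in *uobjp and the byte
 *	offset into it in *uoffsetp for a mapping of size bytes.
 *	Returns 0 on success with a reference held on the object, or a
 *	negative Linux-style error code on failure, matching the
 *	convention of the driver's verify_access callback.
 */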
int
ttm_bo_mmap_object(struct ttm_bo_device *bdev, off_t offset, size_t size,
    vm_prot_t prot, struct uvm_object **uobjp, voff_t *uoffsetp,
    struct file *file)
{
	const unsigned long startpage = (offset >> PAGE_SHIFT);
	const unsigned long npages = (size >> PAGE_SHIFT);
	struct ttm_buffer_object *bo;
	int ret;

	KASSERT(0 == (offset & (PAGE_SIZE - 1)));
	KASSERT(0 == (size & (PAGE_SIZE - 1)));

	ret = ttm_bo_uvm_lookup(bdev, startpage, npages, &bo);
	if (ret)
		goto fail0;
	KASSERTMSG((drm_vma_node_start(&bo->base.vma_node) <= startpage),
	    "mapping npages=0x%jx @ pfn=0x%jx"
	    " from vma npages=0x%jx @ pfn=0x%jx",
	    (uintmax_t)npages,
	    (uintmax_t)startpage,
	    (uintmax_t)drm_vma_node_size(&bo->base.vma_node),
	    (uintmax_t)drm_vma_node_start(&bo->base.vma_node));
	KASSERTMSG((npages <= drm_vma_node_size(&bo->base.vma_node)),
	    "mapping npages=0x%jx @ pfn=0x%jx"
	    " from vma npages=0x%jx @ pfn=0x%jx",
	    (uintmax_t)npages,
	    (uintmax_t)startpage,
	    (uintmax_t)drm_vma_node_size(&bo->base.vma_node),
	    (uintmax_t)drm_vma_node_start(&bo->base.vma_node));
	KASSERTMSG(((startpage - drm_vma_node_start(&bo->base.vma_node))
		<= (drm_vma_node_size(&bo->base.vma_node) - npages)),
	    "mapping npages=0x%jx @ pfn=0x%jx"
	    " from vma npages=0x%jx @ pfn=0x%jx",
	    (uintmax_t)npages,
	    (uintmax_t)startpage,
	    (uintmax_t)drm_vma_node_size(&bo->base.vma_node),
	    (uintmax_t)drm_vma_node_start(&bo->base.vma_node));

	/* XXX Just assert this?  */
	if (__predict_false(bdev->driver->verify_access == NULL)) {
		ret = -EPERM;
		goto fail1;
	}
	ret = (*bdev->driver->verify_access)(bo, file);
	if (ret)
		goto fail1;

	/* Success!  */
	*uobjp = &bo->uvmobj;
	*uoffsetp = (offset -
	    ((off_t)drm_vma_node_start(&bo->base.vma_node) << PAGE_SHIFT));
	return 0;

fail1:	ttm_bo_put(bo);
fail0:	KASSERT(ret);
	return ret;
}

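/*
 * ttm_bo_uvm_lookup(bdev, startpage, npages, bop)
 *
 *	Look up the buffer object of bdev whose vma node covers the
 *	range of npages pages starting at startpage, and return it in
 *	*bop with a reference held, or -ENOENT if there is none or it
 *	is being destroyed.
 */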
static int
ttm_bo_uvm_lookup(struct ttm_bo_device *bdev, unsigned long startpage,
    unsigned long npages, struct ttm_buffer_object **bop)
{
	struct ttm_buffer_object *bo = NULL;
	struct drm_vma_offset_node *node;

	drm_vma_offset_lock_lookup(bdev->vma_manager);
	node = drm_vma_offset_lookup_locked(bdev->vma_manager, startpage,
	    npages);
	if (node != NULL) {
		bo = container_of(node, struct ttm_buffer_object,
		    base.vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}
	drm_vma_offset_unlock_lookup(bdev->vma_manager);

	if (bo == NULL)
		return -ENOENT;

	*bop = bo;
	return 0;
}