/*	$NetBSD: ttm_bo_vm.c,v 1.22 2022/07/21 08:07:56 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_vm.c,v 1.22 2022/07/21 08:07:56 riastradh Exp $");

#include <sys/types.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_fault.h>

#include <linux/bitops.h>

#include <drm/drm_vma_manager.h>

#include <ttm/ttm_bo_driver.h>

static int	ttm_bo_uvm_fault_idle(struct ttm_buffer_object *,
		    struct uvm_faultinfo *);
static int	ttm_bo_uvm_lookup(struct ttm_bo_device *, unsigned long,
		    unsigned long, struct ttm_buffer_object **);

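/*
 * ttm_bo_uvm_reference(uobj)
 *
 *	uvm_object pager reference op: take another reference to the
 *	ttm_buffer_object that uobj is embedded in.
 */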
void
ttm_bo_uvm_reference(struct uvm_object *uobj)
{
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);

	(void)ttm_bo_get(bo);
}

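/*
 * ttm_bo_uvm_detach(uobj)
 *
 *	uvm_object pager detach op: release one reference to the
 *	ttm_buffer_object that uobj is embedded in.
 */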
void
ttm_bo_uvm_detach(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);

	ttm_bo_put(bo);
}

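/*
 * ttm_bo_uvm_fault(ufi, vaddr, pps, npages, centeridx, access_type, flags)
 *
 *	uvm_object pager fault op: reserve the buffer object, wait for
 *	any pending move to finish, and enter mappings for npages pages
 *	starting at vaddr directly into the faulting pmap.  Returns 0 on
 *	success, ERESTART if the fault must be retried, or a positive
 *	error code on failure.
 */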
int
ttm_bo_uvm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *const uobj = ufi->entry->object.uvm_obj;
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);
	struct ttm_bo_device *const bdev = bo->bdev;
	struct ttm_mem_type_manager *man =
	    &bdev->man[bo->mem.mem_type];
	union {
		bus_addr_t base;
		struct ttm_tt *ttm;
	} u;
	size_t size __diagused;
	voff_t uoffset;		/* offset in bytes into bo */
	unsigned startpage;	/* offset in pages into bo */
	unsigned i;
	vm_prot_t vm_prot;	/* VM_PROT_* */
	pgprot_t pgprot;	/* VM_PROT_* | PMAP_* cacheability flags */
	int ret;

	/* Thanks, uvm, but we don't need this lock.  */
	rw_exit(uobj->vmobjlock);

	/* Copy-on-write mappings make no sense for the graphics aperture.  */
	if (UVM_ET_ISCOPYONWRITE(ufi->entry)) {
		ret = -EIO;
		goto out0;
	}

	/* Try to lock the buffer.  */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (ret) {
		if (ret != -EBUSY)
			goto out0;
		/*
		 * It's currently locked.  Unlock the fault, wait for
		 * it, and start over.
		 */
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
		if (!dma_resv_lock_interruptible(bo->base.resv, NULL))
			dma_resv_unlock(bo->base.resv);

		return ERESTART;
	}

	/* drm prime buffers are not mappable.  XXX Catch this earlier?  */
	if (bo->ttm && ISSET(bo->ttm->page_flags, TTM_PAGE_FLAG_SG)) {
		ret = -EINVAL;
		goto out1;
	}

	/* Notify the driver of a fault if it wants.  */
	if (bdev->driver->fault_reserve_notify) {
		ret = (*bdev->driver->fault_reserve_notify)(bo);
		if (ret) {
			if (ret == -ERESTART)
				ret = -EIO;
			goto out1;
		}
	}

	ret = ttm_bo_uvm_fault_idle(bo, ufi);
	if (ret) {
		KASSERT(ret == -ERESTART || ret == -EFAULT);
		/* ttm_bo_uvm_fault_idle calls uvmfault_unlockall for us.  */
		ttm_bo_unreserve(bo);
		/* XXX errno Linux->NetBSD */
		return -ret;
	}

	ret = ttm_mem_io_lock(man, true);
	if (ret) {
		ret = -EIO;
		goto out1;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (ret) {
		ret = -EIO;
		goto out2;
	}

	vm_prot = ufi->entry->protection;
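	/*
	 * Find where the pages live: either an I/O memory aperture,
	 * mapped with the cacheability the placement asks for, or CPU
	 * pages backing a ttm_tt, which must be populated before they
	 * can be mapped.
	 */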
	if (bo->mem.bus.is_iomem) {
		u.base = (bo->mem.bus.base + bo->mem.bus.offset);
		size = bo->mem.bus.size;
		pgprot = ttm_io_prot(bo->mem.placement, vm_prot);
	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC,
		};
		u.ttm = bo->ttm;
		size = (size_t)bo->ttm->num_pages << PAGE_SHIFT;
		if (ISSET(bo->mem.placement, TTM_PL_FLAG_CACHED))
			pgprot = vm_prot;
		else
			pgprot = ttm_io_prot(bo->mem.placement, vm_prot);
		if (ttm_tt_populate(u.ttm, &ctx)) {
			ret = -ENOMEM;
			goto out2;
		}
	}

	KASSERT(ufi->entry->start <= vaddr);
	KASSERT((ufi->entry->offset & (PAGE_SIZE - 1)) == 0);
	KASSERT(ufi->entry->offset <= size);
	KASSERT((vaddr - ufi->entry->start) <= (size - ufi->entry->offset));
	KASSERTMSG(((size_t)npages << PAGE_SHIFT <=
		((size - ufi->entry->offset) - (vaddr - ufi->entry->start))),
	    "vaddr=%jx npages=%d bo=%p is_iomem=%d size=%zu"
	    " start=%jx offset=%jx",
	    (uintmax_t)vaddr, npages, bo, (int)bo->mem.bus.is_iomem, size,
	    (uintmax_t)ufi->entry->start, (uintmax_t)ufi->entry->offset);
	uoffset = (ufi->entry->offset + (vaddr - ufi->entry->start));
	startpage = (uoffset >> PAGE_SHIFT);
	for (i = 0; i < npages; i++) {
		paddr_t paddr;

		/* XXX PGO_ALLPAGES?  */
		if (pps[i] == PGO_DONTCARE)
			continue;
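		/*
		 * Determine the physical address of this page: from the
		 * ttm_tt page array for system memory, from the driver's
		 * io_mem_pfn callback if it provides one, or from
		 * bus_space_mmap on the aperture otherwise.
		 */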
		if (!bo->mem.bus.is_iomem) {
			paddr = page_to_phys(u.ttm->pages[startpage + i]);
		} else if (bdev->driver->io_mem_pfn) {
			paddr = (paddr_t)(*bdev->driver->io_mem_pfn)(bo,
			    startpage + i) << PAGE_SHIFT;
		} else {
			const paddr_t cookie = bus_space_mmap(bdev->memt,
			    u.base, (off_t)(startpage + i) << PAGE_SHIFT,
			    vm_prot, 0);

			paddr = pmap_phys_address(cookie);
		}
		ret = -pmap_enter(ufi->orig_map->pmap, vaddr + i*PAGE_SIZE,
		    paddr, vm_prot, (PMAP_CANFAIL | pgprot));
		if (ret)
			goto out3;
	}

out3:	pmap_update(ufi->orig_map->pmap);
out2:	ttm_mem_io_unlock(man);
out1:	ttm_bo_unreserve(bo);
out0:	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
	/* XXX errno Linux->NetBSD */
	return -ret;
}

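/*
 * ttm_bo_uvm_fault_idle(bo, ufi)
 *
 *	Wait for any in-flight move of bo to finish before it is
 *	mapped.  Returns 0 if bo is idle; otherwise unlocks the fault
 *	with uvmfault_unlockall and returns -ERESTART (waited, retry
 *	the fault) or -EFAULT (wait interrupted).  The caller retains
 *	its reservation of bo in all cases.
 */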
static int
ttm_bo_uvm_fault_idle(struct ttm_buffer_object *bo, struct uvm_faultinfo *ufi)
{
	int ret = 0;

	if (__predict_true(!bo->moving))
		goto out0;

	if (dma_fence_is_signaled(bo->moving))
		goto out1;

	if (dma_fence_wait(bo->moving, true) != 0) {
		ret = -EFAULT;
		goto out2;
	}

	ret = -ERESTART;
out2:	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
out1:	dma_fence_put(bo->moving);
	bo->moving = NULL;
out0:	return ret;
}

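/*
 * ttm_bo_mmap_object(bdev, offset, size, prot, uobjp, uoffsetp, file)
 *
 *	Look up the buffer object of bdev covering the page-aligned
 *	byte range [offset, offset + size) of the device's mmap space,
 *	check access with the driver's verify_access op, and return a
 *	referenced uvm object for it in *uobjp along with the offset
 *	into that object in *uoffsetp.  Returns 0 on success or a
 *	negative (Linux-style) error code on failure.
 */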
int
ttm_bo_mmap_object(struct ttm_bo_device *bdev, off_t offset, size_t size,
    vm_prot_t prot, struct uvm_object **uobjp, voff_t *uoffsetp,
    struct file *file)
{
	const unsigned long startpage = (offset >> PAGE_SHIFT);
	const unsigned long npages = (size >> PAGE_SHIFT);
	struct ttm_buffer_object *bo;
	int ret;

	KASSERT(0 == (offset & (PAGE_SIZE - 1)));
	KASSERT(0 == (size & (PAGE_SIZE - 1)));

	ret = ttm_bo_uvm_lookup(bdev, startpage, npages, &bo);
	if (ret)
		goto fail0;
	KASSERTMSG((drm_vma_node_start(&bo->base.vma_node) <= startpage),
	    "mapping npages=0x%jx @ pfn=0x%jx"
	    " from vma npages=0x%jx @ pfn=0x%jx",
	    (uintmax_t)npages,
	    (uintmax_t)startpage,
	    (uintmax_t)drm_vma_node_size(&bo->base.vma_node),
	    (uintmax_t)drm_vma_node_start(&bo->base.vma_node));
	KASSERTMSG((npages <= drm_vma_node_size(&bo->base.vma_node)),
	    "mapping npages=0x%jx @ pfn=0x%jx"
	    " from vma npages=0x%jx @ pfn=0x%jx",
	    (uintmax_t)npages,
	    (uintmax_t)startpage,
	    (uintmax_t)drm_vma_node_size(&bo->base.vma_node),
	    (uintmax_t)drm_vma_node_start(&bo->base.vma_node));
	KASSERTMSG(((startpage - drm_vma_node_start(&bo->base.vma_node))
		<= (drm_vma_node_size(&bo->base.vma_node) - npages)),
	    "mapping npages=0x%jx @ pfn=0x%jx"
	    " from vma npages=0x%jx @ pfn=0x%jx",
	    (uintmax_t)npages,
	    (uintmax_t)startpage,
	    (uintmax_t)drm_vma_node_size(&bo->base.vma_node),
	    (uintmax_t)drm_vma_node_start(&bo->base.vma_node));

	/* XXX Just assert this?  */
	if (__predict_false(bdev->driver->verify_access == NULL)) {
		ret = -EPERM;
		goto fail1;
	}
	ret = (*bdev->driver->verify_access)(bo, file);
	if (ret)
		goto fail1;

	/* Success!  */
	*uobjp = &bo->uvmobj;
	*uoffsetp = (offset -
	    ((off_t)drm_vma_node_start(&bo->base.vma_node) << PAGE_SHIFT));
	return 0;

fail1:	ttm_bo_put(bo);
fail0:	KASSERT(ret);
	return ret;
}

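/*
 * ttm_bo_uvm_lookup(bdev, startpage, npages, bop)
 *
 *	Look up the buffer object of bdev whose mmap space covers the
 *	range of npages pages starting at startpage, acquire a
 *	reference to it, and store it in *bop.  Returns 0 on success,
 *	-ENOENT if there is no such object or it is being destroyed.
 */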
static int
ttm_bo_uvm_lookup(struct ttm_bo_device *bdev, unsigned long startpage,
    unsigned long npages, struct ttm_buffer_object **bop)
{
	struct ttm_buffer_object *bo = NULL;
	struct drm_vma_offset_node *node;

	drm_vma_offset_lock_lookup(bdev->vma_manager);
	node = drm_vma_offset_lookup_locked(bdev->vma_manager, startpage,
	    npages);
	if (node != NULL) {
		bo = container_of(node, struct ttm_buffer_object, base.vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}
	drm_vma_offset_unlock_lookup(bdev->vma_manager);

	if (bo == NULL)
		return -ENOENT;

	*bop = bo;
	return 0;
}