/*	$NetBSD: ttm_bo_vm.c,v 1.5 2014/09/10 18:24:16 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_vm.c,v 1.5 2014/09/10 18:24:16 riastradh Exp $");

#include <sys/types.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_fault.h>

#include <linux/bitops.h>

#include <drm/drm_vma_manager.h>

#include <ttm/ttm_bo_driver.h>

static int	ttm_bo_uvm_fault_idle(struct ttm_buffer_object *,
		    struct uvm_faultinfo *, struct uvm_object *);
static int	ttm_bo_uvm_lookup(struct ttm_bo_device *, unsigned long,
		    unsigned long, struct ttm_buffer_object **);

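/*
 * ttm_bo_uvm_reference(uobj)
 *
 *	uvm pager op: acquire an additional reference on the ttm
 *	buffer object containing uobj.
 */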
void
ttm_bo_uvm_reference(struct uvm_object *uobj)
{
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);

	(void)ttm_bo_reference(bo);
}

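/*
 * ttm_bo_uvm_detach(uobj)
 *
 *	uvm pager op: release one reference on the ttm buffer object
 *	containing uobj; ttm_bo_unref clears the local pointer.
 */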
void
ttm_bo_uvm_detach(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);

	ttm_bo_unref(&bo);
	KASSERT(bo == NULL);
}

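/*
 * ttm_bo_uvm_fault(ufi, vaddr, pps, npages, centeridx, access_type, flags)
 *
 *	uvm fault handler: reserve the buffer object, wait for any
 *	pending move, and enter pmap mappings for the faulted pages,
 *	which may live in the graphics aperture rather than in ordinary
 *	system RAM.  Returns 0 on success, ERESTART to retry the fault,
 *	or another NetBSD error code on failure.
 */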
int
ttm_bo_uvm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *const uobj = ufi->entry->object.uvm_obj;
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);
	struct ttm_bo_device *const bdev = bo->bdev;
	struct ttm_mem_type_manager *man =
	    &bdev->man[bo->mem.mem_type];
	union {
		bus_addr_t base;
		struct ttm_tt *ttm;
	} u;
	size_t size __diagused;
	voff_t uoffset;		/* offset in bytes into bo */
	unsigned startpage;	/* offset in pages into bo */
	unsigned i;
	vm_prot_t vm_prot;	/* VM_PROT_* */
	pgprot_t pgprot;	/* VM_PROT_* | PMAP_* cacheability flags */
	unsigned mmapflags;
	int ret;

	/* Thanks, uvm, but we don't need this lock.  */
	mutex_exit(uobj->vmobjlock);

	/* Copy-on-write mappings make no sense for the graphics aperture.  */
	if (UVM_ET_ISCOPYONWRITE(ufi->entry)) {
		ret = -EIO;
		goto out0;
	}

	/* Try to lock the buffer.  */
	ret = ttm_bo_reserve(bo, true, true, false, NULL);
	if (ret) {
		if (ret != -EBUSY)
			goto out0;
		/*
		 * It's currently locked.  Unlock the fault (requires
		 * relocking uobj's vmobjlock first), wait for it, and
		 * start over.
		 */
		mutex_enter(uobj->vmobjlock);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		(void)ttm_bo_wait_unreserved(bo);
		/* ERESTART is already a native NetBSD value; don't negate.  */
		return ERESTART;
	}
    119 
	/* drm prime buffers are not mappable.  XXX Catch this earlier?  */
	if (bo->ttm && ISSET(bo->ttm->page_flags, TTM_PAGE_FLAG_SG)) {
		ret = -EINVAL;
		goto out1;
	}

	/* Notify the driver of a fault if it wants.  */
	if (bdev->driver->fault_reserve_notify) {
		ret = (*bdev->driver->fault_reserve_notify)(bo);
		if (ret) {
			if (ret == -ERESTART)
				ret = -EIO;
			goto out1;
		}
	}

	ret = ttm_bo_uvm_fault_idle(bo, ufi, uobj);
	if (ret) {
		/* It restarted and already unlocked the fault for us.  */
		KASSERT(ret == -ERESTART);
		/* Drop the reservation we still hold before retrying.  */
		ttm_bo_unreserve(bo);
		/* XXX errno Linux->NetBSD */
		return -ret;
	}

	ret = ttm_mem_io_lock(man, true);
	if (ret) {
		ret = -EIO;
		goto out1;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (ret) {
		ret = -EIO;
		goto out2;
	}

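	/*
	 * Find what backs the buffer: either a bus address range in
	 * the graphics aperture (iomem) or an array of system RAM
	 * pages in the ttm, and pick cacheability flags to match.
	 */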
	vm_prot = ufi->entry->protection;
	if (bo->mem.bus.is_iomem) {
		u.base = (bo->mem.bus.base + bo->mem.bus.offset);
		size = bo->mem.bus.size;
		pgprot = ttm_io_prot(bo->mem.placement, vm_prot);
	} else {
		u.ttm = bo->ttm;
		size = (bo->ttm->num_pages << PAGE_SHIFT);
		if (ISSET(bo->mem.placement, TTM_PL_FLAG_CACHED))
			pgprot = vm_prot;
		else
			pgprot = ttm_io_prot(bo->mem.placement, vm_prot);
		if ((*u.ttm->bdev->driver->ttm_tt_populate)(u.ttm)) {
			ret = -ENOMEM;
			goto out2;
		}
	}

	KASSERT(ufi->entry->start <= vaddr);
	KASSERT((ufi->entry->offset & (PAGE_SIZE - 1)) == 0);
	KASSERT(ufi->entry->offset <= size);
	KASSERT((vaddr - ufi->entry->start) <= (size - ufi->entry->offset));
	KASSERT(((size_t)npages << PAGE_SHIFT) <=
	    ((size - ufi->entry->offset) - (vaddr - ufi->entry->start)));
	uoffset = (ufi->entry->offset + (vaddr - ufi->entry->start));
	startpage = (uoffset >> PAGE_SHIFT);
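	/*
	 * Enter each faulted page into the pmap: translate iomem
	 * addresses through bus_space_mmap, ttm pages through
	 * page_to_phys.  PMAP_CANFAIL makes pmap_enter return an
	 * error rather than panic if resources are exhausted.
	 */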
	for (i = 0; i < npages; i++) {
		paddr_t paddr;

		/* XXX PGO_ALLPAGES?  */
		if (pps[i] == PGO_DONTCARE)
			continue;
		if (bo->mem.bus.is_iomem) {
			const paddr_t cookie = bus_space_mmap(bdev->memt,
			    u.base, ((startpage + i) << PAGE_SHIFT), vm_prot,
			    0);

			paddr = pmap_phys_address(cookie);
			mmapflags = pmap_mmap_flags(cookie);
		} else {
			paddr = page_to_phys(u.ttm->pages[startpage + i]);
			mmapflags = 0;
		}
		ret = -pmap_enter(ufi->orig_map->pmap, vaddr + i*PAGE_SIZE,
		    paddr, vm_prot, (PMAP_CANFAIL | pgprot | mmapflags));
		if (ret)
			goto out3;
	}

out3:	pmap_update(ufi->orig_map->pmap);
out2:	ttm_mem_io_unlock(man);
out1:	ttm_bo_unreserve(bo);
out0:	mutex_enter(uobj->vmobjlock);
	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
	/* XXX errno Linux->NetBSD */
	return -ret;
}

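/*
 * ttm_bo_uvm_fault_idle(bo, ufi, uobj)
 *
 *	If bo is being moved, wait for it to become idle.  Returns
 *	zero if it was idle, or -ERESTART if the fault had to be
 *	unlocked in order to wait, in which case the caller must
 *	unreserve bo and return ERESTART to retry the fault.
 */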
static int
ttm_bo_uvm_fault_idle(struct ttm_buffer_object *bo, struct uvm_faultinfo *ufi,
    struct uvm_object *uobj __unused)
{
	struct ttm_bo_device *const bdev = bo->bdev;
	int ret = 0;

	spin_lock(&bdev->fence_lock);
	if (__predict_true(!test_bit(TTM_BO_PRIV_FLAG_MOVING,
		    &bo->priv_flags)))
		goto out;
	if (ttm_bo_wait(bo, false, false, true) == 0)
		goto out;

	/*
	 * Unlock the fault so we can wait.  Pass NULL for the uvm
	 * object: the caller already released uobj's vmobjlock, so
	 * it must not be unlocked again here.
	 */
	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
	(void)ttm_bo_wait(bo, false, true, false);
	ret = -ERESTART;

out:	spin_unlock(&bdev->fence_lock);
	return ret;
}

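/*
 * ttm_bo_mmap_object(bdev, offset, size, prot, uobjp, uoffsetp, file)
 *
 *	Look up the ttm buffer object mapped at the page-aligned byte
 *	offset in bdev's mmap space, check access with the driver's
 *	verify_access op, and return a referenced uvm object in *uobjp
 *	and the byte offset into it in *uoffsetp.  Returns 0 on
 *	success, or a negative (Linux-style) error code on failure.
 */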
int
ttm_bo_mmap_object(struct ttm_bo_device *bdev, off_t offset, size_t size,
    vm_prot_t prot, struct uvm_object **uobjp, voff_t *uoffsetp,
    struct file *file)
{
	const unsigned long startpage = (offset >> PAGE_SHIFT);
	const unsigned long npages = (size >> PAGE_SHIFT);
	struct ttm_buffer_object *bo;
	int ret;

	KASSERT(0 == (offset & (PAGE_SIZE - 1)));
	KASSERT(0 == (size & (PAGE_SIZE - 1)));

	ret = ttm_bo_uvm_lookup(bdev, startpage, npages, &bo);
	if (ret)
		goto fail0;
	KASSERT((drm_vma_node_start(&bo->vma_node) << PAGE_SHIFT) <= offset);
	/* XXX Just assert this?  */
	if (__predict_false(bdev->driver->verify_access == NULL)) {
		ret = -EPERM;
		goto fail1;
	}
	ret = (*bdev->driver->verify_access)(bo, file);
	if (ret)
		goto fail1;

	/* Success!  */
	*uobjp = &bo->uvmobj;
	*uoffsetp = (offset -
	    (drm_vma_node_start(&bo->vma_node) << PAGE_SHIFT));
	return 0;

fail1:	ttm_bo_unref(&bo);
fail0:	KASSERT(ret);
	return ret;
}

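/*
 * ttm_bo_uvm_lookup(bdev, startpage, npages, bop)
 *
 *	Look up the buffer object whose mmap space covers npages pages
 *	starting at startpage, take a reference, and store it in *bop.
 *	Returns -ENOENT if there is none.
 */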
static int
ttm_bo_uvm_lookup(struct ttm_bo_device *bdev, unsigned long startpage,
    unsigned long npages, struct ttm_buffer_object **bop)
{
	struct ttm_buffer_object *bo = NULL;
	struct drm_vma_offset_node *node;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);
	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, startpage,
	    npages);
	if (node != NULL) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}
	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (bo == NULL)
		return -ENOENT;

	*bop = bo;
	return 0;
}