1 1.28 riastrad /* $NetBSD: ttm_bo_vm.c,v 1.28 2024/06/23 00:49:31 riastradh Exp $ */ 2 1.1 riastrad 3 1.1 riastrad /*- 4 1.1 riastrad * Copyright (c) 2014 The NetBSD Foundation, Inc. 5 1.1 riastrad * All rights reserved. 6 1.1 riastrad * 7 1.1 riastrad * This code is derived from software contributed to The NetBSD Foundation 8 1.1 riastrad * by Taylor R. Campbell. 9 1.1 riastrad * 10 1.1 riastrad * Redistribution and use in source and binary forms, with or without 11 1.1 riastrad * modification, are permitted provided that the following conditions 12 1.1 riastrad * are met: 13 1.1 riastrad * 1. Redistributions of source code must retain the above copyright 14 1.1 riastrad * notice, this list of conditions and the following disclaimer. 15 1.1 riastrad * 2. Redistributions in binary form must reproduce the above copyright 16 1.1 riastrad * notice, this list of conditions and the following disclaimer in the 17 1.1 riastrad * documentation and/or other materials provided with the distribution. 18 1.1 riastrad * 19 1.1 riastrad * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 1.1 riastrad * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 1.1 riastrad * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 1.1 riastrad * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 1.1 riastrad * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 1.1 riastrad * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 1.1 riastrad * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 1.1 riastrad * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 1.1 riastrad * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 1.1 riastrad * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 1.1 riastrad * POSSIBILITY OF SUCH DAMAGE. 30 1.1 riastrad */ 31 1.1 riastrad 32 1.25 riastrad /************************************************************************** 33 1.25 riastrad * 34 1.25 riastrad * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA 35 1.25 riastrad * All Rights Reserved. 36 1.25 riastrad * 37 1.25 riastrad * Permission is hereby granted, free of charge, to any person obtaining a 38 1.25 riastrad * copy of this software and associated documentation files (the 39 1.25 riastrad * "Software"), to deal in the Software without restriction, including 40 1.25 riastrad * without limitation the rights to use, copy, modify, merge, publish, 41 1.25 riastrad * distribute, sub license, and/or sell copies of the Software, and to 42 1.25 riastrad * permit persons to whom the Software is furnished to do so, subject to 43 1.25 riastrad * the following conditions: 44 1.25 riastrad * 45 1.25 riastrad * The above copyright notice and this permission notice (including the 46 1.25 riastrad * next paragraph) shall be included in all copies or substantial portions 47 1.25 riastrad * of the Software. 48 1.25 riastrad * 49 1.25 riastrad * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 50 1.25 riastrad * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 51 1.25 riastrad * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 52 1.25 riastrad * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 53 1.25 riastrad * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 54 1.25 riastrad * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 55 1.25 riastrad * USE OR OTHER DEALINGS IN THE SOFTWARE. 56 1.25 riastrad * 57 1.25 riastrad **************************************************************************/ 58 1.25 riastrad /* 59 1.25 riastrad * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 60 1.25 riastrad */ 61 1.25 riastrad 62 1.1 riastrad #include <sys/cdefs.h> 63 1.28 riastrad __KERNEL_RCSID(0, "$NetBSD: ttm_bo_vm.c,v 1.28 2024/06/23 00:49:31 riastradh Exp $"); 64 1.1 riastrad 65 1.1 riastrad #include <sys/types.h> 66 1.1 riastrad 67 1.1 riastrad #include <uvm/uvm.h> 68 1.1 riastrad #include <uvm/uvm_extern.h> 69 1.1 riastrad #include <uvm/uvm_fault.h> 70 1.1 riastrad 71 1.1 riastrad #include <linux/bitops.h> 72 1.1 riastrad 73 1.1 riastrad #include <drm/drm_vma_manager.h> 74 1.1 riastrad 75 1.1 riastrad #include <ttm/ttm_bo_driver.h> 76 1.1 riastrad 77 1.1 riastrad static int ttm_bo_uvm_lookup(struct ttm_bo_device *, unsigned long, 78 1.1 riastrad unsigned long, struct ttm_buffer_object **); 79 1.1 riastrad 80 1.1 riastrad void 81 1.1 riastrad ttm_bo_uvm_reference(struct uvm_object *uobj) 82 1.1 riastrad { 83 1.1 riastrad struct ttm_buffer_object *const bo = container_of(uobj, 84 1.1 riastrad struct ttm_buffer_object, uvmobj); 85 1.1 riastrad 86 1.16 riastrad (void)ttm_bo_get(bo); 87 1.1 riastrad } 88 1.1 riastrad 89 1.1 riastrad void 90 1.1 riastrad ttm_bo_uvm_detach(struct uvm_object *uobj) 91 1.1 riastrad { 92 1.1 riastrad struct ttm_buffer_object *bo = container_of(uobj, 93 1.1 riastrad struct ttm_buffer_object, uvmobj); 94 1.1 riastrad 95 1.16 riastrad ttm_bo_put(bo); 96 1.1 riastrad } 97 1.1 riastrad 98 1.25 riastrad static int 99 1.25 riastrad ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, struct uvm_faultinfo *vmf) 100 1.25 riastrad { 101 1.25 riastrad int err, ret = 0; 102 1.25 riastrad 103 1.25 riastrad if (__predict_true(!bo->moving)) 104 1.25 riastrad goto out_unlock; 105 1.25 riastrad 106 1.25 riastrad /* 107 1.25 riastrad * Quick non-stalling check for idle. 108 1.25 riastrad */ 109 1.25 riastrad if (dma_fence_is_signaled(bo->moving)) 110 1.25 riastrad goto out_clear; 111 1.25 riastrad 112 1.25 riastrad /* 113 1.25 riastrad * If possible, avoid waiting for GPU with mmap_sem 114 1.25 riastrad * held. 115 1.25 riastrad */ 116 1.25 riastrad if (1) { /* always retriable in NetBSD */ 117 1.25 riastrad ret = ERESTART; 118 1.25 riastrad 119 1.25 riastrad ttm_bo_get(bo); 120 1.25 riastrad uvmfault_unlockall(vmf, vmf->entry->aref.ar_amap, NULL); 121 1.25 riastrad (void) dma_fence_wait(bo->moving, true); 122 1.25 riastrad dma_resv_unlock(bo->base.resv); 123 1.25 riastrad ttm_bo_put(bo); 124 1.25 riastrad goto out_unlock; 125 1.25 riastrad } 126 1.25 riastrad 127 1.25 riastrad /* 128 1.25 riastrad * Ordinary wait. 129 1.25 riastrad */ 130 1.25 riastrad err = dma_fence_wait(bo->moving, true); 131 1.25 riastrad if (__predict_false(err != 0)) { 132 1.25 riastrad ret = (err != -ERESTARTSYS) ? EINVAL/*SIGBUS*/ : 133 1.25 riastrad 0/*retry access in userland*/; 134 1.25 riastrad goto out_unlock; 135 1.25 riastrad } 136 1.25 riastrad 137 1.25 riastrad out_clear: 138 1.25 riastrad dma_fence_put(bo->moving); 139 1.25 riastrad bo->moving = NULL; 140 1.25 riastrad 141 1.25 riastrad out_unlock: 142 1.25 riastrad return ret; 143 1.25 riastrad } 144 1.25 riastrad 145 1.25 riastrad static int 146 1.25 riastrad ttm_bo_vm_reserve(struct ttm_buffer_object *bo, struct uvm_faultinfo *vmf) 147 1.25 riastrad { 148 1.25 riastrad 149 1.25 riastrad /* 150 1.25 riastrad * Work around locking order reversal in fault / nopfn 151 1.25 riastrad * between mmap_sem and bo_reserve: Perform a trylock operation 152 1.25 riastrad * for reserve, and if it fails, retry the fault after waiting 153 1.25 riastrad * for the buffer to become unreserved. 154 1.25 riastrad */ 155 1.25 riastrad if (__predict_false(!dma_resv_trylock(bo->base.resv))) { 156 1.25 riastrad ttm_bo_get(bo); 157 1.25 riastrad uvmfault_unlockall(vmf, vmf->entry->aref.ar_amap, NULL); 158 1.25 riastrad if (!dma_resv_lock_interruptible(bo->base.resv, NULL)) 159 1.25 riastrad dma_resv_unlock(bo->base.resv); 160 1.25 riastrad ttm_bo_put(bo); 161 1.25 riastrad return ERESTART; 162 1.25 riastrad } 163 1.25 riastrad 164 1.25 riastrad return 0; 165 1.25 riastrad } 166 1.25 riastrad 167 1.25 riastrad static int 168 1.25 riastrad ttm_bo_uvm_fault_reserved(struct uvm_faultinfo *vmf, vaddr_t vaddr, 169 1.1 riastrad struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type, 170 1.1 riastrad int flags) 171 1.1 riastrad { 172 1.25 riastrad struct uvm_object *const uobj = vmf->entry->object.uvm_obj; 173 1.1 riastrad struct ttm_buffer_object *const bo = container_of(uobj, 174 1.1 riastrad struct ttm_buffer_object, uvmobj); 175 1.1 riastrad struct ttm_bo_device *const bdev = bo->bdev; 176 1.1 riastrad struct ttm_mem_type_manager *man = 177 1.1 riastrad &bdev->man[bo->mem.mem_type]; 178 1.1 riastrad union { 179 1.1 riastrad bus_addr_t base; 180 1.1 riastrad struct ttm_tt *ttm; 181 1.1 riastrad } u; 182 1.1 riastrad size_t size __diagused; 183 1.1 riastrad voff_t uoffset; /* offset in bytes into bo */ 184 1.1 riastrad unsigned startpage; /* offset in pages into bo */ 185 1.1 riastrad unsigned i; 186 1.27 riastrad vm_prot_t vm_prot = vmf->entry->protection; /* VM_PROT_* */ 187 1.27 riastrad pgprot_t prot = vm_prot; /* VM_PROT_* | PMAP_* cacheability flags */ 188 1.25 riastrad int err, ret; 189 1.1 riastrad 190 1.25 riastrad /* 191 1.25 riastrad * Refuse to fault imported pages. This should be handled 192 1.25 riastrad * (if at all) by redirecting mmap to the exporter. 193 1.25 riastrad */ 194 1.25 riastrad if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) 195 1.25 riastrad return EINVAL; /* SIGBUS */ 196 1.4 riastrad 197 1.25 riastrad if (bdev->driver->fault_reserve_notify) { 198 1.25 riastrad struct dma_fence *moving = dma_fence_get(bo->moving); 199 1.1 riastrad 200 1.25 riastrad err = bdev->driver->fault_reserve_notify(bo); 201 1.25 riastrad switch (err) { 202 1.25 riastrad case 0: 203 1.25 riastrad break; 204 1.25 riastrad case -EBUSY: 205 1.25 riastrad case -ERESTARTSYS: 206 1.25 riastrad return 0; /* retry access in userland */ 207 1.25 riastrad default: 208 1.25 riastrad return EINVAL; /* SIGBUS */ 209 1.25 riastrad } 210 1.1 riastrad 211 1.25 riastrad if (bo->moving != moving) { 212 1.25 riastrad spin_lock(&ttm_bo_glob.lru_lock); 213 1.25 riastrad ttm_bo_move_to_lru_tail(bo, NULL); 214 1.25 riastrad spin_unlock(&ttm_bo_glob.lru_lock); 215 1.1 riastrad } 216 1.25 riastrad dma_fence_put(moving); 217 1.1 riastrad } 218 1.1 riastrad 219 1.25 riastrad /* 220 1.25 riastrad * Wait for buffer data in transit, due to a pipelined 221 1.25 riastrad * move. 222 1.25 riastrad */ 223 1.25 riastrad ret = ttm_bo_vm_fault_idle(bo, vmf); 224 1.25 riastrad if (__predict_false(ret != 0)) 225 1.25 riastrad return ret; 226 1.25 riastrad 227 1.25 riastrad err = ttm_mem_io_lock(man, true); 228 1.25 riastrad if (__predict_false(err != 0)) 229 1.25 riastrad return 0; /* retry access in userland */ 230 1.25 riastrad err = ttm_mem_io_reserve_vm(bo); 231 1.25 riastrad if (__predict_false(err != 0)) { 232 1.25 riastrad ret = EINVAL; /* SIGBUS */ 233 1.25 riastrad goto out_io_unlock; 234 1.1 riastrad } 235 1.1 riastrad 236 1.27 riastrad prot = ttm_io_prot(bo->mem.placement, prot); 237 1.25 riastrad if (!bo->mem.bus.is_iomem) { 238 1.15 riastrad struct ttm_operation_ctx ctx = { 239 1.15 riastrad .interruptible = false, 240 1.15 riastrad .no_wait_gpu = false, 241 1.25 riastrad .flags = TTM_OPT_FLAG_FORCE_ALLOC 242 1.27 riastrad 243 1.15 riastrad }; 244 1.25 riastrad 245 1.1 riastrad u.ttm = bo->ttm; 246 1.22 riastrad size = (size_t)bo->ttm->num_pages << PAGE_SHIFT; 247 1.25 riastrad if (ttm_tt_populate(bo->ttm, &ctx)) { 248 1.25 riastrad ret = ENOMEM; 249 1.25 riastrad goto out_io_unlock; 250 1.1 riastrad } 251 1.25 riastrad } else { 252 1.25 riastrad u.base = (bo->mem.bus.base + bo->mem.bus.offset); 253 1.25 riastrad size = bo->mem.bus.size; 254 1.1 riastrad } 255 1.1 riastrad 256 1.25 riastrad KASSERT(vmf->entry->start <= vaddr); 257 1.25 riastrad KASSERT((vmf->entry->offset & (PAGE_SIZE - 1)) == 0); 258 1.25 riastrad KASSERT(vmf->entry->offset <= size); 259 1.25 riastrad KASSERT((vaddr - vmf->entry->start) <= (size - vmf->entry->offset)); 260 1.19 riastrad KASSERTMSG(((size_t)npages << PAGE_SHIFT <= 261 1.25 riastrad ((size - vmf->entry->offset) - (vaddr - vmf->entry->start))), 262 1.19 riastrad "vaddr=%jx npages=%d bo=%p is_iomem=%d size=%zu" 263 1.19 riastrad " start=%jx offset=%jx", 264 1.19 riastrad (uintmax_t)vaddr, npages, bo, (int)bo->mem.bus.is_iomem, size, 265 1.25 riastrad (uintmax_t)vmf->entry->start, (uintmax_t)vmf->entry->offset); 266 1.25 riastrad uoffset = (vmf->entry->offset + (vaddr - vmf->entry->start)); 267 1.1 riastrad startpage = (uoffset >> PAGE_SHIFT); 268 1.1 riastrad for (i = 0; i < npages; i++) { 269 1.1 riastrad paddr_t paddr; 270 1.1 riastrad 271 1.26 riastrad if ((flags & PGO_ALLPAGES) == 0 && i != centeridx) 272 1.26 riastrad continue; 273 1.1 riastrad if (pps[i] == PGO_DONTCARE) 274 1.1 riastrad continue; 275 1.21 riastrad if (!bo->mem.bus.is_iomem) { 276 1.21 riastrad paddr = page_to_phys(u.ttm->pages[startpage + i]); 277 1.21 riastrad } else if (bdev->driver->io_mem_pfn) { 278 1.22 riastrad paddr = (paddr_t)(*bdev->driver->io_mem_pfn)(bo, 279 1.22 riastrad startpage + i) << PAGE_SHIFT; 280 1.21 riastrad } else { 281 1.3 riastrad const paddr_t cookie = bus_space_mmap(bdev->memt, 282 1.22 riastrad u.base, (off_t)(startpage + i) << PAGE_SHIFT, 283 1.22 riastrad vm_prot, 0); 284 1.3 riastrad 285 1.3 riastrad paddr = pmap_phys_address(cookie); 286 1.27 riastrad #if 0 /* XXX Why no PMAP_* flags added here? */ 287 1.27 riastrad mmapflags = pmap_mmap_flags(cookie); 288 1.27 riastrad #endif 289 1.3 riastrad } 290 1.25 riastrad ret = pmap_enter(vmf->orig_map->pmap, vaddr + i*PAGE_SIZE, 291 1.27 riastrad paddr, vm_prot, PMAP_CANFAIL | prot); 292 1.25 riastrad if (ret) { 293 1.28 riastrad /* 294 1.28 riastrad * XXX Continue with ret=0 if i != centeridx, 295 1.28 riastrad * so we don't fail if only readahead pages 296 1.28 riastrad * fail? 297 1.28 riastrad */ 298 1.25 riastrad KASSERT(ret != ERESTART); 299 1.25 riastrad break; 300 1.25 riastrad } 301 1.1 riastrad } 302 1.25 riastrad pmap_update(vmf->orig_map->pmap); 303 1.25 riastrad ret = 0; /* retry access in userland */ 304 1.25 riastrad out_io_unlock: 305 1.25 riastrad ttm_mem_io_unlock(man); 306 1.25 riastrad KASSERT(ret != ERESTART); 307 1.25 riastrad return ret; 308 1.1 riastrad } 309 1.1 riastrad 310 1.25 riastrad int 311 1.25 riastrad ttm_bo_uvm_fault(struct uvm_faultinfo *vmf, vaddr_t vaddr, 312 1.25 riastrad struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type, 313 1.25 riastrad int flags) 314 1.1 riastrad { 315 1.25 riastrad struct uvm_object *const uobj = vmf->entry->object.uvm_obj; 316 1.25 riastrad struct ttm_buffer_object *const bo = container_of(uobj, 317 1.25 riastrad struct ttm_buffer_object, uvmobj); 318 1.25 riastrad int ret; 319 1.1 riastrad 320 1.25 riastrad /* Thanks, uvm, but we don't need this lock. */ 321 1.25 riastrad rw_exit(uobj->vmobjlock); 322 1.15 riastrad 323 1.25 riastrad /* Copy-on-write mappings make no sense for the graphics aperture. */ 324 1.25 riastrad if (UVM_ET_ISCOPYONWRITE(vmf->entry)) { 325 1.25 riastrad ret = EINVAL; /* SIGBUS */ 326 1.25 riastrad goto out; 327 1.25 riastrad } 328 1.23 riastrad 329 1.25 riastrad ret = ttm_bo_vm_reserve(bo, vmf); 330 1.25 riastrad if (ret) { 331 1.25 riastrad /* ttm_bo_vm_reserve already unlocked on ERESTART */ 332 1.25 riastrad KASSERTMSG(ret == ERESTART, "ret=%d", ret); 333 1.25 riastrad return ret; 334 1.23 riastrad } 335 1.23 riastrad 336 1.25 riastrad ret = ttm_bo_uvm_fault_reserved(vmf, vaddr, pps, npages, centeridx, 337 1.25 riastrad access_type, flags); 338 1.25 riastrad if (ret == ERESTART) /* already unlocked on ERESTART */ 339 1.25 riastrad return ret; 340 1.15 riastrad 341 1.25 riastrad dma_resv_unlock(bo->base.resv); 342 1.23 riastrad 343 1.25 riastrad out: uvmfault_unlockall(vmf, vmf->entry->aref.ar_amap, NULL); 344 1.23 riastrad return ret; 345 1.1 riastrad } 346 1.1 riastrad 347 1.1 riastrad int 348 1.1 riastrad ttm_bo_mmap_object(struct ttm_bo_device *bdev, off_t offset, size_t size, 349 1.1 riastrad vm_prot_t prot, struct uvm_object **uobjp, voff_t *uoffsetp, 350 1.1 riastrad struct file *file) 351 1.1 riastrad { 352 1.1 riastrad const unsigned long startpage = (offset >> PAGE_SHIFT); 353 1.1 riastrad const unsigned long npages = (size >> PAGE_SHIFT); 354 1.1 riastrad struct ttm_buffer_object *bo; 355 1.1 riastrad int ret; 356 1.1 riastrad 357 1.1 riastrad KASSERT(0 == (offset & (PAGE_SIZE - 1))); 358 1.1 riastrad KASSERT(0 == (size & (PAGE_SIZE - 1))); 359 1.1 riastrad 360 1.1 riastrad ret = ttm_bo_uvm_lookup(bdev, startpage, npages, &bo); 361 1.1 riastrad if (ret) 362 1.1 riastrad goto fail0; 363 1.20 riastrad KASSERTMSG((drm_vma_node_start(&bo->base.vma_node) <= startpage), 364 1.20 riastrad "mapping npages=0x%jx @ pfn=0x%jx" 365 1.20 riastrad " from vma npages=0x%jx @ pfn=0x%jx", 366 1.20 riastrad (uintmax_t)npages, 367 1.20 riastrad (uintmax_t)startpage, 368 1.20 riastrad (uintmax_t)drm_vma_node_size(&bo->base.vma_node), 369 1.20 riastrad (uintmax_t)drm_vma_node_start(&bo->base.vma_node)); 370 1.20 riastrad KASSERTMSG((npages <= drm_vma_node_size(&bo->base.vma_node)), 371 1.20 riastrad "mapping npages=0x%jx @ pfn=0x%jx" 372 1.20 riastrad " from vma npages=0x%jx @ pfn=0x%jx", 373 1.20 riastrad (uintmax_t)npages, 374 1.20 riastrad (uintmax_t)startpage, 375 1.20 riastrad (uintmax_t)drm_vma_node_size(&bo->base.vma_node), 376 1.20 riastrad (uintmax_t)drm_vma_node_start(&bo->base.vma_node)); 377 1.20 riastrad KASSERTMSG(((startpage - drm_vma_node_start(&bo->base.vma_node)) 378 1.20 riastrad <= (drm_vma_node_size(&bo->base.vma_node) - npages)), 379 1.20 riastrad "mapping npages=0x%jx @ pfn=0x%jx" 380 1.20 riastrad " from vma npages=0x%jx @ pfn=0x%jx", 381 1.20 riastrad (uintmax_t)npages, 382 1.20 riastrad (uintmax_t)startpage, 383 1.20 riastrad (uintmax_t)drm_vma_node_size(&bo->base.vma_node), 384 1.20 riastrad (uintmax_t)drm_vma_node_start(&bo->base.vma_node)); 385 1.20 riastrad 386 1.1 riastrad /* XXX Just assert this? */ 387 1.1 riastrad if (__predict_false(bdev->driver->verify_access == NULL)) { 388 1.1 riastrad ret = -EPERM; 389 1.1 riastrad goto fail1; 390 1.1 riastrad } 391 1.1 riastrad ret = (*bdev->driver->verify_access)(bo, file); 392 1.1 riastrad if (ret) 393 1.1 riastrad goto fail1; 394 1.1 riastrad 395 1.1 riastrad /* Success! */ 396 1.1 riastrad *uobjp = &bo->uvmobj; 397 1.1 riastrad *uoffsetp = (offset - 398 1.22 riastrad ((off_t)drm_vma_node_start(&bo->base.vma_node) << PAGE_SHIFT)); 399 1.1 riastrad return 0; 400 1.1 riastrad 401 1.16 riastrad fail1: ttm_bo_put(bo); 402 1.1 riastrad fail0: KASSERT(ret); 403 1.1 riastrad return ret; 404 1.1 riastrad } 405 1.1 riastrad 406 1.1 riastrad static int 407 1.1 riastrad ttm_bo_uvm_lookup(struct ttm_bo_device *bdev, unsigned long startpage, 408 1.1 riastrad unsigned long npages, struct ttm_buffer_object **bop) 409 1.1 riastrad { 410 1.1 riastrad struct ttm_buffer_object *bo = NULL; 411 1.1 riastrad struct drm_vma_offset_node *node; 412 1.1 riastrad 413 1.16 riastrad drm_vma_offset_lock_lookup(bdev->vma_manager); 414 1.16 riastrad node = drm_vma_offset_lookup_locked(bdev->vma_manager, startpage, 415 1.1 riastrad npages); 416 1.1 riastrad if (node != NULL) { 417 1.25 riastrad bo = container_of(node, struct ttm_buffer_object, 418 1.25 riastrad base.vma_node); 419 1.1 riastrad if (!kref_get_unless_zero(&bo->kref)) 420 1.1 riastrad bo = NULL; 421 1.1 riastrad } 422 1.16 riastrad drm_vma_offset_unlock_lookup(bdev->vma_manager); 423 1.1 riastrad 424 1.1 riastrad if (bo == NULL) 425 1.1 riastrad return -ENOENT; 426 1.1 riastrad 427 1.1 riastrad *bop = bo; 428 1.1 riastrad return 0; 429 1.1 riastrad } 430