/*	$NetBSD: ttm_bo_vm.c,v 1.17 2021/12/19 09:57:33 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_vm.c,v 1.17 2021/12/19 09:57:33 riastradh Exp $");

#include <sys/types.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_fault.h>

#include <linux/bitops.h>

#include <drm/drm_vma_manager.h>

#include <ttm/ttm_bo_driver.h>

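/*
 * These routines implement the UVM object operations -- reference,
 * detach, and fault -- for TTM buffer objects, so that they can be
 * mapped into user address space.  A TTM-based driver is expected to
 * wire them into its pager ops table, along the lines of the
 * following sketch (the table and driver names here are
 * hypothetical):
 *
 *	const struct uvm_pagerops mydrm_uvm_ops = {
 *		.pgo_reference = &ttm_bo_uvm_reference,
 *		.pgo_detach = &ttm_bo_uvm_detach,
 *		.pgo_fault = &ttm_bo_uvm_fault,
 *	};
 */
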
static int	ttm_bo_uvm_fault_idle(struct ttm_buffer_object *,
		    struct uvm_faultinfo *);
static int	ttm_bo_uvm_lookup(struct ttm_bo_device *, unsigned long,
		    unsigned long, struct ttm_buffer_object **);

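/*
 * ttm_bo_uvm_reference(uobj)
 *
 *	Acquire a reference on the TTM buffer object containing uobj,
 *	for use as the uvm object's pgo_reference method.
 */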
void
ttm_bo_uvm_reference(struct uvm_object *uobj)
{
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);

	(void)ttm_bo_get(bo);
}

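/*
 * ttm_bo_uvm_detach(uobj)
 *
 *	Release a reference on the TTM buffer object containing uobj,
 *	for use as the uvm object's pgo_detach method.
 */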
void
ttm_bo_uvm_detach(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);

	ttm_bo_put(bo);
}

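/*
 * ttm_bo_uvm_fault(ufi, vaddr, pps, npages, centeridx, access_type, flags)
 *
 *	Handle a page fault on a mapping of a TTM buffer object:
 *	reserve the object, wait for any pending move to finish, and
 *	enter the faulted pages directly with pmap_enter, using either
 *	the bus addresses of the object's aperture space (iomem) or
 *	the physical addresses of its ttm pages.  For use as the uvm
 *	object's pgo_fault method.
 */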
int
ttm_bo_uvm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *const uobj = ufi->entry->object.uvm_obj;
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);
	struct ttm_bo_device *const bdev = bo->bdev;
	struct ttm_mem_type_manager *man =
	    &bdev->man[bo->mem.mem_type];
	union {
		bus_addr_t base;
		struct ttm_tt *ttm;
	} u;
	size_t size __diagused;
	voff_t uoffset;		/* offset in bytes into bo */
	unsigned startpage;	/* offset in pages into bo */
	unsigned i;
	vm_prot_t vm_prot;	/* VM_PROT_* */
	pgprot_t pgprot;	/* VM_PROT_* | PMAP_* cacheability flags */
	int ret;

	/* Thanks, uvm, but we don't need this lock.  */
	rw_exit(uobj->vmobjlock);

	/* Copy-on-write mappings make no sense for the graphics aperture.  */
	if (UVM_ET_ISCOPYONWRITE(ufi->entry)) {
		ret = -EIO;
		goto out0;
	}

	/* Try to lock the buffer.  */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (ret) {
		if (ret != -EBUSY)
			goto out0;
		/*
		 * It's currently locked.  Unlock the fault, wait for
		 * it, and start over.
		 */
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
		if (!dma_resv_lock_interruptible(bo->base.resv, NULL))
			dma_resv_unlock(bo->base.resv);

		return ERESTART;
	}

	/* drm prime buffers are not mappable.  XXX Catch this earlier?  */
	if (bo->ttm && ISSET(bo->ttm->page_flags, TTM_PAGE_FLAG_SG)) {
		ret = -EINVAL;
		goto out1;
	}

	/* Notify the driver of a fault if it wants.  */
	if (bdev->driver->fault_reserve_notify) {
		ret = (*bdev->driver->fault_reserve_notify)(bo);
		if (ret) {
			if (ret == -ERESTART)
				ret = -EIO;
			goto out1;
		}
	}

	ret = ttm_bo_uvm_fault_idle(bo, ufi);
	if (ret) {
		KASSERT(ret == -ERESTART || ret == -EFAULT);
		/* ttm_bo_uvm_fault_idle calls uvmfault_unlockall for us.  */
		ttm_bo_unreserve(bo);
		/* XXX errno Linux->NetBSD */
		return -ret;
	}

	ret = ttm_mem_io_lock(man, true);
	if (ret) {
		ret = -EIO;
		goto out1;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (ret) {
		ret = -EIO;
		goto out2;
	}

	vm_prot = ufi->entry->protection;
	if (bo->mem.bus.is_iomem) {
		u.base = (bo->mem.bus.base + bo->mem.bus.offset);
		size = bo->mem.bus.size;
		pgprot = ttm_io_prot(bo->mem.placement, vm_prot);
	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC,
		};
		u.ttm = bo->ttm;
		size = (bo->ttm->num_pages << PAGE_SHIFT);
		if (ISSET(bo->mem.placement, TTM_PL_FLAG_CACHED))
			pgprot = vm_prot;
		else
			pgprot = ttm_io_prot(bo->mem.placement, vm_prot);
		if (ttm_tt_populate(u.ttm, &ctx)) {
			ret = -ENOMEM;
			goto out2;
		}
	}

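	/*
	 * Sanity-check the fault address and range against the size
	 * of the mapped object, then enter each requested page with
	 * pmap_enter: for iomem, via a cookie from bus_space_mmap;
	 * otherwise, via the physical address of the ttm page.
	 */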
	KASSERT(ufi->entry->start <= vaddr);
	KASSERT((ufi->entry->offset & (PAGE_SIZE - 1)) == 0);
	KASSERT(ufi->entry->offset <= size);
	KASSERT((vaddr - ufi->entry->start) <= (size - ufi->entry->offset));
	KASSERT(((size_t)npages << PAGE_SHIFT) <=
	    ((size - ufi->entry->offset) - (vaddr - ufi->entry->start)));
	uoffset = (ufi->entry->offset + (vaddr - ufi->entry->start));
	startpage = (uoffset >> PAGE_SHIFT);
	for (i = 0; i < npages; i++) {
		paddr_t paddr;

		/* XXX PGO_ALLPAGES?  */
		if (pps[i] == PGO_DONTCARE)
			continue;
		if (bo->mem.bus.is_iomem) {
			const paddr_t cookie = bus_space_mmap(bdev->memt,
			    u.base, ((startpage + i) << PAGE_SHIFT), vm_prot,
			    0);

			paddr = pmap_phys_address(cookie);
		} else {
			paddr = page_to_phys(u.ttm->pages[startpage + i]);
		}
		ret = -pmap_enter(ufi->orig_map->pmap, vaddr + i*PAGE_SIZE,
		    paddr, vm_prot, (PMAP_CANFAIL | pgprot));
		if (ret)
			goto out3;
	}

out3:	pmap_update(ufi->orig_map->pmap);
out2:	ttm_mem_io_unlock(man);
out1:	ttm_bo_unreserve(bo);
out0:	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
	/* XXX errno Linux->NetBSD */
	return -ret;
}

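/*
 * ttm_bo_uvm_fault_idle(bo, ufi)
 *
 *	Wait for any pending move of bo to complete.  Returns zero,
 *	without releasing any locks, if bo is not moving or the move
 *	has already completed.  Otherwise waits for it, unlocks the
 *	fault with uvmfault_unlockall, and returns -ERESTART so the
 *	caller can retry the fault, or -EFAULT if waiting failed.
 */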
static int
ttm_bo_uvm_fault_idle(struct ttm_buffer_object *bo, struct uvm_faultinfo *ufi)
{
	int ret = 0;

	if (__predict_true(!bo->moving))
		goto out0;

	if (dma_fence_is_signaled(bo->moving))
		goto out1;

	if (dma_fence_wait(bo->moving, true) != 0) {
		ret = -EFAULT;
		goto out2;
	}

	ret = -ERESTART;
out2:	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
out1:	dma_fence_put(bo->moving);
	bo->moving = NULL;
out0:	return ret;
}

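/*
 * ttm_bo_mmap_object(bdev, offset, size, prot, uobjp, uoffsetp, file)
 *
 *	Look up the TTM buffer object backing the page-aligned byte
 *	range [offset, offset + size) in bdev's VMA manager, verify
 *	that file is allowed to access it, and return a referenced uvm
 *	object for it in *uobjp and the offset into that object in
 *	*uoffsetp.  Returns a negative Linux error code on failure.
 *
 *	A driver's mmap-object hook might use this along the lines of
 *	the following sketch (the softc and function names here are
 *	hypothetical):
 *
 *		static int
 *		mydrm_mmap_object(off_t off, size_t len, vm_prot_t prot,
 *		    struct uvm_object **uobjp, voff_t *uoffp,
 *		    struct file *fp)
 *		{
 *			struct mydrm_softc *sc = ...;
 *
 *			return ttm_bo_mmap_object(&sc->sc_bdev, off, len,
 *			    prot, uobjp, uoffp, fp);
 *		}
 */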
int
ttm_bo_mmap_object(struct ttm_bo_device *bdev, off_t offset, size_t size,
    vm_prot_t prot, struct uvm_object **uobjp, voff_t *uoffsetp,
    struct file *file)
{
	const unsigned long startpage = (offset >> PAGE_SHIFT);
	const unsigned long npages = (size >> PAGE_SHIFT);
	struct ttm_buffer_object *bo;
	int ret;

	KASSERT(0 == (offset & (PAGE_SIZE - 1)));
	KASSERT(0 == (size & (PAGE_SIZE - 1)));

	ret = ttm_bo_uvm_lookup(bdev, startpage, npages, &bo);
	if (ret)
		goto fail0;
	KASSERT(drm_vma_node_start(&bo->base.vma_node) <= startpage);
	/* XXX Just assert this?  */
	if (__predict_false(bdev->driver->verify_access == NULL)) {
		ret = -EPERM;
		goto fail1;
	}
	ret = (*bdev->driver->verify_access)(bo, file);
	if (ret)
		goto fail1;

	/* Success!  */
	*uobjp = &bo->uvmobj;
	*uoffsetp = (offset -
	    (drm_vma_node_start(&bo->base.vma_node) << PAGE_SHIFT));
	return 0;

fail1:	ttm_bo_put(bo);
fail0:	KASSERT(ret);
	return ret;
}

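/*
 * ttm_bo_uvm_lookup(bdev, startpage, npages, bop)
 *
 *	Look up the TTM buffer object in bdev's VMA manager covering
 *	the page range [startpage, startpage + npages), and return a
 *	new reference to it in *bop.  Returns -ENOENT if there is none
 *	or if its last reference is already being released.
 */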
static int
ttm_bo_uvm_lookup(struct ttm_bo_device *bdev, unsigned long startpage,
    unsigned long npages, struct ttm_buffer_object **bop)
{
	struct ttm_buffer_object *bo = NULL;
	struct drm_vma_offset_node *node;

	drm_vma_offset_lock_lookup(bdev->vma_manager);
	node = drm_vma_offset_lookup_locked(bdev->vma_manager, startpage,
	    npages);
	if (node != NULL) {
		bo = container_of(node, struct ttm_buffer_object,
		    base.vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}
	drm_vma_offset_unlock_lookup(bdev->vma_manager);

	if (bo == NULL)
		return -ENOENT;

	*bop = bo;
	return 0;
}