/*	$NetBSD: ttm_bo_vm.c,v 1.18 2021/12/19 11:09:25 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_vm.c,v 1.18 2021/12/19 11:09:25 riastradh Exp $");

#include <sys/types.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_fault.h>

#include <linux/bitops.h>

#include <drm/drm_vma_manager.h>

#include <ttm/ttm_bo_driver.h>

static int	ttm_bo_uvm_fault_idle(struct ttm_buffer_object *,
		    struct uvm_faultinfo *);
static int	ttm_bo_uvm_lookup(struct ttm_bo_device *, unsigned long,
		    unsigned long, struct ttm_buffer_object **);

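/*
 * ttm_bo_uvm_reference(uobj)
 *
 *	UVM pager reference op: take a new reference to the ttm buffer
 *	object in which uobj is embedded.
 */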
void
ttm_bo_uvm_reference(struct uvm_object *uobj)
{
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);

	(void)ttm_bo_get(bo);
}

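/*
 * ttm_bo_uvm_detach(uobj)
 *
 *	UVM pager detach op: release a reference to the ttm buffer
 *	object in which uobj is embedded.
 */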
void
ttm_bo_uvm_detach(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);

	ttm_bo_put(bo);
}

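/*
 * ttm_bo_uvm_fault(ufi, vaddr, pps, npages, centeridx, access_type, flags)
 *
 *	UVM fault handler for ttm buffer objects: reserve the buffer,
 *	wait for any pending move to finish, and enter pmap mappings
 *	for the faulting pages from either the bus space aperture
 *	(iomem) or the ttm page array.  Returns 0 on success, ERESTART
 *	to have uvm retry the fault, or another NetBSD errno on
 *	failure.
 */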
int
ttm_bo_uvm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *const uobj = ufi->entry->object.uvm_obj;
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);
	struct ttm_bo_device *const bdev = bo->bdev;
	struct ttm_mem_type_manager *man =
	    &bdev->man[bo->mem.mem_type];
	union {
		bus_addr_t base;
		struct ttm_tt *ttm;
	} u;
	size_t size __diagused;
	voff_t uoffset;		/* offset in bytes into bo */
	unsigned startpage;	/* offset in pages into bo */
	unsigned i;
	vm_prot_t vm_prot;	/* VM_PROT_* */
	pgprot_t pgprot;	/* VM_PROT_* | PMAP_* cacheability flags */
	int ret;

	/* Thanks, uvm, but we don't need this lock.  */
	rw_exit(uobj->vmobjlock);

	/* Copy-on-write mappings make no sense for the graphics aperture.  */
	if (UVM_ET_ISCOPYONWRITE(ufi->entry)) {
		ret = -EIO;
		goto out0;
	}

	/* Try to lock the buffer.  */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (ret) {
		if (ret != -EBUSY)
			goto out0;
		/*
		 * It's currently locked.  Unlock the fault, wait for
		 * it, and start over.
		 */
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
		if (!dma_resv_lock_interruptible(bo->base.resv, NULL))
			dma_resv_unlock(bo->base.resv);

		return ERESTART;
	}

	/* drm prime buffers are not mappable.  XXX Catch this earlier?  */
	if (bo->ttm && ISSET(bo->ttm->page_flags, TTM_PAGE_FLAG_SG)) {
		ret = -EINVAL;
		goto out1;
	}

	/* Notify the driver of a fault if it wants.  */
	if (bdev->driver->fault_reserve_notify) {
		ret = (*bdev->driver->fault_reserve_notify)(bo);
		if (ret) {
			if (ret == -ERESTART)
				ret = -EIO;
			goto out1;
		}
	}

	ret = ttm_bo_uvm_fault_idle(bo, ufi);
	if (ret) {
		KASSERT(ret == -ERESTART || ret == -EFAULT);
		/* ttm_bo_uvm_fault_idle calls uvmfault_unlockall for us.  */
		ttm_bo_unreserve(bo);
		/* XXX errno Linux->NetBSD */
		return -ret;
	}

	ret = ttm_mem_io_lock(man, true);
	if (ret) {
		ret = -EIO;
		goto out1;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (ret) {
		ret = -EIO;
		goto out2;
	}

	vm_prot = ufi->entry->protection;
	if (bo->mem.bus.is_iomem) {
		u.base = (bo->mem.bus.base + bo->mem.bus.offset);
		size = bo->mem.bus.size;
		pgprot = ttm_io_prot(bo->mem.placement, vm_prot);
	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC,
		};
		u.ttm = bo->ttm;
		size = (bo->ttm->num_pages << PAGE_SHIFT);
		if (ISSET(bo->mem.placement, TTM_PL_FLAG_CACHED))
			pgprot = vm_prot;
		else
			pgprot = ttm_io_prot(bo->mem.placement, vm_prot);
		if (ttm_tt_populate(u.ttm, &ctx)) {
			ret = -ENOMEM;
			goto out2;
		}
	}

	KASSERT(ufi->entry->start <= vaddr);
	KASSERT((ufi->entry->offset & (PAGE_SIZE - 1)) == 0);
	KASSERT(ufi->entry->offset <= size);
	KASSERT((vaddr - ufi->entry->start) <= (size - ufi->entry->offset));
	KASSERT(((size_t)npages << PAGE_SHIFT) <=
	    ((size - ufi->entry->offset) - (vaddr - ufi->entry->start)));
	uoffset = (ufi->entry->offset + (vaddr - ufi->entry->start));
	startpage = (uoffset >> PAGE_SHIFT);
	for (i = 0; i < npages; i++) {
		paddr_t paddr;

		/* XXX PGO_ALLPAGES?  */
		if (pps[i] == PGO_DONTCARE)
			continue;
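		/*
		 * Find the physical address of this page, either in
		 * the bus space aperture (iomem) or from the ttm page
		 * array.  pmap_enter returns a NetBSD errno; negate
		 * it to match the Linux convention used for ret here.
		 */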
		if (bo->mem.bus.is_iomem) {
			const paddr_t cookie = bus_space_mmap(bdev->memt,
			    u.base, ((startpage + i) << PAGE_SHIFT), vm_prot,
			    0);

			paddr = pmap_phys_address(cookie);
		} else {
			paddr = page_to_phys(u.ttm->pages[startpage + i]);
		}
		ret = -pmap_enter(ufi->orig_map->pmap, vaddr + i*PAGE_SIZE,
		    paddr, vm_prot, (PMAP_CANFAIL | pgprot));
		if (ret)
			goto out3;
	}

out3:	pmap_update(ufi->orig_map->pmap);
out2:	ttm_mem_io_unlock(man);
out1:	ttm_bo_unreserve(bo);
out0:	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
	/* XXX errno Linux->NetBSD */
	return -ret;
}

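/*
 * ttm_bo_uvm_fault_idle(bo, ufi)
 *
 *	If bo has a move pending, wait for it to complete.  Returns 0
 *	if there was nothing to wait for; otherwise unlocks the fault
 *	and returns -ERESTART if the wait completed (caller should
 *	retry) or -EFAULT if it was interrupted.  Note the Linux-style
 *	negative errnos; the caller negates them for uvm.
 */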
static int
ttm_bo_uvm_fault_idle(struct ttm_buffer_object *bo, struct uvm_faultinfo *ufi)
{
	int ret = 0;

	if (__predict_true(!bo->moving))
		goto out0;

	if (dma_fence_is_signaled(bo->moving))
		goto out1;

	if (dma_fence_wait(bo->moving, true) != 0) {
		ret = -EFAULT;
		goto out2;
	}

	ret = -ERESTART;
out2:	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
out1:	dma_fence_put(bo->moving);
	bo->moving = NULL;
out0:	return ret;
}

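/*
 * ttm_bo_mmap_object(bdev, offset, size, prot, uobjp, uoffsetp, file)
 *
 *	Look up the ttm buffer object backing the page-aligned byte
 *	range [offset, offset + size) in bdev's mmap space, have the
 *	driver verify access, and return a referenced uvm object in
 *	*uobjp and the byte offset into it in *uoffsetp.
 *
 *	Sketch of a caller (hypothetical; the names around the call
 *	are illustrative, not from this file):
 *
 *		struct uvm_object *uobj;
 *		voff_t uoffset;
 *		int ret;
 *
 *		ret = ttm_bo_mmap_object(bdev, off, len, prot, &uobj,
 *		    &uoffset, fp);
 *		if (ret)
 *			return -ret;	(Linux->NetBSD errno)
 *
 *	The caller would then map uobj at uoffset with uvm_map, and on
 *	failure drop the reference with ttm_bo_uvm_detach(uobj).
 */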
int
ttm_bo_mmap_object(struct ttm_bo_device *bdev, off_t offset, size_t size,
    vm_prot_t prot, struct uvm_object **uobjp, voff_t *uoffsetp,
    struct file *file)
{
	const unsigned long startpage = (offset >> PAGE_SHIFT);
	const unsigned long npages = (size >> PAGE_SHIFT);
	struct ttm_buffer_object *bo;
	int ret;

	KASSERT(0 == (offset & (PAGE_SIZE - 1)));
	KASSERT(0 == (size & (PAGE_SIZE - 1)));

	ret = ttm_bo_uvm_lookup(bdev, startpage, npages, &bo);
	if (ret)
		goto fail0;
	KASSERT(drm_vma_node_start(&bo->base.vma_node) <= startpage);
	/* XXX Just assert this?  */
	if (__predict_false(bdev->driver->verify_access == NULL)) {
		ret = -EPERM;
		goto fail1;
	}
	ret = (*bdev->driver->verify_access)(bo, file);
	if (ret)
		goto fail1;

	/* Success!  */
	*uobjp = &bo->uvmobj;
	*uoffsetp = (offset -
	    (drm_vma_node_start(&bo->base.vma_node) << PAGE_SHIFT));
	return 0;

fail1:	ttm_bo_put(bo);
fail0:	KASSERT(ret);
	return ret;
}

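/*
 * ttm_bo_uvm_lookup(bdev, startpage, npages, bop)
 *
 *	Look up the buffer object in bdev's drm vma manager covering
 *	the page range [startpage, startpage + npages) and take a
 *	reference to it, or return -ENOENT (Linux-style errno) if
 *	there is none.
 */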
static int
ttm_bo_uvm_lookup(struct ttm_bo_device *bdev, unsigned long startpage,
    unsigned long npages, struct ttm_buffer_object **bop)
{
	struct ttm_buffer_object *bo = NULL;
	struct drm_vma_offset_node *node;

	drm_vma_offset_lock_lookup(bdev->vma_manager);
	node = drm_vma_offset_lookup_locked(bdev->vma_manager, startpage,
	    npages);
	if (node != NULL) {
		bo = container_of(node, struct ttm_buffer_object,
		    base.vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}
	drm_vma_offset_unlock_lookup(bdev->vma_manager);

	if (bo == NULL)
		return -ENOENT;

	*bop = bo;
	return 0;
}