/*	$NetBSD: ttm_bo_util.c,v 1.23 2021/12/19 09:57:42 riastradh Exp $	*/

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_util.c,v 1.23 2021/12/19 09:57:42 riastradh Exp $");

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_os_netbsd.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

#ifdef __NetBSD__		/* PMAP_* caching flags for ttm_io_prot */
#include <uvm/uvm_pmap.h>
#include <linux/nbsd-namespace.h>
#endif

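/*
 * Release the memory node backing bo->mem, if any, returning the space of
 * the old placement to its memory type manager.
 */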
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

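/*
 * Move a TTM-backed buffer by unbinding and rebinding its ttm_tt: wait for
 * the GPU, unbind from the old (non-system) placement, adjust the page
 * caching, and bind to the new placement if it is not system memory.
 */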
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				pr_err("Failed to expire sync object before unbinding TTM\n");
			return ret;
		}

		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem, ctx);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

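/*
 * Serialize io_mem_reserve/io_mem_free and the eviction of I/O
 * reservations, unless the memory type allows the lockless fast path.
 */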
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}

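/*
 * Evict the least recently used I/O reservation by unmapping its buffer
 * object's virtual mappings; returns -EAGAIN when there is nothing to
 * evict.
 */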
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}


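/*
 * Ask the driver to reserve I/O resources (e.g. an aperture range) for a
 * memory region, evicting older reservations and retrying when the driver
 * reports -EAGAIN.
 */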
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}

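/*
 * Drop one I/O reservation reference for a memory region and release the
 * driver's I/O resources once the count reaches zero.
 */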
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}

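/*
 * Reserve I/O resources for CPU mappings of a buffer object and, where the
 * LRU is used, put the object on the manager's io_reserve LRU so the
 * reservation can be reclaimed later.  ttm_mem_io_free_vm below undoes
 * this.
 */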
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

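/*
 * Map an entire memory region into kernel virtual address space for a CPU
 * copy, via bus_space_map(9) on NetBSD or ioremap()/ioremap_wc() on Linux;
 * *virtual is left NULL for regions that are not I/O memory.
 */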
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			       void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
#ifdef __NetBSD__
		const bus_addr_t bus_addr = (mem->bus.base + mem->bus.offset);
		int flags = BUS_SPACE_MAP_LINEAR;

		if (ISSET(mem->placement, TTM_PL_FLAG_WC))
			flags |= BUS_SPACE_MAP_PREFETCHABLE;
		/* XXX errno NetBSD->Linux */
		ret = -bus_space_map(bdev->memt, bus_addr, mem->bus.size,
		    flags, &mem->bus.memh);
		if (ret) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return ret;
		}
		addr = bus_space_vaddr(bdev->memt, mem->bus.memh);
#else
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
#endif
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
#ifdef __NetBSD__
		bus_space_unmap(bdev->memt, mem->bus.memh, mem->bus.size);
#else
		iounmap(virtual);
#endif
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

#ifdef __NetBSD__
# define ioread32 fake_ioread32
# define iowrite32 fake_iowrite32

static inline uint32_t
ioread32(const volatile uint32_t *p)
{
	uint32_t v;

	v = *p;
	__insn_barrier();	/* XXX ttm io barrier */

	return v;		/* XXX ttm byte order */
}

static inline void
iowrite32(uint32_t v, volatile uint32_t *p)
{

	__insn_barrier();	/* XXX ttm io barrier */
	*p = v;			/* XXX ttm byte order */
}
#endif

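/*
 * Copy a single page between two I/O mappings, 32 bits at a time, using
 * the (possibly fake) ioread32/iowrite32 accessors above.
 */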
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

#ifdef __NetBSD__
# undef ioread32
# undef iowrite32
#endif

#ifdef CONFIG_X86
#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
#else
#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
#endif


/**
 * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
 * specified page protection.
 *
 * @page: The page to map.
 * @prot: The page protection.
 *
 * This function maps a TTM page using the kmap_atomic api if available,
 * otherwise falls back to vmap. The user must make sure that the
 * specified page does not have an aliased mapping with a different caching
 * policy unless the architecture explicitly allows it. Also mapping and
 * unmapping using this api must be correctly nested. Unmapping should
 * occur in the reverse order of mapping.
 */
void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
		return kmap_atomic(page);
	else
		return __ttm_kmap_atomic_prot(page, prot);
}
EXPORT_SYMBOL(ttm_kmap_atomic_prot);

/**
 * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
 * ttm_kmap_atomic_prot.
 *
 * @addr: The virtual address from the map.
 * @prot: The page protection.
 */
void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
{
	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
		kunmap_atomic(addr);
	else
		__ttm_kunmap_atomic(addr);
}
EXPORT_SYMBOL(ttm_kunmap_atomic_prot);

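/*
 * Copy one page from an I/O mapping into a TTM page (and, in
 * ttm_copy_ttm_io_page below, the other way around) through a temporary
 * kernel mapping with the requested page protection.
 */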
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
	dst = ttm_kmap_atomic_prot(d, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(dst, prot);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = ttm_kmap_atomic_prot(s, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(src, prot);

	return 0;
}

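/*
 * Fallback move path: copy the buffer contents with the CPU, page by page,
 * mapping the old and new regions as needed and handling overlapping moves
 * within the same memory type by copying backwards.
 */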
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm) {
		ret = ttm_tt_populate(ttm, ctx);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else {
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		}
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;
	fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;

	ttm_bo_get(bo);
	fbo->bo = bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_bo_glob.bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	INIT_LIST_HEAD(&fbo->base.lru);
	INIT_LIST_HEAD(&fbo->base.swap);
	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
	fbo->base.moving = NULL;
#ifdef __NetBSD__
	drm_vma_node_init(&fbo->vma_node);
	uvm_obj_init(&fbo->base.uvmobj, bo->bdev->driver->ttm_uvm_ops, true, 1);
	rw_obj_hold(bo->uvmobj.vmobjlock);
	uvm_obj_setlock(&fbo->base.uvmobj, bo->uvmobj.vmobjlock);
#else
	drm_vma_node_reset(&fbo->base.base.vma_node);
#endif

	kref_init(&fbo->base.list_kref);
	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.acc_size = 0;
	if (bo->base.resv == &bo->base._resv)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	*new_obj = &fbo->base;
	return 0;
}

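/*
 * Translate TTM caching flags into the page protection bits to use when
 * mapping the pages: PMAP_* flags on NetBSD, pgprot_* adjustments on other
 * platforms.  Cached placements keep the protection unchanged.
 */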
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#ifdef __NetBSD__
	tmp &= ~PMAP_CACHE_MASK;
	if (caching_flags & TTM_PL_FLAG_WC)
		return (tmp | PMAP_WRITE_COMBINE);
	else
		return (tmp | PMAP_NOCACHE);
#else
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__) || defined(__mips__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
#endif
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
#ifdef __NetBSD__
	    {
		bus_addr_t addr;
		int flags = BUS_SPACE_MAP_LINEAR;
		int ret;

		addr = (bo->mem.bus.base + bo->mem.bus.offset + offset);
		if (ISSET(mem->placement, TTM_PL_FLAG_WC))
			flags |= BUS_SPACE_MAP_PREFETCHABLE;
		/* XXX errno NetBSD->Linux */
		ret = -bus_space_map(bo->bdev->memt, addr, size, flags,
		    &map->u.io.memh);
		if (ret)
			return ret;
		map->u.io.size = size;
		map->virtual = bus_space_vaddr(bo->bdev->memt, map->u.io.memh);
	    }
#else
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset,
					       size);
#endif
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
#ifdef __NetBSD__
		map->u.kmapped.page = ttm->pages[start_page];
		map->virtual = kmap(map->u.kmapped.page);
#else
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
#endif
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
#ifdef __NetBSD__
		map->u.vmapped.vsize = (vsize_t)num_pages << PAGE_SHIFT;
#endif
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

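/*
 * Map num_pages of a buffer object, starting at start_page, into kernel
 * virtual address space; I/O memory goes through ttm_bo_ioremap() and
 * system pages through ttm_bo_kmap_ttm().  Undo with ttm_bo_kunmap().
 */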
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;

	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

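/*
 * Tear down a mapping created by ttm_bo_kmap(), dispatching on how the
 * pages were mapped, and drop the associated I/O reservation.
 */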
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
#ifdef __NetBSD__
		bus_space_unmap(bo->bdev->memt, map->u.io.memh,
		    map->u.io.size);
#else
		iounmap(map->virtual);
#endif
		break;
	case ttm_bo_map_vmap:
#ifdef __NetBSD__
		vunmap(map->virtual, map->u.vmapped.vsize >> PAGE_SHIFT);
#else
		vunmap(map->virtual);
#endif
		break;
	case ttm_bo_map_kmap:
#ifdef __NetBSD__
		kunmap(map->u.kmapped.page);
#else
		kunmap(map->page);
#endif
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
#ifndef __NetBSD__
	map->page = NULL;
#endif
}
EXPORT_SYMBOL(ttm_bo_kunmap);

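/*
 * Finish an accelerated (GPU-copied) move: attach the fence to the
 * reservation object, then either wait and free the old node (eviction) or
 * hang the old backing store on a ghost object that is released once the
 * fence signals.
 */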
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	dma_resv_add_excl_fence(bo->base.resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		dma_resv_unlock(&ghost_obj->base._resv);
		ttm_bo_put(ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

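/*
 * Like ttm_bo_move_accel_cleanup(), but evictions out of fixed memory are
 * also pipelined: the fence is recorded on the source memory type manager
 * so the space freed by the eviction is not reused before the copy has
 * completed.
 */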
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *old_mem = &bo->mem;

	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

	int ret;

	dma_resv_add_excl_fence(bo->base.resv, fence);

	if (!evict) {
		struct ttm_buffer_object *ghost_obj;

		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		dma_resv_unlock(&ghost_obj->base._resv);
		ttm_bo_put(ghost_obj);

	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

		/**
		 * BO doesn't have a TTM we need to bind/unbind. Just remember
		 * this eviction and free up the allocation.
		 */

		spin_lock(&from->move_lock);
		if (!from->move || dma_fence_is_later(fence, from->move)) {
			dma_fence_put(from->move);
			from->move = dma_fence_get(fence);
		}
		spin_unlock(&from->move_lock);

		ttm_bo_free_old_node(bo);

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

	} else {
		/**
		 * Last resort, wait for the move to be completed.
		 *
		 * Should never happen in practice.
		 */

		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);

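/*
 * Transfer the buffer object's current backing store to a ghost object
 * (which carries the pending fences) and leave the BO empty in system
 * memory with no ttm_tt attached.
 */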
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	int ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		return ret;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	memset(&bo->mem, 0, sizeof(bo->mem));
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->ttm = NULL;

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);

	return 0;
}