/*	$NetBSD: ttm_bo_util.c,v 1.21 2021/12/18 23:45:44 riastradh Exp $	*/

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_util.c,v 1.21 2021/12/18 23:45:44 riastradh Exp $");

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

#ifdef __NetBSD__		/* PMAP_* caching flags for ttm_io_prot */
#include <uvm/uvm_pmap.h>
#include <linux/nbsd-namespace.h>
#endif

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				pr_err("Failed to expire sync object before unbinding TTM\n");
			return ret;
		}

		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem, ctx);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
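
/*
 * Usage sketch (illustrative only; the callback name and placement
 * checks below are hypothetical, not part of this file): a driver's
 * ->move hook commonly routes system<->TT transitions through
 * ttm_bo_move_ttm() and falls back to ttm_bo_move_memcpy() when no
 * accelerated copy path applies.
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *	    struct ttm_operation_ctx *ctx, struct ttm_mem_reg *new_mem)
 *	{
 *		struct ttm_mem_reg *old_mem = &bo->mem;
 *
 *		if ((old_mem->mem_type == TTM_PL_SYSTEM &&
 *		     new_mem->mem_type == TTM_PL_TT) ||
 *		    (old_mem->mem_type == TTM_PL_TT &&
 *		     new_mem->mem_type == TTM_PL_SYSTEM))
 *			return ttm_bo_move_ttm(bo, ctx, new_mem);
 *
 *		return ttm_bo_move_memcpy(bo, ctx, new_mem);
 *	}
 */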

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}


int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			       void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
#ifdef __NetBSD__
		const bus_addr_t bus_addr = (mem->bus.base + mem->bus.offset);
		int flags = BUS_SPACE_MAP_LINEAR;

		if (ISSET(mem->placement, TTM_PL_FLAG_WC))
			flags |= BUS_SPACE_MAP_PREFETCHABLE;
		/* XXX errno NetBSD->Linux */
		ret = -bus_space_map(bdev->memt, bus_addr, mem->bus.size,
		    flags, &mem->bus.memh);
		if (ret) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return ret;
		}
		addr = bus_space_vaddr(bdev->memt, mem->bus.memh);
#else
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
#endif
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
#ifdef __NetBSD__
		bus_space_unmap(bdev->memt, mem->bus.memh, mem->bus.size);
#else
		iounmap(virtual);
#endif
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

#ifdef __NetBSD__
#  define	ioread32	fake_ioread32
#  define	iowrite32	fake_iowrite32

static inline uint32_t
ioread32(const volatile uint32_t *p)
{
	uint32_t v;

	v = *p;
	__insn_barrier();	/* XXX ttm io barrier */

	return v;		/* XXX ttm byte order */
}

static inline void
iowrite32(uint32_t v, volatile uint32_t *p)
{

	__insn_barrier();	/* XXX ttm io barrier */
	*p = v;			/* XXX ttm byte order */
}
#endif

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

#ifdef __NetBSD__
#  undef	ioread32
#  undef	iowrite32
#endif

#ifdef CONFIG_X86
#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
#else
#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
#endif


/**
 * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
 * specified page protection.
 *
 * @page: The page to map.
 * @prot: The page protection.
 *
 * This function maps a TTM page using the kmap_atomic api if available,
 * otherwise falls back to vmap. The user must make sure that the
 * specified page does not have an aliased mapping with a different caching
 * policy unless the architecture explicitly allows it. Also mapping and
 * unmapping using this api must be correctly nested. Unmapping should
 * occur in the reverse order of mapping.
 */
void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
		return kmap_atomic(page);
	else
		return __ttm_kmap_atomic_prot(page, prot);
}
EXPORT_SYMBOL(ttm_kmap_atomic_prot);

/**
 * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
 * ttm_kmap_atomic_prot.
 *
 * @addr: The virtual address from the map.
 * @prot: The page protection.
 */
void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
{
	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
		kunmap_atomic(addr);
	else
		__ttm_kunmap_atomic(addr);
}
EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
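
/*
 * Nesting sketch for the pair above (illustrative; page_a, page_b and
 * mem are hypothetical): maps taken with a non-default protection must
 * be released in the reverse order they were taken, like kmap_atomic.
 *
 *	pgprot_t prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
 *	void *a = ttm_kmap_atomic_prot(page_a, prot);
 *	void *b = ttm_kmap_atomic_prot(page_b, prot);
 *
 *	... copy between a and b ...
 *
 *	ttm_kunmap_atomic_prot(b, prot);
 *	ttm_kunmap_atomic_prot(a, prot);
 */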

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
	dst = ttm_kmap_atomic_prot(d, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(dst, prot);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = ttm_kmap_atomic_prot(s, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(src, prot);

	return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm) {
		ret = ttm_tt_populate(ttm, ctx);
		if (ret)
			goto out1;
	}

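	/*
	 * Select the page walk order: forward by default, back to front
	 * when the new range starts below the end of the old range in
	 * the same memory type (i.e. the ranges could overlap).
	 */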
	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else {
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		}
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;
	fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;

	ttm_bo_get(bo);
	fbo->bo = bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_bo_glob.bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	INIT_LIST_HEAD(&fbo->base.lru);
	INIT_LIST_HEAD(&fbo->base.swap);
	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
	fbo->base.moving = NULL;
#ifdef __NetBSD__
	drm_vma_node_init(&fbo->vma_node);
	uvm_obj_init(&fbo->uvmobj, bo->bdev->driver->ttm_uvm_ops, true, 1);
	rw_obj_hold(bo->uvmobj.vmobjlock);
	uvm_obj_setlock(&fbo->uvmobj, bo->uvmobj.vmobjlock);
#else
	drm_vma_node_reset(&fbo->base.base.vma_node);
#endif

	kref_init(&fbo->base.list_kref);
	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.acc_size = 0;
	if (bo->base.resv == &bo->base._resv)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	*new_obj = &fbo->base;
	return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#ifdef __NetBSD__
	tmp &= ~PMAP_CACHE_MASK;
	if (caching_flags & TTM_PL_FLAG_WC)
		return (tmp | PMAP_WRITE_COMBINE);
	else
		return (tmp | PMAP_NOCACHE);
#else
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__) || defined(__mips__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
#endif
}
EXPORT_SYMBOL(ttm_io_prot);
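
/*
 * Usage sketch (illustrative; mem, pages and npages are hypothetical):
 * callers building a CPU mapping of bo memory derive the protection
 * from the placement's caching flags, as ttm_bo_kmap_ttm() below does.
 *
 *	pgprot_t prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
 *	void *va = vmap(pages, npages, 0, prot);
 */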

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
#ifdef __NetBSD__
	    {
		bus_addr_t addr;
		int flags = BUS_SPACE_MAP_LINEAR;
		int ret;

		addr = (bo->mem.bus.base + bo->mem.bus.offset + offset);
		if (ISSET(mem->placement, TTM_PL_FLAG_WC))
			flags |= BUS_SPACE_MAP_PREFETCHABLE;
		/* XXX errno NetBSD->Linux */
		ret = -bus_space_map(bo->bdev->memt, addr, size, flags,
		    &map->u.io.memh);
		if (ret)
			return ret;
		map->u.io.size = size;
		map->virtual = bus_space_vaddr(bo->bdev->memt, map->u.io.memh);
	    }
#else
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset,
					       size);
#endif
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
#ifdef __NetBSD__
		map->u.kmapped.page = ttm->pages[start_page];
		map->virtual = kmap(map->u.kmapped.page);
#else
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
#endif
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
#ifdef __NetBSD__
		map->u.vmapped.vsize = (vsize_t)num_pages << PAGE_SHIFT;
#endif
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;

	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
#ifdef __NetBSD__
		bus_space_unmap(bo->bdev->memt, map->u.io.memh,
		    map->u.io.size);
#else
		iounmap(map->virtual);
#endif
		break;
	case ttm_bo_map_vmap:
#ifdef __NetBSD__
		vunmap(map->virtual, map->u.vmapped.vsize >> PAGE_SHIFT);
#else
		vunmap(map->virtual);
#endif
		break;
	case ttm_bo_map_kmap:
#ifdef __NetBSD__
		kunmap(map->u.kmapped.page);
#else
		kunmap(map->page);
#endif
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
#ifndef __NetBSD__
	map->page = NULL;
#endif
}
EXPORT_SYMBOL(ttm_bo_kunmap);
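
/*
 * Usage sketch (illustrative; assumes the bo is already reserved and
 * the caller holds a reference): ttm_kmap_obj_virtual() from
 * ttm_bo_api.h returns the mapped address and whether it is I/O memory.
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *ptr;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret)
 *		return ret;
 *	ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	... access the buffer through ptr (memcpy_toio/_fromio if
 *	... is_iomem) ...
 *	ttm_bo_kunmap(&map);
 */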

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	dma_resv_add_excl_fence(bo->base.resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		dma_resv_unlock(&ghost_obj->base._resv);
		ttm_bo_put(ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
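
/*
 * Usage sketch (illustrative; mydrv_copy_buffer() is hypothetical): a
 * driver schedules the blit on the GPU, then hands the resulting fence
 * to ttm_bo_move_accel_cleanup() so the old placement is released only
 * once the copy has finished.  The function takes its own fence
 * references, so the caller drops its reference afterwards.
 *
 *	fence = mydrv_copy_buffer(bo, old_mem, new_mem);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
 *	dma_fence_put(fence);
 *	return ret;
 */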

int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *old_mem = &bo->mem;

	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

	int ret;

	dma_resv_add_excl_fence(bo->base.resv, fence);

	if (!evict) {
		struct ttm_buffer_object *ghost_obj;

		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		dma_resv_unlock(&ghost_obj->base._resv);
		ttm_bo_put(ghost_obj);

	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

		/**
		 * BO doesn't have a TTM we need to bind/unbind. Just remember
		 * this eviction and free up the allocation
		 */

		spin_lock(&from->move_lock);
		if (!from->move || dma_fence_is_later(fence, from->move)) {
			dma_fence_put(from->move);
			from->move = dma_fence_get(fence);
		}
		spin_unlock(&from->move_lock);

		ttm_bo_free_old_node(bo);

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

	} else {
		/**
		 * Last resort, wait for the move to be completed.
		 *
		 * Should never happen in practice.
		 */

		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);
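
/*
 * Note: ttm_bo_pipeline_move() is the pipelined variant of
 * ttm_bo_move_accel_cleanup().  On eviction from fixed (no-TTM) memory
 * it does not wait; it records @fence in the source manager's move
 * fence so the freed range is not handed out again until the copy has
 * completed.  Call sites look the same as for the non-pipelined helper:
 *
 *	ret = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
 */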

int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	int ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		return ret;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	memset(&bo->mem, 0, sizeof(bo->mem));
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->ttm = NULL;

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);

	return 0;
}