/*	$NetBSD: ttm_bo_util.c,v 1.16 2020/02/14 04:35:20 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_util.c,v 1.16 2020/02/14 04:35:20 riastradh Exp $");

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>
#include <linux/export.h>
#include <asm/barrier.h>

#ifdef __NetBSD__		/* PMAP_* caching flags for ttm_io_prot */
#include <uvm/uvm_pmap.h>
#include <drm/drm_auth_netbsd.h>
#include <linux/nbsd-namespace.h>
#endif

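/*
 * ttm_bo_free_old_node
 *
 * Release the memory node (mm_node) backing the buffer object's current
 * placement, handing it back to the memory-type manager.
 */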
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

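/*
 * ttm_bo_move_ttm
 *
 * Move a buffer backed by a struct ttm_tt between placements that TTM
 * can satisfy directly: unbind from the old non-system placement, set
 * the caching state for the new placement, and bind to it unless it is
 * system memory.  On success the buffer's mem is updated in place and
 * ownership of new_mem's mm_node is taken over.
 */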
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

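/*
 * ttm_mem_io_lock, ttm_mem_io_unlock
 *
 * Serialize io_mem_reserve/io_mem_free and the io-reserve LRU for a
 * memory-type manager.  Managers that set io_reserve_fastpath skip the
 * mutex entirely.
 */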
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

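/*
 * ttm_mem_io_evict
 *
 * Take the least recently used buffer object on the manager's
 * io-reserve LRU and unmap its virtual mappings so that its io space
 * can be reused.  Returns -EAGAIN if there is nothing to evict.
 */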
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}


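/*
 * ttm_mem_io_reserve
 *
 * Ask the driver to reserve io space for a memory region.  On the slow
 * path a reservation count is kept, and -EAGAIN from the driver
 * triggers eviction of another buffer's io reservation before retrying.
 */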
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

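/*
 * ttm_mem_io_free
 *
 * Drop one io reservation reference; when the count reaches zero, hand
 * the region back to the driver through io_mem_free.
 */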
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

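/*
 * ttm_mem_io_reserve_vm, ttm_mem_io_free_vm
 *
 * Reserve or release io space on behalf of CPU mappings of the buffer
 * object, tracking the state in io_reserved_vm and keeping the object
 * on the manager's io-reserve LRU while it is mapped.
 */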
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

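/*
 * ttm_mem_reg_ioremap, ttm_mem_reg_iounmap
 *
 * Map and unmap a whole io memory region into kernel virtual address
 * space for the memcpy fallback.  On NetBSD this goes through
 * bus_space_map/bus_space_unmap; on Linux it uses ioremap_wc or
 * ioremap_nocache depending on the placement's caching flags.
 * *virtual is left NULL for regions that are not iomem.
 */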
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
#ifdef __NetBSD__
		const bus_addr_t bus_addr = (mem->bus.base + mem->bus.offset);
		int flags = BUS_SPACE_MAP_LINEAR;

		if (ISSET(mem->placement, TTM_PL_FLAG_WC))
			flags |= BUS_SPACE_MAP_PREFETCHABLE;
		/* XXX errno NetBSD->Linux */
		ret = -bus_space_map(bdev->memt, bus_addr, mem->bus.size,
		    flags, &mem->bus.memh);
		if (ret) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return ret;
		}
		addr = bus_space_vaddr(bdev->memt, mem->bus.memh);
#else
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
#endif
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
#ifdef __NetBSD__
		bus_space_unmap(bdev->memt, mem->bus.memh, mem->bus.size);
#else
		iounmap(virtual);
#endif
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

#ifdef __NetBSD__
#  define	ioread32	fake_ioread32
#  define	iowrite32	fake_iowrite32

static inline uint32_t
ioread32(const volatile uint32_t *p)
{
	uint32_t v;

	v = *p;
	__insn_barrier();	/* XXX ttm io barrier */

	return v;		/* XXX ttm byte order */
}

static inline void
iowrite32(uint32_t v, volatile uint32_t *p)
{

	__insn_barrier();	/* XXX ttm io barrier */
	*p = v;			/* XXX ttm byte order */
}
#endif

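/*
 * ttm_copy_io_page
 *
 * Copy one page between two io mappings, 32 bits at a time, using the
 * io accessors (or the NetBSD shims above) for device memory.
 */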
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

#ifdef __NetBSD__
#  undef	ioread32
#  undef	iowrite32
#endif

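/*
 * ttm_copy_io_ttm_page, ttm_copy_ttm_io_page
 *
 * Copy a single page between an io mapping and a ttm page, mapping the
 * ttm page with the requested protection: kmap_atomic_prot on x86, and
 * vmap or kmap elsewhere depending on whether the protection differs
 * from PAGE_KERNEL.
 */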
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
#ifdef __NetBSD__
		vunmap(dst, 1);
#else
		vunmap(dst);
#endif
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
#ifdef __NetBSD__
		vunmap(src, 1);
#else
		vunmap(src);
#endif
	else
		kunmap(s);
#endif

	return 0;
}

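/*
 * ttm_bo_move_memcpy
 *
 * Fallback move path that copies the buffer with the CPU, page by page,
 * between the old and new placements.  Handles the degenerate cases
 * (system-to-system moves, unpopulated source), chooses the copy
 * direction so overlapping ranges within the same memory type are
 * copied safely, and on success takes over new_mem and releases the
 * old placement.
 */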
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	mutex_init(&fbo->wu_mutex);
#ifdef __NetBSD__
	drm_vma_node_init(&fbo->vma_node);
	uvm_obj_init(&fbo->uvmobj, bo->bdev->driver->ttm_uvm_ops, true, 1);
	mutex_obj_hold(bo->uvmobj.vmobjlock);
	uvm_obj_setlock(&fbo->uvmobj, bo->uvmobj.vmobjlock);
#else
	drm_vma_node_reset(&fbo->vma_node);
#endif
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

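/*
 * ttm_io_prot
 *
 * Derive the page protection to use for a mapping of memory with the
 * given TTM caching flags, starting from the supplied protection.  On
 * NetBSD this translates to the PMAP_* caching flags; on Linux it uses
 * the per-architecture pgprot_writecombine/pgprot_noncached helpers.
 */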
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#ifdef __NetBSD__
	tmp &= ~PMAP_CACHE_MASK;
	if (caching_flags & TTM_PL_FLAG_WC)
		return (tmp | PMAP_WRITE_COMBINE);
	else
		return (tmp | PMAP_NOCACHE);
#else
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
#endif
}
EXPORT_SYMBOL(ttm_io_prot);

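/*
 * ttm_bo_ioremap, ttm_bo_kmap_ttm
 *
 * Back ends for ttm_bo_kmap: the former maps io memory (premapped,
 * bus_space on NetBSD, ioremap on Linux), the latter maps ttm pages,
 * using kmap for a single cached page and vmap otherwise.
 */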
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
#ifdef __NetBSD__
	    {
		bus_addr_t addr;
		int flags = BUS_SPACE_MAP_LINEAR;
		int ret;

		addr = (bo->mem.bus.base + bo->mem.bus.offset + offset);
		if (ISSET(mem->placement, TTM_PL_FLAG_WC))
			flags |= BUS_SPACE_MAP_PREFETCHABLE;
		/* XXX errno NetBSD->Linux */
		ret = -bus_space_map(bo->bdev->memt, addr, size, flags,
		    &map->u.io.memh);
		if (ret)
			return ret;
		map->u.io.size = size;
		map->virtual = bus_space_vaddr(bo->bdev->memt, map->u.io.memh);
	    }
#else
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
#endif
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
#ifdef __NetBSD__
		map->u.kmapped.page = ttm->pages[start_page];
		map->virtual = kmap(map->u.kmapped.page);
#else
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
#endif
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
#ifdef __NetBSD__
		map->u.vmapped.vsize = (vsize_t)num_pages << PAGE_SHIFT;
#endif
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

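/*
 * ttm_bo_kmap
 *
 * Map num_pages of the buffer object, starting at start_page, into
 * kernel virtual address space, reserving io space first when the
 * memory is iomem.  The mapping is described by *map and undone with
 * ttm_bo_kunmap.
 */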
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

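/*
 * ttm_bo_kunmap
 *
 * Tear down a mapping made by ttm_bo_kmap, dispatching on the kmap type
 * recorded in the map object, and drop the io reservation taken when
 * the buffer was mapped.
 */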
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
#ifdef __NetBSD__
		bus_space_unmap(bo->bdev->memt, map->u.io.memh,
		    map->u.io.size);
#else
		iounmap(map->virtual);
#endif
		break;
	case ttm_bo_map_vmap:
#ifdef __NetBSD__
		vunmap(map->virtual, map->u.vmapped.vsize >> PAGE_SHIFT);
#else
		vunmap(map->virtual);
#endif
		break;
	case ttm_bo_map_kmap:
#ifdef __NetBSD__
		kunmap(map->u.kmapped.page);
#else
		kunmap(map->page);
#endif
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
#ifndef __NetBSD__
	map->page = NULL;
#endif
}
EXPORT_SYMBOL(ttm_bo_kunmap);

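/*
 * ttm_bo_move_accel_cleanup
 *
 * Finish an accelerated (GPU-copied) move: attach the copy fence to the
 * buffer's reservation object, and either wait and free the old node
 * (eviction) or hang the old placement on a ghost buffer object that is
 * released once the fence signals, so the move can be pipelined.
 */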
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct fence *fence,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);