/*	$NetBSD: ttm_bo_util.c,v 1.7 2018/08/27 04:58:37 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_util.c,v 1.7 2018/08/27 04:58:37 riastradh Exp $");

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>
#include <linux/export.h>

#ifdef __NetBSD__		/* PMAP_* caching flags for ttm_io_prot */
#include <uvm/uvm_pmap.h>
#endif
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
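
/*
 * Editorial sketch (not part of the original file): a driver's ->move()
 * callback commonly tries an accelerated blit first and falls back to
 * the generic helpers above and below; mydrv_move_blit() here is
 * hypothetical.
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *	    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 *	{
 *		struct ttm_mem_reg *old_mem = &bo->mem;
 *
 *		if (old_mem->mem_type == TTM_PL_SYSTEM &&
 *		    new_mem->mem_type == TTM_PL_TT)
 *			return ttm_bo_move_ttm(bo, evict, no_wait_gpu,
 *			    new_mem);
 *		if (mydrv_move_blit(bo, no_wait_gpu, new_mem) == 0)
 *			return 0;
 *		return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 *	}
 */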

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);
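
/*
 * Editorial note: unless the fast path is enabled, callers bracket
 * ttm_mem_io_reserve()/ttm_mem_io_free() with this lock, as
 * ttm_mem_reg_ioremap() and ttm_bo_kmap() do below:
 *
 *	(void) ttm_mem_io_lock(man, false);
 *	ret = ttm_mem_io_reserve(bdev, mem);
 *	ttm_mem_io_unlock(man);
 */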

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}


int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
#ifdef __NetBSD__
		const bus_addr_t bus_addr = (mem->bus.base + mem->bus.offset);
		int flags = BUS_SPACE_MAP_LINEAR;

		if (ISSET(mem->placement, TTM_PL_FLAG_WC))
			flags |= BUS_SPACE_MAP_PREFETCHABLE;
		/* XXX errno NetBSD->Linux */
		ret = -bus_space_map(bdev->memt, bus_addr, mem->bus.size,
		    flags, &mem->bus.memh);
		if (ret) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return ret;
		}
		addr = bus_space_vaddr(bdev->memt, mem->bus.memh);
#else
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
#endif
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
#ifdef __NetBSD__
		bus_space_unmap(bdev->memt, mem->bus.memh, mem->bus.size);
#else
		iounmap(virtual);
#endif
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

#ifdef __NetBSD__
# define	ioread32	fake_ioread32
# define	iowrite32	fake_iowrite32

static inline uint32_t
fake_ioread32(const volatile uint32_t *p)
{
	uint32_t v;

	v = *p;
	__insn_barrier();	/* XXX */

	return v;
}

static inline void
fake_iowrite32(uint32_t v, volatile uint32_t *p)
{

	__insn_barrier();	/* XXX */
	*p = v;
}
#endif

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

#ifdef __NetBSD__
# undef	ioread32
# undef	iowrite32
#endif
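
/*
 * Editorial note: ttm_copy_io_page() copies a page as aligned 32-bit
 * words through ioread32()/iowrite32() rather than memcpy(), presumably
 * so that every access to the I/O aperture has a fixed, supported
 * width.  The NetBSD shims above substitute plain volatile loads and
 * stores with compiler barriers.
 */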

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
#ifdef __NetBSD__
		vunmap(dst, 1);
#else
		vunmap(dst);
#endif
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
#ifdef __NetBSD__
		vunmap(src, 1);
#else
		vunmap(src);
#endif
	else
		kunmap(s);
#endif

	return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
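
/*
 * Editorial note on the copy loop above: when the two windows can
 * overlap (same mem_type, and new_mem->start lands inside the old
 * range), the loop runs memmove-style back to front with dir = -1 and
 * add = num_pages - 1, so page = i * dir + add visits num_pages - 1,
 * ..., 1, 0.  Otherwise dir = 1 and add = 0 give the usual forward
 * walk.
 */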

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
#ifdef __NetBSD__
	linux_mutex_init(&fbo->wu_mutex);
	drm_vma_node_init(&fbo->vma_node);
	uvm_obj_init(&fbo->uvmobj, bo->bdev->driver->ttm_uvm_ops, true, 1);
	mutex_obj_hold(bo->uvmobj.vmobjlock);
	uvm_obj_setlock(&fbo->uvmobj, bo->uvmobj.vmobjlock);
#else
	mutex_init(&fbo->wu_mutex);
	drm_vma_node_reset(&fbo->vma_node);
#endif
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}
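
/*
 * Editorial note: the only caller in this file is
 * ttm_bo_move_accel_cleanup() below, which hangs the old memory (and,
 * for fixed memory, the ttm) on the ghost object, fences it, and drops
 * its reference; ttm_transfered_destroy() then frees the ghost once it
 * is idle.
 */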

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#ifdef __NetBSD__
	switch (caching_flags & TTM_PL_MASK_CACHING) {
	case TTM_PL_FLAG_CACHED:
		return (tmp | PMAP_WRITE_BACK);
	case TTM_PL_FLAG_WC:
		return (tmp | PMAP_WRITE_COMBINE);
	case TTM_PL_FLAG_UNCACHED:
		return (tmp | PMAP_NOCACHE);
	default:
		panic("invalid caching flags: %"PRIx32"\n",
		    (caching_flags & TTM_PL_MASK_CACHING));
	}
#else
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
#endif
}
EXPORT_SYMBOL(ttm_io_prot);
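
/*
 * Editorial usage sketch: on the Linux side the result is fed to vmap()
 * or kmap_atomic_prot(), as in ttm_bo_kmap_ttm() below:
 *
 *	pgprot_t prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
 *	map->virtual = vmap(ttm->pages + start_page, num_pages, 0, prot);
 *
 * On NetBSD the returned value instead carries PMAP_* caching flags,
 * passed as the flags argument to pmap_kenter_pa().
 */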

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
#ifdef __NetBSD__
	    {
		bus_addr_t addr;
		int flags = BUS_SPACE_MAP_LINEAR;
		int ret;

		addr = (bo->mem.bus.base + bo->mem.bus.offset + offset);
		if (ISSET(mem->placement, TTM_PL_FLAG_WC))
			flags |= BUS_SPACE_MAP_PREFETCHABLE;
		/* XXX errno NetBSD->Linux */
		ret = -bus_space_map(bo->bdev->memt, addr, size, flags,
		    &map->u.io.memh);
		if (ret)
			return ret;
		map->u.io.size = size;
		map->virtual = bus_space_vaddr(bo->bdev->memt, map->u.io.memh);
	    }
#else
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
#endif
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
#ifdef __NetBSD__
	unsigned i;
	vaddr_t vaddr;
#endif
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

#ifdef __NetBSD__
	/*
	 * Can't use uvm_map here because it provides no way to pass
	 * along the cacheability flags.  So we'll uvm_km_alloc
	 * ourselves some KVA and then pmap_kenter_pa directly.
	 */

	KASSERT(num_pages <= ttm->num_pages);
	KASSERT(start_page <= (ttm->num_pages - num_pages));
	prot = ttm_io_prot(mem->placement, (VM_PROT_READ | VM_PROT_WRITE));
	vaddr = uvm_km_alloc(kernel_map, (num_pages << PAGE_SHIFT), PAGE_SIZE,
	    UVM_KMF_VAONLY | UVM_KMF_CANFAIL | UVM_KMF_WAITVA);
	if (vaddr == 0)
		return -ENOMEM;
	for (i = 0; i < num_pages; i++)
		pmap_kenter_pa(vaddr + i*PAGE_SIZE,
		    page_to_phys(ttm->pages[start_page + i]),
		    (VM_PROT_READ | VM_PROT_WRITE), prot);
	pmap_update(pmap_kernel());
	map->bo_kmap_type = ttm_bo_map_vmap;
	map->u.uvm.vsize = (num_pages << PAGE_SHIFT);
	map->virtual = (void *)vaddr;
	return 0;
#else
	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
#endif
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
#ifdef __NetBSD__
		bus_space_unmap(bo->bdev->memt, map->u.io.memh,
		    map->u.io.size);
#else
		iounmap(map->virtual);
#endif
		break;
	case ttm_bo_map_vmap:
#ifdef __NetBSD__
		pmap_kremove((vaddr_t)map->virtual, map->u.uvm.vsize);
		pmap_update(pmap_kernel());
		uvm_km_free(kernel_map, (vaddr_t)map->virtual,
		    map->u.uvm.vsize, UVM_KMF_VAONLY);
#else
		vunmap(map->virtual);
#endif
		break;
	case ttm_bo_map_kmap:
#ifdef __NetBSD__
		panic("ttm_bo_map_kmap does not exist in NetBSD");
#else
		kunmap(map->page);
#endif
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
#ifndef __NetBSD__
	map->page = NULL;
#endif
}
EXPORT_SYMBOL(ttm_bo_kunmap);
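
/*
 * Editorial usage sketch (assuming the bo is reserved and its backing
 * is populated); ttm_kmap_obj_virtual() is the accessor from
 * ttm_bo_api.h:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret == 0) {
 *		void *p = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		memset(p, 0, bo->num_pages << PAGE_SHIFT);
 *		ttm_bo_kunmap(&map);
 *	}
 */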

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct fence *fence,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
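
/*
 * Editorial sketch: a driver schedules its copy engine, wraps the
 * result in a fence, and lets this helper manage the transition;
 * mydrv_copy() is hypothetical.
 *
 *	fence = mydrv_copy(bo, old_mem, new_mem);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	return ttm_bo_move_accel_cleanup(bo, fence, evict,
 *	    no_wait_gpu, new_mem);
 */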