/*	$NetBSD: ttm_bo_util.c,v 1.12 2018/08/27 14:51:33 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_util.c,v 1.12 2018/08/27 14:51:33 riastradh Exp $");

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>
#include <linux/export.h>
#include <asm/barrier.h>

#ifdef __NetBSD__		/* PMAP_* caching flags for ttm_io_prot */
#include <uvm/uvm_pmap.h>
#include <drm/drm_auth_netbsd.h>
#endif

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

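/*
 * ttm_bo_move_ttm
 *
 * Move a buffer whose old and new placements are both backed by its
 * ttm_tt page array.  The old aperture binding (if any) is torn down
 * and the old node freed, the page caching attributes are switched to
 * the new placement, and the pages are rebound if the destination is
 * not system memory.  On success *old_mem takes over new_mem and
 * new_mem->mm_node is cleared.  No data is copied; only the binding
 * changes.
 */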
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

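/*
 * ttm_mem_io_lock / ttm_mem_io_unlock
 *
 * Serialize io_mem_reserve/io_mem_free calls for one memory type.  When
 * the manager advertises io_reserve_fastpath the lock is skipped
 * entirely; otherwise io_reserve_mutex is taken, interruptibly if
 * requested.
 */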
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

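/*
 * ttm_mem_io_reserve / ttm_mem_io_free
 *
 * Reference-counted wrappers around the driver's io_mem_reserve and
 * io_mem_free hooks.  The first reservation of a ttm_mem_reg calls into
 * the driver, retrying after evicting another buffer's I/O mapping via
 * ttm_mem_io_evict() when the driver returns -EAGAIN; the last free
 * releases it.  With io_reserve_fastpath, ttm_mem_io_reserve calls the
 * driver hook directly and ttm_mem_io_free is a no-op.
 */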
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

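/*
 * ttm_mem_io_reserve_vm / ttm_mem_io_free_vm
 *
 * Per-buffer-object helpers used by the CPU mapping (fault) path: take
 * or drop a single I/O reservation for bo->mem, tracked with
 * bus.io_reserved_vm, and keep the object on the manager's
 * io_reserve_lru so its mapping can be reclaimed by ttm_mem_io_evict().
 */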
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

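/*
 * ttm_mem_reg_ioremap / ttm_mem_reg_iounmap
 *
 * Map and unmap a whole I/O memory region for the CPU memcpy move path.
 * If the region already has a kernel virtual address (bus.addr) it is
 * reused; otherwise the aperture is mapped write-combined or uncached
 * according to the placement flags, via bus_space_map() on NetBSD or
 * ioremap_wc()/ioremap_nocache() on Linux.  *virtual is left NULL for
 * regions that are not iomem.
 */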
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
#ifdef __NetBSD__
		const bus_addr_t bus_addr = (mem->bus.base + mem->bus.offset);
		int flags = BUS_SPACE_MAP_LINEAR;

		if (ISSET(mem->placement, TTM_PL_FLAG_WC))
			flags |= BUS_SPACE_MAP_PREFETCHABLE;
		/* XXX errno NetBSD->Linux */
		ret = -bus_space_map(bdev->memt, bus_addr, mem->bus.size,
		    flags, &mem->bus.memh);
		if (ret) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return ret;
		}
		addr = bus_space_vaddr(bdev->memt, mem->bus.memh);
#else
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
#endif
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
#ifdef __NetBSD__
		bus_space_unmap(bdev->memt, mem->bus.memh, mem->bus.size);
#else
		iounmap(virtual);
#endif
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

#ifdef __NetBSD__
#  define	ioread32	fake_ioread32
#  define	iowrite32	fake_iowrite32

static inline uint32_t
ioread32(const volatile uint32_t *p)
{
	uint32_t v;

	v = *p;
	__insn_barrier();	/* XXX ttm io barrier */

	return v;		/* XXX ttm byte order */
}

static inline void
iowrite32(uint32_t v, volatile uint32_t *p)
{

	__insn_barrier();	/* XXX ttm io barrier */
	*p = v;			/* XXX ttm byte order */
}
#endif

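/*
 * ttm_copy_io_page
 *
 * Copy one page between two kernel-mapped I/O regions, 32 bits at a
 * time, going through the ioread32/iowrite32 accessors (the local
 * NetBSD stand-ins above, or the Linux ones) so that ordering and byte
 * order are handled in one place.
 */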
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

#ifdef __NetBSD__
#  undef	ioread32
#  undef	iowrite32
#endif

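/*
 * ttm_copy_io_ttm_page / ttm_copy_ttm_io_page
 *
 * Copy one page between an I/O mapping and a ttm_tt page.  The ttm page
 * is mapped with the caching attributes in @prot: kmap_atomic_prot() on
 * x86, otherwise vmap() for non-default protections or plain kmap().
 */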
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
#ifdef __NetBSD__
		vunmap(dst, 1);
#else
		vunmap(dst);
#endif
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
#ifdef __NetBSD__
		vunmap(src, 1);
#else
		vunmap(src);
#endif
	else
		kunmap(s);
#endif

	return 0;
}

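/*
 * ttm_bo_move_memcpy
 *
 * Fallback move that copies the buffer contents with the CPU.  Both the
 * old and the new placement are mapped; if neither is iomem no data copy
 * is needed, and if the source has no backing data the destination is
 * simply cleared.  Pages are copied back to front when the two regions
 * may overlap, and for fixed (non-TTM) destinations the ttm_tt is
 * unbound and destroyed afterwards.  On error the old mm node is kept so
 * the buffer still has a valid placement.
 */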
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
#ifdef __NetBSD__
	linux_mutex_init(&fbo->wu_mutex);
	drm_vma_node_init(&fbo->vma_node);
	uvm_obj_init(&fbo->uvmobj, bo->bdev->driver->ttm_uvm_ops, true, 1);
	mutex_obj_hold(bo->uvmobj.vmobjlock);
	uvm_obj_setlock(&fbo->uvmobj, bo->uvmobj.vmobjlock);
#else
	mutex_init(&fbo->wu_mutex);
	drm_vma_node_reset(&fbo->vma_node);
#endif
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

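/*
 * ttm_io_prot
 *
 * Derive the page protection to use for a kernel or user mapping from
 * the TTM caching flags.  Cached placements return @tmp unchanged; on
 * NetBSD the PMAP_* caching bits are OR'ed in, while on Linux the
 * architecture-specific pgprot_writecombine()/pgprot_noncached()
 * helpers are applied for write-combined and uncached placements.
 */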
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#ifdef __NetBSD__
	switch (caching_flags & TTM_PL_MASK_CACHING) {
	case TTM_PL_FLAG_CACHED:
		return (tmp | PMAP_WRITE_BACK);
	case TTM_PL_FLAG_WC:
		return (tmp | PMAP_WRITE_COMBINE);
	case TTM_PL_FLAG_UNCACHED:
		return (tmp | PMAP_NOCACHE);
	default:
		panic("invalid caching flags: %"PRIx32"\n",
		    (caching_flags & TTM_PL_MASK_CACHING));
	}
#else
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
#endif
}
EXPORT_SYMBOL(ttm_io_prot);

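/*
 * ttm_bo_ioremap
 *
 * Kernel-map a sub-range of a buffer object that lives in iomem, on
 * behalf of ttm_bo_kmap().  A premapped bus.addr is used directly;
 * otherwise the range is mapped write-combined or uncached according to
 * the placement, and the mapping details are recorded in @map for
 * ttm_bo_kunmap().
 */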
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
#ifdef __NetBSD__
	    {
		bus_addr_t addr;
		int flags = BUS_SPACE_MAP_LINEAR;
		int ret;

		addr = (bo->mem.bus.base + bo->mem.bus.offset + offset);
		if (ISSET(mem->placement, TTM_PL_FLAG_WC))
			flags |= BUS_SPACE_MAP_PREFETCHABLE;
		/* XXX errno NetBSD->Linux */
		ret = -bus_space_map(bo->bdev->memt, addr, size, flags,
		    &map->u.io.memh);
		if (ret)
			return ret;
		map->u.io.size = size;
		map->virtual = bus_space_vaddr(bo->bdev->memt, map->u.io.memh);
	    }
#else
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
#endif
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

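/*
 * ttm_bo_kmap_ttm
 *
 * Kernel-map a range of a buffer object backed by ttm_tt pages,
 * populating the ttm first if needed.  A single cached page is handled
 * with kmap(); anything else goes through vmap() with the protection
 * from ttm_io_prot() so the mapping is contiguous and correctly cached.
 */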
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
#ifdef __NetBSD__
		map->u.kmapped.page = ttm->pages[start_page];
		map->virtual = kmap(map->u.kmapped.page);
#else
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
#endif
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
#ifdef __NetBSD__
		map->u.vmapped.vsize = (vsize_t)num_pages << PAGE_SHIFT;
#endif
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

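/*
 * ttm_bo_kmap
 *
 * Map @num_pages of @bo starting at @start_page into kernel virtual
 * address space, reserving the I/O region first and dispatching to
 * ttm_bo_ioremap() or ttm_bo_kmap_ttm() depending on whether the
 * current placement is iomem.  Multi-page mappings are restricted to
 * privileged callers.  Illustrative use (a sketch only, not taken from
 * a particular driver; real callers must check which accessor is
 * appropriate for iomem):
 *
 *	struct ttm_bo_kmap_obj map;
 *	int ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret == 0) {
 *		... read or write through map.virtual ...
 *		ttm_bo_kunmap(&map);
 *	}
 */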
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#ifdef __NetBSD__
	if (num_pages > 1 && !DRM_SUSER())
#else
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
#endif
		return -EPERM;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

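/*
 * ttm_bo_kunmap
 *
 * Tear down a mapping created by ttm_bo_kmap(), undoing whichever of
 * iounmap/bus_space_unmap, vunmap or kunmap was used, and drop the I/O
 * reservation taken for the mapping.
 */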
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
#ifdef __NetBSD__
		bus_space_unmap(bo->bdev->memt, map->u.io.memh,
		    map->u.io.size);
#else
		iounmap(map->virtual);
#endif
		break;
	case ttm_bo_map_vmap:
#ifdef __NetBSD__
		vunmap(map->virtual, map->u.vmapped.vsize >> PAGE_SHIFT);
#else
		vunmap(map->virtual);
#endif
		break;
	case ttm_bo_map_kmap:
#ifdef __NetBSD__
		kunmap(map->u.kmapped.page);
#else
		kunmap(map->page);
#endif
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
#ifndef __NetBSD__
	map->page = NULL;
#endif
}
EXPORT_SYMBOL(ttm_bo_kunmap);

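/*
 * ttm_bo_move_accel_cleanup
 *
 * Finish an accelerated (GPU-copied) move once @fence has been attached
 * to the buffer's reservation object.  Evictions wait for the copy and
 * release the old node immediately; ordinary moves hand the old memory
 * and, for fixed destinations, the ttm_tt to a throw-away "ghost"
 * object created by ttm_buffer_object_transfer(), so they are released
 * only when the GPU is done with them.
 */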
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct fence *fence,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
    836