/*	$NetBSD: ttm_bo_util.c,v 1.1.1.3 2018/08/27 01:34:59 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_util.c,v 1.1.1.3 2018/08/27 01:34:59 riastradh Exp $");

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

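/*
 * ttm_bo_move_ttm - move a buffer object between TTM-bindable placements.
 *
 * If the buffer currently lives outside TTM_PL_SYSTEM it is unbound and
 * its old node is freed, the page caching mode is switched to match the
 * new placement, and the TTM is bound to @new_mem unless the destination
 * is TTM_PL_SYSTEM.  On success @new_mem is copied into bo->mem and
 * ownership of its mm_node passes to the buffer object.
 */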
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

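/*
 * ttm_mem_io_lock / ttm_mem_io_unlock - serialize io_mem_reserve and
 * io_mem_free calls for one memory type.  Managers that set
 * io_reserve_fastpath skip the mutex entirely; otherwise io_reserve_mutex
 * is taken, interruptibly when the caller asks for it.
 */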
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

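/*
 * ttm_mem_io_evict - free up io space by unmapping the least recently used
 * buffer object on the manager's io_reserve LRU.  Returns -EAGAIN when the
 * LRU is unused or empty.
 */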
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}


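/*
 * ttm_mem_io_reserve - reserve io space for @mem through the driver's
 * io_mem_reserve hook.  On the non-fastpath route the reservation is
 * refcounted via bus.io_reserved_count, and -EAGAIN from the driver is
 * retried after evicting an entry from the io_reserve LRU.
 */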
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

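/*
 * ttm_mem_io_free - drop one io space reservation reference; when the last
 * reference goes away the driver's io_mem_free hook (if any) is called.
 * No-op on the fastpath.
 */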
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

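/*
 * ttm_mem_io_reserve_vm / ttm_mem_io_free_vm - reserve or release io space
 * on behalf of a CPU mapping of @bo, tracking the state in
 * bus.io_reserved_vm and keeping the object on the manager's io_reserve
 * LRU while it is mapped.
 */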
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

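/*
 * ttm_mem_reg_ioremap / ttm_mem_reg_iounmap - helpers for ttm_bo_move_memcpy
 * that reserve io space and map an io memory region into kernel virtual
 * address space (write-combined when the placement asks for it), and undo
 * that mapping again.  *virtual is left NULL for non-iomem regions.
 */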
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

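/*
 * Low-level page copy helpers used by ttm_bo_move_memcpy:
 * ttm_copy_io_page copies between two io mappings, while
 * ttm_copy_io_ttm_page and ttm_copy_ttm_io_page copy between an io mapping
 * and a TTM page, mapping that page with the requested protection
 * (kmap_atomic_prot on x86, vmap/kmap elsewhere).
 */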
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

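/*
 * ttm_bo_move_memcpy - fallback move implemented with the CPU.
 *
 * Both the old and the new region are mapped (when they are io memory) and
 * the contents are copied page by page, walking backwards when source and
 * destination overlap within the same memory type.  A move with no backing
 * data simply clears the destination, and a system-to-system move is a
 * no-op.  When the destination is fixed (non-TT) memory the TTM is unbound
 * and destroyed.  On success the old node is released; on error it is kept
 * so the caller still owns a consistent placement.
 */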
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * 0 on success, or a negative error code on failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

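/*
 * ttm_io_prot - derive the page protection for a mapping from TTM caching
 * flags.  Cached placements keep the base protection unchanged; on the
 * architectures handled below, write-combined and uncached placements are
 * translated with pgprot_writecombine()/pgprot_noncached().  Callers in
 * this file pass PAGE_KERNEL as the base protection.
 */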
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

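/*
 * ttm_bo_kmap - map part of a buffer object into kernel virtual address
 * space.
 *
 * Depending on where the buffer lives, the mapping is a premapped bus
 * address, an ioremap of the io region, a single-page kmap or a vmap of
 * the TTM pages; ttm_bo_kunmap() below undoes whichever variant was set
 * up.  Illustrative usage sketch only (the bo/map names are placeholders;
 * ttm_kmap_obj_virtual() is the accessor from ttm_bo_api.h):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		void *ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		... access ptr, honouring is_iomem ...
 *		ttm_bo_kunmap(&map);
 *	}
 */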
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

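/*
 * ttm_bo_move_accel_cleanup - finish an accelerated (GPU) move.
 *
 * @fence signals completion of the copy.  For evictions the function waits
 * for the buffer to become idle and frees the old node directly; otherwise
 * the old placement is handed to a throwaway "ghost" object created by
 * ttm_buffer_object_transfer() so it can be released asynchronously once
 * the fence signals.
 */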
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct fence *fence,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
    702