/*	ttm_bo_util.c revision 1.1.1.2	*/
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
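
/*
 * Usage sketch (driver side): ttm_bo_move_ttm() is the simple bind/unbind
 * path a driver's move callback can take for transitions between system
 * memory and a GART-like TT domain, with ttm_bo_move_memcpy() below as the
 * generic copying fallback. The name "mydrv_bo_move" and its exact
 * parameter list are illustrative assumptions, not part of this file:
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 bool interruptible, bool no_wait_gpu,
 *				 struct ttm_mem_reg *new_mem)
 *	{
 *		struct ttm_mem_reg *old_mem = &bo->mem;
 *
 *		if ((old_mem->mem_type == TTM_PL_SYSTEM &&
 *		     new_mem->mem_type == TTM_PL_TT) ||
 *		    (old_mem->mem_type == TTM_PL_TT &&
 *		     new_mem->mem_type == TTM_PL_SYSTEM))
 *			return ttm_bo_move_ttm(bo, evict, no_wait_gpu, new_mem);
 *
 *		return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 *	}
 */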

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);
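
/*
 * Usage note: callers pair ttm_mem_io_lock()/ttm_mem_io_unlock() around
 * ttm_mem_io_reserve()/ttm_mem_io_free(), as ttm_mem_reg_ioremap() and
 * ttm_bo_kmap() do later in this file:
 *
 *	(void) ttm_mem_io_lock(man, false);
 *	ret = ttm_mem_io_reserve(bdev, mem);
 *	ttm_mem_io_unlock(man);
 */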

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}


int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
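
/*
 * Note: the copy loop above walks the pages backwards (dir == -1) when the
 * source and destination share a memory type and their ranges overlap, so
 * CPU-copy moves within one aperture stay correct. Drivers typically reach
 * ttm_bo_move_memcpy() as the last-resort fallback when no hardware copy
 * path is available or an accelerated move fails.
 */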

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	spin_unlock(&bdev->fence_lock);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}
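
/*
 * Note: ttm_buffer_object_transfer() is used by ttm_bo_move_accel_cleanup()
 * below. The resulting "ghost" object holds the old placement (and, when
 * moving to fixed memory, the TTM to be unbound and destroyed) until the
 * scheduled GPU operation has completed.
 */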

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
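
/*
 * Usage note: ttm_io_prot() derives the CPU page protection from a
 * placement's caching flags. It is used that way by ttm_bo_move_memcpy()
 * above and ttm_bo_kmap_ttm() below, e.g.:
 *
 *	prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
 *		PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
 */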

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);
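
/*
 * Usage sketch (caller side): CPU access to a buffer object is bracketed by
 * ttm_bo_kmap()/ttm_bo_kunmap(), with ttm_kmap_obj_virtual() from
 * ttm_bo_api.h returning the kernel virtual address and whether it is an
 * I/O mapping. Roughly:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *ptr;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret)
 *		return ret;
 *	ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	... read or write through ptr, honouring is_iomem ...
 *	ttm_bo_kunmap(&map);
 */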

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
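
/*
 * Usage sketch (driver side): after scheduling a hardware copy from the old
 * placement into @new_mem and obtaining the sync object that signals its
 * completion, a driver hands both to this function, roughly:
 *
 *	fence = mydrv_copy_buffer(bo, old_mem, new_mem);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	return ttm_bo_move_accel_cleanup(bo, fence, evict,
 *					 no_wait_gpu, new_mem);
 *
 * "mydrv_copy_buffer" is a made-up stand-in for the driver's blit/DMA path;
 * the sync_obj is whatever type the driver's sync_obj_* hooks operate on.
 */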