/*	$NetBSD: ttm_bo_util.c,v 1.1.1.4 2021/12/18 20:15:53 riastradh Exp $	*/

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_util.c,v 1.1.1.4 2021/12/18 20:15:53 riastradh Exp $");

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				pr_err("Failed to expire sync object before unbinding TTM\n");
			return ret;
		}

		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem, ctx);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}

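/*
 * Descriptive note (not from the original source): give up the io
 * reservation of the first (least recently inserted) buffer object on
 * the manager's io_reserve_lru list so that a new reservation can
 * succeed.  Returns -EAGAIN when there is nothing left to evict.
 */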
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}


int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			       void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

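/*
 * Descriptive note (not from the original source): copy one page-sized
 * chunk between two kernel mappings of io memory, 32 bits at a time,
 * using the MMIO-safe ioread32()/iowrite32() accessors.  @page is the
 * page index into both mappings.
 */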
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

#ifdef CONFIG_X86
#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
#else
#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
#endif


/**
 * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
 * specified page protection.
 *
 * @page: The page to map.
 * @prot: The page protection.
 *
 * This function maps a TTM page using the kmap_atomic api if available,
 * otherwise falls back to vmap. The user must make sure that the
 * specified page does not have an aliased mapping with a different caching
 * policy unless the architecture explicitly allows it. Also mapping and
 * unmapping using this api must be correctly nested. Unmapping should
 * occur in the reverse order of mapping.
 */
void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
		return kmap_atomic(page);
	else
		return __ttm_kmap_atomic_prot(page, prot);
}
EXPORT_SYMBOL(ttm_kmap_atomic_prot);

/**
 * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
 * ttm_kmap_atomic_prot.
 *
 * @addr: The virtual address from the map.
 * @prot: The page protection.
 */
void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
{
	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
		kunmap_atomic(addr);
	else
		__ttm_kunmap_atomic(addr);
}
EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
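
/*
 * Illustrative sketch only, kept out of the build: the in-file users of
 * this pair are ttm_copy_io_ttm_page() and ttm_copy_ttm_io_page() below.
 * The hypothetical ttm_example_copy_page_prot() (not part of TTM) merely
 * shows the nesting rule from the comment above: when two pages are
 * mapped at once, they must be unmapped in the reverse order of mapping.
 * Error handling for a failed vmap fallback is omitted for brevity.
 */
#if 0
static void ttm_example_copy_page_prot(struct page *dst_page,
				       struct page *src_page,
				       pgprot_t prot)
{
	void *dst;
	void *src;

	dst = ttm_kmap_atomic_prot(dst_page, prot);	/* mapped first */
	src = ttm_kmap_atomic_prot(src_page, prot);	/* mapped second */

	memcpy(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(src, prot);		/* unmapped first */
	ttm_kunmap_atomic_prot(dst, prot);		/* unmapped second */
}
#endif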

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
	dst = ttm_kmap_atomic_prot(d, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(dst, prot);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = ttm_kmap_atomic_prot(s, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(src, prot);

	return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm) {
		ret = ttm_tt_populate(ttm, ctx);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

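	/*
	 * Descriptive note (not from the original source): if the old and
	 * new ranges live in the same memory type and may overlap, copy
	 * the pages back to front so that source pages are read before
	 * they are overwritten.
	 */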
	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else {
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		}
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;
	fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;

	ttm_bo_get(bo);
	fbo->bo = bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_bo_glob.bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	INIT_LIST_HEAD(&fbo->base.lru);
	INIT_LIST_HEAD(&fbo->base.swap);
	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
	fbo->base.moving = NULL;
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.list_kref);
	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.acc_size = 0;
	if (bo->base.resv == &bo->base._resv)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	*new_obj = &fbo->base;
	return 0;
}

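/*
 * Illustrative sketch only, kept out of the build: a distilled form of
 * the ghost-object pattern that ttm_bo_move_accel_cleanup() and
 * ttm_bo_pipeline_move() below build on.  The hypothetical
 * ttm_example_hang_old_mem_on_ghost() (not part of TTM) shows the
 * intended lifecycle of the transferred object.
 */
#if 0
static int ttm_example_hang_old_mem_on_ghost(struct ttm_buffer_object *bo,
					     struct dma_fence *fence)
{
	struct ttm_buffer_object *ghost_obj;
	int ret;

	/* The ghost takes over bo's old placement (and possibly its ttm). */
	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
	if (ret)
		return ret;

	/* The ghost may only be torn down once the copy has completed. */
	dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

	/* ttm_buffer_object_transfer() returned the ghost's resv locked. */
	dma_resv_unlock(&ghost_obj->base._resv);

	/* Drop our reference; the old space is freed when the ghost dies. */
	ttm_bo_put(ghost_obj);
	return 0;
}
#endif
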
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__) || defined(__mips__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset,
					       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;

	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
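
/*
 * Illustrative sketch only, kept out of the build: the hypothetical
 * ttm_example_clear_bo() (not part of TTM) shows the usual
 * ttm_bo_kmap()/ttm_bo_kunmap() pattern, assuming the caller already
 * holds the bo's reservation and that ttm_kmap_obj_virtual() from
 * ttm_bo_api.h is used to distinguish io from system mappings.
 */
#if 0
static int ttm_example_clear_bo(struct ttm_buffer_object *bo)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	/* Map the whole object into kernel address space. */
	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
	if (ret)
		return ret;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memset_io((void __iomem *)virtual, 0,
			  bo->num_pages << PAGE_SHIFT);
	else
		memset(virtual, 0, bo->num_pages << PAGE_SHIFT);

	ttm_bo_kunmap(&map);
	return 0;
}
#endif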

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	dma_resv_add_excl_fence(bo->base.resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		dma_resv_unlock(&ghost_obj->base._resv);
		ttm_bo_put(ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
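
/*
 * Illustrative sketch only, kept out of the build: a hypothetical
 * driver-side ->move() helper (mydrv_move_blit/mydrv_copy_mem are
 * stand-ins, not real TTM or driver API) that schedules a copy on its
 * own engine and then lets ttm_bo_move_accel_cleanup() deal with the
 * old placement, either synchronously (evict) or via a ghost object.
 */
#if 0
static int mydrv_move_blit(struct ttm_buffer_object *bo, bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct dma_fence *fence;
	int ret;

	/* Submit the copy; the returned fence signals on completion. */
	fence = mydrv_copy_mem(bo, old_mem, new_mem);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
	dma_fence_put(fence);
	return ret;
}
#endif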

int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *old_mem = &bo->mem;

	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

	int ret;

	dma_resv_add_excl_fence(bo->base.resv, fence);

	if (!evict) {
		struct ttm_buffer_object *ghost_obj;

		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		dma_resv_unlock(&ghost_obj->base._resv);
		ttm_bo_put(ghost_obj);

	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

		/**
		 * BO doesn't have a TTM we need to bind/unbind. Just remember
		 * this eviction and free up the allocation
		 */

		spin_lock(&from->move_lock);
		if (!from->move || dma_fence_is_later(fence, from->move)) {
			dma_fence_put(from->move);
			from->move = dma_fence_get(fence);
		}
		spin_unlock(&from->move_lock);

		ttm_bo_free_old_node(bo);

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

	} else {
		/**
		 * Last resort, wait for the move to be completed.
		 *
		 * Should never happen in practice.
		 */

		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);

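/*
 * Descriptive note (not from the original source): hand the bo's current
 * backing store over to a ghost object that is released once the bo's
 * fences have signaled, leaving the bo itself empty: system placement,
 * no memory node and no ttm.
 */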
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	int ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		return ret;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	memset(&bo->mem, 0, sizeof(bo->mem));
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->ttm = NULL;

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);

	return 0;
}