      1 /*	$NetBSD: vmwgfx_ttm_buffer.c,v 1.2 2021/12/18 23:45:45 riastradh Exp $	*/
      2 
      3 // SPDX-License-Identifier: GPL-2.0 OR MIT
      4 /**************************************************************************
      5  *
      6  * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
      7  *
      8  * Permission is hereby granted, free of charge, to any person obtaining a
      9  * copy of this software and associated documentation files (the
     10  * "Software"), to deal in the Software without restriction, including
     11  * without limitation the rights to use, copy, modify, merge, publish,
     12  * distribute, sub license, and/or sell copies of the Software, and to
     13  * permit persons to whom the Software is furnished to do so, subject to
     14  * the following conditions:
     15  *
     16  * The above copyright notice and this permission notice (including the
     17  * next paragraph) shall be included in all copies or substantial portions
     18  * of the Software.
     19  *
     20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     22  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     23  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
     24  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
     25  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
     26  * USE OR OTHER DEALINGS IN THE SOFTWARE.
     27  *
     28  **************************************************************************/
     29 
     30 #include <sys/cdefs.h>
     31 __KERNEL_RCSID(0, "$NetBSD: vmwgfx_ttm_buffer.c,v 1.2 2021/12/18 23:45:45 riastradh Exp $");
     32 
     33 #include "vmwgfx_drv.h"
     34 #include <drm/ttm/ttm_bo_driver.h>
     35 #include <drm/ttm/ttm_placement.h>
     36 #include <drm/ttm/ttm_page_alloc.h>
     37 
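/*
 * Placement definitions.  Each struct ttm_placement below pairs a list of
 * preferred placements with a busy_placement list that TTM can fall back to
 * under memory pressure; the *_ne_* variants additionally set
 * TTM_PL_FLAG_NO_EVICT so that a buffer cannot be evicted once placed.
 */
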
     38 static const struct ttm_place vram_placement_flags = {
     39 	.fpfn = 0,
     40 	.lpfn = 0,
     41 	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
     42 };
     43 
     44 static const struct ttm_place vram_ne_placement_flags = {
     45 	.fpfn = 0,
     46 	.lpfn = 0,
     47 	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
     48 };
     49 
     50 static const struct ttm_place sys_placement_flags = {
     51 	.fpfn = 0,
     52 	.lpfn = 0,
     53 	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
     54 };
     55 
     56 static const struct ttm_place sys_ne_placement_flags = {
     57 	.fpfn = 0,
     58 	.lpfn = 0,
     59 	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
     60 };
     61 
     62 static const struct ttm_place gmr_placement_flags = {
     63 	.fpfn = 0,
     64 	.lpfn = 0,
     65 	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
     66 };
     67 
     68 static const struct ttm_place gmr_ne_placement_flags = {
     69 	.fpfn = 0,
     70 	.lpfn = 0,
     71 	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
     72 };
     73 
     74 static const struct ttm_place mob_placement_flags = {
     75 	.fpfn = 0,
     76 	.lpfn = 0,
     77 	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
     78 };
     79 
     80 static const struct ttm_place mob_ne_placement_flags = {
     81 	.fpfn = 0,
     82 	.lpfn = 0,
     83 	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
     84 };
     85 
     86 struct ttm_placement vmw_vram_placement = {
     87 	.num_placement = 1,
     88 	.placement = &vram_placement_flags,
     89 	.num_busy_placement = 1,
     90 	.busy_placement = &vram_placement_flags
     91 };
     92 
     93 static const struct ttm_place vram_gmr_placement_flags[] = {
     94 	{
     95 		.fpfn = 0,
     96 		.lpfn = 0,
     97 		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
     98 	}, {
     99 		.fpfn = 0,
    100 		.lpfn = 0,
    101 		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
    102 	}
    103 };
    104 
    105 static const struct ttm_place gmr_vram_placement_flags[] = {
    106 	{
    107 		.fpfn = 0,
    108 		.lpfn = 0,
    109 		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
    110 	}, {
    111 		.fpfn = 0,
    112 		.lpfn = 0,
    113 		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
    114 	}
    115 };
    116 
    117 struct ttm_placement vmw_vram_gmr_placement = {
    118 	.num_placement = 2,
    119 	.placement = vram_gmr_placement_flags,
    120 	.num_busy_placement = 1,
    121 	.busy_placement = &gmr_placement_flags
    122 };
    123 
    124 static const struct ttm_place vram_gmr_ne_placement_flags[] = {
    125 	{
    126 		.fpfn = 0,
    127 		.lpfn = 0,
    128 		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
    129 			 TTM_PL_FLAG_NO_EVICT
    130 	}, {
    131 		.fpfn = 0,
    132 		.lpfn = 0,
    133 		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
    134 			 TTM_PL_FLAG_NO_EVICT
    135 	}
    136 };
    137 
    138 struct ttm_placement vmw_vram_gmr_ne_placement = {
    139 	.num_placement = 2,
    140 	.placement = vram_gmr_ne_placement_flags,
    141 	.num_busy_placement = 1,
    142 	.busy_placement = &gmr_ne_placement_flags
    143 };
    144 
    145 struct ttm_placement vmw_vram_sys_placement = {
    146 	.num_placement = 1,
    147 	.placement = &vram_placement_flags,
    148 	.num_busy_placement = 1,
    149 	.busy_placement = &sys_placement_flags
    150 };
    151 
    152 struct ttm_placement vmw_vram_ne_placement = {
    153 	.num_placement = 1,
    154 	.placement = &vram_ne_placement_flags,
    155 	.num_busy_placement = 1,
    156 	.busy_placement = &vram_ne_placement_flags
    157 };
    158 
    159 struct ttm_placement vmw_sys_placement = {
    160 	.num_placement = 1,
    161 	.placement = &sys_placement_flags,
    162 	.num_busy_placement = 1,
    163 	.busy_placement = &sys_placement_flags
    164 };
    165 
    166 struct ttm_placement vmw_sys_ne_placement = {
    167 	.num_placement = 1,
    168 	.placement = &sys_ne_placement_flags,
    169 	.num_busy_placement = 1,
    170 	.busy_placement = &sys_ne_placement_flags
    171 };
    172 
    173 static const struct ttm_place evictable_placement_flags[] = {
    174 	{
    175 		.fpfn = 0,
    176 		.lpfn = 0,
    177 		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
    178 	}, {
    179 		.fpfn = 0,
    180 		.lpfn = 0,
    181 		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
    182 	}, {
    183 		.fpfn = 0,
    184 		.lpfn = 0,
    185 		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
    186 	}, {
    187 		.fpfn = 0,
    188 		.lpfn = 0,
    189 		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
    190 	}
    191 };
    192 
    193 static const struct ttm_place nonfixed_placement_flags[] = {
    194 	{
    195 		.fpfn = 0,
    196 		.lpfn = 0,
    197 		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
    198 	}, {
    199 		.fpfn = 0,
    200 		.lpfn = 0,
    201 		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
    202 	}, {
    203 		.fpfn = 0,
    204 		.lpfn = 0,
    205 		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
    206 	}
    207 };
    208 
    209 struct ttm_placement vmw_evictable_placement = {
    210 	.num_placement = 4,
    211 	.placement = evictable_placement_flags,
    212 	.num_busy_placement = 1,
    213 	.busy_placement = &sys_placement_flags
    214 };
    215 
    216 struct ttm_placement vmw_srf_placement = {
    217 	.num_placement = 1,
    218 	.num_busy_placement = 2,
    219 	.placement = &gmr_placement_flags,
    220 	.busy_placement = gmr_vram_placement_flags
    221 };
    222 
    223 struct ttm_placement vmw_mob_placement = {
    224 	.num_placement = 1,
    225 	.num_busy_placement = 1,
    226 	.placement = &mob_placement_flags,
    227 	.busy_placement = &mob_placement_flags
    228 };
    229 
    230 struct ttm_placement vmw_mob_ne_placement = {
    231 	.num_placement = 1,
    232 	.num_busy_placement = 1,
    233 	.placement = &mob_ne_placement_flags,
    234 	.busy_placement = &mob_ne_placement_flags
    235 };
    236 
    237 struct ttm_placement vmw_nonfixed_placement = {
    238 	.num_placement = 3,
    239 	.placement = nonfixed_placement_flags,
    240 	.num_busy_placement = 1,
    241 	.busy_placement = &sys_placement_flags
    242 };
    243 
    244 struct vmw_ttm_tt {
    245 	struct ttm_dma_tt dma_ttm;
    246 	struct vmw_private *dev_priv;
    247 	int gmr_id;
    248 	struct vmw_mob *mob;
    249 	int mem_type;
    250 	struct sg_table sgt;
    251 	struct vmw_sg_table vsgt;
    252 	uint64_t sg_alloc_size;
    253 	bool mapped;
    254 };
    255 
    256 const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
    257 
    258 /**
    259  * Helper functions to advance a struct vmw_piter iterator.
    260  *
    261  * @viter: Pointer to the iterator.
    262  *
    263  * These functions return false if past the end of the list,
    264  * true otherwise. Functions are selected depending on the current
    265  * DMA mapping mode.
    266  */
    267 static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
    268 {
    269 	return ++(viter->i) < viter->num_pages;
    270 }
    271 
    272 static bool __vmw_piter_sg_next(struct vmw_piter *viter)
    273 {
    274 	bool ret = __vmw_piter_non_sg_next(viter);
    275 
    276 	return __sg_page_iter_dma_next(&viter->iter) && ret;
    277 }
    278 
    279 
    280 /**
    281  * Helper functions to return a pointer to the current page.
    282  *
    283  * @viter: Pointer to the iterator
    284  *
    285  * These functions return a pointer to the page currently
    286  * pointed to by @viter. Functions are selected depending on the
    287  * current mapping mode.
    288  */
    289 static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
    290 {
    291 	return viter->pages[viter->i];
    292 }
    293 
    294 /**
    295  * Helper functions to return the DMA address of the current page.
    296  *
    297  * @viter: Pointer to the iterator
    298  *
    299  * These functions return the DMA address of the page currently
    300  * pointed to by @viter. Functions are selected depending on the
    301  * current mapping mode.
    302  */
    303 static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
    304 {
    305 	return page_to_phys(viter->pages[viter->i]);
    306 }
    307 
    308 static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
    309 {
    310 	return viter->addrs[viter->i];
    311 }
    312 
    313 static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
    314 {
    315 	return sg_page_iter_dma_address(&viter->iter);
    316 }
    317 
    318 
    319 /**
    320  * vmw_piter_start - Initialize a struct vmw_piter.
    321  *
    322  * @viter: Pointer to the iterator to initialize
    323  * @vsgt: Pointer to a struct vmw_sg_table to initialize from
    324  *
    325  * Note that we're following the convention of __sg_page_iter_start, so that
    326  * the iterator doesn't point to a valid page after initialization; it has
    327  * to be advanced one step first.
    328  */
    329 void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
    330 		     unsigned long p_offset)
    331 {
    332 	viter->i = p_offset - 1;
    333 	viter->num_pages = vsgt->num_pages;
    334 	viter->page = &__vmw_piter_non_sg_page;
    335 	viter->pages = vsgt->pages;
    336 	switch (vsgt->mode) {
    337 	case vmw_dma_phys:
    338 		viter->next = &__vmw_piter_non_sg_next;
    339 		viter->dma_address = &__vmw_piter_phys_addr;
    340 		break;
    341 	case vmw_dma_alloc_coherent:
    342 		viter->next = &__vmw_piter_non_sg_next;
    343 		viter->dma_address = &__vmw_piter_dma_addr;
    344 		viter->addrs = vsgt->addrs;
    345 		break;
    346 	case vmw_dma_map_populate:
    347 	case vmw_dma_map_bind:
    348 		viter->next = &__vmw_piter_sg_next;
    349 		viter->dma_address = &__vmw_piter_sg_addr;
    350 		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
    351 				     vsgt->sgt->orig_nents, p_offset);
    352 		break;
    353 	default:
    354 		BUG();
    355 	}
    356 }
    357 
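/*
 * Typical use of the iterator, as a sketch (the same pattern is used by
 * vmw_ttm_map_dma() below): walk all device addresses of a vmw_sg_table.
 *
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *		...
 *	}
 */
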
    358 /**
     359  * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
    360  * TTM pages
    361  *
     362  * @vmw_tt: Pointer to a struct vmw_ttm_tt
    363  *
    364  * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
    365  */
    366 static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
    367 {
    368 	struct device *dev = vmw_tt->dev_priv->dev->dev;
    369 
    370 	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
    371 		DMA_BIDIRECTIONAL);
    372 	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
    373 }
    374 
    375 /**
    376  * vmw_ttm_map_for_dma - map TTM pages to get device addresses
    377  *
     378  * @vmw_tt: Pointer to a struct vmw_ttm_tt
    379  *
    380  * This function is used to get device addresses from the kernel DMA layer.
    381  * However, it's violating the DMA API in that when this operation has been
    382  * performed, it's illegal for the CPU to write to the pages without first
    383  * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
    384  * therefore only legal to call this function if we know that the function
    385  * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
    386  * a CPU write buffer flush.
    387  */
    388 static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
    389 {
    390 	struct device *dev = vmw_tt->dev_priv->dev->dev;
    391 	int ret;
    392 
    393 	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
    394 			 DMA_BIDIRECTIONAL);
    395 	if (unlikely(ret == 0))
    396 		return -ENOMEM;
    397 
    398 	vmw_tt->sgt.nents = ret;
    399 
    400 	return 0;
    401 }
    402 
    403 /**
    404  * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
    405  *
    406  * @vmw_tt: Pointer to a struct vmw_ttm_tt
    407  *
     408  * Select the correct mapping function for the current DMA mapping mode
     409  * and make sure the TTM pages are visible to the device. Allocate storage
     410  * for the device mappings. If a mapping has already been set up, the
     411  * function returns success without doing any further work.
    412  */
    413 static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
    414 {
    415 	struct vmw_private *dev_priv = vmw_tt->dev_priv;
    416 	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
    417 	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
    418 	struct ttm_operation_ctx ctx = {
    419 		.interruptible = true,
    420 		.no_wait_gpu = false
    421 	};
    422 	struct vmw_piter iter;
    423 	dma_addr_t old;
    424 	int ret = 0;
    425 	static size_t sgl_size;
    426 	static size_t sgt_size;
    427 
    428 	if (vmw_tt->mapped)
    429 		return 0;
    430 
    431 	vsgt->mode = dev_priv->map_mode;
    432 	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
    433 	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
    434 	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
    435 	vsgt->sgt = &vmw_tt->sgt;
    436 
    437 	switch (dev_priv->map_mode) {
    438 	case vmw_dma_map_bind:
    439 	case vmw_dma_map_populate:
    440 		if (unlikely(!sgl_size)) {
    441 			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
    442 			sgt_size = ttm_round_pot(sizeof(struct sg_table));
    443 		}
    444 		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
    445 		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
    446 		if (unlikely(ret != 0))
    447 			return ret;
    448 
    449 		ret = __sg_alloc_table_from_pages
    450 			(&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
    451 			 (unsigned long) vsgt->num_pages << PAGE_SHIFT,
    452 			 dma_get_max_seg_size(dev_priv->dev->dev),
    453 			 GFP_KERNEL);
    454 		if (unlikely(ret != 0))
    455 			goto out_sg_alloc_fail;
    456 
    457 		if (vsgt->num_pages > vmw_tt->sgt.nents) {
    458 			uint64_t over_alloc =
    459 				sgl_size * (vsgt->num_pages -
    460 					    vmw_tt->sgt.nents);
    461 
    462 			ttm_mem_global_free(glob, over_alloc);
    463 			vmw_tt->sg_alloc_size -= over_alloc;
    464 		}
    465 
    466 		ret = vmw_ttm_map_for_dma(vmw_tt);
    467 		if (unlikely(ret != 0))
    468 			goto out_map_fail;
    469 
    470 		break;
    471 	default:
    472 		break;
    473 	}
    474 
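	/*
	 * Count the number of contiguous DMA regions: each page whose DMA
	 * address does not immediately follow the previous one
	 * (old + PAGE_SIZE) starts a new region.
	 */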
    475 	old = ~((dma_addr_t) 0);
    476 	vmw_tt->vsgt.num_regions = 0;
    477 	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
    478 		dma_addr_t cur = vmw_piter_dma_addr(&iter);
    479 
    480 		if (cur != old + PAGE_SIZE)
    481 			vmw_tt->vsgt.num_regions++;
    482 		old = cur;
    483 	}
    484 
    485 	vmw_tt->mapped = true;
    486 	return 0;
    487 
    488 out_map_fail:
    489 	sg_free_table(vmw_tt->vsgt.sgt);
    490 	vmw_tt->vsgt.sgt = NULL;
    491 out_sg_alloc_fail:
    492 	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
    493 	return ret;
    494 }
    495 
    496 /**
    497  * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
    498  *
    499  * @vmw_tt: Pointer to a struct vmw_ttm_tt
    500  *
    501  * Tear down any previously set up device DMA mappings and free
    502  * any storage space allocated for them. If there are no mappings set up,
    503  * this function is a NOP.
    504  */
    505 static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
    506 {
    507 	struct vmw_private *dev_priv = vmw_tt->dev_priv;
    508 
    509 	if (!vmw_tt->vsgt.sgt)
    510 		return;
    511 
    512 	switch (dev_priv->map_mode) {
    513 	case vmw_dma_map_bind:
    514 	case vmw_dma_map_populate:
    515 		vmw_ttm_unmap_from_dma(vmw_tt);
    516 		sg_free_table(vmw_tt->vsgt.sgt);
    517 		vmw_tt->vsgt.sgt = NULL;
    518 		ttm_mem_global_free(vmw_mem_glob(dev_priv),
    519 				    vmw_tt->sg_alloc_size);
    520 		break;
    521 	default:
    522 		break;
    523 	}
    524 	vmw_tt->mapped = false;
    525 }
    526 
    527 
    528 /**
    529  * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
    530  *
    531  * @bo: Pointer to a struct ttm_buffer_object
    532  *
     533  * Wrapper around vmw_ttm_map_dma() that takes a TTM buffer object pointer
     534  * instead of a pointer to a struct vmw_ttm_tt as argument.
    535  * Note that the buffer object must be either pinned or reserved before
    536  * calling this function.
    537  */
    538 int vmw_bo_map_dma(struct ttm_buffer_object *bo)
    539 {
    540 	struct vmw_ttm_tt *vmw_tt =
    541 		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
    542 
    543 	return vmw_ttm_map_dma(vmw_tt);
    544 }
    545 
    546 
    547 /**
     548  * vmw_bo_unmap_dma - Tear down any device DMA mappings for buffer object pages
    549  *
    550  * @bo: Pointer to a struct ttm_buffer_object
    551  *
     552  * Wrapper around vmw_ttm_unmap_dma() that takes a TTM buffer object pointer
     553  * instead of a pointer to a struct vmw_ttm_tt as argument.
    554  */
    555 void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
    556 {
    557 	struct vmw_ttm_tt *vmw_tt =
    558 		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
    559 
    560 	vmw_ttm_unmap_dma(vmw_tt);
    561 }
    562 
    563 
    564 /**
    565  * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
    566  * TTM buffer object
    567  *
    568  * @bo: Pointer to a struct ttm_buffer_object
    569  *
    570  * Returns a pointer to a struct vmw_sg_table object. The object should
    571  * not be freed after use.
    572  * Note that for the device addresses to be valid, the buffer object must
    573  * either be reserved or pinned.
    574  */
    575 const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
    576 {
    577 	struct vmw_ttm_tt *vmw_tt =
    578 		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
    579 
    580 	return &vmw_tt->vsgt;
    581 }
    582 
    583 
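/**
 * vmw_ttm_bind - ttm_backend_func bind callback
 *
 * @ttm: Pointer to the struct ttm_tt being bound.
 * @bo_mem: The struct ttm_mem_reg describing the memory region to bind to.
 *
 * Maps the TTM pages for DMA and binds them to either a GMR or a MOB,
 * depending on the target memory type.
 */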
    584 static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
    585 {
    586 	struct vmw_ttm_tt *vmw_be =
    587 		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
    588 	int ret;
    589 
    590 	ret = vmw_ttm_map_dma(vmw_be);
    591 	if (unlikely(ret != 0))
    592 		return ret;
    593 
    594 	vmw_be->gmr_id = bo_mem->start;
    595 	vmw_be->mem_type = bo_mem->mem_type;
    596 
    597 	switch (bo_mem->mem_type) {
    598 	case VMW_PL_GMR:
    599 		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
    600 				    ttm->num_pages, vmw_be->gmr_id);
    601 	case VMW_PL_MOB:
    602 		if (unlikely(vmw_be->mob == NULL)) {
    603 			vmw_be->mob =
    604 				vmw_mob_create(ttm->num_pages);
    605 			if (unlikely(vmw_be->mob == NULL))
    606 				return -ENOMEM;
    607 		}
    608 
    609 		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
    610 				    &vmw_be->vsgt, ttm->num_pages,
    611 				    vmw_be->gmr_id);
    612 	default:
    613 		BUG();
    614 	}
    615 	return 0;
    616 }
    617 
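/**
 * vmw_ttm_unbind - ttm_backend_func unbind callback
 *
 * @ttm: Pointer to the struct ttm_tt being unbound.
 *
 * Unbinds the GMR or MOB binding; in the vmw_dma_map_bind mode the DMA
 * mappings set up at bind time are also torn down.
 */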
    618 static int vmw_ttm_unbind(struct ttm_tt *ttm)
    619 {
    620 	struct vmw_ttm_tt *vmw_be =
    621 		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
    622 
    623 	switch (vmw_be->mem_type) {
    624 	case VMW_PL_GMR:
    625 		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
    626 		break;
    627 	case VMW_PL_MOB:
    628 		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
    629 		break;
    630 	default:
    631 		BUG();
    632 	}
    633 
    634 	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
    635 		vmw_ttm_unmap_dma(vmw_be);
    636 
    637 	return 0;
    638 }
    639 
    640 
    641 static void vmw_ttm_destroy(struct ttm_tt *ttm)
    642 {
    643 	struct vmw_ttm_tt *vmw_be =
    644 		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
    645 
    646 	vmw_ttm_unmap_dma(vmw_be);
    647 	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
    648 		ttm_dma_tt_fini(&vmw_be->dma_ttm);
    649 	else
    650 		ttm_tt_fini(ttm);
    651 
    652 	if (vmw_be->mob)
    653 		vmw_mob_destroy(vmw_be->mob);
    654 
    655 	kfree(vmw_be);
    656 }
    657 
    658 
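/**
 * vmw_ttm_populate - allocate backing pages for a struct ttm_tt
 *
 * @ttm: Pointer to the struct ttm_tt to populate.
 * @ctx: TTM operation context.
 *
 * In the vmw_dma_alloc_coherent mode the pages come from the coherent DMA
 * pool, with the dma_addr_t array accounted against the TTM memory global;
 * otherwise the regular TTM page pool is used.
 */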
    659 static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
    660 {
    661 	struct vmw_ttm_tt *vmw_tt =
    662 		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
    663 	struct vmw_private *dev_priv = vmw_tt->dev_priv;
    664 	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
    665 	int ret;
    666 
    667 	if (ttm->state != tt_unpopulated)
    668 		return 0;
    669 
    670 	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
    671 		size_t size =
    672 			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
    673 		ret = ttm_mem_global_alloc(glob, size, ctx);
    674 		if (unlikely(ret != 0))
    675 			return ret;
    676 
    677 		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
    678 					ctx);
    679 		if (unlikely(ret != 0))
    680 			ttm_mem_global_free(glob, size);
    681 	} else
    682 		ret = ttm_pool_populate(ttm, ctx);
    683 
    684 	return ret;
    685 }
    686 
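/**
 * vmw_ttm_unpopulate - free the backing pages of a struct ttm_tt
 *
 * @ttm: Pointer to the struct ttm_tt to unpopulate.
 *
 * Destroys any remaining MOB, tears down DMA mappings and releases the pages
 * to the pool they were allocated from, undoing vmw_ttm_populate().
 */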
    687 static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
    688 {
    689 	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
    690 						 dma_ttm.ttm);
    691 	struct vmw_private *dev_priv = vmw_tt->dev_priv;
    692 	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
    693 
    694 
    695 	if (vmw_tt->mob) {
    696 		vmw_mob_destroy(vmw_tt->mob);
    697 		vmw_tt->mob = NULL;
    698 	}
    699 
    700 	vmw_ttm_unmap_dma(vmw_tt);
    701 	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
    702 		size_t size =
    703 			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
    704 
    705 		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
    706 		ttm_mem_global_free(glob, size);
    707 	} else
    708 		ttm_pool_unpopulate(ttm);
    709 }
    710 
    711 static struct ttm_backend_func vmw_ttm_func = {
    712 	.bind = vmw_ttm_bind,
    713 	.unbind = vmw_ttm_unbind,
    714 	.destroy = vmw_ttm_destroy,
    715 };
    716 
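/**
 * vmw_ttm_tt_create - ttm_bo_driver ttm_tt_create callback
 *
 * @bo: The buffer object to create a struct ttm_tt for.
 * @page_flags: TTM page flags.
 *
 * Allocates a struct vmw_ttm_tt and initializes it as a DMA-aware TTM when
 * the device uses the vmw_dma_alloc_coherent mapping mode, and as a plain
 * TTM otherwise.  Returns NULL on failure.
 */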
    717 static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
    718 					uint32_t page_flags)
    719 {
    720 	struct vmw_ttm_tt *vmw_be;
    721 	int ret;
    722 
    723 	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
    724 	if (!vmw_be)
    725 		return NULL;
    726 
    727 	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
    728 	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
    729 	vmw_be->mob = NULL;
    730 
    731 	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
    732 		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
    733 	else
    734 		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
    735 	if (unlikely(ret != 0))
    736 		goto out_no_init;
    737 
    738 	return &vmw_be->dma_ttm.ttm;
    739 out_no_init:
    740 	kfree(vmw_be);
    741 	return NULL;
    742 }
    743 
    744 static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
    745 {
    746 	return 0;
    747 }
    748 
    749 static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
    750 		      struct ttm_mem_type_manager *man)
    751 {
    752 	switch (type) {
    753 	case TTM_PL_SYSTEM:
    754 		/* System memory */
    755 
    756 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
    757 		man->available_caching = TTM_PL_FLAG_CACHED;
    758 		man->default_caching = TTM_PL_FLAG_CACHED;
    759 		break;
    760 	case TTM_PL_VRAM:
    761 		/* "On-card" video ram */
    762 		man->func = &ttm_bo_manager_func;
    763 		man->gpu_offset = 0;
    764 		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
    765 		man->available_caching = TTM_PL_FLAG_CACHED;
    766 		man->default_caching = TTM_PL_FLAG_CACHED;
    767 		break;
    768 	case VMW_PL_GMR:
    769 	case VMW_PL_MOB:
    770 		/*
     771 		 * "Guest Memory Regions" is an aperture-like feature with
     772 		 *  one slot per bo. There is an upper limit on both the
     773 		 *  number of slots and the bo size.
    774 		 */
    775 		man->func = &vmw_gmrid_manager_func;
    776 		man->gpu_offset = 0;
    777 		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
    778 		man->available_caching = TTM_PL_FLAG_CACHED;
    779 		man->default_caching = TTM_PL_FLAG_CACHED;
    780 		break;
    781 	default:
    782 		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
    783 		return -EINVAL;
    784 	}
    785 	return 0;
    786 }
    787 
    788 static void vmw_evict_flags(struct ttm_buffer_object *bo,
    789 		     struct ttm_placement *placement)
    790 {
    791 	*placement = vmw_sys_placement;
    792 }
    793 
    794 static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
    795 {
    796 	struct ttm_object_file *tfile =
    797 		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
    798 
    799 	return vmw_user_bo_verify_access(bo, tfile);
    800 }
    801 
    802 static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
    803 {
    804 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
    805 	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
    806 
    807 	mem->bus.addr = NULL;
    808 	mem->bus.is_iomem = false;
    809 	mem->bus.offset = 0;
    810 	mem->bus.size = mem->num_pages << PAGE_SHIFT;
    811 	mem->bus.base = 0;
    812 	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
    813 		return -EINVAL;
    814 	switch (mem->mem_type) {
    815 	case TTM_PL_SYSTEM:
    816 	case VMW_PL_GMR:
    817 	case VMW_PL_MOB:
    818 		return 0;
    819 	case TTM_PL_VRAM:
    820 		mem->bus.offset = mem->start << PAGE_SHIFT;
    821 		mem->bus.base = dev_priv->vram_start;
    822 		mem->bus.is_iomem = true;
    823 		break;
    824 	default:
    825 		return -EINVAL;
    826 	}
    827 	return 0;
    828 }
    829 
    830 static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
    831 {
    832 }
    833 
    834 static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
    835 {
    836 	return 0;
    837 }
    838 
    839 /**
     840  * vmw_move_notify - TTM move_notify callback
     841  *
     842  * @bo: The TTM buffer object about to move.
     843  * @mem: The struct ttm_mem_reg describing the memory region
     844  *       the buffer is moving to.
    845  *
    846  * Calls move_notify for all subsystems needing it.
    847  * (currently only resources).
    848  */
    849 static void vmw_move_notify(struct ttm_buffer_object *bo,
    850 			    bool evict,
    851 			    struct ttm_mem_reg *mem)
    852 {
    853 	vmw_bo_move_notify(bo, mem);
    854 	vmw_query_move_notify(bo, mem);
    855 }
    856 
    857 
    858 /**
     859  * vmw_swap_notify - TTM swap_notify callback
    860  *
    861  * @bo: The TTM buffer object about to be swapped out.
    862  */
    863 static void vmw_swap_notify(struct ttm_buffer_object *bo)
    864 {
    865 	vmw_bo_swap_notify(bo);
    866 	(void) ttm_bo_wait(bo, false, false);
    867 }
    868 
    869 
    870 struct ttm_bo_driver vmw_bo_driver = {
    871 	.ttm_tt_create = &vmw_ttm_tt_create,
    872 	.ttm_tt_populate = &vmw_ttm_populate,
    873 	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
    874 	.invalidate_caches = vmw_invalidate_caches,
    875 	.init_mem_type = vmw_init_mem_type,
    876 	.eviction_valuable = ttm_bo_eviction_valuable,
    877 	.evict_flags = vmw_evict_flags,
    878 	.move = NULL,
    879 	.verify_access = vmw_verify_access,
    880 	.move_notify = vmw_move_notify,
    881 	.swap_notify = vmw_swap_notify,
    882 	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
    883 	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
    884 	.io_mem_free = &vmw_ttm_io_mem_free,
    885 };
    886