/*	$NetBSD: ttm_tt.c,v 1.14 2021/12/19 01:49:50 riastradh Exp $	*/

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_tt.c,v 1.14 2021/12/19 01:49:50 riastradh Exp $");

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/bus_dma_hacks.h>
#include <drm/ttm/ttm_set_memory.h>

/**
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	if (bdev->no_retry)
		page_flags |= TTM_PAGE_FLAG_NO_RETRY;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		bo->ttm = NULL;
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}
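
/*
 * Example (editorial sketch, not part of the original file): the
 * bdev->driver->ttm_tt_create hook called above is typically
 * implemented by embedding the tt in a driver-private structure and
 * initializing it with ttm_dma_tt_init (defined below).  The mydrv_*
 * names are hypothetical.
 */
#if 0
static struct ttm_tt *
mydrv_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
{
	struct ttm_dma_tt *tt;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (tt == NULL)
		return NULL;
	tt->ttm.func = &mydrv_tt_backend;	/* driver's ttm_backend_func (hypothetical) */
	if (ttm_dma_tt_init(tt, bo, page_flags) != 0) {
		kfree(tt);
		return NULL;
	}
	return &tt->ttm;
}
#endif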

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
			GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}

static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	/*
	 * A single allocation holds the page pointer array followed by
	 * the matching DMA address array; dma_address points at its tail.
	 */
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					  sizeof(*ttm->ttm.pages) +
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->ttm.pages)
		return -ENOMEM;
	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
}

static int ttm_tt_set_page_caching(struct page *p,
				   enum ttm_caching_state c_old,
				   enum ttm_caching_state c_new)
{
#ifdef __NetBSD__
	return 0;
#else
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = ttm_set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = ttm_set_pages_wc(p, 1);
	else if (c_new == tt_uncached)
		ret = ttm_set_pages_uc(p, 1);

	return ret;
#endif
}

/*
 * Change the caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	/* Roll back the pages that were already switched.  */
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
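
/*
 * Example (editorial sketch): callers derive the caching state from a
 * placement's TTM_PL_FLAG_* bits, e.g. when moving a BO into a
 * write-combined placement:
 *
 *	ret = ttm_tt_set_placement_caching(bo->ttm, TTM_PL_FLAG_WC);
 */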

void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (ttm == NULL)
		return;

	ttm_tt_unbind(ttm);

	if (ttm->state == tt_unbound)
		ttm_tt_unpopulate(ttm);

#ifndef __NetBSD__
	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
#endif
	ttm->func->destroy(ttm);
}

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags)
{
	ttm->bdev = bo->bdev;
	ttm->num_pages = bo->num_pages;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->state = tt_unpopulated;
#ifdef __NetBSD__
	WARN(bo->num_pages == 0,
	    "zero-size allocation in %s, please file a NetBSD PR",
	    __func__);	/* paranoia -- can't prove in five minutes */
	ttm->swap_storage = uao_create(MAX(1, bo->num_pages), 0);
	uao_set_pgfl(ttm->swap_storage, bus_dmamem_pgfl(ttm->bdev->dmat));
	TAILQ_INIT(&ttm->pglist);
#else
	ttm->swap_storage = NULL;
#endif
	ttm->sg = bo->sg;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags)
{
	ttm_tt_init_fields(ttm, bo, page_flags);

	if (ttm_tt_alloc_page_directory(ttm)) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	kvfree(ttm->pages);
	ttm->pages = NULL;
#ifdef __NetBSD__
	uao_detach(ttm->swap_storage);
	ttm->swap_storage = NULL;
#endif
}
EXPORT_SYMBOL(ttm_tt_fini);
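
/*
 * Example (editorial sketch): ttm_tt_fini/ttm_dma_tt_fini undo
 * ttm_tt_init/ttm_dma_tt_init, so the ttm->func->destroy hook matching
 * the create sketch above would look roughly like this (mydrv_*
 * hypothetical):
 */
#if 0
static void
mydrv_ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *tt = container_of(ttm, struct ttm_dma_tt, ttm);

	ttm_dma_tt_fini(tt);
	kfree(tt);
}
#endif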

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		    uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
#ifdef __NetBSD__
    {
	int error;

	/* Guard the size computations below against overflow.  */
	if (ttm->num_pages > (SIZE_MAX /
		MIN(sizeof(ttm_dma->dma_segs[0]), PAGE_SIZE))) {
		error = ENOMEM;
		goto fail0;
	}
	ttm_dma->dma_segs = kmem_alloc((ttm->num_pages *
		sizeof(ttm_dma->dma_segs[0])), KM_SLEEP);
	error = bus_dmamap_create(ttm->bdev->dmat,
	    (ttm->num_pages * PAGE_SIZE), ttm->num_pages, PAGE_SIZE, 0,
	    BUS_DMA_WAITOK, &ttm_dma->dma_address);
	if (error)
		goto fail1;

	return 0;

fail2: __unused
	bus_dmamap_destroy(ttm->bdev->dmat, ttm_dma->dma_address);
fail1:	kmem_free(ttm_dma->dma_segs, (ttm->num_pages *
		sizeof(ttm_dma->dma_segs[0])));
fail0:	KASSERT(error);
	drm_free_large(ttm->pages);
	uao_detach(ttm->swap_storage);
	/* XXX errno NetBSD->Linux */
	return -error;
    }
#else
	return 0;
#endif
}
EXPORT_SYMBOL(ttm_dma_tt_init);
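
/*
 * Example (editorial sketch): on NetBSD the dma map created above is
 * not loaded here; once the backing pages exist, the page allocator
 * fills ttm_dma->dma_segs and loads the map along these lines (nsegs
 * hypothetical):
 *
 *	error = bus_dmamap_load_raw(ttm->bdev->dmat, ttm_dma->dma_address,
 *	    ttm_dma->dma_segs, nsegs, ttm->num_pages << PAGE_SHIFT,
 *	    BUS_DMA_WAITOK);
 */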

int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		   uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (ret) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

#ifdef __NetBSD__
	bus_dmamap_destroy(ttm->bdev->dmat, ttm_dma->dma_address);
	kmem_free(ttm_dma->dma_segs, (ttm->num_pages *
		sizeof(ttm_dma->dma_segs[0])));
#endif

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm_dma->dma_address);
	ttm->pages = NULL;

#ifdef __NetBSD__
	uao_detach(ttm->swap_storage);
	ttm->swap_storage = NULL;
#endif

	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret __diagused;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
		struct ttm_operation_ctx *ctx)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm_tt_populate(ttm, ctx);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
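
/*
 * Example (editorial sketch): ttm_tt_bind is normally reached from the
 * BO move/validate path once a GTT-type placement has been chosen:
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true, };
 *	ret = ttm_tt_bind(bo->ttm, &bo->mem, &ctx);
 *	if (unlikely(ret != 0))
 *		return ret;
 */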

#ifdef __NetBSD__
/*
 * ttm_tt_wire(ttm)
 *
 *	Wire the uvm pages of ttm and fill the ttm page array.  ttm
 *	must be unpopulated, and must be marked swapped.  This does not
 *	change either state -- the caller is expected to include it
 *	among other operations for such a state transition.
 */
int
ttm_tt_wire(struct ttm_tt *ttm)
{
	struct uvm_object *uobj = ttm->swap_storage;
	struct vm_page *page;
	unsigned i;
	int error;

	KASSERTMSG((ttm->state == tt_unpopulated),
	    "ttm_tt %p must be unpopulated for wiring, but state=%d",
	    ttm, (int)ttm->state);
	KASSERT(ISSET(ttm->page_flags, TTM_PAGE_FLAG_SWAPPED));
	KASSERT(uobj != NULL);

	error = uvm_obj_wirepages(uobj, 0, (ttm->num_pages << PAGE_SHIFT),
	    &ttm->pglist);
	if (error)
		/* XXX errno NetBSD->Linux */
		return -error;

	i = 0;
	TAILQ_FOREACH(page, &ttm->pglist, pageq.queue) {
		KASSERT(i < ttm->num_pages);
		KASSERT(ttm->pages[i] == NULL);
		ttm->pages[i] = container_of(page, struct page, p_vmp);
		i++;
	}
	KASSERT(i == ttm->num_pages);

	/* Success!  */
	return 0;
}
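
/*
 * Example (editorial sketch): the swapped -> wired transition alluded
 * to above is the caller's job; a NetBSD swapin-style path would pair
 * the wire with the flag and state updates, roughly:
 *
 *	error = ttm_tt_wire(ttm);
 *	if (error)
 *		return error;
 *	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
 *	ttm->state = tt_unbound;
 */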

/*
 * ttm_tt_unwire(ttm)
 *
 *	Nullify the ttm page array and unwire the uvm pages of ttm.
 *	ttm must be unbound and must not be marked swapped.  This does
 *	not change either state -- the caller is expected to include it
 *	among other operations for such a state transition.
 */
void
ttm_tt_unwire(struct ttm_tt *ttm)
{
	struct uvm_object *uobj = ttm->swap_storage;
	unsigned i;

	KASSERTMSG((ttm->state == tt_unbound),
	    "ttm_tt %p must be unbound for unwiring, but state=%d",
	    ttm, (int)ttm->state);
	KASSERT(!ISSET(ttm->page_flags, TTM_PAGE_FLAG_SWAPPED));
	KASSERT(uobj != NULL);

	uvm_obj_unwirepages(uobj, 0, (ttm->num_pages << PAGE_SHIFT));
	for (i = 0; i < ttm->num_pages; i++)
		ttm->pages[i] = NULL;
}
#endif

#ifndef __NetBSD__
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
		from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);

		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}
#endif

int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
#ifdef __NetBSD__

	KASSERTMSG((ttm->state == tt_unpopulated || ttm->state == tt_unbound),
	    "ttm_tt %p must be unpopulated or unbound for swapout,"
	    " but state=%d",
	    ttm, (int)ttm->state);
	KASSERTMSG((ttm->caching_state == tt_cached),
	    "ttm_tt %p must be cached for swapout, but caching_state=%d",
	    ttm, (int)ttm->caching_state);
	KASSERT(persistent_swap_storage == NULL);

	ttm->bdev->driver->ttm_tt_swapout(ttm);
	return 0;
#else
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else {
		swap_storage = persistent_swap_storage;
	}

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);

		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
#endif
}

static void ttm_tt_add_mapping(struct ttm_tt *ttm)
{
#ifndef __NetBSD__
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
#endif
}

int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm->bdev->driver->ttm_tt_populate)
		ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
	else
#ifdef __NetBSD__
		panic("no ttm pool population");
#else
		ret = ttm_pool_populate(ttm, ctx);
#endif
	if (!ret)
		ttm_tt_add_mapping(ttm);
	return ret;
}
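
/*
 * Example (editorial sketch): callers populate with an operation
 * context that says how the allocation may behave under memory
 * pressure:
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = false,
 *		.no_wait_gpu = false,
 *	};
 *	ret = ttm_tt_populate(ttm, &ctx);
 */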

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
#ifndef __NetBSD__
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
#endif
}

void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return;

	ttm_tt_clear_mapping(ttm);
	if (ttm->bdev->driver->ttm_tt_unpopulate)
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	else
#ifdef __NetBSD__
		panic("no ttm pool unpopulation");
#else
		ttm_pool_unpopulate(ttm);
#endif
}