Home | History | Annotate | Line # | Download | only in ttm
ttm_tt.c revision 1.10.2.1
      1 /*	$NetBSD: ttm_tt.c,v 1.10.2.1 2018/09/06 06:56:34 pgoyette Exp $	*/
      2 
      3 /**************************************************************************
      4  *
      5  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
      6  * All Rights Reserved.
      7  *
      8  * Permission is hereby granted, free of charge, to any person obtaining a
      9  * copy of this software and associated documentation files (the
     10  * "Software"), to deal in the Software without restriction, including
     11  * without limitation the rights to use, copy, modify, merge, publish,
     12  * distribute, sub license, and/or sell copies of the Software, and to
     13  * permit persons to whom the Software is furnished to do so, subject to
     14  * the following conditions:
     15  *
     16  * The above copyright notice and this permission notice (including the
     17  * next paragraph) shall be included in all copies or substantial portions
     18  * of the Software.
     19  *
     20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     22  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     23  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
     24  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
     25  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
     26  * USE OR OTHER DEALINGS IN THE SOFTWARE.
     27  *
     28  **************************************************************************/
     29 /*
     30  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
     31  */
     32 
     33 #include <sys/cdefs.h>
     34 __KERNEL_RCSID(0, "$NetBSD: ttm_tt.c,v 1.10.2.1 2018/09/06 06:56:34 pgoyette Exp $");
     35 
     36 #define pr_fmt(fmt) "[TTM] " fmt
     37 
     38 #include <linux/sched.h>
     39 #include <linux/highmem.h>
     40 #include <linux/pagemap.h>
     41 #include <linux/shmem_fs.h>
     42 #include <linux/file.h>
     43 #include <linux/swap.h>
     44 #include <linux/slab.h>
     45 #include <linux/export.h>
     46 #include <linux/printk.h>
     47 #include <drm/drm_cache.h>
     48 #include <drm/drm_mem_util.h>
     49 #include <drm/ttm/ttm_module.h>
     50 #include <drm/ttm/ttm_bo_driver.h>
     51 #include <drm/ttm/ttm_placement.h>
     52 #include <drm/ttm/ttm_page_alloc.h>
     53 #include <drm/bus_dma_hacks.h>
     54 
     55 /**
     56  * Allocates storage for pointers to the pages that back the ttm.
     57  */
     58 static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
     59 {
     60 	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
     61 }
     62 
/*
 * Allocate the page directory for a DMA-capable ttm.  On Linux a single
 * allocation is carved into three parallel arrays (pages[], then
 * cpu_address[], then dma_address[]); on NetBSD the CPU/DMA addresses
 * are tracked by bus_dma instead, so only the page array is needed.
 * On failure ttm->ttm.pages is left NULL; callers check it.
 */
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
#ifdef __NetBSD__		/* cpu/dma addrs handled by bus_dma */
	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
	    sizeof(*ttm->ttm.pages));
#else
	/* One allocation, three num_pages-sized arrays laid out back
	 * to back; the pointer arithmetic below slices it up. */
	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
					  sizeof(*ttm->ttm.pages) +
					  sizeof(*ttm->dma_address) +
					  sizeof(*ttm->cpu_address));
	ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
	ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
#endif
}
     77 
#ifdef CONFIG_X86
/*
 * Switch one page's kernel linear mapping from caching state c_old to
 * c_new.  On x86 a page's current memtype must be released (reset to
 * writeback) before a new non-default type can be assigned.
 * Returns 0 on success or the error from the set_* call.
 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
#ifdef __NetBSD__
	/* NetBSD does not adjust kernel-map caching attributes here. */
	return 0;
#else
	int ret = 0;

	/* Highmem pages have no permanent kernel mapping to retype. */
	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
#endif
}
#else /* CONFIG_X86 */
/* Non-x86: linear-map caching attributes are left untouched. */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */
    116 
/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 *
 * Returns 0 on success.  On failure, every page converted so far is
 * switched back to the previous caching state before the error from
 * ttm_tt_set_page_caching() is returned.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	/* Flush CPU caches before leaving the cached state. */
	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	/* Roll back pages [0, i) to the original caching state;
	 * rollback errors are deliberately ignored. */
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}
    167 
    168 int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
    169 {
    170 	enum ttm_caching_state state;
    171 
    172 	if (placement & TTM_PL_FLAG_WC)
    173 		state = tt_wc;
    174 	else if (placement & TTM_PL_FLAG_UNCACHED)
    175 		state = tt_uncached;
    176 	else
    177 		state = tt_cached;
    178 
    179 	return ttm_tt_set_caching(ttm, state);
    180 }
    181 EXPORT_SYMBOL(ttm_tt_set_placement_caching);
    182 
/*
 * Tear down a ttm_tt: unbind from the aperture if bound, release the
 * backing pages if populated, drop the shmem swap backing (Linux only;
 * on NetBSD the swap uao is released in ttm_tt_fini/ttm_dma_tt_fini),
 * then invoke the backend destructor.  A NULL ttm is tolerated.
 */
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (unlikely(ttm == NULL))
		return;

	if (ttm->state == tt_bound) {
		ttm_tt_unbind(ttm);
	}

	if (ttm->state == tt_unbound)
		ttm_tt_unpopulate(ttm);

#ifndef __NetBSD__
	/* Only drop the swap file if the caller doesn't own it. */
	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
#endif
	ttm->func->destroy(ttm);
}
    204 
/*
 * Initialize a driver-embedded ttm_tt for an object of `size' bytes.
 * Allocates the page directory and, on NetBSD, the anonymous uvm
 * object that serves as pageable backing store.  Returns 0 on success
 * or -ENOMEM if the page directory cannot be allocated.
 */
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
#ifdef __NetBSD__
	WARN(size == 0, "zero-size allocation in %s, please file a NetBSD PR",
	    __func__);	/* paranoia -- can't prove in five minutes */
	size = MAX(size, 1);
	/* Constrain the uao's pages to the device's DMA'able freelist. */
	ttm->swap_storage = uao_create(roundup2(size, PAGE_SIZE), 0);
	uao_set_pgfl(ttm->swap_storage, bus_dmamem_pgfl(bdev->dmat));
#else
	ttm->swap_storage = NULL;
#endif
	TAILQ_INIT(&ttm->pglist);

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);
    236 
/*
 * Release what ttm_tt_init allocated: the NetBSD swap uao and the
 * page directory.  Counterpart of ttm_tt_init.
 */
void ttm_tt_fini(struct ttm_tt *ttm)
{
#ifdef __NetBSD__
	uao_detach(ttm->swap_storage);
	ttm->swap_storage = NULL;
#endif
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);
    247 
    248 int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
    249 		unsigned long size, uint32_t page_flags,
    250 		struct page *dummy_read_page)
    251 {
    252 	struct ttm_tt *ttm = &ttm_dma->ttm;
    253 
    254 	ttm->bdev = bdev;
    255 	ttm->glob = bdev->glob;
    256 	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    257 	ttm->caching_state = tt_cached;
    258 	ttm->page_flags = page_flags;
    259 	ttm->dummy_read_page = dummy_read_page;
    260 	ttm->state = tt_unpopulated;
    261 #ifdef __NetBSD__
    262 	WARN(size == 0, "zero-size allocation in %s, please file a NetBSD PR",
    263 	    __func__);	/* paranoia -- can't prove in five minutes */
    264 	size = MAX(size, 1);
    265 	ttm->swap_storage = uao_create(roundup2(size, PAGE_SIZE), 0);
    266 	uao_set_pgfl(ttm->swap_storage, bus_dmamem_pgfl(bdev->dmat));
    267 #else
    268 	ttm->swap_storage = NULL;
    269 #endif
    270 	TAILQ_INIT(&ttm->pglist);
    271 
    272 	INIT_LIST_HEAD(&ttm_dma->pages_list);
    273 	ttm_dma_tt_alloc_page_directory(ttm_dma);
    274 #ifdef __NetBSD__
    275     {
    276 	int error;
    277 
    278 	if (ttm->num_pages > (SIZE_MAX /
    279 		MIN(sizeof(ttm_dma->dma_segs[0]), PAGE_SIZE))) {
    280 		error = ENOMEM;
    281 		goto fail0;
    282 	}
    283 	ttm_dma->dma_segs = kmem_alloc((ttm->num_pages *
    284 		sizeof(ttm_dma->dma_segs[0])), KM_SLEEP);
    285 	error = bus_dmamap_create(ttm->bdev->dmat,
    286 	    (ttm->num_pages * PAGE_SIZE), ttm->num_pages, PAGE_SIZE, 0,
    287 	    BUS_DMA_WAITOK, &ttm_dma->dma_address);
    288 	if (error)
    289 		goto fail1;
    290 
    291 	return 0;
    292 
    293 fail2: __unused
    294 	bus_dmamap_destroy(ttm->bdev->dmat, ttm_dma->dma_address);
    295 fail1:	kmem_free(ttm_dma->dma_segs, (ttm->num_pages *
    296 		sizeof(ttm_dma->dma_segs[0])));
    297 fail0:	KASSERT(error);
    298 	drm_free_large(ttm->pages);
    299 	uao_detach(ttm->swap_storage);
    300 	/* XXX errno NetBSD->Linux */
    301 	return -error;
    302     }
    303 #else
    304 	if (!ttm->pages) {
    305 		ttm_tt_destroy(ttm);
    306 		pr_err("Failed allocating page table\n");
    307 		return -ENOMEM;
    308 	}
    309 	return 0;
    310 #endif
    311 }
    312 EXPORT_SYMBOL(ttm_dma_tt_init);
    313 
/*
 * Release what ttm_dma_tt_init allocated: swap uao, page directory,
 * and (NetBSD) the DMA map and segment array.
 */
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

#ifdef __NetBSD__
	uao_detach(ttm->swap_storage);
	ttm->swap_storage = NULL;
#endif
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
#ifdef __NetBSD__
	bus_dmamap_destroy(ttm->bdev->dmat, ttm_dma->dma_address);
	kmem_free(ttm_dma->dma_segs, (ttm->num_pages *
		sizeof(ttm_dma->dma_segs[0])));
#else
	/* cpu_address/dma_address point into the pages allocation
	 * freed above; just clear the stale pointers. */
	ttm_dma->cpu_address = NULL;
	ttm_dma->dma_address = NULL;
#endif
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
    334 
    335 void ttm_tt_unbind(struct ttm_tt *ttm)
    336 {
    337 	int ret __diagused;
    338 
    339 	if (ttm->state == tt_bound) {
    340 		ret = ttm->func->unbind(ttm);
    341 		BUG_ON(ret);
    342 		ttm->state = tt_unbound;
    343 	}
    344 }
    345 
    346 int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
    347 {
    348 	int ret = 0;
    349 
    350 	if (!ttm)
    351 		return -EINVAL;
    352 
    353 	if (ttm->state == tt_bound)
    354 		return 0;
    355 
    356 	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
    357 	if (ret)
    358 		return ret;
    359 
    360 	ret = ttm->func->bind(ttm, bo_mem);
    361 	if (unlikely(ret != 0))
    362 		return ret;
    363 
    364 	ttm->state = tt_bound;
    365 
    366 	return 0;
    367 }
    368 EXPORT_SYMBOL(ttm_tt_bind);
    369 
    370 #ifdef __NetBSD__
/*
 * ttm_tt_wire(ttm)
 *
 *	Wire the uvm pages of ttm and fill the ttm page array.  ttm
 *	must be unpopulated, and must be marked swapped.  This does not
 *	change either state -- the caller is expected to include it
 *	among other operations for such a state transition.
 *
 *	Returns 0 on success, negative (Linux-style) errno on failure.
 */
int
ttm_tt_wire(struct ttm_tt *ttm)
{
	struct uvm_object *uobj = ttm->swap_storage;
	struct vm_page *page;
	unsigned i;
	int error;

	KASSERTMSG((ttm->state == tt_unpopulated),
	    "ttm_tt %p must be unpopulated for wiring, but state=%d",
	    ttm, (int)ttm->state);
	KASSERT(ISSET(ttm->page_flags, TTM_PAGE_FLAG_SWAPPED));
	KASSERT(uobj != NULL);

	error = uvm_obj_wirepages(uobj, 0, (ttm->num_pages << PAGE_SHIFT),
	    &ttm->pglist);
	if (error)
		/* XXX errno NetBSD->Linux */
		return -error;

	/* Translate the wired vm_pages into the Linux-style page
	 * array; struct page embeds struct vm_page as p_vmp. */
	i = 0;
	TAILQ_FOREACH(page, &ttm->pglist, pageq.queue) {
		KASSERT(i < ttm->num_pages);
		KASSERT(ttm->pages[i] == NULL);
		ttm->pages[i] = container_of(page, struct page, p_vmp);
		i++;
	}
	KASSERT(i == ttm->num_pages);

	/* Success!  */
	return 0;
}
    411 
    412 /*
    413  * ttm_tt_unwire(ttm)
    414  *
    415  *	Nullify the ttm page array and unwire the uvm pages of ttm.
    416  *	ttm must be unbound and must be marked swapped.  This does not
    417  *	change either state -- the caller is expected to include it
    418  *	among other operations for such a state transition.
    419  */
    420 void
    421 ttm_tt_unwire(struct ttm_tt *ttm)
    422 {
    423 	struct uvm_object *uobj = ttm->swap_storage;
    424 	unsigned i;
    425 
    426 	KASSERTMSG((ttm->state == tt_unbound),
    427 	    "ttm_tt %p must be unbound for unwiring, but state=%d",
    428 	    ttm, (int)ttm->state);
    429 	KASSERT(!ISSET(ttm->page_flags, TTM_PAGE_FLAG_SWAPPED));
    430 	KASSERT(uobj != NULL);
    431 
    432 	uvm_obj_unwirepages(uobj, 0, (ttm->num_pages << PAGE_SHIFT));
    433 	for (i = 0; i < ttm->num_pages; i++)
    434 		ttm->pages[i] = NULL;
    435 }
    436 #endif
    437 
    438 #ifndef __NetBSD__
    439 int ttm_tt_swapin(struct ttm_tt *ttm)
    440 {
    441 	struct address_space *swap_space;
    442 	struct file *swap_storage;
    443 	struct page *from_page;
    444 	struct page *to_page;
    445 	int i;
    446 	int ret = -ENOMEM;
    447 
    448 	swap_storage = ttm->swap_storage;
    449 	BUG_ON(swap_storage == NULL);
    450 
    451 	swap_space = file_inode(swap_storage)->i_mapping;
    452 
    453 	for (i = 0; i < ttm->num_pages; ++i) {
    454 		from_page = shmem_read_mapping_page(swap_space, i);
    455 		if (IS_ERR(from_page)) {
    456 			ret = PTR_ERR(from_page);
    457 			goto out_err;
    458 		}
    459 		to_page = ttm->pages[i];
    460 		if (unlikely(to_page == NULL))
    461 			goto out_err;
    462 
    463 		copy_highpage(to_page, from_page);
    464 		page_cache_release(from_page);
    465 	}
    466 
    467 	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
    468 		fput(swap_storage);
    469 	ttm->swap_storage = NULL;
    470 	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
    471 
    472 	return 0;
    473 out_err:
    474 	return ret;
    475 }
    476 #endif
    477 
/*
 * Swap the ttm's backing pages out and mark it swapped.  ttm must be
 * unpopulated or unbound and in the default (cached) caching state.
 * On NetBSD the backing uao already owns the pages, so this delegates
 * to the driver's swapout hook; on Linux the pages are copied into a
 * shmem file (a caller-provided persistent one, or a fresh one).
 * Returns 0 on success or a negative errno.
 */
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
#ifdef __NetBSD__

	KASSERTMSG((ttm->state == tt_unpopulated || ttm->state == tt_unbound),
	    "ttm_tt %p must be unpopulated or unbound for swapout,"
	    " but state=%d",
	    ttm, (int)ttm->state);
	KASSERTMSG((ttm->caching_state == tt_cached),
	    "ttm_tt %p must be cached for swapout, but caching_state=%d",
	    ttm, (int)ttm->caching_state);
	KASSERT(persistent_swap_storage == NULL);

	ttm->bdev->driver->ttm_tt_swapout(ttm);
	return 0;
#else
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistent_swap_storage;

	swap_space = file_inode(swap_storage)->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		/* Unpopulated slots have nothing to save. */
		if (unlikely(from_page == NULL))
			continue;
		to_page = shmem_read_mapping_page(swap_space, i);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	/* Contents are safely in shmem; release the backing pages. */
	ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
#endif
}
    546 
    547 static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
    548 {
    549 #ifndef __NetBSD__
    550 	pgoff_t i;
    551 	struct page **page = ttm->pages;
    552 
    553 	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
    554 		return;
    555 
    556 	for (i = 0; i < ttm->num_pages; ++i) {
    557 		(*page)->mapping = NULL;
    558 		(*page++)->index = 0;
    559 	}
    560 #endif
    561 }
    562 
    563 void ttm_tt_unpopulate(struct ttm_tt *ttm)
    564 {
    565 	if (ttm->state == tt_unpopulated)
    566 		return;
    567 
    568 	ttm_tt_clear_mapping(ttm);
    569 	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
    570 }
    571