ttm_tt.c revision 1.1.1.1
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drm_cache.h>
#include <drm/drm_mem_util.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}

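/**
 * Allocates storage for the page pointers and for the DMA addresses
 * that back a DMA-aware ttm.
 */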
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
	ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
					    sizeof(*ttm->dma_address));
}

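/*
 * Change the caching attribute of a single page in the kernel linear map.
 * Highmem pages have no linear mapping and are left untouched. A page that
 * is not in the default (cached) state is first switched back to write-back
 * to release its current memtype before the new attribute is applied.
 * On non-x86 this is a no-op.
 */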
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

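/*
 * Translate the caching flags of a placement into a ttm caching state
 * and apply it to the ttm.
 */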
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

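/*
 * Unbind and unpopulate a ttm, release any non-persistent swap storage,
 * then destroy it via the backend destroy() callback.
 */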
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (unlikely(ttm == NULL))
		return;

	if (ttm->state == tt_bound) {
		ttm_tt_unbind(ttm);
	}

	if (likely(ttm->pages != NULL)) {
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

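/*
 * Initialize a ttm for the given device and size and allocate its page
 * directory. Returns -ENOMEM if the page directory cannot be allocated.
 */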
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

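/*
 * Free the page directory allocated by ttm_tt_init().
 */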
void ttm_tt_fini(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

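/*
 * Initialize a DMA-aware ttm and allocate both its page directory and its
 * DMA address array. Returns -ENOMEM if either allocation fails.
 */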
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (!ttm->pages || !ttm_dma->dma_address) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

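/*
 * Free the page directory and DMA address array allocated by
 * ttm_dma_tt_init().
 */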
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	drm_free_large(ttm->pages);
	ttm->pages = NULL;
	drm_free_large(ttm_dma->dma_address);
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

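/*
 * Unbind a bound ttm via the backend unbind() callback and mark it
 * unbound.
 */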
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

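/*
 * Populate a ttm if necessary and bind it to the memory region described
 * by @bo_mem via the backend bind() callback.
 */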
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

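/*
 * Copy the contents of a ttm back in from its shmem swap storage, then
 * release the storage unless it is persistent.
 */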
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page(swap_space, i);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		copy_highpage(to_page, from_page);
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}

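/*
 * Copy the contents of a ttm out to shmem-backed swap storage and
 * unpopulate it. The caller may supply a persistent swap storage file;
 * otherwise an anonymous shmem file is set up.
 */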
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistent_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = shmem_read_mapping_page(swap_space, i);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}