/*	$NetBSD: ttm_tt.c,v 1.1.1.4 2021/12/18 20:15:53 riastradh Exp $	*/

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_tt.c,v 1.1.1.4 2021/12/18 20:15:53 riastradh Exp $");

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

/**
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	if (bdev->no_retry)
		page_flags |= TTM_PAGE_FLAG_NO_RETRY;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		bo->ttm = NULL;
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
			GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}

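/**
 * Allocates the page and DMA-address arrays for a DMA-aware ttm in a
 * single allocation; dma_address points just past the page array.
 */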
static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					sizeof(*ttm->ttm.pages) +
					sizeof(*ttm->dma_address),
					GFP_KERNEL | __GFP_ZERO);
	if (!ttm->ttm.pages)
		return -ENOMEM;
	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
	return 0;
}

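/**
 * Allocates only the DMA-address array, for ttms whose pages are supplied
 * through a scatter-gather table (TTM_PAGE_FLAG_SG).
 */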
static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
}

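/**
 * Sets the caching attribute of a single page in the kernel linear map,
 * transitioning through writeback when leaving a non-cached state.
 * Highmem pages are left untouched.
 */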
static int ttm_tt_set_page_caching(struct page *p,
				   enum ttm_caching_state c_old,
				   enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = ttm_set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = ttm_set_pages_wc(p, 1);
	else if (c_new == tt_uncached)
		ret = ttm_set_pages_uc(p, 1);

	return ret;
}

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

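/**
 * Translates TTM placement caching flags into a caching state and applies
 * it to all pages of the ttm.
 */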
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

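/**
 * Unbinds and unpopulates the ttm, releases any non-persistent swap
 * storage, and calls the backend destroy function.
 */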
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (ttm == NULL)
		return;

	ttm_tt_unbind(ttm);

	if (ttm->state == tt_unbound)
		ttm_tt_unpopulate(ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags)
{
	ttm->bdev = bo->bdev;
	ttm->num_pages = bo->num_pages;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
}

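/**
 * Initializes a ttm for the given BO and allocates its page directory.
 */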
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags)
{
	ttm_tt_init_fields(ttm, bo, page_flags);

	if (ttm_tt_alloc_page_directory(ttm)) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

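/**
 * Frees the page directory allocated by ttm_tt_init().
 */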
void ttm_tt_fini(struct ttm_tt *ttm)
{
	kvfree(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

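/**
 * Initializes a DMA-aware ttm for the given BO and allocates its combined
 * page/DMA-address directory.
 */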
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		    uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

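/**
 * Initializes a DMA-aware ttm, allocating only the DMA-address array when
 * the pages are provided through a scatter-gather table.
 */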
int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		   uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (ret) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

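/**
 * Frees the page/DMA-address directory; only one of the two arrays is a
 * separate allocation, depending on how the ttm was initialized.
 */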
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm_dma->dma_address);
	ttm->pages = NULL;
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

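/**
 * Unbinds the ttm via the backend unbind function if it is currently bound.
 */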
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

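/**
 * Populates the ttm if necessary and binds it to the given memory region
 * via the backend bind function.
 */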
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
		struct ttm_operation_ctx *ctx)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm_tt_populate(ttm, ctx);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

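/**
 * Copies the ttm's pages back from its shmem swap storage and drops the
 * swap file unless it is persistent.
 */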
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
		from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);

		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}

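/**
 * Copies the ttm's pages out to shmem swap storage (allocating a shmem
 * file unless a persistent one is supplied) and unpopulates the ttm.
 */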
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else {
		swap_storage = persistent_swap_storage;
	}

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);

		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}

static void ttm_tt_add_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
}

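/**
 * Allocates backing pages for the ttm, using the driver's populate hook if
 * one is provided, otherwise the common page pool, and records the device
 * mapping in each page.
 */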
int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm->bdev->driver->ttm_tt_populate)
		ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
	else
		ret = ttm_pool_populate(ttm, ctx);
	if (!ret)
		ttm_tt_add_mapping(ttm);
	return ret;
}

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
}

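/**
 * Clears the per-page mapping and releases the backing pages through the
 * driver's unpopulate hook or the common page pool.
 */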
void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return;

	ttm_tt_clear_mapping(ttm);
	if (ttm->bdev->driver->ttm_tt_unpopulate)
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	else
		ttm_pool_unpopulate(ttm);
}