/* $NetBSD: ttm_tt.c,v 1.18 2021/12/19 12:29:16 riastradh Exp $ */

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_tt.c,v 1.18 2021/12/19 12:29:16 riastradh Exp $");

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/drm_mem_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/bus_dma_hacks.h>
#include <drm/ttm/ttm_set_memory.h>

/**
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
        struct ttm_bo_device *bdev = bo->bdev;
        uint32_t page_flags = 0;

        dma_resv_assert_held(bo->base.resv);

        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;

        if (bdev->no_retry)
                page_flags |= TTM_PAGE_FLAG_NO_RETRY;

        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
                break;
        case ttm_bo_type_kernel:
                break;
        case ttm_bo_type_sg:
                page_flags |= TTM_PAGE_FLAG_SG;
                break;
        default:
                bo->ttm = NULL;
                pr_err("Illegal buffer object type\n");
                return -EINVAL;
        }

        bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
        if (unlikely(bo->ttm == NULL))
                return -ENOMEM;

        return 0;
}

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
        ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
                        GFP_KERNEL | __GFP_ZERO);
        if (!ttm->pages)
                return -ENOMEM;
        return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *);

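/*
 * Allocates the combined page and DMA-address directory for a DMA ttm.
 * On NetBSD the page array and the bus_dma(9) map are created
 * separately; elsewhere both live in a single kvmalloc'd allocation,
 * with the DMA addresses stored directly after the page pointers.
 */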
static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
#ifdef __NetBSD__
        int r;

        /* Create array of pages at ttm->ttm.pages. */
        r = ttm_tt_alloc_page_directory(&ttm->ttm);
        if (r)
                return r;

        /* Create bus DMA map at ttm->dma_address. */
        r = ttm_sg_tt_alloc_page_directory(ttm);
        if (r)
                return r;

        /* Success! */
        return 0;
#else
        ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
                        sizeof(*ttm->ttm.pages) +
                        sizeof(*ttm->dma_address),
                        GFP_KERNEL | __GFP_ZERO);
        if (!ttm->ttm.pages)
                return -ENOMEM;
        ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
        return 0;
#endif
}

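/*
 * Allocates only the DMA-address directory: a bus_dma(9) map on
 * NetBSD, or a kvmalloc'd array of DMA addresses elsewhere.  Used on
 * the TTM_PAGE_FLAG_SG path in ttm_sg_tt_init, which gets its pages
 * from the buffer object's scatter-gather table rather than from a
 * page array of its own.
 */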
static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
#ifdef __NetBSD__
        ttm->dma_address = NULL;
        /* XXX errno NetBSD->Linux */
        return -bus_dmamap_create(ttm->ttm.bdev->dmat,
            ttm->ttm.num_pages << PAGE_SHIFT, ttm->ttm.num_pages, PAGE_SIZE, 0,
            BUS_DMA_WAITOK, &ttm->dma_address);
#else
        ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
                        sizeof(*ttm->dma_address),
                        GFP_KERNEL | __GFP_ZERO);
        if (!ttm->dma_address)
                return -ENOMEM;
        return 0;
#endif
}

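/*
 * Changes the caching attribute of a single page.  A page that is not
 * in the default (cached) state is first returned to writeback to
 * release its current memtype before the new attribute is applied.
 * No-op on NetBSD and for highmem pages.
 */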
static int ttm_tt_set_page_caching(struct page *p,
                                   enum ttm_caching_state c_old,
                                   enum ttm_caching_state c_new)
{
#ifdef __NetBSD__
        return 0;
#else
        int ret = 0;

        if (PageHighMem(p))
                return 0;

        if (c_old != tt_cached) {
                /* p isn't in the default caching state, set it to
                 * writeback first to free its current memtype. */

                ret = ttm_set_pages_wb(p, 1);
                if (ret)
                        return ret;
        }

        if (c_new == tt_wc)
                ret = ttm_set_pages_wc(p, 1);
        else if (c_new == tt_uncached)
                ret = ttm_set_pages_uc(p, 1);

        return ret;
#endif
}

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
                              enum ttm_caching_state c_state)
{
        int i, j;
        struct page *cur_page;
        int ret;

        if (ttm->caching_state == c_state)
                return 0;

        if (ttm->state == tt_unpopulated) {
                /* Change caching but don't populate */
                ttm->caching_state = c_state;
                return 0;
        }

        if (ttm->caching_state == tt_cached)
                drm_clflush_pages(ttm->pages, ttm->num_pages);

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                if (likely(cur_page != NULL)) {
                        ret = ttm_tt_set_page_caching(cur_page,
                                                      ttm->caching_state,
                                                      c_state);
                        if (unlikely(ret != 0))
                                goto out_err;
                }
        }

        ttm->caching_state = c_state;

        return 0;

out_err:
        for (j = 0; j < i; ++j) {
                cur_page = ttm->pages[j];
                if (likely(cur_page != NULL)) {
                        (void)ttm_tt_set_page_caching(cur_page, c_state,
                                                      ttm->caching_state);
                }
        }

        return ret;
}

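/*
 * Maps TTM placement flags to a caching state and applies it to the
 * ttm: TTM_PL_FLAG_WC selects write-combining, TTM_PL_FLAG_UNCACHED
 * selects uncached, and anything else falls back to cached.
 */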
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
        enum ttm_caching_state state;

        if (placement & TTM_PL_FLAG_WC)
                state = tt_wc;
        else if (placement & TTM_PL_FLAG_UNCACHED)
                state = tt_uncached;
        else
                state = tt_cached;

        return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

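/*
 * Unbinds and unpopulates the ttm, releases any non-persistent swap
 * storage, and finally calls the backend's destroy hook.  Safe to
 * call with a NULL ttm.
 */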
void ttm_tt_destroy(struct ttm_tt *ttm)
{
        if (ttm == NULL)
                return;

        ttm_tt_unbind(ttm);

        if (ttm->state == tt_unbound)
                ttm_tt_unpopulate(ttm);

#ifndef __NetBSD__
        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
            ttm->swap_storage)
                fput(ttm->swap_storage);

        ttm->swap_storage = NULL;
#endif
        ttm->func->destroy(ttm);
}

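/*
 * Common field initialization shared by ttm_tt_init, ttm_dma_tt_init,
 * and ttm_sg_tt_init.  On NetBSD the swap storage is a uvm anonymous
 * object created up front and constrained to a DMA-safe page
 * freelist, rather than a shmem file created lazily at swapout time.
 */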
static void ttm_tt_init_fields(struct ttm_tt *ttm,
                               struct ttm_buffer_object *bo,
                               uint32_t page_flags)
{
        ttm->bdev = bo->bdev;
        ttm->num_pages = bo->num_pages;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;
        ttm->state = tt_unpopulated;
#ifdef __NetBSD__
        WARN(bo->num_pages == 0,
            "zero-size allocation in %s, please file a NetBSD PR",
            __func__);  /* paranoia -- can't prove in five minutes */
        ttm->swap_storage = uao_create(PAGE_SIZE * MAX(1, bo->num_pages), 0);
        uao_set_pgfl(ttm->swap_storage, bus_dmamem_pgfl(ttm->bdev->dmat));
#else
        ttm->swap_storage = NULL;
#endif
        ttm->sg = bo->sg;
}

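/*
 * Initializes a plain ttm: fills the common fields and allocates the
 * page directory.  Destroys the ttm and returns -ENOMEM on failure.
 */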
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
                uint32_t page_flags)
{
        ttm_tt_init_fields(ttm, bo, page_flags);

        if (ttm_tt_alloc_page_directory(ttm)) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

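/*
 * Releases the page directory allocated by ttm_tt_init; on NetBSD
 * also detaches the uvm swap object created in ttm_tt_init_fields.
 */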
void ttm_tt_fini(struct ttm_tt *ttm)
{
        kvfree(ttm->pages);
        ttm->pages = NULL;
#ifdef __NetBSD__
        uao_detach(ttm->swap_storage);
        ttm->swap_storage = NULL;
#endif
}
EXPORT_SYMBOL(ttm_tt_fini);

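/*
 * Initializes a DMA ttm: common fields plus both the page directory
 * and the DMA-address directory.
 */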
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
                    uint32_t page_flags)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

        ttm_tt_init_fields(ttm, bo, page_flags);

        INIT_LIST_HEAD(&ttm_dma->pages_list);
        if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

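/*
 * Like ttm_dma_tt_init, but for a ttm that may be backed by a
 * scatter-gather table: with TTM_PAGE_FLAG_SG only the DMA-address
 * directory is allocated, since the pages come from bo->sg.
 */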
int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
                   uint32_t page_flags)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;
        int ret;

        ttm_tt_init_fields(ttm, bo, page_flags);

        INIT_LIST_HEAD(&ttm_dma->pages_list);
        if (page_flags & TTM_PAGE_FLAG_SG)
                ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
        else
                ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
        if (ret) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

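/*
 * Counterpart of ttm_dma_tt_init/ttm_sg_tt_init.  On NetBSD this
 * destroys the bus_dma(9) map, if any, before the common teardown;
 * elsewhere it frees whichever of the combined or separate
 * allocations was made.
 */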
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

#ifdef __NetBSD__
        if (ttm_dma->dma_address) {
                bus_dmamap_destroy(ttm->bdev->dmat, ttm_dma->dma_address);
                ttm_dma->dma_address = NULL;
        }
        ttm_tt_fini(ttm);
#else
        if (ttm->pages)
                kvfree(ttm->pages);
        else
                kvfree(ttm_dma->dma_address);
        ttm->pages = NULL;
        ttm_dma->dma_address = NULL;
#endif
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

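/*
 * Unbinds the ttm from the translation table via the backend's unbind
 * hook.  No-op unless the ttm is currently bound.
 */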
void ttm_tt_unbind(struct ttm_tt *ttm)
{
        int ret __diagused;

        if (ttm->state == tt_bound) {
                ret = ttm->func->unbind(ttm);
                BUG_ON(ret);
                ttm->state = tt_unbound;
        }
}

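/*
 * Populates the ttm if necessary and binds it to the given memory
 * region via the backend's bind hook.  No-op if already bound.
 */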
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
                struct ttm_operation_ctx *ctx)
{
        int ret = 0;

        if (!ttm)
                return -EINVAL;

        if (ttm->state == tt_bound)
                return 0;

        ret = ttm_tt_populate(ttm, ctx);
        if (ret)
                return ret;

        ret = ttm->func->bind(ttm, bo_mem);
        if (unlikely(ret != 0))
                return ret;

        ttm->state = tt_bound;

        return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

#ifdef __NetBSD__
/*
 * ttm_tt_wire(ttm)
 *
 * Wire the uvm pages of ttm and fill the ttm page array.  ttm
 * must be unpopulated, and must be marked swapped.  This does not
 * change either state -- the caller is expected to include it
 * among other operations for such a state transition.
 */
int
ttm_tt_wire(struct ttm_tt *ttm)
{
        struct uvm_object *uobj = ttm->swap_storage;
        struct vm_page *vm_page;
        unsigned i;
        int error;

        KASSERTMSG((ttm->state == tt_unpopulated),
            "ttm_tt %p must be unpopulated for wiring, but state=%d",
            ttm, (int)ttm->state);
        KASSERT(ISSET(ttm->page_flags, TTM_PAGE_FLAG_SWAPPED));
        KASSERT(uobj != NULL);

        error = uvm_obj_wirepages(uobj, 0, (ttm->num_pages << PAGE_SHIFT),
            NULL);
        if (error)
                /* XXX errno NetBSD->Linux */
                return -error;

        rw_enter(uobj->vmobjlock, RW_READER);
        for (i = 0; i < ttm->num_pages; i++) {
                vm_page = uvm_pagelookup(uobj, ptoa(i));
                ttm->pages[i] = container_of(vm_page, struct page, p_vmp);
        }
        rw_exit(uobj->vmobjlock);

        /* Success! */
        return 0;
}
/*
 * ttm_tt_unwire(ttm)
 *
 * Nullify the ttm page array and unwire the uvm pages of ttm.
 * ttm must be unbound and must not be marked swapped.  This does
 * not change either state -- the caller is expected to include it
 * among other operations for such a state transition.
 */
void
ttm_tt_unwire(struct ttm_tt *ttm)
{
        struct uvm_object *uobj = ttm->swap_storage;
        unsigned i;

        KASSERTMSG((ttm->state == tt_unbound),
            "ttm_tt %p must be unbound for unwiring, but state=%d",
            ttm, (int)ttm->state);
        KASSERT(!ISSET(ttm->page_flags, TTM_PAGE_FLAG_SWAPPED));
        KASSERT(uobj != NULL);

        uvm_obj_unwirepages(uobj, 0, (ttm->num_pages << PAGE_SHIFT));
        for (i = 0; i < ttm->num_pages; i++)
                ttm->pages[i] = NULL;
}
#endif

#ifndef __NetBSD__
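/*
 * Copies the ttm's pages back in from its shmem swap storage, then
 * drops the (non-persistent) swap file and clears the swapped flag.
 * The destination pages must already have been allocated.
 */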
int ttm_tt_swapin(struct ttm_tt *ttm)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        int i;
        int ret = -ENOMEM;

        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);

        swap_space = swap_storage->f_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                gfp_t gfp_mask = mapping_gfp_mask(swap_space);

                gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
                from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);

                if (IS_ERR(from_page)) {
                        ret = PTR_ERR(from_page);
                        goto out_err;
                }
                to_page = ttm->pages[i];
                if (unlikely(to_page == NULL))
                        goto out_err;

                copy_highpage(to_page, from_page);
                put_page(from_page);
        }

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
                fput(swap_storage);
        ttm->swap_storage = NULL;
        ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

        return 0;
out_err:
        return ret;
}
#endif

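/*
 * Copies the ttm's pages out to swap storage and unpopulates the ttm.
 * On NetBSD this defers to the driver's ttm_tt_swapout hook, since
 * the backing uvm object is permanent; elsewhere the pages are copied
 * into a shmem file (newly created unless a persistent one is
 * supplied).  The ttm must be unbound or unpopulated, and cached.
 */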
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
#ifdef __NetBSD__

        KASSERTMSG((ttm->state == tt_unpopulated || ttm->state == tt_unbound),
            "ttm_tt %p must be unpopulated or unbound for swapout,"
            " but state=%d",
            ttm, (int)ttm->state);
        KASSERTMSG((ttm->caching_state == tt_cached),
            "ttm_tt %p must be cached for swapout, but caching_state=%d",
            ttm, (int)ttm->caching_state);
        KASSERT(persistent_swap_storage == NULL);

        ttm->bdev->driver->ttm_tt_swapout(ttm);
        return 0;
#else
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        int i;
        int ret = -ENOMEM;

        BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
        BUG_ON(ttm->caching_state != tt_cached);

        if (!persistent_swap_storage) {
                swap_storage = shmem_file_setup("ttm swap",
                                                ttm->num_pages << PAGE_SHIFT,
                                                0);
                if (IS_ERR(swap_storage)) {
                        pr_err("Failed allocating swap storage\n");
                        return PTR_ERR(swap_storage);
                }
        } else {
                swap_storage = persistent_swap_storage;
        }

        swap_space = swap_storage->f_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                gfp_t gfp_mask = mapping_gfp_mask(swap_space);

                gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);

                from_page = ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;

                to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
                if (IS_ERR(to_page)) {
                        ret = PTR_ERR(to_page);
                        goto out_err;
                }
                copy_highpage(to_page, from_page);
                set_page_dirty(to_page);
                mark_page_accessed(to_page);
                put_page(to_page);
        }

        ttm_tt_unpopulate(ttm);
        ttm->swap_storage = swap_storage;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        if (persistent_swap_storage)
                ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

        return 0;
out_err:
        if (!persistent_swap_storage)
                fput(swap_storage);

        return ret;
#endif
}

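/*
 * Associates each backing page with the device's address space
 * mapping while the ttm is populated.  Skipped for SG-backed ttms;
 * not used on NetBSD.
 */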
static void ttm_tt_add_mapping(struct ttm_tt *ttm)
{
#ifndef __NetBSD__
        pgoff_t i;

        if (ttm->page_flags & TTM_PAGE_FLAG_SG)
                return;

        for (i = 0; i < ttm->num_pages; ++i)
                ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
#endif
}

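/*
 * Allocates backing pages for the ttm through the driver's
 * ttm_tt_populate hook, falling back to the generic pool allocator
 * where available; NetBSD drivers must supply the hook.  No-op if the
 * ttm is already populated.
 */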
int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        if (ttm->bdev->driver->ttm_tt_populate)
                ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
        else
#ifdef __NetBSD__
                panic("no ttm population");
#else
                ret = ttm_pool_populate(ttm, ctx);
#endif
        if (!ret)
                ttm_tt_add_mapping(ttm);
        return ret;
}

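/*
 * Undoes ttm_tt_add_mapping: severs each page's association with the
 * device address space before the pages are freed.
 */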
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
#ifndef __NetBSD__
        pgoff_t i;
        struct page **page = ttm->pages;

        if (ttm->page_flags & TTM_PAGE_FLAG_SG)
                return;

        for (i = 0; i < ttm->num_pages; ++i) {
                (*page)->mapping = NULL;
                (*page++)->index = 0;
        }
#endif
}

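/*
 * Releases the ttm's backing pages through the driver's
 * ttm_tt_unpopulate hook, falling back to the generic pool where
 * available; NetBSD drivers must supply the hook.  No-op if already
 * unpopulated.
 */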
void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        if (ttm->state == tt_unpopulated)
                return;

        ttm_tt_clear_mapping(ttm);
        if (ttm->bdev->driver->ttm_tt_unpopulate)
                ttm->bdev->driver->ttm_tt_unpopulate(ttm);
        else
#ifdef __NetBSD__
                panic("no ttm pool unpopulation");
#else
                ttm_pool_unpopulate(ttm);
#endif
}