/src/sys/external/bsd/drm2/ttm/

ttm_bus_dma.c
     44: * ttm_bus_dma_populate(ttm_dma)
     46: *   If ttm_dma is not already populated, wire its pages and load
     54: ttm_bus_dma_populate(struct ttm_dma_tt *ttm_dma)
     58: KASSERT(ttm_dma->ttm.state == tt_unpopulated);
     61: KASSERT(!ISSET(ttm_dma->ttm.page_flags, TTM_PAGE_FLAG_SWAPPED));
     63: ttm_dma->ttm.page_flags |= TTM_PAGE_FLAG_SWAPPED;
     66: ret = ttm_tt_wire(&ttm_dma->ttm);
     71: ttm_dma->ttm.state = tt_unbound;
     74: ttm_dma->ttm.page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
     78: ret = -bus_dmamap_load_pages(ttm_dma->ttm.bdev->dmat [all...]
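The hits above outline the NetBSD-only helper: it wires the tt's pages with ttm_tt_wire() and then loads the bus_dma map with bus_dmamap_load_pages(), and it appears to return a negative errno on failure (line 78). A minimal sketch of how a caller might use it follows; my_tt_make_resident() is a hypothetical wrapper, and the early state check is an assumption that mirrors the KASSERT at line 58 rather than code copied from the tree.

/*
 * Sketch only.  Assumes the ttm_bus_dma / ttm_tt declarations visible in
 * these search results.
 */
static int
my_tt_make_resident(struct ttm_dma_tt *ttm_dma)
{
	/* The helper asserts tt_unpopulated, so skip if already populated. */
	if (ttm_dma->ttm.state != tt_unpopulated)
		return 0;

	/*
	 * Wire the pages and load the DMA map; per the excerpt above this
	 * returns 0 or a negative errno (Linux convention).
	 */
	return ttm_bus_dma_populate(ttm_dma);
}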
ttm_agp_backend.c
     49: struct ttm_dma_tt ttm_dma;   member in struct:ttm_agp
     65: ttm_agp->ttm_dma.ttm.func = &ttm_agp_backend_func;
     67: if (ttm_dma_tt_init(&ttm_agp->ttm_dma, bo, page_flags) != 0)
     71: return &ttm_agp->ttm_dma.ttm;
    101: ttm_dma.ttm);
    109: KASSERT(ttm_agp->ttm_dma.dma_address->dm_nsegs == ttm->num_pages);
    111: KASSERT(ttm_agp->ttm_dma.dma_address->dm_segs[i].ds_len ==
    115: ttm_agp->ttm_dma.dma_address->dm_segs[i].ds_len);
    137: ttm_dma.ttm);
    158: ttm_dma.ttm) [all...]
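ttm_agp_backend.c shows the usual embedding pattern: the backend object contains a struct ttm_dma_tt, the driver points ttm.func at its ttm_backend_func, initializes the tt with ttm_dma_tt_init(), and hands the embedded struct ttm_tt back to TTM. A hedged sketch of that constructor shape; my_backend, my_backend_func and my_tt_create are made-up names, and the real ttm_agp carries additional AGP-specific state not shown here.

/* Sketch following the ttm_agp_backend.c hits above. */
extern struct ttm_backend_func my_backend_func;	/* driver's bind/unbind/destroy ops */

struct my_backend {
	struct ttm_dma_tt ttm_dma;	/* embedded tt; TTM is handed &ttm_dma.ttm */
	/* ... driver-specific state ... */
};

static struct ttm_tt *
my_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
{
	struct my_backend *be;

	be = kzalloc(sizeof(*be), GFP_KERNEL);
	if (be == NULL)
		return NULL;

	be->ttm_dma.ttm.func = &my_backend_func;
	if (ttm_dma_tt_init(&be->ttm_dma, bo, page_flags) != 0) {
		kfree(be);
		return NULL;
	}
	return &be->ttm_dma.ttm;
}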
/src/sys/external/bsd/drm2/dist/include/drm/ttm/

ttm_page_alloc.h
     99: int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
    101: void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
    118: static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
    124: static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
ttm_tt.h
    173: int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
    175: int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
    186: void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
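Taken together, the two headers give the Linux-side lifecycle: ttm_dma_tt_init() sets up the tt and its page directory, ttm_dma_populate()/ttm_dma_unpopulate() allocate and release the DMA-mapped pages, and ttm_dma_tt_fini() tears the tt down. A sketch of that ordering; the ttm_operation_ctx argument is an assumption, since the hit at line 99 truncates the parameter list, and my_tt_demo is not a function in the tree.

/* Sketch of the call ordering only; error handling kept minimal. */
static int
my_tt_demo(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
    uint32_t page_flags, struct device *dev, struct ttm_operation_ctx *ctx)
{
	int ret;

	ret = ttm_dma_tt_init(ttm_dma, bo, page_flags);	/* page directory */
	if (ret)
		return ret;

	ret = ttm_dma_populate(ttm_dma, dev, ctx);	/* allocate + map pages */
	if (ret) {
		ttm_dma_tt_fini(ttm_dma);
		return ret;
	}

	/* ... bind, command submission, unbind ... */

	ttm_dma_unpopulate(ttm_dma, dev);		/* unmap + free pages */
	ttm_dma_tt_fini(ttm_dma);
	return 0;
}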
/src/sys/external/bsd/drm2/dist/drm/ttm/

ttm_page_alloc_dma.c
    839: struct ttm_dma_tt *ttm_dma,
    843: struct ttm_tt *ttm = &ttm_dma->ttm;
    852: ttm_dma->dma_address[index] = d_page->dma;
    853: list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
    861: static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
    863: struct ttm_tt *ttm = &ttm_dma->ttm;
    890: int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
    894: struct ttm_tt *ttm = &ttm_dma->ttm;
    908: INIT_LIST_HEAD(&ttm_dma->pages_list);
    919: gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true) [all...]
ttm_tt.c
    315: int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
    318: struct ttm_tt *ttm = &ttm_dma->ttm;
    322: INIT_LIST_HEAD(&ttm_dma->pages_list);
    323: if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
    332: int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
    335: struct ttm_tt *ttm = &ttm_dma->ttm;
    340: INIT_LIST_HEAD(&ttm_dma->pages_list);
    342: ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
    344: ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
    354: void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma) [all...]
/src/sys/external/bsd/drm2/dist/drm/nouveau/

nouveau_bo.c
    549: struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;   local in function:nouveau_bo_sync_for_device
    554: if (!ttm_dma)
    563: bus_dmamap_sync(dmat, ttm_dma->dma_address, 0,
    564: PAGE_SIZE*ttm_dma->ttm.num_pages,
    567: for (i = 0; i < ttm_dma->ttm.num_pages; i++)
    569: ttm_dma->dma_address[i],
    578: struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;   local in function:nouveau_bo_sync_for_cpu
    583: if (!ttm_dma)
    592: bus_dmamap_sync(dmat, ttm_dma->dma_address, 0,
    593: PAGE_SIZE*ttm_dma->ttm.num_pages
   1683: struct ttm_dma_tt *ttm_dma = (void *)ttm;   local in function:nouveau_ttm_tt_populate
   1758: struct ttm_dma_tt *ttm_dma = (void *)ttm;   local in function:nouveau_ttm_tt_unpopulate
   1804: struct ttm_dma_tt *ttm_dma = container_of(ttm, struct ttm_dma_tt, ttm);   local in function:nouveau_ttm_tt_swapout
    [all...]
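The nouveau_bo_sync_for_device/_for_cpu hits show the NetBSD path syncing the whole loaded map in one call (the dma_address[i] loop at 567-569 is the Linux path, where dma_address is an array rather than a bus_dmamap_t). A sketch of that pattern, assuming dmat is the device's bus_dma tag as in the hits above; the my_ function names and the exact BUS_DMASYNC flag pairs are assumptions, not copied from nouveau_bo.c.

/* Sketch only: sync the CPU's view before and after device DMA. */
static void
my_bo_sync_for_device(bus_dma_tag_t dmat, struct ttm_dma_tt *ttm_dma)
{
	/* Flush CPU writes before the GPU touches the buffer. */
	bus_dmamap_sync(dmat, ttm_dma->dma_address, 0,
	    PAGE_SIZE * ttm_dma->ttm.num_pages,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
my_bo_sync_for_cpu(bus_dma_tag_t dmat, struct ttm_dma_tt *ttm_dma)
{
	/* Make the GPU's writes visible to the CPU after DMA completes. */
	bus_dmamap_sync(dmat, ttm_dma->dma_address, 0,
	    PAGE_SIZE * ttm_dma->ttm.num_pages,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
}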
/src/sys/external/bsd/drm2/dist/drm/radeon/

radeon_ttm.c
    900: struct ttm_dma_tt *ttm_dma = &gtt->ttm;   local in function:radeon_ttm_tt_swapout
    902: ttm_bus_dma_swapout(ttm_dma);
/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/

amdgpu_ttm.c
   1435: struct ttm_dma_tt *ttm_dma = &gtt->ttm;   local in function:amdgpu_ttm_tt_swapout
   1437: ttm_bus_dma_swapout(ttm_dma);
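radeon and amdgpu share the same NetBSD swapout shape as nouveau's hit at line 1804: recover the embedded struct ttm_dma_tt and pass it to ttm_bus_dma_swapout(), apparently the counterpart of ttm_bus_dma_populate() above. A sketch using container_of as in the nouveau hit; my_ttm_tt_swapout is a hypothetical name.

/*
 * Sketch only: a driver swapout hook handing the tt back to the NetBSD
 * bus_dma helper so its pages can be unwired and swapped out.
 */
static void
my_ttm_tt_swapout(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = container_of(ttm, struct ttm_dma_tt, ttm);

	ttm_bus_dma_swapout(ttm_dma);
}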