Lines Matching refs:ttm_dma
44 * ttm_bus_dma_populate(ttm_dma)
46 * If ttm_dma is not already populated, wire its pages and load
54 ttm_bus_dma_populate(struct ttm_dma_tt *ttm_dma)
58 KASSERT(ttm_dma->ttm.state == tt_unpopulated);
61 KASSERT(!ISSET(ttm_dma->ttm.page_flags, TTM_PAGE_FLAG_SWAPPED));
63 ttm_dma->ttm.page_flags |= TTM_PAGE_FLAG_SWAPPED;
66 ret = ttm_tt_wire(&ttm_dma->ttm);
71 ttm_dma->ttm.state = tt_unbound;
74 ttm_dma->ttm.page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
78 ret = -bus_dmamap_load_pages(ttm_dma->ttm.bdev->dmat,
79 ttm_dma->dma_address, ttm_dma->ttm.pages,
80 (ttm_dma->ttm.num_pages << PAGE_SHIFT), BUS_DMA_NOWAIT);
88 bus_dmamap_unload(ttm_dma->ttm.bdev->dmat, ttm_dma->dma_address);
89 fail1: KASSERT(ttm_dma->ttm.state == tt_unbound);
90 ttm_tt_unwire(&ttm_dma->ttm);
91 ttm_dma->ttm.state = tt_unpopulated;
97 ttm_bus_dma_put(struct ttm_dma_tt *ttm_dma, int flags)
99 struct uvm_object *const uobj = ttm_dma->ttm.swap_storage;
100 const size_t size = (ttm_dma->ttm.num_pages << PAGE_SHIFT);
107 KASSERTMSG((ttm_dma->ttm.state == tt_unbound),
109 &ttm_dma->ttm, (int)ttm_dma->ttm.state);
112 if (!ISSET(ttm_dma->ttm.page_flags, TTM_PAGE_FLAG_SWAPPED)) {
113 bus_dmamap_unload(ttm_dma->ttm.bdev->dmat,
114 ttm_dma->dma_address);
115 ttm_tt_unwire(&ttm_dma->ttm);
116 ttm_dma->ttm.page_flags |= TTM_PAGE_FLAG_SWAPPED;
128 ttm_dma->ttm.state = tt_unpopulated;
132 * ttm_bus_dma_unpopulate(ttm_dma)
135 * associated with ttm_dma.
141 ttm_bus_dma_unpopulate(struct ttm_dma_tt *ttm_dma)
144 ttm_bus_dma_put(ttm_dma, PGO_CLEANIT|PGO_FREE);
148 * ttm_bus_dma_swapout(ttm_dma)
151 * associated with ttm_dma so that they can be swapped out, but
158 ttm_bus_dma_swapout(struct ttm_dma_tt *ttm_dma)
161 ttm_bus_dma_put(ttm_dma, PGO_DEACTIVATE);