Lines Matching refs:bo
65 * ttm_global_mutex - protecting the global BO state
86 static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
88 kfree(bo);
120 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
127 bo, bo->mem.num_pages, bo->mem.size >> 10,
128 bo->mem.size >> 20);
136 ttm_mem_type_debug(bo->bdev, &p, mem_type);
176 struct ttm_buffer_object *bo =
178 size_t acc_size = bo->acc_size;
180 BUG_ON(kref_read(&bo->list_kref));
181 BUG_ON(kref_read(&bo->kref));
182 BUG_ON(bo->mem.mm_node != NULL);
183 BUG_ON(!list_empty(&bo->lru));
184 BUG_ON(!list_empty(&bo->ddestroy));
185 ttm_tt_destroy(bo->ttm);
187 dma_fence_put(bo->moving);
188 if (!ttm_bo_uses_embedded_gem_object(bo))
189 dma_resv_fini(&bo->base._resv);
190 bo->destroy(bo);
194 static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
197 struct ttm_bo_device *bdev = bo->bdev;
200 dma_resv_assert_held(bo->base.resv);
202 if (!list_empty(&bo->lru))
209 list_add_tail(&bo->lru, &man->lru[bo->priority]);
210 kref_get(&bo->list_kref);
212 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
213 !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
215 list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
216 kref_get(&bo->list_kref);
225 static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
227 struct ttm_bo_device *bdev = bo->bdev;
230 if (!list_empty(&bo->swap)) {
231 list_del_init(&bo->swap);
232 kref_put(&bo->list_kref, ttm_bo_ref_bug);
235 if (!list_empty(&bo->lru)) {
236 list_del_init(&bo->lru);
237 kref_put(&bo->list_kref, ttm_bo_ref_bug);
242 bdev->driver->del_from_lru_notify(bo);
246 struct ttm_buffer_object *bo)
249 pos->first = bo;
250 pos->last = bo;
253 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
256 dma_resv_assert_held(bo->base.resv);
258 ttm_bo_del_from_lru(bo);
259 ttm_bo_add_mem_to_lru(bo, &bo->mem);
261 if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
262 switch (bo->mem.mem_type) {
264 ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
268 ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
271 if (bo->ttm && !(bo->ttm->page_flags &
273 ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo);
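
The ttm_bo_move_to_lru_tail()/ttm_bo_bulk_move_set_pos() lines above implement bulk LRU moves: with a non-NULL bulk argument each BO is only recorded in a per-priority first/last range, and the whole range is spliced to the LRU tail afterwards in one step. A minimal sketch of the driver side, assuming the batch is flushed with ttm_bo_bulk_move_lru_tail() under the global LRU spinlock (ttm_bo_glob.lru_lock in this TTM generation); struct my_bo and the my_bos list are hypothetical.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical driver wrapper around a TTM buffer object. */
struct my_bo {
	struct ttm_buffer_object tbo;
	struct list_head lru_entry;
};

/*
 * Re-sort a set of BOs to the LRU tails in O(1) per list instead of
 * one list_move per BO.  The caller must hold each BO's reservation
 * (here assumed to be one shared, already locked resv).
 */
static void my_move_bos_to_lru_tail(struct list_head *my_bos)
{
	struct ttm_lru_bulk_move bulk;
	struct my_bo *mbo;

	memset(&bulk, 0, sizeof(bulk));

	spin_lock(&ttm_bo_glob.lru_lock);
	list_for_each_entry(mbo, my_bos, lru_entry)
		ttm_bo_move_to_lru_tail(&mbo->tbo, &bulk);	/* record */
	ttm_bo_bulk_move_lru_tail(&bulk);			/* splice */
	spin_unlock(&ttm_bo_glob.lru_lock);
}
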
328 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
332 struct ttm_bo_device *bdev = bo->bdev;
333 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
335 struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
340 ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
344 ttm_bo_unmap_virtual_locked(bo);
353 if (bo->ttm == NULL) {
355 ret = ttm_tt_create(bo, zero);
360 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
365 ret = ttm_tt_bind(bo->ttm, mem, ctx);
370 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
372 bdev->driver->move_notify(bo, evict, mem);
373 bo->mem = *mem;
380 bdev->driver->move_notify(bo, evict, mem);
384 ret = ttm_bo_move_ttm(bo, ctx, mem);
386 ret = bdev->driver->move(bo, evict, ctx, mem);
388 ret = ttm_bo_move_memcpy(bo, ctx, mem);
392 swap(*mem, bo->mem);
393 bdev->driver->move_notify(bo, false, mem);
394 swap(*mem, bo->mem);
401 if (bo->evicted) {
403 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
407 bo->evicted = false;
410 if (bo->mem.mm_node)
411 bo->offset = (bo->mem.start << PAGE_SHIFT) +
412 bdev->man[bo->mem.mem_type].gpu_offset;
414 bo->offset = 0;
416 ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
420 new_man = &bdev->man[bo->mem.mem_type];
422 ttm_tt_destroy(bo->ttm);
423 bo->ttm = NULL;
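
ttm_bo_handle_move_mem() above picks one of three copy paths: ttm_bo_move_ttm(), the driver's bdev->driver->move callback when one is registered, or ttm_bo_move_memcpy() as the generic fallback. A sketch of such a driver callback, assuming the move signature of this TTM generation; my_hw_copy() is a hypothetical DMA-engine copy, and falling back to ttm_bo_move_memcpy() mirrors what the core does when no callback exists.

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical accelerated blit; returns 0 on success. */
static int my_hw_copy(struct ttm_buffer_object *bo,
		      struct ttm_operation_ctx *ctx,
		      struct ttm_mem_reg *new_mem);

/* Wired up as .move in the driver's struct ttm_bo_driver. */
static int my_bo_move(struct ttm_buffer_object *bo, bool evict,
		      struct ttm_operation_ctx *ctx,
		      struct ttm_mem_reg *new_mem)
{
	/* Prefer the copy engine ... */
	if (my_hw_copy(bo, ctx, new_mem) == 0)
		return 0;

	/* ... and fall back to the CPU copy path. */
	return ttm_bo_move_memcpy(bo, ctx, new_mem);
}
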
430 * Call bo::reserved.
434 * Will release the bo::reserved lock.
437 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
439 if (bo->bdev->driver->move_notify)
440 bo->bdev->driver->move_notify(bo, false, NULL);
442 ttm_tt_destroy(bo->ttm);
443 bo->ttm = NULL;
444 ttm_bo_mem_put(bo, &bo->mem);
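
ttm_bo_cleanup_memtype_use() calls the driver's move_notify hook with new_mem == NULL before the backing store is released, which gives the driver a last chance to drop its private, per-BO resources. A sketch of such a hook, assuming the move_notify signature of this TTM generation; my_bo_invalidate_mappings() is hypothetical.

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical helper that tears down GPU-side mappings of the BO. */
static void my_bo_invalidate_mappings(struct ttm_buffer_object *bo);

/* Wired up as .move_notify in the driver's struct ttm_bo_driver. */
static void my_bo_move_notify(struct ttm_buffer_object *bo, bool evict,
			      struct ttm_mem_reg *new_mem)
{
	/* new_mem == NULL: the BO's memory is going away for good. */
	if (!new_mem) {
		my_bo_invalidate_mappings(bo);
		return;
	}

	/* Ordinary move: update driver bookkeeping for new_mem here. */
}
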
447 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
451 if (bo->base.resv == &bo->base._resv)
454 BUG_ON(!dma_resv_trylock(&bo->base._resv));
456 r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
458 dma_resv_unlock(&bo->base._resv);
463 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
469 fobj = dma_resv_get_list(&bo->base._resv);
470 fence = dma_resv_get_excl(&bo->base._resv);
476 dma_resv_held(bo->base.resv));
483 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
485 struct ttm_bo_device *bdev = bo->bdev;
488 ret = ttm_bo_individualize_resv(bo);
491 * fences block for the BO to become idle
493 dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
500 ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
502 if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
503 ttm_bo_del_from_lru(bo);
505 if (bo->base.resv != &bo->base._resv)
506 dma_resv_unlock(&bo->base._resv);
508 ttm_bo_cleanup_memtype_use(bo);
509 dma_resv_unlock(bo->base.resv);
513 ttm_bo_flush_all_fences(bo);
520 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
521 bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
522 ttm_bo_move_to_lru_tail(bo, NULL);
525 dma_resv_unlock(bo->base.resv);
527 if (bo->base.resv != &bo->base._resv)
528 dma_resv_unlock(&bo->base._resv);
531 kref_get(&bo->list_kref);
532 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
541 * If bo idle, remove from delayed- and lru lists, and unref.
552 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
559 if (unlikely(list_empty(&bo->ddestroy)))
560 resv = bo->base.resv;
562 resv = &bo->base._resv;
573 dma_resv_unlock(bo->base.resv);
586 if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
601 if (ret || unlikely(list_empty(&bo->ddestroy))) {
603 dma_resv_unlock(bo->base.resv);
608 ttm_bo_del_from_lru(bo);
609 list_del_init(&bo->ddestroy);
610 kref_put(&bo->list_kref, ttm_bo_ref_bug);
613 ttm_bo_cleanup_memtype_use(bo);
616 dma_resv_unlock(bo->base.resv);
635 struct ttm_buffer_object *bo;
637 bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
639 kref_get(&bo->list_kref);
640 list_move_tail(&bo->ddestroy, &removed);
642 if (remove_all || bo->base.resv != &bo->base._resv) {
644 dma_resv_lock(bo->base.resv, NULL);
647 ttm_bo_cleanup_refs(bo, false, !remove_all, true);
649 } else if (dma_resv_trylock(bo->base.resv)) {
650 ttm_bo_cleanup_refs(bo, false, !remove_all, true);
655 kref_put(&bo->list_kref, ttm_bo_release_list);
677 struct ttm_buffer_object *bo =
679 struct ttm_bo_device *bdev = bo->bdev;
680 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
682 if (bo->bdev->driver->release_notify)
683 bo->bdev->driver->release_notify(bo);
686 uvm_obj_destroy(&bo->uvmobj, true);
688 drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
690 if (!ttm_bo_uses_embedded_gem_object(bo))
691 drm_vma_node_destroy(&bo->base.vma_node);
694 ttm_mem_io_free_vm(bo);
696 ttm_bo_cleanup_refs_or_queue(bo);
697 kref_put(&bo->list_kref, ttm_bo_release_list);
700 void ttm_bo_put(struct ttm_buffer_object *bo)
702 kref_put(&bo->kref, ttm_bo_release);
720 static int ttm_bo_evict(struct ttm_buffer_object *bo,
723 struct ttm_bo_device *bdev = bo->bdev;
728 dma_resv_assert_held(bo->base.resv);
732 bdev->driver->evict_flags(bo, &placement);
735 ret = ttm_bo_pipeline_gutting(bo);
739 return ttm_tt_create(bo, false);
742 evict_mem = bo->mem;
747 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
751 bo);
752 ttm_bo_mem_space_debug(bo, &placement);
757 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx);
761 ttm_bo_mem_put(bo, &evict_mem);
764 bo->evicted = true;
769 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
772 /* Don't evict this BO if it's outside of the
775 if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
776 (place->lpfn && place->lpfn <= bo->mem.start))
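
ttm_bo_eviction_valuable() above only rejects BOs that lie entirely outside the requested fpfn/lpfn window; ttm_mem_evict_first() calls it through bdev->driver->eviction_valuable, so a driver can add its own policy and still defer to the default range check. A minimal sketch; my_bo_is_scanout() is a hypothetical predicate.

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/* Hypothetical predicate: the display engine is reading this BO. */
static bool my_bo_is_scanout(struct ttm_buffer_object *bo);

/* Wired up as .eviction_valuable in the driver's struct ttm_bo_driver. */
static bool my_eviction_valuable(struct ttm_buffer_object *bo,
				 const struct ttm_place *place)
{
	/* Never evict an active scanout buffer. */
	if (my_bo_is_scanout(bo))
		return false;

	/* Keep the core fpfn/lpfn overlap check. */
	return ttm_bo_eviction_valuable(bo, place);
}
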
784 * Check the target bo is allowable to be evicted or swapout, including cases:
789 * or the target bo already is in delayed free list;
793 static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
798 if (bo->base.resv == ctx->resv) {
799 dma_resv_assert_held(bo->base.resv);
801 || !list_empty(&bo->ddestroy))
807 ret = dma_resv_trylock(bo->base.resv);
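
ttm_bo_evict_swapout_allowable() treats a BO whose reservation object equals ctx->resv as already locked by the caller and, per the `||` clause above, only allows evicting it when the context opts in (or the BO is already on the delayed-destroy list). A sketch of that opt-in from the caller's side, assuming the ttm_operation_ctx fields resv/flags and the TTM_OPT_FLAG_ALLOW_RES_EVICT flag of this TTM generation; vm_resv stands for a driver's shared per-VM reservation object.

#include <linux/dma-resv.h>
#include <drm/ttm/ttm_bo_api.h>

/*
 * Validate a BO that shares a per-VM reservation object.  Passing the
 * shared resv in the context lets ttm_mem_evict_first() evict sibling
 * BOs protected by the same lock instead of skipping them, because
 * ALLOW_RES_EVICT is set.
 */
static int my_validate_vm_bo(struct ttm_buffer_object *bo,
			     struct ttm_placement *placement,
			     struct dma_resv *vm_resv)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = vm_resv,
		.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT,
	};

	return ttm_bo_validate(bo, placement, &ctx);
}
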
817 * ttm_mem_evict_wait_busy - wait for a busy BO to become available
819 * @busy_bo: BO which couldn't be locked with trylock
841 * TODO: It would be better to keep the BO locked until allocation is at
857 struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
865 list_for_each_entry(bo, &man->lru[i], lru) {
868 if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
871 dma_resv_locking_ctx(bo->base.resv))
872 busy_bo = bo;
876 if (place && !bdev->driver->eviction_valuable(bo,
879 dma_resv_unlock(bo->base.resv);
886 if (&bo->lru != &man->lru[i])
889 bo = NULL;
892 if (!bo) {
902 kref_get(&bo->list_kref);
904 if (!list_empty(&bo->ddestroy)) {
905 ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
907 kref_put(&bo->list_kref, ttm_bo_release_list);
913 ret = ttm_bo_evict(bo, ctx);
915 ttm_bo_unreserve(bo);
917 kref_put(&bo->list_kref, ttm_bo_release_list);
921 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
923 struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
931 * Add the last move fence to the BO and reserve a new shared slot.
933 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
951 dma_resv_add_shared_fence(bo->base.resv, fence);
953 ret = dma_resv_reserve_shared(bo->base.resv, 1);
959 dma_fence_put(bo->moving);
960 bo->moving = fence;
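
As the comment and code above show, ttm_bo_add_move_fence() adds the manager's last move fence as a shared fence and then reserves a fresh shared slot so the next addition cannot fail. The same reserve-before-add discipline applies to driver code publishing its own fences; a minimal sketch using the dma_resv API, where job_fence stands for a fence the driver just emitted.

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/*
 * Publish a just-emitted fence on a reservation object.  The shared
 * slot must be reserved while failure can still be unwound, because
 * dma_resv_add_shared_fence() itself cannot fail.
 */
static int my_publish_fence(struct dma_resv *resv,
			    struct dma_fence *job_fence)
{
	int ret;

	ret = dma_resv_lock(resv, NULL);
	if (ret)
		return ret;

	ret = dma_resv_reserve_shared(resv, 1);
	if (ret == 0)
		dma_resv_add_shared_fence(resv, job_fence);

	dma_resv_unlock(resv);
	return ret;
}
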
968 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
973 struct ttm_bo_device *bdev = bo->bdev;
978 ticket = dma_resv_locking_ctx(bo->base.resv);
980 ret = (*man->func->get_node)(man, bo, place, mem);
991 return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
1040 * @bo: BO to find memory for
1049 static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
1054 struct ttm_bo_device *bdev = bo->bdev;
1071 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags);
1082 ttm_bo_del_from_lru(bo);
1083 ttm_bo_add_mem_to_lru(bo, mem);
1097 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
1102 struct ttm_bo_device *bdev = bo->bdev;
1106 ret = dma_resv_reserve_shared(bo->base.resv, 1);
1115 ret = ttm_bo_mem_placement(bo, place, mem, ctx);
1127 ret = (*man->func->get_node)(man, bo, place, mem);
1134 ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
1148 ret = ttm_bo_mem_placement(bo, place, mem, ctx);
1159 ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
1174 if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
1176 ttm_bo_move_to_lru_tail(bo, NULL);
1184 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1191 dma_resv_assert_held(bo->base.resv);
1194 mem.num_pages = bo->num_pages;
1196 mem.page_alignment = bo->mem.page_alignment;
1203 ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
1206 ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
1209 ttm_bo_mem_put(bo, &mem);
1256 int ttm_bo_validate(struct ttm_buffer_object *bo,
1263 dma_resv_assert_held(bo->base.resv);
1267 if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
1268 ret = ttm_bo_move_buffer(bo, placement, ctx);
1276 ttm_flag_masked(&bo->mem.placement, new_flags,
1282 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1283 ret = ttm_tt_create(bo, true);
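
ttm_bo_validate() is the entry point drivers use to (re)place an already initialized, already reserved BO according to a ttm_placement. A minimal caller sketch, assuming the TTM_PL_FLAG_VRAM and TTM_PL_FLAG_WC placement flags of this TTM generation; the BO must be reserved, as the dma_resv_assert_held() above requires.

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

/* Move (or keep) a reserved BO in write-combined VRAM. */
static int my_move_to_vram(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_place place = {
		.fpfn = 0,
		.lpfn = 0,	/* 0 = no upper bound */
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
	};
	struct ttm_placement placement = {
		.num_placement = 1,
		.placement = &place,
		.num_busy_placement = 1,
		.busy_placement = &place,
	};

	/* Caller holds bo->base.resv across this call. */
	return ttm_bo_validate(bo, &placement, &ctx);
}
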
1292 struct ttm_buffer_object *bo,
1317 (*destroy)(bo);
1319 kfree(bo);
1327 (*destroy)(bo);
1329 kfree(bo);
1333 bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
1335 kref_init(&bo->kref);
1336 kref_init(&bo->list_kref);
1337 INIT_LIST_HEAD(&bo->lru);
1338 INIT_LIST_HEAD(&bo->ddestroy);
1339 INIT_LIST_HEAD(&bo->swap);
1340 INIT_LIST_HEAD(&bo->io_reserve_lru);
1341 bo->bdev = bdev;
1342 bo->type = type;
1343 bo->num_pages = num_pages;
1344 bo->mem.size = num_pages << PAGE_SHIFT;
1345 bo->mem.mem_type = TTM_PL_SYSTEM;
1346 bo->mem.num_pages = bo->num_pages;
1347 bo->mem.mm_node = NULL;
1348 bo->mem.page_alignment = page_alignment;
1349 bo->mem.bus.io_reserved_vm = false;
1350 bo->mem.bus.io_reserved_count = 0;
1351 bo->moving = NULL;
1352 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1353 bo->acc_size = acc_size;
1354 bo->sg = sg;
1356 bo->base.resv = resv;
1357 dma_resv_assert_held(bo->base.resv);
1359 bo->base.resv = &bo->base._resv;
1361 if (!ttm_bo_uses_embedded_gem_object(bo)) {
1363 * bo.gem is not initialized, so we have to setup the
1366 dma_resv_init(&bo->base._resv);
1368 drm_vma_node_init(&bo->base.vma_node);
1370 drm_vma_node_reset(&bo->base.vma_node);
1374 uvm_obj_init(&bo->uvmobj, bdev->driver->ttm_uvm_ops, true, 1);
1382 if (bo->type == ttm_bo_type_device ||
1383 bo->type == ttm_bo_type_sg)
1384 ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
1385 bo->mem.num_pages);
1391 locked = dma_resv_trylock(bo->base.resv);
1396 ret = ttm_bo_validate(bo, placement, ctx);
1400 ttm_bo_unreserve(bo);
1402 ttm_bo_put(bo);
1407 ttm_bo_move_to_lru_tail(bo, NULL);
1415 struct ttm_buffer_object *bo,
1429 ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
1436 ttm_bo_unreserve(bo);
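
ttm_bo_init() wraps ttm_bo_init_reserved() and, as the ttm_bo_unreserve() above shows, hands the BO back unreserved on success. A sketch of how a driver typically embeds and creates a BO, assuming this TTM generation's signature (bdev, bo, size, type, placement, page_alignment, interruptible, acc_size, sg, resv, destroy) and the ttm_bo_acc_size() helper; struct my_bo is hypothetical.

#include <linux/err.h>
#include <linux/slab.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical driver BO embedding the TTM object. */
struct my_bo {
	struct ttm_buffer_object tbo;
};

static void my_bo_destroy(struct ttm_buffer_object *tbo)
{
	kfree(container_of(tbo, struct my_bo, tbo));
}

static struct my_bo *my_bo_create(struct ttm_bo_device *bdev,
				  unsigned long size,
				  struct ttm_placement *placement)
{
	struct my_bo *mbo;
	size_t acc_size;
	int ret;

	mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
	if (!mbo)
		return ERR_PTR(-ENOMEM);

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(*mbo));

	ret = ttm_bo_init(bdev, &mbo->tbo, size, ttm_bo_type_device,
			  placement, 0 /* page_alignment */,
			  true /* interruptible */, acc_size,
			  NULL /* sg */, NULL /* resv */, my_bo_destroy);
	if (ret) {
		/* On failure TTM already dropped its reference and
		 * called my_bo_destroy(); do not free mbo again. */
		return ERR_PTR(ret);
	}

	return mbo;
}
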
1478 struct ttm_buffer_object *bo;
1482 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1483 if (unlikely(bo == NULL))
1487 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1491 *p_bo = bo;
1835 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
1838 if (bo->mem.bus.is_iomem) {
1841 KASSERTMSG((bo->mem.bus.base & (PAGE_SIZE - 1)) == 0,
1842 "bo bus base addr not page-aligned: %" PRIx64 "",
1843 (uint64_t)bo->mem.bus.base);
1844 KASSERTMSG((bo->mem.bus.offset & (PAGE_SIZE - 1)) == 0,
1845 "bo bus offset not page-aligned: %lx",
1846 bo->mem.bus.offset);
1847 start = bo->mem.bus.base + bo->mem.bus.offset;
1848 KASSERT((bo->mem.bus.size & (PAGE_SIZE - 1)) == 0);
1849 end = start + bo->mem.bus.size;
1853 } else if (bo->ttm != NULL) {
1856 rw_enter(bo->uvmobj.vmobjlock, RW_WRITER);
1857 for (i = 0; i < bo->ttm->num_pages; i++)
1858 pmap_page_protect(&bo->ttm->pages[i]->p_vmp,
1860 rw_exit(bo->uvmobj.vmobjlock);
1863 struct ttm_bo_device *bdev = bo->bdev;
1865 drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
1867 ttm_mem_io_free_vm(bo);
1870 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1872 struct ttm_bo_device *bdev = bo->bdev;
1873 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
1876 ttm_bo_unmap_virtual_locked(bo);
1883 int ttm_bo_wait(struct ttm_buffer_object *bo,
1889 if (dma_resv_test_signaled_rcu(bo->base.resv, true))
1895 timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
1903 dma_resv_add_excl_fence(bo->base.resv, NULL);
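
ttm_bo_wait() above waits for every fence on the BO's reservation object (or just polls when no_wait is set) and clears them by installing a NULL exclusive fence, so it must be called with the reservation held. A trivial caller sketch; my_bo_wait_idle() is a hypothetical helper name.

#include <drm/ttm/ttm_bo_api.h>

/* Reserve the BO, wait for all GPU work on it, then unreserve. */
static int my_bo_wait_idle(struct ttm_buffer_object *bo)
{
	int ret;

	ret = ttm_bo_reserve(bo, true /* interruptible */,
			     false /* no_wait */, NULL /* ticket */);
	if (ret)
		return ret;

	ret = ttm_bo_wait(bo, true /* interruptible */,
			  false /* no_wait */);

	ttm_bo_unreserve(bo);
	return ret;
}
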
1914 struct ttm_buffer_object *bo;
1921 list_for_each_entry(bo, &glob->swap_lru[i], swap) {
1922 if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
1937 kref_get(&bo->list_kref);
1939 if (!list_empty(&bo->ddestroy)) {
1940 ret = ttm_bo_cleanup_refs(bo, false, false, locked);
1941 kref_put(&bo->list_kref, ttm_bo_release_list);
1945 ttm_bo_del_from_lru(bo);
1952 if (bo->mem.mem_type != TTM_PL_SYSTEM ||
1953 bo->ttm->caching_state != tt_cached) {
1957 evict_mem = bo->mem;
1962 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
1968 * Make sure BO is idle.
1971 ret = ttm_bo_wait(bo, false, false);
1975 ttm_bo_unmap_virtual(bo);
1982 if (bo->bdev->driver->swap_notify)
1983 bo->bdev->driver->swap_notify(bo);
1985 ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
1994 dma_resv_unlock(bo->base.resv);
1995 kref_put(&bo->list_kref, ttm_bo_release_list);
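
ttm_bo_swapout() above evicts a single cached system-memory BO to the swap backend. Under memory pressure the core, and some drivers at suspend time, simply call it in a loop; a sketch of that loop, assuming the ttm_bo_swapout(&ttm_bo_glob, &ctx) signature used by this TTM generation.

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Swap out cached system BOs until the swap LRU is empty or one fails. */
static void my_swapout_everything(void)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
	};

	while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0)
		;
}
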