
Lines Matching refs:bdev

104 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p,
107 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
136 ttm_mem_type_debug(bo->bdev, &p, mem_type);
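
These matches come from TTM's core buffer-object code (by all appearances ttm_bo.c in a NetBSD DRM port, given uvm_obj_init at line 1374). The first group is the debug path: ttm_mem_type_debug() looks up the per-type manager in the bdev->man[] array and dumps it through a drm_printer. A minimal sketch of that lookup-and-dump pattern, assuming the ttm_mem_type_manager_func::debug hook of this TTM generation (the helper name is hypothetical):

#include <drm/drm_print.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical helper mirroring ttm_mem_type_debug() at line 104:
 * fetch the per-type manager from bdev->man[] and dump it. */
static void example_mem_type_debug(struct ttm_bo_device *bdev,
                                   struct drm_printer *p, int mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];

        drm_printf(p, "    use_type: %d\n", man->use_type);
        drm_printf(p, "    size: %llu\n", (unsigned long long)man->size);
        if (mem_type != TTM_PL_SYSTEM)
                (*man->func->debug)(man, p); /* manager-specific dump */
}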
197 struct ttm_bo_device *bdev = bo->bdev;
208 man = &bdev->man[mem->mem_type];
227 struct ttm_bo_device *bdev = bo->bdev;
241 if (notify && bdev->driver->del_from_lru_notify)
242 bdev->driver->del_from_lru_notify(bo);
292 man = &pos->first->bdev->man[TTM_PL_TT];
307 man = &pos->first->bdev->man[TTM_PL_VRAM];
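
Lines 227-307 are LRU bookkeeping: removal from the LRU optionally notifies the driver through del_from_lru_notify, and the bulk-move helper reaches the TT and VRAM managers via pos->first->bdev. A driver that wants the notification wires the hook into its ttm_bo_driver; a sketch with hypothetical exdrv_* names:

#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical hook: TTM calls this whenever a buffer leaves an LRU
 * list (the calls near line 241). */
static void exdrv_del_from_lru_notify(struct ttm_buffer_object *bo)
{
        /* e.g. clear a driver-side "on LRU" flag or a bulk-move cursor */
}

static struct ttm_bo_driver exdrv_bo_driver = {
        /* ...mandatory callbacks elided... */
        .del_from_lru_notify = exdrv_del_from_lru_notify,
};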
332 struct ttm_bo_device *bdev = bo->bdev;
333 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
334 bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
335 struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
336 struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
371 if (bdev->driver->move_notify)
372 bdev->driver->move_notify(bo, evict, mem);
379 if (bdev->driver->move_notify)
380 bdev->driver->move_notify(bo, evict, mem);
385 else if (bdev->driver->move)
386 ret = bdev->driver->move(bo, evict, ctx, mem);
391 if (bdev->driver->move_notify) {
393 bdev->driver->move_notify(bo, false, mem);
402 if (bdev->driver->invalidate_caches) {
403 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
412 bdev->man[bo->mem.mem_type].gpu_offset;
420 new_man = &bdev->man[bo->mem.mem_type];
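
Lines 332-420 are ttm_bo_handle_move_mem(): both the old and new managers come out of bdev->man[], the driver's move_notify hook brackets the move, and the copy itself is delegated to bdev->driver->move when the driver provides one. A condensed sketch of that control flow; the real function also takes a ttm_bo_move_ttm() fast path and swaps the memory regions before the rollback notify:

static int example_handle_move(struct ttm_buffer_object *bo, bool evict,
                               struct ttm_operation_ctx *ctx,
                               struct ttm_mem_reg *mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        int ret;

        if (bdev->driver->move_notify)
                bdev->driver->move_notify(bo, evict, mem); /* pre-move hook */

        if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, ctx, mem); /* HW blit */
        else
                ret = ttm_bo_move_memcpy(bo, ctx, mem); /* CPU fallback */

        if (ret && bdev->driver->move_notify)
                bdev->driver->move_notify(bo, false, mem); /* undo notify */
        return ret;
}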
439 if (bo->bdev->driver->move_notify)
440 bo->bdev->driver->move_notify(bo, false, NULL);
485 struct ttm_bo_device *bdev = bo->bdev;
532 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
535 schedule_delayed_work(&bdev->wq,
625 static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
634 while (!list_empty(&bdev->ddestroy)) {
637 bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
658 list_splice_tail(&removed, &bdev->ddestroy);
659 empty = list_empty(&bdev->ddestroy);
667 struct ttm_bo_device *bdev =
670 if (!ttm_bo_delayed_delete(bdev, false))
671 schedule_delayed_work(&bdev->wq,
679 struct ttm_bo_device *bdev = bo->bdev;
680 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
682 if (bo->bdev->driver->release_notify)
683 bo->bdev->driver->release_notify(bo);
688 drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
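
Lines 485-688 are the delayed-destruction machinery: a buffer whose fences are still busy is queued on bdev->ddestroy and bdev->wq is scheduled; the work item re-arms itself until ttm_bo_delayed_delete() drains the list, and ttm_bo_release() finally drops the buffer's node from the vma offset manager. The worker at line 667 can be reconstructed from the fragments above, assuming the usual TTM HZ/100 timeout expression:

/* Reconstruction of ttm_bo_delayed_workqueue() from lines 667-671:
 * retry every HZ/100 ticks until the ddestroy list is empty. */
static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
        struct ttm_bo_device *bdev =
                container_of(work, struct ttm_bo_device, wq.work);

        if (!ttm_bo_delayed_delete(bdev, false))
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
}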
706 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
708 return cancel_delayed_work_sync(&bdev->wq);
712 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
715 schedule_delayed_work(&bdev->wq,
723 struct ttm_bo_device *bdev = bo->bdev;
732 bdev->driver->evict_flags(bo, &placement);
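
Lines 706-732 are the delayed-workqueue lock/unlock helpers and the start of ttm_bo_evict(), which asks the driver where an evicted buffer may go via evict_flags. A minimal driver-side implementation that steers every eviction to system memory might look like this (all exdrv_* names are hypothetical):

#include <drm/ttm/ttm_placement.h>

static const struct ttm_place exdrv_sys_place = {
        .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING,
};

/* Hypothetical evict_flags hook: steer every eviction to system RAM. */
static void exdrv_evict_flags(struct ttm_buffer_object *bo,
                              struct ttm_placement *placement)
{
        placement->num_placement = 1;
        placement->placement = &exdrv_sys_place;
        placement->num_busy_placement = 1;
        placement->busy_placement = &exdrv_sys_place;
}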
851 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
858 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
876 if (place && !bdev->driver->eviction_valuable(bo,
923 struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
973 struct ttm_bo_device *bdev = bo->bdev;
974 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
985 ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
1054 struct ttm_bo_device *bdev = bo->bdev;
1064 man = &bdev->man[mem_type];
1102 struct ttm_bo_device *bdev = bo->bdev;
1126 man = &bdev->man[mem->mem_type];
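
Lines 851-1126 are the allocator/evictor pair: when ttm_bo_mem_space() cannot satisfy a placement, it loops calling ttm_mem_evict_first() on that memory type until space appears or eviction fails, and the driver can veto individual victims through eviction_valuable (line 876). The retry loop has roughly this shape (a sketch of the in-file logic; NULL stands in for the ww_acquire_ctx ticket the real caller threads through, and locking is elided):

static int example_force_space(struct ttm_buffer_object *bo,
                               const struct ttm_place *place,
                               struct ttm_mem_reg *mem,
                               struct ttm_operation_ctx *ctx)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret;

        do {
                ret = (*man->func->get_node)(man, bo, place, mem);
                if (ret)
                        return ret;
                if (mem->mm_node)       /* got space */
                        break;
                ret = ttm_mem_evict_first(bdev, mem->mem_type, place,
                                          ctx, NULL);
                if (ret)
                        return ret;
        } while (1);
        return 0;
}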
1291 int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
1308 if (sg && !drm_prime_sg_importable(bdev->dmat, sg)) {
1341 bo->bdev = bdev;
1374 uvm_obj_init(&bo->uvmobj, bdev->driver->ttm_uvm_ops, true, 1);
1384 ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
1414 int ttm_bo_init(struct ttm_bo_device *bdev,
1429 ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
1442 size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
1456 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1470 int ttm_bo_create(struct ttm_bo_device *bdev,
1486 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1487 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
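
Lines 1291-1487 are the constructors. ttm_bo_create() is itself the canonical usage recipe: size the memory accounting with ttm_bo_acc_size(), then hand everything to ttm_bo_init(). A hedged driver-side sketch of the same recipe (names hypothetical; note that on failure ttm_bo_init() already drops the reference and frees the object through its destroy path):

#include <linux/slab.h>
#include <drm/ttm/ttm_bo_api.h>

/* Hypothetical allocation following the recipe ttm_bo_create() uses
 * around line 1486. */
static int exdrv_bo_create(struct ttm_bo_device *bdev, unsigned long size,
                           struct ttm_placement *placement,
                           struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo;
        size_t acc_size;
        int ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return -ENOMEM;

        acc_size = ttm_bo_acc_size(bdev, size, sizeof(*bo));
        ret = ttm_bo_init(bdev, bo, size, ttm_bo_type_device, placement,
                          0 /* page_alignment */, false /* interruptible */,
                          acc_size, NULL /* sg */, NULL /* resv */,
                          NULL /* destroy: default kfree */);
        if (ret)
                return ret; /* bo already freed via its destroy path */

        *p_bo = bo;
        return 0;
}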
1497 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1505 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1519 ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx,
1542 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1551 man = &bdev->man[mem_type];
1564 ret = ttm_bo_force_list_clean(bdev, mem_type);
1580 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1582 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1594 return ttm_bo_force_list_clean(bdev, mem_type);
1598 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1606 man = &bdev->man[type];
1614 ret = bdev->driver->init_mem_type(bdev, type, man);
1617 man->bdev = bdev;
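
Lines 1497-1617 are memory-manager lifecycle: ttm_bo_init_mm() asks the driver to describe the type through init_mem_type and then records man->bdev, while ttm_bo_clean_mm() and ttm_bo_evict_mm() tear a manager down or merely empty it via ttm_bo_force_list_clean(). Typical driver bring-up and tear-down, with hypothetical names and sizes:

/* Hypothetical bring-up of the VRAM and TT managers; sizes in bytes. */
static int exdrv_init_managers(struct ttm_bo_device *bdev,
                               uint64_t vram_bytes, uint64_t gtt_bytes)
{
        int ret;

        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_bytes >> PAGE_SHIFT);
        if (ret)
                return ret;
        return ttm_bo_init_mm(bdev, TTM_PL_TT, gtt_bytes >> PAGE_SHIFT);
}

/* Tear-down mirrors it; TTM_PL_SYSTEM is handled by
 * ttm_bo_device_release() itself (line 1729 skips it here too). */
static void exdrv_fini_managers(struct ttm_bo_device *bdev)
{
        ttm_bo_clean_mm(bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(bdev, TTM_PL_TT);
}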
1718 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1726 man = &bdev->man[i];
1729 if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1739 list_del(&bdev->device_list);
1742 cancel_delayed_work_sync(&bdev->wq);
1744 if (ttm_bo_delayed_delete(bdev, true))
1749 if (list_empty(&bdev->man[0].lru[0]))
1760 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1781 bdev->driver = driver;
1783 memset(bdev->man, 0, sizeof(bdev->man));
1789 ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1793 bdev->vma_manager = vma_manager;
1794 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1795 INIT_LIST_HEAD(&bdev->ddestroy);
1797 bdev->memt = memt;
1798 bdev->dmat = dmat;
1800 bdev->dev_mapping = mapping;
1802 bdev->need_dma32 = need_dma32;
1804 list_add_tail(&bdev->device_list, &glob->device_list);
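
Lines 1718-1804 are device bring-up and tear-down. Note the port-specific fields: memt and dmat (lines 1797-1798) are bus-space/DMA tags, so this ttm_bo_device_init() takes more arguments than the upstream Linux version of the same era, which is roughly (bdev, driver, mapping, vma_manager, need_dma32). A pairing sketch against that upstream signature, exdrv_* hypothetical:

/* Sketch only: the NetBSD port shown above also threads bus-space/DMA
 * tags into bdev->memt and bdev->dmat, so its argument list differs. */
static int exdrv_ttm_init(struct ttm_bo_device *bdev,
                          struct address_space *mapping,
                          struct drm_vma_offset_manager *vma_manager)
{
        return ttm_bo_device_init(bdev, &exdrv_bo_driver, mapping,
                                  vma_manager, false /* need_dma32 */);
}

static void exdrv_ttm_fini(struct ttm_bo_device *bdev)
{
        /* nonzero means some manager could not be cleaned (line 1729) */
        if (ttm_bo_device_release(bdev))
                pr_warn("exdrv: some TTM managers were still busy\n");
}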
1818 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1820 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1863 struct ttm_bo_device *bdev = bo->bdev;
1865 drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
1872 struct ttm_bo_device *bdev = bo->bdev;
1873 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
1982 if (bo->bdev->driver->swap_notify)
1983 bo->bdev->driver->swap_notify(bo);
2000 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
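
The final group, lines 1863-2000, is the swap path: ttm_bo_unmap_virtual() drops CPU mappings through drm_vma_node_unmap(), swapout fires the optional swap_notify hook before a buffer's pages leave, and ttm_bo_swapout_all() repeatedly runs the swapout path until nothing more can be swapped. A hypothetical swap_notify:

/* Hypothetical swap_notify hook: last chance to drop GPU bindings
 * before the buffer's pages are pushed to swap (line 1982 f.). */
static void exdrv_swap_notify(struct ttm_buffer_object *bo)
{
        /* unbind from GART, invalidate driver-side CPU mappings, ... */
}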