
Lines matching refs: bo (buffer-object helpers from radeon_object.c in the radeon driver)

53 static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
56 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
60 static void radeon_update_memory_usage(struct radeon_bo *bo,
63 struct radeon_device *rdev = bo->rdev;
64 u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
84 struct radeon_bo *bo;
86 bo = container_of(tbo, struct radeon_bo, tbo);
88 radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
90 mutex_lock(&bo->rdev->gem.mutex);
91 list_del_init(&bo->list);
92 mutex_unlock(&bo->rdev->gem.mutex);
93 radeon_bo_clear_surface_reg(bo);
94 WARN_ON_ONCE(!list_empty(&bo->va));
95 if (bo->tbo.base.import_attach)
96 drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
97 drm_gem_object_release(&bo->tbo.base);
98 kfree(bo);
101 bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
103 if (bo->destroy == &radeon_ttm_bo_destroy)
196 struct radeon_bo *bo;
216 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
217 if (bo == NULL)
219 drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size);
220 bo->rdev = rdev;
221 bo->surface_reg = -1;
222 INIT_LIST_HEAD(&bo->list);
223 INIT_LIST_HEAD(&bo->va);
224 bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
228 bo->flags = flags;
231 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
237 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
243 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
254 if (bo->flags & RADEON_GEM_GTT_WC)
257 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
260 * mask out the WC flag from the BO
263 bo->flags &= ~RADEON_GEM_GTT_WC;
266 radeon_ttm_placement_from_domain(bo, domain);
269 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
270 &bo->placement, page_align, !kernel, acc_size,
276 *bo_ptr = bo;
278 trace_radeon_bo_create(bo);
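The creation path above is normally reached through radeon_bo_create(). A minimal caller-side sketch, assuming the radeon_bo_create() prototype of this kernel generation (rdev, size, byte_align, kernel, domain, flags, sg, resv, bo_ptr); the helper name and the 64 KiB size are illustrative only:

    /* Hypothetical helper: allocate a small, kernel-owned BO in VRAM. */
    static int example_alloc_vram_bo(struct radeon_device *rdev,
                                     struct radeon_bo **bo_ptr)
    {
            int r;

            r = radeon_bo_create(rdev, 64 * 1024, PAGE_SIZE, true,
                                 RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, bo_ptr);
            if (r) {
                    dev_err(rdev->dev, "example BO creation failed (%d)\n", r);
                    return r;
            }
            return 0;
    }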
283 int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
288 if (bo->kptr) {
290 *ptr = bo->kptr;
294 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
298 bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
300 *ptr = bo->kptr;
302 radeon_bo_check_tiling(bo, 0, 0);
306 void radeon_bo_kunmap(struct radeon_bo *bo)
308 if (bo->kptr == NULL)
310 bo->kptr = NULL;
311 radeon_bo_check_tiling(bo, 0, 0);
312 ttm_bo_kunmap(&bo->kmap);
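radeon_bo_kmap() caches the CPU mapping in bo->kptr, so repeated calls return the same pointer, and radeon_bo_kunmap() drops it. Callers conventionally hold the reservation around both calls. A rough sketch with trimmed error handling:

    void *cpu_ptr;
    int r;

    r = radeon_bo_reserve(bo, false);
    if (unlikely(r != 0))
            return r;
    r = radeon_bo_kmap(bo, &cpu_ptr);       /* reuses bo->kptr if already mapped */
    radeon_bo_unreserve(bo);
    if (r)
            return r;

    /* ... CPU reads/writes through cpu_ptr ... */

    r = radeon_bo_reserve(bo, false);
    if (likely(r == 0)) {
            radeon_bo_kunmap(bo);
            radeon_bo_unreserve(bo);
    }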
315 struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
317 if (bo == NULL)
320 ttm_bo_get(&bo->tbo);
321 return bo;
324 void radeon_bo_unref(struct radeon_bo **bo)
328 if ((*bo) == NULL)
330 tbo = &((*bo)->tbo);
332 *bo = NULL;
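radeon_bo_ref()/radeon_bo_unref() wrap the underlying TTM reference count; note that radeon_bo_unref() also NULLs the caller's pointer (line 332). Usage sketch:

    struct radeon_bo *held;

    held = radeon_bo_ref(bo);      /* returns NULL if bo is NULL */
    /* ... keep the object alive while work on it is outstanding ... */
    radeon_bo_unref(&held);        /* drops the reference; held is now NULL */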
335 int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
341 if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
344 if (bo->pin_count) {
345 bo->pin_count++;
347 *gpu_addr = radeon_bo_gpu_offset(bo);
353 domain_start = bo->rdev->mc.vram_start;
355 domain_start = bo->rdev->mc.gtt_start;
357 (radeon_bo_gpu_offset(bo) - domain_start));
362 if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
363 /* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
367 radeon_ttm_placement_from_domain(bo, domain);
368 for (i = 0; i < bo->placement.num_placement; i++) {
370 if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
371 !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
372 (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
373 bo->placements[i].lpfn =
374 bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
376 bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
378 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
381 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
383 bo->pin_count = 1;
385 *gpu_addr = radeon_bo_gpu_offset(bo);
387 bo->rdev->vram_pin_size += radeon_bo_size(bo);
389 bo->rdev->gart_pin_size += radeon_bo_size(bo);
391 dev_err(bo->rdev->dev, "%p pin failed\n", bo);
396 int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
398 return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
401 int radeon_bo_unpin(struct radeon_bo *bo)
406 if (!bo->pin_count) {
407 dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
410 bo->pin_count--;
411 if (bo->pin_count)
413 for (i = 0; i < bo->placement.num_placement; i++) {
414 bo->placements[i].lpfn = 0;
415 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
417 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
419 if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
420 bo->rdev->vram_pin_size -= radeon_bo_size(bo);
422 bo->rdev->gart_pin_size -= radeon_bo_size(bo);
424 dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
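Pinning bumps pin_count and marks the placements TTM_PL_FLAG_NO_EVICT; unpinning reverses both. The caller must hold the reservation around pin and unpin. A typical sequence, sketched with simplified error handling:

    u64 gpu_addr;
    int r;

    r = radeon_bo_reserve(bo, false);
    if (unlikely(r != 0))
            return r;
    r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
    radeon_bo_unreserve(bo);
    if (r)
            return r;

    /* ... program the hardware with gpu_addr ... */

    r = radeon_bo_reserve(bo, false);
    if (likely(r == 0)) {
            radeon_bo_unpin(bo);
            radeon_bo_unreserve(bo);
    }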
444 struct radeon_bo *bo, *n;
450 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
452 &bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
453 *((unsigned long *)&bo->tbo.base.refcount));
454 mutex_lock(&bo->rdev->gem.mutex);
455 list_del_init(&bo->list);
456 mutex_unlock(&bo->rdev->gem.mutex);
457 /* this should unref the ttm bo */
458 drm_gem_object_put_unlocked(&bo->tbo.base);
566 struct radeon_bo *bo = lobj->robj;
567 if (!bo->pin_count) {
571 radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
577 * any size, because it doesn't take the current "bo"
589 radeon_ttm_placement_from_domain(bo, domain);
591 radeon_uvd_force_into_uvd_segment(bo, allowed);
594 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
608 lobj->gpu_offset = radeon_bo_gpu_offset(bo);
609 lobj->tiling_flags = bo->tiling_flags;
620 int radeon_bo_get_surface_reg(struct radeon_bo *bo)
622 struct radeon_device *rdev = bo->rdev;
628 dma_resv_assert_held(bo->tbo.base.resv);
630 if (!bo->tiling_flags)
633 if (bo->surface_reg >= 0) {
634 reg = &rdev->surface_regs[bo->surface_reg];
635 i = bo->surface_reg;
643 if (!reg->bo)
646 old_object = reg->bo;
655 /* find someone with a surface reg and nuke their BO */
657 old_object = reg->bo;
665 bo->surface_reg = i;
666 reg->bo = bo;
669 radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
670 bo->tbo.mem.start << PAGE_SHIFT,
671 bo->tbo.num_pages << PAGE_SHIFT);
675 static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
677 struct radeon_device *rdev = bo->rdev;
680 if (bo->surface_reg == -1)
683 reg = &rdev->surface_regs[bo->surface_reg];
684 radeon_clear_surface_reg(rdev, bo->surface_reg);
686 reg->bo = NULL;
687 bo->surface_reg = -1;
690 int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
693 struct radeon_device *rdev = bo->rdev;
741 r = radeon_bo_reserve(bo, false);
744 bo->tiling_flags = tiling_flags;
745 bo->pitch = pitch;
746 radeon_bo_unreserve(bo);
750 void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
754 dma_resv_assert_held(bo->tbo.base.resv);
757 *tiling_flags = bo->tiling_flags;
759 *pitch = bo->pitch;
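Note the asymmetry above: radeon_bo_set_tiling_flags() takes the reservation itself (line 741), while radeon_bo_get_tiling_flags() asserts that the caller already holds it (line 754). An illustrative caller (the tiling mode and pitch values are made up):

    u32 tiling_flags, pitch;
    int r;

    r = radeon_bo_set_tiling_flags(bo, RADEON_TILING_MACRO, 256);
    if (r)
            return r;

    r = radeon_bo_reserve(bo, false);
    if (likely(r == 0)) {
            radeon_bo_get_tiling_flags(bo, &tiling_flags, &pitch);
            radeon_bo_unreserve(bo);
    }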
762 int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
766 dma_resv_assert_held(bo->tbo.base.resv);
768 if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
772 radeon_bo_clear_surface_reg(bo);
776 if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
780 if (bo->surface_reg >= 0)
781 radeon_bo_clear_surface_reg(bo);
785 if ((bo->surface_reg >= 0) && !has_moved)
788 return radeon_bo_get_surface_reg(bo);
791 void radeon_bo_move_notify(struct ttm_buffer_object *bo,
797 if (!radeon_ttm_bo_is_radeon_bo(bo))
800 rbo = container_of(bo, struct radeon_bo, tbo);
808 radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
812 int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
820 if (!radeon_ttm_bo_is_radeon_bo(bo))
822 rbo = container_of(bo, struct radeon_bo, tbo);
825 if (bo->mem.mem_type != TTM_PL_VRAM)
828 size = bo->mem.num_pages << PAGE_SHIFT;
829 offset = bo->mem.start << PAGE_SHIFT;
833 /* Can't move a pinned BO to visible VRAM */
846 r = ttm_bo_validate(bo, &rbo->placement, &ctx);
849 return ttm_bo_validate(bo, &rbo->placement, &ctx);
854 offset = bo->mem.start << PAGE_SHIFT;
862 int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
866 r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
870 *mem_type = bo->tbo.mem.mem_type;
872 r = ttm_bo_wait(&bo->tbo, true, no_wait);
873 ttm_bo_unreserve(&bo->tbo);
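radeon_bo_wait() reserves the BO itself, optionally reports the current memory type, and waits for the object to go idle unless no_wait is set. Sketch:

    u32 cur_placement;
    int r;

    r = radeon_bo_wait(bo, &cur_placement, false);  /* block until idle */
    if (r)
            return r;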
880 * @bo: buffer object in question
885 void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
888 struct dma_resv *resv = bo->tbo.base.resv;
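A caller that has just submitted GPU work touching the BO attaches the resulting fence to this reservation object; the trailing bool argument (truncated in the listing above) selects shared versus exclusive. Illustrative use:

    /* Attach as the exclusive (write) fence; passing true would add it as
     * a shared (read) fence instead. */
    radeon_bo_fence(bo, fence, false);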