Lines Matching defs:nvbo
142 struct nouveau_bo *nvbo = nouveau_bo(bo);
144 WARN_ON(nvbo->pin_refcnt > 0);
145 nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
154 kfree(nvbo);
166 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
169 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
173 if (nvbo->mode) {
176 *size = roundup_64(*size, 64 * nvbo->mode);
180 *size = roundup_64(*size, 64 * nvbo->mode);
184 *size = roundup_64(*size, 64 * nvbo->mode);
188 *size = roundup_64(*size, 32 * nvbo->mode);
192 *size = roundup_64(*size, (1 << nvbo->page));
193 *align = max((1 << nvbo->page), *align);
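In the branches above, a tiled buffer's size is rounded up to a multiple of 64 (or, on the oldest chipsets, 32) times the tile mode, while the final branch rounds both size and alignment up to the VMM page size chosen for the buffer. A minimal arithmetic sketch of that last branch, assuming a hypothetical nvbo->page of 16 (64 KiB pages) and a 100 KiB request:

    u64 size  = 100 * 1024;     /* hypothetical caller request   */
    u32 align = PAGE_SIZE;      /* hypothetical caller alignment */

    size  = roundup_64(size, 1 << 16);  /* -> 0x20000, a 64 KiB multiple */
    align = max(1u << 16, align);       /* -> 0x10000                    */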
204 struct nouveau_bo *nvbo;
214 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
215 if (!nvbo)
217 INIT_LIST_HEAD(&nvbo->head);
218 INIT_LIST_HEAD(&nvbo->entry);
219 INIT_LIST_HEAD(&nvbo->vma_list);
220 nvbo->bo.bdev = &drm->ttm.bdev;
231 nvbo->force_coherent = true;
235 nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
236 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
240 nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
243 nvbo->kind = (tile_flags & 0x00007f00) >> 8;
244 nvbo->comp = (tile_flags & 0x00030000) >> 16;
245 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
249 nvbo->zeta = (tile_flags & 0x00000007);
251 nvbo->mode = tile_mode;
252 nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
274 if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
286 if (nvbo->comp && !vmm->page[pi].comp) {
288 nvbo->kind = mmu->kind[nvbo->kind];
289 nvbo->comp = 0;
291 nvbo->page = vmm->page[pi].shift;
293 nouveau_bo_fixup_align(nvbo, flags, align, size);
295 return nvbo;
298 kfree(nvbo);
303 nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
310 acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
312 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
313 nouveau_bo_placement_set(nvbo, flags, 0);
315 ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
316 &nvbo->placement, align >> PAGE_SHIFT, false,
332 struct nouveau_bo *nvbo;
335 nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
337 if (IS_ERR(nvbo))
338 return PTR_ERR(nvbo);
340 ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
344 *pnvbo = nvbo;
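nouveau_bo_new() (lines 332-344) is the public constructor: nouveau_bo_alloc() sizes the buffer and picks its page/kind, nouveau_bo_init() hands it to TTM, and the result is returned through *pnvbo. A minimal caller sketch, assuming this branch's nouveau_bo_new() prototype and a hypothetical nouveau_cli pointer named cli:

    struct nouveau_bo *nvbo = NULL;
    int ret;

    /* Hypothetical request: 64 KiB of linear VRAM, page aligned,
     * no tiling (tile_mode = 0, tile_flags = 0), no imported sg/resv. */
    ret = nouveau_bo_new(cli, 64 * 1024, PAGE_SIZE, TTM_PL_FLAG_VRAM,
                         0, 0, NULL, NULL, &nvbo);
    if (ret)
        return ret;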
362 set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
364 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
369 nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
370 nvbo->bo.mem.num_pages < vram_pages / 4) {
377 if (nvbo->zeta) {
384 for (i = 0; i < nvbo->placement.num_placement; ++i) {
385 nvbo->placements[i].fpfn = fpfn;
386 nvbo->placements[i].lpfn = lpfn;
388 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
389 nvbo->busy_placements[i].fpfn = fpfn;
390 nvbo->busy_placements[i].lpfn = lpfn;
396 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
398 struct ttm_placement *pl = &nvbo->placement;
399 uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
401 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
403 pl->placement = nvbo->placements;
404 set_placement_list(nvbo->placements, &pl->num_placement,
407 pl->busy_placement = nvbo->busy_placements;
408 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
411 set_placement_range(nvbo, type);
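nouveau_bo_placement_set() (lines 396-411) expands a TTM_PL_FLAG_* mask into the placement and busy_placement arrays, adding UNCACHED for force_coherent buffers and NO_EVICT while pinned; set_placement_range() (362-390) then confines small tiled buffers on older chips to one half of VRAM (zeta buffers to the other) via the fpfn/lpfn limits. A minimal sketch of the usual "prefer VRAM, fall back to GART" policy, assuming nvbo is already reserved:

    int ret;

    /* Preferred placement VRAM; TT (GART) only as the busy fallback. */
    nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, TTM_PL_FLAG_TT);
    ret = nouveau_bo_validate(nvbo, true, false);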
415 nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
417 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
418 struct ttm_buffer_object *bo = &nvbo->bo;
428 if (!nvbo->contig) {
429 nvbo->contig = true;
435 if (nvbo->pin_refcnt) {
442 nvbo->pin_refcnt++;
447 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
448 ret = nouveau_bo_validate(nvbo, false, false);
453 nvbo->pin_refcnt++;
454 nouveau_bo_placement_set(nvbo, memtype, 0);
460 nvbo->pin_refcnt--;
461 ret = nouveau_bo_validate(nvbo, false, false);
464 nvbo->pin_refcnt++;
479 nvbo->contig = false;
485 nouveau_bo_unpin(struct nouveau_bo *nvbo)
487 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
488 struct ttm_buffer_object *bo = &nvbo->bo;
495 ref = --nvbo->pin_refcnt;
500 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
502 ret = nouveau_bo_validate(nvbo, false, false);
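nouveau_bo_pin() (lines 415-479) and nouveau_bo_unpin() (485-502) keep a pin_refcnt: the first pin validates the buffer into the requested domain with NO_EVICT set and can force a contiguous VRAM allocation, and the last unpin revalidates with the flag cleared. A minimal sketch of the pairing, assuming the buffer created above:

    int ret;

    /* Pin contiguously into VRAM (e.g. for scanout); unpin when done. */
    ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
    if (ret)
        return ret;

    /* ... the buffer stays resident at a fixed VRAM offset here ... */

    nouveau_bo_unpin(nvbo);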
522 nouveau_bo_map(struct nouveau_bo *nvbo)
526 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
530 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
532 ttm_bo_unreserve(&nvbo->bo);
537 nouveau_bo_unmap(struct nouveau_bo *nvbo)
539 if (!nvbo)
542 ttm_bo_kunmap(&nvbo->kmap);
546 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
548 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
549 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
558 if (nvbo->force_coherent)
575 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
577 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
578 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
587 if (nvbo->force_coherent)
603 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
609 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
613 nouveau_bo_sync_for_device(nvbo);
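nouveau_bo_sync_for_device() and nouveau_bo_sync_for_cpu() (lines 546-587) walk the ttm_dma_tt pages with the dma_sync_single_for_{device,cpu}() helpers and are no-ops for force_coherent buffers; nouveau_bo_validate() (603-613) syncs for the device after TTM has (re)placed the buffer. A minimal sketch of bracketing a CPU write to a cached, non-coherent buffer, assuming it is already kmapped:

    /* Claim the pages for the CPU, touch them, then hand them back
     * to the device before the GPU uses the buffer again. */
    nouveau_bo_sync_for_cpu(nvbo);
    nouveau_bo_wr32(nvbo, 0, 0xcafe0001);   /* hypothetical payload */
    nouveau_bo_sync_for_device(nvbo);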
674 nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
677 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
688 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
691 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
702 nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
705 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
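nouveau_bo_map()/nouveau_bo_unmap() (lines 522-542) wrap ttm_bo_kmap() over the whole buffer, and the wr16/rd32/wr32 accessors (674-705) index into that kmap as an array of 16- or 32-bit words, so the index is a word index rather than a byte offset. A minimal sketch, assuming the buffer is already validated:

    u32 magic;
    int ret;

    ret = nouveau_bo_map(nvbo);
    if (ret)
        return ret;

    nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);   /* word 0 == byte offset 0 */
    magic = nouveau_bo_rd32(nvbo, 0);
    nouveau_bo_unmap(nvbo);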
811 struct nouveau_bo *nvbo = nouveau_bo(bo);
815 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
819 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
823 *pl = nvbo->placement;
1398 struct nouveau_bo *nvbo = nouveau_bo(bo);
1406 mem->mem.page == nvbo->page) {
1407 list_for_each_entry(vma, &nvbo->vma_list, head) {
1411 list_for_each_entry(vma, &nvbo->vma_list, head) {
1424 struct nouveau_bo *nvbo = nouveau_bo(bo);
1433 nvbo->mode, nvbo->zeta);
1458 struct nouveau_bo *nvbo = nouveau_bo(bo);
1467 if (nvbo->pin_refcnt)
1468 NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
1512 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
1526 struct nouveau_bo *nvbo = nouveau_bo(bo);
1528 return drm_vma_node_verify_access(&nvbo->bo.base.vma_node, file);
1638 struct nouveau_bo *nvbo = nouveau_bo(bo);
1648 !nvbo->kind)
1652 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
1654 ret = nouveau_bo_validate(nvbo, false, false);
1666 for (i = 0; i < nvbo->placement.num_placement; ++i) {
1667 nvbo->placements[i].fpfn = 0;
1668 nvbo->placements[i].lpfn = mappable;
1671 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
1672 nvbo->busy_placements[i].fpfn = 0;
1673 nvbo->busy_placements[i].lpfn = mappable;
1676 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1677 return nouveau_bo_validate(nvbo, false, false);
1811 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
1813 struct dma_resv *resv = nvbo->bo.base.resv;
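nouveau_bo_fence() (lines 1811-1813) attaches a nouveau_fence to the buffer's dma_resv, as the exclusive fence for GPU writes or as a shared fence for reads. A minimal sketch, assuming a fence just emitted by the channel that touched the buffer:

    /* GPU wrote the buffer: later waits must see this fence. */
    nouveau_bo_fence(nvbo, fence, true);

    /* GPU only read the buffer: a shared fence is enough. */
    nouveau_bo_fence(nvbo, fence, false);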