Lines Matching defs:nvbo
51 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
52 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
61 drm_prime_gem_destroy(gem, nvbo->bo.sg);
63 ttm_bo_put(&nvbo->bo);
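
The matches at source lines 51-63 above appear to come from the GEM object free callback of the nouveau GEM code; nvbo is the driver's nouveau_bo, which wraps a TTM buffer object with an embedded GEM object (nvbo->bo.base). A minimal sketch of the teardown pattern those lines show follows. All of the sketches interleaved below assume the nouveau driver's internal headers and a kernel build environment; every function name prefixed with example_ is hypothetical, and surrounding details (such as runtime-PM handling) are omitted.

/*
 * Illustrative sketch only; assumes the driver's internal headers
 * ("nouveau_drv.h", "nouveau_gem.h", "nouveau_bo.h").
 */
static void example_gem_free(struct drm_gem_object *gem)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);

        /* Buffers imported through PRIME carry an sg_table to release. */
        if (gem->import_attach)
                drm_prime_gem_destroy(gem, nvbo->bo.sg);

        /* Dropping the TTM reference frees the nouveau_bo and GEM object. */
        ttm_bo_put(&nvbo->bo);
}
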
73 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
74 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
83 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
91 ret = nouveau_vma_new(nvbo, vmm, &vma);
95 ttm_bo_unreserve(&nvbo->bo);
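
Source lines 73-95 look like the GEM open path, where the object is mapped into the opening client's address space. The recurring pattern is a ttm_bo_reserve()/ttm_bo_unreserve() bracket around nouveau_vma_new(); a hedged sketch with a hypothetical wrapper name and simplified error handling:

static int example_map_into_client(struct nouveau_bo *nvbo,
                                   struct nouveau_vmm *vmm)
{
        struct nouveau_vma *vma;
        int ret;

        /* The per-client VMA list may only be touched while reserved. */
        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
        if (ret)
                return ret;

        /* Reuses an existing mapping for this VMM or creates a new one. */
        ret = nouveau_vma_new(nvbo, vmm, &vma);

        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}
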
121 nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
148 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
149 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
158 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
162 vma = nouveau_vma_find(nvbo, vmm);
167 nouveau_gem_object_unmap(nvbo, vma);
173 ttm_bo_unreserve(&nvbo->bo);
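
Lines 148-173 are the matching close path: the per-client VMA is looked up and torn down under the same reservation. The sketch below calls the nouveau_gem_object_unmap() helper whose definition shows up at line 121; the driver additionally reference-counts the VMA and deals with runtime PM, which is left out here.

static void example_unmap_from_client(struct nouveau_bo *nvbo,
                                      struct nouveau_vmm *vmm)
{
        struct nouveau_vma *vma;

        if (ttm_bo_reserve(&nvbo->bo, false, false, NULL))
                return;

        vma = nouveau_vma_find(nvbo, vmm);
        if (vma)
                nouveau_gem_object_unmap(nvbo, vma);

        ttm_bo_unreserve(&nvbo->bo);
}
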
182 struct nouveau_bo *nvbo;
196 nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
198 if (IS_ERR(nvbo))
199 return PTR_ERR(nvbo);
203 ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
205 kfree(nvbo);
209 ret = nouveau_bo_init(nvbo, size, align, flags, NULL, NULL);
211 /* XXX note: if this fails it kfrees nvbo */
219 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
222 nvbo->valid_domains &= domain;
225 nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
227 *pnvbo = nvbo;
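
Lines 182-227 appear to be the buffer-creation helper. Its shape is: allocate the nouveau_bo, initialise the embedded GEM object, then initialise the TTM side. Note the comment preserved at line 211: nouveau_bo_init() frees the nouveau_bo itself on failure, so that error path must not kfree() it a second time. A hedged reconstruction (valid_domains and swap-storage setup omitted):

static int example_gem_new(struct nouveau_drm *drm, struct nouveau_cli *cli,
                           u64 size, int align, uint32_t flags,
                           uint32_t tile_mode, uint32_t tile_flags,
                           struct nouveau_bo **pnvbo)
{
        struct nouveau_bo *nvbo;
        int ret;

        nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
                                tile_flags);
        if (IS_ERR(nvbo))
                return PTR_ERR(nvbo);

        ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
        if (ret) {
                kfree(nvbo);
                return ret;
        }

        ret = nouveau_bo_init(nvbo, size, align, flags, NULL, NULL);
        if (ret)
                return ret;     /* nvbo was already freed by nouveau_bo_init() */

        *pnvbo = nvbo;
        return 0;
}
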
236 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
240 if (is_power_of_2(nvbo->valid_domains))
241 rep->domain = nvbo->valid_domains;
242 else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
246 rep->offset = nvbo->bo.offset;
248 vma = nouveau_vma_find(nvbo, vmm);
255 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
256 rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
257 rep->tile_mode = nvbo->mode;
258 rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
260 rep->tile_flags |= nvbo->kind << 8;
263 rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
265 rep->tile_flags |= nvbo->zeta;
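
Lines 236-265 fill in the info reply for userspace. Based only on the shifts visible above, tile_flags carries the page kind at bit 8, the compression tag at bit 16 and the zeta setting in the low bits, with NOUVEAU_GEM_TILE_NONCONTIG marking buffers that need not be contiguous in VRAM; lines 260, 263 and 265 are alternative encodings that the driver selects by GPU generation (the selecting conditions are not visible in this listing). A small, hedged helper with a hypothetical variant parameter:

static uint32_t example_pack_tile_flags(const struct nouveau_bo *nvbo,
                                        int variant)
{
        uint32_t tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;

        if (variant == 0)                       /* as at line 260 */
                tile_flags |= nvbo->kind << 8;
        else if (variant == 1)                  /* as at line 263 */
                tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
        else                                    /* as at line 265 */
                tile_flags |= nvbo->zeta;

        return tile_flags;
}
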
275 struct nouveau_bo *nvbo = NULL;
280 req->info.tile_flags, &nvbo);
284 ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
287 ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
293 drm_gem_object_put_unlocked(&nvbo->bo.base);
301 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
302 struct ttm_buffer_object *bo = &nvbo->bo;
303 uint32_t domains = valid_domains & nvbo->valid_domains &
330 nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
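
Lines 301-330 intersect the domains requested by userspace with the buffer's valid_domains and feed the result into nouveau_bo_placement_set(). The sketch below is simplified: the driver's actual preference logic (which also looks at the buffer's current placement and at write versus read usage) is not visible in the listing and is reduced here to "prefer VRAM when allowed".

static int example_set_domain(struct nouveau_bo *nvbo,
                              uint32_t read_domains, uint32_t write_domains,
                              uint32_t valid_domains)
{
        uint32_t domains = valid_domains & nvbo->valid_domains &
                           (write_domains ? write_domains : read_domains);
        uint32_t pref_flags = 0, valid_flags = 0;

        if (!domains)
                return -EINVAL;

        if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
                valid_flags |= TTM_PL_FLAG_VRAM;
        if (domains & NOUVEAU_GEM_DOMAIN_GART)
                valid_flags |= TTM_PL_FLAG_TT;

        /* Simplified preference: VRAM if allowed, otherwise GART. */
        pref_flags = (domains & NOUVEAU_GEM_DOMAIN_VRAM) ? TTM_PL_FLAG_VRAM
                                                         : TTM_PL_FLAG_TT;

        nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
        return 0;
}
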
345 struct nouveau_bo *nvbo;
349 nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
350 b = &pbbo[nvbo->pbbo_index];
353 nouveau_bo_fence(nvbo, fence, !!b->write_domains);
364 if (unlikely(nvbo->validate_mapped)) {
365 ttm_bo_kunmap(&nvbo->kmap);
366 nvbo->validate_mapped = false;
369 list_del(&nvbo->entry);
370 nvbo->reserved_by = NULL;
371 ttm_bo_unreserve(&nvbo->bo);
372 drm_gem_object_put_unlocked(&nvbo->bo.base);
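
Lines 345-372 are the clean-up loop that runs over the validation list once a submission is done with its buffers: attach the new fence (exclusively when the pushbuf writes the buffer), drop any kmap left over from relocation patching, then unreserve and release the reference taken while building the list. Per-entry sketch, with the list handling around it omitted:

static void example_validate_fini_one(struct nouveau_bo *nvbo,
                                      struct nouveau_fence *fence,
                                      struct drm_nouveau_gem_pushbuf_bo *b)
{
        if (fence)
                nouveau_bo_fence(nvbo, fence, !!b->write_domains);

        if (unlikely(nvbo->validate_mapped)) {
                ttm_bo_kunmap(&nvbo->kmap);
                nvbo->validate_mapped = false;
        }

        list_del(&nvbo->entry);
        nvbo->reserved_by = NULL;
        ttm_bo_unreserve(&nvbo->bo);
        drm_gem_object_put_unlocked(&nvbo->bo.base);
}
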
408 struct nouveau_bo *nvbo;
416 nvbo = nouveau_gem_object(gem);
417 if (nvbo == res_bo) {
423 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
431 ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
438 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
441 res_bo = nvbo;
452 struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
461 b->user_priv = (uint64_t)(unsigned long)nvbo;
464 nvbo->reserved_by = file_priv;
465 nvbo->pbbo_index = i;
468 list_add_tail(&nvbo->entry, &both_list);
471 list_add_tail(&nvbo->entry, &vram_list);
474 list_add_tail(&nvbo->entry, &gart_list);
478 list_add_tail(&nvbo->entry, &both_list);
482 if (nvbo == res_bo)
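
Lines 408-482 are the reservation loop that builds that list. The interesting part, visible at lines 431-441, is the ww-mutex deadlock handling: -EDEADLK from ttm_bo_reserve() means every buffer already held must be released before the contended one is re-acquired on the slowpath, and the loop is then restarted with that buffer (res_bo) pre-reserved. A stripped-down sketch of just that step:

static int example_reserve_one(struct nouveau_bo *nvbo,
                               struct ww_acquire_ctx *ticket)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, true, false, ticket);
        if (ret == -EDEADLK) {
                /*
                 * The real loop first unreserves everything it already
                 * holds, then blocks on the contended BO here and
                 * restarts with it pre-reserved.
                 */
                ret = ttm_bo_reserve_slowpath(&nvbo->bo, true, ticket);
        }
        return ret;
}
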
505 struct nouveau_bo *nvbo;
508 list_for_each_entry(nvbo, list, entry) {
509 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
511 ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
519 ret = nouveau_bo_validate(nvbo, true, false);
526 ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
534 if (nvbo->bo.offset == b->presumed.offset &&
535 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
537 (nvbo->bo.mem.mem_type == TTM_PL_TT &&
541 if (nvbo->bo.mem.mem_type == TTM_PL_TT)
545 b->presumed.offset = nvbo->bo.offset;
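
Lines 505-545 validate each reserved buffer: pick a placement via the set_domain logic at lines 301-330, let TTM move the buffer with nouveau_bo_validate(), synchronise against other channels' fences, and refresh the "presumed" offset/domain that userspace cached so relocations can be skipped next time. A simplified per-buffer sketch; the real code also compares the presumed domain, counts how many entries still need relocations, and handles 32-bit clients:

static int example_validate_one(struct nouveau_channel *chan,
                                struct nouveau_bo *nvbo,
                                struct drm_nouveau_gem_pushbuf_bo *b)
{
        int ret;

        ret = nouveau_bo_validate(nvbo, true, false);
        if (ret)
                return ret;

        ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
        if (ret)
                return ret;

        if (nvbo->bo.offset != b->presumed.offset) {
                b->presumed.domain = nvbo->bo.mem.mem_type == TTM_PL_TT ?
                                     NOUVEAU_GEM_DOMAIN_GART :
                                     NOUVEAU_GEM_DOMAIN_VRAM;
                b->presumed.offset = nvbo->bo.offset;
                b->presumed.valid = 0;
        }
        return 0;
}
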
625 struct nouveau_bo *nvbo;
643 nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
646 nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
652 if (!nvbo->kmap.virtual) {
653 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
654 &nvbo->kmap);
659 nvbo->validate_mapped = true;
677 ret = ttm_bo_wait(&nvbo->bo, false, false);
683 nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
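
Lines 625-683 apply user-supplied relocations. The target buffer is kmapped on demand and remembered through validate_mapped so the clean-up loop above unmaps it, the GPU is waited on before the CPU writes, and the patched value goes in via nouveau_bo_wr32(), which takes a 32-bit word index rather than a byte offset (hence the ">> 2" at line 683). Sketch of one relocation write:

static int example_apply_one_reloc(struct nouveau_bo *nvbo,
                                   uint32_t reloc_bo_offset, uint32_t data)
{
        int ret;

        if (!nvbo->kmap.virtual) {
                ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
                                  &nvbo->kmap);
                if (ret)
                        return ret;
                nvbo->validate_mapped = true;
        }

        /* Make sure the GPU is done with the buffer before patching it. */
        ret = ttm_bo_wait(&nvbo->bo, false, false);
        if (ret)
                return ret;

        nouveau_bo_wr32(nvbo, reloc_bo_offset >> 2, data);
        return 0;
}
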
820 struct nouveau_bo *nvbo = (void *)(unsigned long)
823 OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
834 struct nouveau_bo *nvbo = (void *)(unsigned long)
841 if (!nvbo->kmap.virtual) {
842 ret = ttm_bo_kmap(&nvbo->bo, 0,
843 nvbo->bo.mem.
845 &nvbo->kmap);
850 nvbo->validate_mapped = true;
853 nouveau_bo_wr32(nvbo, (push[i].offset +
858 (nvbo->bo.offset + push[i].offset));
926 struct nouveau_bo *nvbo;
935 nvbo = nouveau_gem_object(gem);
937 lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
946 nouveau_bo_sync_for_cpu(nvbo);
958 struct nouveau_bo *nvbo;
963 nvbo = nouveau_gem_object(gem);
965 nouveau_bo_sync_for_device(nvbo);
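
The last two groups (lines 926-946 and 958-965) look like the cpu_prep/cpu_fini pair that brackets CPU access to a buffer: prep waits on the reservation object's fences (all of them when write access is requested) and syncs the BO for the CPU, fini syncs it back for the device. Sketch with the ioctl plumbing and the no-wait flag left out:

static int example_cpu_prep(struct nouveau_bo *nvbo, bool write)
{
        long lret;
        int ret = 0;

        lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
                                         30 * HZ);
        if (lret == 0)
                ret = -EBUSY;           /* fences did not signal in time */
        else if (lret < 0)
                ret = lret;             /* interrupted */

        nouveau_bo_sync_for_cpu(nvbo);
        return ret;
}

static void example_cpu_fini(struct nouveau_bo *nvbo)
{
        nouveau_bo_sync_for_device(nvbo);
}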