Lines Matching defs:backup
47 struct vmw_buffer_object *backup = res->backup;
48 struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
50 dma_resv_assert_held(res->backup->base.base.resv);
67 rb_insert_color(&res->mob_node, &backup->res_tree);
71 vmw_bo_prio_add(backup, res->used_prio);
80 struct vmw_buffer_object *backup = res->backup;
82 dma_resv_assert_held(backup->base.base.resv);
85 rb_erase(&res->mob_node, &backup->res_tree);
87 vmw_bo_prio_del(backup, res->used_prio);
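Lines 80-87 are the matching detach path. A sketch of the whole helper under the attach/detach pairing suggested above; the function name and the RB_CLEAR_NODE() call marking the node as unlinked are assumptions:

	static void vmw_resource_mob_detach(struct vmw_resource *res)
	{
		struct vmw_buffer_object *backup = res->backup;

		dma_resv_assert_held(backup->base.base.resv);
		rb_erase(&res->mob_node, &backup->res_tree);
		RB_CLEAR_NODE(&res->mob_node);	/* assumption: marks "not attached" */
		vmw_bo_prio_del(backup, res->used_prio);
	}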
133 if (res->backup) {
134 struct ttm_buffer_object *bo = &res->backup->base;
150 vmw_bo_dirty_release(res->backup);
152 vmw_bo_unreference(&res->backup);
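Lines 133-152 are from the resource release path: if a backup object exists, its TTM buffer object is reserved while per-resource state is torn down, then dirty tracking is dropped and the last reference released. A hedged sketch of that bracketing; the reserve flags and the elided teardown in the middle are assumptions:

	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		(void) ttm_bo_reserve(bo, false, false, NULL); /* assumption: uninterruptible */
		/* ... detach from the MOB tree, type-specific cleanup ... */
		ttm_bo_unreserve(bo);

		vmw_bo_dirty_release(res->backup);
		vmw_bo_unreference(&res->backup);
	}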
236 res->backup = NULL;
359 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
361 * @res: The resource for which to allocate a backup buffer.
370 struct vmw_buffer_object *backup;
373 if (likely(res->backup)) {
374 BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
378 backup = kzalloc(sizeof(*backup), GFP_KERNEL);
379 if (unlikely(!backup))
382 ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
389 res->backup = backup;
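Lines 370-389 belong to the backup-buffer allocation helper named at line 359. A sketch of the whole function, assuming the placement comes from the per-type function table and that vmw_bo_init() takes a destructor callback; the trailing vmw_bo_init() arguments and the vmw_bo_bo_free name are assumptions, since that signature has changed across kernel versions:

	static int vmw_resource_buf_alloc(struct vmw_resource *res,
					  bool interruptible)
	{
		unsigned long size = PFN_ALIGN(res->backup_size);
		struct vmw_buffer_object *backup;
		int ret;

		if (likely(res->backup)) {
			/* An existing backup buffer must already be large enough. */
			BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
			return 0;
		}

		backup = kzalloc(sizeof(*backup), GFP_KERNEL);
		if (unlikely(!backup))
			return -ENOMEM;

		ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
				  res->func->backup_placement,	/* assumption */
				  interruptible,
				  &vmw_bo_bo_free);		/* assumption */
		if (unlikely(ret != 0))
			return ret;	/* assumption: init frees @backup on failure */

		res->backup = backup;
		return 0;
	}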
401 * containing backup data if a bind operation is needed.
431 * Handle the case where the backup mob is marked coherent but
436 if (res->backup->dirty && !res->dirty) {
440 } else if (!res->backup->dirty && res->dirty) {
456 vmw_bo_dirty_unmap(res->backup, start, end);
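Lines 431-456 reconcile dirty-tracking state between the backup MOB and the resource before flushing a clean range. A sketch of the branch implied by lines 436-440, assuming the resource-type function table provides dirty_alloc()/dirty_free() hooks (those hook names are assumptions):

	if (res->backup->dirty && !res->dirty) {
		/* Backup is tracked but the resource is not: start tracking. */
		ret = res->func->dirty_alloc(res);
		if (ret)
			return ret;
	} else if (!res->backup->dirty && res->dirty) {
		/* Resource-side tracking is now redundant: drop it. */
		res->func->dirty_free(res);
	}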
478 * @switch_backup: Backup buffer has been switched.
479 * @new_backup: Pointer to new backup buffer if command submission
481 * @new_backup_offset: New backup offset if @switch_backup is true.
498 if (switch_backup && new_backup != res->backup) {
499 if (res->backup) {
502 vmw_bo_dirty_release(res->backup);
503 vmw_bo_unreference(&res->backup);
507 res->backup = vmw_bo_reference(new_backup);
517 res->backup = NULL;
520 vmw_bo_dirty_release(res->backup);
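Lines 498-520 come from the unreserve path documented at lines 478-481, where command submission may have switched the backup buffer. A sketch of the switch, assuming the offset field is updated alongside the new reference (the backup_offset bookkeeping is an assumption):

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			vmw_bo_dirty_release(res->backup);
			vmw_bo_unreference(&res->backup);
		}
		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);
			res->backup_offset = new_backup_offset;	/* assumption */
		} else {
			res->backup = NULL;
		}
	}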
539 * vmw_resource_check_buffer - Check whether a backup buffer is needed
544 * @res: The resource for which to allocate a backup buffer.
548 * reserved and validated backup buffer.
561 if (unlikely(res->backup == NULL)) {
568 ttm_bo_get(&res->backup->base);
569 val_buf->bo = &res->backup->base;
580 ret = ttm_bo_validate(&res->backup->base,
595 vmw_bo_unreference(&res->backup);
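Lines 561-595 are from the check-buffer helper documented at lines 539-548, which hands the caller a referenced, validated backup buffer. A sketch of the flow; the ttm_operation_ctx contents, the placement argument, and the error unwind are assumptions:

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	/* Hand the caller a referenced val_buf pointing at the backup BO. */
	ttm_bo_get(&res->backup->base);
	val_buf->bo = &res->backup->base;

	if (res->func->needs_backup) {
		struct ttm_operation_ctx ctx = {
			.interruptible = interruptible,
			.no_wait_gpu = false,
		};

		ret = ttm_bo_validate(&res->backup->base,
				      res->func->backup_placement, /* assumption */
				      &ctx);
		if (unlikely(ret != 0))
			vmw_bo_unreference(&res->backup); /* assumption: unwind */
	}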
606 * a backup buffer is present for guest-backed resources. However,
620 if (res->func->needs_backup && res->backup == NULL &&
624 DRM_ERROR("Failed to allocate a backup buffer "
636 * backup buffer
639 * @val_buf: Backup buffer information.
659 * to a backup buffer.
704 * On successful return, any backup DMA buffer pointed to by @res->backup will
727 if (res->backup)
728 val_buf.bo = &res->backup->base;
750 /* Trylock backup buffers with a NULL ticket. */
768 else if (!res->func->needs_backup && res->backup) {
770 vmw_bo_unreference(&res->backup);
785 * Evicts the Guest Backed hardware resource if the backup
789 * both require the backup buffer to be reserved.
907 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
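Line 907 documents vmw_resource_needs_backup(). The predicate itself is just an accessor on the per-resource-type function table, matching what the fragments elsewhere (e.g. line 620) already dereference; a sketch:

	bool vmw_resource_needs_backup(const struct vmw_resource *res)
	{
		return res->func->needs_backup;
	}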
946 /* Wait lock backup buffers with a ticket. */
1012 if (res->backup) {
1013 vbo = res->backup;
1067 if (--res->pin_count == 0 && res->backup) {
1068 struct vmw_buffer_object *vbo = res->backup;
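Lines 1067-1068 are the unpin tail: when the last pin reference drops and a backup exists, the buffer object itself is unpinned under reservation. A sketch; the vmw_bo_pin_reserved() call and the reserve flags are assumptions:

	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);	/* assumption */
		ttm_bo_unreserve(&vbo->base);
	}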