/*	$NetBSD: vmwgfx_resource.c,v 1.4 2022/02/17 01:21:02 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_resource.c,v 1.4 2022/02/17 01:21:02 riastradh Exp $");

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;
#ifndef __NetBSD__
	struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
#endif

	dma_resv_assert_held(res->backup->base.base.resv);
	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
		res->func->prio;

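	/*
	 * Resources attached to a MOB are kept in an RB tree ordered by
	 * backup_offset, which lets vmw_resources_clean() find every
	 * resource intersecting a backing-store range in O(log n) time.
	 */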
#ifdef __NetBSD__
	/*
	 * XXX Hedged sketch, not verified against the NetBSD compat
	 * headers: this assumes the Linux-compat struct rb_root embeds
	 * a native rb_tree(9) as rbr_tree, with rb_tree_ops that compare
	 * nodes by backup_offset, so that rb_tree_insert_node() yields
	 * the same ordering as the Linux branch below.
	 */
	rb_tree_insert_node(&backup->res_tree.rbr_tree, res);
#else
	while (*new) {
		struct vmw_resource *this =
			container_of(*new, struct vmw_resource, mob_node);

		parent = *new;
		new = (res->backup_offset < this->backup_offset) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&res->mob_node, parent, new);
	rb_insert_color(&res->mob_node, &backup->res_tree);
#endif
	res->mob_attached = true;

	vmw_bo_prio_add(backup, res->used_prio);
}

/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	dma_resv_assert_held(backup->base.base.resv);
	if (vmw_resource_mob_attached(res)) {
		res->mob_attached = false;
		rb_erase(&res->mob_node, &backup->res_tree);
		RB_CLEAR_NODE(&res->mob_node);
		vmw_bo_prio_del(backup, res->used_prio);
	}
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
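	/*
	 * Final teardown: unbind from and drop the backup buffer, kill
	 * any bindings and destroy the hardware resource, then free the
	 * resource memory and release its id.
	 */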
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, NULL);
		if (vmw_resource_mob_attached(res) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		vmw_resource_mob_detach(res);
		if (res->dirty)
			res->func->dirty_free(res);
		if (res->coherent)
			vmw_bo_dirty_release(res->backup);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}


/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the id manager, and set
 * @res->id to that id. Returns 0 on success and a negative error code
 * on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

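	/*
	 * idr_preload() pre-allocates idr memory with GFP_KERNEL while
	 * sleeping is still allowed, so the allocation below can use
	 * GFP_NOWAIT under the spinlock.
	 */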
	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	RB_CLEAR_NODE(&res->mob_node);
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	res->coherent = false;
	res->used_prio = 3;
	res->dirty = NULL;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}


/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 *
 * Like vmw_user_resource_lookup_handle(), but returns the resource without
 * taking an extra reference. Returns ERR_PTR(-ESRCH) if the handle can't be
 * found and ERR_PTR(-EINVAL) if it is associated with an incorrect resource
 * type.
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv
				      *converter)
{
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base)
		return ERR_PTR(-ESRCH);

	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
		ttm_base_object_noref_release();
		return ERR_PTR(-EINVAL);
	}

	return converter->base_obj_to_res(base);
}

/**
 * Helper function that looks up either a surface or a buffer object.
 *
 * The pointers pointed to by @out_surf and @out_buf must be NULL on entry.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
	return ret;
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 * @dirtying:       Pending GPU operation will dirty the resource.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf,
				    bool dirtying)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

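	/*
	 * Bind when a buffer is supplied and either the resource needs
	 * a backup mob it is not yet attached to, or it can use an
	 * optional buffer that was provided anyway.
	 */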
	if (func->bind &&
	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			vmw_resource_mob_attach(res);
	}

	/*
	 * Handle the case where the backup mob is marked coherent but
	 * the resource isn't.
	 */
	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
	    !res->coherent) {
		if (res->backup->dirty && !res->dirty) {
			ret = func->dirty_alloc(res);
			if (ret)
				return ret;
		} else if (!res->backup->dirty && res->dirty) {
			func->dirty_free(res);
		}
	}

	/*
	 * Transfer the dirty regions to the resource and update
	 * the resource.
	 */
	if (res->dirty) {
		if (dirtying && !res->res_dirty) {
			pgoff_t start = res->backup_offset >> PAGE_SHIFT;
			pgoff_t end = __KERNEL_DIV_ROUND_UP
				(res->backup_offset + res->backup_size,
				 PAGE_SIZE);

			vmw_bo_dirty_unmap(res->backup, start, end);
		}

		vmw_bo_dirty_transfer_to_res(res);
		return func->dirty_sync(res);
	}

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:         Change dirty status of the resource.
 * @dirty:             When changing dirty status indicates the new status.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool dirty_set,
			    bool dirty,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

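	/*
	 * If command submission switched to a new backup buffer, detach
	 * from (and, for coherent resources, stop dirty-tracking on) the
	 * old buffer before attaching to the new one.
	 */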
	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			vmw_resource_mob_detach(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->backup);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);

			/*
			 * The validation code should already have added a
			 * dirty tracker here.
			 */
			WARN_ON(res->coherent && !new_backup->dirty);

			vmw_resource_mob_attach(res);
		} else {
			res->backup = NULL;
		}
	} else if (switch_backup && res->coherent) {
		vmw_bo_dirty_release(res->backup);
	}

	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (dirty_set)
		res->res_dirty = dirty;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

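	/*
	 * Put the backup buffer on a one-entry validation list so that
	 * ttm_eu_reserve_buffers() can lock it, either under @ticket or
	 * by trylocking when @ticket is NULL.
	 */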
	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->backup->base);
	val_buf->bo = &res->backup->base;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @interruptible:  Whether any sleeps should be performed interruptible.
 * @no_backup:      Whether to skip allocating a backup buffer.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

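	/*
	 * Eviction writes any device-side contents back to the backup
	 * buffer (unbind) and then destroys the hardware resource; the
	 * resource can be recreated from the backup on the next
	 * validation.
	 */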
	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		vmw_resource_mob_detach(res);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}


/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 * @dirtying: Pending GPU operation will dirty the resource
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr,
			  bool dirtying)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->backup)
		val_buf.bo = &res->backup->base;
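	/*
	 * Retry loop: on -EBUSY, evict the least recently used resource
	 * of the same type and try again, giving up after
	 * VMW_RES_EVICT_ERR_COUNT eviction failures or when interrupted.
	 */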
	do {
		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		WARN_ON_ONCE(vmw_resource_mob_attached(res));
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}


/**
 * vmw_resource_unbind_list - Unbind resources attached to a backing MOB
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the guest-backed hardware resources if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.num_shared = 0
	};

	dma_resv_assert_held(vbo->base.base.resv);
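	/*
	 * Detaching rebalances the tree, so always unbind whatever
	 * resource currently sits at the root until the tree is empty,
	 * rather than iterating over it.
	 */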
	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
		struct rb_node *node = vbo->res_tree.rb_node;
		struct vmw_resource *res =
			container_of(node, struct vmw_resource, mob_node);

		if (!WARN_ON_ONCE(!res->func->unbind))
			(void) res->func->unbind(res, res->res_dirty, &val_buf);

		res->backup_dirty = true;
		res->res_dirty = false;
		vmw_resource_mob_detach(res);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}


/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist.  This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;


	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv     = dx_query_ctx->dev_priv;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid    = dx_query_ctx->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time the affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}



/**
 * vmw_query_move_notify - Read back query states before a query MOB moves
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;


	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);

}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

		/* Lock backup buffers with a ticket, waiting if necessary. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

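	/*
	 * Only the first pin reference validates and pins the backup
	 * buffer; subsequent pins merely bump the count.
	 */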
	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible, true);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}

/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 * @res: The resource.
 * @start: The first page touched.
 * @end: The last page touched + 1.
 */
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end)
{
	if (res->dirty)
		res->func->dirty_range_add(res, start << PAGE_SHIFT,
					   end << PAGE_SHIFT);
}

/**
 * vmw_resources_clean - Clean resources intersecting a mob range
 * @vbo: The mob buffer object
 * @start: The mob page offset starting the range
 * @end: The mob page offset ending the range
 * @num_prefault: Returns how many pages including the first have been
 * cleaned and are ok to prefault
 */
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault)
{
	struct rb_node *cur = vbo->res_tree.rb_node;
	struct vmw_resource *found = NULL;
	unsigned long res_start = start << PAGE_SHIFT;
	unsigned long res_end = end << PAGE_SHIFT;
	unsigned long last_cleaned = 0;

	/*
	 * Find the resource with lowest backup_offset that intersects the
	 * range.
	 */
	while (cur) {
		struct vmw_resource *cur_res =
			container_of(cur, struct vmw_resource, mob_node);

		if (cur_res->backup_offset >= res_end) {
			cur = cur->rb_left;
		} else if (cur_res->backup_offset + cur_res->backup_size <=
			   res_start) {
			cur = cur->rb_right;
		} else {
			found = cur_res;
			cur = cur->rb_left;
			/* Continue to look for resources with lower offsets */
		}
	}

	/*
	 * In order of increasing backup_offset, clean dirty resources
	 * intersecting the range.
	 */
	while (found) {
		if (found->res_dirty) {
			int ret;

			if (!found->func->clean)
				return -EINVAL;

			ret = found->func->clean(found);
			if (ret)
				return ret;

			found->res_dirty = false;
		}
		last_cleaned = found->backup_offset + found->backup_size;
		cur = rb_next(&found->mob_node);
		if (!cur)
			break;

		found = container_of(cur, struct vmw_resource, mob_node);
		if (found->backup_offset >= res_end)
			break;
	}

	/*
	 * Set the number of pages allowed for prefaulting and fence the
	 * buffer object.
	 */
	*num_prefault = 1;
	if (last_cleaned > res_start) {
		struct ttm_buffer_object *bo = &vbo->base;

		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
						      PAGE_SIZE);
		vmw_bo_fence_single(bo, NULL);
		if (bo->moving)
			dma_fence_put(bo->moving);
		bo->moving = dma_fence_get
			(dma_resv_get_excl(bo->base.resv));
	}

	return 0;
}