Home | History | Annotate | Line # | Download | only in vmwgfx
      1  1.3  riastrad /*	$NetBSD: vmwgfx_bo.c,v 1.3 2022/10/25 23:39:13 riastradh Exp $	*/
      2  1.1  riastrad 
      3  1.1  riastrad // SPDX-License-Identifier: GPL-2.0 OR MIT
      4  1.1  riastrad /**************************************************************************
      5  1.1  riastrad  *
      6  1.1  riastrad  * Copyright  2011-2018 VMware, Inc., Palo Alto, CA., USA
      7  1.1  riastrad  * All Rights Reserved.
      8  1.1  riastrad  *
      9  1.1  riastrad  * Permission is hereby granted, free of charge, to any person obtaining a
     10  1.1  riastrad  * copy of this software and associated documentation files (the
     11  1.1  riastrad  * "Software"), to deal in the Software without restriction, including
     12  1.1  riastrad  * without limitation the rights to use, copy, modify, merge, publish,
     13  1.1  riastrad  * distribute, sub license, and/or sell copies of the Software, and to
     14  1.1  riastrad  * permit persons to whom the Software is furnished to do so, subject to
     15  1.1  riastrad  * the following conditions:
     16  1.1  riastrad  *
     17  1.1  riastrad  * The above copyright notice and this permission notice (including the
     18  1.1  riastrad  * next paragraph) shall be included in all copies or substantial portions
     19  1.1  riastrad  * of the Software.
     20  1.1  riastrad  *
     21  1.1  riastrad  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     22  1.1  riastrad  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     23  1.1  riastrad  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     24  1.1  riastrad  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
     25  1.1  riastrad  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
     26  1.1  riastrad  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
     27  1.1  riastrad  * USE OR OTHER DEALINGS IN THE SOFTWARE.
     28  1.1  riastrad  *
     29  1.1  riastrad  **************************************************************************/
     30  1.1  riastrad 
     31  1.1  riastrad #include <sys/cdefs.h>
     32  1.3  riastrad __KERNEL_RCSID(0, "$NetBSD: vmwgfx_bo.c,v 1.3 2022/10/25 23:39:13 riastradh Exp $");
     33  1.1  riastrad 
     34  1.1  riastrad #include <drm/ttm/ttm_placement.h>
     35  1.1  riastrad 
     36  1.1  riastrad #include "vmwgfx_drv.h"
     37  1.1  riastrad #include "ttm_object.h"
     38  1.1  riastrad 
     39  1.1  riastrad 
/**
 * struct vmw_user_buffer_object - User-space-visible buffer object
 *
 * @prime: The prime object providing user visibility (handle lookup,
 * sharing); its embedded base object owns a reference on @vbo.
 * @vbo: The embedded struct vmw_buffer_object. The helper
 * vmw_user_buffer_object() recovers the containing object from the
 * TTM buffer object via container_of() on this member.
 */
struct vmw_user_buffer_object {
	struct ttm_prime_object prime;
	struct vmw_buffer_object vbo;
};
     50  1.1  riastrad 
     51  1.1  riastrad 
     52  1.1  riastrad /**
     53  1.1  riastrad  * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
     54  1.1  riastrad  * vmw_buffer_object.
     55  1.1  riastrad  *
     56  1.1  riastrad  * @bo: Pointer to the TTM buffer object.
     57  1.1  riastrad  * Return: Pointer to the struct vmw_buffer_object embedding the
     58  1.1  riastrad  * TTM buffer object.
     59  1.1  riastrad  */
     60  1.1  riastrad static struct vmw_buffer_object *
     61  1.1  riastrad vmw_buffer_object(struct ttm_buffer_object *bo)
     62  1.1  riastrad {
     63  1.1  riastrad 	return container_of(bo, struct vmw_buffer_object, base);
     64  1.1  riastrad }
     65  1.1  riastrad 
     66  1.1  riastrad 
     67  1.1  riastrad /**
     68  1.1  riastrad  * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
     69  1.1  riastrad  * vmw_user_buffer_object.
     70  1.1  riastrad  *
     71  1.1  riastrad  * @bo: Pointer to the TTM buffer object.
     72  1.1  riastrad  * Return: Pointer to the struct vmw_buffer_object embedding the TTM buffer
     73  1.1  riastrad  * object.
     74  1.1  riastrad  */
     75  1.1  riastrad static struct vmw_user_buffer_object *
     76  1.1  riastrad vmw_user_buffer_object(struct ttm_buffer_object *bo)
     77  1.1  riastrad {
     78  1.1  riastrad 	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
     79  1.1  riastrad 
     80  1.1  riastrad 	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
     81  1.1  riastrad }
     82  1.1  riastrad 
     83  1.1  riastrad 
     84  1.1  riastrad /**
     85  1.1  riastrad  * vmw_bo_pin_in_placement - Validate a buffer to placement.
     86  1.1  riastrad  *
     87  1.1  riastrad  * @dev_priv:  Driver private.
     88  1.1  riastrad  * @buf:  DMA buffer to move.
     89  1.1  riastrad  * @placement:  The placement to pin it.
     90  1.1  riastrad  * @interruptible:  Use interruptible wait.
     91  1.1  riastrad  * Return: Zero on success, Negative error code on failure. In particular
     92  1.1  riastrad  * -ERESTARTSYS if interrupted by a signal
     93  1.1  riastrad  */
     94  1.1  riastrad int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
     95  1.1  riastrad 			    struct vmw_buffer_object *buf,
     96  1.1  riastrad 			    struct ttm_placement *placement,
     97  1.1  riastrad 			    bool interruptible)
     98  1.1  riastrad {
     99  1.1  riastrad 	struct ttm_operation_ctx ctx = {interruptible, false };
    100  1.1  riastrad 	struct ttm_buffer_object *bo = &buf->base;
    101  1.1  riastrad 	int ret;
    102  1.1  riastrad 	uint32_t new_flags;
    103  1.1  riastrad 
    104  1.1  riastrad 	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
    105  1.1  riastrad 	if (unlikely(ret != 0))
    106  1.1  riastrad 		return ret;
    107  1.1  riastrad 
    108  1.1  riastrad 	vmw_execbuf_release_pinned_bo(dev_priv);
    109  1.1  riastrad 
    110  1.1  riastrad 	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
    111  1.1  riastrad 	if (unlikely(ret != 0))
    112  1.1  riastrad 		goto err;
    113  1.1  riastrad 
    114  1.1  riastrad 	if (buf->pin_count > 0)
    115  1.1  riastrad 		ret = ttm_bo_mem_compat(placement, &bo->mem,
    116  1.1  riastrad 					&new_flags) == true ? 0 : -EINVAL;
    117  1.1  riastrad 	else
    118  1.1  riastrad 		ret = ttm_bo_validate(bo, placement, &ctx);
    119  1.1  riastrad 
    120  1.1  riastrad 	if (!ret)
    121  1.1  riastrad 		vmw_bo_pin_reserved(buf, true);
    122  1.1  riastrad 
    123  1.1  riastrad 	ttm_bo_unreserve(bo);
    124  1.1  riastrad 
    125  1.1  riastrad err:
    126  1.1  riastrad 	ttm_write_unlock(&dev_priv->reservation_sem);
    127  1.1  riastrad 	return ret;
    128  1.1  riastrad }
    129  1.1  riastrad 
    130  1.1  riastrad 
    131  1.1  riastrad /**
    132  1.1  riastrad  * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
    133  1.1  riastrad  *
    134  1.1  riastrad  * This function takes the reservation_sem in write mode.
    135  1.1  riastrad  * Flushes and unpins the query bo to avoid failures.
    136  1.1  riastrad  *
    137  1.1  riastrad  * @dev_priv:  Driver private.
    138  1.1  riastrad  * @buf:  DMA buffer to move.
    139  1.1  riastrad  * @pin:  Pin buffer if true.
    140  1.1  riastrad  * @interruptible:  Use interruptible wait.
    141  1.1  riastrad  * Return: Zero on success, Negative error code on failure. In particular
    142  1.1  riastrad  * -ERESTARTSYS if interrupted by a signal
    143  1.1  riastrad  */
    144  1.1  riastrad int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
    145  1.1  riastrad 			      struct vmw_buffer_object *buf,
    146  1.1  riastrad 			      bool interruptible)
    147  1.1  riastrad {
    148  1.1  riastrad 	struct ttm_operation_ctx ctx = {interruptible, false };
    149  1.1  riastrad 	struct ttm_buffer_object *bo = &buf->base;
    150  1.1  riastrad 	int ret;
    151  1.1  riastrad 	uint32_t new_flags;
    152  1.1  riastrad 
    153  1.1  riastrad 	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
    154  1.1  riastrad 	if (unlikely(ret != 0))
    155  1.1  riastrad 		return ret;
    156  1.1  riastrad 
    157  1.1  riastrad 	vmw_execbuf_release_pinned_bo(dev_priv);
    158  1.1  riastrad 
    159  1.1  riastrad 	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
    160  1.1  riastrad 	if (unlikely(ret != 0))
    161  1.1  riastrad 		goto err;
    162  1.1  riastrad 
    163  1.1  riastrad 	if (buf->pin_count > 0) {
    164  1.1  riastrad 		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
    165  1.1  riastrad 					&new_flags) == true ? 0 : -EINVAL;
    166  1.1  riastrad 		goto out_unreserve;
    167  1.1  riastrad 	}
    168  1.1  riastrad 
    169  1.1  riastrad 	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
    170  1.1  riastrad 	if (likely(ret == 0) || ret == -ERESTARTSYS)
    171  1.1  riastrad 		goto out_unreserve;
    172  1.1  riastrad 
    173  1.1  riastrad 	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
    174  1.1  riastrad 
    175  1.1  riastrad out_unreserve:
    176  1.1  riastrad 	if (!ret)
    177  1.1  riastrad 		vmw_bo_pin_reserved(buf, true);
    178  1.1  riastrad 
    179  1.1  riastrad 	ttm_bo_unreserve(bo);
    180  1.1  riastrad err:
    181  1.1  riastrad 	ttm_write_unlock(&dev_priv->reservation_sem);
    182  1.1  riastrad 	return ret;
    183  1.1  riastrad }
    184  1.1  riastrad 
    185  1.1  riastrad 
    186  1.1  riastrad /**
    187  1.1  riastrad  * vmw_bo_pin_in_vram - Move a buffer to vram.
    188  1.1  riastrad  *
    189  1.1  riastrad  * This function takes the reservation_sem in write mode.
    190  1.1  riastrad  * Flushes and unpins the query bo to avoid failures.
    191  1.1  riastrad  *
    192  1.1  riastrad  * @dev_priv:  Driver private.
    193  1.1  riastrad  * @buf:  DMA buffer to move.
    194  1.1  riastrad  * @interruptible:  Use interruptible wait.
    195  1.1  riastrad  * Return: Zero on success, Negative error code on failure. In particular
    196  1.1  riastrad  * -ERESTARTSYS if interrupted by a signal
    197  1.1  riastrad  */
    198  1.1  riastrad int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
    199  1.1  riastrad 		       struct vmw_buffer_object *buf,
    200  1.1  riastrad 		       bool interruptible)
    201  1.1  riastrad {
    202  1.1  riastrad 	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
    203  1.1  riastrad 				       interruptible);
    204  1.1  riastrad }
    205  1.1  riastrad 
    206  1.1  riastrad 
/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_buffer_object *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;
	uint32_t new_flags;

	/*
	 * Build a one-entry placement cloned from the vram placement but
	 * with lpfn capped to the buffer size, which forces the buffer to
	 * sit at offset 0 of vram.
	 */
	place = vmw_vram_placement.placement[0];
	place.lpfn = bo->num_pages;
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	/* The pinned query bo would otherwise make validation fail. */
	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0 &&
	    buf->pin_count == 0) {
		/* Eviction must not be interrupted mid-way. */
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	if (buf->pin_count > 0)
		/* Already pinned: current memory must already comply. */
		ret = ttm_bo_mem_compat(&placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->offset != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}
    276  1.1  riastrad 
    277  1.1  riastrad 
    278  1.1  riastrad /**
    279  1.1  riastrad  * vmw_bo_unpin - Unpin the buffer given buffer, does not move the buffer.
    280  1.1  riastrad  *
    281  1.1  riastrad  * This function takes the reservation_sem in write mode.
    282  1.1  riastrad  *
    283  1.1  riastrad  * @dev_priv:  Driver private.
    284  1.1  riastrad  * @buf:  DMA buffer to unpin.
    285  1.1  riastrad  * @interruptible:  Use interruptible wait.
    286  1.1  riastrad  * Return: Zero on success, Negative error code on failure. In particular
    287  1.1  riastrad  * -ERESTARTSYS if interrupted by a signal
    288  1.1  riastrad  */
    289  1.1  riastrad int vmw_bo_unpin(struct vmw_private *dev_priv,
    290  1.1  riastrad 		 struct vmw_buffer_object *buf,
    291  1.1  riastrad 		 bool interruptible)
    292  1.1  riastrad {
    293  1.1  riastrad 	struct ttm_buffer_object *bo = &buf->base;
    294  1.1  riastrad 	int ret;
    295  1.1  riastrad 
    296  1.1  riastrad 	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
    297  1.1  riastrad 	if (unlikely(ret != 0))
    298  1.1  riastrad 		return ret;
    299  1.1  riastrad 
    300  1.1  riastrad 	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
    301  1.1  riastrad 	if (unlikely(ret != 0))
    302  1.1  riastrad 		goto err;
    303  1.1  riastrad 
    304  1.1  riastrad 	vmw_bo_pin_reserved(buf, false);
    305  1.1  riastrad 
    306  1.1  riastrad 	ttm_bo_unreserve(bo);
    307  1.1  riastrad 
    308  1.1  riastrad err:
    309  1.1  riastrad 	ttm_read_unlock(&dev_priv->reservation_sem);
    310  1.1  riastrad 	return ret;
    311  1.1  riastrad }
    312  1.1  riastrad 
    313  1.1  riastrad /**
    314  1.1  riastrad  * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
    315  1.1  riastrad  * of a buffer.
    316  1.1  riastrad  *
    317  1.1  riastrad  * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
    318  1.1  riastrad  * @ptr: SVGAGuestPtr returning the result.
    319  1.1  riastrad  */
    320  1.1  riastrad void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
    321  1.1  riastrad 			  SVGAGuestPtr *ptr)
    322  1.1  riastrad {
    323  1.1  riastrad 	if (bo->mem.mem_type == TTM_PL_VRAM) {
    324  1.1  riastrad 		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
    325  1.1  riastrad 		ptr->offset = bo->offset;
    326  1.1  riastrad 	} else {
    327  1.1  riastrad 		ptr->gmrId = bo->mem.start;
    328  1.1  riastrad 		ptr->offset = 0;
    329  1.1  riastrad 	}
    330  1.1  riastrad }
    331  1.1  riastrad 
    332  1.1  riastrad 
    333  1.1  riastrad /**
    334  1.1  riastrad  * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
    335  1.1  riastrad  *
    336  1.1  riastrad  * @vbo: The buffer object. Must be reserved.
    337  1.1  riastrad  * @pin: Whether to pin or unpin.
    338  1.1  riastrad  *
    339  1.1  riastrad  */
    340  1.1  riastrad void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
    341  1.1  riastrad {
    342  1.1  riastrad 	struct ttm_operation_ctx ctx = { false, true };
    343  1.1  riastrad 	struct ttm_place pl;
    344  1.1  riastrad 	struct ttm_placement placement;
    345  1.1  riastrad 	struct ttm_buffer_object *bo = &vbo->base;
    346  1.1  riastrad 	uint32_t old_mem_type = bo->mem.mem_type;
    347  1.1  riastrad 	int ret;
    348  1.1  riastrad 
    349  1.1  riastrad 	dma_resv_assert_held(bo->base.resv);
    350  1.1  riastrad 
    351  1.1  riastrad 	if (pin) {
    352  1.1  riastrad 		if (vbo->pin_count++ > 0)
    353  1.1  riastrad 			return;
    354  1.1  riastrad 	} else {
    355  1.1  riastrad 		WARN_ON(vbo->pin_count <= 0);
    356  1.1  riastrad 		if (--vbo->pin_count > 0)
    357  1.1  riastrad 			return;
    358  1.1  riastrad 	}
    359  1.1  riastrad 
    360  1.1  riastrad 	pl.fpfn = 0;
    361  1.1  riastrad 	pl.lpfn = 0;
    362  1.1  riastrad 	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
    363  1.1  riastrad 		| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
    364  1.1  riastrad 	if (pin)
    365  1.1  riastrad 		pl.flags |= TTM_PL_FLAG_NO_EVICT;
    366  1.1  riastrad 
    367  1.1  riastrad 	memset(&placement, 0, sizeof(placement));
    368  1.1  riastrad 	placement.num_placement = 1;
    369  1.1  riastrad 	placement.placement = &pl;
    370  1.1  riastrad 
    371  1.1  riastrad 	ret = ttm_bo_validate(bo, &placement, &ctx);
    372  1.1  riastrad 
    373  1.1  riastrad 	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
    374  1.1  riastrad }
    375  1.1  riastrad 
    376  1.1  riastrad 
    377  1.1  riastrad /**
    378  1.1  riastrad  * vmw_bo_map_and_cache - Map a buffer object and cache the map
    379  1.1  riastrad  *
    380  1.1  riastrad  * @vbo: The buffer object to map
    381  1.1  riastrad  * Return: A kernel virtual address or NULL if mapping failed.
    382  1.1  riastrad  *
    383  1.1  riastrad  * This function maps a buffer object into the kernel address space, or
    384  1.1  riastrad  * returns the virtual kernel address of an already existing map. The virtual
    385  1.1  riastrad  * address remains valid as long as the buffer object is pinned or reserved.
    386  1.1  riastrad  * The cached map is torn down on either
    387  1.1  riastrad  * 1) Buffer object move
    388  1.1  riastrad  * 2) Buffer object swapout
    389  1.1  riastrad  * 3) Buffer object destruction
    390  1.1  riastrad  *
    391  1.1  riastrad  */
    392  1.1  riastrad void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
    393  1.1  riastrad {
    394  1.1  riastrad 	struct ttm_buffer_object *bo = &vbo->base;
    395  1.1  riastrad 	bool not_used;
    396  1.1  riastrad 	void *virtual;
    397  1.1  riastrad 	int ret;
    398  1.1  riastrad 
    399  1.1  riastrad 	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
    400  1.1  riastrad 	if (virtual)
    401  1.1  riastrad 		return virtual;
    402  1.1  riastrad 
    403  1.1  riastrad 	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
    404  1.1  riastrad 	if (ret)
    405  1.1  riastrad 		DRM_ERROR("Buffer object map failed: %d.\n", ret);
    406  1.1  riastrad 
    407  1.1  riastrad 	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
    408  1.1  riastrad }
    409  1.1  riastrad 
    410  1.1  riastrad 
    411  1.1  riastrad /**
    412  1.1  riastrad  * vmw_bo_unmap - Tear down a cached buffer object map.
    413  1.1  riastrad  *
    414  1.1  riastrad  * @vbo: The buffer object whose map we are tearing down.
    415  1.1  riastrad  *
    416  1.1  riastrad  * This function tears down a cached map set up using
    417  1.1  riastrad  * vmw_buffer_object_map_and_cache().
    418  1.1  riastrad  */
    419  1.1  riastrad void vmw_bo_unmap(struct vmw_buffer_object *vbo)
    420  1.1  riastrad {
    421  1.1  riastrad 	if (vbo->map.bo == NULL)
    422  1.1  riastrad 		return;
    423  1.1  riastrad 
    424  1.1  riastrad 	ttm_bo_kunmap(&vbo->map);
    425  1.1  riastrad }
    426  1.1  riastrad 
    427  1.1  riastrad 
    428  1.1  riastrad /**
    429  1.1  riastrad  * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
    430  1.1  riastrad  *
    431  1.1  riastrad  * @dev_priv: Pointer to a struct vmw_private identifying the device.
    432  1.1  riastrad  * @size: The requested buffer size.
    433  1.1  riastrad  * @user: Whether this is an ordinary dma buffer or a user dma buffer.
    434  1.1  riastrad  */
    435  1.1  riastrad static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
    436  1.1  riastrad 			      bool user)
    437  1.1  riastrad {
    438  1.1  riastrad 	static size_t struct_size, user_struct_size;
    439  1.1  riastrad 	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
    440  1.1  riastrad 	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
    441  1.1  riastrad 
    442  1.1  riastrad 	if (unlikely(struct_size == 0)) {
    443  1.1  riastrad 		size_t backend_size = ttm_round_pot(vmw_tt_size);
    444  1.1  riastrad 
    445  1.1  riastrad 		struct_size = backend_size +
    446  1.1  riastrad 			ttm_round_pot(sizeof(struct vmw_buffer_object));
    447  1.1  riastrad 		user_struct_size = backend_size +
    448  1.1  riastrad 		  ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
    449  1.1  riastrad 				      TTM_OBJ_EXTRA_SIZE;
    450  1.1  riastrad 	}
    451  1.1  riastrad 
    452  1.1  riastrad 	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
    453  1.1  riastrad 		page_array_size +=
    454  1.1  riastrad 			ttm_round_pot(num_pages * sizeof(dma_addr_t));
    455  1.1  riastrad 
    456  1.1  riastrad 	return ((user) ? user_struct_size : struct_size) +
    457  1.1  riastrad 		page_array_size;
    458  1.1  riastrad }
    459  1.1  riastrad 
    460  1.1  riastrad 
    461  1.1  riastrad /**
    462  1.1  riastrad  * vmw_bo_bo_free - vmw buffer object destructor
    463  1.1  riastrad  *
    464  1.1  riastrad  * @bo: Pointer to the embedded struct ttm_buffer_object
    465  1.1  riastrad  */
    466  1.1  riastrad void vmw_bo_bo_free(struct ttm_buffer_object *bo)
    467  1.1  riastrad {
    468  1.1  riastrad 	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
    469  1.1  riastrad 
    470  1.1  riastrad 	WARN_ON(vmw_bo->dirty);
    471  1.1  riastrad 	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
    472  1.1  riastrad 	vmw_bo_unmap(vmw_bo);
    473  1.1  riastrad 	kfree(vmw_bo);
    474  1.1  riastrad }
    475  1.1  riastrad 
    476  1.1  riastrad 
    477  1.1  riastrad /**
    478  1.1  riastrad  * vmw_user_bo_destroy - vmw buffer object destructor
    479  1.1  riastrad  *
    480  1.1  riastrad  * @bo: Pointer to the embedded struct ttm_buffer_object
    481  1.1  riastrad  */
    482  1.1  riastrad static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
    483  1.1  riastrad {
    484  1.1  riastrad 	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
    485  1.1  riastrad 	struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;
    486  1.1  riastrad 
    487  1.1  riastrad 	WARN_ON(vbo->dirty);
    488  1.1  riastrad 	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
    489  1.1  riastrad 	vmw_bo_unmap(vbo);
    490  1.1  riastrad 	ttm_prime_object_kfree(vmw_user_bo, prime);
    491  1.1  riastrad }
    492  1.1  riastrad 
    493  1.3  riastrad #ifdef __NetBSD__
    494  1.3  riastrad extern rb_tree_ops_t vmwgfx_res_rb_ops;
    495  1.3  riastrad #endif
    496  1.1  riastrad 
/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptible.
 * @bo_free: The buffer object destructor.
 * Returns: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
		struct vmw_buffer_object *vmw_bo,
		size_t size, struct ttm_placement *placement,
		bool interruptible,
		void (*bo_free)(struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	/* User objects are recognized purely by their destructor. */
	bool user = (bo_free == &vmw_user_bo_destroy);

	/*
	 * NOTE(review): as written this condition reduces to
	 * WARN_ON_ONCE(!bo_free), since both subterms hold whenever
	 * bo_free is NULL; presumably the intent was to also warn on an
	 * unknown non-NULL destructor — confirm against upstream.
	 */
	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));

	acc_size = vmw_bo_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));
	/* Priority 3 must be a valid TTM buffer object priority. */
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->base.priority = 3;
#ifdef __NetBSD__
	/* NetBSD rb-trees need explicit ops-based initialization. */
	rb_tree_init(&vmw_bo->res_tree.rbr_tree, &vmwgfx_res_rb_ops);
#else
	vmw_bo->res_tree = RB_ROOT;
#endif

	/* On failure, ttm_bo_init() invokes bo_free on vmw_bo. */
	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible, acc_size,
			  NULL, NULL, bo_free);
	return ret;
}
    539  1.1  riastrad 
    540  1.1  riastrad 
    541  1.1  riastrad /**
    542  1.1  riastrad  * vmw_user_bo_release - TTM reference base object release callback for
    543  1.1  riastrad  * vmw user buffer objects
    544  1.1  riastrad  *
    545  1.1  riastrad  * @p_base: The TTM base object pointer about to be unreferenced.
    546  1.1  riastrad  *
    547  1.1  riastrad  * Clears the TTM base object pointer and drops the reference the
    548  1.1  riastrad  * base object has on the underlying struct vmw_buffer_object.
    549  1.1  riastrad  */
    550  1.1  riastrad static void vmw_user_bo_release(struct ttm_base_object **p_base)
    551  1.1  riastrad {
    552  1.1  riastrad 	struct vmw_user_buffer_object *vmw_user_bo;
    553  1.1  riastrad 	struct ttm_base_object *base = *p_base;
    554  1.1  riastrad 
    555  1.1  riastrad 	*p_base = NULL;
    556  1.1  riastrad 
    557  1.1  riastrad 	if (unlikely(base == NULL))
    558  1.1  riastrad 		return;
    559  1.1  riastrad 
    560  1.1  riastrad 	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
    561  1.1  riastrad 				   prime.base);
    562  1.1  riastrad 	ttm_bo_put(&vmw_user_bo->vbo.base);
    563  1.1  riastrad }
    564  1.1  riastrad 
    565  1.1  riastrad 
    566  1.1  riastrad /**
    567  1.1  riastrad  * vmw_user_bo_ref_obj-release - TTM synccpu reference object release callback
    568  1.1  riastrad  * for vmw user buffer objects
    569  1.1  riastrad  *
    570  1.1  riastrad  * @base: Pointer to the TTM base object
    571  1.1  riastrad  * @ref_type: Reference type of the reference reaching zero.
    572  1.1  riastrad  *
    573  1.1  riastrad  * Called when user-space drops its last synccpu reference on the buffer
    574  1.1  riastrad  * object, Either explicitly or as part of a cleanup file close.
    575  1.1  riastrad  */
    576  1.1  riastrad static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
    577  1.1  riastrad 					enum ttm_ref_type ref_type)
    578  1.1  riastrad {
    579  1.1  riastrad 	struct vmw_user_buffer_object *user_bo;
    580  1.1  riastrad 
    581  1.1  riastrad 	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
    582  1.1  riastrad 
    583  1.1  riastrad 	switch (ref_type) {
    584  1.1  riastrad 	case TTM_REF_SYNCCPU_WRITE:
    585  1.1  riastrad 		atomic_dec(&user_bo->vbo.cpu_writers);
    586  1.1  riastrad 		break;
    587  1.1  riastrad 	default:
    588  1.1  riastrad 		WARN_ONCE(true, "Undefined buffer object reference release.\n");
    589  1.1  riastrad 	}
    590  1.1  riastrad }
    591  1.1  riastrad 
    592  1.1  riastrad 
/**
 * vmw_user_bo_alloc - Allocate a user buffer object
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the buffer object.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * should be assigned.
 * @p_base: If non-NULL, receives an extra reference on the base object.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t size,
		      bool shareable,
		      uint32_t *handle,
		      struct vmw_buffer_object **p_vbo,
		      struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *user_bo;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(!user_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	/*
	 * On failure, vmw_bo_init() frees user_bo through the
	 * vmw_user_bo_destroy destructor, so no kfree here.
	 */
	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
			  (dev_priv->has_mob) ?
			  &vmw_sys_placement :
			  &vmw_vram_sys_placement, true,
			  &vmw_user_bo_destroy);
	if (unlikely(ret != 0))
		return ret;

	/* Extra bo reference, owned by the base object once registered. */
	ttm_bo_get(&user_bo->vbo.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_bo_release,
				    &vmw_user_bo_ref_obj_release);
	if (unlikely(ret != 0)) {
		/* Registration failed: drop the reference taken above. */
		ttm_bo_put(&user_bo->vbo.base);
		goto out_no_base_object;
	}

	*p_vbo = &user_bo->vbo;
	if (p_base) {
		*p_base = &user_bo->prime.base;
		/* Caller-owned reference on the base object. */
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.handle;

out_no_base_object:
	return ret;
}
    654  1.1  riastrad 
    655  1.1  riastrad 
    656  1.1  riastrad /**
    657  1.1  riastrad  * vmw_user_bo_verify_access - verify access permissions on this
    658  1.1  riastrad  * buffer object.
    659  1.1  riastrad  *
    660  1.1  riastrad  * @bo: Pointer to the buffer object being accessed
    661  1.1  riastrad  * @tfile: Identifying the caller.
    662  1.1  riastrad  */
    663  1.1  riastrad int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
    664  1.1  riastrad 			      struct ttm_object_file *tfile)
    665  1.1  riastrad {
    666  1.1  riastrad 	struct vmw_user_buffer_object *vmw_user_bo;
    667  1.1  riastrad 
    668  1.1  riastrad 	if (unlikely(bo->destroy != vmw_user_bo_destroy))
    669  1.1  riastrad 		return -EPERM;
    670  1.1  riastrad 
    671  1.1  riastrad 	vmw_user_bo = vmw_user_buffer_object(bo);
    672  1.1  riastrad 
    673  1.1  riastrad 	/* Check that the caller has opened the object. */
    674  1.1  riastrad 	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
    675  1.1  riastrad 		return 0;
    676  1.1  riastrad 
    677  1.1  riastrad 	DRM_ERROR("Could not grant buffer access.\n");
    678  1.1  riastrad 	return -EPERM;
    679  1.1  riastrad }
    680  1.1  riastrad 
    681  1.1  riastrad 
    682  1.1  riastrad /**
    683  1.1  riastrad  * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
    684  1.1  riastrad  * access, idling previous GPU operations on the buffer and optionally
    685  1.1  riastrad  * blocking it for further command submissions.
    686  1.1  riastrad  *
    687  1.1  riastrad  * @user_bo: Pointer to the buffer object being grabbed for CPU access
    688  1.1  riastrad  * @tfile: Identifying the caller.
    689  1.1  riastrad  * @flags: Flags indicating how the grab should be performed.
    690  1.1  riastrad  * Return: Zero on success, Negative error code on error. In particular,
    691  1.1  riastrad  * -EBUSY will be returned if a dontblock operation is requested and the
    692  1.1  riastrad  * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
    693  1.1  riastrad  * interrupted by a signal.
    694  1.1  riastrad  *
    695  1.1  riastrad  * A blocking grab will be automatically released when @tfile is closed.
    696  1.1  riastrad  */
    697  1.1  riastrad static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
    698  1.1  riastrad 				    struct ttm_object_file *tfile,
    699  1.1  riastrad 				    uint32_t flags)
    700  1.1  riastrad {
    701  1.1  riastrad 	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
    702  1.1  riastrad 	struct ttm_buffer_object *bo = &user_bo->vbo.base;
    703  1.1  riastrad 	bool existed;
    704  1.1  riastrad 	int ret;
    705  1.1  riastrad 
    706  1.1  riastrad 	if (flags & drm_vmw_synccpu_allow_cs) {
    707  1.1  riastrad 		long lret;
    708  1.1  riastrad 
    709  1.1  riastrad 		lret = dma_resv_wait_timeout_rcu
    710  1.1  riastrad 			(bo->base.resv, true, true,
    711  1.1  riastrad 			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
    712  1.1  riastrad 		if (!lret)
    713  1.1  riastrad 			return -EBUSY;
    714  1.1  riastrad 		else if (lret < 0)
    715  1.1  riastrad 			return lret;
    716  1.1  riastrad 		return 0;
    717  1.1  riastrad 	}
    718  1.1  riastrad 
    719  1.1  riastrad 	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
    720  1.1  riastrad 	if (unlikely(ret != 0))
    721  1.1  riastrad 		return ret;
    722  1.1  riastrad 
    723  1.1  riastrad 	ret = ttm_bo_wait(bo, true, nonblock);
    724  1.1  riastrad 	if (likely(ret == 0))
    725  1.1  riastrad 		atomic_inc(&user_bo->vbo.cpu_writers);
    726  1.1  riastrad 
    727  1.1  riastrad 	ttm_bo_unreserve(bo);
    728  1.1  riastrad 	if (unlikely(ret != 0))
    729  1.1  riastrad 		return ret;
    730  1.1  riastrad 
    731  1.1  riastrad 	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
    732  1.1  riastrad 				 TTM_REF_SYNCCPU_WRITE, &existed, false);
    733  1.1  riastrad 	if (ret != 0 || existed)
    734  1.1  riastrad 		atomic_dec(&user_bo->vbo.cpu_writers);
    735  1.1  riastrad 
    736  1.1  riastrad 	return ret;
    737  1.1  riastrad }
    738  1.1  riastrad 
    739  1.1  riastrad /**
    740  1.1  riastrad  * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
    741  1.1  riastrad  * and unblock command submission on the buffer if blocked.
    742  1.1  riastrad  *
    743  1.1  riastrad  * @handle: Handle identifying the buffer object.
    744  1.1  riastrad  * @tfile: Identifying the caller.
    745  1.1  riastrad  * @flags: Flags indicating the type of release.
    746  1.1  riastrad  */
    747  1.1  riastrad static int vmw_user_bo_synccpu_release(uint32_t handle,
    748  1.1  riastrad 					   struct ttm_object_file *tfile,
    749  1.1  riastrad 					   uint32_t flags)
    750  1.1  riastrad {
    751  1.1  riastrad 	if (!(flags & drm_vmw_synccpu_allow_cs))
    752  1.1  riastrad 		return ttm_ref_object_base_unref(tfile, handle,
    753  1.1  riastrad 						 TTM_REF_SYNCCPU_WRITE);
    754  1.1  riastrad 
    755  1.1  riastrad 	return 0;
    756  1.1  riastrad }
    757  1.1  riastrad 
    758  1.1  riastrad 
    759  1.1  riastrad /**
    760  1.1  riastrad  * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
    761  1.1  riastrad  * functionality.
    762  1.1  riastrad  *
    763  1.1  riastrad  * @dev: Identifies the drm device.
    764  1.1  riastrad  * @data: Pointer to the ioctl argument.
    765  1.1  riastrad  * @file_priv: Identifies the caller.
    766  1.1  riastrad  * Return: Zero on success, negative error code on error.
    767  1.1  riastrad  *
    768  1.1  riastrad  * This function checks the ioctl arguments for validity and calls the
    769  1.1  riastrad  * relevant synccpu functions.
    770  1.1  riastrad  */
    771  1.1  riastrad int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
    772  1.1  riastrad 			      struct drm_file *file_priv)
    773  1.1  riastrad {
    774  1.1  riastrad 	struct drm_vmw_synccpu_arg *arg =
    775  1.1  riastrad 		(struct drm_vmw_synccpu_arg *) data;
    776  1.1  riastrad 	struct vmw_buffer_object *vbo;
    777  1.1  riastrad 	struct vmw_user_buffer_object *user_bo;
    778  1.1  riastrad 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
    779  1.1  riastrad 	struct ttm_base_object *buffer_base;
    780  1.1  riastrad 	int ret;
    781  1.1  riastrad 
    782  1.1  riastrad 	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
    783  1.1  riastrad 	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
    784  1.1  riastrad 			       drm_vmw_synccpu_dontblock |
    785  1.1  riastrad 			       drm_vmw_synccpu_allow_cs)) != 0) {
    786  1.1  riastrad 		DRM_ERROR("Illegal synccpu flags.\n");
    787  1.1  riastrad 		return -EINVAL;
    788  1.1  riastrad 	}
    789  1.1  riastrad 
    790  1.1  riastrad 	switch (arg->op) {
    791  1.1  riastrad 	case drm_vmw_synccpu_grab:
    792  1.1  riastrad 		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
    793  1.1  riastrad 					     &buffer_base);
    794  1.1  riastrad 		if (unlikely(ret != 0))
    795  1.1  riastrad 			return ret;
    796  1.1  riastrad 
    797  1.1  riastrad 		user_bo = container_of(vbo, struct vmw_user_buffer_object,
    798  1.1  riastrad 				       vbo);
    799  1.1  riastrad 		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
    800  1.1  riastrad 		vmw_bo_unreference(&vbo);
    801  1.1  riastrad 		ttm_base_object_unref(&buffer_base);
    802  1.1  riastrad 		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
    803  1.1  riastrad 			     ret != -EBUSY)) {
    804  1.1  riastrad 			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
    805  1.1  riastrad 				  (unsigned int) arg->handle);
    806  1.1  riastrad 			return ret;
    807  1.1  riastrad 		}
    808  1.1  riastrad 		break;
    809  1.1  riastrad 	case drm_vmw_synccpu_release:
    810  1.1  riastrad 		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
    811  1.1  riastrad 						  arg->flags);
    812  1.1  riastrad 		if (unlikely(ret != 0)) {
    813  1.1  riastrad 			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
    814  1.1  riastrad 				  (unsigned int) arg->handle);
    815  1.1  riastrad 			return ret;
    816  1.1  riastrad 		}
    817  1.1  riastrad 		break;
    818  1.1  riastrad 	default:
    819  1.1  riastrad 		DRM_ERROR("Invalid synccpu operation.\n");
    820  1.1  riastrad 		return -EINVAL;
    821  1.1  riastrad 	}
    822  1.1  riastrad 
    823  1.1  riastrad 	return 0;
    824  1.1  riastrad }
    825  1.1  riastrad 
    826  1.1  riastrad 
    827  1.1  riastrad /**
    828  1.1  riastrad  * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
    829  1.1  riastrad  * allocation functionality.
    830  1.1  riastrad  *
    831  1.1  riastrad  * @dev: Identifies the drm device.
    832  1.1  riastrad  * @data: Pointer to the ioctl argument.
    833  1.1  riastrad  * @file_priv: Identifies the caller.
    834  1.1  riastrad  * Return: Zero on success, negative error code on error.
    835  1.1  riastrad  *
    836  1.1  riastrad  * This function checks the ioctl arguments for validity and allocates a
    837  1.1  riastrad  * struct vmw_user_buffer_object bo.
    838  1.1  riastrad  */
    839  1.1  riastrad int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
    840  1.1  riastrad 		       struct drm_file *file_priv)
    841  1.1  riastrad {
    842  1.1  riastrad 	struct vmw_private *dev_priv = vmw_priv(dev);
    843  1.1  riastrad 	union drm_vmw_alloc_dmabuf_arg *arg =
    844  1.1  riastrad 	    (union drm_vmw_alloc_dmabuf_arg *)data;
    845  1.1  riastrad 	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
    846  1.1  riastrad 	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
    847  1.1  riastrad 	struct vmw_buffer_object *vbo;
    848  1.1  riastrad 	uint32_t handle;
    849  1.1  riastrad 	int ret;
    850  1.1  riastrad 
    851  1.1  riastrad 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
    852  1.1  riastrad 	if (unlikely(ret != 0))
    853  1.1  riastrad 		return ret;
    854  1.1  riastrad 
    855  1.1  riastrad 	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
    856  1.1  riastrad 				req->size, false, &handle, &vbo,
    857  1.1  riastrad 				NULL);
    858  1.1  riastrad 	if (unlikely(ret != 0))
    859  1.1  riastrad 		goto out_no_bo;
    860  1.1  riastrad 
    861  1.1  riastrad 	rep->handle = handle;
    862  1.1  riastrad 	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
    863  1.1  riastrad 	rep->cur_gmr_id = handle;
    864  1.1  riastrad 	rep->cur_gmr_offset = 0;
    865  1.1  riastrad 
    866  1.1  riastrad 	vmw_bo_unreference(&vbo);
    867  1.1  riastrad 
    868  1.1  riastrad out_no_bo:
    869  1.1  riastrad 	ttm_read_unlock(&dev_priv->reservation_sem);
    870  1.1  riastrad 
    871  1.1  riastrad 	return ret;
    872  1.1  riastrad }
    873  1.1  riastrad 
    874  1.1  riastrad 
    875  1.1  riastrad /**
    876  1.1  riastrad  * vmw_bo_unref_ioctl - Generic handle close ioctl.
    877  1.1  riastrad  *
    878  1.1  riastrad  * @dev: Identifies the drm device.
    879  1.1  riastrad  * @data: Pointer to the ioctl argument.
    880  1.1  riastrad  * @file_priv: Identifies the caller.
    881  1.1  riastrad  * Return: Zero on success, negative error code on error.
    882  1.1  riastrad  *
    883  1.1  riastrad  * This function checks the ioctl arguments for validity and closes a
    884  1.1  riastrad  * handle to a TTM base object, optionally freeing the object.
    885  1.1  riastrad  */
    886  1.1  riastrad int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
    887  1.1  riastrad 		       struct drm_file *file_priv)
    888  1.1  riastrad {
    889  1.1  riastrad 	struct drm_vmw_unref_dmabuf_arg *arg =
    890  1.1  riastrad 	    (struct drm_vmw_unref_dmabuf_arg *)data;
    891  1.1  riastrad 
    892  1.1  riastrad 	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
    893  1.1  riastrad 					 arg->handle,
    894  1.1  riastrad 					 TTM_REF_USAGE);
    895  1.1  riastrad }
    896  1.1  riastrad 
    897  1.1  riastrad 
    898  1.1  riastrad /**
    899  1.1  riastrad  * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
    900  1.1  riastrad  *
    901  1.1  riastrad  * @tfile: The TTM object file the handle is registered with.
    902  1.1  riastrad  * @handle: The user buffer object handle
    903  1.1  riastrad  * @out: Pointer to a where a pointer to the embedded
    904  1.1  riastrad  * struct vmw_buffer_object should be placed.
    905  1.1  riastrad  * @p_base: Pointer to where a pointer to the TTM base object should be
    906  1.1  riastrad  * placed, or NULL if no such pointer is required.
    907  1.1  riastrad  * Return: Zero on success, Negative error code on error.
    908  1.1  riastrad  *
    909  1.1  riastrad  * Both the output base object pointer and the vmw buffer object pointer
    910  1.1  riastrad  * will be refcounted.
    911  1.1  riastrad  */
    912  1.1  riastrad int vmw_user_bo_lookup(struct ttm_object_file *tfile,
    913  1.1  riastrad 		       uint32_t handle, struct vmw_buffer_object **out,
    914  1.1  riastrad 		       struct ttm_base_object **p_base)
    915  1.1  riastrad {
    916  1.1  riastrad 	struct vmw_user_buffer_object *vmw_user_bo;
    917  1.1  riastrad 	struct ttm_base_object *base;
    918  1.1  riastrad 
    919  1.1  riastrad 	base = ttm_base_object_lookup(tfile, handle);
    920  1.1  riastrad 	if (unlikely(base == NULL)) {
    921  1.1  riastrad 		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
    922  1.1  riastrad 			  (unsigned long)handle);
    923  1.1  riastrad 		return -ESRCH;
    924  1.1  riastrad 	}
    925  1.1  riastrad 
    926  1.1  riastrad 	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
    927  1.1  riastrad 		ttm_base_object_unref(&base);
    928  1.1  riastrad 		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
    929  1.1  riastrad 			  (unsigned long)handle);
    930  1.1  riastrad 		return -EINVAL;
    931  1.1  riastrad 	}
    932  1.1  riastrad 
    933  1.1  riastrad 	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
    934  1.1  riastrad 				   prime.base);
    935  1.1  riastrad 	ttm_bo_get(&vmw_user_bo->vbo.base);
    936  1.1  riastrad 	if (p_base)
    937  1.1  riastrad 		*p_base = base;
    938  1.1  riastrad 	else
    939  1.1  riastrad 		ttm_base_object_unref(&base);
    940  1.1  riastrad 	*out = &vmw_user_bo->vbo;
    941  1.1  riastrad 
    942  1.1  riastrad 	return 0;
    943  1.1  riastrad }
    944  1.1  riastrad 
    945  1.1  riastrad /**
    946  1.1  riastrad  * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
    947  1.1  riastrad  * @tfile: The TTM object file the handle is registered with.
    948  1.1  riastrad  * @handle: The user buffer object handle.
    949  1.1  riastrad  *
    950  1.1  riastrad  * This function looks up a struct vmw_user_bo and returns a pointer to the
    951  1.1  riastrad  * struct vmw_buffer_object it derives from without refcounting the pointer.
    952  1.1  riastrad  * The returned pointer is only valid until vmw_user_bo_noref_release() is
    953  1.1  riastrad  * called, and the object pointed to by the returned pointer may be doomed.
    954  1.1  riastrad  * Any persistent usage of the object requires a refcount to be taken using
    955  1.1  riastrad  * ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
    956  1.1  riastrad  * needs to be paired with vmw_user_bo_noref_release() and no sleeping-
    957  1.1  riastrad  * or scheduling functions may be called inbetween these function calls.
    958  1.1  riastrad  *
    959  1.1  riastrad  * Return: A struct vmw_buffer_object pointer if successful or negative
    960  1.1  riastrad  * error pointer on failure.
    961  1.1  riastrad  */
    962  1.1  riastrad struct vmw_buffer_object *
    963  1.1  riastrad vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
    964  1.1  riastrad {
    965  1.1  riastrad 	struct vmw_user_buffer_object *vmw_user_bo;
    966  1.1  riastrad 	struct ttm_base_object *base;
    967  1.1  riastrad 
    968  1.1  riastrad 	base = ttm_base_object_noref_lookup(tfile, handle);
    969  1.1  riastrad 	if (!base) {
    970  1.1  riastrad 		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
    971  1.1  riastrad 			  (unsigned long)handle);
    972  1.1  riastrad 		return ERR_PTR(-ESRCH);
    973  1.1  riastrad 	}
    974  1.1  riastrad 
    975  1.1  riastrad 	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
    976  1.1  riastrad 		ttm_base_object_noref_release();
    977  1.1  riastrad 		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
    978  1.1  riastrad 			  (unsigned long)handle);
    979  1.1  riastrad 		return ERR_PTR(-EINVAL);
    980  1.1  riastrad 	}
    981  1.1  riastrad 
    982  1.1  riastrad 	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
    983  1.1  riastrad 				   prime.base);
    984  1.1  riastrad 	return &vmw_user_bo->vbo;
    985  1.1  riastrad }
    986  1.1  riastrad 
    987  1.1  riastrad /**
    988  1.1  riastrad  * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
    989  1.1  riastrad  *
    990  1.1  riastrad  * @tfile: The TTM object file to register the handle with.
    991  1.1  riastrad  * @vbo: The embedded vmw buffer object.
    992  1.1  riastrad  * @handle: Pointer to where the new handle should be placed.
    993  1.1  riastrad  * Return: Zero on success, Negative error code on error.
    994  1.1  riastrad  */
    995  1.1  riastrad int vmw_user_bo_reference(struct ttm_object_file *tfile,
    996  1.1  riastrad 			  struct vmw_buffer_object *vbo,
    997  1.1  riastrad 			  uint32_t *handle)
    998  1.1  riastrad {
    999  1.1  riastrad 	struct vmw_user_buffer_object *user_bo;
   1000  1.1  riastrad 
   1001  1.1  riastrad 	if (vbo->base.destroy != vmw_user_bo_destroy)
   1002  1.1  riastrad 		return -EINVAL;
   1003  1.1  riastrad 
   1004  1.1  riastrad 	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
   1005  1.1  riastrad 
   1006  1.1  riastrad 	*handle = user_bo->prime.base.handle;
   1007  1.1  riastrad 	return ttm_ref_object_add(tfile, &user_bo->prime.base,
   1008  1.1  riastrad 				  TTM_REF_USAGE, NULL, false);
   1009  1.1  riastrad }
   1010  1.1  riastrad 
   1011  1.1  riastrad 
   1012  1.1  riastrad /**
   1013  1.1  riastrad  * vmw_bo_fence_single - Utility function to fence a single TTM buffer
   1014  1.1  riastrad  *                       object without unreserving it.
   1015  1.1  riastrad  *
   1016  1.1  riastrad  * @bo:             Pointer to the struct ttm_buffer_object to fence.
   1017  1.1  riastrad  * @fence:          Pointer to the fence. If NULL, this function will
   1018  1.1  riastrad  *                  insert a fence into the command stream..
   1019  1.1  riastrad  *
   1020  1.1  riastrad  * Contrary to the ttm_eu version of this function, it takes only
   1021  1.1  riastrad  * a single buffer object instead of a list, and it also doesn't
   1022  1.1  riastrad  * unreserve the buffer object, which needs to be done separately.
   1023  1.1  riastrad  */
   1024  1.1  riastrad void vmw_bo_fence_single(struct ttm_buffer_object *bo,
   1025  1.1  riastrad 			 struct vmw_fence_obj *fence)
   1026  1.1  riastrad {
   1027  1.1  riastrad 	struct ttm_bo_device *bdev = bo->bdev;
   1028  1.1  riastrad 
   1029  1.1  riastrad 	struct vmw_private *dev_priv =
   1030  1.1  riastrad 		container_of(bdev, struct vmw_private, bdev);
   1031  1.1  riastrad 
   1032  1.1  riastrad 	if (fence == NULL) {
   1033  1.1  riastrad 		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
   1034  1.1  riastrad 		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
   1035  1.1  riastrad 		dma_fence_put(&fence->base);
   1036  1.1  riastrad 	} else
   1037  1.1  riastrad 		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
   1038  1.1  riastrad }
   1039  1.1  riastrad 
   1040  1.1  riastrad 
   1041  1.1  riastrad /**
   1042  1.1  riastrad  * vmw_dumb_create - Create a dumb kms buffer
   1043  1.1  riastrad  *
   1044  1.1  riastrad  * @file_priv: Pointer to a struct drm_file identifying the caller.
   1045  1.1  riastrad  * @dev: Pointer to the drm device.
   1046  1.1  riastrad  * @args: Pointer to a struct drm_mode_create_dumb structure
   1047  1.1  riastrad  * Return: Zero on success, negative error code on failure.
   1048  1.1  riastrad  *
   1049  1.1  riastrad  * This is a driver callback for the core drm create_dumb functionality.
   1050  1.1  riastrad  * Note that this is very similar to the vmw_bo_alloc ioctl, except
   1051  1.1  riastrad  * that the arguments have a different format.
   1052  1.1  riastrad  */
   1053  1.1  riastrad int vmw_dumb_create(struct drm_file *file_priv,
   1054  1.1  riastrad 		    struct drm_device *dev,
   1055  1.1  riastrad 		    struct drm_mode_create_dumb *args)
   1056  1.1  riastrad {
   1057  1.1  riastrad 	struct vmw_private *dev_priv = vmw_priv(dev);
   1058  1.1  riastrad 	struct vmw_buffer_object *vbo;
   1059  1.1  riastrad 	int ret;
   1060  1.1  riastrad 
   1061  1.1  riastrad 	args->pitch = args->width * ((args->bpp + 7) / 8);
   1062  1.1  riastrad 	args->size = args->pitch * args->height;
   1063  1.1  riastrad 
   1064  1.1  riastrad 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
   1065  1.1  riastrad 	if (unlikely(ret != 0))
   1066  1.1  riastrad 		return ret;
   1067  1.1  riastrad 
   1068  1.1  riastrad 	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
   1069  1.1  riastrad 				    args->size, false, &args->handle,
   1070  1.1  riastrad 				    &vbo, NULL);
   1071  1.1  riastrad 	if (unlikely(ret != 0))
   1072  1.1  riastrad 		goto out_no_bo;
   1073  1.1  riastrad 
   1074  1.1  riastrad 	vmw_bo_unreference(&vbo);
   1075  1.1  riastrad out_no_bo:
   1076  1.1  riastrad 	ttm_read_unlock(&dev_priv->reservation_sem);
   1077  1.1  riastrad 	return ret;
   1078  1.1  riastrad }
   1079  1.1  riastrad 
   1080  1.1  riastrad 
   1081  1.1  riastrad /**
   1082  1.1  riastrad  * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
   1083  1.1  riastrad  *
   1084  1.1  riastrad  * @file_priv: Pointer to a struct drm_file identifying the caller.
   1085  1.1  riastrad  * @dev: Pointer to the drm device.
   1086  1.1  riastrad  * @handle: Handle identifying the dumb buffer.
   1087  1.1  riastrad  * @offset: The address space offset returned.
   1088  1.1  riastrad  * Return: Zero on success, negative error code on failure.
   1089  1.1  riastrad  *
   1090  1.1  riastrad  * This is a driver callback for the core drm dumb_map_offset functionality.
   1091  1.1  riastrad  */
   1092  1.1  riastrad int vmw_dumb_map_offset(struct drm_file *file_priv,
   1093  1.1  riastrad 			struct drm_device *dev, uint32_t handle,
   1094  1.1  riastrad 			uint64_t *offset)
   1095  1.1  riastrad {
   1096  1.1  riastrad 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
   1097  1.1  riastrad 	struct vmw_buffer_object *out_buf;
   1098  1.1  riastrad 	int ret;
   1099  1.1  riastrad 
   1100  1.1  riastrad 	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
   1101  1.1  riastrad 	if (ret != 0)
   1102  1.1  riastrad 		return -EINVAL;
   1103  1.1  riastrad 
   1104  1.1  riastrad 	*offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
   1105  1.1  riastrad 	vmw_bo_unreference(&out_buf);
   1106  1.1  riastrad 	return 0;
   1107  1.1  riastrad }
   1108  1.1  riastrad 
   1109  1.1  riastrad 
   1110  1.1  riastrad /**
   1111  1.1  riastrad  * vmw_dumb_destroy - Destroy a dumb boffer
   1112  1.1  riastrad  *
   1113  1.1  riastrad  * @file_priv: Pointer to a struct drm_file identifying the caller.
   1114  1.1  riastrad  * @dev: Pointer to the drm device.
   1115  1.1  riastrad  * @handle: Handle identifying the dumb buffer.
   1116  1.1  riastrad  * Return: Zero on success, negative error code on failure.
   1117  1.1  riastrad  *
   1118  1.1  riastrad  * This is a driver callback for the core drm dumb_destroy functionality.
   1119  1.1  riastrad  */
   1120  1.1  riastrad int vmw_dumb_destroy(struct drm_file *file_priv,
   1121  1.1  riastrad 		     struct drm_device *dev,
   1122  1.1  riastrad 		     uint32_t handle)
   1123  1.1  riastrad {
   1124  1.1  riastrad 	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
   1125  1.1  riastrad 					 handle, TTM_REF_USAGE);
   1126  1.1  riastrad }
   1127  1.1  riastrad 
   1128  1.1  riastrad 
   1129  1.1  riastrad /**
   1130  1.1  riastrad  * vmw_bo_swap_notify - swapout notify callback.
   1131  1.1  riastrad  *
   1132  1.1  riastrad  * @bo: The buffer object to be swapped out.
   1133  1.1  riastrad  */
   1134  1.1  riastrad void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
   1135  1.1  riastrad {
   1136  1.1  riastrad 	/* Is @bo embedded in a struct vmw_buffer_object? */
   1137  1.1  riastrad 	if (bo->destroy != vmw_bo_bo_free &&
   1138  1.1  riastrad 	    bo->destroy != vmw_user_bo_destroy)
   1139  1.1  riastrad 		return;
   1140  1.1  riastrad 
   1141  1.1  riastrad 	/* Kill any cached kernel maps before swapout */
   1142  1.1  riastrad 	vmw_bo_unmap(vmw_buffer_object(bo));
   1143  1.1  riastrad }
   1144  1.1  riastrad 
   1145  1.1  riastrad 
   1146  1.1  riastrad /**
   1147  1.1  riastrad  * vmw_bo_move_notify - TTM move_notify_callback
   1148  1.1  riastrad  *
   1149  1.1  riastrad  * @bo: The TTM buffer object about to move.
   1150  1.1  riastrad  * @mem: The struct ttm_mem_reg indicating to what memory
   1151  1.1  riastrad  *       region the move is taking place.
   1152  1.1  riastrad  *
   1153  1.1  riastrad  * Detaches cached maps and device bindings that require that the
   1154  1.1  riastrad  * buffer doesn't move.
   1155  1.1  riastrad  */
   1156  1.1  riastrad void vmw_bo_move_notify(struct ttm_buffer_object *bo,
   1157  1.1  riastrad 			struct ttm_mem_reg *mem)
   1158  1.1  riastrad {
   1159  1.1  riastrad 	struct vmw_buffer_object *vbo;
   1160  1.1  riastrad 
   1161  1.1  riastrad 	if (mem == NULL)
   1162  1.1  riastrad 		return;
   1163  1.1  riastrad 
   1164  1.1  riastrad 	/* Make sure @bo is embedded in a struct vmw_buffer_object? */
   1165  1.1  riastrad 	if (bo->destroy != vmw_bo_bo_free &&
   1166  1.1  riastrad 	    bo->destroy != vmw_user_bo_destroy)
   1167  1.1  riastrad 		return;
   1168  1.1  riastrad 
   1169  1.1  riastrad 	vbo = container_of(bo, struct vmw_buffer_object, base);
   1170  1.1  riastrad 
   1171  1.1  riastrad 	/*
   1172  1.1  riastrad 	 * Kill any cached kernel maps before move to or from VRAM.
   1173  1.1  riastrad 	 * With other types of moves, the underlying pages stay the same,
   1174  1.1  riastrad 	 * and the map can be kept.
   1175  1.1  riastrad 	 */
   1176  1.1  riastrad 	if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
   1177  1.1  riastrad 		vmw_bo_unmap(vbo);
   1178  1.1  riastrad 
   1179  1.1  riastrad 	/*
   1180  1.1  riastrad 	 * If we're moving a backup MOB out of MOB placement, then make sure we
   1181  1.1  riastrad 	 * read back all resource content first, and unbind the MOB from
   1182  1.1  riastrad 	 * the resource.
   1183  1.1  riastrad 	 */
   1184  1.1  riastrad 	if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
   1185  1.1  riastrad 		vmw_resource_unbind_list(vbo);
   1186  1.1  riastrad }
   1187