vmwgfx_execbuf.c revision 1.1.1.2.4.2
      1 /**************************************************************************
      2  *
      3  * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
      4  * All Rights Reserved.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the
      8  * "Software"), to deal in the Software without restriction, including
      9  * without limitation the rights to use, copy, modify, merge, publish,
     10  * distribute, sub license, and/or sell copies of the Software, and to
     11  * permit persons to whom the Software is furnished to do so, subject to
     12  * the following conditions:
     13  *
     14  * The above copyright notice and this permission notice (including the
     15  * next paragraph) shall be included in all copies or substantial portions
     16  * of the Software.
     17  *
     18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
     22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
     23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
     24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
     25  *
     26  **************************************************************************/
     27 
     28 #include "vmwgfx_drv.h"
     29 #include "vmwgfx_reg.h"
     30 #include <drm/ttm/ttm_bo_api.h>
     31 #include <drm/ttm/ttm_placement.h>
     32 
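         /*
          * Order (log2 of the number of buckets) of the hash table used to map
          * resources and buffer objects to their validation entries
          * (sw_context->res_ht) while checking a command stream.
          */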
     33 #define VMW_RES_HT_ORDER 12
     34 
     35 /**
     36  * struct vmw_resource_relocation - Relocation info for resources
     37  *
     38  * @head: List head for the software context's relocation list.
     39  * @res: Non-ref-counted pointer to the resource.
      40  * @offset: Offset into the command buffer, in units of 4-byte entries,
      41  * where the id that needs fixup is located.
     42  */
     43 struct vmw_resource_relocation {
     44 	struct list_head head;
     45 	const struct vmw_resource *res;
     46 	unsigned long offset;
     47 };
     48 
     49 /**
     50  * struct vmw_resource_val_node - Validation info for resources
     51  *
     52  * @head: List head for the software context's resource list.
      53  * @hash: Hash entry for quick resource to val_node lookup.
      54  * @res: Ref-counted pointer to the resource.
      56  * @new_backup: Refcounted pointer to the new backup buffer.
      57  * @staged_bindings: If @res is a context, tracks bindings set up during
      58  * the command batch. Otherwise NULL.
      59  * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
     60  * @first_usage: Set to true the first time the resource is referenced in
     61  * the command stream.
     62  * @no_buffer_needed: Resources do not need to allocate buffer backup on
     63  * reservation. The command stream will provide one.
     64  */
     65 struct vmw_resource_val_node {
     66 	struct list_head head;
     67 	struct drm_hash_item hash;
     68 	struct vmw_resource *res;
     69 	struct vmw_dma_buffer *new_backup;
     70 	struct vmw_ctx_binding_state *staged_bindings;
     71 	unsigned long new_backup_offset;
     72 	bool first_usage;
     73 	bool no_buffer_needed;
     74 };
     75 
     76 /**
     77  * struct vmw_cmd_entry - Describe a command for the verifier
     78  *
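          * @func: Call-back used to validate this type of command.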
     79  * @user_allow: Whether allowed from the execbuf ioctl.
     80  * @gb_disable: Whether disabled if guest-backed objects are available.
     81  * @gb_enable: Whether enabled iff guest-backed objects are available.
     82  */
     83 struct vmw_cmd_entry {
     84 	int (*func) (struct vmw_private *, struct vmw_sw_context *,
     85 		     SVGA3dCmdHeader *);
     86 	bool user_allow;
     87 	bool gb_disable;
     88 	bool gb_enable;
     89 };
     90 
     91 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
     92 	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
     93 				       (_gb_disable), (_gb_enable)}
     94 
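         /*
          * VMW_CMD_DEF() initializes one slot of the command verifier table,
          * indexed by the SVGA 3D command id. As an illustration only (the
          * actual table appears further down in this file), an entry could
          * look like:
          *
          *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
          *		    true, false, false),
          */
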
     95 /**
      96  * vmw_resource_list_unreserve - Unreserve resources previously reserved
      97  * for command submission.
      98  *
      99  * @list: List of resources to unreserve.
    100  * @backoff: Whether command submission failed.
    101  */
    102 static void vmw_resource_list_unreserve(struct list_head *list,
    103 					bool backoff)
    104 {
    105 	struct vmw_resource_val_node *val;
    106 
    107 	list_for_each_entry(val, list, head) {
    108 		struct vmw_resource *res = val->res;
    109 		struct vmw_dma_buffer *new_backup =
    110 			backoff ? NULL : val->new_backup;
    111 
    112 		/*
    113 		 * Transfer staged context bindings to the
    114 		 * persistent context binding tracker.
    115 		 */
    116 		if (unlikely(val->staged_bindings)) {
    117 			if (!backoff) {
    118 				vmw_context_binding_state_transfer
    119 					(val->res, val->staged_bindings);
    120 			}
    121 			kfree(val->staged_bindings);
    122 			val->staged_bindings = NULL;
    123 		}
    124 		vmw_resource_unreserve(res, new_backup,
    125 			val->new_backup_offset);
    126 		vmw_dmabuf_unreference(&val->new_backup);
    127 	}
    128 }
    129 
    130 
    131 /**
    132  * vmw_resource_val_add - Add a resource to the software context's
    133  * resource list if it's not already on it.
    134  *
    135  * @sw_context: Pointer to the software context.
    136  * @res: Pointer to the resource.
     137  * @p_node: If non-NULL on entry, points to a valid pointer to a
     138  * struct vmw_resource_val_node on successful return.
    139  */
    140 static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
    141 				struct vmw_resource *res,
    142 				struct vmw_resource_val_node **p_node)
    143 {
    144 	struct vmw_resource_val_node *node;
    145 	struct drm_hash_item *hash;
    146 	int ret;
    147 
    148 	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
    149 				    &hash) == 0)) {
    150 		node = container_of(hash, struct vmw_resource_val_node, hash);
    151 		node->first_usage = false;
    152 		if (unlikely(p_node != NULL))
    153 			*p_node = node;
    154 		return 0;
    155 	}
    156 
    157 	node = kzalloc(sizeof(*node), GFP_KERNEL);
    158 	if (unlikely(node == NULL)) {
    159 		DRM_ERROR("Failed to allocate a resource validation "
    160 			  "entry.\n");
    161 		return -ENOMEM;
    162 	}
    163 
    164 	node->hash.key = (unsigned long) res;
    165 	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
    166 	if (unlikely(ret != 0)) {
    167 		DRM_ERROR("Failed to initialize a resource validation "
    168 			  "entry.\n");
    169 		kfree(node);
    170 		return ret;
    171 	}
    172 	list_add_tail(&node->head, &sw_context->resource_list);
    173 	node->res = vmw_resource_reference(res);
    174 	node->first_usage = true;
    175 
    176 	if (unlikely(p_node != NULL))
    177 		*p_node = node;
    178 
    179 	return 0;
    180 }
    181 
    182 /**
    183  * vmw_resource_context_res_add - Put resources previously bound to a context on
    184  * the validation list
    185  *
    186  * @dev_priv: Pointer to a device private structure
    187  * @sw_context: Pointer to a software context used for this command submission
    188  * @ctx: Pointer to the context resource
    189  *
    190  * This function puts all resources that were previously bound to @ctx on
     191  * the resource validation list. This is part of the context state re-emission.
    192  */
    193 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
    194 					struct vmw_sw_context *sw_context,
    195 					struct vmw_resource *ctx)
    196 {
    197 	struct list_head *binding_list;
    198 	struct vmw_ctx_binding *entry;
    199 	int ret = 0;
    200 	struct vmw_resource *res;
    201 
    202 	mutex_lock(&dev_priv->binding_mutex);
    203 	binding_list = vmw_context_binding_list(ctx);
    204 
    205 	list_for_each_entry(entry, binding_list, ctx_list) {
    206 		res = vmw_resource_reference_unless_doomed(entry->bi.res);
    207 		if (unlikely(res == NULL))
    208 			continue;
    209 
    210 		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
    211 		vmw_resource_unreference(&res);
    212 		if (unlikely(ret != 0))
    213 			break;
    214 	}
    215 
    216 	mutex_unlock(&dev_priv->binding_mutex);
    217 	return ret;
    218 }
    219 
    220 /**
    221  * vmw_resource_relocation_add - Add a relocation to the relocation list
    222  *
    223  * @list: Pointer to head of relocation list.
    224  * @res: The resource.
    225  * @offset: Offset into the command buffer currently being parsed where the
    226  * id that needs fixup is located. Granularity is 4 bytes.
    227  */
    228 static int vmw_resource_relocation_add(struct list_head *list,
    229 				       const struct vmw_resource *res,
    230 				       unsigned long offset)
    231 {
    232 	struct vmw_resource_relocation *rel;
    233 
    234 	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
    235 	if (unlikely(rel == NULL)) {
    236 		DRM_ERROR("Failed to allocate a resource relocation.\n");
    237 		return -ENOMEM;
    238 	}
    239 
    240 	rel->res = res;
    241 	rel->offset = offset;
    242 	list_add_tail(&rel->head, list);
    243 
    244 	return 0;
    245 }
    246 
    247 /**
    248  * vmw_resource_relocations_free - Free all relocations on a list
    249  *
    250  * @list: Pointer to the head of the relocation list.
    251  */
    252 static void vmw_resource_relocations_free(struct list_head *list)
    253 {
    254 	struct vmw_resource_relocation *rel, *n;
    255 
    256 	list_for_each_entry_safe(rel, n, list, head) {
    257 		list_del(&rel->head);
    258 		kfree(rel);
    259 	}
    260 }
    261 
    262 /**
    263  * vmw_resource_relocations_apply - Apply all relocations on a list
    264  *
     265  * @cb: Pointer to the start of the command buffer being patched. This need
    266  * not be the same buffer as the one being parsed when the relocation
    267  * list was built, but the contents must be the same modulo the
    268  * resource ids.
    269  * @list: Pointer to the head of the relocation list.
    270  */
    271 static void vmw_resource_relocations_apply(uint32_t *cb,
    272 					   struct list_head *list)
    273 {
    274 	struct vmw_resource_relocation *rel;
    275 
    276 	list_for_each_entry(rel, list, head) {
    277 		if (likely(rel->res != NULL))
    278 			cb[rel->offset] = rel->res->id;
    279 		else
    280 			cb[rel->offset] = SVGA_3D_CMD_NOP;
    281 	}
    282 }
    283 
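         /*
          * vmw_cmd_invalid - Verifier callback for commands that the verifier
          * refuses to accept from the execbuf ioctl.
          */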
    284 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
    285 			   struct vmw_sw_context *sw_context,
    286 			   SVGA3dCmdHeader *header)
    287 {
    288 	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
    289 }
    290 
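         /*
          * vmw_cmd_ok - Verifier callback for commands that are always accepted
          * and need no further checking.
          */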
    291 static int vmw_cmd_ok(struct vmw_private *dev_priv,
    292 		      struct vmw_sw_context *sw_context,
    293 		      SVGA3dCmdHeader *header)
    294 {
    295 	return 0;
    296 }
    297 
    298 /**
    299  * vmw_bo_to_validate_list - add a bo to a validate list
    300  *
    301  * @sw_context: The software context used for this command submission batch.
    302  * @bo: The buffer object to add.
    303  * @validate_as_mob: Validate this buffer as a MOB.
     304  * @p_val_node: If non-NULL, will be updated with the validate node number
    305  * on return.
    306  *
    307  * Returns -EINVAL if the limit of number of buffer objects per command
    308  * submission is reached.
    309  */
    310 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
    311 				   struct ttm_buffer_object *bo,
    312 				   bool validate_as_mob,
    313 				   uint32_t *p_val_node)
    314 {
    315 	uint32_t val_node;
    316 	struct vmw_validate_buffer *vval_buf;
    317 	struct ttm_validate_buffer *val_buf;
    318 	struct drm_hash_item *hash;
    319 	int ret;
    320 
    321 	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
    322 				    &hash) == 0)) {
    323 		vval_buf = container_of(hash, struct vmw_validate_buffer,
    324 					hash);
    325 		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
    326 			DRM_ERROR("Inconsistent buffer usage.\n");
    327 			return -EINVAL;
    328 		}
    329 		val_buf = &vval_buf->base;
    330 		val_node = vval_buf - sw_context->val_bufs;
    331 	} else {
    332 		val_node = sw_context->cur_val_buf;
    333 		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
    334 			DRM_ERROR("Max number of DMA buffers per submission "
    335 				  "exceeded.\n");
    336 			return -EINVAL;
    337 		}
    338 		vval_buf = &sw_context->val_bufs[val_node];
    339 		vval_buf->hash.key = (unsigned long) bo;
    340 		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
    341 		if (unlikely(ret != 0)) {
    342 			DRM_ERROR("Failed to initialize a buffer validation "
    343 				  "entry.\n");
    344 			return ret;
    345 		}
    346 		++sw_context->cur_val_buf;
    347 		val_buf = &vval_buf->base;
    348 		val_buf->bo = ttm_bo_reference(bo);
    349 		val_buf->reserved = false;
    350 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
    351 		vval_buf->validate_as_mob = validate_as_mob;
    352 	}
    353 
    354 	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
    355 
    356 	if (p_val_node)
    357 		*p_val_node = val_node;
    358 
    359 	return 0;
    360 }
    361 
    362 /**
    363  * vmw_resources_reserve - Reserve all resources on the sw_context's
    364  * resource list.
    365  *
    366  * @sw_context: Pointer to the software context.
    367  *
     368  * Note that since VMware's command submission is currently protected by
     369  * the cmdbuf mutex, no fancy deadlock avoidance is required for resources:
     370  * only a single thread at a time will attempt this.
    371  */
    372 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
    373 {
    374 	struct vmw_resource_val_node *val;
    375 	int ret;
    376 
    377 	list_for_each_entry(val, &sw_context->resource_list, head) {
    378 		struct vmw_resource *res = val->res;
    379 
    380 		ret = vmw_resource_reserve(res, val->no_buffer_needed);
    381 		if (unlikely(ret != 0))
    382 			return ret;
    383 
    384 		if (res->backup) {
    385 			struct ttm_buffer_object *bo = &res->backup->base;
    386 
    387 			ret = vmw_bo_to_validate_list
    388 				(sw_context, bo,
    389 				 vmw_resource_needs_backup(res), NULL);
    390 
    391 			if (unlikely(ret != 0))
    392 				return ret;
    393 		}
    394 	}
    395 	return 0;
    396 }
    397 
    398 /**
    399  * vmw_resources_validate - Validate all resources on the sw_context's
    400  * resource list.
    401  *
    402  * @sw_context: Pointer to the software context.
    403  *
    404  * Before this function is called, all resource backup buffers must have
    405  * been validated.
    406  */
    407 static int vmw_resources_validate(struct vmw_sw_context *sw_context)
    408 {
    409 	struct vmw_resource_val_node *val;
    410 	int ret;
    411 
    412 	list_for_each_entry(val, &sw_context->resource_list, head) {
    413 		struct vmw_resource *res = val->res;
    414 
    415 		ret = vmw_resource_validate(res);
    416 		if (unlikely(ret != 0)) {
    417 			if (ret != -ERESTARTSYS)
    418 				DRM_ERROR("Failed to validate resource.\n");
    419 			return ret;
    420 		}
    421 	}
    422 	return 0;
    423 }
    424 
    425 /**
    426  * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
    427  * on the resource validate list unless it's already there.
    428  *
    429  * @dev_priv: Pointer to a device private structure.
    430  * @sw_context: Pointer to the software context.
    431  * @res_type: Resource type.
     432  * @converter: User-space visible type specific information.
    433  * @id: user-space resource id handle.
    434  * @id_loc: Pointer to the location in the command buffer currently being
    435  * parsed from where the user-space resource id handle is located.
     436  * @p_val: Pointer to a pointer to a resource validation node. Populated
    437  * on exit.
    438  */
    439 static int
    440 vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
    441 			 struct vmw_sw_context *sw_context,
    442 			 enum vmw_res_type res_type,
    443 			 const struct vmw_user_resource_conv *converter,
    444 			 uint32_t id,
    445 			 uint32_t *id_loc,
    446 			 struct vmw_resource_val_node **p_val)
    447 {
    448 	struct vmw_res_cache_entry *rcache =
    449 		&sw_context->res_cache[res_type];
    450 	struct vmw_resource *res;
    451 	struct vmw_resource_val_node *node;
    452 	int ret;
    453 
    454 	if (id == SVGA3D_INVALID_ID) {
    455 		if (p_val)
    456 			*p_val = NULL;
    457 		if (res_type == vmw_res_context) {
    458 			DRM_ERROR("Illegal context invalid id.\n");
    459 			return -EINVAL;
    460 		}
    461 		return 0;
    462 	}
    463 
    464 	/*
    465 	 * Fastpath in case of repeated commands referencing the same
    466 	 * resource
    467 	 */
    468 
    469 	if (likely(rcache->valid && id == rcache->handle)) {
    470 		const struct vmw_resource *res = rcache->res;
    471 
    472 		rcache->node->first_usage = false;
    473 		if (p_val)
    474 			*p_val = rcache->node;
    475 
    476 		return vmw_resource_relocation_add
    477 			(&sw_context->res_relocations, res,
    478 			 id_loc - sw_context->buf_start);
    479 	}
    480 
    481 	ret = vmw_user_resource_lookup_handle(dev_priv,
    482 					      sw_context->fp->tfile,
    483 					      id,
    484 					      converter,
    485 					      &res);
    486 	if (unlikely(ret != 0)) {
    487 		DRM_ERROR("Could not find or use resource 0x%08x.\n",
    488 			  (unsigned) id);
    489 		dump_stack();
    490 		return ret;
    491 	}
    492 
    493 	rcache->valid = true;
    494 	rcache->res = res;
    495 	rcache->handle = id;
    496 
    497 	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
    498 					  res,
    499 					  id_loc - sw_context->buf_start);
    500 	if (unlikely(ret != 0))
    501 		goto out_no_reloc;
    502 
    503 	ret = vmw_resource_val_add(sw_context, res, &node);
    504 	if (unlikely(ret != 0))
    505 		goto out_no_reloc;
    506 
    507 	rcache->node = node;
    508 	if (p_val)
    509 		*p_val = node;
    510 
    511 	if (dev_priv->has_mob && node->first_usage &&
    512 	    res_type == vmw_res_context) {
    513 		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
    514 		if (unlikely(ret != 0))
    515 			goto out_no_reloc;
    516 		node->staged_bindings =
    517 			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
    518 		if (node->staged_bindings == NULL) {
    519 			DRM_ERROR("Failed to allocate context binding "
    520 				  "information.\n");
         			ret = -ENOMEM;
     521 			goto out_no_reloc;
    522 		}
    523 		INIT_LIST_HEAD(&node->staged_bindings->list);
    524 	}
    525 
    526 	vmw_resource_unreference(&res);
    527 	return 0;
    528 
    529 out_no_reloc:
    530 	BUG_ON(sw_context->error_resource != NULL);
    531 	sw_context->error_resource = res;
    532 
    533 	return ret;
    534 }
    535 
    536 /**
    537  * vmw_cmd_res_check - Check that a resource is present and if so, put it
    538  * on the resource validate list unless it's already there.
    539  *
    540  * @dev_priv: Pointer to a device private structure.
    541  * @sw_context: Pointer to the software context.
    542  * @res_type: Resource type.
     543  * @converter: User-space visible type specific information.
    544  * @id_loc: Pointer to the location in the command buffer currently being
    545  * parsed from where the user-space resource id handle is located.
     546  * @p_val: Pointer to a pointer to a resource validation node. Populated
    547  * on exit.
    548  */
    549 static int
    550 vmw_cmd_res_check(struct vmw_private *dev_priv,
    551 		  struct vmw_sw_context *sw_context,
    552 		  enum vmw_res_type res_type,
    553 		  const struct vmw_user_resource_conv *converter,
    554 		  uint32_t *id_loc,
    555 		  struct vmw_resource_val_node **p_val)
    556 {
    557 	return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
    558 					converter, *id_loc, id_loc, p_val);
    559 }
    560 
    561 /**
    562  * vmw_rebind_contexts - Rebind all resources previously bound to
    563  * referenced contexts.
    564  *
    565  * @sw_context: Pointer to the software context.
    566  *
    567  * Rebind context binding points that have been scrubbed because of eviction.
    568  */
    569 static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
    570 {
    571 	struct vmw_resource_val_node *val;
    572 	int ret;
    573 
    574 	list_for_each_entry(val, &sw_context->resource_list, head) {
    575 		if (likely(!val->staged_bindings))
    576 			continue;
    577 
    578 		ret = vmw_context_rebind_all(val->res);
    579 		if (unlikely(ret != 0)) {
    580 			if (ret != -ERESTARTSYS)
    581 				DRM_ERROR("Failed to rebind context.\n");
    582 			return ret;
    583 		}
    584 	}
    585 
    586 	return 0;
    587 }
    588 
    589 /**
    590  * vmw_cmd_cid_check - Check a command header for valid context information.
    591  *
    592  * @dev_priv: Pointer to a device private structure.
    593  * @sw_context: Pointer to the software context.
    594  * @header: A command header with an embedded user-space context handle.
    595  *
    596  * Convenience function: Call vmw_cmd_res_check with the user-space context
    597  * handle embedded in @header.
    598  */
    599 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
    600 			     struct vmw_sw_context *sw_context,
    601 			     SVGA3dCmdHeader *header)
    602 {
    603 	struct vmw_cid_cmd {
    604 		SVGA3dCmdHeader header;
    605 		uint32_t cid;
    606 	} *cmd;
    607 
    608 	cmd = container_of(header, struct vmw_cid_cmd, header);
    609 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
    610 				 user_context_converter, &cmd->cid, NULL);
    611 }
    612 
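         /**
          * vmw_cmd_set_render_target_check - Validate an
          * SVGA_3D_CMD_SETRENDERTARGET command.
          *
          * @dev_priv: Pointer to a device private struct.
          * @sw_context: The software context used for this command submission.
          * @header: Pointer to the command header in the command stream.
          *
          * Checks the context and render-target surface handles and, when
          * guest-backed contexts are in use, stages a render-target binding for
          * the context.
          */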
    613 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
    614 					   struct vmw_sw_context *sw_context,
    615 					   SVGA3dCmdHeader *header)
    616 {
    617 	struct vmw_sid_cmd {
    618 		SVGA3dCmdHeader header;
    619 		SVGA3dCmdSetRenderTarget body;
    620 	} *cmd;
    621 	struct vmw_resource_val_node *ctx_node;
    622 	struct vmw_resource_val_node *res_node;
    623 	int ret;
    624 
    625 	cmd = container_of(header, struct vmw_sid_cmd, header);
    626 
    627 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
    628 				user_context_converter, &cmd->body.cid,
    629 				&ctx_node);
    630 	if (unlikely(ret != 0))
    631 		return ret;
    632 
    633 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
    634 				user_surface_converter,
    635 				&cmd->body.target.sid, &res_node);
    636 	if (unlikely(ret != 0))
    637 		return ret;
    638 
    639 	if (dev_priv->has_mob) {
    640 		struct vmw_ctx_bindinfo bi;
    641 
    642 		bi.ctx = ctx_node->res;
    643 		bi.res = res_node ? res_node->res : NULL;
    644 		bi.bt = vmw_ctx_binding_rt;
    645 		bi.i1.rt_type = cmd->body.type;
    646 		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
    647 	}
    648 
    649 	return 0;
    650 }
    651 
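         /**
          * vmw_cmd_surface_copy_check - Validate an SVGA_3D_CMD_SURFACE_COPY
          * command by checking the source and destination surface handles.
          *
          * @dev_priv: Pointer to a device private struct.
          * @sw_context: The software context used for this command submission.
          * @header: Pointer to the command header in the command stream.
          */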
    652 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
    653 				      struct vmw_sw_context *sw_context,
    654 				      SVGA3dCmdHeader *header)
    655 {
    656 	struct vmw_sid_cmd {
    657 		SVGA3dCmdHeader header;
    658 		SVGA3dCmdSurfaceCopy body;
    659 	} *cmd;
    660 	int ret;
    661 
    662 	cmd = container_of(header, struct vmw_sid_cmd, header);
    663 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
    664 				user_surface_converter,
    665 				&cmd->body.src.sid, NULL);
    666 	if (unlikely(ret != 0))
    667 		return ret;
    668 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
    669 				 user_surface_converter,
    670 				 &cmd->body.dest.sid, NULL);
    671 }
    672 
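         /**
          * vmw_cmd_stretch_blt_check - Validate an
          * SVGA_3D_CMD_SURFACE_STRETCHBLT command by checking the source and
          * destination surface handles.
          *
          * @dev_priv: Pointer to a device private struct.
          * @sw_context: The software context used for this command submission.
          * @header: Pointer to the command header in the command stream.
          */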
    673 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
    674 				     struct vmw_sw_context *sw_context,
    675 				     SVGA3dCmdHeader *header)
    676 {
    677 	struct vmw_sid_cmd {
    678 		SVGA3dCmdHeader header;
    679 		SVGA3dCmdSurfaceStretchBlt body;
    680 	} *cmd;
    681 	int ret;
    682 
    683 	cmd = container_of(header, struct vmw_sid_cmd, header);
    684 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
    685 				user_surface_converter,
    686 				&cmd->body.src.sid, NULL);
    687 	if (unlikely(ret != 0))
    688 		return ret;
    689 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
    690 				 user_surface_converter,
    691 				 &cmd->body.dest.sid, NULL);
    692 }
    693 
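         /**
          * vmw_cmd_blt_surf_screen_check - Validate an
          * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN command by checking the source
          * surface handle.
          *
          * @dev_priv: Pointer to a device private struct.
          * @sw_context: The software context used for this command submission.
          * @header: Pointer to the command header in the command stream.
          */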
    694 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
    695 					 struct vmw_sw_context *sw_context,
    696 					 SVGA3dCmdHeader *header)
    697 {
    698 	struct vmw_sid_cmd {
    699 		SVGA3dCmdHeader header;
    700 		SVGA3dCmdBlitSurfaceToScreen body;
    701 	} *cmd;
    702 
    703 	cmd = container_of(header, struct vmw_sid_cmd, header);
    704 
    705 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
    706 				 user_surface_converter,
    707 				 &cmd->body.srcImage.sid, NULL);
    708 }
    709 
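         /**
          * vmw_cmd_present_check - Validate an SVGA_3D_CMD_PRESENT command by
          * checking the presented surface handle.
          *
          * @dev_priv: Pointer to a device private struct.
          * @sw_context: The software context used for this command submission.
          * @header: Pointer to the command header in the command stream.
          */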
    710 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
    711 				 struct vmw_sw_context *sw_context,
    712 				 SVGA3dCmdHeader *header)
    713 {
    714 	struct vmw_sid_cmd {
    715 		SVGA3dCmdHeader header;
    716 		SVGA3dCmdPresent body;
    717 	} *cmd;
    718 
    719 
    720 	cmd = container_of(header, struct vmw_sid_cmd, header);
    721 
    722 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
    723 				 user_surface_converter, &cmd->body.sid,
    724 				 NULL);
    725 }
    726 
    727 /**
    728  * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
    729  *
    730  * @dev_priv: The device private structure.
    731  * @new_query_bo: The new buffer holding query results.
    732  * @sw_context: The software context used for this command submission.
    733  *
    734  * This function checks whether @new_query_bo is suitable for holding
    735  * query results, and if another buffer currently is pinned for query
    736  * results. If so, the function prepares the state of @sw_context for
    737  * switching pinned buffers after successful submission of the current
    738  * command batch.
    739  */
    740 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
    741 				       struct ttm_buffer_object *new_query_bo,
    742 				       struct vmw_sw_context *sw_context)
    743 {
    744 	struct vmw_res_cache_entry *ctx_entry =
    745 		&sw_context->res_cache[vmw_res_context];
    746 	int ret;
    747 
    748 	BUG_ON(!ctx_entry->valid);
    749 	sw_context->last_query_ctx = ctx_entry->res;
    750 
    751 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
    752 
    753 		if (unlikely(new_query_bo->num_pages > 4)) {
    754 			DRM_ERROR("Query buffer too large.\n");
    755 			return -EINVAL;
    756 		}
    757 
    758 		if (unlikely(sw_context->cur_query_bo != NULL)) {
    759 			sw_context->needs_post_query_barrier = true;
    760 			ret = vmw_bo_to_validate_list(sw_context,
    761 						      sw_context->cur_query_bo,
    762 						      dev_priv->has_mob, NULL);
    763 			if (unlikely(ret != 0))
    764 				return ret;
    765 		}
    766 		sw_context->cur_query_bo = new_query_bo;
    767 
    768 		ret = vmw_bo_to_validate_list(sw_context,
    769 					      dev_priv->dummy_query_bo,
    770 					      dev_priv->has_mob, NULL);
    771 		if (unlikely(ret != 0))
    772 			return ret;
    773 
    774 	}
    775 
    776 	return 0;
    777 }
    778 
    779 
    780 /**
    781  * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
    782  *
    783  * @dev_priv: The device private structure.
    784  * @sw_context: The software context used for this command submission batch.
    785  *
    786  * This function will check if we're switching query buffers, and will then,
    787  * issue a dummy occlusion query wait used as a query barrier. When the fence
    788  * object following that query wait has signaled, we are sure that all
    789  * preceding queries have finished, and the old query buffer can be unpinned.
    790  * However, since both the new query buffer and the old one are fenced with
     791  * that fence, we can do an asynchronous unpin now, and be sure that the
    792  * old query buffer won't be moved until the fence has signaled.
    793  *
     794  * As mentioned above, both the new and the old query buffers need to be fenced
    795  * using a sequence emitted *after* calling this function.
    796  */
    797 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
    798 				     struct vmw_sw_context *sw_context)
    799 {
    800 	/*
    801 	 * The validate list should still hold references to all
    802 	 * contexts here.
    803 	 */
    804 
    805 	if (sw_context->needs_post_query_barrier) {
    806 		struct vmw_res_cache_entry *ctx_entry =
    807 			&sw_context->res_cache[vmw_res_context];
    808 		struct vmw_resource *ctx;
    809 		int ret;
    810 
    811 		BUG_ON(!ctx_entry->valid);
    812 		ctx = ctx_entry->res;
    813 
    814 		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
    815 
    816 		if (unlikely(ret != 0))
    817 			DRM_ERROR("Out of fifo space for dummy query.\n");
    818 	}
    819 
    820 	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
    821 		if (dev_priv->pinned_bo) {
    822 			vmw_bo_pin(dev_priv->pinned_bo, false);
    823 			ttm_bo_unref(&dev_priv->pinned_bo);
    824 		}
    825 
    826 		if (!sw_context->needs_post_query_barrier) {
    827 			vmw_bo_pin(sw_context->cur_query_bo, true);
    828 
    829 			/*
     830 			 * We also pin the dummy_query_bo buffer so that we
    831 			 * don't need to validate it when emitting
    832 			 * dummy queries in context destroy paths.
    833 			 */
    834 
    835 			vmw_bo_pin(dev_priv->dummy_query_bo, true);
    836 			dev_priv->dummy_query_bo_pinned = true;
    837 
    838 			BUG_ON(sw_context->last_query_ctx == NULL);
    839 			dev_priv->query_cid = sw_context->last_query_ctx->id;
    840 			dev_priv->query_cid_valid = true;
    841 			dev_priv->pinned_bo =
    842 				ttm_bo_reference(sw_context->cur_query_bo);
    843 		}
    844 	}
    845 }
    846 
    847 /**
     848  * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
    849  * handle to a MOB id.
    850  *
    851  * @dev_priv: Pointer to a device private structure.
    852  * @sw_context: The software context used for this command batch validation.
    853  * @id: Pointer to the user-space handle to be translated.
    854  * @vmw_bo_p: Points to a location that, on successful return will carry
    855  * a reference-counted pointer to the DMA buffer identified by the
    856  * user-space handle in @id.
    857  *
    858  * This function saves information needed to translate a user-space buffer
    859  * handle to a MOB id. The translation does not take place immediately, but
    860  * during a call to vmw_apply_relocations(). This function builds a relocation
    861  * list and a list of buffers to validate. The former needs to be freed using
    862  * either vmw_apply_relocations() or vmw_free_relocations(). The latter
    863  * needs to be freed using vmw_clear_validations.
    864  */
    865 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
    866 				 struct vmw_sw_context *sw_context,
    867 				 SVGAMobId *id,
    868 				 struct vmw_dma_buffer **vmw_bo_p)
    869 {
    870 	struct vmw_dma_buffer *vmw_bo = NULL;
    871 	struct ttm_buffer_object *bo;
    872 	uint32_t handle = *id;
    873 	struct vmw_relocation *reloc;
    874 	int ret;
    875 
    876 	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
    877 	if (unlikely(ret != 0)) {
    878 		DRM_ERROR("Could not find or use MOB buffer.\n");
    879 		return -EINVAL;
    880 	}
    881 	bo = &vmw_bo->base;
    882 
    883 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
     884 		DRM_ERROR("Max number of relocations per submission"
     885 			  " exceeded.\n");
    886 		ret = -EINVAL;
    887 		goto out_no_reloc;
    888 	}
    889 
    890 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
    891 	reloc->mob_loc = id;
    892 	reloc->location = NULL;
    893 
    894 	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
    895 	if (unlikely(ret != 0))
    896 		goto out_no_reloc;
    897 
    898 	*vmw_bo_p = vmw_bo;
    899 	return 0;
    900 
    901 out_no_reloc:
    902 	vmw_dmabuf_unreference(&vmw_bo);
     903 	*vmw_bo_p = NULL;
    904 	return ret;
    905 }
    906 
    907 /**
    908  * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
    909  * handle to a valid SVGAGuestPtr
    910  *
    911  * @dev_priv: Pointer to a device private structure.
    912  * @sw_context: The software context used for this command batch validation.
    913  * @ptr: Pointer to the user-space handle to be translated.
    914  * @vmw_bo_p: Points to a location that, on successful return will carry
    915  * a reference-counted pointer to the DMA buffer identified by the
     916  * user-space handle in @ptr.
    917  *
    918  * This function saves information needed to translate a user-space buffer
    919  * handle to a valid SVGAGuestPtr. The translation does not take place
    920  * immediately, but during a call to vmw_apply_relocations().
    921  * This function builds a relocation list and a list of buffers to validate.
    922  * The former needs to be freed using either vmw_apply_relocations() or
    923  * vmw_free_relocations(). The latter needs to be freed using
    924  * vmw_clear_validations.
    925  */
    926 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
    927 				   struct vmw_sw_context *sw_context,
    928 				   SVGAGuestPtr *ptr,
    929 				   struct vmw_dma_buffer **vmw_bo_p)
    930 {
    931 	struct vmw_dma_buffer *vmw_bo = NULL;
    932 	struct ttm_buffer_object *bo;
    933 	uint32_t handle = ptr->gmrId;
    934 	struct vmw_relocation *reloc;
    935 	int ret;
    936 
    937 	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
    938 	if (unlikely(ret != 0)) {
    939 		DRM_ERROR("Could not find or use GMR region.\n");
    940 		return -EINVAL;
    941 	}
    942 	bo = &vmw_bo->base;
    943 
    944 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
     945 		DRM_ERROR("Max number of relocations per submission"
     946 			  " exceeded.\n");
    947 		ret = -EINVAL;
    948 		goto out_no_reloc;
    949 	}
    950 
    951 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
    952 	reloc->location = ptr;
    953 
    954 	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
    955 	if (unlikely(ret != 0))
    956 		goto out_no_reloc;
    957 
    958 	*vmw_bo_p = vmw_bo;
    959 	return 0;
    960 
    961 out_no_reloc:
    962 	vmw_dmabuf_unreference(&vmw_bo);
     963 	*vmw_bo_p = NULL;
    964 	return ret;
    965 }
    966 
    967 /**
     968  * vmw_cmd_begin_gb_query - Validate an SVGA_3D_CMD_BEGIN_GB_QUERY command.
    969  *
    970  * @dev_priv: Pointer to a device private struct.
    971  * @sw_context: The software context used for this command submission.
    972  * @header: Pointer to the command header in the command stream.
    973  */
    974 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
    975 				  struct vmw_sw_context *sw_context,
    976 				  SVGA3dCmdHeader *header)
    977 {
    978 	struct vmw_begin_gb_query_cmd {
    979 		SVGA3dCmdHeader header;
    980 		SVGA3dCmdBeginGBQuery q;
    981 	} *cmd;
    982 
    983 	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
    984 			   header);
    985 
    986 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
    987 				 user_context_converter, &cmd->q.cid,
    988 				 NULL);
    989 }
    990 
    991 /**
     992  * vmw_cmd_begin_query - Validate an SVGA_3D_CMD_BEGIN_QUERY command.
    993  *
    994  * @dev_priv: Pointer to a device private struct.
    995  * @sw_context: The software context used for this command submission.
    996  * @header: Pointer to the command header in the command stream.
    997  */
    998 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
    999 			       struct vmw_sw_context *sw_context,
   1000 			       SVGA3dCmdHeader *header)
   1001 {
   1002 	struct vmw_begin_query_cmd {
   1003 		SVGA3dCmdHeader header;
   1004 		SVGA3dCmdBeginQuery q;
   1005 	} *cmd;
   1006 
   1007 	cmd = container_of(header, struct vmw_begin_query_cmd,
   1008 			   header);
   1009 
   1010 	if (unlikely(dev_priv->has_mob)) {
   1011 		struct {
   1012 			SVGA3dCmdHeader header;
   1013 			SVGA3dCmdBeginGBQuery q;
   1014 		} gb_cmd;
   1015 
   1016 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
   1017 
   1018 		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
   1019 		gb_cmd.header.size = cmd->header.size;
   1020 		gb_cmd.q.cid = cmd->q.cid;
   1021 		gb_cmd.q.type = cmd->q.type;
   1022 
   1023 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
   1024 		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
   1025 	}
   1026 
   1027 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
   1028 				 user_context_converter, &cmd->q.cid,
   1029 				 NULL);
   1030 }
   1031 
   1032 /**
    1033  * vmw_cmd_end_gb_query - Validate an SVGA_3D_CMD_END_GB_QUERY command.
   1034  *
   1035  * @dev_priv: Pointer to a device private struct.
   1036  * @sw_context: The software context used for this command submission.
   1037  * @header: Pointer to the command header in the command stream.
   1038  */
   1039 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
   1040 				struct vmw_sw_context *sw_context,
   1041 				SVGA3dCmdHeader *header)
   1042 {
   1043 	struct vmw_dma_buffer *vmw_bo;
   1044 	struct vmw_query_cmd {
   1045 		SVGA3dCmdHeader header;
   1046 		SVGA3dCmdEndGBQuery q;
   1047 	} *cmd;
   1048 	int ret;
   1049 
   1050 	cmd = container_of(header, struct vmw_query_cmd, header);
   1051 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
   1052 	if (unlikely(ret != 0))
   1053 		return ret;
   1054 
   1055 	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
   1056 				    &cmd->q.mobid,
   1057 				    &vmw_bo);
   1058 	if (unlikely(ret != 0))
   1059 		return ret;
   1060 
   1061 	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
   1062 
   1063 	vmw_dmabuf_unreference(&vmw_bo);
   1064 	return ret;
   1065 }
   1066 
   1067 /**
    1068  * vmw_cmd_end_query - Validate an SVGA_3D_CMD_END_QUERY command.
   1069  *
   1070  * @dev_priv: Pointer to a device private struct.
   1071  * @sw_context: The software context used for this command submission.
   1072  * @header: Pointer to the command header in the command stream.
   1073  */
   1074 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
   1075 			     struct vmw_sw_context *sw_context,
   1076 			     SVGA3dCmdHeader *header)
   1077 {
   1078 	struct vmw_dma_buffer *vmw_bo;
   1079 	struct vmw_query_cmd {
   1080 		SVGA3dCmdHeader header;
   1081 		SVGA3dCmdEndQuery q;
   1082 	} *cmd;
   1083 	int ret;
   1084 
   1085 	cmd = container_of(header, struct vmw_query_cmd, header);
   1086 	if (dev_priv->has_mob) {
   1087 		struct {
   1088 			SVGA3dCmdHeader header;
   1089 			SVGA3dCmdEndGBQuery q;
   1090 		} gb_cmd;
   1091 
   1092 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
   1093 
   1094 		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
   1095 		gb_cmd.header.size = cmd->header.size;
   1096 		gb_cmd.q.cid = cmd->q.cid;
   1097 		gb_cmd.q.type = cmd->q.type;
   1098 		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
   1099 		gb_cmd.q.offset = cmd->q.guestResult.offset;
   1100 
   1101 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
   1102 		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
   1103 	}
   1104 
   1105 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
   1106 	if (unlikely(ret != 0))
   1107 		return ret;
   1108 
   1109 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
   1110 				      &cmd->q.guestResult,
   1111 				      &vmw_bo);
   1112 	if (unlikely(ret != 0))
   1113 		return ret;
   1114 
   1115 	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
   1116 
   1117 	vmw_dmabuf_unreference(&vmw_bo);
   1118 	return ret;
   1119 }
   1120 
   1121 /**
    1122  * vmw_cmd_wait_gb_query - Validate an SVGA_3D_CMD_WAIT_GB_QUERY command.
   1123  *
   1124  * @dev_priv: Pointer to a device private struct.
   1125  * @sw_context: The software context used for this command submission.
   1126  * @header: Pointer to the command header in the command stream.
   1127  */
   1128 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
   1129 				 struct vmw_sw_context *sw_context,
   1130 				 SVGA3dCmdHeader *header)
   1131 {
   1132 	struct vmw_dma_buffer *vmw_bo;
   1133 	struct vmw_query_cmd {
   1134 		SVGA3dCmdHeader header;
   1135 		SVGA3dCmdWaitForGBQuery q;
   1136 	} *cmd;
   1137 	int ret;
   1138 
   1139 	cmd = container_of(header, struct vmw_query_cmd, header);
   1140 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
   1141 	if (unlikely(ret != 0))
   1142 		return ret;
   1143 
   1144 	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
   1145 				    &cmd->q.mobid,
   1146 				    &vmw_bo);
   1147 	if (unlikely(ret != 0))
   1148 		return ret;
   1149 
   1150 	vmw_dmabuf_unreference(&vmw_bo);
   1151 	return 0;
   1152 }
   1153 
   1154 /**
    1155  * vmw_cmd_wait_query - Validate an SVGA_3D_CMD_WAIT_QUERY command.
   1156  *
   1157  * @dev_priv: Pointer to a device private struct.
   1158  * @sw_context: The software context used for this command submission.
   1159  * @header: Pointer to the command header in the command stream.
   1160  */
   1161 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
   1162 			      struct vmw_sw_context *sw_context,
   1163 			      SVGA3dCmdHeader *header)
   1164 {
   1165 	struct vmw_dma_buffer *vmw_bo;
   1166 	struct vmw_query_cmd {
   1167 		SVGA3dCmdHeader header;
   1168 		SVGA3dCmdWaitForQuery q;
   1169 	} *cmd;
   1170 	int ret;
   1171 
   1172 	cmd = container_of(header, struct vmw_query_cmd, header);
   1173 	if (dev_priv->has_mob) {
   1174 		struct {
   1175 			SVGA3dCmdHeader header;
   1176 			SVGA3dCmdWaitForGBQuery q;
   1177 		} gb_cmd;
   1178 
   1179 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
   1180 
   1181 		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
   1182 		gb_cmd.header.size = cmd->header.size;
   1183 		gb_cmd.q.cid = cmd->q.cid;
   1184 		gb_cmd.q.type = cmd->q.type;
   1185 		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
   1186 		gb_cmd.q.offset = cmd->q.guestResult.offset;
   1187 
   1188 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
   1189 		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
   1190 	}
   1191 
   1192 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
   1193 	if (unlikely(ret != 0))
   1194 		return ret;
   1195 
   1196 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
   1197 				      &cmd->q.guestResult,
   1198 				      &vmw_bo);
   1199 	if (unlikely(ret != 0))
   1200 		return ret;
   1201 
   1202 	vmw_dmabuf_unreference(&vmw_bo);
   1203 	return 0;
   1204 }
   1205 
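         /**
          * vmw_cmd_dma - Validate an SVGA_3D_CMD_SURFACE_DMA command.
          *
          * @dev_priv: Pointer to a device private struct.
          * @sw_context: The software context used for this command submission.
          * @header: Pointer to the command header in the command stream.
          *
          * Translates the guest pointer, clamps the transfer to the backing
          * buffer object, validates the host surface handle and lets the kernel
          * snoop cursor surface contents where needed.
          */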
   1206 static int vmw_cmd_dma(struct vmw_private *dev_priv,
   1207 		       struct vmw_sw_context *sw_context,
   1208 		       SVGA3dCmdHeader *header)
   1209 {
   1210 	struct vmw_dma_buffer *vmw_bo = NULL;
   1211 	struct vmw_surface *srf = NULL;
   1212 	struct vmw_dma_cmd {
   1213 		SVGA3dCmdHeader header;
   1214 		SVGA3dCmdSurfaceDMA dma;
   1215 	} *cmd;
   1216 	int ret;
   1217 	SVGA3dCmdSurfaceDMASuffix *suffix;
   1218 	uint32_t bo_size;
   1219 
   1220 	cmd = container_of(header, struct vmw_dma_cmd, header);
   1221 	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
   1222 					       header->size - sizeof(*suffix));
   1223 
    1224 	/* Make sure device and verifier stay in sync. */
   1225 	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
   1226 		DRM_ERROR("Invalid DMA suffix size.\n");
   1227 		return -EINVAL;
   1228 	}
   1229 
   1230 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
   1231 				      &cmd->dma.guest.ptr,
   1232 				      &vmw_bo);
   1233 	if (unlikely(ret != 0))
   1234 		return ret;
   1235 
   1236 	/* Make sure DMA doesn't cross BO boundaries. */
   1237 	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
   1238 	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
   1239 		DRM_ERROR("Invalid DMA offset.\n");
    1240 		ret = -EINVAL;
         		goto out_no_surface;
   1241 	}
   1242 
   1243 	bo_size -= cmd->dma.guest.ptr.offset;
   1244 	if (unlikely(suffix->maximumOffset > bo_size))
   1245 		suffix->maximumOffset = bo_size;
   1246 
   1247 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
   1248 				user_surface_converter, &cmd->dma.host.sid,
   1249 				NULL);
   1250 	if (unlikely(ret != 0)) {
   1251 		if (unlikely(ret != -ERESTARTSYS))
   1252 			DRM_ERROR("could not find surface for DMA.\n");
   1253 		goto out_no_surface;
   1254 	}
   1255 
   1256 	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
   1257 
   1258 	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
   1259 			     header);
   1260 
   1261 out_no_surface:
   1262 	vmw_dmabuf_unreference(&vmw_bo);
   1263 	return ret;
   1264 }
   1265 
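         /**
          * vmw_cmd_draw - Validate an SVGA_3D_CMD_DRAW_PRIMITIVES command.
          *
          * @dev_priv: Pointer to a device private struct.
          * @sw_context: The software context used for this command submission.
          * @header: Pointer to the command header in the command stream.
          *
          * Checks the context and the surfaces referenced by all vertex
          * declarations and index ranges, and bounds-checks the declared counts
          * against the command size.
          */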
   1266 static int vmw_cmd_draw(struct vmw_private *dev_priv,
   1267 			struct vmw_sw_context *sw_context,
   1268 			SVGA3dCmdHeader *header)
   1269 {
   1270 	struct vmw_draw_cmd {
   1271 		SVGA3dCmdHeader header;
   1272 		SVGA3dCmdDrawPrimitives body;
   1273 	} *cmd;
   1274 	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
   1275 		(unsigned long)header + sizeof(*cmd));
   1276 	SVGA3dPrimitiveRange *range;
   1277 	uint32_t i;
   1278 	uint32_t maxnum;
   1279 	int ret;
   1280 
   1281 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
   1282 	if (unlikely(ret != 0))
   1283 		return ret;
   1284 
   1285 	cmd = container_of(header, struct vmw_draw_cmd, header);
   1286 	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
   1287 
   1288 	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
   1289 		DRM_ERROR("Illegal number of vertex declarations.\n");
   1290 		return -EINVAL;
   1291 	}
   1292 
   1293 	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
   1294 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
   1295 					user_surface_converter,
   1296 					&decl->array.surfaceId, NULL);
   1297 		if (unlikely(ret != 0))
   1298 			return ret;
   1299 	}
   1300 
   1301 	maxnum = (header->size - sizeof(cmd->body) -
   1302 		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
   1303 	if (unlikely(cmd->body.numRanges > maxnum)) {
   1304 		DRM_ERROR("Illegal number of index ranges.\n");
   1305 		return -EINVAL;
   1306 	}
   1307 
   1308 	range = (SVGA3dPrimitiveRange *) decl;
   1309 	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
   1310 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
   1311 					user_surface_converter,
   1312 					&range->indexArray.surfaceId, NULL);
   1313 		if (unlikely(ret != 0))
   1314 			return ret;
   1315 	}
   1316 	return 0;
   1317 }
   1318 
   1319 
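         /**
          * vmw_cmd_tex_state - Validate an SVGA_3D_CMD_SETTEXTURESTATE command.
          *
          * @dev_priv: Pointer to a device private struct.
          * @sw_context: The software context used for this command submission.
          * @header: Pointer to the command header in the command stream.
          *
          * Checks the context and every surface bound through an
          * SVGA3D_TS_BIND_TEXTURE state, staging texture bindings when
          * guest-backed contexts are in use.
          */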
   1320 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
   1321 			     struct vmw_sw_context *sw_context,
   1322 			     SVGA3dCmdHeader *header)
   1323 {
   1324 	struct vmw_tex_state_cmd {
   1325 		SVGA3dCmdHeader header;
   1326 		SVGA3dCmdSetTextureState state;
   1327 	} *cmd;
   1328 
   1329 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
    1330 	  ((unsigned long) header + header->size + sizeof(*header));
   1331 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
   1332 		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
   1333 	struct vmw_resource_val_node *ctx_node;
   1334 	struct vmw_resource_val_node *res_node;
   1335 	int ret;
   1336 
   1337 	cmd = container_of(header, struct vmw_tex_state_cmd,
   1338 			   header);
   1339 
   1340 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
   1341 				user_context_converter, &cmd->state.cid,
   1342 				&ctx_node);
   1343 	if (unlikely(ret != 0))
   1344 		return ret;
   1345 
   1346 	for (; cur_state < last_state; ++cur_state) {
   1347 		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
   1348 			continue;
   1349 
   1350 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
   1351 					user_surface_converter,
   1352 					&cur_state->value, &res_node);
   1353 		if (unlikely(ret != 0))
   1354 			return ret;
   1355 
   1356 		if (dev_priv->has_mob) {
   1357 			struct vmw_ctx_bindinfo bi;
   1358 
   1359 			bi.ctx = ctx_node->res;
   1360 			bi.res = res_node ? res_node->res : NULL;
   1361 			bi.bt = vmw_ctx_binding_tex;
   1362 			bi.i1.texture_stage = cur_state->stage;
   1363 			vmw_context_binding_add(ctx_node->staged_bindings,
   1364 						&bi);
   1365 		}
   1366 	}
   1367 
   1368 	return 0;
   1369 }
   1370 
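         /**
          * vmw_cmd_check_define_gmrfb - Validate an SVGA_CMD_DEFINE_GMRFB command
          * by translating the guest pointer of the GMRFB definition.
          *
          * @dev_priv: Pointer to a device private struct.
          * @sw_context: The software context used for this command submission.
          * @buf: Pointer to the command in the command stream.
          */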
   1371 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
   1372 				      struct vmw_sw_context *sw_context,
   1373 				      void *buf)
   1374 {
   1375 	struct vmw_dma_buffer *vmw_bo;
   1376 	int ret;
   1377 
   1378 	struct {
   1379 		uint32_t header;
   1380 		SVGAFifoCmdDefineGMRFB body;
   1381 	} *cmd = buf;
   1382 
   1383 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
   1384 				      &cmd->body.ptr,
   1385 				      &vmw_bo);
   1386 	if (unlikely(ret != 0))
   1387 		return ret;
   1388 
   1389 	vmw_dmabuf_unreference(&vmw_bo);
   1390 
   1391 	return ret;
   1392 }
   1393 
   1394 /**
   1395  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
   1396  *
   1397  * @dev_priv: Pointer to a device private struct.
   1398  * @sw_context: The software context being used for this batch.
   1399  * @res_type: The resource type.
   1400  * @converter: Information about user-space binding for this resource type.
   1401  * @res_id: Pointer to the user-space resource handle in the command stream.
   1402  * @buf_id: Pointer to the user-space backup buffer handle in the command
   1403  * stream.
   1404  * @backup_offset: Offset of backup into MOB.
   1405  *
   1406  * This function prepares for registering a switch of backup buffers
   1407  * in the resource metadata just prior to unreserving.
   1408  */
   1409 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
   1410 				 struct vmw_sw_context *sw_context,
   1411 				 enum vmw_res_type res_type,
   1412 				 const struct vmw_user_resource_conv
   1413 				 *converter,
   1414 				 uint32_t *res_id,
   1415 				 uint32_t *buf_id,
   1416 				 unsigned long backup_offset)
   1417 {
   1418 	int ret;
   1419 	struct vmw_dma_buffer *dma_buf;
   1420 	struct vmw_resource_val_node *val_node;
   1421 
   1422 	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
   1423 				converter, res_id, &val_node);
   1424 	if (unlikely(ret != 0))
   1425 		return ret;
   1426 
   1427 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
   1428 	if (unlikely(ret != 0))
   1429 		return ret;
   1430 
   1431 	if (val_node->first_usage)
   1432 		val_node->no_buffer_needed = true;
   1433 
   1434 	vmw_dmabuf_unreference(&val_node->new_backup);
   1435 	val_node->new_backup = dma_buf;
   1436 	val_node->new_backup_offset = backup_offset;
   1437 
   1438 	return 0;
   1439 }
   1440 
   1441 /**
   1442  * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
   1443  * command
   1444  *
   1445  * @dev_priv: Pointer to a device private struct.
   1446  * @sw_context: The software context being used for this batch.
   1447  * @header: Pointer to the command header in the command stream.
   1448  */
   1449 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
   1450 				   struct vmw_sw_context *sw_context,
   1451 				   SVGA3dCmdHeader *header)
   1452 {
   1453 	struct vmw_bind_gb_surface_cmd {
   1454 		SVGA3dCmdHeader header;
   1455 		SVGA3dCmdBindGBSurface body;
   1456 	} *cmd;
   1457 
   1458 	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
   1459 
   1460 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
   1461 				     user_surface_converter,
   1462 				     &cmd->body.sid, &cmd->body.mobid,
   1463 				     0);
   1464 }
   1465 
   1466 /**
   1467  * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
   1468  * command
   1469  *
   1470  * @dev_priv: Pointer to a device private struct.
   1471  * @sw_context: The software context being used for this batch.
   1472  * @header: Pointer to the command header in the command stream.
   1473  */
   1474 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
   1475 				   struct vmw_sw_context *sw_context,
   1476 				   SVGA3dCmdHeader *header)
   1477 {
   1478 	struct vmw_gb_surface_cmd {
   1479 		SVGA3dCmdHeader header;
   1480 		SVGA3dCmdUpdateGBImage body;
   1481 	} *cmd;
   1482 
   1483 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
   1484 
   1485 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
   1486 				 user_surface_converter,
   1487 				 &cmd->body.image.sid, NULL);
   1488 }
   1489 
   1490 /**
   1491  * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
   1492  * command
   1493  *
   1494  * @dev_priv: Pointer to a device private struct.
   1495  * @sw_context: The software context being used for this batch.
   1496  * @header: Pointer to the command header in the command stream.
   1497  */
   1498 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
   1499 				     struct vmw_sw_context *sw_context,
   1500 				     SVGA3dCmdHeader *header)
   1501 {
   1502 	struct vmw_gb_surface_cmd {
   1503 		SVGA3dCmdHeader header;
   1504 		SVGA3dCmdUpdateGBSurface body;
   1505 	} *cmd;
   1506 
   1507 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
   1508 
   1509 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
   1510 				 user_surface_converter,
   1511 				 &cmd->body.sid, NULL);
   1512 }
   1513 
   1514 /**
   1515  * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
   1516  * command
   1517  *
   1518  * @dev_priv: Pointer to a device private struct.
   1519  * @sw_context: The software context being used for this batch.
   1520  * @header: Pointer to the command header in the command stream.
   1521  */
   1522 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
   1523 				     struct vmw_sw_context *sw_context,
   1524 				     SVGA3dCmdHeader *header)
   1525 {
   1526 	struct vmw_gb_surface_cmd {
   1527 		SVGA3dCmdHeader header;
   1528 		SVGA3dCmdReadbackGBImage body;
   1529 	} *cmd;
   1530 
   1531 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
   1532 
   1533 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
   1534 				 user_surface_converter,
   1535 				 &cmd->body.image.sid, NULL);
   1536 }
   1537 
   1538 /**
   1539  * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
   1540  * command
   1541  *
   1542  * @dev_priv: Pointer to a device private struct.
   1543  * @sw_context: The software context being used for this batch.
   1544  * @header: Pointer to the command header in the command stream.
   1545  */
   1546 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
   1547 				       struct vmw_sw_context *sw_context,
   1548 				       SVGA3dCmdHeader *header)
   1549 {
   1550 	struct vmw_gb_surface_cmd {
   1551 		SVGA3dCmdHeader header;
   1552 		SVGA3dCmdReadbackGBSurface body;
   1553 	} *cmd;
   1554 
   1555 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
   1556 
   1557 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
   1558 				 user_surface_converter,
   1559 				 &cmd->body.sid, NULL);
   1560 }
   1561 
   1562 /**
   1563  * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
   1564  * command
   1565  *
   1566  * @dev_priv: Pointer to a device private struct.
   1567  * @sw_context: The software context being used for this batch.
   1568  * @header: Pointer to the command header in the command stream.
   1569  */
   1570 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
   1571 				       struct vmw_sw_context *sw_context,
   1572 				       SVGA3dCmdHeader *header)
   1573 {
   1574 	struct vmw_gb_surface_cmd {
   1575 		SVGA3dCmdHeader header;
   1576 		SVGA3dCmdInvalidateGBImage body;
   1577 	} *cmd;
   1578 
   1579 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
   1580 
   1581 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
   1582 				 user_surface_converter,
   1583 				 &cmd->body.image.sid, NULL);
   1584 }
   1585 
   1586 /**
   1587  * vmw_cmd_invalidate_gb_surface - Validate an
   1588  * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
   1589  *
   1590  * @dev_priv: Pointer to a device private struct.
   1591  * @sw_context: The software context being used for this batch.
   1592  * @header: Pointer to the command header in the command stream.
   1593  */
   1594 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
   1595 					 struct vmw_sw_context *sw_context,
   1596 					 SVGA3dCmdHeader *header)
   1597 {
   1598 	struct vmw_gb_surface_cmd {
   1599 		SVGA3dCmdHeader header;
   1600 		SVGA3dCmdInvalidateGBSurface body;
   1601 	} *cmd;
   1602 
   1603 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
   1604 
   1605 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
   1606 				 user_surface_converter,
   1607 				 &cmd->body.sid, NULL);
   1608 }
   1609 
   1610 
   1611 /**
   1612  * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
   1613  * command
   1614  *
   1615  * @dev_priv: Pointer to a device private struct.
   1616  * @sw_context: The software context being used for this batch.
   1617  * @header: Pointer to the command header in the command stream.
   1618  */
   1619 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
   1620 				 struct vmw_sw_context *sw_context,
   1621 				 SVGA3dCmdHeader *header)
   1622 {
   1623 	struct vmw_shader_define_cmd {
   1624 		SVGA3dCmdHeader header;
   1625 		SVGA3dCmdDefineShader body;
   1626 	} *cmd;
   1627 	int ret;
   1628 	size_t size;
   1629 
   1630 	cmd = container_of(header, struct vmw_shader_define_cmd,
   1631 			   header);
   1632 
   1633 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
   1634 				user_context_converter, &cmd->body.cid,
   1635 				NULL);
   1636 	if (unlikely(ret != 0))
   1637 		return ret;
   1638 
   1639 	if (unlikely(!dev_priv->has_mob))
   1640 		return 0;
   1641 
   1642 	size = cmd->header.size - sizeof(cmd->body);
   1643 	ret = vmw_compat_shader_add(sw_context->fp->shman,
   1644 				    cmd->body.shid, cmd + 1,
   1645 				    cmd->body.type, size,
   1646 				    sw_context->fp->tfile,
   1647 				    &sw_context->staged_shaders);
   1648 	if (unlikely(ret != 0))
   1649 		return ret;
   1650 
   1651 	return vmw_resource_relocation_add(&sw_context->res_relocations,
   1652 					   NULL, &cmd->header.id -
   1653 					   sw_context->buf_start);
   1656 }
   1657 
   1658 /**
   1659  * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
   1660  * command
   1661  *
   1662  * @dev_priv: Pointer to a device private struct.
   1663  * @sw_context: The software context being used for this batch.
   1664  * @header: Pointer to the command header in the command stream.
   1665  */
   1666 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
   1667 				  struct vmw_sw_context *sw_context,
   1668 				  SVGA3dCmdHeader *header)
   1669 {
   1670 	struct vmw_shader_destroy_cmd {
   1671 		SVGA3dCmdHeader header;
   1672 		SVGA3dCmdDestroyShader body;
   1673 	} *cmd;
   1674 	int ret;
   1675 
   1676 	cmd = container_of(header, struct vmw_shader_destroy_cmd,
   1677 			   header);
   1678 
   1679 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
   1680 				user_context_converter, &cmd->body.cid,
   1681 				NULL);
   1682 	if (unlikely(ret != 0))
   1683 		return ret;
   1684 
   1685 	if (unlikely(!dev_priv->has_mob))
   1686 		return 0;
   1687 
   1688 	ret = vmw_compat_shader_remove(sw_context->fp->shman,
   1689 				       cmd->body.shid,
   1690 				       cmd->body.type,
   1691 				       &sw_context->staged_shaders);
   1692 	if (unlikely(ret != 0))
   1693 		return ret;
   1694 
   1695 	return vmw_resource_relocation_add(&sw_context->res_relocations,
   1696 					   NULL, &cmd->header.id -
   1697 					   sw_context->buf_start);
   1700 }
   1701 
   1702 /**
   1703  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
   1704  * command
   1705  *
   1706  * @dev_priv: Pointer to a device private struct.
   1707  * @sw_context: The software context being used for this batch.
   1708  * @header: Pointer to the command header in the command stream.
   1709  */
   1710 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
   1711 			      struct vmw_sw_context *sw_context,
   1712 			      SVGA3dCmdHeader *header)
   1713 {
   1714 	struct vmw_set_shader_cmd {
   1715 		SVGA3dCmdHeader header;
   1716 		SVGA3dCmdSetShader body;
   1717 	} *cmd;
   1718 	struct vmw_resource_val_node *ctx_node;
   1719 	int ret;
   1720 
   1721 	cmd = container_of(header, struct vmw_set_shader_cmd,
   1722 			   header);
   1723 
   1724 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
   1725 				user_context_converter, &cmd->body.cid,
   1726 				&ctx_node);
   1727 	if (unlikely(ret != 0))
   1728 		return ret;
   1729 
   1730 	if (dev_priv->has_mob) {
   1731 		struct vmw_ctx_bindinfo bi;
   1732 		struct vmw_resource_val_node *res_node;
   1733 		u32 shid = cmd->body.shid;
   1734 
   1735 		if (shid != SVGA3D_INVALID_ID)
   1736 			(void) vmw_compat_shader_lookup(sw_context->fp->shman,
   1737 							cmd->body.type,
   1738 							&shid);
   1739 
   1740 		ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
   1741 					       vmw_res_shader,
   1742 					       user_shader_converter,
   1743 					       shid,
   1744 					       &cmd->body.shid, &res_node);
   1745 		if (unlikely(ret != 0))
   1746 			return ret;
   1747 
   1748 		bi.ctx = ctx_node->res;
   1749 		bi.res = res_node ? res_node->res : NULL;
   1750 		bi.bt = vmw_ctx_binding_shader;
   1751 		bi.i1.shader_type = cmd->body.type;
   1752 		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
   1753 	}
   1754 
   1755 	return 0;
   1756 }
   1757 
   1758 /**
   1759  * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
   1760  * command
   1761  *
   1762  * @dev_priv: Pointer to a device private struct.
   1763  * @sw_context: The software context being used for this batch.
   1764  * @header: Pointer to the command header in the command stream.
   1765  */
   1766 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
   1767 				    struct vmw_sw_context *sw_context,
   1768 				    SVGA3dCmdHeader *header)
   1769 {
   1770 	struct vmw_set_shader_const_cmd {
   1771 		SVGA3dCmdHeader header;
   1772 		SVGA3dCmdSetShaderConst body;
   1773 	} *cmd;
   1774 	int ret;
   1775 
   1776 	cmd = container_of(header, struct vmw_set_shader_const_cmd,
   1777 			   header);
   1778 
   1779 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
   1780 				user_context_converter, &cmd->body.cid,
   1781 				NULL);
   1782 	if (unlikely(ret != 0))
   1783 		return ret;
   1784 
   1785 	if (dev_priv->has_mob)
   1786 		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
   1787 
   1788 	return 0;
   1789 }
   1790 
   1791 /**
   1792  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
   1793  * command
   1794  *
   1795  * @dev_priv: Pointer to a device private struct.
   1796  * @sw_context: The software context being used for this batch.
   1797  * @header: Pointer to the command header in the command stream.
   1798  */
   1799 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
   1800 				  struct vmw_sw_context *sw_context,
   1801 				  SVGA3dCmdHeader *header)
   1802 {
   1803 	struct vmw_bind_gb_shader_cmd {
   1804 		SVGA3dCmdHeader header;
   1805 		SVGA3dCmdBindGBShader body;
   1806 	} *cmd;
   1807 
   1808 	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
   1809 			   header);
   1810 
   1811 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
   1812 				     user_shader_converter,
   1813 				     &cmd->body.shid, &cmd->body.mobid,
   1814 				     cmd->body.offsetInBytes);
   1815 }
   1816 
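         /**
          * vmw_cmd_check_not_3d - Validate a legacy (non-3D) SVGA command
          *
          * @dev_priv: Pointer to a device private struct.
          * @sw_context: The software context being used for this batch.
          * @buf: Pointer to the command in the command stream.
          * @size: On input, the number of bytes remaining in the command stream.
          * On output, the size of this command.
          *
          * Legacy SVGA commands consist of a 32-bit command id followed by a
          * fixed-size body, so the command size is derived from the id alone.
          * These commands are only accepted in kernel-submitted command streams.
          */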
   1817 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
   1818 				struct vmw_sw_context *sw_context,
   1819 				void *buf, uint32_t *size)
   1820 {
   1821 	uint32_t size_remaining = *size;
   1822 	uint32_t cmd_id;
   1823 
   1824 	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
   1825 	switch (cmd_id) {
   1826 	case SVGA_CMD_UPDATE:
   1827 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
   1828 		break;
   1829 	case SVGA_CMD_DEFINE_GMRFB:
   1830 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
   1831 		break;
   1832 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
   1833 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
   1834 		break;
   1835 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
    1836 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
   1837 		break;
   1838 	default:
   1839 		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
   1840 		return -EINVAL;
   1841 	}
   1842 
   1843 	if (*size > size_remaining) {
   1844 		DRM_ERROR("Invalid SVGA command (size mismatch):"
   1845 			  " %u.\n", cmd_id);
   1846 		return -EINVAL;
   1847 	}
   1848 
   1849 	if (unlikely(!sw_context->kernel)) {
   1850 		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
   1851 		return -EPERM;
   1852 	}
   1853 
   1854 	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
   1855 		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
   1856 
   1857 	return 0;
   1858 }
   1859 
   1860 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
   1861 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
   1862 		    false, false, false),
   1863 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
   1864 		    false, false, false),
   1865 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
   1866 		    true, false, false),
   1867 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
   1868 		    true, false, false),
   1869 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
   1870 		    true, false, false),
   1871 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
   1872 		    false, false, false),
   1873 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
   1874 		    false, false, false),
   1875 	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
   1876 		    true, false, false),
   1877 	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
   1878 		    true, false, false),
   1879 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
   1880 		    true, false, false),
   1881 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
   1882 		    &vmw_cmd_set_render_target_check, true, false, false),
   1883 	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
   1884 		    true, false, false),
   1885 	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
   1886 		    true, false, false),
   1887 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
   1888 		    true, false, false),
   1889 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
   1890 		    true, false, false),
   1891 	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
   1892 		    true, false, false),
   1893 	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
   1894 		    true, false, false),
   1895 	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
   1896 		    true, false, false),
   1897 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
   1898 		    false, false, false),
   1899 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
   1900 		    true, false, false),
   1901 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
   1902 		    true, false, false),
   1903 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
   1904 		    true, false, false),
   1905 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
   1906 		    true, false, false),
   1907 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
   1908 		    true, false, false),
   1909 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
   1910 		    true, false, false),
   1911 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
   1912 		    true, false, false),
   1913 	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
   1914 		    true, false, false),
   1915 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
   1916 		    true, false, false),
   1917 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
   1918 		    true, false, false),
   1919 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
   1920 		    &vmw_cmd_blt_surf_screen_check, false, false, false),
   1921 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
   1922 		    false, false, false),
   1923 	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
   1924 		    false, false, false),
   1925 	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
   1926 		    false, false, false),
   1927 	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
   1928 		    false, false, false),
   1929 	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
   1930 		    false, false, false),
   1931 	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
   1932 		    false, false, false),
   1933 	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
   1934 		    false, false, false),
   1935 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
   1936 		    false, false, false),
   1937 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
   1938 		    false, false, false),
   1939 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
   1940 		    false, false, false),
   1941 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
   1942 		    false, false, false),
   1943 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
   1944 		    false, false, false),
   1945 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
   1946 		    false, false, false),
   1947 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
   1948 		    false, false, true),
   1949 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
   1950 		    false, false, true),
   1951 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
   1952 		    false, false, true),
   1953 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
   1954 		    false, false, true),
   1955 	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
   1956 		    false, false, true),
   1957 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
   1958 		    false, false, true),
   1959 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
   1960 		    false, false, true),
   1961 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
   1962 		    false, false, true),
   1963 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
   1964 		    true, false, true),
   1965 	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
   1966 		    false, false, true),
   1967 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
   1968 		    true, false, true),
   1969 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
   1970 		    &vmw_cmd_update_gb_surface, true, false, true),
   1971 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
   1972 		    &vmw_cmd_readback_gb_image, true, false, true),
   1973 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
   1974 		    &vmw_cmd_readback_gb_surface, true, false, true),
   1975 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
   1976 		    &vmw_cmd_invalidate_gb_image, true, false, true),
   1977 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
   1978 		    &vmw_cmd_invalidate_gb_surface, true, false, true),
   1979 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
   1980 		    false, false, true),
   1981 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
   1982 		    false, false, true),
   1983 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
   1984 		    false, false, true),
   1985 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
   1986 		    false, false, true),
   1987 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
   1988 		    false, false, true),
   1989 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
   1990 		    false, false, true),
   1991 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
   1992 		    true, false, true),
   1993 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
   1994 		    false, false, true),
   1995 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
   1996 		    false, false, false),
   1997 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
   1998 		    true, false, true),
   1999 	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
   2000 		    true, false, true),
   2001 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
   2002 		    true, false, true),
   2003 	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
   2004 		    true, false, true),
   2005 	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
   2006 		    false, false, true),
   2007 	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
   2008 		    false, false, true),
   2009 	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
   2010 		    false, false, true),
   2011 	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
   2012 		    false, false, true),
   2013 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
   2014 		    false, false, true),
   2015 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
   2016 		    false, false, true),
   2017 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
   2018 		    false, false, true),
   2019 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
   2020 		    false, false, true),
   2021 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
   2022 		    false, false, true),
   2023 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
   2024 		    false, false, true),
   2025 	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
   2026 		    true, false, true)
   2027 };
   2028 
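         /**
          * vmw_cmd_check - Validate a single command in the command stream
          *
          * @dev_priv: Pointer to a device private struct.
          * @sw_context: The software context being used for this batch.
          * @buf: Pointer to the command in the command stream.
          * @size: On input, the number of bytes remaining in the command stream.
          * On output, the size of this command.
          *
          * Dispatches legacy commands to vmw_cmd_check_not_3d() and looks up 3D
          * commands in vmw_cmd_entries, enforcing each entry's user-access and
          * guest-backed restrictions before calling its validation function.
          */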
   2029 static int vmw_cmd_check(struct vmw_private *dev_priv,
   2030 			 struct vmw_sw_context *sw_context,
   2031 			 void *buf, uint32_t *size)
   2032 {
   2033 	uint32_t cmd_id;
   2034 	uint32_t size_remaining = *size;
   2035 	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
   2036 	int ret;
   2037 	const struct vmw_cmd_entry *entry;
   2038 	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
   2039 
   2040 	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
    2041 	/* Handle any non-3D commands */
   2042 	if (unlikely(cmd_id < SVGA_CMD_MAX))
   2043 		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
   2044 
   2045 
   2046 	cmd_id = le32_to_cpu(header->id);
   2047 	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
   2048 
   2049 	cmd_id -= SVGA_3D_CMD_BASE;
   2050 	if (unlikely(*size > size_remaining))
   2051 		goto out_invalid;
   2052 
   2053 	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
   2054 		goto out_invalid;
   2055 
   2056 	entry = &vmw_cmd_entries[cmd_id];
   2057 	if (unlikely(!entry->func))
   2058 		goto out_invalid;
   2059 
   2060 	if (unlikely(!entry->user_allow && !sw_context->kernel))
   2061 		goto out_privileged;
   2062 
   2063 	if (unlikely(entry->gb_disable && gb))
   2064 		goto out_old;
   2065 
   2066 	if (unlikely(entry->gb_enable && !gb))
   2067 		goto out_new;
   2068 
   2069 	ret = entry->func(dev_priv, sw_context, header);
   2070 	if (unlikely(ret != 0))
   2071 		goto out_invalid;
   2072 
   2073 	return 0;
   2074 out_invalid:
   2075 	DRM_ERROR("Invalid SVGA3D command: %d\n",
   2076 		  cmd_id + SVGA_3D_CMD_BASE);
   2077 	return -EINVAL;
   2078 out_privileged:
   2079 	DRM_ERROR("Privileged SVGA3D command: %d\n",
   2080 		  cmd_id + SVGA_3D_CMD_BASE);
   2081 	return -EPERM;
   2082 out_old:
   2083 	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
   2084 		  cmd_id + SVGA_3D_CMD_BASE);
   2085 	return -EINVAL;
   2086 out_new:
   2087 	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
   2088 		  cmd_id + SVGA_3D_CMD_BASE);
   2089 	return -EINVAL;
   2090 }
   2091 
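         /**
          * vmw_cmd_check_all - Validate all commands in a command stream
          *
          * @dev_priv: Pointer to a device private struct.
          * @sw_context: The software context being used for this batch.
          * @buf: Pointer to the start of the command stream.
          * @size: Size of the command stream in bytes.
          *
          * Walks the command stream, validating one command at a time until
          * the stream is exhausted or a command fails validation.
          */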
   2092 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
   2093 			     struct vmw_sw_context *sw_context,
   2094 			     void *buf,
   2095 			     uint32_t size)
   2096 {
   2097 	int32_t cur_size = size;
   2098 	int ret;
   2099 
   2100 	sw_context->buf_start = buf;
   2101 
   2102 	while (cur_size > 0) {
   2103 		size = cur_size;
   2104 		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
   2105 		if (unlikely(ret != 0))
   2106 			return ret;
   2107 		buf = (void *)((unsigned long) buf + size);
   2108 		cur_size -= size;
   2109 	}
   2110 
   2111 	if (unlikely(cur_size != 0)) {
   2112 		DRM_ERROR("Command verifier out of sync.\n");
   2113 		return -EINVAL;
   2114 	}
   2115 
   2116 	return 0;
   2117 }
   2118 
   2119 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
   2120 {
   2121 	sw_context->cur_reloc = 0;
   2122 }
   2123 
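         /**
          * vmw_apply_relocations - Patch buffer object locations into the
          * command stream
          *
          * @sw_context: The software context being used for this batch.
          *
          * After validation, each relocation is resolved to the final placement
          * of its buffer object: a VRAM offset in the framebuffer GMR, a GMR id
          * or a MOB id.
          */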
   2124 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
   2125 {
   2126 	uint32_t i;
   2127 	struct vmw_relocation *reloc;
   2128 	struct ttm_validate_buffer *validate;
   2129 	struct ttm_buffer_object *bo;
   2130 
   2131 	for (i = 0; i < sw_context->cur_reloc; ++i) {
   2132 		reloc = &sw_context->relocs[i];
   2133 		validate = &sw_context->val_bufs[reloc->index].base;
   2134 		bo = validate->bo;
   2135 		switch (bo->mem.mem_type) {
   2136 		case TTM_PL_VRAM:
   2137 			reloc->location->offset += bo->offset;
   2138 			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
   2139 			break;
   2140 		case VMW_PL_GMR:
   2141 			reloc->location->gmrId = bo->mem.start;
   2142 			break;
   2143 		case VMW_PL_MOB:
   2144 			*reloc->mob_loc = bo->mem.start;
   2145 			break;
   2146 		default:
   2147 			BUG();
   2148 		}
   2149 	}
   2150 	vmw_free_relocations(sw_context);
   2151 }
   2152 
   2153 /**
    2154  * vmw_resource_list_unreference - Free up a resource list and unreference
   2155  * all resources referenced by it.
   2156  *
   2157  * @list: The resource list.
   2158  */
   2159 static void vmw_resource_list_unreference(struct list_head *list)
   2160 {
   2161 	struct vmw_resource_val_node *val, *val_next;
   2162 
   2163 	/*
   2164 	 * Drop references to resources held during command submission.
   2165 	 */
   2166 
   2167 	list_for_each_entry_safe(val, val_next, list, head) {
   2168 		list_del_init(&val->head);
   2169 		vmw_resource_unreference(&val->res);
   2170 		if (unlikely(val->staged_bindings))
   2171 			kfree(val->staged_bindings);
   2172 		kfree(val);
   2173 	}
   2174 }
   2175 
   2176 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
   2177 {
   2178 	struct vmw_validate_buffer *entry, *next;
   2179 	struct vmw_resource_val_node *val;
   2180 
   2181 	/*
   2182 	 * Drop references to DMA buffers held during command submission.
   2183 	 */
   2184 	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
   2185 				 base.head) {
   2186 		list_del(&entry->base.head);
   2187 		ttm_bo_unref(&entry->base.bo);
   2188 		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
   2189 		sw_context->cur_val_buf--;
   2190 	}
   2191 	BUG_ON(sw_context->cur_val_buf != 0);
   2192 
   2193 	list_for_each_entry(val, &sw_context->resource_list, head)
   2194 		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
   2195 }
   2196 
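         /**
          * vmw_validate_single_buffer - Validate (place) a single buffer object
          *
          * @dev_priv: Pointer to a device private struct.
          * @bo: The buffer object to validate.
          * @validate_as_mob: Whether to place the buffer as a MOB rather than
          * in VRAM / GMR memory.
          *
          * Pinned buffers are left where they are; all other buffers are placed
          * using the fallback strategy described in the function body.
          */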
   2197 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
   2198 				      struct ttm_buffer_object *bo,
   2199 				      bool validate_as_mob)
   2200 {
   2201 	int ret;
   2202 
   2203 
   2204 	/*
   2205 	 * Don't validate pinned buffers.
   2206 	 */
   2207 
   2208 	if (bo == dev_priv->pinned_bo ||
   2209 	    (bo == dev_priv->dummy_query_bo &&
   2210 	     dev_priv->dummy_query_bo_pinned))
   2211 		return 0;
   2212 
   2213 	if (validate_as_mob)
   2214 		return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
   2215 
    2216 	/*
   2217 	 * Put BO in VRAM if there is space, otherwise as a GMR.
   2218 	 * If there is no space in VRAM and GMR ids are all used up,
   2219 	 * start evicting GMRs to make room. If the DMA buffer can't be
   2220 	 * used as a GMR, this will return -ENOMEM.
   2221 	 */
   2222 
   2223 	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
   2224 	if (likely(ret == 0 || ret == -ERESTARTSYS))
   2225 		return ret;
   2226 
    2227 	/*
   2228 	 * If that failed, try VRAM again, this time evicting
   2229 	 * previous contents.
   2230 	 */
   2231 
   2232 	DRM_INFO("Falling through to VRAM.\n");
   2233 	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
   2234 	return ret;
   2235 }
   2236 
   2237 static int vmw_validate_buffers(struct vmw_private *dev_priv,
   2238 				struct vmw_sw_context *sw_context)
   2239 {
   2240 	struct vmw_validate_buffer *entry;
   2241 	int ret;
   2242 
   2243 	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
   2244 		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
   2245 						 entry->validate_as_mob);
   2246 		if (unlikely(ret != 0))
   2247 			return ret;
   2248 	}
   2249 	return 0;
   2250 }
   2251 
   2252 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
   2253 				 uint32_t size)
   2254 {
   2255 	if (likely(sw_context->cmd_bounce_size >= size))
   2256 		return 0;
   2257 
   2258 	if (sw_context->cmd_bounce_size == 0)
   2259 		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
   2260 
   2261 	while (sw_context->cmd_bounce_size < size) {
   2262 		sw_context->cmd_bounce_size =
   2263 			PAGE_ALIGN(sw_context->cmd_bounce_size +
   2264 				   (sw_context->cmd_bounce_size >> 1));
   2265 	}
   2266 
   2267 	if (sw_context->cmd_bounce != NULL)
   2268 		vfree(sw_context->cmd_bounce);
   2269 
   2270 	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
   2271 
   2272 	if (sw_context->cmd_bounce == NULL) {
   2273 		DRM_ERROR("Failed to allocate command bounce buffer.\n");
   2274 		sw_context->cmd_bounce_size = 0;
   2275 		return -ENOMEM;
   2276 	}
   2277 
   2278 	return 0;
   2279 }
   2280 
   2281 /**
   2282  * vmw_execbuf_fence_commands - create and submit a command stream fence
   2283  *
   2284  * Creates a fence object and submits a command stream marker.
    2285  * If this fails for some reason, we sync the fifo and set *@p_fence to NULL.
   2286  * It is then safe to fence buffers with a NULL pointer.
   2287  *
    2288  * If @p_handle is non-NULL, @file_priv must also be non-NULL, and a
    2289  * user-space handle to the fence object is created; otherwise it is not.
   2290  */
   2291 
   2292 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
   2293 			       struct vmw_private *dev_priv,
   2294 			       struct vmw_fence_obj **p_fence,
   2295 			       uint32_t *p_handle)
   2296 {
   2297 	uint32_t sequence;
   2298 	int ret;
   2299 	bool synced = false;
   2300 
   2301 	/* p_handle implies file_priv. */
   2302 	BUG_ON(p_handle != NULL && file_priv == NULL);
   2303 
   2304 	ret = vmw_fifo_send_fence(dev_priv, &sequence);
   2305 	if (unlikely(ret != 0)) {
   2306 		DRM_ERROR("Fence submission error. Syncing.\n");
   2307 		synced = true;
   2308 	}
   2309 
   2310 	if (p_handle != NULL)
   2311 		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
   2312 					    sequence,
   2313 					    DRM_VMW_FENCE_FLAG_EXEC,
   2314 					    p_fence, p_handle);
   2315 	else
   2316 		ret = vmw_fence_create(dev_priv->fman, sequence,
   2317 				       DRM_VMW_FENCE_FLAG_EXEC,
   2318 				       p_fence);
   2319 
   2320 	if (unlikely(ret != 0 && !synced)) {
   2321 		(void) vmw_fallback_wait(dev_priv, false, false,
   2322 					 sequence, false,
   2323 					 VMW_FENCE_WAIT_TIMEOUT);
   2324 		*p_fence = NULL;
   2325 	}
   2326 
   2327 	return 0;
   2328 }
   2329 
   2330 /**
   2331  * vmw_execbuf_copy_fence_user - copy fence object information to
   2332  * user-space.
   2333  *
   2334  * @dev_priv: Pointer to a vmw_private struct.
   2335  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
   2336  * @ret: Return value from fence object creation.
   2337  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
   2338  * which the information should be copied.
    2339  * @fence: Pointer to the fence object.
   2340  * @fence_handle: User-space fence handle.
   2341  *
   2342  * This function copies fence information to user-space. If copying fails,
    2343  * the user-space struct drm_vmw_fence_rep::error member should be left
    2344  * untouched; if user-space has preloaded it with -EFAULT, the error can
    2345  * then be detected.
    2346  * If copying fails, user-space will also be unable to signal the fence
    2347  * object, so we wait for it immediately and then unreference the
    2348  * user-space reference.
   2349  */
   2350 void
   2351 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
   2352 			    struct vmw_fpriv *vmw_fp,
   2353 			    int ret,
   2354 			    struct drm_vmw_fence_rep __user *user_fence_rep,
   2355 			    struct vmw_fence_obj *fence,
   2356 			    uint32_t fence_handle)
   2357 {
   2358 	struct drm_vmw_fence_rep fence_rep;
   2359 
   2360 	if (user_fence_rep == NULL)
   2361 		return;
   2362 
   2363 	memset(&fence_rep, 0, sizeof(fence_rep));
   2364 
   2365 	fence_rep.error = ret;
   2366 	if (ret == 0) {
   2367 		BUG_ON(fence == NULL);
   2368 
   2369 		fence_rep.handle = fence_handle;
   2370 		fence_rep.seqno = fence->seqno;
   2371 		vmw_update_seqno(dev_priv, &dev_priv->fifo);
   2372 		fence_rep.passed_seqno = dev_priv->last_read_seqno;
   2373 	}
   2374 
   2375 	/*
   2376 	 * copy_to_user errors will be detected by user space not
   2377 	 * seeing fence_rep::error filled in. Typically
   2378 	 * user-space would have pre-set that member to -EFAULT.
   2379 	 */
   2380 	ret = copy_to_user(user_fence_rep, &fence_rep,
   2381 			   sizeof(fence_rep));
   2382 
   2383 	/*
   2384 	 * User-space lost the fence object. We need to sync
   2385 	 * and unreference the handle.
   2386 	 */
   2387 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
   2388 		ttm_ref_object_base_unref(vmw_fp->tfile,
   2389 					  fence_handle, TTM_REF_USAGE);
   2390 		DRM_ERROR("Fence copy error. Syncing.\n");
   2391 		(void) vmw_fence_obj_wait(fence, fence->signal_mask,
   2392 					  false, false,
   2393 					  VMW_FENCE_WAIT_TIMEOUT);
   2394 	}
   2395 }
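
         /*
          * Illustrative user-space sketch (not part of this driver) of the
          * fence_rep contract documented above: the caller pre-loads
          * drm_vmw_fence_rep::error with -EFAULT so that a failed copy_to_user()
          * in the kernel is still detectable. The ioctl invocation below is a
          * hypothetical example using libdrm's drmCommandWrite().
          *
          *	struct drm_vmw_execbuf_arg arg;
          *	struct drm_vmw_fence_rep rep;
          *
          *	memset(&arg, 0, sizeof(arg));
          *	arg.version = DRM_VMW_EXECBUF_VERSION;
          *	arg.fence_rep = (unsigned long) &rep;
          *	rep.error = -EFAULT;
          *	... set arg.commands and arg.command_size ...
          *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
          *	if (rep.error != 0)
          *		... no fence information was returned; don't wait on it ...
          */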
   2396 
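         /**
          * vmw_execbuf_process - Validate and submit a command stream
          *
          * @file_priv: Pointer to the struct drm_file representing the caller.
          * @dev_priv: Pointer to a device private struct.
          * @user_commands: User-space pointer to the command stream, or NULL if
          * @kernel_commands is used instead.
          * @kernel_commands: Kernel pointer to the command stream, or NULL.
          * @command_size: Size of the command stream in bytes.
          * @throttle_us: If non-zero, throttle command submission based on the
          * fifo lag, in microseconds.
          * @user_fence_rep: Optional user-space address of a struct
          * drm_vmw_fence_rep to receive fence information.
          * @out_fence: Optional location to return a referenced fence object.
          *
          * Copies in the command stream if needed, validates all commands and
          * resources, applies relocations, submits the stream to the fifo and
          * fences the submission.
          */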
   2397 int vmw_execbuf_process(struct drm_file *file_priv,
   2398 			struct vmw_private *dev_priv,
   2399 			void __user *user_commands,
   2400 			void *kernel_commands,
   2401 			uint32_t command_size,
   2402 			uint64_t throttle_us,
   2403 			struct drm_vmw_fence_rep __user *user_fence_rep,
   2404 			struct vmw_fence_obj **out_fence)
   2405 {
   2406 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
   2407 	struct vmw_fence_obj *fence = NULL;
   2408 	struct vmw_resource *error_resource;
   2409 	struct list_head resource_list;
   2410 	struct ww_acquire_ctx ticket;
   2411 	uint32_t handle;
   2412 	void *cmd;
   2413 	int ret;
   2414 
   2415 	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
   2416 	if (unlikely(ret != 0))
   2417 		return -ERESTARTSYS;
   2418 
   2419 	if (kernel_commands == NULL) {
   2420 		sw_context->kernel = false;
   2421 
   2422 		ret = vmw_resize_cmd_bounce(sw_context, command_size);
   2423 		if (unlikely(ret != 0))
   2424 			goto out_unlock;
   2425 
   2426 
   2427 		ret = copy_from_user(sw_context->cmd_bounce,
   2428 				     user_commands, command_size);
   2429 
   2430 		if (unlikely(ret != 0)) {
   2431 			ret = -EFAULT;
   2432 			DRM_ERROR("Failed copying commands.\n");
   2433 			goto out_unlock;
   2434 		}
   2435 		kernel_commands = sw_context->cmd_bounce;
   2436 	} else
   2437 		sw_context->kernel = true;
   2438 
   2439 	sw_context->fp = vmw_fpriv(file_priv);
   2440 	sw_context->cur_reloc = 0;
   2441 	sw_context->cur_val_buf = 0;
   2442 	sw_context->fence_flags = 0;
   2443 	INIT_LIST_HEAD(&sw_context->resource_list);
   2444 	sw_context->cur_query_bo = dev_priv->pinned_bo;
   2445 	sw_context->last_query_ctx = NULL;
   2446 	sw_context->needs_post_query_barrier = false;
   2447 	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
   2448 	INIT_LIST_HEAD(&sw_context->validate_nodes);
   2449 	INIT_LIST_HEAD(&sw_context->res_relocations);
   2450 	if (!sw_context->res_ht_initialized) {
   2451 		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
   2452 		if (unlikely(ret != 0))
   2453 			goto out_unlock;
   2454 		sw_context->res_ht_initialized = true;
   2455 	}
   2456 	INIT_LIST_HEAD(&sw_context->staged_shaders);
   2457 
   2458 	INIT_LIST_HEAD(&resource_list);
   2459 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
   2460 				command_size);
   2461 	if (unlikely(ret != 0))
   2462 		goto out_err_nores;
   2463 
   2464 	ret = vmw_resources_reserve(sw_context);
   2465 	if (unlikely(ret != 0))
   2466 		goto out_err_nores;
   2467 
   2468 	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
   2469 	if (unlikely(ret != 0))
   2470 		goto out_err;
   2471 
   2472 	ret = vmw_validate_buffers(dev_priv, sw_context);
   2473 	if (unlikely(ret != 0))
   2474 		goto out_err;
   2475 
   2476 	ret = vmw_resources_validate(sw_context);
   2477 	if (unlikely(ret != 0))
   2478 		goto out_err;
   2479 
   2480 	if (throttle_us) {
   2481 		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
   2482 				   throttle_us);
   2483 
   2484 		if (unlikely(ret != 0))
   2485 			goto out_err;
   2486 	}
   2487 
   2488 	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
   2489 	if (unlikely(ret != 0)) {
   2490 		ret = -ERESTARTSYS;
   2491 		goto out_err;
   2492 	}
   2493 
   2494 	if (dev_priv->has_mob) {
   2495 		ret = vmw_rebind_contexts(sw_context);
   2496 		if (unlikely(ret != 0))
   2497 			goto out_unlock_binding;
   2498 	}
   2499 
   2500 	cmd = vmw_fifo_reserve(dev_priv, command_size);
   2501 	if (unlikely(cmd == NULL)) {
   2502 		DRM_ERROR("Failed reserving fifo space for commands.\n");
   2503 		ret = -ENOMEM;
   2504 		goto out_unlock_binding;
   2505 	}
   2506 
   2507 	vmw_apply_relocations(sw_context);
   2508 	memcpy(cmd, kernel_commands, command_size);
   2509 
   2510 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
   2511 	vmw_resource_relocations_free(&sw_context->res_relocations);
   2512 
   2513 	vmw_fifo_commit(dev_priv, command_size);
   2514 
   2515 	vmw_query_bo_switch_commit(dev_priv, sw_context);
   2516 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
   2517 					 &fence,
   2518 					 (user_fence_rep) ? &handle : NULL);
   2519 	/*
   2520 	 * This error is harmless, because if fence submission fails,
   2521 	 * vmw_fifo_send_fence will sync. The error will be propagated to
    2522 	 * user-space in @fence_rep.
   2523 	 */
   2524 
   2525 	if (ret != 0)
   2526 		DRM_ERROR("Fence submission error. Syncing.\n");
   2527 
   2528 	vmw_resource_list_unreserve(&sw_context->resource_list, false);
   2529 	mutex_unlock(&dev_priv->binding_mutex);
   2530 
   2531 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
   2532 				    (void *) fence);
   2533 
   2534 	if (unlikely(dev_priv->pinned_bo != NULL &&
   2535 		     !dev_priv->query_cid_valid))
   2536 		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
   2537 
   2538 	vmw_clear_validations(sw_context);
   2539 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
   2540 				    user_fence_rep, fence, handle);
   2541 
   2542 	/* Don't unreference when handing fence out */
   2543 	if (unlikely(out_fence != NULL)) {
   2544 		*out_fence = fence;
   2545 		fence = NULL;
   2546 	} else if (likely(fence != NULL)) {
   2547 		vmw_fence_obj_unreference(&fence);
   2548 	}
   2549 
   2550 	list_splice_init(&sw_context->resource_list, &resource_list);
   2551 	vmw_compat_shaders_commit(sw_context->fp->shman,
   2552 				  &sw_context->staged_shaders);
   2553 	mutex_unlock(&dev_priv->cmdbuf_mutex);
   2554 
   2555 	/*
   2556 	 * Unreference resources outside of the cmdbuf_mutex to
   2557 	 * avoid deadlocks in resource destruction paths.
   2558 	 */
   2559 	vmw_resource_list_unreference(&resource_list);
   2560 
   2561 	return 0;
   2562 
   2563 out_unlock_binding:
   2564 	mutex_unlock(&dev_priv->binding_mutex);
   2565 out_err:
   2566 	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
   2567 out_err_nores:
   2568 	vmw_resource_list_unreserve(&sw_context->resource_list, true);
   2569 	vmw_resource_relocations_free(&sw_context->res_relocations);
   2570 	vmw_free_relocations(sw_context);
   2571 	vmw_clear_validations(sw_context);
   2572 	if (unlikely(dev_priv->pinned_bo != NULL &&
   2573 		     !dev_priv->query_cid_valid))
   2574 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
   2575 out_unlock:
   2576 	list_splice_init(&sw_context->resource_list, &resource_list);
   2577 	error_resource = sw_context->error_resource;
   2578 	sw_context->error_resource = NULL;
   2579 	vmw_compat_shaders_revert(sw_context->fp->shman,
   2580 				  &sw_context->staged_shaders);
   2581 	mutex_unlock(&dev_priv->cmdbuf_mutex);
   2582 
   2583 	/*
   2584 	 * Unreference resources outside of the cmdbuf_mutex to
   2585 	 * avoid deadlocks in resource destruction paths.
   2586 	 */
   2587 	vmw_resource_list_unreference(&resource_list);
   2588 	if (unlikely(error_resource != NULL))
   2589 		vmw_resource_unreference(&error_resource);
   2590 
   2591 	return ret;
   2592 }
   2593 
   2594 /**
   2595  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
   2596  *
   2597  * @dev_priv: The device private structure.
   2598  *
   2599  * This function is called to idle the fifo and unpin the query buffer
   2600  * if the normal way to do this hits an error, which should typically be
   2601  * extremely rare.
   2602  */
   2603 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
   2604 {
   2605 	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
   2606 
   2607 	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
   2608 	vmw_bo_pin(dev_priv->pinned_bo, false);
   2609 	vmw_bo_pin(dev_priv->dummy_query_bo, false);
   2610 	dev_priv->dummy_query_bo_pinned = false;
   2611 }
   2612 
   2613 
   2614 /**
   2615  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
   2616  * query bo.
   2617  *
   2618  * @dev_priv: The device private structure.
   2619  * @fence: If non-NULL should point to a struct vmw_fence_obj issued
   2620  * _after_ a query barrier that flushes all queries touching the current
   2621  * buffer pointed to by @dev_priv->pinned_bo
   2622  *
   2623  * This function should be used to unpin the pinned query bo, or
   2624  * as a query barrier when we need to make sure that all queries have
   2625  * finished before the next fifo command. (For example on hardware
   2626  * context destructions where the hardware may otherwise leak unfinished
   2627  * queries).
   2628  *
    2629  * This function does not return any failure codes, but makes attempts
   2630  * to do safe unpinning in case of errors.
   2631  *
   2632  * The function will synchronize on the previous query barrier, and will
   2633  * thus not finish until that barrier has executed.
   2634  *
    2635  * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
   2636  * before calling this function.
   2637  */
   2638 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
   2639 				     struct vmw_fence_obj *fence)
   2640 {
   2641 	int ret = 0;
   2642 	struct list_head validate_list;
   2643 	struct ttm_validate_buffer pinned_val, query_val;
   2644 	struct vmw_fence_obj *lfence = NULL;
   2645 	struct ww_acquire_ctx ticket;
   2646 
   2647 	if (dev_priv->pinned_bo == NULL)
   2648 		goto out_unlock;
   2649 
   2650 	INIT_LIST_HEAD(&validate_list);
   2651 
   2652 	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
   2653 	list_add_tail(&pinned_val.head, &validate_list);
   2654 
   2655 	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
   2656 	list_add_tail(&query_val.head, &validate_list);
   2657 
   2658 	do {
   2659 		ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
   2660 	} while (ret == -ERESTARTSYS);
   2661 
   2662 	if (unlikely(ret != 0)) {
   2663 		vmw_execbuf_unpin_panic(dev_priv);
   2664 		goto out_no_reserve;
   2665 	}
   2666 
   2667 	if (dev_priv->query_cid_valid) {
   2668 		BUG_ON(fence != NULL);
   2669 		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
   2670 		if (unlikely(ret != 0)) {
   2671 			vmw_execbuf_unpin_panic(dev_priv);
   2672 			goto out_no_emit;
   2673 		}
   2674 		dev_priv->query_cid_valid = false;
   2675 	}
   2676 
   2677 	vmw_bo_pin(dev_priv->pinned_bo, false);
   2678 	vmw_bo_pin(dev_priv->dummy_query_bo, false);
   2679 	dev_priv->dummy_query_bo_pinned = false;
   2680 
   2681 	if (fence == NULL) {
   2682 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
   2683 						  NULL);
   2684 		fence = lfence;
   2685 	}
   2686 	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
   2687 	if (lfence != NULL)
   2688 		vmw_fence_obj_unreference(&lfence);
   2689 
   2690 	ttm_bo_unref(&query_val.bo);
   2691 	ttm_bo_unref(&pinned_val.bo);
   2692 	ttm_bo_unref(&dev_priv->pinned_bo);
   2693 
   2694 out_unlock:
   2695 	return;
   2696 
   2697 out_no_emit:
   2698 	ttm_eu_backoff_reservation(&ticket, &validate_list);
   2699 out_no_reserve:
   2700 	ttm_bo_unref(&query_val.bo);
   2701 	ttm_bo_unref(&pinned_val.bo);
   2702 	ttm_bo_unref(&dev_priv->pinned_bo);
   2703 }
   2704 
   2705 /**
   2706  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
   2707  * query bo.
   2708  *
   2709  * @dev_priv: The device private structure.
   2710  *
   2711  * This function should be used to unpin the pinned query bo, or
   2712  * as a query barrier when we need to make sure that all queries have
   2713  * finished before the next fifo command. (For example on hardware
   2714  * context destructions where the hardware may otherwise leak unfinished
   2715  * queries).
   2716  *
    2717  * This function does not return any failure codes, but makes attempts
   2718  * to do safe unpinning in case of errors.
   2719  *
   2720  * The function will synchronize on the previous query barrier, and will
   2721  * thus not finish until that barrier has executed.
   2722  */
   2723 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
   2724 {
   2725 	mutex_lock(&dev_priv->cmdbuf_mutex);
   2726 	if (dev_priv->query_cid_valid)
   2727 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
   2728 	mutex_unlock(&dev_priv->cmdbuf_mutex);
   2729 }
   2730 
   2731 
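         /**
          * vmw_execbuf_ioctl - Entry point for the execbuf ioctl
          *
          * @dev: Pointer to the drm device.
          * @data: Pointer to the ioctl argument (struct drm_vmw_execbuf_arg).
          * @file_priv: Pointer to the calling file.
          *
          * Checks the argument version, takes the read lock and hands the
          * user-space command stream off to vmw_execbuf_process().
          */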
   2732 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
   2733 		      struct drm_file *file_priv)
   2734 {
   2735 	struct vmw_private *dev_priv = vmw_priv(dev);
   2736 	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
   2737 	int ret;
   2738 
   2739 	/*
   2740 	 * This will allow us to extend the ioctl argument while
   2741 	 * maintaining backwards compatibility:
   2742 	 * We take different code paths depending on the value of
   2743 	 * arg->version.
   2744 	 */
   2745 
   2746 	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
   2747 		DRM_ERROR("Incorrect execbuf version.\n");
   2748 		DRM_ERROR("You're running outdated experimental "
    2749 			  "vmwgfx user-space drivers.\n");
   2750 		return -EINVAL;
   2751 	}
   2752 
   2753 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
   2754 	if (unlikely(ret != 0))
   2755 		return ret;
   2756 
   2757 	ret = vmw_execbuf_process(file_priv, dev_priv,
   2758 				  (void __user *)(unsigned long)arg->commands,
   2759 				  NULL, arg->command_size, arg->throttle_us,
   2760 				  (void __user *)(unsigned long)arg->fence_rep,
   2761 				  NULL);
   2762 
   2763 	if (unlikely(ret != 0))
   2764 		goto out_unlock;
   2765 
   2766 	vmw_kms_cursor_post_execbuf(dev_priv);
   2767 
   2768 out_unlock:
   2769 	ttm_read_unlock(&dev_priv->reservation_sem);
   2770 	return ret;
   2771 }
   2772