/*	$NetBSD: vmwgfx_context.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_context.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $");

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

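/**
 * struct vmw_user_context - User-space visible context resource
 *
 * @base: The TTM base object making user-space visibility possible.
 * @res: The embedded struct vmw_resource.
 * @cbs: Context binding state, tracking the bindings into this context.
 * @man: Command buffer managed resource manager for this context, or NULL
 * when the device has no MOB support.
 * @cotables: Resources for the DX cotables backing this context. Only used
 * for DX contexts.
 * @cotable_lock: Spinlock protecting the @cotables array against concurrent
 * lookup and teardown.
 * @dx_query_mob: Buffer object holding DX query results for this context,
 * if any.
 */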
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state *cbs;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
	spinlock_t cotable_lock;
	struct vmw_buffer_object *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
	.res_type = vmw_res_dx_context,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "dx contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_dx_context_create,
	.destroy = vmw_dx_context_destroy,
	.bind = vmw_dx_context_bind,
	.unbind = vmw_dx_context_unbind
};

/**
 * Context management:
 */

static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
{
	struct vmw_resource *res;
	int i;

	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[i];
		uctx->cotables[i] = NULL;
		spin_unlock(&uctx->cotable_lock);

		if (res)
			vmw_resource_unreference(&res);
	}
}

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	if (res->func->destroy == vmw_gb_context_destroy ||
	    res->func->destroy == vmw_dx_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		vmw_cmdbuf_res_man_destroy(uctx->man);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_state_kill(uctx->cbs);
		(void) res->func->destroy(res);
		mutex_unlock(&dev_priv->binding_mutex);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		vmw_context_cotables_unref(uctx);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return;

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_dec(dev_priv);
}

static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       bool dx,
			       struct vmw_resource *res,
			       void (*res_free)(struct vmw_resource *res))
{
	int ret, i;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
			    SVGA3D_CONTEXT_DATA_SIZE);
	ret = vmw_resource_init(dev_priv, res, true,
				res_free,
				dx ? &vmw_dx_context_func :
				&vmw_gb_context_func);
	if (unlikely(ret != 0))
		goto out_err;

	if (dev_priv->has_mob) {
		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
		if (IS_ERR(uctx->man)) {
			ret = PTR_ERR(uctx->man);
			uctx->man = NULL;
			goto out_err;
		}
	}

	uctx->cbs = vmw_binding_state_alloc(dev_priv);
	if (IS_ERR(uctx->cbs)) {
		ret = PTR_ERR(uctx->cbs);
		goto out_err;
	}

	spin_lock_init(&uctx->cotable_lock);

	if (dx) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
							      &uctx->res, i);
			if (IS_ERR(uctx->cotables[i])) {
				ret = PTR_ERR(uctx->cotables[i]);
				goto out_cotables;
			}
		}
	}

	res->hw_destroy = vmw_hw_context_destroy;
	return 0;

out_cotables:
	vmw_context_cotables_unref(uctx);
out_err:
	if (res_free)
		res_free(res);
	else
		kfree(res);
	return ret;
}

static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free)(struct vmw_resource *res),
			    bool dx)
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, dx, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);
	res->hw_destroy = vmw_hw_context_destroy;
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}

/*
 * GB context.
 */

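/*
 * Note on the command submission pattern used throughout the functions
 * below, summarized here for reference only: FIFO space is reserved with
 * VMW_FIFO_RESERVE(), the typed command struct (an SVGA3dCmdHeader followed
 * by the command body) is filled in, and the command is submitted with
 * vmw_fifo_commit() passing the same size that was reserved. A NULL return
 * from the reserve step means no FIFO space and must be handled by the
 * caller, as each function below does.
 */
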
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_binding_state_scrub(uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * DX context.
 */

static int vmw_dx_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * Cotables must be unbound before their context, but unbinding requires
 * the backup buffer to be reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so all bindings are scrubbed first
 * so that this doesn't have to be done later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
				   bool readback)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	int i;

	vmw_binding_state_scrub(uctx->cbs);
	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
		struct vmw_resource *res;

		/* Avoid racing with ongoing cotable destruction. */
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[vmw_cotable_scrub_order[i]];
		if (res)
			res = vmw_resource_reference_unless_doomed(res);
		spin_unlock(&uctx->cotable_lock);
		if (!res)
			continue;

		WARN_ON(vmw_cotable_scrub(res, readback));
		vmw_resource_unreference(&res);
	}
}

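/*
 * A minimal usage sketch, for illustration only: callers such as
 * vmw_dx_context_unbind() below invoke the scrub while holding
 * dev_priv->binding_mutex, choosing readback depending on whether the
 * context state must survive:
 *
 *	mutex_lock(&dev_priv->binding_mutex);
 *	vmw_dx_context_scrub_cotables(res, true); // save cotable contents
 *	...
 *	mutex_unlock(&dev_priv->binding_mutex);
 */
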
    572 
    573 static int vmw_dx_context_unbind(struct vmw_resource *res,
    574 				 bool readback,
    575 				 struct ttm_validate_buffer *val_buf)
    576 {
    577 	struct vmw_private *dev_priv = res->dev_priv;
    578 	struct ttm_buffer_object *bo = val_buf->bo;
    579 	struct vmw_fence_obj *fence;
    580 	struct vmw_user_context *uctx =
    581 		container_of(res, struct vmw_user_context, res);
    582 
    583 	struct {
    584 		SVGA3dCmdHeader header;
    585 		SVGA3dCmdDXReadbackContext body;
    586 	} *cmd1;
    587 	struct {
    588 		SVGA3dCmdHeader header;
    589 		SVGA3dCmdDXBindContext body;
    590 	} *cmd2;
    591 	uint32_t submit_size;
    592 	uint8_t *cmd;
    593 
    594 
    595 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
    596 
    597 	mutex_lock(&dev_priv->binding_mutex);
    598 	vmw_dx_context_scrub_cotables(res, readback);
    599 
    600 	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
    601 	    readback) {
    602 		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
    603 		if (vmw_query_readback_all(uctx->dx_query_mob))
    604 			DRM_ERROR("Failed to read back query states\n");
    605 	}
    606 
    607 	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
    608 
    609 	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
    610 	if (unlikely(cmd == NULL)) {
    611 		mutex_unlock(&dev_priv->binding_mutex);
    612 		return -ENOMEM;
    613 	}
    614 
    615 	cmd2 = (void *) cmd;
    616 	if (readback) {
    617 		cmd1 = (void *) cmd;
    618 		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
    619 		cmd1->header.size = sizeof(cmd1->body);
    620 		cmd1->body.cid = res->id;
    621 		cmd2 = (void *) (&cmd1[1]);
    622 	}
    623 	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
    624 	cmd2->header.size = sizeof(cmd2->body);
    625 	cmd2->body.cid = res->id;
    626 	cmd2->body.mobid = SVGA3D_INVALID_ID;
    627 
    628 	vmw_fifo_commit(dev_priv, submit_size);
    629 	mutex_unlock(&dev_priv->binding_mutex);
    630 
    631 	/*
    632 	 * Create a fence object and fence the backup buffer.
    633 	 */
    634 
    635 	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
    636 					  &fence, NULL);
    637 
    638 	vmw_bo_fence_single(bo, fence);
    639 
    640 	if (likely(fence != NULL))
    641 		vmw_fence_obj_unreference(&fence);
    642 
    643 	return 0;
    644 }
    645 
    646 static int vmw_dx_context_destroy(struct vmw_resource *res)
    647 {
    648 	struct vmw_private *dev_priv = res->dev_priv;
    649 	struct {
    650 		SVGA3dCmdHeader header;
    651 		SVGA3dCmdDXDestroyContext body;
    652 	} *cmd;
    653 
    654 	if (likely(res->id == -1))
    655 		return 0;
    656 
    657 	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
    658 	if (unlikely(cmd == NULL))
    659 		return -ENOMEM;
    660 
    661 	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
    662 	cmd->header.size = sizeof(cmd->body);
    663 	cmd->body.cid = res->id;
    664 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
    665 	if (dev_priv->query_cid == res->id)
    666 		dev_priv->query_cid_valid = false;
    667 	vmw_resource_release_id(res);
    668 	vmw_fifo_resource_dec(dev_priv);
    669 
    670 	return 0;
    671 }
    672 
    673 /**
    674  * User-space context management:
    675  */
    676 
    677 static struct vmw_resource *
    678 vmw_user_context_base_to_res(struct ttm_base_object *base)
    679 {
    680 	return &(container_of(base, struct vmw_user_context, base)->res);
    681 }
    682 
    683 static void vmw_user_context_free(struct vmw_resource *res)
    684 {
    685 	struct vmw_user_context *ctx =
    686 	    container_of(res, struct vmw_user_context, res);
    687 	struct vmw_private *dev_priv = res->dev_priv;
    688 
    689 	if (ctx->cbs)
    690 		vmw_binding_state_free(ctx->cbs);
    691 
    692 	(void) vmw_context_bind_dx_query(res, NULL);
    693 
    694 	ttm_base_object_kfree(ctx, base);
    695 	ttm_mem_global_free(vmw_mem_glob(dev_priv),
    696 			    vmw_user_context_size);
    697 }
    698 
/**
 * vmw_user_context_base_release - Release the base object's resource reference
 *
 * @p_base: Pointer to the base object pointer, which is cleared on release.
 *
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

static int vmw_context_define(struct drm_device *dev, void *data,
			      struct drm_file *file_priv, bool dx)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_operation_ctx ttm_opt_ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	int ret;

	if (!dev_priv->has_dx && dx) {
		VMW_DEBUG_USER("DX contexts not supported by device.\n");
		return -EINVAL;
	}

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
		  VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   &ttm_opt_ctx);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.handle;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	return vmw_context_define(dev, data, file_priv, false);
}

int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
	struct drm_vmw_context_arg *rep = &arg->rep;

	switch (arg->req) {
	case drm_vmw_context_legacy:
		return vmw_context_define(dev, rep, file_priv, false);
	case drm_vmw_context_dx:
		return vmw_context_define(dev, rep, file_priv, true);
	default:
		break;
	}
	return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);

	return vmw_binding_state_list(uctx->cbs);
}

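/*
 * A minimal, illustrative sketch of consuming the binding list (hypothetical
 * caller): the list must only be walked while dev_priv's binding_mutex is
 * held, since per the comment above it goes stale as soon as the mutex is
 * dropped.
 *
 *	struct list_head *binding_list;
 *
 *	mutex_lock(&dev_priv->binding_mutex);
 *	binding_list = vmw_context_binding_list(ctx);
 *	// ... walk binding_list here ...
 *	mutex_unlock(&dev_priv->binding_mutex);
 */
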
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->man;
}

struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
					 SVGACOTableType cotable_type)
{
	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
		return ERR_PTR(-EINVAL);

	return container_of(ctx, struct vmw_user_context, res)->
		cotables[cotable_type];
}

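/*
 * A minimal, illustrative sketch (hypothetical caller, with
 * SVGA_COTABLE_RTVIEW used only as an example cotable type):
 * vmw_context_cotable() can return ERR_PTR(-EINVAL), so the result must be
 * checked with IS_ERR() before use:
 *
 *	struct vmw_resource *cotable;
 *
 *	cotable = vmw_context_cotable(ctx, SVGA_COTABLE_RTVIEW);
 *	if (IS_ERR(cotable))
 *		return PTR_ERR(cotable);
 */
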
/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query - Set or clear the query MOB for a DX context
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Sets the query MOB for the context. If @mob is NULL, then this function
 * will remove the association between the MOB and the context. This function
 * assumes the binding_mutex is held.
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter. 0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
			      struct vmw_buffer_object *mob)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	if (mob == NULL) {
		if (uctx->dx_query_mob) {
			uctx->dx_query_mob->dx_query_ctx = NULL;
			vmw_bo_unreference(&uctx->dx_query_mob);
			uctx->dx_query_mob = NULL;
		}

		return 0;
	}

	/* Can only have one MOB per context for queries */
	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
		return -EINVAL;

	mob->dx_query_ctx = ctx_res;

	if (!uctx->dx_query_mob)
		uctx->dx_query_mob = vmw_bo_reference(mob);

	return 0;
}

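/*
 * A minimal, illustrative sketch (hypothetical caller, with binding_mutex
 * held as the function above requires): bind a query MOB, and later clear
 * the association by passing NULL:
 *
 *	mutex_lock(&dev_priv->binding_mutex);
 *	ret = vmw_context_bind_dx_query(ctx_res, mob);
 *	mutex_unlock(&dev_priv->binding_mutex);
 *	...
 *	vmw_context_bind_dx_query(ctx_res, NULL); // drops the MOB reference
 */
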
/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	return uctx->dx_query_mob;
}
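
/*
 * Illustrative note: since vmw_context_get_dx_query_mob() returns a
 * non-counted reference, a hypothetical caller that needs the MOB beyond
 * the current locked section should take its own reference:
 *
 *	struct vmw_buffer_object *mob;
 *
 *	mob = vmw_context_get_dx_query_mob(ctx_res);
 *	if (mob)
 *		mob = vmw_bo_reference(mob); // keep it alive past the lock
 */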
    922