/*	$NetBSD: vmwgfx_context.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
27 1.1 riastrad * 28 1.1 riastrad **************************************************************************/ 29 1.1 riastrad 30 1.2 riastrad #include <sys/cdefs.h> 31 1.2 riastrad __KERNEL_RCSID(0, "$NetBSD: vmwgfx_context.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $"); 32 1.2 riastrad 33 1.3 riastrad #include <drm/ttm/ttm_placement.h> 34 1.3 riastrad 35 1.1 riastrad #include "vmwgfx_drv.h" 36 1.1 riastrad #include "vmwgfx_resource_priv.h" 37 1.2 riastrad #include "vmwgfx_binding.h" 38 1.1 riastrad 39 1.1 riastrad struct vmw_user_context { 40 1.1 riastrad struct ttm_base_object base; 41 1.1 riastrad struct vmw_resource res; 42 1.2 riastrad struct vmw_ctx_binding_state *cbs; 43 1.2 riastrad struct vmw_cmdbuf_res_manager *man; 44 1.2 riastrad struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX]; 45 1.2 riastrad spinlock_t cotable_lock; 46 1.3 riastrad struct vmw_buffer_object *dx_query_mob; 47 1.1 riastrad }; 48 1.1 riastrad 49 1.1 riastrad static void vmw_user_context_free(struct vmw_resource *res); 50 1.1 riastrad static struct vmw_resource * 51 1.1 riastrad vmw_user_context_base_to_res(struct ttm_base_object *base); 52 1.1 riastrad 53 1.2 riastrad static int vmw_gb_context_create(struct vmw_resource *res); 54 1.2 riastrad static int vmw_gb_context_bind(struct vmw_resource *res, 55 1.2 riastrad struct ttm_validate_buffer *val_buf); 56 1.2 riastrad static int vmw_gb_context_unbind(struct vmw_resource *res, 57 1.2 riastrad bool readback, 58 1.2 riastrad struct ttm_validate_buffer *val_buf); 59 1.2 riastrad static int vmw_gb_context_destroy(struct vmw_resource *res); 60 1.2 riastrad static int vmw_dx_context_create(struct vmw_resource *res); 61 1.2 riastrad static int vmw_dx_context_bind(struct vmw_resource *res, 62 1.2 riastrad struct ttm_validate_buffer *val_buf); 63 1.2 riastrad static int vmw_dx_context_unbind(struct vmw_resource *res, 64 1.2 riastrad bool readback, 65 1.2 riastrad struct ttm_validate_buffer *val_buf); 66 1.2 riastrad static int 
vmw_dx_context_destroy(struct vmw_resource *res); 67 1.2 riastrad 68 1.1 riastrad static uint64_t vmw_user_context_size; 69 1.1 riastrad 70 1.1 riastrad static const struct vmw_user_resource_conv user_context_conv = { 71 1.1 riastrad .object_type = VMW_RES_CONTEXT, 72 1.1 riastrad .base_obj_to_res = vmw_user_context_base_to_res, 73 1.1 riastrad .res_free = vmw_user_context_free 74 1.1 riastrad }; 75 1.1 riastrad 76 1.1 riastrad const struct vmw_user_resource_conv *user_context_converter = 77 1.1 riastrad &user_context_conv; 78 1.1 riastrad 79 1.1 riastrad 80 1.1 riastrad static const struct vmw_res_func vmw_legacy_context_func = { 81 1.1 riastrad .res_type = vmw_res_context, 82 1.1 riastrad .needs_backup = false, 83 1.1 riastrad .may_evict = false, 84 1.1 riastrad .type_name = "legacy contexts", 85 1.1 riastrad .backup_placement = NULL, 86 1.1 riastrad .create = NULL, 87 1.1 riastrad .destroy = NULL, 88 1.1 riastrad .bind = NULL, 89 1.1 riastrad .unbind = NULL 90 1.1 riastrad }; 91 1.1 riastrad 92 1.2 riastrad static const struct vmw_res_func vmw_gb_context_func = { 93 1.2 riastrad .res_type = vmw_res_context, 94 1.2 riastrad .needs_backup = true, 95 1.2 riastrad .may_evict = true, 96 1.3 riastrad .prio = 3, 97 1.3 riastrad .dirty_prio = 3, 98 1.2 riastrad .type_name = "guest backed contexts", 99 1.2 riastrad .backup_placement = &vmw_mob_placement, 100 1.2 riastrad .create = vmw_gb_context_create, 101 1.2 riastrad .destroy = vmw_gb_context_destroy, 102 1.2 riastrad .bind = vmw_gb_context_bind, 103 1.2 riastrad .unbind = vmw_gb_context_unbind 104 1.2 riastrad }; 105 1.2 riastrad 106 1.2 riastrad static const struct vmw_res_func vmw_dx_context_func = { 107 1.2 riastrad .res_type = vmw_res_dx_context, 108 1.2 riastrad .needs_backup = true, 109 1.2 riastrad .may_evict = true, 110 1.3 riastrad .prio = 3, 111 1.3 riastrad .dirty_prio = 3, 112 1.2 riastrad .type_name = "dx contexts", 113 1.2 riastrad .backup_placement = &vmw_mob_placement, 114 1.2 riastrad .create = 
vmw_dx_context_create, 115 1.2 riastrad .destroy = vmw_dx_context_destroy, 116 1.2 riastrad .bind = vmw_dx_context_bind, 117 1.2 riastrad .unbind = vmw_dx_context_unbind 118 1.2 riastrad }; 119 1.2 riastrad 120 1.1 riastrad /** 121 1.1 riastrad * Context management: 122 1.1 riastrad */ 123 1.1 riastrad 124 1.2 riastrad static void vmw_context_cotables_unref(struct vmw_user_context *uctx) 125 1.2 riastrad { 126 1.2 riastrad struct vmw_resource *res; 127 1.2 riastrad int i; 128 1.2 riastrad 129 1.2 riastrad for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { 130 1.2 riastrad spin_lock(&uctx->cotable_lock); 131 1.2 riastrad res = uctx->cotables[i]; 132 1.2 riastrad uctx->cotables[i] = NULL; 133 1.2 riastrad spin_unlock(&uctx->cotable_lock); 134 1.2 riastrad 135 1.2 riastrad if (res) 136 1.2 riastrad vmw_resource_unreference(&res); 137 1.2 riastrad } 138 1.2 riastrad } 139 1.2 riastrad 140 1.1 riastrad static void vmw_hw_context_destroy(struct vmw_resource *res) 141 1.1 riastrad { 142 1.2 riastrad struct vmw_user_context *uctx = 143 1.2 riastrad container_of(res, struct vmw_user_context, res); 144 1.1 riastrad struct vmw_private *dev_priv = res->dev_priv; 145 1.1 riastrad struct { 146 1.1 riastrad SVGA3dCmdHeader header; 147 1.1 riastrad SVGA3dCmdDestroyContext body; 148 1.1 riastrad } *cmd; 149 1.1 riastrad 150 1.1 riastrad 151 1.2 riastrad if (res->func->destroy == vmw_gb_context_destroy || 152 1.2 riastrad res->func->destroy == vmw_dx_context_destroy) { 153 1.2 riastrad mutex_lock(&dev_priv->cmdbuf_mutex); 154 1.2 riastrad vmw_cmdbuf_res_man_destroy(uctx->man); 155 1.2 riastrad mutex_lock(&dev_priv->binding_mutex); 156 1.2 riastrad vmw_binding_state_kill(uctx->cbs); 157 1.2 riastrad (void) res->func->destroy(res); 158 1.2 riastrad mutex_unlock(&dev_priv->binding_mutex); 159 1.2 riastrad if (dev_priv->pinned_bo != NULL && 160 1.2 riastrad !dev_priv->query_cid_valid) 161 1.2 riastrad __vmw_execbuf_release_pinned_bo(dev_priv, NULL); 162 1.2 riastrad 
mutex_unlock(&dev_priv->cmdbuf_mutex); 163 1.2 riastrad vmw_context_cotables_unref(uctx); 164 1.2 riastrad return; 165 1.2 riastrad } 166 1.2 riastrad 167 1.1 riastrad vmw_execbuf_release_pinned_bo(dev_priv); 168 1.3 riastrad cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); 169 1.3 riastrad if (unlikely(cmd == NULL)) 170 1.1 riastrad return; 171 1.1 riastrad 172 1.2 riastrad cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY; 173 1.2 riastrad cmd->header.size = sizeof(cmd->body); 174 1.2 riastrad cmd->body.cid = res->id; 175 1.1 riastrad 176 1.1 riastrad vmw_fifo_commit(dev_priv, sizeof(*cmd)); 177 1.2 riastrad vmw_fifo_resource_dec(dev_priv); 178 1.2 riastrad } 179 1.2 riastrad 180 1.2 riastrad static int vmw_gb_context_init(struct vmw_private *dev_priv, 181 1.2 riastrad bool dx, 182 1.2 riastrad struct vmw_resource *res, 183 1.2 riastrad void (*res_free)(struct vmw_resource *res)) 184 1.2 riastrad { 185 1.2 riastrad int ret, i; 186 1.2 riastrad struct vmw_user_context *uctx = 187 1.2 riastrad container_of(res, struct vmw_user_context, res); 188 1.2 riastrad 189 1.2 riastrad res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) : 190 1.2 riastrad SVGA3D_CONTEXT_DATA_SIZE); 191 1.2 riastrad ret = vmw_resource_init(dev_priv, res, true, 192 1.2 riastrad res_free, 193 1.2 riastrad dx ? 
&vmw_dx_context_func : 194 1.2 riastrad &vmw_gb_context_func); 195 1.2 riastrad if (unlikely(ret != 0)) 196 1.2 riastrad goto out_err; 197 1.2 riastrad 198 1.2 riastrad if (dev_priv->has_mob) { 199 1.2 riastrad uctx->man = vmw_cmdbuf_res_man_create(dev_priv); 200 1.2 riastrad if (IS_ERR(uctx->man)) { 201 1.2 riastrad ret = PTR_ERR(uctx->man); 202 1.2 riastrad uctx->man = NULL; 203 1.2 riastrad goto out_err; 204 1.2 riastrad } 205 1.2 riastrad } 206 1.2 riastrad 207 1.2 riastrad uctx->cbs = vmw_binding_state_alloc(dev_priv); 208 1.2 riastrad if (IS_ERR(uctx->cbs)) { 209 1.2 riastrad ret = PTR_ERR(uctx->cbs); 210 1.2 riastrad goto out_err; 211 1.2 riastrad } 212 1.2 riastrad 213 1.2 riastrad spin_lock_init(&uctx->cotable_lock); 214 1.2 riastrad 215 1.2 riastrad if (dx) { 216 1.2 riastrad for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { 217 1.2 riastrad uctx->cotables[i] = vmw_cotable_alloc(dev_priv, 218 1.2 riastrad &uctx->res, i); 219 1.3 riastrad if (IS_ERR(uctx->cotables[i])) { 220 1.3 riastrad ret = PTR_ERR(uctx->cotables[i]); 221 1.2 riastrad goto out_cotables; 222 1.2 riastrad } 223 1.2 riastrad } 224 1.2 riastrad } 225 1.2 riastrad 226 1.3 riastrad res->hw_destroy = vmw_hw_context_destroy; 227 1.2 riastrad return 0; 228 1.2 riastrad 229 1.2 riastrad out_cotables: 230 1.2 riastrad vmw_context_cotables_unref(uctx); 231 1.2 riastrad out_err: 232 1.2 riastrad if (res_free) 233 1.2 riastrad res_free(res); 234 1.2 riastrad else 235 1.2 riastrad kfree(res); 236 1.2 riastrad return ret; 237 1.1 riastrad } 238 1.1 riastrad 239 1.1 riastrad static int vmw_context_init(struct vmw_private *dev_priv, 240 1.1 riastrad struct vmw_resource *res, 241 1.2 riastrad void (*res_free)(struct vmw_resource *res), 242 1.2 riastrad bool dx) 243 1.1 riastrad { 244 1.1 riastrad int ret; 245 1.1 riastrad 246 1.1 riastrad struct { 247 1.1 riastrad SVGA3dCmdHeader header; 248 1.1 riastrad SVGA3dCmdDefineContext body; 249 1.1 riastrad } *cmd; 250 1.1 riastrad 251 1.2 riastrad if 
(dev_priv->has_mob) 252 1.2 riastrad return vmw_gb_context_init(dev_priv, dx, res, res_free); 253 1.2 riastrad 254 1.1 riastrad ret = vmw_resource_init(dev_priv, res, false, 255 1.1 riastrad res_free, &vmw_legacy_context_func); 256 1.1 riastrad 257 1.1 riastrad if (unlikely(ret != 0)) { 258 1.1 riastrad DRM_ERROR("Failed to allocate a resource id.\n"); 259 1.1 riastrad goto out_early; 260 1.1 riastrad } 261 1.1 riastrad 262 1.1 riastrad if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) { 263 1.1 riastrad DRM_ERROR("Out of hw context ids.\n"); 264 1.1 riastrad vmw_resource_unreference(&res); 265 1.1 riastrad return -ENOMEM; 266 1.1 riastrad } 267 1.1 riastrad 268 1.3 riastrad cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); 269 1.1 riastrad if (unlikely(cmd == NULL)) { 270 1.1 riastrad vmw_resource_unreference(&res); 271 1.1 riastrad return -ENOMEM; 272 1.1 riastrad } 273 1.1 riastrad 274 1.2 riastrad cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE; 275 1.2 riastrad cmd->header.size = sizeof(cmd->body); 276 1.2 riastrad cmd->body.cid = res->id; 277 1.1 riastrad 278 1.1 riastrad vmw_fifo_commit(dev_priv, sizeof(*cmd)); 279 1.2 riastrad vmw_fifo_resource_inc(dev_priv); 280 1.3 riastrad res->hw_destroy = vmw_hw_context_destroy; 281 1.1 riastrad return 0; 282 1.1 riastrad 283 1.1 riastrad out_early: 284 1.1 riastrad if (res_free == NULL) 285 1.1 riastrad kfree(res); 286 1.1 riastrad else 287 1.1 riastrad res_free(res); 288 1.1 riastrad return ret; 289 1.1 riastrad } 290 1.1 riastrad 291 1.2 riastrad 292 1.2 riastrad /* 293 1.2 riastrad * GB context. 
294 1.2 riastrad */ 295 1.2 riastrad 296 1.2 riastrad static int vmw_gb_context_create(struct vmw_resource *res) 297 1.2 riastrad { 298 1.2 riastrad struct vmw_private *dev_priv = res->dev_priv; 299 1.2 riastrad int ret; 300 1.2 riastrad struct { 301 1.2 riastrad SVGA3dCmdHeader header; 302 1.2 riastrad SVGA3dCmdDefineGBContext body; 303 1.2 riastrad } *cmd; 304 1.2 riastrad 305 1.2 riastrad if (likely(res->id != -1)) 306 1.2 riastrad return 0; 307 1.2 riastrad 308 1.2 riastrad ret = vmw_resource_alloc_id(res); 309 1.2 riastrad if (unlikely(ret != 0)) { 310 1.2 riastrad DRM_ERROR("Failed to allocate a context id.\n"); 311 1.2 riastrad goto out_no_id; 312 1.2 riastrad } 313 1.2 riastrad 314 1.2 riastrad if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) { 315 1.2 riastrad ret = -EBUSY; 316 1.2 riastrad goto out_no_fifo; 317 1.2 riastrad } 318 1.2 riastrad 319 1.3 riastrad cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); 320 1.2 riastrad if (unlikely(cmd == NULL)) { 321 1.2 riastrad ret = -ENOMEM; 322 1.2 riastrad goto out_no_fifo; 323 1.2 riastrad } 324 1.2 riastrad 325 1.2 riastrad cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT; 326 1.2 riastrad cmd->header.size = sizeof(cmd->body); 327 1.2 riastrad cmd->body.cid = res->id; 328 1.2 riastrad vmw_fifo_commit(dev_priv, sizeof(*cmd)); 329 1.2 riastrad vmw_fifo_resource_inc(dev_priv); 330 1.2 riastrad 331 1.2 riastrad return 0; 332 1.2 riastrad 333 1.2 riastrad out_no_fifo: 334 1.2 riastrad vmw_resource_release_id(res); 335 1.2 riastrad out_no_id: 336 1.2 riastrad return ret; 337 1.2 riastrad } 338 1.2 riastrad 339 1.2 riastrad static int vmw_gb_context_bind(struct vmw_resource *res, 340 1.2 riastrad struct ttm_validate_buffer *val_buf) 341 1.2 riastrad { 342 1.2 riastrad struct vmw_private *dev_priv = res->dev_priv; 343 1.2 riastrad struct { 344 1.2 riastrad SVGA3dCmdHeader header; 345 1.2 riastrad SVGA3dCmdBindGBContext body; 346 1.2 riastrad } *cmd; 347 1.2 riastrad struct ttm_buffer_object *bo = val_buf->bo; 348 1.2 
riastrad 349 1.2 riastrad BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 350 1.2 riastrad 351 1.3 riastrad cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); 352 1.3 riastrad if (unlikely(cmd == NULL)) 353 1.2 riastrad return -ENOMEM; 354 1.3 riastrad 355 1.2 riastrad cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; 356 1.2 riastrad cmd->header.size = sizeof(cmd->body); 357 1.2 riastrad cmd->body.cid = res->id; 358 1.2 riastrad cmd->body.mobid = bo->mem.start; 359 1.2 riastrad cmd->body.validContents = res->backup_dirty; 360 1.2 riastrad res->backup_dirty = false; 361 1.2 riastrad vmw_fifo_commit(dev_priv, sizeof(*cmd)); 362 1.2 riastrad 363 1.2 riastrad return 0; 364 1.2 riastrad } 365 1.2 riastrad 366 1.2 riastrad static int vmw_gb_context_unbind(struct vmw_resource *res, 367 1.2 riastrad bool readback, 368 1.2 riastrad struct ttm_validate_buffer *val_buf) 369 1.1 riastrad { 370 1.2 riastrad struct vmw_private *dev_priv = res->dev_priv; 371 1.2 riastrad struct ttm_buffer_object *bo = val_buf->bo; 372 1.2 riastrad struct vmw_fence_obj *fence; 373 1.2 riastrad struct vmw_user_context *uctx = 374 1.2 riastrad container_of(res, struct vmw_user_context, res); 375 1.2 riastrad 376 1.2 riastrad struct { 377 1.2 riastrad SVGA3dCmdHeader header; 378 1.2 riastrad SVGA3dCmdReadbackGBContext body; 379 1.2 riastrad } *cmd1; 380 1.2 riastrad struct { 381 1.2 riastrad SVGA3dCmdHeader header; 382 1.2 riastrad SVGA3dCmdBindGBContext body; 383 1.2 riastrad } *cmd2; 384 1.2 riastrad uint32_t submit_size; 385 1.2 riastrad uint8_t *cmd; 386 1.2 riastrad 387 1.2 riastrad 388 1.2 riastrad BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 389 1.2 riastrad 390 1.2 riastrad mutex_lock(&dev_priv->binding_mutex); 391 1.2 riastrad vmw_binding_state_scrub(uctx->cbs); 392 1.2 riastrad 393 1.2 riastrad submit_size = sizeof(*cmd2) + (readback ? 
sizeof(*cmd1) : 0); 394 1.2 riastrad 395 1.3 riastrad cmd = VMW_FIFO_RESERVE(dev_priv, submit_size); 396 1.2 riastrad if (unlikely(cmd == NULL)) { 397 1.2 riastrad mutex_unlock(&dev_priv->binding_mutex); 398 1.2 riastrad return -ENOMEM; 399 1.2 riastrad } 400 1.2 riastrad 401 1.2 riastrad cmd2 = (void *) cmd; 402 1.2 riastrad if (readback) { 403 1.2 riastrad cmd1 = (void *) cmd; 404 1.2 riastrad cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT; 405 1.2 riastrad cmd1->header.size = sizeof(cmd1->body); 406 1.2 riastrad cmd1->body.cid = res->id; 407 1.2 riastrad cmd2 = (void *) (&cmd1[1]); 408 1.2 riastrad } 409 1.2 riastrad cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; 410 1.2 riastrad cmd2->header.size = sizeof(cmd2->body); 411 1.2 riastrad cmd2->body.cid = res->id; 412 1.2 riastrad cmd2->body.mobid = SVGA3D_INVALID_ID; 413 1.2 riastrad 414 1.2 riastrad vmw_fifo_commit(dev_priv, submit_size); 415 1.2 riastrad mutex_unlock(&dev_priv->binding_mutex); 416 1.2 riastrad 417 1.2 riastrad /* 418 1.2 riastrad * Create a fence object and fence the backup buffer. 
419 1.2 riastrad */ 420 1.2 riastrad 421 1.2 riastrad (void) vmw_execbuf_fence_commands(NULL, dev_priv, 422 1.2 riastrad &fence, NULL); 423 1.2 riastrad 424 1.3 riastrad vmw_bo_fence_single(bo, fence); 425 1.2 riastrad 426 1.2 riastrad if (likely(fence != NULL)) 427 1.2 riastrad vmw_fence_obj_unreference(&fence); 428 1.2 riastrad 429 1.2 riastrad return 0; 430 1.2 riastrad } 431 1.2 riastrad 432 1.2 riastrad static int vmw_gb_context_destroy(struct vmw_resource *res) 433 1.2 riastrad { 434 1.2 riastrad struct vmw_private *dev_priv = res->dev_priv; 435 1.2 riastrad struct { 436 1.2 riastrad SVGA3dCmdHeader header; 437 1.2 riastrad SVGA3dCmdDestroyGBContext body; 438 1.2 riastrad } *cmd; 439 1.2 riastrad 440 1.2 riastrad if (likely(res->id == -1)) 441 1.2 riastrad return 0; 442 1.2 riastrad 443 1.3 riastrad cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); 444 1.3 riastrad if (unlikely(cmd == NULL)) 445 1.2 riastrad return -ENOMEM; 446 1.2 riastrad 447 1.2 riastrad cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT; 448 1.2 riastrad cmd->header.size = sizeof(cmd->body); 449 1.2 riastrad cmd->body.cid = res->id; 450 1.2 riastrad vmw_fifo_commit(dev_priv, sizeof(*cmd)); 451 1.2 riastrad if (dev_priv->query_cid == res->id) 452 1.2 riastrad dev_priv->query_cid_valid = false; 453 1.2 riastrad vmw_resource_release_id(res); 454 1.2 riastrad vmw_fifo_resource_dec(dev_priv); 455 1.2 riastrad 456 1.2 riastrad return 0; 457 1.2 riastrad } 458 1.2 riastrad 459 1.2 riastrad /* 460 1.2 riastrad * DX context. 
461 1.2 riastrad */ 462 1.2 riastrad 463 1.2 riastrad static int vmw_dx_context_create(struct vmw_resource *res) 464 1.2 riastrad { 465 1.2 riastrad struct vmw_private *dev_priv = res->dev_priv; 466 1.1 riastrad int ret; 467 1.2 riastrad struct { 468 1.2 riastrad SVGA3dCmdHeader header; 469 1.2 riastrad SVGA3dCmdDXDefineContext body; 470 1.2 riastrad } *cmd; 471 1.2 riastrad 472 1.2 riastrad if (likely(res->id != -1)) 473 1.2 riastrad return 0; 474 1.2 riastrad 475 1.2 riastrad ret = vmw_resource_alloc_id(res); 476 1.2 riastrad if (unlikely(ret != 0)) { 477 1.2 riastrad DRM_ERROR("Failed to allocate a context id.\n"); 478 1.2 riastrad goto out_no_id; 479 1.2 riastrad } 480 1.2 riastrad 481 1.2 riastrad if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) { 482 1.2 riastrad ret = -EBUSY; 483 1.2 riastrad goto out_no_fifo; 484 1.2 riastrad } 485 1.2 riastrad 486 1.3 riastrad cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); 487 1.2 riastrad if (unlikely(cmd == NULL)) { 488 1.2 riastrad ret = -ENOMEM; 489 1.2 riastrad goto out_no_fifo; 490 1.2 riastrad } 491 1.2 riastrad 492 1.2 riastrad cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT; 493 1.2 riastrad cmd->header.size = sizeof(cmd->body); 494 1.2 riastrad cmd->body.cid = res->id; 495 1.2 riastrad vmw_fifo_commit(dev_priv, sizeof(*cmd)); 496 1.2 riastrad vmw_fifo_resource_inc(dev_priv); 497 1.2 riastrad 498 1.2 riastrad return 0; 499 1.2 riastrad 500 1.2 riastrad out_no_fifo: 501 1.2 riastrad vmw_resource_release_id(res); 502 1.2 riastrad out_no_id: 503 1.2 riastrad return ret; 504 1.2 riastrad } 505 1.2 riastrad 506 1.2 riastrad static int vmw_dx_context_bind(struct vmw_resource *res, 507 1.2 riastrad struct ttm_validate_buffer *val_buf) 508 1.2 riastrad { 509 1.2 riastrad struct vmw_private *dev_priv = res->dev_priv; 510 1.2 riastrad struct { 511 1.2 riastrad SVGA3dCmdHeader header; 512 1.2 riastrad SVGA3dCmdDXBindContext body; 513 1.2 riastrad } *cmd; 514 1.2 riastrad struct ttm_buffer_object *bo = val_buf->bo; 515 1.2 
riastrad 516 1.2 riastrad BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 517 1.2 riastrad 518 1.3 riastrad cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); 519 1.3 riastrad if (unlikely(cmd == NULL)) 520 1.2 riastrad return -ENOMEM; 521 1.2 riastrad 522 1.2 riastrad cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT; 523 1.2 riastrad cmd->header.size = sizeof(cmd->body); 524 1.2 riastrad cmd->body.cid = res->id; 525 1.2 riastrad cmd->body.mobid = bo->mem.start; 526 1.2 riastrad cmd->body.validContents = res->backup_dirty; 527 1.2 riastrad res->backup_dirty = false; 528 1.2 riastrad vmw_fifo_commit(dev_priv, sizeof(*cmd)); 529 1.2 riastrad 530 1.2 riastrad 531 1.2 riastrad return 0; 532 1.2 riastrad } 533 1.2 riastrad 534 1.2 riastrad /** 535 1.2 riastrad * vmw_dx_context_scrub_cotables - Scrub all bindings and 536 1.2 riastrad * cotables from a context 537 1.2 riastrad * 538 1.2 riastrad * @ctx: Pointer to the context resource 539 1.2 riastrad * @readback: Whether to save the otable contents on scrubbing. 540 1.2 riastrad * 541 1.2 riastrad * COtables must be unbound before their context, but unbinding requires 542 1.2 riastrad * the backup buffer being reserved, whereas scrubbing does not. 543 1.2 riastrad * This function scrubs all cotables of a context, potentially reading back 544 1.2 riastrad * the contents into their backup buffers. However, scrubbing cotables 545 1.2 riastrad * also makes the device context invalid, so scrub all bindings first so 546 1.2 riastrad * that doesn't have to be done later with an invalid context. 
547 1.2 riastrad */ 548 1.2 riastrad void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx, 549 1.2 riastrad bool readback) 550 1.2 riastrad { 551 1.2 riastrad struct vmw_user_context *uctx = 552 1.2 riastrad container_of(ctx, struct vmw_user_context, res); 553 1.2 riastrad int i; 554 1.2 riastrad 555 1.2 riastrad vmw_binding_state_scrub(uctx->cbs); 556 1.2 riastrad for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { 557 1.2 riastrad struct vmw_resource *res; 558 1.2 riastrad 559 1.2 riastrad /* Avoid racing with ongoing cotable destruction. */ 560 1.2 riastrad spin_lock(&uctx->cotable_lock); 561 1.2 riastrad res = uctx->cotables[vmw_cotable_scrub_order[i]]; 562 1.2 riastrad if (res) 563 1.2 riastrad res = vmw_resource_reference_unless_doomed(res); 564 1.2 riastrad spin_unlock(&uctx->cotable_lock); 565 1.2 riastrad if (!res) 566 1.2 riastrad continue; 567 1.2 riastrad 568 1.2 riastrad WARN_ON(vmw_cotable_scrub(res, readback)); 569 1.2 riastrad vmw_resource_unreference(&res); 570 1.2 riastrad } 571 1.2 riastrad } 572 1.2 riastrad 573 1.2 riastrad static int vmw_dx_context_unbind(struct vmw_resource *res, 574 1.2 riastrad bool readback, 575 1.2 riastrad struct ttm_validate_buffer *val_buf) 576 1.2 riastrad { 577 1.2 riastrad struct vmw_private *dev_priv = res->dev_priv; 578 1.2 riastrad struct ttm_buffer_object *bo = val_buf->bo; 579 1.2 riastrad struct vmw_fence_obj *fence; 580 1.2 riastrad struct vmw_user_context *uctx = 581 1.2 riastrad container_of(res, struct vmw_user_context, res); 582 1.2 riastrad 583 1.2 riastrad struct { 584 1.2 riastrad SVGA3dCmdHeader header; 585 1.2 riastrad SVGA3dCmdDXReadbackContext body; 586 1.2 riastrad } *cmd1; 587 1.2 riastrad struct { 588 1.2 riastrad SVGA3dCmdHeader header; 589 1.2 riastrad SVGA3dCmdDXBindContext body; 590 1.2 riastrad } *cmd2; 591 1.2 riastrad uint32_t submit_size; 592 1.2 riastrad uint8_t *cmd; 593 1.2 riastrad 594 1.2 riastrad 595 1.2 riastrad BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 596 1.2 riastrad 597 1.2 
riastrad mutex_lock(&dev_priv->binding_mutex); 598 1.2 riastrad vmw_dx_context_scrub_cotables(res, readback); 599 1.2 riastrad 600 1.2 riastrad if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx && 601 1.2 riastrad readback) { 602 1.2 riastrad WARN_ON(uctx->dx_query_mob->dx_query_ctx != res); 603 1.2 riastrad if (vmw_query_readback_all(uctx->dx_query_mob)) 604 1.2 riastrad DRM_ERROR("Failed to read back query states\n"); 605 1.2 riastrad } 606 1.2 riastrad 607 1.2 riastrad submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); 608 1.2 riastrad 609 1.3 riastrad cmd = VMW_FIFO_RESERVE(dev_priv, submit_size); 610 1.2 riastrad if (unlikely(cmd == NULL)) { 611 1.2 riastrad mutex_unlock(&dev_priv->binding_mutex); 612 1.2 riastrad return -ENOMEM; 613 1.2 riastrad } 614 1.2 riastrad 615 1.2 riastrad cmd2 = (void *) cmd; 616 1.2 riastrad if (readback) { 617 1.2 riastrad cmd1 = (void *) cmd; 618 1.2 riastrad cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT; 619 1.2 riastrad cmd1->header.size = sizeof(cmd1->body); 620 1.2 riastrad cmd1->body.cid = res->id; 621 1.2 riastrad cmd2 = (void *) (&cmd1[1]); 622 1.2 riastrad } 623 1.2 riastrad cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT; 624 1.2 riastrad cmd2->header.size = sizeof(cmd2->body); 625 1.2 riastrad cmd2->body.cid = res->id; 626 1.2 riastrad cmd2->body.mobid = SVGA3D_INVALID_ID; 627 1.2 riastrad 628 1.2 riastrad vmw_fifo_commit(dev_priv, submit_size); 629 1.2 riastrad mutex_unlock(&dev_priv->binding_mutex); 630 1.2 riastrad 631 1.2 riastrad /* 632 1.2 riastrad * Create a fence object and fence the backup buffer. 
633 1.2 riastrad */ 634 1.2 riastrad 635 1.2 riastrad (void) vmw_execbuf_fence_commands(NULL, dev_priv, 636 1.2 riastrad &fence, NULL); 637 1.2 riastrad 638 1.3 riastrad vmw_bo_fence_single(bo, fence); 639 1.2 riastrad 640 1.2 riastrad if (likely(fence != NULL)) 641 1.2 riastrad vmw_fence_obj_unreference(&fence); 642 1.2 riastrad 643 1.2 riastrad return 0; 644 1.2 riastrad } 645 1.2 riastrad 646 1.2 riastrad static int vmw_dx_context_destroy(struct vmw_resource *res) 647 1.2 riastrad { 648 1.2 riastrad struct vmw_private *dev_priv = res->dev_priv; 649 1.2 riastrad struct { 650 1.2 riastrad SVGA3dCmdHeader header; 651 1.2 riastrad SVGA3dCmdDXDestroyContext body; 652 1.2 riastrad } *cmd; 653 1.1 riastrad 654 1.2 riastrad if (likely(res->id == -1)) 655 1.2 riastrad return 0; 656 1.2 riastrad 657 1.3 riastrad cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); 658 1.3 riastrad if (unlikely(cmd == NULL)) 659 1.2 riastrad return -ENOMEM; 660 1.1 riastrad 661 1.2 riastrad cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT; 662 1.2 riastrad cmd->header.size = sizeof(cmd->body); 663 1.2 riastrad cmd->body.cid = res->id; 664 1.2 riastrad vmw_fifo_commit(dev_priv, sizeof(*cmd)); 665 1.2 riastrad if (dev_priv->query_cid == res->id) 666 1.2 riastrad dev_priv->query_cid_valid = false; 667 1.2 riastrad vmw_resource_release_id(res); 668 1.2 riastrad vmw_fifo_resource_dec(dev_priv); 669 1.1 riastrad 670 1.2 riastrad return 0; 671 1.1 riastrad } 672 1.1 riastrad 673 1.1 riastrad /** 674 1.1 riastrad * User-space context management: 675 1.1 riastrad */ 676 1.1 riastrad 677 1.1 riastrad static struct vmw_resource * 678 1.1 riastrad vmw_user_context_base_to_res(struct ttm_base_object *base) 679 1.1 riastrad { 680 1.1 riastrad return &(container_of(base, struct vmw_user_context, base)->res); 681 1.1 riastrad } 682 1.1 riastrad 683 1.1 riastrad static void vmw_user_context_free(struct vmw_resource *res) 684 1.1 riastrad { 685 1.1 riastrad struct vmw_user_context *ctx = 686 1.1 riastrad 
container_of(res, struct vmw_user_context, res); 687 1.1 riastrad struct vmw_private *dev_priv = res->dev_priv; 688 1.1 riastrad 689 1.2 riastrad if (ctx->cbs) 690 1.2 riastrad vmw_binding_state_free(ctx->cbs); 691 1.2 riastrad 692 1.2 riastrad (void) vmw_context_bind_dx_query(res, NULL); 693 1.2 riastrad 694 1.1 riastrad ttm_base_object_kfree(ctx, base); 695 1.1 riastrad ttm_mem_global_free(vmw_mem_glob(dev_priv), 696 1.1 riastrad vmw_user_context_size); 697 1.1 riastrad } 698 1.1 riastrad 699 1.1 riastrad /** 700 1.1 riastrad * This function is called when user space has no more references on the 701 1.1 riastrad * base object. It releases the base-object's reference on the resource object. 702 1.1 riastrad */ 703 1.1 riastrad 704 1.1 riastrad static void vmw_user_context_base_release(struct ttm_base_object **p_base) 705 1.1 riastrad { 706 1.1 riastrad struct ttm_base_object *base = *p_base; 707 1.1 riastrad struct vmw_user_context *ctx = 708 1.1 riastrad container_of(base, struct vmw_user_context, base); 709 1.1 riastrad struct vmw_resource *res = &ctx->res; 710 1.1 riastrad 711 1.1 riastrad *p_base = NULL; 712 1.1 riastrad vmw_resource_unreference(&res); 713 1.1 riastrad } 714 1.1 riastrad 715 1.1 riastrad int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, 716 1.1 riastrad struct drm_file *file_priv) 717 1.1 riastrad { 718 1.1 riastrad struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; 719 1.1 riastrad struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 720 1.1 riastrad 721 1.1 riastrad return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE); 722 1.1 riastrad } 723 1.1 riastrad 724 1.2 riastrad static int vmw_context_define(struct drm_device *dev, void *data, 725 1.2 riastrad struct drm_file *file_priv, bool dx) 726 1.1 riastrad { 727 1.1 riastrad struct vmw_private *dev_priv = vmw_priv(dev); 728 1.1 riastrad struct vmw_user_context *ctx; 729 1.1 riastrad struct vmw_resource *res; 730 1.1 riastrad struct 
vmw_resource *tmp; 731 1.1 riastrad struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; 732 1.1 riastrad struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 733 1.3 riastrad struct ttm_operation_ctx ttm_opt_ctx = { 734 1.3 riastrad .interruptible = true, 735 1.3 riastrad .no_wait_gpu = false 736 1.3 riastrad }; 737 1.1 riastrad int ret; 738 1.1 riastrad 739 1.2 riastrad if (!dev_priv->has_dx && dx) { 740 1.3 riastrad VMW_DEBUG_USER("DX contexts not supported by device.\n"); 741 1.2 riastrad return -EINVAL; 742 1.2 riastrad } 743 1.1 riastrad 744 1.1 riastrad if (unlikely(vmw_user_context_size == 0)) 745 1.3 riastrad vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 746 1.3 riastrad ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) + 747 1.3 riastrad + VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE; 748 1.1 riastrad 749 1.2 riastrad ret = ttm_read_lock(&dev_priv->reservation_sem, true); 750 1.1 riastrad if (unlikely(ret != 0)) 751 1.1 riastrad return ret; 752 1.1 riastrad 753 1.1 riastrad ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), 754 1.1 riastrad vmw_user_context_size, 755 1.3 riastrad &ttm_opt_ctx); 756 1.1 riastrad if (unlikely(ret != 0)) { 757 1.1 riastrad if (ret != -ERESTARTSYS) 758 1.1 riastrad DRM_ERROR("Out of graphics memory for context" 759 1.1 riastrad " creation.\n"); 760 1.1 riastrad goto out_unlock; 761 1.1 riastrad } 762 1.1 riastrad 763 1.1 riastrad ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 764 1.3 riastrad if (unlikely(!ctx)) { 765 1.1 riastrad ttm_mem_global_free(vmw_mem_glob(dev_priv), 766 1.1 riastrad vmw_user_context_size); 767 1.1 riastrad ret = -ENOMEM; 768 1.1 riastrad goto out_unlock; 769 1.1 riastrad } 770 1.1 riastrad 771 1.1 riastrad res = &ctx->res; 772 1.1 riastrad ctx->base.shareable = false; 773 1.1 riastrad ctx->base.tfile = NULL; 774 1.1 riastrad 775 1.1 riastrad /* 776 1.1 riastrad * From here on, the destructor takes over resource freeing. 
777 1.1 riastrad */ 778 1.1 riastrad 779 1.2 riastrad ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx); 780 1.1 riastrad if (unlikely(ret != 0)) 781 1.1 riastrad goto out_unlock; 782 1.1 riastrad 783 1.1 riastrad tmp = vmw_resource_reference(&ctx->res); 784 1.1 riastrad ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT, 785 1.1 riastrad &vmw_user_context_base_release, NULL); 786 1.1 riastrad 787 1.1 riastrad if (unlikely(ret != 0)) { 788 1.1 riastrad vmw_resource_unreference(&tmp); 789 1.1 riastrad goto out_err; 790 1.1 riastrad } 791 1.1 riastrad 792 1.3 riastrad arg->cid = ctx->base.handle; 793 1.1 riastrad out_err: 794 1.1 riastrad vmw_resource_unreference(&res); 795 1.1 riastrad out_unlock: 796 1.2 riastrad ttm_read_unlock(&dev_priv->reservation_sem); 797 1.1 riastrad return ret; 798 1.2 riastrad } 799 1.2 riastrad 800 1.2 riastrad int vmw_context_define_ioctl(struct drm_device *dev, void *data, 801 1.2 riastrad struct drm_file *file_priv) 802 1.2 riastrad { 803 1.2 riastrad return vmw_context_define(dev, data, file_priv, false); 804 1.2 riastrad } 805 1.2 riastrad 806 1.2 riastrad int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data, 807 1.2 riastrad struct drm_file *file_priv) 808 1.2 riastrad { 809 1.2 riastrad union drm_vmw_extended_context_arg *arg = (typeof(arg)) data; 810 1.2 riastrad struct drm_vmw_context_arg *rep = &arg->rep; 811 1.2 riastrad 812 1.2 riastrad switch (arg->req) { 813 1.2 riastrad case drm_vmw_context_legacy: 814 1.2 riastrad return vmw_context_define(dev, rep, file_priv, false); 815 1.2 riastrad case drm_vmw_context_dx: 816 1.2 riastrad return vmw_context_define(dev, rep, file_priv, true); 817 1.2 riastrad default: 818 1.2 riastrad break; 819 1.2 riastrad } 820 1.2 riastrad return -EINVAL; 821 1.2 riastrad } 822 1.2 riastrad 823 1.2 riastrad /** 824 1.2 riastrad * vmw_context_binding_list - Return a list of context bindings 825 1.2 riastrad * 826 1.2 riastrad * @ctx: The context 
resource 827 1.2 riastrad * 828 1.2 riastrad * Returns the current list of bindings of the given context. Note that 829 1.2 riastrad * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked. 830 1.2 riastrad */ 831 1.2 riastrad struct list_head *vmw_context_binding_list(struct vmw_resource *ctx) 832 1.2 riastrad { 833 1.2 riastrad struct vmw_user_context *uctx = 834 1.2 riastrad container_of(ctx, struct vmw_user_context, res); 835 1.2 riastrad 836 1.2 riastrad return vmw_binding_state_list(uctx->cbs); 837 1.2 riastrad } 838 1.2 riastrad 839 1.2 riastrad struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx) 840 1.2 riastrad { 841 1.2 riastrad return container_of(ctx, struct vmw_user_context, res)->man; 842 1.2 riastrad } 843 1.2 riastrad 844 1.2 riastrad struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx, 845 1.2 riastrad SVGACOTableType cotable_type) 846 1.2 riastrad { 847 1.2 riastrad if (cotable_type >= SVGA_COTABLE_DX10_MAX) 848 1.2 riastrad return ERR_PTR(-EINVAL); 849 1.2 riastrad 850 1.3 riastrad return container_of(ctx, struct vmw_user_context, res)-> 851 1.3 riastrad cotables[cotable_type]; 852 1.2 riastrad } 853 1.2 riastrad 854 1.2 riastrad /** 855 1.2 riastrad * vmw_context_binding_state - 856 1.2 riastrad * Return a pointer to a context binding state structure 857 1.2 riastrad * 858 1.2 riastrad * @ctx: The context resource 859 1.2 riastrad * 860 1.2 riastrad * Returns the current state of bindings of the given context. Note that 861 1.2 riastrad * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked. 
862 1.2 riastrad */ 863 1.2 riastrad struct vmw_ctx_binding_state * 864 1.2 riastrad vmw_context_binding_state(struct vmw_resource *ctx) 865 1.2 riastrad { 866 1.2 riastrad return container_of(ctx, struct vmw_user_context, res)->cbs; 867 1.2 riastrad } 868 1.2 riastrad 869 1.2 riastrad /** 870 1.2 riastrad * vmw_context_bind_dx_query - 871 1.2 riastrad * Sets query MOB for the context. If @mob is NULL, then this function will 872 1.2 riastrad * remove the association between the MOB and the context. This function 873 1.2 riastrad * assumes the binding_mutex is held. 874 1.2 riastrad * 875 1.2 riastrad * @ctx_res: The context resource 876 1.2 riastrad * @mob: a reference to the query MOB 877 1.2 riastrad * 878 1.2 riastrad * Returns -EINVAL if a MOB has already been set and does not match the one 879 1.2 riastrad * specified in the parameter. 0 otherwise. 880 1.2 riastrad */ 881 1.2 riastrad int vmw_context_bind_dx_query(struct vmw_resource *ctx_res, 882 1.3 riastrad struct vmw_buffer_object *mob) 883 1.2 riastrad { 884 1.2 riastrad struct vmw_user_context *uctx = 885 1.2 riastrad container_of(ctx_res, struct vmw_user_context, res); 886 1.2 riastrad 887 1.2 riastrad if (mob == NULL) { 888 1.2 riastrad if (uctx->dx_query_mob) { 889 1.2 riastrad uctx->dx_query_mob->dx_query_ctx = NULL; 890 1.3 riastrad vmw_bo_unreference(&uctx->dx_query_mob); 891 1.2 riastrad uctx->dx_query_mob = NULL; 892 1.2 riastrad } 893 1.2 riastrad 894 1.2 riastrad return 0; 895 1.2 riastrad } 896 1.2 riastrad 897 1.2 riastrad /* Can only have one MOB per context for queries */ 898 1.2 riastrad if (uctx->dx_query_mob && uctx->dx_query_mob != mob) 899 1.2 riastrad return -EINVAL; 900 1.2 riastrad 901 1.2 riastrad mob->dx_query_ctx = ctx_res; 902 1.2 riastrad 903 1.2 riastrad if (!uctx->dx_query_mob) 904 1.3 riastrad uctx->dx_query_mob = vmw_bo_reference(mob); 905 1.2 riastrad 906 1.2 riastrad return 0; 907 1.2 riastrad } 908 1.2 riastrad 909 1.2 riastrad /** 910 1.2 riastrad * 
vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob 911 1.2 riastrad * 912 1.2 riastrad * @ctx_res: The context resource 913 1.2 riastrad */ 914 1.3 riastrad struct vmw_buffer_object * 915 1.2 riastrad vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res) 916 1.2 riastrad { 917 1.2 riastrad struct vmw_user_context *uctx = 918 1.2 riastrad container_of(ctx_res, struct vmw_user_context, res); 919 1.1 riastrad 920 1.2 riastrad return uctx->dx_query_mob; 921 1.1 riastrad } 922