1 1.5 riastrad /* $NetBSD: vmwgfx_execbuf.c,v 1.5 2022/10/25 23:35:57 riastradh Exp $ */ 2 1.2 riastrad 3 1.3 riastrad // SPDX-License-Identifier: GPL-2.0 OR MIT 4 1.1 riastrad /************************************************************************** 5 1.1 riastrad * 6 1.3 riastrad * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA 7 1.1 riastrad * 8 1.1 riastrad * Permission is hereby granted, free of charge, to any person obtaining a 9 1.1 riastrad * copy of this software and associated documentation files (the 10 1.1 riastrad * "Software"), to deal in the Software without restriction, including 11 1.1 riastrad * without limitation the rights to use, copy, modify, merge, publish, 12 1.1 riastrad * distribute, sub license, and/or sell copies of the Software, and to 13 1.1 riastrad * permit persons to whom the Software is furnished to do so, subject to 14 1.1 riastrad * the following conditions: 15 1.1 riastrad * 16 1.1 riastrad * The above copyright notice and this permission notice (including the 17 1.1 riastrad * next paragraph) shall be included in all copies or substantial portions 18 1.1 riastrad * of the Software. 19 1.1 riastrad * 20 1.1 riastrad * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 1.1 riastrad * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22 1.1 riastrad * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 23 1.1 riastrad * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 24 1.1 riastrad * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 25 1.1 riastrad * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 26 1.1 riastrad * USE OR OTHER DEALINGS IN THE SOFTWARE. 27 1.1 riastrad * 28 1.1 riastrad **************************************************************************/ 29 1.2 riastrad #include <sys/cdefs.h> 30 1.5 riastrad __KERNEL_RCSID(0, "$NetBSD: vmwgfx_execbuf.c,v 1.5 2022/10/25 23:35:57 riastradh Exp $"); 31 1.2 riastrad 32 1.3 riastrad #include <linux/sync_file.h> 33 1.3 riastrad 34 1.5 riastrad #ifdef __NetBSD__ 35 1.5 riastrad #include <sys/filedesc.h> 36 1.5 riastrad #endif 37 1.5 riastrad 38 1.1 riastrad #include "vmwgfx_drv.h" 39 1.1 riastrad #include "vmwgfx_reg.h" 40 1.1 riastrad #include <drm/ttm/ttm_bo_api.h> 41 1.1 riastrad #include <drm/ttm/ttm_placement.h> 42 1.2 riastrad #include "vmwgfx_so.h" 43 1.2 riastrad #include "vmwgfx_binding.h" 44 1.1 riastrad 45 1.1 riastrad #define VMW_RES_HT_ORDER 12 46 1.1 riastrad 47 1.3 riastrad /* 48 1.3 riastrad * Helper macro to get dx_ctx_node if available otherwise print an error 49 1.3 riastrad * message. This is for use in command verifier function where if dx_ctx_node 50 1.3 riastrad * is not set then command is invalid. 51 1.3 riastrad */ 52 1.3 riastrad #define VMW_GET_CTX_NODE(__sw_context) \ 53 1.3 riastrad ({ \ 54 1.3 riastrad __sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({ \ 55 1.3 riastrad VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \ 56 1.3 riastrad __sw_context->dx_ctx_node; \ 57 1.3 riastrad }); \ 58 1.3 riastrad }) 59 1.3 riastrad 60 1.3 riastrad #define VMW_DECLARE_CMD_VAR(__var, __type) \ 61 1.3 riastrad struct { \ 62 1.3 riastrad SVGA3dCmdHeader header; \ 63 1.3 riastrad __type body; \ 64 1.3 riastrad } __var 65 1.3 riastrad 66 1.3 riastrad /** 67 1.3 riastrad * struct vmw_relocation - Buffer object relocation 68 1.3 riastrad * 69 1.3 riastrad * @head: List head for the command submission context's relocation list 70 1.3 riastrad * @vbo: Non ref-counted pointer to buffer object 71 1.3 riastrad * @mob_loc: Pointer to location for mob id to be modified 72 1.3 riastrad * @location: Pointer to location for guest pointer to be modified 73 1.3 riastrad */ 74 1.3 riastrad struct vmw_relocation { 75 1.3 riastrad struct list_head head; 76 1.3 riastrad struct vmw_buffer_object *vbo; 77 1.3 riastrad union { 78 1.3 riastrad SVGAMobId *mob_loc; 79 1.3 riastrad SVGAGuestPtr *location; 80 1.3 riastrad }; 81 1.3 riastrad }; 82 1.3 riastrad 83 1.3 riastrad /** 84 1.3 riastrad * enum vmw_resource_relocation_type - Relocation type for resources 85 1.3 riastrad * 86 1.3 riastrad * @vmw_res_rel_normal: Traditional relocation. The resource id in the 87 1.3 riastrad * command stream is replaced with the actual id after validation. 88 1.3 riastrad * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced 89 1.3 riastrad * with a NOP. 90 1.3 riastrad * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after 91 1.3 riastrad * validation is -1, the command is replaced with a NOP. Otherwise no action. 92 1.3 riastrad */ 93 1.3 riastrad enum vmw_resource_relocation_type { 94 1.3 riastrad vmw_res_rel_normal, 95 1.3 riastrad vmw_res_rel_nop, 96 1.3 riastrad vmw_res_rel_cond_nop, 97 1.3 riastrad vmw_res_rel_max 98 1.3 riastrad }; 99 1.3 riastrad 100 1.1 riastrad /** 101 1.1 riastrad * struct vmw_resource_relocation - Relocation info for resources 102 1.1 riastrad * 103 1.1 riastrad * @head: List head for the software context's relocation list. 104 1.1 riastrad * @res: Non-ref-counted pointer to the resource. 105 1.3 riastrad * @offset: Offset of single byte entries into the command buffer where the id 106 1.3 riastrad * that needs fixup is located. 107 1.3 riastrad * @rel_type: Type of relocation. 108 1.1 riastrad */ 109 1.1 riastrad struct vmw_resource_relocation { 110 1.1 riastrad struct list_head head; 111 1.1 riastrad const struct vmw_resource *res; 112 1.3 riastrad u32 offset:29; 113 1.3 riastrad enum vmw_resource_relocation_type rel_type:3; 114 1.1 riastrad }; 115 1.1 riastrad 116 1.1 riastrad /** 117 1.3 riastrad * struct vmw_ctx_validation_info - Extra validation metadata for contexts 118 1.1 riastrad * 119 1.3 riastrad * @head: List head of context list 120 1.3 riastrad * @ctx: The context resource 121 1.3 riastrad * @cur: The context's persistent binding state 122 1.3 riastrad * @staged: The binding state changes of this command buffer 123 1.1 riastrad */ 124 1.3 riastrad struct vmw_ctx_validation_info { 125 1.1 riastrad struct list_head head; 126 1.3 riastrad struct vmw_resource *ctx; 127 1.3 riastrad struct vmw_ctx_binding_state *cur; 128 1.3 riastrad struct vmw_ctx_binding_state *staged; 129 1.2 riastrad }; 130 1.2 riastrad 131 1.2 riastrad /** 132 1.2 riastrad * struct vmw_cmd_entry - Describe a command for the verifier 133 1.2 riastrad * 134 1.2 riastrad * @user_allow: Whether allowed from the execbuf ioctl. 135 1.2 riastrad * @gb_disable: Whether disabled if guest-backed objects are available. 136 1.2 riastrad * @gb_enable: Whether enabled iff guest-backed objects are available. 137 1.2 riastrad */ 138 1.2 riastrad struct vmw_cmd_entry { 139 1.2 riastrad int (*func) (struct vmw_private *, struct vmw_sw_context *, 140 1.2 riastrad SVGA3dCmdHeader *); 141 1.2 riastrad bool user_allow; 142 1.2 riastrad bool gb_disable; 143 1.2 riastrad bool gb_enable; 144 1.3 riastrad const char *cmd_name; 145 1.1 riastrad }; 146 1.1 riastrad 147 1.2 riastrad #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \ 148 1.2 riastrad [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\ 149 1.3 riastrad (_gb_disable), (_gb_enable), #_cmd} 150 1.2 riastrad 151 1.2 riastrad static int vmw_resource_context_res_add(struct vmw_private *dev_priv, 152 1.2 riastrad struct vmw_sw_context *sw_context, 153 1.2 riastrad struct vmw_resource *ctx); 154 1.2 riastrad static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, 155 1.2 riastrad struct vmw_sw_context *sw_context, 156 1.2 riastrad SVGAMobId *id, 157 1.3 riastrad struct vmw_buffer_object **vmw_bo_p); 158 1.3 riastrad /** 159 1.3 riastrad * vmw_ptr_diff - Compute the offset from a to b in bytes 160 1.3 riastrad * 161 1.3 riastrad * @a: A starting pointer. 162 1.3 riastrad * @b: A pointer offset in the same address space. 163 1.3 riastrad * 164 1.3 riastrad * Returns: The offset in bytes between the two pointers. 165 1.3 riastrad */ 166 1.3 riastrad static size_t vmw_ptr_diff(void *a, void *b) 167 1.3 riastrad { 168 1.3 riastrad return (unsigned long) b - (unsigned long) a; 169 1.3 riastrad } 170 1.2 riastrad 171 1.1 riastrad /** 172 1.3 riastrad * vmw_execbuf_bindings_commit - Commit modified binding state 173 1.1 riastrad * 174 1.3 riastrad * @sw_context: The command submission context 175 1.3 riastrad * @backoff: Whether this is part of the error path and binding state changes 176 1.3 riastrad * should be ignored 177 1.1 riastrad */ 178 1.3 riastrad static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context, 179 1.3 riastrad bool backoff) 180 1.1 riastrad { 181 1.3 riastrad struct vmw_ctx_validation_info *entry; 182 1.2 riastrad 183 1.3 riastrad list_for_each_entry(entry, &sw_context->ctx_list, head) { 184 1.3 riastrad if (!backoff) 185 1.3 riastrad vmw_binding_state_commit(entry->cur, entry->staged); 186 1.1 riastrad 187 1.3 riastrad if (entry->staged != sw_context->staged_bindings) 188 1.3 riastrad vmw_binding_state_free(entry->staged); 189 1.3 riastrad else 190 1.3 riastrad sw_context->staged_bindings_inuse = false; 191 1.3 riastrad } 192 1.1 riastrad 193 1.3 riastrad /* List entries are freed with the validation context */ 194 1.3 riastrad INIT_LIST_HEAD(&sw_context->ctx_list); 195 1.3 riastrad } 196 1.2 riastrad 197 1.3 riastrad /** 198 1.3 riastrad * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced 199 1.3 riastrad * 200 1.3 riastrad * @sw_context: The command submission context 201 1.3 riastrad */ 202 1.3 riastrad static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context) 203 1.3 riastrad { 204 1.3 riastrad if (sw_context->dx_query_mob) 205 1.3 riastrad vmw_context_bind_dx_query(sw_context->dx_query_ctx, 206 1.3 riastrad sw_context->dx_query_mob); 207 1.1 riastrad } 208 1.1 riastrad 209 1.2 riastrad /** 210 1.3 riastrad * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to 211 1.3 riastrad * the validate list. 212 1.2 riastrad * 213 1.2 riastrad * @dev_priv: Pointer to the device private: 214 1.3 riastrad * @sw_context: The command submission context 215 1.3 riastrad * @node: The validation node holding the context resource metadata 216 1.2 riastrad */ 217 1.2 riastrad static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv, 218 1.2 riastrad struct vmw_sw_context *sw_context, 219 1.3 riastrad struct vmw_resource *res, 220 1.3 riastrad struct vmw_ctx_validation_info *node) 221 1.2 riastrad { 222 1.2 riastrad int ret; 223 1.2 riastrad 224 1.3 riastrad ret = vmw_resource_context_res_add(dev_priv, sw_context, res); 225 1.2 riastrad if (unlikely(ret != 0)) 226 1.2 riastrad goto out_err; 227 1.2 riastrad 228 1.2 riastrad if (!sw_context->staged_bindings) { 229 1.3 riastrad sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv); 230 1.2 riastrad if (IS_ERR(sw_context->staged_bindings)) { 231 1.2 riastrad ret = PTR_ERR(sw_context->staged_bindings); 232 1.2 riastrad sw_context->staged_bindings = NULL; 233 1.2 riastrad goto out_err; 234 1.2 riastrad } 235 1.2 riastrad } 236 1.2 riastrad 237 1.2 riastrad if (sw_context->staged_bindings_inuse) { 238 1.3 riastrad node->staged = vmw_binding_state_alloc(dev_priv); 239 1.3 riastrad if (IS_ERR(node->staged)) { 240 1.3 riastrad ret = PTR_ERR(node->staged); 241 1.3 riastrad node->staged = NULL; 242 1.2 riastrad goto out_err; 243 1.2 riastrad } 244 1.2 riastrad } else { 245 1.3 riastrad node->staged = sw_context->staged_bindings; 246 1.2 riastrad sw_context->staged_bindings_inuse = true; 247 1.2 riastrad } 248 1.2 riastrad 249 1.3 riastrad node->ctx = res; 250 1.3 riastrad node->cur = vmw_context_binding_state(res); 251 1.3 riastrad list_add_tail(&node->head, &sw_context->ctx_list); 252 1.3 riastrad 253 1.2 riastrad return 0; 254 1.3 riastrad 255 1.2 riastrad out_err: 256 1.2 riastrad return ret; 257 1.2 riastrad } 258 1.1 riastrad 259 1.1 riastrad /** 260 1.3 riastrad * vmw_execbuf_res_size - calculate extra size fore the resource validation node 261 1.3 riastrad * 262 1.3 riastrad * @dev_priv: Pointer to the device private struct. 263 1.3 riastrad * @res_type: The resource type. 264 1.3 riastrad * 265 1.3 riastrad * Guest-backed contexts and DX contexts require extra size to store execbuf 266 1.3 riastrad * private information in the validation node. Typically the binding manager 267 1.3 riastrad * associated data structures. 268 1.3 riastrad * 269 1.3 riastrad * Returns: The extra size requirement based on resource type. 270 1.3 riastrad */ 271 1.3 riastrad static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv, 272 1.3 riastrad enum vmw_res_type res_type) 273 1.3 riastrad { 274 1.3 riastrad return (res_type == vmw_res_dx_context || 275 1.3 riastrad (res_type == vmw_res_context && dev_priv->has_mob)) ? 276 1.3 riastrad sizeof(struct vmw_ctx_validation_info) : 0; 277 1.3 riastrad } 278 1.3 riastrad 279 1.3 riastrad /** 280 1.3 riastrad * vmw_execbuf_rcache_update - Update a resource-node cache entry 281 1.3 riastrad * 282 1.3 riastrad * @rcache: Pointer to the entry to update. 283 1.3 riastrad * @res: Pointer to the resource. 284 1.3 riastrad * @private: Pointer to the execbuf-private space in the resource validation 285 1.3 riastrad * node. 286 1.3 riastrad */ 287 1.3 riastrad static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache, 288 1.3 riastrad struct vmw_resource *res, 289 1.3 riastrad void *private) 290 1.3 riastrad { 291 1.3 riastrad rcache->res = res; 292 1.3 riastrad rcache->private = private; 293 1.3 riastrad rcache->valid = 1; 294 1.3 riastrad rcache->valid_handle = 0; 295 1.3 riastrad } 296 1.3 riastrad 297 1.3 riastrad /** 298 1.3 riastrad * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced 299 1.3 riastrad * rcu-protected pointer to the validation list. 300 1.1 riastrad * 301 1.1 riastrad * @sw_context: Pointer to the software context. 302 1.3 riastrad * @res: Unreferenced rcu-protected pointer to the resource. 303 1.3 riastrad * @dirty: Whether to change dirty status. 304 1.3 riastrad * 305 1.3 riastrad * Returns: 0 on success. Negative error code on failure. Typical error codes 306 1.3 riastrad * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed. 307 1.1 riastrad */ 308 1.3 riastrad static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context, 309 1.3 riastrad struct vmw_resource *res, 310 1.3 riastrad u32 dirty) 311 1.1 riastrad { 312 1.2 riastrad struct vmw_private *dev_priv = res->dev_priv; 313 1.1 riastrad int ret; 314 1.3 riastrad enum vmw_res_type res_type = vmw_res_type(res); 315 1.3 riastrad struct vmw_res_cache_entry *rcache; 316 1.3 riastrad struct vmw_ctx_validation_info *ctx_info; 317 1.3 riastrad bool first_usage; 318 1.3 riastrad unsigned int priv_size; 319 1.3 riastrad 320 1.3 riastrad rcache = &sw_context->res_cache[res_type]; 321 1.3 riastrad if (likely(rcache->valid && rcache->res == res)) { 322 1.3 riastrad if (dirty) 323 1.3 riastrad vmw_validation_res_set_dirty(sw_context->ctx, 324 1.3 riastrad rcache->private, dirty); 325 1.3 riastrad vmw_user_resource_noref_release(); 326 1.1 riastrad return 0; 327 1.1 riastrad } 328 1.1 riastrad 329 1.3 riastrad priv_size = vmw_execbuf_res_size(dev_priv, res_type); 330 1.3 riastrad ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size, 331 1.3 riastrad dirty, (void **)&ctx_info, 332 1.3 riastrad &first_usage); 333 1.3 riastrad vmw_user_resource_noref_release(); 334 1.3 riastrad if (ret) 335 1.3 riastrad return ret; 336 1.3 riastrad 337 1.3 riastrad if (priv_size && first_usage) { 338 1.3 riastrad ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res, 339 1.3 riastrad ctx_info); 340 1.3 riastrad if (ret) { 341 1.3 riastrad VMW_DEBUG_USER("Failed first usage context setup.\n"); 342 1.3 riastrad return ret; 343 1.3 riastrad } 344 1.1 riastrad } 345 1.1 riastrad 346 1.3 riastrad vmw_execbuf_rcache_update(rcache, res, ctx_info); 347 1.3 riastrad return 0; 348 1.3 riastrad } 349 1.3 riastrad 350 1.3 riastrad /** 351 1.3 riastrad * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource 352 1.3 riastrad * validation list if it's not already on it 353 1.3 riastrad * 354 1.3 riastrad * @sw_context: Pointer to the software context. 355 1.3 riastrad * @res: Pointer to the resource. 356 1.3 riastrad * @dirty: Whether to change dirty status. 357 1.3 riastrad * 358 1.3 riastrad * Returns: Zero on success. Negative error code on failure. 359 1.3 riastrad */ 360 1.3 riastrad static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context, 361 1.3 riastrad struct vmw_resource *res, 362 1.3 riastrad u32 dirty) 363 1.3 riastrad { 364 1.3 riastrad struct vmw_res_cache_entry *rcache; 365 1.3 riastrad enum vmw_res_type res_type = vmw_res_type(res); 366 1.3 riastrad void *ptr; 367 1.3 riastrad int ret; 368 1.1 riastrad 369 1.3 riastrad rcache = &sw_context->res_cache[res_type]; 370 1.3 riastrad if (likely(rcache->valid && rcache->res == res)) { 371 1.3 riastrad if (dirty) 372 1.3 riastrad vmw_validation_res_set_dirty(sw_context->ctx, 373 1.3 riastrad rcache->private, dirty); 374 1.2 riastrad return 0; 375 1.2 riastrad } 376 1.2 riastrad 377 1.3 riastrad ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty, 378 1.3 riastrad &ptr, NULL); 379 1.3 riastrad if (ret) 380 1.3 riastrad return ret; 381 1.3 riastrad 382 1.3 riastrad vmw_execbuf_rcache_update(rcache, res, ptr); 383 1.2 riastrad 384 1.3 riastrad return 0; 385 1.2 riastrad } 386 1.2 riastrad 387 1.2 riastrad /** 388 1.3 riastrad * vmw_view_res_val_add - Add a view and the surface it's pointing to to the 389 1.3 riastrad * validation list 390 1.2 riastrad * 391 1.2 riastrad * @sw_context: The software context holding the validation list. 392 1.2 riastrad * @view: Pointer to the view resource. 393 1.2 riastrad * 394 1.2 riastrad * Returns 0 if success, negative error code otherwise. 395 1.2 riastrad */ 396 1.2 riastrad static int vmw_view_res_val_add(struct vmw_sw_context *sw_context, 397 1.2 riastrad struct vmw_resource *view) 398 1.2 riastrad { 399 1.2 riastrad int ret; 400 1.2 riastrad 401 1.2 riastrad /* 402 1.3 riastrad * First add the resource the view is pointing to, otherwise it may be 403 1.3 riastrad * swapped out when the view is validated. 404 1.2 riastrad */ 405 1.3 riastrad ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view), 406 1.3 riastrad vmw_view_dirtying(view)); 407 1.2 riastrad if (ret) 408 1.2 riastrad return ret; 409 1.2 riastrad 410 1.3 riastrad return vmw_execbuf_res_noctx_val_add(sw_context, view, 411 1.3 riastrad VMW_RES_DIRTY_NONE); 412 1.2 riastrad } 413 1.2 riastrad 414 1.2 riastrad /** 415 1.3 riastrad * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing 416 1.3 riastrad * to to the validation list. 417 1.2 riastrad * 418 1.2 riastrad * @sw_context: The software context holding the validation list. 419 1.2 riastrad * @view_type: The view type to look up. 420 1.2 riastrad * @id: view id of the view. 421 1.2 riastrad * 422 1.3 riastrad * The view is represented by a view id and the DX context it's created on, or 423 1.3 riastrad * scheduled for creation on. If there is no DX context set, the function will 424 1.3 riastrad * return an -EINVAL error pointer. 425 1.3 riastrad * 426 1.3 riastrad * Returns: Unreferenced pointer to the resource on success, negative error 427 1.3 riastrad * pointer on failure. 428 1.3 riastrad */ 429 1.3 riastrad static struct vmw_resource * 430 1.3 riastrad vmw_view_id_val_add(struct vmw_sw_context *sw_context, 431 1.3 riastrad enum vmw_view_type view_type, u32 id) 432 1.2 riastrad { 433 1.3 riastrad struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; 434 1.2 riastrad struct vmw_resource *view; 435 1.2 riastrad int ret; 436 1.2 riastrad 437 1.3 riastrad if (!ctx_node) 438 1.3 riastrad return ERR_PTR(-EINVAL); 439 1.2 riastrad 440 1.2 riastrad view = vmw_view_lookup(sw_context->man, view_type, id); 441 1.2 riastrad if (IS_ERR(view)) 442 1.3 riastrad return view; 443 1.2 riastrad 444 1.2 riastrad ret = vmw_view_res_val_add(sw_context, view); 445 1.3 riastrad if (ret) 446 1.3 riastrad return ERR_PTR(ret); 447 1.2 riastrad 448 1.3 riastrad return view; 449 1.2 riastrad } 450 1.2 riastrad 451 1.2 riastrad /** 452 1.2 riastrad * vmw_resource_context_res_add - Put resources previously bound to a context on 453 1.2 riastrad * the validation list 454 1.2 riastrad * 455 1.2 riastrad * @dev_priv: Pointer to a device private structure 456 1.2 riastrad * @sw_context: Pointer to a software context used for this command submission 457 1.2 riastrad * @ctx: Pointer to the context resource 458 1.2 riastrad * 459 1.3 riastrad * This function puts all resources that were previously bound to @ctx on the 460 1.3 riastrad * resource validation list. This is part of the context state reemission 461 1.2 riastrad */ 462 1.2 riastrad static int vmw_resource_context_res_add(struct vmw_private *dev_priv, 463 1.2 riastrad struct vmw_sw_context *sw_context, 464 1.2 riastrad struct vmw_resource *ctx) 465 1.2 riastrad { 466 1.2 riastrad struct list_head *binding_list; 467 1.2 riastrad struct vmw_ctx_bindinfo *entry; 468 1.2 riastrad int ret = 0; 469 1.2 riastrad struct vmw_resource *res; 470 1.2 riastrad u32 i; 471 1.2 riastrad 472 1.2 riastrad /* Add all cotables to the validation list. */ 473 1.2 riastrad if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) { 474 1.2 riastrad for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { 475 1.2 riastrad res = vmw_context_cotable(ctx, i); 476 1.2 riastrad if (IS_ERR(res)) 477 1.2 riastrad continue; 478 1.2 riastrad 479 1.3 riastrad ret = vmw_execbuf_res_noctx_val_add(sw_context, res, 480 1.3 riastrad VMW_RES_DIRTY_SET); 481 1.2 riastrad if (unlikely(ret != 0)) 482 1.2 riastrad return ret; 483 1.2 riastrad } 484 1.2 riastrad } 485 1.2 riastrad 486 1.2 riastrad /* Add all resources bound to the context to the validation list */ 487 1.2 riastrad mutex_lock(&dev_priv->binding_mutex); 488 1.2 riastrad binding_list = vmw_context_binding_list(ctx); 489 1.2 riastrad 490 1.2 riastrad list_for_each_entry(entry, binding_list, ctx_list) { 491 1.2 riastrad if (vmw_res_type(entry->res) == vmw_res_view) 492 1.2 riastrad ret = vmw_view_res_val_add(sw_context, entry->res); 493 1.2 riastrad else 494 1.3 riastrad ret = vmw_execbuf_res_noctx_val_add 495 1.3 riastrad (sw_context, entry->res, 496 1.3 riastrad vmw_binding_dirtying(entry->bt)); 497 1.2 riastrad if (unlikely(ret != 0)) 498 1.2 riastrad break; 499 1.2 riastrad } 500 1.2 riastrad 501 1.2 riastrad if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) { 502 1.3 riastrad struct vmw_buffer_object *dx_query_mob; 503 1.2 riastrad 504 1.2 riastrad dx_query_mob = vmw_context_get_dx_query_mob(ctx); 505 1.2 riastrad if (dx_query_mob) 506 1.3 riastrad ret = vmw_validation_add_bo(sw_context->ctx, 507 1.3 riastrad dx_query_mob, true, false); 508 1.2 riastrad } 509 1.2 riastrad 510 1.2 riastrad mutex_unlock(&dev_priv->binding_mutex); 511 1.2 riastrad return ret; 512 1.1 riastrad } 513 1.1 riastrad 514 1.1 riastrad /** 515 1.1 riastrad * vmw_resource_relocation_add - Add a relocation to the relocation list 516 1.1 riastrad * 517 1.1 riastrad * @list: Pointer to head of relocation list. 518 1.1 riastrad * @res: The resource. 519 1.3 riastrad * @offset: Offset into the command buffer currently being parsed where the id 520 1.3 riastrad * that needs fixup is located. Granularity is one byte. 521 1.3 riastrad * @rel_type: Relocation type. 522 1.1 riastrad */ 523 1.3 riastrad static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context, 524 1.1 riastrad const struct vmw_resource *res, 525 1.3 riastrad unsigned long offset, 526 1.3 riastrad enum vmw_resource_relocation_type 527 1.3 riastrad rel_type) 528 1.1 riastrad { 529 1.1 riastrad struct vmw_resource_relocation *rel; 530 1.1 riastrad 531 1.3 riastrad rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel)); 532 1.3 riastrad if (unlikely(!rel)) { 533 1.3 riastrad VMW_DEBUG_USER("Failed to allocate a resource relocation.\n"); 534 1.1 riastrad return -ENOMEM; 535 1.1 riastrad } 536 1.1 riastrad 537 1.1 riastrad rel->res = res; 538 1.1 riastrad rel->offset = offset; 539 1.3 riastrad rel->rel_type = rel_type; 540 1.3 riastrad list_add_tail(&rel->head, &sw_context->res_relocations); 541 1.1 riastrad 542 1.1 riastrad return 0; 543 1.1 riastrad } 544 1.1 riastrad 545 1.1 riastrad /** 546 1.1 riastrad * vmw_resource_relocations_free - Free all relocations on a list 547 1.1 riastrad * 548 1.3 riastrad * @list: Pointer to the head of the relocation list 549 1.1 riastrad */ 550 1.1 riastrad static void vmw_resource_relocations_free(struct list_head *list) 551 1.1 riastrad { 552 1.3 riastrad /* Memory is validation context memory, so no need to free it */ 553 1.3 riastrad INIT_LIST_HEAD(list); 554 1.1 riastrad } 555 1.1 riastrad 556 1.1 riastrad /** 557 1.1 riastrad * vmw_resource_relocations_apply - Apply all relocations on a list 558 1.1 riastrad * 559 1.3 riastrad * @cb: Pointer to the start of the command buffer bein patch. This need not be 560 1.3 riastrad * the same buffer as the one being parsed when the relocation list was built, 561 1.3 riastrad * but the contents must be the same modulo the resource ids. 562 1.1 riastrad * @list: Pointer to the head of the relocation list. 563 1.1 riastrad */ 564 1.1 riastrad static void vmw_resource_relocations_apply(uint32_t *cb, 565 1.1 riastrad struct list_head *list) 566 1.1 riastrad { 567 1.1 riastrad struct vmw_resource_relocation *rel; 568 1.1 riastrad 569 1.3 riastrad /* Validate the struct vmw_resource_relocation member size */ 570 1.3 riastrad BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29)); 571 1.3 riastrad BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3)); 572 1.3 riastrad 573 1.2 riastrad list_for_each_entry(rel, list, head) { 574 1.3 riastrad u32 *addr = (u32 *)((unsigned long) cb + rel->offset); 575 1.3 riastrad switch (rel->rel_type) { 576 1.3 riastrad case vmw_res_rel_normal: 577 1.3 riastrad *addr = rel->res->id; 578 1.3 riastrad break; 579 1.3 riastrad case vmw_res_rel_nop: 580 1.3 riastrad *addr = SVGA_3D_CMD_NOP; 581 1.3 riastrad break; 582 1.3 riastrad default: 583 1.3 riastrad if (rel->res->id == -1) 584 1.3 riastrad *addr = SVGA_3D_CMD_NOP; 585 1.3 riastrad break; 586 1.3 riastrad } 587 1.2 riastrad } 588 1.1 riastrad } 589 1.1 riastrad 590 1.1 riastrad static int vmw_cmd_invalid(struct vmw_private *dev_priv, 591 1.1 riastrad struct vmw_sw_context *sw_context, 592 1.1 riastrad SVGA3dCmdHeader *header) 593 1.1 riastrad { 594 1.2 riastrad return -EINVAL; 595 1.1 riastrad } 596 1.1 riastrad 597 1.1 riastrad static int vmw_cmd_ok(struct vmw_private *dev_priv, 598 1.1 riastrad struct vmw_sw_context *sw_context, 599 1.1 riastrad SVGA3dCmdHeader *header) 600 1.1 riastrad { 601 1.1 riastrad return 0; 602 1.1 riastrad } 603 1.1 riastrad 604 1.1 riastrad /** 605 1.3 riastrad * vmw_resources_reserve - Reserve all resources on the sw_context's resource 606 1.3 riastrad * list. 607 1.1 riastrad * 608 1.1 riastrad * @sw_context: Pointer to the software context. 609 1.1 riastrad * 610 1.3 riastrad * Note that since vmware's command submission currently is protected by the 611 1.3 riastrad * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since 612 1.3 riastrad * only a single thread at once will attempt this. 613 1.1 riastrad */ 614 1.1 riastrad static int vmw_resources_reserve(struct vmw_sw_context *sw_context) 615 1.1 riastrad { 616 1.3 riastrad int ret; 617 1.1 riastrad 618 1.3 riastrad ret = vmw_validation_res_reserve(sw_context->ctx, true); 619 1.3 riastrad if (ret) 620 1.3 riastrad return ret; 621 1.2 riastrad 622 1.2 riastrad if (sw_context->dx_query_mob) { 623 1.3 riastrad struct vmw_buffer_object *expected_dx_query_mob; 624 1.2 riastrad 625 1.2 riastrad expected_dx_query_mob = 626 1.2 riastrad vmw_context_get_dx_query_mob(sw_context->dx_query_ctx); 627 1.2 riastrad if (expected_dx_query_mob && 628 1.2 riastrad expected_dx_query_mob != sw_context->dx_query_mob) { 629 1.2 riastrad ret = -EINVAL; 630 1.2 riastrad } 631 1.2 riastrad } 632 1.2 riastrad 633 1.2 riastrad return ret; 634 1.1 riastrad } 635 1.1 riastrad 636 1.1 riastrad /** 637 1.3 riastrad * vmw_cmd_res_check - Check that a resource is present and if so, put it on the 638 1.3 riastrad * resource validate list unless it's already there. 639 1.1 riastrad * 640 1.1 riastrad * @dev_priv: Pointer to a device private structure. 641 1.1 riastrad * @sw_context: Pointer to the software context. 642 1.1 riastrad * @res_type: Resource type. 643 1.3 riastrad * @dirty: Whether to change dirty status. 644 1.1 riastrad * @converter: User-space visisble type specific information. 645 1.3 riastrad * @id_loc: Pointer to the location in the command buffer currently being parsed 646 1.3 riastrad * from where the user-space resource id handle is located. 647 1.3 riastrad * @p_val: Pointer to pointer to resource validalidation node. Populated on 648 1.3 riastrad * exit. 649 1.1 riastrad */ 650 1.2 riastrad static int 651 1.2 riastrad vmw_cmd_res_check(struct vmw_private *dev_priv, 652 1.2 riastrad struct vmw_sw_context *sw_context, 653 1.2 riastrad enum vmw_res_type res_type, 654 1.3 riastrad u32 dirty, 655 1.2 riastrad const struct vmw_user_resource_conv *converter, 656 1.2 riastrad uint32_t *id_loc, 657 1.3 riastrad struct vmw_resource **p_res) 658 1.1 riastrad { 659 1.3 riastrad struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type]; 660 1.1 riastrad struct vmw_resource *res; 661 1.1 riastrad int ret; 662 1.1 riastrad 663 1.3 riastrad if (p_res) 664 1.3 riastrad *p_res = NULL; 665 1.3 riastrad 666 1.2 riastrad if (*id_loc == SVGA3D_INVALID_ID) { 667 1.2 riastrad if (res_type == vmw_res_context) { 668 1.3 riastrad VMW_DEBUG_USER("Illegal context invalid id.\n"); 669 1.2 riastrad return -EINVAL; 670 1.2 riastrad } 671 1.1 riastrad return 0; 672 1.2 riastrad } 673 1.1 riastrad 674 1.3 riastrad if (likely(rcache->valid_handle && *id_loc == rcache->handle)) { 675 1.3 riastrad res = rcache->res; 676 1.3 riastrad if (dirty) 677 1.3 riastrad vmw_validation_res_set_dirty(sw_context->ctx, 678 1.3 riastrad rcache->private, dirty); 679 1.3 riastrad } else { 680 1.3 riastrad unsigned int size = vmw_execbuf_res_size(dev_priv, res_type); 681 1.3 riastrad 682 1.3 riastrad ret = vmw_validation_preload_res(sw_context->ctx, size); 683 1.3 riastrad if (ret) 684 1.3 riastrad return ret; 685 1.3 riastrad 686 1.3 riastrad res = vmw_user_resource_noref_lookup_handle 687 1.3 riastrad (dev_priv, sw_context->fp->tfile, *id_loc, converter); 688 1.3 riastrad if (IS_ERR(res)) { 689 1.3 riastrad VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n", 690 1.3 riastrad (unsigned int) *id_loc); 691 1.3 riastrad return PTR_ERR(res); 692 1.3 riastrad } 693 1.1 riastrad 694 1.3 riastrad ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty); 695 1.3 riastrad if (unlikely(ret != 0)) 696 1.3 riastrad return ret; 697 1.1 riastrad 698 1.3 riastrad if (rcache->valid && rcache->res == res) { 699 1.3 riastrad rcache->valid_handle = true; 700 1.3 riastrad rcache->handle = *id_loc; 701 1.3 riastrad } 702 1.1 riastrad } 703 1.1 riastrad 704 1.3 riastrad ret = vmw_resource_relocation_add(sw_context, res, 705 1.3 riastrad vmw_ptr_diff(sw_context->buf_start, 706 1.3 riastrad id_loc), 707 1.3 riastrad vmw_res_rel_normal); 708 1.3 riastrad if (p_res) 709 1.3 riastrad *p_res = res; 710 1.1 riastrad 711 1.1 riastrad return 0; 712 1.1 riastrad } 713 1.1 riastrad 714 1.1 riastrad /** 715 1.2 riastrad * vmw_rebind_dx_query - Rebind DX query associated with the context 716 1.2 riastrad * 717 1.2 riastrad * @ctx_res: context the query belongs to 718 1.2 riastrad * 719 1.2 riastrad * This function assumes binding_mutex is held. 720 1.2 riastrad */ 721 1.2 riastrad static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res) 722 1.2 riastrad { 723 1.2 riastrad struct vmw_private *dev_priv = ctx_res->dev_priv; 724 1.3 riastrad struct vmw_buffer_object *dx_query_mob; 725 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery); 726 1.2 riastrad 727 1.2 riastrad dx_query_mob = vmw_context_get_dx_query_mob(ctx_res); 728 1.2 riastrad 729 1.2 riastrad if (!dx_query_mob || dx_query_mob->dx_query_ctx) 730 1.2 riastrad return 0; 731 1.2 riastrad 732 1.3 riastrad cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id); 733 1.3 riastrad if (cmd == NULL) 734 1.2 riastrad return -ENOMEM; 735 1.2 riastrad 736 1.2 riastrad cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY; 737 1.2 riastrad cmd->header.size = sizeof(cmd->body); 738 1.2 riastrad cmd->body.cid = ctx_res->id; 739 1.2 riastrad cmd->body.mobid = dx_query_mob->base.mem.start; 740 1.2 riastrad vmw_fifo_commit(dev_priv, sizeof(*cmd)); 741 1.2 riastrad 742 1.2 riastrad vmw_context_bind_dx_query(ctx_res, dx_query_mob); 743 1.2 riastrad 744 1.2 riastrad return 0; 745 1.2 riastrad } 746 1.2 riastrad 747 1.2 riastrad /** 748 1.3 riastrad * vmw_rebind_contexts - Rebind all resources previously bound to referenced 749 1.3 riastrad * contexts. 750 1.2 riastrad * 751 1.2 riastrad * @sw_context: Pointer to the software context. 752 1.2 riastrad * 753 1.2 riastrad * Rebind context binding points that have been scrubbed because of eviction. 754 1.2 riastrad */ 755 1.2 riastrad static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) 756 1.2 riastrad { 757 1.3 riastrad struct vmw_ctx_validation_info *val; 758 1.2 riastrad int ret; 759 1.2 riastrad 760 1.3 riastrad list_for_each_entry(val, &sw_context->ctx_list, head) { 761 1.3 riastrad ret = vmw_binding_rebind_all(val->cur); 762 1.2 riastrad if (unlikely(ret != 0)) { 763 1.2 riastrad if (ret != -ERESTARTSYS) 764 1.3 riastrad VMW_DEBUG_USER("Failed to rebind context.\n"); 765 1.2 riastrad return ret; 766 1.2 riastrad } 767 1.2 riastrad 768 1.3 riastrad ret = vmw_rebind_all_dx_query(val->ctx); 769 1.3 riastrad if (ret != 0) { 770 1.3 riastrad VMW_DEBUG_USER("Failed to rebind queries.\n"); 771 1.2 riastrad return ret; 772 1.3 riastrad } 773 1.2 riastrad } 774 1.2 riastrad 775 1.2 riastrad return 0; 776 1.2 riastrad } 777 1.2 riastrad 778 1.2 riastrad /** 779 1.3 riastrad * vmw_view_bindings_add - Add an array of view bindings to a context binding 780 1.3 riastrad * state tracker. 781 1.2 riastrad * 782 1.2 riastrad * @sw_context: The execbuf state used for this command. 783 1.2 riastrad * @view_type: View type for the bindings. 784 1.2 riastrad * @binding_type: Binding type for the bindings. 785 1.2 riastrad * @shader_slot: The shader slot to user for the bindings. 786 1.2 riastrad * @view_ids: Array of view ids to be bound. 787 1.2 riastrad * @num_views: Number of view ids in @view_ids. 788 1.2 riastrad * @first_slot: The binding slot to be used for the first view id in @view_ids. 789 1.2 riastrad */ 790 1.2 riastrad static int vmw_view_bindings_add(struct vmw_sw_context *sw_context, 791 1.2 riastrad enum vmw_view_type view_type, 792 1.2 riastrad enum vmw_ctx_binding_type binding_type, 793 1.2 riastrad uint32 shader_slot, 794 1.2 riastrad uint32 view_ids[], u32 num_views, 795 1.2 riastrad u32 first_slot) 796 1.2 riastrad { 797 1.3 riastrad struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 798 1.2 riastrad u32 i; 799 1.2 riastrad 800 1.3 riastrad if (!ctx_node) 801 1.2 riastrad return -EINVAL; 802 1.2 riastrad 803 1.2 riastrad for (i = 0; i < num_views; ++i) { 804 1.2 riastrad struct vmw_ctx_bindinfo_view binding; 805 1.2 riastrad struct vmw_resource *view = NULL; 806 1.2 riastrad 807 1.2 riastrad if (view_ids[i] != SVGA3D_INVALID_ID) { 808 1.3 riastrad view = vmw_view_id_val_add(sw_context, view_type, 809 1.3 riastrad view_ids[i]); 810 1.2 riastrad if (IS_ERR(view)) { 811 1.3 riastrad VMW_DEBUG_USER("View not found.\n"); 812 1.2 riastrad return PTR_ERR(view); 813 1.2 riastrad } 814 1.2 riastrad } 815 1.3 riastrad binding.bi.ctx = ctx_node->ctx; 816 1.2 riastrad binding.bi.res = view; 817 1.2 riastrad binding.bi.bt = binding_type; 818 1.2 riastrad binding.shader_slot = shader_slot; 819 1.2 riastrad binding.slot = first_slot + i; 820 1.3 riastrad vmw_binding_add(ctx_node->staged, &binding.bi, 821 1.2 riastrad shader_slot, binding.slot); 822 1.2 riastrad } 823 1.2 riastrad 824 1.2 riastrad return 0; 825 1.2 riastrad } 826 1.2 riastrad 827 1.2 riastrad /** 828 1.1 riastrad * vmw_cmd_cid_check - Check a command header for valid context information. 829 1.1 riastrad * 830 1.1 riastrad * @dev_priv: Pointer to a device private structure. 831 1.1 riastrad * @sw_context: Pointer to the software context. 832 1.1 riastrad * @header: A command header with an embedded user-space context handle. 833 1.1 riastrad * 834 1.1 riastrad * Convenience function: Call vmw_cmd_res_check with the user-space context 835 1.1 riastrad * handle embedded in @header. 836 1.1 riastrad */ 837 1.1 riastrad static int vmw_cmd_cid_check(struct vmw_private *dev_priv, 838 1.1 riastrad struct vmw_sw_context *sw_context, 839 1.1 riastrad SVGA3dCmdHeader *header) 840 1.1 riastrad { 841 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, uint32_t) = 842 1.3 riastrad container_of(header, typeof(*cmd), header); 843 1.1 riastrad 844 1.1 riastrad return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 845 1.3 riastrad VMW_RES_DIRTY_SET, user_context_converter, 846 1.3 riastrad &cmd->body, NULL); 847 1.3 riastrad } 848 1.3 riastrad 849 1.3 riastrad /** 850 1.3 riastrad * vmw_execbuf_info_from_res - Get the private validation metadata for a 851 1.3 riastrad * recently validated resource 852 1.3 riastrad * 853 1.3 riastrad * @sw_context: Pointer to the command submission context 854 1.3 riastrad * @res: The resource 855 1.3 riastrad * 856 1.3 riastrad * The resource pointed to by @res needs to be present in the command submission 857 1.3 riastrad * context's resource cache and hence the last resource of that type to be 858 1.3 riastrad * processed by the validation code. 859 1.3 riastrad * 860 1.3 riastrad * Return: a pointer to the private metadata of the resource, or NULL if it 861 1.3 riastrad * wasn't found 862 1.3 riastrad */ 863 1.3 riastrad static struct vmw_ctx_validation_info * 864 1.3 riastrad vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context, 865 1.3 riastrad struct vmw_resource *res) 866 1.3 riastrad { 867 1.3 riastrad struct vmw_res_cache_entry *rcache = 868 1.3 riastrad &sw_context->res_cache[vmw_res_type(res)]; 869 1.3 riastrad 870 1.3 riastrad if (rcache->valid && rcache->res == res) 871 1.3 riastrad return rcache->private; 872 1.3 riastrad 873 1.3 riastrad WARN_ON_ONCE(true); 874 1.3 riastrad return NULL; 875 1.1 riastrad } 876 1.1 riastrad 877 1.1 riastrad static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, 878 1.1 riastrad struct vmw_sw_context *sw_context, 879 1.1 riastrad SVGA3dCmdHeader *header) 880 1.1 riastrad { 881 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget); 882 1.3 riastrad struct vmw_resource *ctx; 883 1.3 riastrad struct vmw_resource *res; 884 1.1 riastrad int ret; 885 1.1 riastrad 886 1.3 riastrad cmd = container_of(header, typeof(*cmd), header); 887 1.2 riastrad 888 1.2 riastrad if (cmd->body.type >= SVGA3D_RT_MAX) { 889 1.3 riastrad VMW_DEBUG_USER("Illegal render target type %u.\n", 890 1.3 riastrad (unsigned int) cmd->body.type); 891 1.2 riastrad return -EINVAL; 892 1.2 riastrad } 893 1.2 riastrad 894 1.2 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 895 1.3 riastrad VMW_RES_DIRTY_SET, user_context_converter, 896 1.3 riastrad &cmd->body.cid, &ctx); 897 1.1 riastrad if (unlikely(ret != 0)) 898 1.1 riastrad return ret; 899 1.1 riastrad 900 1.1 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 901 1.3 riastrad VMW_RES_DIRTY_SET, user_surface_converter, 902 1.3 riastrad &cmd->body.target.sid, &res); 903 1.3 riastrad if (unlikely(ret)) 904 1.2 riastrad return ret; 905 1.2 riastrad 906 1.2 riastrad if (dev_priv->has_mob) { 907 1.2 riastrad struct vmw_ctx_bindinfo_view binding; 908 1.3 riastrad struct vmw_ctx_validation_info *node; 909 1.2 riastrad 910 1.3 riastrad node = vmw_execbuf_info_from_res(sw_context, ctx); 911 1.3 riastrad if (!node) 912 1.3 riastrad return -EINVAL; 913 1.3 riastrad 914 1.3 riastrad binding.bi.ctx = ctx; 915 1.3 riastrad binding.bi.res = res; 916 1.2 riastrad binding.bi.bt = vmw_ctx_binding_rt; 917 1.2 riastrad binding.slot = cmd->body.type; 918 1.3 riastrad vmw_binding_add(node->staged, &binding.bi, 0, binding.slot); 919 1.2 riastrad } 920 1.2 riastrad 921 1.2 riastrad return 0; 922 1.1 riastrad } 923 1.1 riastrad 924 1.1 riastrad static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, 925 1.1 riastrad struct vmw_sw_context *sw_context, 926 1.1 riastrad SVGA3dCmdHeader *header) 927 1.1 riastrad { 928 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy); 929 1.1 riastrad int ret; 930 1.1 riastrad 931 1.3 riastrad cmd = container_of(header, typeof(*cmd), header); 932 1.2 riastrad 933 1.1 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 934 1.3 riastrad VMW_RES_DIRTY_NONE, user_surface_converter, 935 1.1 riastrad &cmd->body.src.sid, NULL); 936 1.2 riastrad if (ret) 937 1.1 riastrad return ret; 938 1.2 riastrad 939 1.1 riastrad return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 940 1.3 riastrad VMW_RES_DIRTY_SET, user_surface_converter, 941 1.1 riastrad &cmd->body.dest.sid, NULL); 942 1.1 riastrad } 943 1.1 riastrad 944 1.2 riastrad static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv, 945 1.3 riastrad struct vmw_sw_context *sw_context, 946 1.3 riastrad SVGA3dCmdHeader *header) 947 1.1 riastrad { 948 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy); 949 1.1 riastrad int ret; 950 1.1 riastrad 951 1.2 riastrad cmd = container_of(header, typeof(*cmd), header); 952 1.1 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 953 1.3 riastrad VMW_RES_DIRTY_NONE, user_surface_converter, 954 1.2 riastrad &cmd->body.src, NULL); 955 1.2 riastrad if (ret != 0) 956 1.1 riastrad return ret; 957 1.2 riastrad 958 1.1 riastrad return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 959 1.3 riastrad VMW_RES_DIRTY_SET, user_surface_converter, 960 1.2 riastrad &cmd->body.dest, NULL); 961 1.1 riastrad } 962 1.1 riastrad 963 1.2 riastrad static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv, 964 1.2 riastrad struct vmw_sw_context *sw_context, 965 1.2 riastrad SVGA3dCmdHeader *header) 966 1.1 riastrad { 967 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion); 968 1.2 riastrad int ret; 969 1.2 riastrad 970 1.2 riastrad cmd = container_of(header, typeof(*cmd), header); 971 1.2 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 972 1.3 riastrad VMW_RES_DIRTY_NONE, user_surface_converter, 973 1.2 riastrad &cmd->body.srcSid, NULL); 974 1.2 riastrad if (ret != 0) 975 1.2 riastrad return ret; 976 1.2 riastrad 977 1.2 riastrad return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 978 1.3 riastrad VMW_RES_DIRTY_SET, user_surface_converter, 979 1.2 riastrad &cmd->body.dstSid, NULL); 980 1.2 riastrad } 981 1.2 riastrad 982 1.2 riastrad static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, 983 1.2 riastrad struct vmw_sw_context *sw_context, 984 1.2 riastrad SVGA3dCmdHeader *header) 985 1.2 riastrad { 986 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt); 987 1.2 riastrad int ret; 988 1.2 riastrad 989 1.3 riastrad cmd = container_of(header, typeof(*cmd), header); 990 1.2 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 991 1.3 riastrad VMW_RES_DIRTY_NONE, user_surface_converter, 992 1.2 riastrad &cmd->body.src.sid, NULL); 993 1.2 riastrad if (unlikely(ret != 0)) 994 1.2 riastrad return ret; 995 1.3 riastrad 996 1.2 riastrad return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 997 1.3 riastrad VMW_RES_DIRTY_SET, user_surface_converter, 998 1.2 riastrad &cmd->body.dest.sid, NULL); 999 1.2 riastrad } 1000 1.2 riastrad 1001 1.2 riastrad static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, 1002 1.2 riastrad struct vmw_sw_context *sw_context, 1003 1.2 riastrad SVGA3dCmdHeader *header) 1004 1.2 riastrad { 1005 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) = 1006 1.3 riastrad container_of(header, typeof(*cmd), header); 1007 1.1 riastrad 1008 1.1 riastrad return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1009 1.3 riastrad VMW_RES_DIRTY_NONE, user_surface_converter, 1010 1.1 riastrad &cmd->body.srcImage.sid, NULL); 1011 1.1 riastrad } 1012 1.1 riastrad 1013 1.1 riastrad static int vmw_cmd_present_check(struct vmw_private *dev_priv, 1014 1.1 riastrad struct vmw_sw_context *sw_context, 1015 1.1 riastrad SVGA3dCmdHeader *header) 1016 1.1 riastrad { 1017 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) = 1018 1.3 riastrad container_of(header, typeof(*cmd), header); 1019 1.1 riastrad 1020 1.1 riastrad return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1021 1.3 riastrad VMW_RES_DIRTY_NONE, user_surface_converter, 1022 1.3 riastrad &cmd->body.sid, NULL); 1023 1.1 riastrad } 1024 1.1 riastrad 1025 1.1 riastrad /** 1026 1.1 riastrad * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. 1027 1.1 riastrad * 1028 1.1 riastrad * @dev_priv: The device private structure. 1029 1.1 riastrad * @new_query_bo: The new buffer holding query results. 1030 1.1 riastrad * @sw_context: The software context used for this command submission. 1031 1.1 riastrad * 1032 1.3 riastrad * This function checks whether @new_query_bo is suitable for holding query 1033 1.3 riastrad * results, and if another buffer currently is pinned for query results. If so, 1034 1.3 riastrad * the function prepares the state of @sw_context for switching pinned buffers 1035 1.3 riastrad * after successful submission of the current command batch. 1036 1.1 riastrad */ 1037 1.1 riastrad static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, 1038 1.3 riastrad struct vmw_buffer_object *new_query_bo, 1039 1.1 riastrad struct vmw_sw_context *sw_context) 1040 1.1 riastrad { 1041 1.1 riastrad struct vmw_res_cache_entry *ctx_entry = 1042 1.1 riastrad &sw_context->res_cache[vmw_res_context]; 1043 1.1 riastrad int ret; 1044 1.1 riastrad 1045 1.1 riastrad BUG_ON(!ctx_entry->valid); 1046 1.1 riastrad sw_context->last_query_ctx = ctx_entry->res; 1047 1.1 riastrad 1048 1.1 riastrad if (unlikely(new_query_bo != sw_context->cur_query_bo)) { 1049 1.1 riastrad 1050 1.2 riastrad if (unlikely(new_query_bo->base.num_pages > 4)) { 1051 1.3 riastrad VMW_DEBUG_USER("Query buffer too large.\n"); 1052 1.1 riastrad return -EINVAL; 1053 1.1 riastrad } 1054 1.1 riastrad 1055 1.1 riastrad if (unlikely(sw_context->cur_query_bo != NULL)) { 1056 1.1 riastrad sw_context->needs_post_query_barrier = true; 1057 1.3 riastrad ret = vmw_validation_add_bo(sw_context->ctx, 1058 1.3 riastrad sw_context->cur_query_bo, 1059 1.3 riastrad dev_priv->has_mob, false); 1060 1.1 riastrad if (unlikely(ret != 0)) 1061 1.1 riastrad return ret; 1062 1.1 riastrad } 1063 1.1 riastrad sw_context->cur_query_bo = new_query_bo; 1064 1.1 riastrad 1065 1.3 riastrad ret = vmw_validation_add_bo(sw_context->ctx, 1066 1.3 riastrad dev_priv->dummy_query_bo, 1067 1.3 riastrad dev_priv->has_mob, false); 1068 1.1 riastrad if (unlikely(ret != 0)) 1069 1.1 riastrad return ret; 1070 1.1 riastrad } 1071 1.1 riastrad 1072 1.1 riastrad return 0; 1073 1.1 riastrad } 1074 1.1 riastrad 1075 1.1 riastrad /** 1076 1.1 riastrad * vmw_query_bo_switch_commit - Finalize switching pinned query buffer 1077 1.1 riastrad * 1078 1.1 riastrad * @dev_priv: The device private structure. 1079 1.1 riastrad * @sw_context: The software context used for this command submission batch. 1080 1.1 riastrad * 1081 1.1 riastrad * This function will check if we're switching query buffers, and will then, 1082 1.1 riastrad * issue a dummy occlusion query wait used as a query barrier. When the fence 1083 1.3 riastrad * object following that query wait has signaled, we are sure that all preceding 1084 1.3 riastrad * queries have finished, and the old query buffer can be unpinned. However, 1085 1.3 riastrad * since both the new query buffer and the old one are fenced with that fence, 1086 1.3 riastrad * we can do an asynchronus unpin now, and be sure that the old query buffer 1087 1.3 riastrad * won't be moved until the fence has signaled. 1088 1.1 riastrad * 1089 1.1 riastrad * As mentioned above, both the new - and old query buffers need to be fenced 1090 1.1 riastrad * using a sequence emitted *after* calling this function. 1091 1.1 riastrad */ 1092 1.1 riastrad static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, 1093 1.1 riastrad struct vmw_sw_context *sw_context) 1094 1.1 riastrad { 1095 1.1 riastrad /* 1096 1.1 riastrad * The validate list should still hold references to all 1097 1.1 riastrad * contexts here. 1098 1.1 riastrad */ 1099 1.1 riastrad if (sw_context->needs_post_query_barrier) { 1100 1.1 riastrad struct vmw_res_cache_entry *ctx_entry = 1101 1.1 riastrad &sw_context->res_cache[vmw_res_context]; 1102 1.1 riastrad struct vmw_resource *ctx; 1103 1.1 riastrad int ret; 1104 1.1 riastrad 1105 1.1 riastrad BUG_ON(!ctx_entry->valid); 1106 1.1 riastrad ctx = ctx_entry->res; 1107 1.1 riastrad 1108 1.1 riastrad ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id); 1109 1.1 riastrad 1110 1.1 riastrad if (unlikely(ret != 0)) 1111 1.3 riastrad VMW_DEBUG_USER("Out of fifo space for dummy query.\n"); 1112 1.1 riastrad } 1113 1.1 riastrad 1114 1.1 riastrad if (dev_priv->pinned_bo != sw_context->cur_query_bo) { 1115 1.1 riastrad if (dev_priv->pinned_bo) { 1116 1.2 riastrad vmw_bo_pin_reserved(dev_priv->pinned_bo, false); 1117 1.3 riastrad vmw_bo_unreference(&dev_priv->pinned_bo); 1118 1.1 riastrad } 1119 1.1 riastrad 1120 1.1 riastrad if (!sw_context->needs_post_query_barrier) { 1121 1.2 riastrad vmw_bo_pin_reserved(sw_context->cur_query_bo, true); 1122 1.1 riastrad 1123 1.1 riastrad /* 1124 1.1 riastrad * We pin also the dummy_query_bo buffer so that we 1125 1.3 riastrad * don't need to validate it when emitting dummy queries 1126 1.3 riastrad * in context destroy paths. 1127 1.1 riastrad */ 1128 1.2 riastrad if (!dev_priv->dummy_query_bo_pinned) { 1129 1.2 riastrad vmw_bo_pin_reserved(dev_priv->dummy_query_bo, 1130 1.2 riastrad true); 1131 1.2 riastrad dev_priv->dummy_query_bo_pinned = true; 1132 1.2 riastrad } 1133 1.1 riastrad 1134 1.1 riastrad BUG_ON(sw_context->last_query_ctx == NULL); 1135 1.1 riastrad dev_priv->query_cid = sw_context->last_query_ctx->id; 1136 1.1 riastrad dev_priv->query_cid_valid = true; 1137 1.1 riastrad dev_priv->pinned_bo = 1138 1.3 riastrad vmw_bo_reference(sw_context->cur_query_bo); 1139 1.1 riastrad } 1140 1.1 riastrad } 1141 1.1 riastrad } 1142 1.1 riastrad 1143 1.1 riastrad /** 1144 1.3 riastrad * vmw_translate_mob_pointer - Prepare to translate a user-space buffer handle 1145 1.3 riastrad * to a MOB id. 1146 1.2 riastrad * 1147 1.2 riastrad * @dev_priv: Pointer to a device private structure. 1148 1.2 riastrad * @sw_context: The software context used for this command batch validation. 1149 1.2 riastrad * @id: Pointer to the user-space handle to be translated. 1150 1.3 riastrad * @vmw_bo_p: Points to a location that, on successful return will carry a 1151 1.3 riastrad * non-reference-counted pointer to the buffer object identified by the 1152 1.2 riastrad * user-space handle in @id. 1153 1.2 riastrad * 1154 1.2 riastrad * This function saves information needed to translate a user-space buffer 1155 1.2 riastrad * handle to a MOB id. The translation does not take place immediately, but 1156 1.3 riastrad * during a call to vmw_apply_relocations(). 1157 1.3 riastrad * 1158 1.3 riastrad * This function builds a relocation list and a list of buffers to validate. The 1159 1.3 riastrad * former needs to be freed using either vmw_apply_relocations() or 1160 1.3 riastrad * vmw_free_relocations(). The latter needs to be freed using 1161 1.3 riastrad * vmw_clear_validations. 1162 1.2 riastrad */ 1163 1.2 riastrad static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, 1164 1.2 riastrad struct vmw_sw_context *sw_context, 1165 1.2 riastrad SVGAMobId *id, 1166 1.3 riastrad struct vmw_buffer_object **vmw_bo_p) 1167 1.2 riastrad { 1168 1.3 riastrad struct vmw_buffer_object *vmw_bo; 1169 1.2 riastrad uint32_t handle = *id; 1170 1.2 riastrad struct vmw_relocation *reloc; 1171 1.2 riastrad int ret; 1172 1.2 riastrad 1173 1.3 riastrad vmw_validation_preload_bo(sw_context->ctx); 1174 1.3 riastrad vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle); 1175 1.3 riastrad if (IS_ERR(vmw_bo)) { 1176 1.3 riastrad VMW_DEBUG_USER("Could not find or use MOB buffer.\n"); 1177 1.3 riastrad return PTR_ERR(vmw_bo); 1178 1.2 riastrad } 1179 1.2 riastrad 1180 1.3 riastrad ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false); 1181 1.3 riastrad vmw_user_bo_noref_release(); 1182 1.3 riastrad if (unlikely(ret != 0)) 1183 1.3 riastrad return ret; 1184 1.3 riastrad 1185 1.3 riastrad reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc)); 1186 1.3 riastrad if (!reloc) 1187 1.3 riastrad return -ENOMEM; 1188 1.2 riastrad 1189 1.2 riastrad reloc->mob_loc = id; 1190 1.3 riastrad reloc->vbo = vmw_bo; 1191 1.2 riastrad 1192 1.3 riastrad *vmw_bo_p = vmw_bo; 1193 1.3 riastrad list_add_tail(&reloc->head, &sw_context->bo_relocations); 1194 1.2 riastrad 1195 1.2 riastrad return 0; 1196 1.2 riastrad } 1197 1.2 riastrad 1198 1.2 riastrad /** 1199 1.3 riastrad * vmw_translate_guest_pointer - Prepare to translate a user-space buffer handle 1200 1.3 riastrad * to a valid SVGAGuestPtr 1201 1.1 riastrad * 1202 1.1 riastrad * @dev_priv: Pointer to a device private structure. 1203 1.1 riastrad * @sw_context: The software context used for this command batch validation. 1204 1.1 riastrad * @ptr: Pointer to the user-space handle to be translated. 1205 1.3 riastrad * @vmw_bo_p: Points to a location that, on successful return will carry a 1206 1.3 riastrad * non-reference-counted pointer to the DMA buffer identified by the user-space 1207 1.3 riastrad * handle in @id. 1208 1.1 riastrad * 1209 1.1 riastrad * This function saves information needed to translate a user-space buffer 1210 1.1 riastrad * handle to a valid SVGAGuestPtr. The translation does not take place 1211 1.1 riastrad * immediately, but during a call to vmw_apply_relocations(). 1212 1.3 riastrad * 1213 1.1 riastrad * This function builds a relocation list and a list of buffers to validate. 1214 1.1 riastrad * The former needs to be freed using either vmw_apply_relocations() or 1215 1.1 riastrad * vmw_free_relocations(). The latter needs to be freed using 1216 1.1 riastrad * vmw_clear_validations. 1217 1.1 riastrad */ 1218 1.1 riastrad static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, 1219 1.1 riastrad struct vmw_sw_context *sw_context, 1220 1.1 riastrad SVGAGuestPtr *ptr, 1221 1.3 riastrad struct vmw_buffer_object **vmw_bo_p) 1222 1.1 riastrad { 1223 1.3 riastrad struct vmw_buffer_object *vmw_bo; 1224 1.1 riastrad uint32_t handle = ptr->gmrId; 1225 1.1 riastrad struct vmw_relocation *reloc; 1226 1.1 riastrad int ret; 1227 1.1 riastrad 1228 1.3 riastrad vmw_validation_preload_bo(sw_context->ctx); 1229 1.3 riastrad vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle); 1230 1.3 riastrad if (IS_ERR(vmw_bo)) { 1231 1.3 riastrad VMW_DEBUG_USER("Could not find or use GMR region.\n"); 1232 1.3 riastrad return PTR_ERR(vmw_bo); 1233 1.1 riastrad } 1234 1.1 riastrad 1235 1.3 riastrad ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false); 1236 1.3 riastrad vmw_user_bo_noref_release(); 1237 1.3 riastrad if (unlikely(ret != 0)) 1238 1.3 riastrad return ret; 1239 1.3 riastrad 1240 1.3 riastrad reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc)); 1241 1.3 riastrad if (!reloc) 1242 1.3 riastrad return -ENOMEM; 1243 1.1 riastrad 1244 1.1 riastrad reloc->location = ptr; 1245 1.3 riastrad reloc->vbo = vmw_bo; 1246 1.3 riastrad *vmw_bo_p = vmw_bo; 1247 1.3 riastrad list_add_tail(&reloc->head, &sw_context->bo_relocations); 1248 1.1 riastrad 1249 1.1 riastrad return 0; 1250 1.2 riastrad } 1251 1.2 riastrad 1252 1.2 riastrad /** 1253 1.3 riastrad * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command. 1254 1.2 riastrad * 1255 1.2 riastrad * @dev_priv: Pointer to a device private struct. 1256 1.2 riastrad * @sw_context: The software context used for this command submission. 1257 1.2 riastrad * @header: Pointer to the command header in the command stream. 1258 1.2 riastrad * 1259 1.2 riastrad * This function adds the new query into the query COTABLE 1260 1.2 riastrad */ 1261 1.2 riastrad static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv, 1262 1.2 riastrad struct vmw_sw_context *sw_context, 1263 1.2 riastrad SVGA3dCmdHeader *header) 1264 1.2 riastrad { 1265 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery); 1266 1.3 riastrad struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 1267 1.2 riastrad struct vmw_resource *cotable_res; 1268 1.3 riastrad int ret; 1269 1.2 riastrad 1270 1.3 riastrad if (!ctx_node) 1271 1.2 riastrad return -EINVAL; 1272 1.2 riastrad 1273 1.3 riastrad cmd = container_of(header, typeof(*cmd), header); 1274 1.2 riastrad 1275 1.3 riastrad if (cmd->body.type < SVGA3D_QUERYTYPE_MIN || 1276 1.3 riastrad cmd->body.type >= SVGA3D_QUERYTYPE_MAX) 1277 1.2 riastrad return -EINVAL; 1278 1.2 riastrad 1279 1.3 riastrad cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY); 1280 1.3 riastrad ret = vmw_cotable_notify(cotable_res, cmd->body.queryId); 1281 1.2 riastrad 1282 1.2 riastrad return ret; 1283 1.2 riastrad } 1284 1.2 riastrad 1285 1.2 riastrad /** 1286 1.3 riastrad * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command. 1287 1.2 riastrad * 1288 1.2 riastrad * @dev_priv: Pointer to a device private struct. 1289 1.2 riastrad * @sw_context: The software context used for this command submission. 1290 1.2 riastrad * @header: Pointer to the command header in the command stream. 1291 1.2 riastrad * 1292 1.3 riastrad * The query bind operation will eventually associate the query ID with its 1293 1.3 riastrad * backing MOB. In this function, we take the user mode MOB ID and use 1294 1.3 riastrad * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent. 1295 1.2 riastrad */ 1296 1.2 riastrad static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv, 1297 1.2 riastrad struct vmw_sw_context *sw_context, 1298 1.2 riastrad SVGA3dCmdHeader *header) 1299 1.2 riastrad { 1300 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery); 1301 1.3 riastrad struct vmw_buffer_object *vmw_bo; 1302 1.3 riastrad int ret; 1303 1.2 riastrad 1304 1.3 riastrad cmd = container_of(header, typeof(*cmd), header); 1305 1.2 riastrad 1306 1.2 riastrad /* 1307 1.2 riastrad * Look up the buffer pointed to by q.mobid, put it on the relocation 1308 1.2 riastrad * list so its kernel mode MOB ID can be filled in later 1309 1.2 riastrad */ 1310 1.3 riastrad ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid, 1311 1.2 riastrad &vmw_bo); 1312 1.2 riastrad 1313 1.2 riastrad if (ret != 0) 1314 1.2 riastrad return ret; 1315 1.2 riastrad 1316 1.2 riastrad sw_context->dx_query_mob = vmw_bo; 1317 1.3 riastrad sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx; 1318 1.3 riastrad return 0; 1319 1.1 riastrad } 1320 1.1 riastrad 1321 1.2 riastrad /** 1322 1.3 riastrad * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command. 1323 1.2 riastrad * 1324 1.2 riastrad * @dev_priv: Pointer to a device private struct. 1325 1.2 riastrad * @sw_context: The software context used for this command submission. 1326 1.2 riastrad * @header: Pointer to the command header in the command stream. 1327 1.2 riastrad */ 1328 1.2 riastrad static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv, 1329 1.2 riastrad struct vmw_sw_context *sw_context, 1330 1.2 riastrad SVGA3dCmdHeader *header) 1331 1.2 riastrad { 1332 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) = 1333 1.3 riastrad container_of(header, typeof(*cmd), header); 1334 1.2 riastrad 1335 1.2 riastrad return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 1336 1.3 riastrad VMW_RES_DIRTY_SET, user_context_converter, 1337 1.3 riastrad &cmd->body.cid, NULL); 1338 1.2 riastrad } 1339 1.2 riastrad 1340 1.1 riastrad /** 1341 1.3 riastrad * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command. 1342 1.1 riastrad * 1343 1.1 riastrad * @dev_priv: Pointer to a device private struct. 1344 1.1 riastrad * @sw_context: The software context used for this command submission. 1345 1.1 riastrad * @header: Pointer to the command header in the command stream. 1346 1.1 riastrad */ 1347 1.1 riastrad static int vmw_cmd_begin_query(struct vmw_private *dev_priv, 1348 1.1 riastrad struct vmw_sw_context *sw_context, 1349 1.1 riastrad SVGA3dCmdHeader *header) 1350 1.1 riastrad { 1351 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) = 1352 1.3 riastrad container_of(header, typeof(*cmd), header); 1353 1.1 riastrad 1354 1.2 riastrad if (unlikely(dev_priv->has_mob)) { 1355 1.3 riastrad VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery); 1356 1.2 riastrad 1357 1.2 riastrad BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); 1358 1.2 riastrad 1359 1.2 riastrad gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY; 1360 1.2 riastrad gb_cmd.header.size = cmd->header.size; 1361 1.3 riastrad gb_cmd.body.cid = cmd->body.cid; 1362 1.3 riastrad gb_cmd.body.type = cmd->body.type; 1363 1.2 riastrad 1364 1.2 riastrad memcpy(cmd, &gb_cmd, sizeof(*cmd)); 1365 1.2 riastrad return vmw_cmd_begin_gb_query(dev_priv, sw_context, header); 1366 1.2 riastrad } 1367 1.2 riastrad 1368 1.1 riastrad return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 1369 1.3 riastrad VMW_RES_DIRTY_SET, user_context_converter, 1370 1.3 riastrad &cmd->body.cid, NULL); 1371 1.1 riastrad } 1372 1.1 riastrad 1373 1.1 riastrad /** 1374 1.3 riastrad * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command. 1375 1.2 riastrad * 1376 1.2 riastrad * @dev_priv: Pointer to a device private struct. 1377 1.2 riastrad * @sw_context: The software context used for this command submission. 1378 1.2 riastrad * @header: Pointer to the command header in the command stream. 1379 1.2 riastrad */ 1380 1.2 riastrad static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, 1381 1.2 riastrad struct vmw_sw_context *sw_context, 1382 1.2 riastrad SVGA3dCmdHeader *header) 1383 1.2 riastrad { 1384 1.3 riastrad struct vmw_buffer_object *vmw_bo; 1385 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery); 1386 1.2 riastrad int ret; 1387 1.2 riastrad 1388 1.3 riastrad cmd = container_of(header, typeof(*cmd), header); 1389 1.2 riastrad ret = vmw_cmd_cid_check(dev_priv, sw_context, header); 1390 1.2 riastrad if (unlikely(ret != 0)) 1391 1.2 riastrad return ret; 1392 1.2 riastrad 1393 1.3 riastrad ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid, 1394 1.2 riastrad &vmw_bo); 1395 1.2 riastrad if (unlikely(ret != 0)) 1396 1.2 riastrad return ret; 1397 1.2 riastrad 1398 1.2 riastrad ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context); 1399 1.2 riastrad 1400 1.2 riastrad return ret; 1401 1.2 riastrad } 1402 1.2 riastrad 1403 1.2 riastrad /** 1404 1.3 riastrad * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command. 1405 1.1 riastrad * 1406 1.1 riastrad * @dev_priv: Pointer to a device private struct. 1407 1.1 riastrad * @sw_context: The software context used for this command submission. 1408 1.1 riastrad * @header: Pointer to the command header in the command stream. 1409 1.1 riastrad */ 1410 1.1 riastrad static int vmw_cmd_end_query(struct vmw_private *dev_priv, 1411 1.1 riastrad struct vmw_sw_context *sw_context, 1412 1.1 riastrad SVGA3dCmdHeader *header) 1413 1.1 riastrad { 1414 1.3 riastrad struct vmw_buffer_object *vmw_bo; 1415 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery); 1416 1.1 riastrad int ret; 1417 1.1 riastrad 1418 1.3 riastrad cmd = container_of(header, typeof(*cmd), header); 1419 1.2 riastrad if (dev_priv->has_mob) { 1420 1.3 riastrad VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery); 1421 1.2 riastrad 1422 1.2 riastrad BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); 1423 1.2 riastrad 1424 1.2 riastrad gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY; 1425 1.2 riastrad gb_cmd.header.size = cmd->header.size; 1426 1.3 riastrad gb_cmd.body.cid = cmd->body.cid; 1427 1.3 riastrad gb_cmd.body.type = cmd->body.type; 1428 1.3 riastrad gb_cmd.body.mobid = cmd->body.guestResult.gmrId; 1429 1.3 riastrad gb_cmd.body.offset = cmd->body.guestResult.offset; 1430 1.2 riastrad 1431 1.2 riastrad memcpy(cmd, &gb_cmd, sizeof(*cmd)); 1432 1.2 riastrad return vmw_cmd_end_gb_query(dev_priv, sw_context, header); 1433 1.2 riastrad } 1434 1.2 riastrad 1435 1.1 riastrad ret = vmw_cmd_cid_check(dev_priv, sw_context, header); 1436 1.1 riastrad if (unlikely(ret != 0)) 1437 1.1 riastrad return ret; 1438 1.1 riastrad 1439 1.1 riastrad ret = vmw_translate_guest_ptr(dev_priv, sw_context, 1440 1.3 riastrad &cmd->body.guestResult, &vmw_bo); 1441 1.1 riastrad if (unlikely(ret != 0)) 1442 1.1 riastrad return ret; 1443 1.1 riastrad 1444 1.2 riastrad ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context); 1445 1.1 riastrad 1446 1.1 riastrad return ret; 1447 1.1 riastrad } 1448 1.1 riastrad 1449 1.2 riastrad /** 1450 1.3 riastrad * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command. 1451 1.2 riastrad * 1452 1.2 riastrad * @dev_priv: Pointer to a device private struct. 1453 1.2 riastrad * @sw_context: The software context used for this command submission. 1454 1.2 riastrad * @header: Pointer to the command header in the command stream. 1455 1.2 riastrad */ 1456 1.2 riastrad static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, 1457 1.2 riastrad struct vmw_sw_context *sw_context, 1458 1.2 riastrad SVGA3dCmdHeader *header) 1459 1.2 riastrad { 1460 1.3 riastrad struct vmw_buffer_object *vmw_bo; 1461 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery); 1462 1.2 riastrad int ret; 1463 1.2 riastrad 1464 1.3 riastrad cmd = container_of(header, typeof(*cmd), header); 1465 1.2 riastrad ret = vmw_cmd_cid_check(dev_priv, sw_context, header); 1466 1.2 riastrad if (unlikely(ret != 0)) 1467 1.2 riastrad return ret; 1468 1.2 riastrad 1469 1.3 riastrad ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid, 1470 1.2 riastrad &vmw_bo); 1471 1.2 riastrad if (unlikely(ret != 0)) 1472 1.2 riastrad return ret; 1473 1.2 riastrad 1474 1.2 riastrad return 0; 1475 1.2 riastrad } 1476 1.2 riastrad 1477 1.2 riastrad /** 1478 1.3 riastrad * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command. 1479 1.1 riastrad * 1480 1.1 riastrad * @dev_priv: Pointer to a device private struct. 1481 1.1 riastrad * @sw_context: The software context used for this command submission. 1482 1.1 riastrad * @header: Pointer to the command header in the command stream. 1483 1.1 riastrad */ 1484 1.1 riastrad static int vmw_cmd_wait_query(struct vmw_private *dev_priv, 1485 1.1 riastrad struct vmw_sw_context *sw_context, 1486 1.1 riastrad SVGA3dCmdHeader *header) 1487 1.1 riastrad { 1488 1.3 riastrad struct vmw_buffer_object *vmw_bo; 1489 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery); 1490 1.1 riastrad int ret; 1491 1.1 riastrad 1492 1.3 riastrad cmd = container_of(header, typeof(*cmd), header); 1493 1.2 riastrad if (dev_priv->has_mob) { 1494 1.3 riastrad VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery); 1495 1.2 riastrad 1496 1.2 riastrad BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); 1497 1.2 riastrad 1498 1.2 riastrad gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; 1499 1.2 riastrad gb_cmd.header.size = cmd->header.size; 1500 1.3 riastrad gb_cmd.body.cid = cmd->body.cid; 1501 1.3 riastrad gb_cmd.body.type = cmd->body.type; 1502 1.3 riastrad gb_cmd.body.mobid = cmd->body.guestResult.gmrId; 1503 1.3 riastrad gb_cmd.body.offset = cmd->body.guestResult.offset; 1504 1.2 riastrad 1505 1.2 riastrad memcpy(cmd, &gb_cmd, sizeof(*cmd)); 1506 1.2 riastrad return vmw_cmd_wait_gb_query(dev_priv, sw_context, header); 1507 1.2 riastrad } 1508 1.2 riastrad 1509 1.1 riastrad ret = vmw_cmd_cid_check(dev_priv, sw_context, header); 1510 1.1 riastrad if (unlikely(ret != 0)) 1511 1.1 riastrad return ret; 1512 1.1 riastrad 1513 1.1 riastrad ret = vmw_translate_guest_ptr(dev_priv, sw_context, 1514 1.3 riastrad &cmd->body.guestResult, &vmw_bo); 1515 1.1 riastrad if (unlikely(ret != 0)) 1516 1.1 riastrad return ret; 1517 1.1 riastrad 1518 1.1 riastrad return 0; 1519 1.1 riastrad } 1520 1.1 riastrad 1521 1.1 riastrad static int vmw_cmd_dma(struct vmw_private *dev_priv, 1522 1.1 riastrad struct vmw_sw_context *sw_context, 1523 1.1 riastrad SVGA3dCmdHeader *header) 1524 1.1 riastrad { 1525 1.3 riastrad struct vmw_buffer_object *vmw_bo = NULL; 1526 1.1 riastrad struct vmw_surface *srf = NULL; 1527 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA); 1528 1.1 riastrad int ret; 1529 1.2 riastrad SVGA3dCmdSurfaceDMASuffix *suffix; 1530 1.2 riastrad uint32_t bo_size; 1531 1.3 riastrad bool dirty; 1532 1.1 riastrad 1533 1.3 riastrad cmd = container_of(header, typeof(*cmd), header); 1534 1.3 riastrad suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body + 1535 1.2 riastrad header->size - sizeof(*suffix)); 1536 1.2 riastrad 1537 1.2 riastrad /* Make sure device and verifier stays in sync. */ 1538 1.2 riastrad if (unlikely(suffix->suffixSize != sizeof(*suffix))) { 1539 1.3 riastrad VMW_DEBUG_USER("Invalid DMA suffix size.\n"); 1540 1.2 riastrad return -EINVAL; 1541 1.2 riastrad } 1542 1.2 riastrad 1543 1.1 riastrad ret = vmw_translate_guest_ptr(dev_priv, sw_context, 1544 1.3 riastrad &cmd->body.guest.ptr, &vmw_bo); 1545 1.1 riastrad if (unlikely(ret != 0)) 1546 1.1 riastrad return ret; 1547 1.1 riastrad 1548 1.2 riastrad /* Make sure DMA doesn't cross BO boundaries. */ 1549 1.2 riastrad bo_size = vmw_bo->base.num_pages * PAGE_SIZE; 1550 1.3 riastrad if (unlikely(cmd->body.guest.ptr.offset > bo_size)) { 1551 1.3 riastrad VMW_DEBUG_USER("Invalid DMA offset.\n"); 1552 1.2 riastrad return -EINVAL; 1553 1.2 riastrad } 1554 1.2 riastrad 1555 1.3 riastrad bo_size -= cmd->body.guest.ptr.offset; 1556 1.2 riastrad if (unlikely(suffix->maximumOffset > bo_size)) 1557 1.2 riastrad suffix->maximumOffset = bo_size; 1558 1.2 riastrad 1559 1.3 riastrad dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ? 1560 1.3 riastrad VMW_RES_DIRTY_SET : 0; 1561 1.1 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1562 1.3 riastrad dirty, user_surface_converter, 1563 1.3 riastrad &cmd->body.host.sid, NULL); 1564 1.1 riastrad if (unlikely(ret != 0)) { 1565 1.1 riastrad if (unlikely(ret != -ERESTARTSYS)) 1566 1.3 riastrad VMW_DEBUG_USER("could not find surface for DMA.\n"); 1567 1.3 riastrad return ret; 1568 1.1 riastrad } 1569 1.1 riastrad 1570 1.1 riastrad srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); 1571 1.1 riastrad 1572 1.3 riastrad vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header); 1573 1.1 riastrad 1574 1.3 riastrad return 0; 1575 1.1 riastrad } 1576 1.1 riastrad 1577 1.1 riastrad static int vmw_cmd_draw(struct vmw_private *dev_priv, 1578 1.1 riastrad struct vmw_sw_context *sw_context, 1579 1.1 riastrad SVGA3dCmdHeader *header) 1580 1.1 riastrad { 1581 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives); 1582 1.1 riastrad SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)( 1583 1.1 riastrad (unsigned long)header + sizeof(*cmd)); 1584 1.1 riastrad SVGA3dPrimitiveRange *range; 1585 1.1 riastrad uint32_t i; 1586 1.1 riastrad uint32_t maxnum; 1587 1.1 riastrad int ret; 1588 1.1 riastrad 1589 1.1 riastrad ret = vmw_cmd_cid_check(dev_priv, sw_context, header); 1590 1.1 riastrad if (unlikely(ret != 0)) 1591 1.1 riastrad return ret; 1592 1.1 riastrad 1593 1.3 riastrad cmd = container_of(header, typeof(*cmd), header); 1594 1.1 riastrad maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl); 1595 1.1 riastrad 1596 1.1 riastrad if (unlikely(cmd->body.numVertexDecls > maxnum)) { 1597 1.3 riastrad VMW_DEBUG_USER("Illegal number of vertex declarations.\n"); 1598 1.1 riastrad return -EINVAL; 1599 1.1 riastrad } 1600 1.1 riastrad 1601 1.1 riastrad for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) { 1602 1.1 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1603 1.3 riastrad VMW_RES_DIRTY_NONE, 1604 1.1 riastrad user_surface_converter, 1605 1.1 riastrad &decl->array.surfaceId, NULL); 1606 1.1 riastrad if (unlikely(ret != 0)) 1607 1.1 riastrad return ret; 1608 1.1 riastrad } 1609 1.1 riastrad 1610 1.1 riastrad maxnum = (header->size - sizeof(cmd->body) - 1611 1.1 riastrad cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range); 1612 1.1 riastrad if (unlikely(cmd->body.numRanges > maxnum)) { 1613 1.3 riastrad VMW_DEBUG_USER("Illegal number of index ranges.\n"); 1614 1.1 riastrad return -EINVAL; 1615 1.1 riastrad } 1616 1.1 riastrad 1617 1.1 riastrad range = (SVGA3dPrimitiveRange *) decl; 1618 1.1 riastrad for (i = 0; i < cmd->body.numRanges; ++i, ++range) { 1619 1.1 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1620 1.3 riastrad VMW_RES_DIRTY_NONE, 1621 1.1 riastrad user_surface_converter, 1622 1.1 riastrad &range->indexArray.surfaceId, NULL); 1623 1.1 riastrad if (unlikely(ret != 0)) 1624 1.1 riastrad return ret; 1625 1.1 riastrad } 1626 1.2 riastrad return 0; 1627 1.2 riastrad } 1628 1.2 riastrad 1629 1.2 riastrad static int vmw_cmd_tex_state(struct vmw_private *dev_priv, 1630 1.2 riastrad struct vmw_sw_context *sw_context, 1631 1.2 riastrad SVGA3dCmdHeader *header) 1632 1.2 riastrad { 1633 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState); 1634 1.2 riastrad SVGA3dTextureState *last_state = (SVGA3dTextureState *) 1635 1.2 riastrad ((unsigned long) header + header->size + sizeof(header)); 1636 1.2 riastrad SVGA3dTextureState *cur_state = (SVGA3dTextureState *) 1637 1.3 riastrad ((unsigned long) header + sizeof(*cmd)); 1638 1.3 riastrad struct vmw_resource *ctx; 1639 1.3 riastrad struct vmw_resource *res; 1640 1.2 riastrad int ret; 1641 1.2 riastrad 1642 1.3 riastrad cmd = container_of(header, typeof(*cmd), header); 1643 1.2 riastrad 1644 1.2 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 1645 1.3 riastrad VMW_RES_DIRTY_SET, user_context_converter, 1646 1.3 riastrad &cmd->body.cid, &ctx); 1647 1.2 riastrad if (unlikely(ret != 0)) 1648 1.2 riastrad return ret; 1649 1.2 riastrad 1650 1.2 riastrad for (; cur_state < last_state; ++cur_state) { 1651 1.2 riastrad if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) 1652 1.2 riastrad continue; 1653 1.2 riastrad 1654 1.2 riastrad if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) { 1655 1.3 riastrad VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n", 1656 1.3 riastrad (unsigned int) cur_state->stage); 1657 1.2 riastrad return -EINVAL; 1658 1.2 riastrad } 1659 1.2 riastrad 1660 1.2 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1661 1.3 riastrad VMW_RES_DIRTY_NONE, 1662 1.2 riastrad user_surface_converter, 1663 1.3 riastrad &cur_state->value, &res); 1664 1.2 riastrad if (unlikely(ret != 0)) 1665 1.2 riastrad return ret; 1666 1.2 riastrad 1667 1.2 riastrad if (dev_priv->has_mob) { 1668 1.2 riastrad struct vmw_ctx_bindinfo_tex binding; 1669 1.3 riastrad struct vmw_ctx_validation_info *node; 1670 1.2 riastrad 1671 1.3 riastrad node = vmw_execbuf_info_from_res(sw_context, ctx); 1672 1.3 riastrad if (!node) 1673 1.3 riastrad return -EINVAL; 1674 1.3 riastrad 1675 1.3 riastrad binding.bi.ctx = ctx; 1676 1.3 riastrad binding.bi.res = res; 1677 1.2 riastrad binding.bi.bt = vmw_ctx_binding_tex; 1678 1.2 riastrad binding.texture_stage = cur_state->stage; 1679 1.3 riastrad vmw_binding_add(node->staged, &binding.bi, 0, 1680 1.3 riastrad binding.texture_stage); 1681 1.2 riastrad } 1682 1.2 riastrad } 1683 1.2 riastrad 1684 1.2 riastrad return 0; 1685 1.2 riastrad } 1686 1.2 riastrad 1687 1.2 riastrad static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, 1688 1.2 riastrad struct vmw_sw_context *sw_context, 1689 1.2 riastrad void *buf) 1690 1.2 riastrad { 1691 1.3 riastrad struct vmw_buffer_object *vmw_bo; 1692 1.2 riastrad 1693 1.2 riastrad struct { 1694 1.2 riastrad uint32_t header; 1695 1.2 riastrad SVGAFifoCmdDefineGMRFB body; 1696 1.2 riastrad } *cmd = buf; 1697 1.2 riastrad 1698 1.3 riastrad return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr, 1699 1.3 riastrad &vmw_bo); 1700 1.2 riastrad } 1701 1.2 riastrad 1702 1.2 riastrad /** 1703 1.2 riastrad * vmw_cmd_res_switch_backup - Utility function to handle backup buffer 1704 1.2 riastrad * switching 1705 1.2 riastrad * 1706 1.2 riastrad * @dev_priv: Pointer to a device private struct. 1707 1.2 riastrad * @sw_context: The software context being used for this batch. 1708 1.2 riastrad * @val_node: The validation node representing the resource. 1709 1.2 riastrad * @buf_id: Pointer to the user-space backup buffer handle in the command 1710 1.2 riastrad * stream. 1711 1.2 riastrad * @backup_offset: Offset of backup into MOB. 1712 1.2 riastrad * 1713 1.3 riastrad * This function prepares for registering a switch of backup buffers in the 1714 1.3 riastrad * resource metadata just prior to unreserving. It's basically a wrapper around 1715 1.3 riastrad * vmw_cmd_res_switch_backup with a different interface. 1716 1.2 riastrad */ 1717 1.2 riastrad static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv, 1718 1.2 riastrad struct vmw_sw_context *sw_context, 1719 1.3 riastrad struct vmw_resource *res, uint32_t *buf_id, 1720 1.2 riastrad unsigned long backup_offset) 1721 1.2 riastrad { 1722 1.3 riastrad struct vmw_buffer_object *vbo; 1723 1.3 riastrad void *info; 1724 1.2 riastrad int ret; 1725 1.2 riastrad 1726 1.3 riastrad info = vmw_execbuf_info_from_res(sw_context, res); 1727 1.3 riastrad if (!info) 1728 1.3 riastrad return -EINVAL; 1729 1.3 riastrad 1730 1.3 riastrad ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo); 1731 1.2 riastrad if (ret) 1732 1.2 riastrad return ret; 1733 1.2 riastrad 1734 1.3 riastrad vmw_validation_res_switch_backup(sw_context->ctx, info, vbo, 1735 1.3 riastrad backup_offset); 1736 1.2 riastrad return 0; 1737 1.2 riastrad } 1738 1.2 riastrad 1739 1.2 riastrad /** 1740 1.2 riastrad * vmw_cmd_switch_backup - Utility function to handle backup buffer switching 1741 1.2 riastrad * 1742 1.2 riastrad * @dev_priv: Pointer to a device private struct. 1743 1.2 riastrad * @sw_context: The software context being used for this batch. 1744 1.2 riastrad * @res_type: The resource type. 1745 1.2 riastrad * @converter: Information about user-space binding for this resource type. 1746 1.2 riastrad * @res_id: Pointer to the user-space resource handle in the command stream. 1747 1.2 riastrad * @buf_id: Pointer to the user-space backup buffer handle in the command 1748 1.2 riastrad * stream. 1749 1.2 riastrad * @backup_offset: Offset of backup into MOB. 1750 1.2 riastrad * 1751 1.3 riastrad * This function prepares for registering a switch of backup buffers in the 1752 1.3 riastrad * resource metadata just prior to unreserving. It's basically a wrapper around 1753 1.3 riastrad * vmw_cmd_res_switch_backup with a different interface. 1754 1.2 riastrad */ 1755 1.2 riastrad static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, 1756 1.2 riastrad struct vmw_sw_context *sw_context, 1757 1.2 riastrad enum vmw_res_type res_type, 1758 1.2 riastrad const struct vmw_user_resource_conv 1759 1.3 riastrad *converter, uint32_t *res_id, uint32_t *buf_id, 1760 1.2 riastrad unsigned long backup_offset) 1761 1.2 riastrad { 1762 1.3 riastrad struct vmw_resource *res; 1763 1.2 riastrad int ret; 1764 1.2 riastrad 1765 1.2 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, res_type, 1766 1.3 riastrad VMW_RES_DIRTY_NONE, converter, res_id, &res); 1767 1.2 riastrad if (ret) 1768 1.2 riastrad return ret; 1769 1.2 riastrad 1770 1.3 riastrad return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id, 1771 1.3 riastrad backup_offset); 1772 1.2 riastrad } 1773 1.2 riastrad 1774 1.2 riastrad /** 1775 1.3 riastrad * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command 1776 1.2 riastrad * 1777 1.2 riastrad * @dev_priv: Pointer to a device private struct. 1778 1.2 riastrad * @sw_context: The software context being used for this batch. 1779 1.2 riastrad * @header: Pointer to the command header in the command stream. 1780 1.2 riastrad */ 1781 1.2 riastrad static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv, 1782 1.2 riastrad struct vmw_sw_context *sw_context, 1783 1.2 riastrad SVGA3dCmdHeader *header) 1784 1.2 riastrad { 1785 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) = 1786 1.3 riastrad container_of(header, typeof(*cmd), header); 1787 1.2 riastrad 1788 1.2 riastrad return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface, 1789 1.3 riastrad user_surface_converter, &cmd->body.sid, 1790 1.3 riastrad &cmd->body.mobid, 0); 1791 1.2 riastrad } 1792 1.2 riastrad 1793 1.2 riastrad /** 1794 1.3 riastrad * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command 1795 1.2 riastrad * 1796 1.2 riastrad * @dev_priv: Pointer to a device private struct. 1797 1.2 riastrad * @sw_context: The software context being used for this batch. 1798 1.2 riastrad * @header: Pointer to the command header in the command stream. 1799 1.2 riastrad */ 1800 1.2 riastrad static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv, 1801 1.2 riastrad struct vmw_sw_context *sw_context, 1802 1.2 riastrad SVGA3dCmdHeader *header) 1803 1.2 riastrad { 1804 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) = 1805 1.3 riastrad container_of(header, typeof(*cmd), header); 1806 1.2 riastrad 1807 1.2 riastrad return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1808 1.3 riastrad VMW_RES_DIRTY_NONE, user_surface_converter, 1809 1.2 riastrad &cmd->body.image.sid, NULL); 1810 1.2 riastrad } 1811 1.2 riastrad 1812 1.2 riastrad /** 1813 1.3 riastrad * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command 1814 1.2 riastrad * 1815 1.2 riastrad * @dev_priv: Pointer to a device private struct. 1816 1.2 riastrad * @sw_context: The software context being used for this batch. 1817 1.2 riastrad * @header: Pointer to the command header in the command stream. 1818 1.2 riastrad */ 1819 1.2 riastrad static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv, 1820 1.2 riastrad struct vmw_sw_context *sw_context, 1821 1.2 riastrad SVGA3dCmdHeader *header) 1822 1.2 riastrad { 1823 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) = 1824 1.3 riastrad container_of(header, typeof(*cmd), header); 1825 1.2 riastrad 1826 1.2 riastrad return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1827 1.3 riastrad VMW_RES_DIRTY_CLEAR, user_surface_converter, 1828 1.2 riastrad &cmd->body.sid, NULL); 1829 1.2 riastrad } 1830 1.2 riastrad 1831 1.2 riastrad /** 1832 1.3 riastrad * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command 1833 1.2 riastrad * 1834 1.2 riastrad * @dev_priv: Pointer to a device private struct. 1835 1.2 riastrad * @sw_context: The software context being used for this batch. 1836 1.2 riastrad * @header: Pointer to the command header in the command stream. 1837 1.2 riastrad */ 1838 1.2 riastrad static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv, 1839 1.2 riastrad struct vmw_sw_context *sw_context, 1840 1.2 riastrad SVGA3dCmdHeader *header) 1841 1.2 riastrad { 1842 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) = 1843 1.3 riastrad container_of(header, typeof(*cmd), header); 1844 1.2 riastrad 1845 1.2 riastrad return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1846 1.3 riastrad VMW_RES_DIRTY_NONE, user_surface_converter, 1847 1.2 riastrad &cmd->body.image.sid, NULL); 1848 1.2 riastrad } 1849 1.2 riastrad 1850 1.2 riastrad /** 1851 1.3 riastrad * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE 1852 1.2 riastrad * command 1853 1.2 riastrad * 1854 1.2 riastrad * @dev_priv: Pointer to a device private struct. 1855 1.2 riastrad * @sw_context: The software context being used for this batch. 1856 1.2 riastrad * @header: Pointer to the command header in the command stream. 1857 1.2 riastrad */ 1858 1.2 riastrad static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv, 1859 1.2 riastrad struct vmw_sw_context *sw_context, 1860 1.2 riastrad SVGA3dCmdHeader *header) 1861 1.2 riastrad { 1862 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) = 1863 1.3 riastrad container_of(header, typeof(*cmd), header); 1864 1.2 riastrad 1865 1.2 riastrad return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1866 1.3 riastrad VMW_RES_DIRTY_CLEAR, user_surface_converter, 1867 1.2 riastrad &cmd->body.sid, NULL); 1868 1.2 riastrad } 1869 1.2 riastrad 1870 1.2 riastrad /** 1871 1.3 riastrad * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE 1872 1.2 riastrad * command 1873 1.2 riastrad * 1874 1.2 riastrad * @dev_priv: Pointer to a device private struct. 1875 1.2 riastrad * @sw_context: The software context being used for this batch. 1876 1.2 riastrad * @header: Pointer to the command header in the command stream. 1877 1.2 riastrad */ 1878 1.2 riastrad static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv, 1879 1.2 riastrad struct vmw_sw_context *sw_context, 1880 1.2 riastrad SVGA3dCmdHeader *header) 1881 1.2 riastrad { 1882 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) = 1883 1.3 riastrad container_of(header, typeof(*cmd), header); 1884 1.2 riastrad 1885 1.2 riastrad return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1886 1.3 riastrad VMW_RES_DIRTY_NONE, user_surface_converter, 1887 1.2 riastrad &cmd->body.image.sid, NULL); 1888 1.2 riastrad } 1889 1.2 riastrad 1890 1.2 riastrad /** 1891 1.3 riastrad * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE 1892 1.3 riastrad * command 1893 1.2 riastrad * 1894 1.2 riastrad * @dev_priv: Pointer to a device private struct. 1895 1.2 riastrad * @sw_context: The software context being used for this batch. 1896 1.2 riastrad * @header: Pointer to the command header in the command stream. 1897 1.2 riastrad */ 1898 1.2 riastrad static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, 1899 1.2 riastrad struct vmw_sw_context *sw_context, 1900 1.2 riastrad SVGA3dCmdHeader *header) 1901 1.2 riastrad { 1902 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) = 1903 1.3 riastrad container_of(header, typeof(*cmd), header); 1904 1.2 riastrad 1905 1.2 riastrad return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1906 1.3 riastrad VMW_RES_DIRTY_CLEAR, user_surface_converter, 1907 1.2 riastrad &cmd->body.sid, NULL); 1908 1.2 riastrad } 1909 1.2 riastrad 1910 1.2 riastrad /** 1911 1.3 riastrad * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command 1912 1.2 riastrad * 1913 1.2 riastrad * @dev_priv: Pointer to a device private struct. 1914 1.2 riastrad * @sw_context: The software context being used for this batch. 1915 1.2 riastrad * @header: Pointer to the command header in the command stream. 1916 1.2 riastrad */ 1917 1.2 riastrad static int vmw_cmd_shader_define(struct vmw_private *dev_priv, 1918 1.2 riastrad struct vmw_sw_context *sw_context, 1919 1.2 riastrad SVGA3dCmdHeader *header) 1920 1.2 riastrad { 1921 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader); 1922 1.2 riastrad int ret; 1923 1.2 riastrad size_t size; 1924 1.3 riastrad struct vmw_resource *ctx; 1925 1.2 riastrad 1926 1.3 riastrad cmd = container_of(header, typeof(*cmd), header); 1927 1.2 riastrad 1928 1.2 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 1929 1.3 riastrad VMW_RES_DIRTY_SET, user_context_converter, 1930 1.3 riastrad &cmd->body.cid, &ctx); 1931 1.2 riastrad if (unlikely(ret != 0)) 1932 1.2 riastrad return ret; 1933 1.2 riastrad 1934 1.2 riastrad if (unlikely(!dev_priv->has_mob)) 1935 1.2 riastrad return 0; 1936 1.2 riastrad 1937 1.2 riastrad size = cmd->header.size - sizeof(cmd->body); 1938 1.3 riastrad ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx), 1939 1.3 riastrad cmd->body.shid, cmd + 1, cmd->body.type, 1940 1.3 riastrad size, &sw_context->staged_cmd_res); 1941 1.2 riastrad if (unlikely(ret != 0)) 1942 1.2 riastrad return ret; 1943 1.2 riastrad 1944 1.3 riastrad return vmw_resource_relocation_add(sw_context, NULL, 1945 1.3 riastrad vmw_ptr_diff(sw_context->buf_start, 1946 1.3 riastrad &cmd->header.id), 1947 1.3 riastrad vmw_res_rel_nop); 1948 1.2 riastrad } 1949 1.2 riastrad 1950 1.2 riastrad /** 1951 1.3 riastrad * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command 1952 1.2 riastrad * 1953 1.2 riastrad * @dev_priv: Pointer to a device private struct. 1954 1.2 riastrad * @sw_context: The software context being used for this batch. 1955 1.2 riastrad * @header: Pointer to the command header in the command stream. 1956 1.2 riastrad */ 1957 1.2 riastrad static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, 1958 1.2 riastrad struct vmw_sw_context *sw_context, 1959 1.2 riastrad SVGA3dCmdHeader *header) 1960 1.2 riastrad { 1961 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader); 1962 1.2 riastrad int ret; 1963 1.3 riastrad struct vmw_resource *ctx; 1964 1.2 riastrad 1965 1.3 riastrad cmd = container_of(header, typeof(*cmd), header); 1966 1.2 riastrad 1967 1.2 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 1968 1.3 riastrad VMW_RES_DIRTY_SET, user_context_converter, 1969 1.3 riastrad &cmd->body.cid, &ctx); 1970 1.2 riastrad if (unlikely(ret != 0)) 1971 1.2 riastrad return ret; 1972 1.2 riastrad 1973 1.2 riastrad if (unlikely(!dev_priv->has_mob)) 1974 1.2 riastrad return 0; 1975 1.2 riastrad 1976 1.3 riastrad ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid, 1977 1.3 riastrad cmd->body.type, &sw_context->staged_cmd_res); 1978 1.2 riastrad if (unlikely(ret != 0)) 1979 1.2 riastrad return ret; 1980 1.2 riastrad 1981 1.3 riastrad return vmw_resource_relocation_add(sw_context, NULL, 1982 1.3 riastrad vmw_ptr_diff(sw_context->buf_start, 1983 1.3 riastrad &cmd->header.id), 1984 1.3 riastrad vmw_res_rel_nop); 1985 1.2 riastrad } 1986 1.2 riastrad 1987 1.2 riastrad /** 1988 1.3 riastrad * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command 1989 1.2 riastrad * 1990 1.2 riastrad * @dev_priv: Pointer to a device private struct. 1991 1.2 riastrad * @sw_context: The software context being used for this batch. 1992 1.2 riastrad * @header: Pointer to the command header in the command stream. 1993 1.2 riastrad */ 1994 1.2 riastrad static int vmw_cmd_set_shader(struct vmw_private *dev_priv, 1995 1.2 riastrad struct vmw_sw_context *sw_context, 1996 1.2 riastrad SVGA3dCmdHeader *header) 1997 1.2 riastrad { 1998 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader); 1999 1.2 riastrad struct vmw_ctx_bindinfo_shader binding; 2000 1.3 riastrad struct vmw_resource *ctx, *res = NULL; 2001 1.3 riastrad struct vmw_ctx_validation_info *ctx_info; 2002 1.2 riastrad int ret; 2003 1.2 riastrad 2004 1.3 riastrad cmd = container_of(header, typeof(*cmd), header); 2005 1.2 riastrad 2006 1.2 riastrad if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) { 2007 1.3 riastrad VMW_DEBUG_USER("Illegal shader type %u.\n", 2008 1.3 riastrad (unsigned int) cmd->body.type); 2009 1.2 riastrad return -EINVAL; 2010 1.2 riastrad } 2011 1.2 riastrad 2012 1.2 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 2013 1.3 riastrad VMW_RES_DIRTY_SET, user_context_converter, 2014 1.3 riastrad &cmd->body.cid, &ctx); 2015 1.2 riastrad if (unlikely(ret != 0)) 2016 1.2 riastrad return ret; 2017 1.2 riastrad 2018 1.2 riastrad if (!dev_priv->has_mob) 2019 1.2 riastrad return 0; 2020 1.2 riastrad 2021 1.2 riastrad if (cmd->body.shid != SVGA3D_INVALID_ID) { 2022 1.3 riastrad /* 2023 1.3 riastrad * This is the compat shader path - Per device guest-backed 2024 1.3 riastrad * shaders, but user-space thinks it's per context host- 2025 1.3 riastrad * backed shaders. 2026 1.3 riastrad */ 2027 1.3 riastrad res = vmw_shader_lookup(vmw_context_res_man(ctx), 2028 1.3 riastrad cmd->body.shid, cmd->body.type); 2029 1.3 riastrad if (!IS_ERR(res)) { 2030 1.3 riastrad ret = vmw_execbuf_res_noctx_val_add(sw_context, res, 2031 1.3 riastrad VMW_RES_DIRTY_NONE); 2032 1.3 riastrad if (unlikely(ret != 0)) 2033 1.3 riastrad return ret; 2034 1.2 riastrad 2035 1.3 riastrad ret = vmw_resource_relocation_add 2036 1.3 riastrad (sw_context, res, 2037 1.3 riastrad vmw_ptr_diff(sw_context->buf_start, 2038 1.3 riastrad &cmd->body.shid), 2039 1.3 riastrad vmw_res_rel_normal); 2040 1.2 riastrad if (unlikely(ret != 0)) 2041 1.2 riastrad return ret; 2042 1.2 riastrad } 2043 1.2 riastrad } 2044 1.2 riastrad 2045 1.3 riastrad if (IS_ERR_OR_NULL(res)) { 2046 1.3 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader, 2047 1.3 riastrad VMW_RES_DIRTY_NONE, 2048 1.3 riastrad user_shader_converter, &cmd->body.shid, 2049 1.3 riastrad &res); 2050 1.2 riastrad if (unlikely(ret != 0)) 2051 1.2 riastrad return ret; 2052 1.2 riastrad } 2053 1.2 riastrad 2054 1.3 riastrad ctx_info = vmw_execbuf_info_from_res(sw_context, ctx); 2055 1.3 riastrad if (!ctx_info) 2056 1.3 riastrad return -EINVAL; 2057 1.3 riastrad 2058 1.3 riastrad binding.bi.ctx = ctx; 2059 1.3 riastrad binding.bi.res = res; 2060 1.2 riastrad binding.bi.bt = vmw_ctx_binding_shader; 2061 1.2 riastrad binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; 2062 1.3 riastrad vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0); 2063 1.3 riastrad 2064 1.2 riastrad return 0; 2065 1.2 riastrad } 2066 1.2 riastrad 2067 1.2 riastrad /** 2068 1.3 riastrad * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command 2069 1.2 riastrad * 2070 1.2 riastrad * @dev_priv: Pointer to a device private struct. 2071 1.2 riastrad * @sw_context: The software context being used for this batch. 2072 1.2 riastrad * @header: Pointer to the command header in the command stream. 2073 1.2 riastrad */ 2074 1.2 riastrad static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv, 2075 1.2 riastrad struct vmw_sw_context *sw_context, 2076 1.2 riastrad SVGA3dCmdHeader *header) 2077 1.2 riastrad { 2078 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst); 2079 1.2 riastrad int ret; 2080 1.2 riastrad 2081 1.3 riastrad cmd = container_of(header, typeof(*cmd), header); 2082 1.2 riastrad 2083 1.2 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 2084 1.3 riastrad VMW_RES_DIRTY_SET, user_context_converter, 2085 1.3 riastrad &cmd->body.cid, NULL); 2086 1.2 riastrad if (unlikely(ret != 0)) 2087 1.2 riastrad return ret; 2088 1.2 riastrad 2089 1.2 riastrad if (dev_priv->has_mob) 2090 1.2 riastrad header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE; 2091 1.2 riastrad 2092 1.2 riastrad return 0; 2093 1.2 riastrad } 2094 1.2 riastrad 2095 1.2 riastrad /** 2096 1.3 riastrad * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command 2097 1.2 riastrad * 2098 1.2 riastrad * @dev_priv: Pointer to a device private struct. 2099 1.2 riastrad * @sw_context: The software context being used for this batch. 2100 1.2 riastrad * @header: Pointer to the command header in the command stream. 2101 1.2 riastrad */ 2102 1.2 riastrad static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv, 2103 1.2 riastrad struct vmw_sw_context *sw_context, 2104 1.2 riastrad SVGA3dCmdHeader *header) 2105 1.2 riastrad { 2106 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) = 2107 1.3 riastrad container_of(header, typeof(*cmd), header); 2108 1.2 riastrad 2109 1.2 riastrad return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader, 2110 1.3 riastrad user_shader_converter, &cmd->body.shid, 2111 1.3 riastrad &cmd->body.mobid, cmd->body.offsetInBytes); 2112 1.2 riastrad } 2113 1.2 riastrad 2114 1.2 riastrad /** 2115 1.3 riastrad * vmw_cmd_dx_set_single_constant_buffer - Validate 2116 1.2 riastrad * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command. 2117 1.2 riastrad * 2118 1.2 riastrad * @dev_priv: Pointer to a device private struct. 2119 1.2 riastrad * @sw_context: The software context being used for this batch. 2120 1.2 riastrad * @header: Pointer to the command header in the command stream. 2121 1.2 riastrad */ 2122 1.2 riastrad static int 2123 1.2 riastrad vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, 2124 1.2 riastrad struct vmw_sw_context *sw_context, 2125 1.2 riastrad SVGA3dCmdHeader *header) 2126 1.2 riastrad { 2127 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer); 2128 1.3 riastrad struct vmw_resource *res = NULL; 2129 1.3 riastrad struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2130 1.2 riastrad struct vmw_ctx_bindinfo_cb binding; 2131 1.2 riastrad int ret; 2132 1.2 riastrad 2133 1.3 riastrad if (!ctx_node) 2134 1.2 riastrad return -EINVAL; 2135 1.2 riastrad 2136 1.2 riastrad cmd = container_of(header, typeof(*cmd), header); 2137 1.2 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2138 1.3 riastrad VMW_RES_DIRTY_NONE, user_surface_converter, 2139 1.3 riastrad &cmd->body.sid, &res); 2140 1.2 riastrad if (unlikely(ret != 0)) 2141 1.2 riastrad return ret; 2142 1.2 riastrad 2143 1.3 riastrad binding.bi.ctx = ctx_node->ctx; 2144 1.3 riastrad binding.bi.res = res; 2145 1.2 riastrad binding.bi.bt = vmw_ctx_binding_cb; 2146 1.2 riastrad binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; 2147 1.2 riastrad binding.offset = cmd->body.offsetInBytes; 2148 1.2 riastrad binding.size = cmd->body.sizeInBytes; 2149 1.2 riastrad binding.slot = cmd->body.slot; 2150 1.2 riastrad 2151 1.2 riastrad if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 || 2152 1.2 riastrad binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) { 2153 1.3 riastrad VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n", 2154 1.3 riastrad (unsigned int) cmd->body.type, 2155 1.3 riastrad (unsigned int) binding.slot); 2156 1.2 riastrad return -EINVAL; 2157 1.2 riastrad } 2158 1.2 riastrad 2159 1.3 riastrad vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 2160 1.3 riastrad binding.slot); 2161 1.2 riastrad 2162 1.2 riastrad return 0; 2163 1.2 riastrad } 2164 1.2 riastrad 2165 1.2 riastrad /** 2166 1.3 riastrad * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES 2167 1.3 riastrad * command 2168 1.2 riastrad * 2169 1.2 riastrad * @dev_priv: Pointer to a device private struct. 2170 1.2 riastrad * @sw_context: The software context being used for this batch. 2171 1.2 riastrad * @header: Pointer to the command header in the command stream. 2172 1.2 riastrad */ 2173 1.2 riastrad static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv, 2174 1.2 riastrad struct vmw_sw_context *sw_context, 2175 1.2 riastrad SVGA3dCmdHeader *header) 2176 1.2 riastrad { 2177 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) = 2178 1.3 riastrad container_of(header, typeof(*cmd), header); 2179 1.2 riastrad u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) / 2180 1.2 riastrad sizeof(SVGA3dShaderResourceViewId); 2181 1.2 riastrad 2182 1.2 riastrad if ((u64) cmd->body.startView + (u64) num_sr_view > 2183 1.2 riastrad (u64) SVGA3D_DX_MAX_SRVIEWS || 2184 1.2 riastrad cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) { 2185 1.3 riastrad VMW_DEBUG_USER("Invalid shader binding.\n"); 2186 1.2 riastrad return -EINVAL; 2187 1.2 riastrad } 2188 1.2 riastrad 2189 1.2 riastrad return vmw_view_bindings_add(sw_context, vmw_view_sr, 2190 1.2 riastrad vmw_ctx_binding_sr, 2191 1.2 riastrad cmd->body.type - SVGA3D_SHADERTYPE_MIN, 2192 1.2 riastrad (void *) &cmd[1], num_sr_view, 2193 1.2 riastrad cmd->body.startView); 2194 1.2 riastrad } 2195 1.2 riastrad 2196 1.2 riastrad /** 2197 1.3 riastrad * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command 2198 1.2 riastrad * 2199 1.2 riastrad * @dev_priv: Pointer to a device private struct. 2200 1.2 riastrad * @sw_context: The software context being used for this batch. 2201 1.2 riastrad * @header: Pointer to the command header in the command stream. 2202 1.2 riastrad */ 2203 1.2 riastrad static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, 2204 1.2 riastrad struct vmw_sw_context *sw_context, 2205 1.2 riastrad SVGA3dCmdHeader *header) 2206 1.2 riastrad { 2207 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader); 2208 1.2 riastrad struct vmw_resource *res = NULL; 2209 1.3 riastrad struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2210 1.2 riastrad struct vmw_ctx_bindinfo_shader binding; 2211 1.2 riastrad int ret = 0; 2212 1.2 riastrad 2213 1.3 riastrad if (!ctx_node) 2214 1.2 riastrad return -EINVAL; 2215 1.2 riastrad 2216 1.2 riastrad cmd = container_of(header, typeof(*cmd), header); 2217 1.2 riastrad 2218 1.3 riastrad if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX || 2219 1.3 riastrad cmd->body.type < SVGA3D_SHADERTYPE_MIN) { 2220 1.3 riastrad VMW_DEBUG_USER("Illegal shader type %u.\n", 2221 1.3 riastrad (unsigned int) cmd->body.type); 2222 1.2 riastrad return -EINVAL; 2223 1.2 riastrad } 2224 1.2 riastrad 2225 1.2 riastrad if (cmd->body.shaderId != SVGA3D_INVALID_ID) { 2226 1.2 riastrad res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0); 2227 1.2 riastrad if (IS_ERR(res)) { 2228 1.3 riastrad VMW_DEBUG_USER("Could not find shader for binding.\n"); 2229 1.2 riastrad return PTR_ERR(res); 2230 1.2 riastrad } 2231 1.2 riastrad 2232 1.3 riastrad ret = vmw_execbuf_res_noctx_val_add(sw_context, res, 2233 1.3 riastrad VMW_RES_DIRTY_NONE); 2234 1.2 riastrad if (ret) 2235 1.3 riastrad return ret; 2236 1.2 riastrad } 2237 1.2 riastrad 2238 1.3 riastrad binding.bi.ctx = ctx_node->ctx; 2239 1.2 riastrad binding.bi.res = res; 2240 1.2 riastrad binding.bi.bt = vmw_ctx_binding_dx_shader; 2241 1.2 riastrad binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; 2242 1.2 riastrad 2243 1.3 riastrad vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0); 2244 1.2 riastrad 2245 1.3 riastrad return 0; 2246 1.2 riastrad } 2247 1.2 riastrad 2248 1.2 riastrad /** 2249 1.3 riastrad * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS 2250 1.3 riastrad * command 2251 1.2 riastrad * 2252 1.2 riastrad * @dev_priv: Pointer to a device private struct. 2253 1.2 riastrad * @sw_context: The software context being used for this batch. 2254 1.2 riastrad * @header: Pointer to the command header in the command stream. 2255 1.2 riastrad */ 2256 1.2 riastrad static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv, 2257 1.2 riastrad struct vmw_sw_context *sw_context, 2258 1.2 riastrad SVGA3dCmdHeader *header) 2259 1.2 riastrad { 2260 1.3 riastrad struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2261 1.2 riastrad struct vmw_ctx_bindinfo_vb binding; 2262 1.3 riastrad struct vmw_resource *res; 2263 1.2 riastrad struct { 2264 1.2 riastrad SVGA3dCmdHeader header; 2265 1.2 riastrad SVGA3dCmdDXSetVertexBuffers body; 2266 1.2 riastrad SVGA3dVertexBuffer buf[]; 2267 1.2 riastrad } *cmd; 2268 1.2 riastrad int i, ret, num; 2269 1.2 riastrad 2270 1.3 riastrad if (!ctx_node) 2271 1.2 riastrad return -EINVAL; 2272 1.2 riastrad 2273 1.2 riastrad cmd = container_of(header, typeof(*cmd), header); 2274 1.2 riastrad num = (cmd->header.size - sizeof(cmd->body)) / 2275 1.2 riastrad sizeof(SVGA3dVertexBuffer); 2276 1.2 riastrad if ((u64)num + (u64)cmd->body.startBuffer > 2277 1.2 riastrad (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) { 2278 1.3 riastrad VMW_DEBUG_USER("Invalid number of vertex buffers.\n"); 2279 1.2 riastrad return -EINVAL; 2280 1.2 riastrad } 2281 1.2 riastrad 2282 1.2 riastrad for (i = 0; i < num; i++) { 2283 1.2 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2284 1.3 riastrad VMW_RES_DIRTY_NONE, 2285 1.2 riastrad user_surface_converter, 2286 1.3 riastrad &cmd->buf[i].sid, &res); 2287 1.2 riastrad if (unlikely(ret != 0)) 2288 1.2 riastrad return ret; 2289 1.2 riastrad 2290 1.3 riastrad binding.bi.ctx = ctx_node->ctx; 2291 1.2 riastrad binding.bi.bt = vmw_ctx_binding_vb; 2292 1.3 riastrad binding.bi.res = res; 2293 1.2 riastrad binding.offset = cmd->buf[i].offset; 2294 1.2 riastrad binding.stride = cmd->buf[i].stride; 2295 1.2 riastrad binding.slot = i + cmd->body.startBuffer; 2296 1.2 riastrad 2297 1.3 riastrad vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot); 2298 1.2 riastrad } 2299 1.2 riastrad 2300 1.2 riastrad return 0; 2301 1.2 riastrad } 2302 1.2 riastrad 2303 1.2 riastrad /** 2304 1.3 riastrad * vmw_cmd_dx_ia_set_vertex_buffers - Validate 2305 1.3 riastrad * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command. 2306 1.2 riastrad * 2307 1.2 riastrad * @dev_priv: Pointer to a device private struct. 2308 1.2 riastrad * @sw_context: The software context being used for this batch. 2309 1.2 riastrad * @header: Pointer to the command header in the command stream. 2310 1.2 riastrad */ 2311 1.2 riastrad static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv, 2312 1.2 riastrad struct vmw_sw_context *sw_context, 2313 1.2 riastrad SVGA3dCmdHeader *header) 2314 1.2 riastrad { 2315 1.3 riastrad struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2316 1.2 riastrad struct vmw_ctx_bindinfo_ib binding; 2317 1.3 riastrad struct vmw_resource *res; 2318 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer); 2319 1.2 riastrad int ret; 2320 1.2 riastrad 2321 1.3 riastrad if (!ctx_node) 2322 1.2 riastrad return -EINVAL; 2323 1.2 riastrad 2324 1.2 riastrad cmd = container_of(header, typeof(*cmd), header); 2325 1.2 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2326 1.3 riastrad VMW_RES_DIRTY_NONE, user_surface_converter, 2327 1.3 riastrad &cmd->body.sid, &res); 2328 1.2 riastrad if (unlikely(ret != 0)) 2329 1.2 riastrad return ret; 2330 1.2 riastrad 2331 1.3 riastrad binding.bi.ctx = ctx_node->ctx; 2332 1.3 riastrad binding.bi.res = res; 2333 1.2 riastrad binding.bi.bt = vmw_ctx_binding_ib; 2334 1.2 riastrad binding.offset = cmd->body.offset; 2335 1.2 riastrad binding.format = cmd->body.format; 2336 1.2 riastrad 2337 1.3 riastrad vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0); 2338 1.2 riastrad 2339 1.2 riastrad return 0; 2340 1.2 riastrad } 2341 1.2 riastrad 2342 1.2 riastrad /** 2343 1.3 riastrad * vmw_cmd_dx_set_rendertarget - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS 2344 1.3 riastrad * command 2345 1.2 riastrad * 2346 1.2 riastrad * @dev_priv: Pointer to a device private struct. 2347 1.2 riastrad * @sw_context: The software context being used for this batch. 2348 1.2 riastrad * @header: Pointer to the command header in the command stream. 2349 1.2 riastrad */ 2350 1.2 riastrad static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv, 2351 1.2 riastrad struct vmw_sw_context *sw_context, 2352 1.2 riastrad SVGA3dCmdHeader *header) 2353 1.2 riastrad { 2354 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) = 2355 1.3 riastrad container_of(header, typeof(*cmd), header); 2356 1.2 riastrad u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) / 2357 1.2 riastrad sizeof(SVGA3dRenderTargetViewId); 2358 1.3 riastrad int ret; 2359 1.2 riastrad 2360 1.2 riastrad if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) { 2361 1.3 riastrad VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n"); 2362 1.2 riastrad return -EINVAL; 2363 1.2 riastrad } 2364 1.2 riastrad 2365 1.3 riastrad ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds, 2366 1.3 riastrad 0, &cmd->body.depthStencilViewId, 1, 0); 2367 1.2 riastrad if (ret) 2368 1.2 riastrad return ret; 2369 1.2 riastrad 2370 1.2 riastrad return vmw_view_bindings_add(sw_context, vmw_view_rt, 2371 1.3 riastrad vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1], 2372 1.3 riastrad num_rt_view, 0); 2373 1.2 riastrad } 2374 1.2 riastrad 2375 1.2 riastrad /** 2376 1.3 riastrad * vmw_cmd_dx_clear_rendertarget_view - Validate 2377 1.2 riastrad * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command 2378 1.2 riastrad * 2379 1.2 riastrad * @dev_priv: Pointer to a device private struct. 2380 1.2 riastrad * @sw_context: The software context being used for this batch. 2381 1.2 riastrad * @header: Pointer to the command header in the command stream. 2382 1.2 riastrad */ 2383 1.2 riastrad static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv, 2384 1.2 riastrad struct vmw_sw_context *sw_context, 2385 1.2 riastrad SVGA3dCmdHeader *header) 2386 1.2 riastrad { 2387 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) = 2388 1.3 riastrad container_of(header, typeof(*cmd), header); 2389 1.3 riastrad struct vmw_resource *ret; 2390 1.3 riastrad 2391 1.3 riastrad ret = vmw_view_id_val_add(sw_context, vmw_view_rt, 2392 1.3 riastrad cmd->body.renderTargetViewId); 2393 1.2 riastrad 2394 1.3 riastrad return PTR_ERR_OR_ZERO(ret); 2395 1.2 riastrad } 2396 1.2 riastrad 2397 1.2 riastrad /** 2398 1.3 riastrad * vmw_cmd_dx_clear_rendertarget_view - Validate 2399 1.2 riastrad * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command 2400 1.2 riastrad * 2401 1.2 riastrad * @dev_priv: Pointer to a device private struct. 2402 1.2 riastrad * @sw_context: The software context being used for this batch. 2403 1.2 riastrad * @header: Pointer to the command header in the command stream. 2404 1.2 riastrad */ 2405 1.2 riastrad static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv, 2406 1.2 riastrad struct vmw_sw_context *sw_context, 2407 1.2 riastrad SVGA3dCmdHeader *header) 2408 1.2 riastrad { 2409 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) = 2410 1.3 riastrad container_of(header, typeof(*cmd), header); 2411 1.3 riastrad struct vmw_resource *ret; 2412 1.3 riastrad 2413 1.3 riastrad ret = vmw_view_id_val_add(sw_context, vmw_view_ds, 2414 1.3 riastrad cmd->body.depthStencilViewId); 2415 1.2 riastrad 2416 1.3 riastrad return PTR_ERR_OR_ZERO(ret); 2417 1.2 riastrad } 2418 1.2 riastrad 2419 1.2 riastrad static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, 2420 1.2 riastrad struct vmw_sw_context *sw_context, 2421 1.2 riastrad SVGA3dCmdHeader *header) 2422 1.2 riastrad { 2423 1.3 riastrad struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2424 1.3 riastrad struct vmw_resource *srf; 2425 1.2 riastrad struct vmw_resource *res; 2426 1.2 riastrad enum vmw_view_type view_type; 2427 1.2 riastrad int ret; 2428 1.2 riastrad /* 2429 1.3 riastrad * This is based on the fact that all affected define commands have the 2430 1.3 riastrad * same initial command body layout. 2431 1.2 riastrad */ 2432 1.2 riastrad struct { 2433 1.2 riastrad SVGA3dCmdHeader header; 2434 1.2 riastrad uint32 defined_id; 2435 1.2 riastrad uint32 sid; 2436 1.2 riastrad } *cmd; 2437 1.2 riastrad 2438 1.3 riastrad if (!ctx_node) 2439 1.2 riastrad return -EINVAL; 2440 1.2 riastrad 2441 1.2 riastrad view_type = vmw_view_cmd_to_type(header->id); 2442 1.2 riastrad if (view_type == vmw_view_max) 2443 1.2 riastrad return -EINVAL; 2444 1.3 riastrad 2445 1.2 riastrad cmd = container_of(header, typeof(*cmd), header); 2446 1.3 riastrad if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) { 2447 1.3 riastrad VMW_DEBUG_USER("Invalid surface id.\n"); 2448 1.3 riastrad return -EINVAL; 2449 1.3 riastrad } 2450 1.2 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2451 1.3 riastrad VMW_RES_DIRTY_NONE, user_surface_converter, 2452 1.3 riastrad &cmd->sid, &srf); 2453 1.2 riastrad if (unlikely(ret != 0)) 2454 1.2 riastrad return ret; 2455 1.2 riastrad 2456 1.3 riastrad res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]); 2457 1.2 riastrad ret = vmw_cotable_notify(res, cmd->defined_id); 2458 1.2 riastrad if (unlikely(ret != 0)) 2459 1.2 riastrad return ret; 2460 1.2 riastrad 2461 1.3 riastrad return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type, 2462 1.3 riastrad cmd->defined_id, header, 2463 1.2 riastrad header->size + sizeof(*header), 2464 1.2 riastrad &sw_context->staged_cmd_res); 2465 1.2 riastrad } 2466 1.2 riastrad 2467 1.2 riastrad /** 2468 1.3 riastrad * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command. 2469 1.2 riastrad * 2470 1.2 riastrad * @dev_priv: Pointer to a device private struct. 2471 1.2 riastrad * @sw_context: The software context being used for this batch. 2472 1.2 riastrad * @header: Pointer to the command header in the command stream. 2473 1.2 riastrad */ 2474 1.2 riastrad static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv, 2475 1.2 riastrad struct vmw_sw_context *sw_context, 2476 1.2 riastrad SVGA3dCmdHeader *header) 2477 1.2 riastrad { 2478 1.3 riastrad struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2479 1.2 riastrad struct vmw_ctx_bindinfo_so binding; 2480 1.3 riastrad struct vmw_resource *res; 2481 1.2 riastrad struct { 2482 1.2 riastrad SVGA3dCmdHeader header; 2483 1.2 riastrad SVGA3dCmdDXSetSOTargets body; 2484 1.2 riastrad SVGA3dSoTarget targets[]; 2485 1.2 riastrad } *cmd; 2486 1.2 riastrad int i, ret, num; 2487 1.2 riastrad 2488 1.3 riastrad if (!ctx_node) 2489 1.2 riastrad return -EINVAL; 2490 1.2 riastrad 2491 1.2 riastrad cmd = container_of(header, typeof(*cmd), header); 2492 1.3 riastrad num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget); 2493 1.2 riastrad 2494 1.2 riastrad if (num > SVGA3D_DX_MAX_SOTARGETS) { 2495 1.3 riastrad VMW_DEBUG_USER("Invalid DX SO binding.\n"); 2496 1.2 riastrad return -EINVAL; 2497 1.2 riastrad } 2498 1.2 riastrad 2499 1.2 riastrad for (i = 0; i < num; i++) { 2500 1.2 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2501 1.3 riastrad VMW_RES_DIRTY_SET, 2502 1.2 riastrad user_surface_converter, 2503 1.3 riastrad &cmd->targets[i].sid, &res); 2504 1.2 riastrad if (unlikely(ret != 0)) 2505 1.2 riastrad return ret; 2506 1.2 riastrad 2507 1.3 riastrad binding.bi.ctx = ctx_node->ctx; 2508 1.3 riastrad binding.bi.res = res; 2509 1.2 riastrad binding.bi.bt = vmw_ctx_binding_so, 2510 1.2 riastrad binding.offset = cmd->targets[i].offset; 2511 1.2 riastrad binding.size = cmd->targets[i].sizeInBytes; 2512 1.2 riastrad binding.slot = i; 2513 1.2 riastrad 2514 1.3 riastrad vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot); 2515 1.2 riastrad } 2516 1.2 riastrad 2517 1.2 riastrad return 0; 2518 1.2 riastrad } 2519 1.2 riastrad 2520 1.2 riastrad static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv, 2521 1.2 riastrad struct vmw_sw_context *sw_context, 2522 1.2 riastrad SVGA3dCmdHeader *header) 2523 1.2 riastrad { 2524 1.3 riastrad struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2525 1.2 riastrad struct vmw_resource *res; 2526 1.2 riastrad /* 2527 1.2 riastrad * This is based on the fact that all affected define commands have 2528 1.2 riastrad * the same initial command body layout. 2529 1.2 riastrad */ 2530 1.2 riastrad struct { 2531 1.2 riastrad SVGA3dCmdHeader header; 2532 1.2 riastrad uint32 defined_id; 2533 1.2 riastrad } *cmd; 2534 1.2 riastrad enum vmw_so_type so_type; 2535 1.2 riastrad int ret; 2536 1.2 riastrad 2537 1.3 riastrad if (!ctx_node) 2538 1.2 riastrad return -EINVAL; 2539 1.2 riastrad 2540 1.2 riastrad so_type = vmw_so_cmd_to_type(header->id); 2541 1.3 riastrad res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]); 2542 1.2 riastrad cmd = container_of(header, typeof(*cmd), header); 2543 1.2 riastrad ret = vmw_cotable_notify(res, cmd->defined_id); 2544 1.2 riastrad 2545 1.2 riastrad return ret; 2546 1.2 riastrad } 2547 1.2 riastrad 2548 1.2 riastrad /** 2549 1.3 riastrad * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE 2550 1.3 riastrad * command 2551 1.2 riastrad * 2552 1.2 riastrad * @dev_priv: Pointer to a device private struct. 2553 1.2 riastrad * @sw_context: The software context being used for this batch. 2554 1.2 riastrad * @header: Pointer to the command header in the command stream. 2555 1.2 riastrad */ 2556 1.2 riastrad static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv, 2557 1.2 riastrad struct vmw_sw_context *sw_context, 2558 1.2 riastrad SVGA3dCmdHeader *header) 2559 1.2 riastrad { 2560 1.2 riastrad struct { 2561 1.2 riastrad SVGA3dCmdHeader header; 2562 1.2 riastrad union { 2563 1.2 riastrad SVGA3dCmdDXReadbackSubResource r_body; 2564 1.2 riastrad SVGA3dCmdDXInvalidateSubResource i_body; 2565 1.2 riastrad SVGA3dCmdDXUpdateSubResource u_body; 2566 1.2 riastrad SVGA3dSurfaceId sid; 2567 1.2 riastrad }; 2568 1.2 riastrad } *cmd; 2569 1.2 riastrad 2570 1.2 riastrad BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) != 2571 1.2 riastrad offsetof(typeof(*cmd), sid)); 2572 1.2 riastrad BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) != 2573 1.2 riastrad offsetof(typeof(*cmd), sid)); 2574 1.2 riastrad BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) != 2575 1.2 riastrad offsetof(typeof(*cmd), sid)); 2576 1.2 riastrad 2577 1.2 riastrad cmd = container_of(header, typeof(*cmd), header); 2578 1.2 riastrad return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2579 1.3 riastrad VMW_RES_DIRTY_NONE, user_surface_converter, 2580 1.2 riastrad &cmd->sid, NULL); 2581 1.2 riastrad } 2582 1.2 riastrad 2583 1.2 riastrad static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv, 2584 1.2 riastrad struct vmw_sw_context *sw_context, 2585 1.2 riastrad SVGA3dCmdHeader *header) 2586 1.2 riastrad { 2587 1.3 riastrad struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2588 1.2 riastrad 2589 1.3 riastrad if (!ctx_node) 2590 1.2 riastrad return -EINVAL; 2591 1.2 riastrad 2592 1.2 riastrad return 0; 2593 1.2 riastrad } 2594 1.2 riastrad 2595 1.2 riastrad /** 2596 1.3 riastrad * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view 2597 1.3 riastrad * resource for removal. 2598 1.2 riastrad * 2599 1.2 riastrad * @dev_priv: Pointer to a device private struct. 2600 1.2 riastrad * @sw_context: The software context being used for this batch. 2601 1.2 riastrad * @header: Pointer to the command header in the command stream. 2602 1.2 riastrad * 2603 1.3 riastrad * Check that the view exists, and if it was not created using this command 2604 1.3 riastrad * batch, conditionally make this command a NOP. 2605 1.2 riastrad */ 2606 1.2 riastrad static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, 2607 1.2 riastrad struct vmw_sw_context *sw_context, 2608 1.2 riastrad SVGA3dCmdHeader *header) 2609 1.2 riastrad { 2610 1.3 riastrad struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2611 1.2 riastrad struct { 2612 1.2 riastrad SVGA3dCmdHeader header; 2613 1.2 riastrad union vmw_view_destroy body; 2614 1.2 riastrad } *cmd = container_of(header, typeof(*cmd), header); 2615 1.2 riastrad enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id); 2616 1.2 riastrad struct vmw_resource *view; 2617 1.2 riastrad int ret; 2618 1.2 riastrad 2619 1.3 riastrad if (!ctx_node) 2620 1.2 riastrad return -EINVAL; 2621 1.2 riastrad 2622 1.3 riastrad ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type, 2623 1.3 riastrad &sw_context->staged_cmd_res, &view); 2624 1.2 riastrad if (ret || !view) 2625 1.2 riastrad return ret; 2626 1.2 riastrad 2627 1.2 riastrad /* 2628 1.3 riastrad * If the view wasn't created during this command batch, it might 2629 1.3 riastrad * have been removed due to a context swapout, so add a 2630 1.3 riastrad * relocation to conditionally make this command a NOP to avoid 2631 1.3 riastrad * device errors. 2632 1.2 riastrad */ 2633 1.3 riastrad return vmw_resource_relocation_add(sw_context, view, 2634 1.3 riastrad vmw_ptr_diff(sw_context->buf_start, 2635 1.3 riastrad &cmd->header.id), 2636 1.3 riastrad vmw_res_rel_cond_nop); 2637 1.1 riastrad } 2638 1.1 riastrad 2639 1.2 riastrad /** 2640 1.3 riastrad * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command 2641 1.2 riastrad * 2642 1.2 riastrad * @dev_priv: Pointer to a device private struct. 2643 1.2 riastrad * @sw_context: The software context being used for this batch. 2644 1.2 riastrad * @header: Pointer to the command header in the command stream. 2645 1.2 riastrad */ 2646 1.2 riastrad static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv, 2647 1.2 riastrad struct vmw_sw_context *sw_context, 2648 1.2 riastrad SVGA3dCmdHeader *header) 2649 1.1 riastrad { 2650 1.3 riastrad struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2651 1.2 riastrad struct vmw_resource *res; 2652 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) = 2653 1.3 riastrad container_of(header, typeof(*cmd), header); 2654 1.2 riastrad int ret; 2655 1.1 riastrad 2656 1.3 riastrad if (!ctx_node) 2657 1.2 riastrad return -EINVAL; 2658 1.1 riastrad 2659 1.3 riastrad res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER); 2660 1.2 riastrad ret = vmw_cotable_notify(res, cmd->body.shaderId); 2661 1.2 riastrad if (ret) 2662 1.1 riastrad return ret; 2663 1.1 riastrad 2664 1.3 riastrad return vmw_dx_shader_add(sw_context->man, ctx_node->ctx, 2665 1.2 riastrad cmd->body.shaderId, cmd->body.type, 2666 1.2 riastrad &sw_context->staged_cmd_res); 2667 1.1 riastrad } 2668 1.1 riastrad 2669 1.2 riastrad /** 2670 1.3 riastrad * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command 2671 1.2 riastrad * 2672 1.2 riastrad * @dev_priv: Pointer to a device private struct. 2673 1.2 riastrad * @sw_context: The software context being used for this batch. 2674 1.2 riastrad * @header: Pointer to the command header in the command stream. 2675 1.2 riastrad */ 2676 1.2 riastrad static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv, 2677 1.2 riastrad struct vmw_sw_context *sw_context, 2678 1.2 riastrad SVGA3dCmdHeader *header) 2679 1.1 riastrad { 2680 1.3 riastrad struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2681 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) = 2682 1.3 riastrad container_of(header, typeof(*cmd), header); 2683 1.1 riastrad int ret; 2684 1.1 riastrad 2685 1.3 riastrad if (!ctx_node) 2686 1.2 riastrad return -EINVAL; 2687 1.1 riastrad 2688 1.2 riastrad ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0, 2689 1.2 riastrad &sw_context->staged_cmd_res); 2690 1.1 riastrad 2691 1.1 riastrad return ret; 2692 1.1 riastrad } 2693 1.1 riastrad 2694 1.1 riastrad /** 2695 1.3 riastrad * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command 2696 1.1 riastrad * 2697 1.1 riastrad * @dev_priv: Pointer to a device private struct. 2698 1.1 riastrad * @sw_context: The software context being used for this batch. 2699 1.1 riastrad * @header: Pointer to the command header in the command stream. 2700 1.1 riastrad */ 2701 1.2 riastrad static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv, 2702 1.2 riastrad struct vmw_sw_context *sw_context, 2703 1.2 riastrad SVGA3dCmdHeader *header) 2704 1.1 riastrad { 2705 1.3 riastrad struct vmw_resource *ctx; 2706 1.2 riastrad struct vmw_resource *res; 2707 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) = 2708 1.3 riastrad container_of(header, typeof(*cmd), header); 2709 1.1 riastrad int ret; 2710 1.1 riastrad 2711 1.2 riastrad if (cmd->body.cid != SVGA3D_INVALID_ID) { 2712 1.2 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 2713 1.3 riastrad VMW_RES_DIRTY_SET, 2714 1.3 riastrad user_context_converter, &cmd->body.cid, 2715 1.3 riastrad &ctx); 2716 1.2 riastrad if (ret) 2717 1.2 riastrad return ret; 2718 1.2 riastrad } else { 2719 1.3 riastrad struct vmw_ctx_validation_info *ctx_node = 2720 1.3 riastrad VMW_GET_CTX_NODE(sw_context); 2721 1.3 riastrad 2722 1.3 riastrad if (!ctx_node) 2723 1.2 riastrad return -EINVAL; 2724 1.3 riastrad 2725 1.3 riastrad ctx = ctx_node->ctx; 2726 1.2 riastrad } 2727 1.2 riastrad 2728 1.3 riastrad res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0); 2729 1.2 riastrad if (IS_ERR(res)) { 2730 1.3 riastrad VMW_DEBUG_USER("Could not find shader to bind.\n"); 2731 1.2 riastrad return PTR_ERR(res); 2732 1.2 riastrad } 2733 1.2 riastrad 2734 1.3 riastrad ret = vmw_execbuf_res_noctx_val_add(sw_context, res, 2735 1.3 riastrad VMW_RES_DIRTY_NONE); 2736 1.2 riastrad if (ret) { 2737 1.3 riastrad VMW_DEBUG_USER("Error creating resource validation node.\n"); 2738 1.3 riastrad return ret; 2739 1.2 riastrad } 2740 1.2 riastrad 2741 1.3 riastrad return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, 2742 1.3 riastrad &cmd->body.mobid, 2743 1.3 riastrad cmd->body.offsetInBytes); 2744 1.3 riastrad } 2745 1.3 riastrad 2746 1.3 riastrad /** 2747 1.3 riastrad * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command 2748 1.3 riastrad * 2749 1.3 riastrad * @dev_priv: Pointer to a device private struct. 2750 1.3 riastrad * @sw_context: The software context being used for this batch. 2751 1.3 riastrad * @header: Pointer to the command header in the command stream. 2752 1.3 riastrad */ 2753 1.3 riastrad static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv, 2754 1.3 riastrad struct vmw_sw_context *sw_context, 2755 1.3 riastrad SVGA3dCmdHeader *header) 2756 1.3 riastrad { 2757 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) = 2758 1.3 riastrad container_of(header, typeof(*cmd), header); 2759 1.3 riastrad struct vmw_resource *ret; 2760 1.3 riastrad 2761 1.3 riastrad ret = vmw_view_id_val_add(sw_context, vmw_view_sr, 2762 1.3 riastrad cmd->body.shaderResourceViewId); 2763 1.3 riastrad 2764 1.3 riastrad return PTR_ERR_OR_ZERO(ret); 2765 1.3 riastrad } 2766 1.3 riastrad 2767 1.3 riastrad /** 2768 1.3 riastrad * vmw_cmd_dx_transfer_from_buffer - Validate 2769 1.3 riastrad * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command 2770 1.3 riastrad * 2771 1.3 riastrad * @dev_priv: Pointer to a device private struct. 2772 1.3 riastrad * @sw_context: The software context being used for this batch. 2773 1.3 riastrad * @header: Pointer to the command header in the command stream. 2774 1.3 riastrad */ 2775 1.3 riastrad static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv, 2776 1.3 riastrad struct vmw_sw_context *sw_context, 2777 1.3 riastrad SVGA3dCmdHeader *header) 2778 1.3 riastrad { 2779 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) = 2780 1.3 riastrad container_of(header, typeof(*cmd), header); 2781 1.3 riastrad int ret; 2782 1.3 riastrad 2783 1.3 riastrad ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2784 1.3 riastrad VMW_RES_DIRTY_NONE, user_surface_converter, 2785 1.3 riastrad &cmd->body.srcSid, NULL); 2786 1.3 riastrad if (ret != 0) 2787 1.3 riastrad return ret; 2788 1.3 riastrad 2789 1.3 riastrad return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2790 1.3 riastrad VMW_RES_DIRTY_SET, user_surface_converter, 2791 1.3 riastrad &cmd->body.destSid, NULL); 2792 1.3 riastrad } 2793 1.3 riastrad 2794 1.3 riastrad /** 2795 1.3 riastrad * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command 2796 1.3 riastrad * 2797 1.3 riastrad * @dev_priv: Pointer to a device private struct. 2798 1.3 riastrad * @sw_context: The software context being used for this batch. 2799 1.3 riastrad * @header: Pointer to the command header in the command stream. 2800 1.3 riastrad */ 2801 1.3 riastrad static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv, 2802 1.3 riastrad struct vmw_sw_context *sw_context, 2803 1.3 riastrad SVGA3dCmdHeader *header) 2804 1.3 riastrad { 2805 1.3 riastrad VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) = 2806 1.3 riastrad container_of(header, typeof(*cmd), header); 2807 1.1 riastrad 2808 1.3 riastrad if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)) 2809 1.3 riastrad return -EINVAL; 2810 1.1 riastrad 2811 1.3 riastrad return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2812 1.3 riastrad VMW_RES_DIRTY_SET, user_surface_converter, 2813 1.3 riastrad &cmd->body.surface.sid, NULL); 2814 1.1 riastrad } 2815 1.1 riastrad 2816 1.1 riastrad static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, 2817 1.1 riastrad struct vmw_sw_context *sw_context, 2818 1.1 riastrad void *buf, uint32_t *size) 2819 1.1 riastrad { 2820 1.1 riastrad uint32_t size_remaining = *size; 2821 1.1 riastrad uint32_t cmd_id; 2822 1.1 riastrad 2823 1.2 riastrad cmd_id = ((uint32_t *)buf)[0]; 2824 1.1 riastrad switch (cmd_id) { 2825 1.1 riastrad case SVGA_CMD_UPDATE: 2826 1.1 riastrad *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate); 2827 1.1 riastrad break; 2828 1.1 riastrad case SVGA_CMD_DEFINE_GMRFB: 2829 1.1 riastrad *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB); 2830 1.1 riastrad break; 2831 1.1 riastrad case SVGA_CMD_BLIT_GMRFB_TO_SCREEN: 2832 1.1 riastrad *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen); 2833 1.1 riastrad break; 2834 1.1 riastrad case SVGA_CMD_BLIT_SCREEN_TO_GMRFB: 2835 1.1 riastrad *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen); 2836 1.1 riastrad break; 2837 1.1 riastrad default: 2838 1.3 riastrad VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id); 2839 1.1 riastrad return -EINVAL; 2840 1.1 riastrad } 2841 1.1 riastrad 2842 1.1 riastrad if (*size > size_remaining) { 2843 1.3 riastrad VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n", 2844 1.3 riastrad cmd_id); 2845 1.1 riastrad return -EINVAL; 2846 1.1 riastrad } 2847 1.1 riastrad 2848 1.1 riastrad if (unlikely(!sw_context->kernel)) { 2849 1.3 riastrad VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id); 2850 1.1 riastrad return -EPERM; 2851 1.1 riastrad } 2852 1.1 riastrad 2853 1.1 riastrad if (cmd_id == SVGA_CMD_DEFINE_GMRFB) 2854 1.1 riastrad return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf); 2855 1.1 riastrad 2856 1.1 riastrad return 0; 2857 1.1 riastrad } 2858 1.1 riastrad 2859 1.2 riastrad static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { 2860 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, 2861 1.2 riastrad false, false, false), 2862 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, 2863 1.2 riastrad false, false, false), 2864 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check, 2865 1.2 riastrad true, false, false), 2866 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check, 2867 1.2 riastrad true, false, false), 2868 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma, 2869 1.2 riastrad true, false, false), 2870 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid, 2871 1.2 riastrad false, false, false), 2872 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid, 2873 1.2 riastrad false, false, false), 2874 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check, 2875 1.2 riastrad true, false, false), 2876 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check, 2877 1.2 riastrad true, false, false), 2878 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check, 2879 1.2 riastrad true, false, false), 2880 1.1 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, 2881 1.2 riastrad &vmw_cmd_set_render_target_check, true, false, false), 2882 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state, 2883 1.2 riastrad true, false, false), 2884 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check, 2885 1.2 riastrad true, false, false), 2886 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check, 2887 1.2 riastrad true, false, false), 2888 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check, 2889 1.2 riastrad true, false, false), 2890 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check, 2891 1.2 riastrad true, false, false), 2892 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check, 2893 1.2 riastrad true, false, false), 2894 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check, 2895 1.2 riastrad true, false, false), 2896 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, 2897 1.2 riastrad false, false, false), 2898 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define, 2899 1.2 riastrad true, false, false), 2900 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy, 2901 1.2 riastrad true, false, false), 2902 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, 2903 1.2 riastrad true, false, false), 2904 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const, 2905 1.2 riastrad true, false, false), 2906 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw, 2907 1.2 riastrad true, false, false), 2908 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check, 2909 1.2 riastrad true, false, false), 2910 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query, 2911 1.2 riastrad true, false, false), 2912 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query, 2913 1.2 riastrad true, false, false), 2914 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query, 2915 1.2 riastrad true, false, false), 2916 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok, 2917 1.2 riastrad true, false, false), 2918 1.1 riastrad VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, 2919 1.2 riastrad &vmw_cmd_blt_surf_screen_check, false, false, false), 2920 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid, 2921 1.2 riastrad false, false, false), 2922 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid, 2923 1.2 riastrad false, false, false), 2924 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid, 2925 1.2 riastrad false, false, false), 2926 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid, 2927 1.2 riastrad false, false, false), 2928 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid, 2929 1.2 riastrad false, false, false), 2930 1.3 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid, 2931 1.2 riastrad false, false, false), 2932 1.3 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid, 2933 1.2 riastrad false, false, false), 2934 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid, 2935 1.2 riastrad false, false, false), 2936 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid, 2937 1.2 riastrad false, false, false), 2938 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid, 2939 1.2 riastrad false, false, false), 2940 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid, 2941 1.2 riastrad false, false, false), 2942 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid, 2943 1.2 riastrad false, false, false), 2944 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid, 2945 1.2 riastrad false, false, false), 2946 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid, 2947 1.2 riastrad false, false, true), 2948 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid, 2949 1.2 riastrad false, false, true), 2950 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid, 2951 1.2 riastrad false, false, true), 2952 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid, 2953 1.2 riastrad false, false, true), 2954 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid, 2955 1.2 riastrad false, false, true), 2956 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid, 2957 1.2 riastrad false, false, true), 2958 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid, 2959 1.2 riastrad false, false, true), 2960 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid, 2961 1.2 riastrad false, false, true), 2962 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface, 2963 1.2 riastrad true, false, true), 2964 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid, 2965 1.2 riastrad false, false, true), 2966 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image, 2967 1.2 riastrad true, false, true), 2968 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE, 2969 1.2 riastrad &vmw_cmd_update_gb_surface, true, false, true), 2970 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE, 2971 1.2 riastrad &vmw_cmd_readback_gb_image, true, false, true), 2972 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE, 2973 1.2 riastrad &vmw_cmd_readback_gb_surface, true, false, true), 2974 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE, 2975 1.2 riastrad &vmw_cmd_invalidate_gb_image, true, false, true), 2976 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE, 2977 1.2 riastrad &vmw_cmd_invalidate_gb_surface, true, false, true), 2978 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid, 2979 1.2 riastrad false, false, true), 2980 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid, 2981 1.2 riastrad false, false, true), 2982 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid, 2983 1.2 riastrad false, false, true), 2984 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid, 2985 1.2 riastrad false, false, true), 2986 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid, 2987 1.2 riastrad false, false, true), 2988 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid, 2989 1.2 riastrad false, false, true), 2990 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader, 2991 1.2 riastrad true, false, true), 2992 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid, 2993 1.2 riastrad false, false, true), 2994 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid, 2995 1.2 riastrad false, false, false), 2996 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query, 2997 1.2 riastrad true, false, true), 2998 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query, 2999 1.2 riastrad true, false, true), 3000 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query, 3001 1.2 riastrad true, false, true), 3002 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok, 3003 1.2 riastrad true, false, true), 3004 1.3 riastrad VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok, 3005 1.3 riastrad true, false, true), 3006 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid, 3007 1.2 riastrad false, false, true), 3008 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid, 3009 1.2 riastrad false, false, true), 3010 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid, 3011 1.2 riastrad false, false, true), 3012 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid, 3013 1.2 riastrad false, false, true), 3014 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid, 3015 1.2 riastrad false, false, true), 3016 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid, 3017 1.2 riastrad false, false, true), 3018 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid, 3019 1.2 riastrad false, false, true), 3020 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid, 3021 1.2 riastrad false, false, true), 3022 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, 3023 1.2 riastrad false, false, true), 3024 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, 3025 1.2 riastrad false, false, true), 3026 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check, 3027 1.2 riastrad true, false, true), 3028 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid, 3029 1.2 riastrad false, false, true), 3030 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid, 3031 1.2 riastrad false, false, true), 3032 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid, 3033 1.2 riastrad false, false, true), 3034 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid, 3035 1.2 riastrad false, false, true), 3036 1.2 riastrad 3037 1.3 riastrad /* SM commands */ 3038 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid, 3039 1.2 riastrad false, false, true), 3040 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid, 3041 1.2 riastrad false, false, true), 3042 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid, 3043 1.2 riastrad false, false, true), 3044 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid, 3045 1.2 riastrad false, false, true), 3046 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid, 3047 1.2 riastrad false, false, true), 3048 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER, 3049 1.2 riastrad &vmw_cmd_dx_set_single_constant_buffer, true, false, true), 3050 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES, 3051 1.2 riastrad &vmw_cmd_dx_set_shader_res, true, false, true), 3052 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader, 3053 1.2 riastrad true, false, true), 3054 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check, 3055 1.2 riastrad true, false, true), 3056 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check, 3057 1.2 riastrad true, false, true), 3058 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check, 3059 1.2 riastrad true, false, true), 3060 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check, 3061 1.2 riastrad true, false, true), 3062 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED, 3063 1.2 riastrad &vmw_cmd_dx_cid_check, true, false, true), 3064 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check, 3065 1.2 riastrad true, false, true), 3066 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS, 3067 1.2 riastrad &vmw_cmd_dx_set_vertex_buffers, true, false, true), 3068 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER, 3069 1.2 riastrad &vmw_cmd_dx_set_index_buffer, true, false, true), 3070 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS, 3071 1.2 riastrad &vmw_cmd_dx_set_rendertargets, true, false, true), 3072 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check, 3073 1.2 riastrad true, false, true), 3074 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE, 3075 1.2 riastrad &vmw_cmd_dx_cid_check, true, false, true), 3076 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE, 3077 1.2 riastrad &vmw_cmd_dx_cid_check, true, false, true), 3078 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query, 3079 1.2 riastrad true, false, true), 3080 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check, 3081 1.2 riastrad true, false, true), 3082 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query, 3083 1.2 riastrad true, false, true), 3084 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET, 3085 1.2 riastrad &vmw_cmd_dx_cid_check, true, false, true), 3086 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check, 3087 1.2 riastrad true, false, true), 3088 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check, 3089 1.2 riastrad true, false, true), 3090 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid, 3091 1.2 riastrad true, false, true), 3092 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check, 3093 1.2 riastrad true, false, true), 3094 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check, 3095 1.2 riastrad true, false, true), 3096 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check, 3097 1.2 riastrad true, false, true), 3098 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW, 3099 1.2 riastrad &vmw_cmd_dx_clear_rendertarget_view, true, false, true), 3100 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW, 3101 1.2 riastrad &vmw_cmd_dx_clear_depthstencil_view, true, false, true), 3102 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid, 3103 1.2 riastrad true, false, true), 3104 1.3 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips, 3105 1.2 riastrad true, false, true), 3106 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE, 3107 1.2 riastrad &vmw_cmd_dx_check_subresource, true, false, true), 3108 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE, 3109 1.2 riastrad &vmw_cmd_dx_check_subresource, true, false, true), 3110 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE, 3111 1.2 riastrad &vmw_cmd_dx_check_subresource, true, false, true), 3112 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW, 3113 1.2 riastrad &vmw_cmd_dx_view_define, true, false, true), 3114 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW, 3115 1.2 riastrad &vmw_cmd_dx_view_remove, true, false, true), 3116 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW, 3117 1.2 riastrad &vmw_cmd_dx_view_define, true, false, true), 3118 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW, 3119 1.2 riastrad &vmw_cmd_dx_view_remove, true, false, true), 3120 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW, 3121 1.2 riastrad &vmw_cmd_dx_view_define, true, false, true), 3122 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW, 3123 1.2 riastrad &vmw_cmd_dx_view_remove, true, false, true), 3124 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT, 3125 1.2 riastrad &vmw_cmd_dx_so_define, true, false, true), 3126 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT, 3127 1.2 riastrad &vmw_cmd_dx_cid_check, true, false, true), 3128 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE, 3129 1.2 riastrad &vmw_cmd_dx_so_define, true, false, true), 3130 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE, 3131 1.2 riastrad &vmw_cmd_dx_cid_check, true, false, true), 3132 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE, 3133 1.2 riastrad &vmw_cmd_dx_so_define, true, false, true), 3134 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE, 3135 1.2 riastrad &vmw_cmd_dx_cid_check, true, false, true), 3136 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE, 3137 1.2 riastrad &vmw_cmd_dx_so_define, true, false, true), 3138 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE, 3139 1.2 riastrad &vmw_cmd_dx_cid_check, true, false, true), 3140 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE, 3141 1.2 riastrad &vmw_cmd_dx_so_define, true, false, true), 3142 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE, 3143 1.2 riastrad &vmw_cmd_dx_cid_check, true, false, true), 3144 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER, 3145 1.2 riastrad &vmw_cmd_dx_define_shader, true, false, true), 3146 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER, 3147 1.2 riastrad &vmw_cmd_dx_destroy_shader, true, false, true), 3148 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER, 3149 1.2 riastrad &vmw_cmd_dx_bind_shader, true, false, true), 3150 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT, 3151 1.2 riastrad &vmw_cmd_dx_so_define, true, false, true), 3152 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT, 3153 1.2 riastrad &vmw_cmd_dx_cid_check, true, false, true), 3154 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check, 3155 1.2 riastrad true, false, true), 3156 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS, 3157 1.2 riastrad &vmw_cmd_dx_set_so_targets, true, false, true), 3158 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT, 3159 1.2 riastrad &vmw_cmd_dx_cid_check, true, false, true), 3160 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY, 3161 1.2 riastrad &vmw_cmd_dx_cid_check, true, false, true), 3162 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY, 3163 1.2 riastrad &vmw_cmd_buffer_copy_check, true, false, true), 3164 1.2 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION, 3165 1.2 riastrad &vmw_cmd_pred_copy_check, true, false, true), 3166 1.3 riastrad VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER, 3167 1.3 riastrad &vmw_cmd_dx_transfer_from_buffer, 3168 1.3 riastrad true, false, true), 3169 1.3 riastrad VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy, 3170 1.3 riastrad true, false, true), 3171 1.1 riastrad }; 3172 1.1 riastrad 3173 1.3 riastrad bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd) 3174 1.3 riastrad { 3175 1.3 riastrad u32 cmd_id = ((u32 *) buf)[0]; 3176 1.3 riastrad 3177 1.3 riastrad if (cmd_id >= SVGA_CMD_MAX) { 3178 1.3 riastrad SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; 3179 1.3 riastrad const struct vmw_cmd_entry *entry; 3180 1.3 riastrad 3181 1.3 riastrad *size = header->size + sizeof(SVGA3dCmdHeader); 3182 1.3 riastrad cmd_id = header->id; 3183 1.3 riastrad if (cmd_id >= SVGA_3D_CMD_MAX) 3184 1.3 riastrad return false; 3185 1.3 riastrad 3186 1.3 riastrad cmd_id -= SVGA_3D_CMD_BASE; 3187 1.3 riastrad entry = &vmw_cmd_entries[cmd_id]; 3188 1.3 riastrad *cmd = entry->cmd_name; 3189 1.3 riastrad return true; 3190 1.3 riastrad } 3191 1.3 riastrad 3192 1.3 riastrad switch (cmd_id) { 3193 1.3 riastrad case SVGA_CMD_UPDATE: 3194 1.3 riastrad *cmd = "SVGA_CMD_UPDATE"; 3195 1.3 riastrad *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate); 3196 1.3 riastrad break; 3197 1.3 riastrad case SVGA_CMD_DEFINE_GMRFB: 3198 1.3 riastrad *cmd = "SVGA_CMD_DEFINE_GMRFB"; 3199 1.3 riastrad *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB); 3200 1.3 riastrad break; 3201 1.3 riastrad case SVGA_CMD_BLIT_GMRFB_TO_SCREEN: 3202 1.3 riastrad *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN"; 3203 1.3 riastrad *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen); 3204 1.3 riastrad break; 3205 1.3 riastrad case SVGA_CMD_BLIT_SCREEN_TO_GMRFB: 3206 1.3 riastrad *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB"; 3207 1.3 riastrad *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen); 3208 1.3 riastrad break; 3209 1.3 riastrad default: 3210 1.3 riastrad *cmd = "UNKNOWN"; 3211 1.3 riastrad *size = 0; 3212 1.3 riastrad return false; 3213 1.3 riastrad } 3214 1.3 riastrad 3215 1.3 riastrad return true; 3216 1.3 riastrad } 3217 1.3 riastrad 3218 1.1 riastrad static int vmw_cmd_check(struct vmw_private *dev_priv, 3219 1.3 riastrad struct vmw_sw_context *sw_context, void *buf, 3220 1.3 riastrad uint32_t *size) 3221 1.1 riastrad { 3222 1.1 riastrad uint32_t cmd_id; 3223 1.1 riastrad uint32_t size_remaining = *size; 3224 1.1 riastrad SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; 3225 1.1 riastrad int ret; 3226 1.2 riastrad const struct vmw_cmd_entry *entry; 3227 1.2 riastrad bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS; 3228 1.1 riastrad 3229 1.2 riastrad cmd_id = ((uint32_t *)buf)[0]; 3230 1.1 riastrad /* Handle any none 3D commands */ 3231 1.1 riastrad if (unlikely(cmd_id < SVGA_CMD_MAX)) 3232 1.1 riastrad return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size); 3233 1.1 riastrad 3234 1.1 riastrad 3235 1.2 riastrad cmd_id = header->id; 3236 1.2 riastrad *size = header->size + sizeof(SVGA3dCmdHeader); 3237 1.1 riastrad 3238 1.1 riastrad cmd_id -= SVGA_3D_CMD_BASE; 3239 1.1 riastrad if (unlikely(*size > size_remaining)) 3240 1.2 riastrad goto out_invalid; 3241 1.1 riastrad 3242 1.1 riastrad if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)) 3243 1.2 riastrad goto out_invalid; 3244 1.2 riastrad 3245 1.2 riastrad entry = &vmw_cmd_entries[cmd_id]; 3246 1.2 riastrad if (unlikely(!entry->func)) 3247 1.2 riastrad goto out_invalid; 3248 1.2 riastrad 3249 1.2 riastrad if (unlikely(!entry->user_allow && !sw_context->kernel)) 3250 1.2 riastrad goto out_privileged; 3251 1.2 riastrad 3252 1.2 riastrad if (unlikely(entry->gb_disable && gb)) 3253 1.2 riastrad goto out_old; 3254 1.2 riastrad 3255 1.2 riastrad if (unlikely(entry->gb_enable && !gb)) 3256 1.2 riastrad goto out_new; 3257 1.1 riastrad 3258 1.2 riastrad ret = entry->func(dev_priv, sw_context, header); 3259 1.3 riastrad if (unlikely(ret != 0)) { 3260 1.3 riastrad VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n", 3261 1.3 riastrad cmd_id + SVGA_3D_CMD_BASE, ret); 3262 1.3 riastrad return ret; 3263 1.3 riastrad } 3264 1.1 riastrad 3265 1.1 riastrad return 0; 3266 1.2 riastrad out_invalid: 3267 1.3 riastrad VMW_DEBUG_USER("Invalid SVGA3D command: %d\n", 3268 1.3 riastrad cmd_id + SVGA_3D_CMD_BASE); 3269 1.2 riastrad return -EINVAL; 3270 1.2 riastrad out_privileged: 3271 1.3 riastrad VMW_DEBUG_USER("Privileged SVGA3D command: %d\n", 3272 1.3 riastrad cmd_id + SVGA_3D_CMD_BASE); 3273 1.2 riastrad return -EPERM; 3274 1.2 riastrad out_old: 3275 1.3 riastrad VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n", 3276 1.3 riastrad cmd_id + SVGA_3D_CMD_BASE); 3277 1.2 riastrad return -EINVAL; 3278 1.2 riastrad out_new: 3279 1.3 riastrad VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n", 3280 1.3 riastrad cmd_id + SVGA_3D_CMD_BASE); 3281 1.1 riastrad return -EINVAL; 3282 1.1 riastrad } 3283 1.1 riastrad 3284 1.1 riastrad static int vmw_cmd_check_all(struct vmw_private *dev_priv, 3285 1.3 riastrad struct vmw_sw_context *sw_context, void *buf, 3286 1.1 riastrad uint32_t size) 3287 1.1 riastrad { 3288 1.1 riastrad int32_t cur_size = size; 3289 1.1 riastrad int ret; 3290 1.1 riastrad 3291 1.1 riastrad sw_context->buf_start = buf; 3292 1.1 riastrad 3293 1.1 riastrad while (cur_size > 0) { 3294 1.1 riastrad size = cur_size; 3295 1.1 riastrad ret = vmw_cmd_check(dev_priv, sw_context, buf, &size); 3296 1.1 riastrad if (unlikely(ret != 0)) 3297 1.1 riastrad return ret; 3298 1.1 riastrad buf = (void *)((unsigned long) buf + size); 3299 1.1 riastrad cur_size -= size; 3300 1.1 riastrad } 3301 1.1 riastrad 3302 1.1 riastrad if (unlikely(cur_size != 0)) { 3303 1.3 riastrad VMW_DEBUG_USER("Command verifier out of sync.\n"); 3304 1.1 riastrad return -EINVAL; 3305 1.1 riastrad } 3306 1.1 riastrad 3307 1.1 riastrad return 0; 3308 1.1 riastrad } 3309 1.1 riastrad 3310 1.1 riastrad static void vmw_free_relocations(struct vmw_sw_context *sw_context) 3311 1.1 riastrad { 3312 1.3 riastrad /* Memory is validation context memory, so no need to free it */ 3313 1.3 riastrad INIT_LIST_HEAD(&sw_context->bo_relocations); 3314 1.1 riastrad } 3315 1.1 riastrad 3316 1.1 riastrad static void vmw_apply_relocations(struct vmw_sw_context *sw_context) 3317 1.1 riastrad { 3318 1.1 riastrad struct vmw_relocation *reloc; 3319 1.1 riastrad struct ttm_buffer_object *bo; 3320 1.1 riastrad 3321 1.3 riastrad list_for_each_entry(reloc, &sw_context->bo_relocations, head) { 3322 1.3 riastrad bo = &reloc->vbo->base; 3323 1.1 riastrad switch (bo->mem.mem_type) { 3324 1.1 riastrad case TTM_PL_VRAM: 3325 1.1 riastrad reloc->location->offset += bo->offset; 3326 1.1 riastrad reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER; 3327 1.1 riastrad break; 3328 1.1 riastrad case VMW_PL_GMR: 3329 1.1 riastrad reloc->location->gmrId = bo->mem.start; 3330 1.1 riastrad break; 3331 1.2 riastrad case VMW_PL_MOB: 3332 1.2 riastrad *reloc->mob_loc = bo->mem.start; 3333 1.2 riastrad break; 3334 1.1 riastrad default: 3335 1.1 riastrad BUG(); 3336 1.1 riastrad } 3337 1.1 riastrad } 3338 1.1 riastrad vmw_free_relocations(sw_context); 3339 1.1 riastrad } 3340 1.1 riastrad 3341 1.1 riastrad static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context, 3342 1.1 riastrad uint32_t size) 3343 1.1 riastrad { 3344 1.1 riastrad if (likely(sw_context->cmd_bounce_size >= size)) 3345 1.1 riastrad return 0; 3346 1.1 riastrad 3347 1.1 riastrad if (sw_context->cmd_bounce_size == 0) 3348 1.1 riastrad sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE; 3349 1.1 riastrad 3350 1.1 riastrad while (sw_context->cmd_bounce_size < size) { 3351 1.1 riastrad sw_context->cmd_bounce_size = 3352 1.1 riastrad PAGE_ALIGN(sw_context->cmd_bounce_size + 3353 1.1 riastrad (sw_context->cmd_bounce_size >> 1)); 3354 1.1 riastrad } 3355 1.1 riastrad 3356 1.3 riastrad vfree(sw_context->cmd_bounce); 3357 1.1 riastrad sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size); 3358 1.1 riastrad 3359 1.1 riastrad if (sw_context->cmd_bounce == NULL) { 3360 1.3 riastrad VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n"); 3361 1.1 riastrad sw_context->cmd_bounce_size = 0; 3362 1.1 riastrad return -ENOMEM; 3363 1.1 riastrad } 3364 1.1 riastrad 3365 1.1 riastrad return 0; 3366 1.1 riastrad } 3367 1.1 riastrad 3368 1.1 riastrad /** 3369 1.1 riastrad * vmw_execbuf_fence_commands - create and submit a command stream fence 3370 1.1 riastrad * 3371 1.1 riastrad * Creates a fence object and submits a command stream marker. 3372 1.1 riastrad * If this fails for some reason, We sync the fifo and return NULL. 3373 1.1 riastrad * It is then safe to fence buffers with a NULL pointer. 3374 1.1 riastrad * 3375 1.3 riastrad * If @p_handle is not NULL @file_priv must also not be NULL. Creates a 3376 1.3 riastrad * userspace handle if @p_handle is not NULL, otherwise not. 3377 1.1 riastrad */ 3378 1.1 riastrad 3379 1.1 riastrad int vmw_execbuf_fence_commands(struct drm_file *file_priv, 3380 1.1 riastrad struct vmw_private *dev_priv, 3381 1.1 riastrad struct vmw_fence_obj **p_fence, 3382 1.1 riastrad uint32_t *p_handle) 3383 1.1 riastrad { 3384 1.1 riastrad uint32_t sequence; 3385 1.1 riastrad int ret; 3386 1.1 riastrad bool synced = false; 3387 1.1 riastrad 3388 1.1 riastrad /* p_handle implies file_priv. */ 3389 1.1 riastrad BUG_ON(p_handle != NULL && file_priv == NULL); 3390 1.1 riastrad 3391 1.1 riastrad ret = vmw_fifo_send_fence(dev_priv, &sequence); 3392 1.1 riastrad if (unlikely(ret != 0)) { 3393 1.3 riastrad VMW_DEBUG_USER("Fence submission error. Syncing.\n"); 3394 1.1 riastrad synced = true; 3395 1.1 riastrad } 3396 1.1 riastrad 3397 1.1 riastrad if (p_handle != NULL) 3398 1.1 riastrad ret = vmw_user_fence_create(file_priv, dev_priv->fman, 3399 1.2 riastrad sequence, p_fence, p_handle); 3400 1.1 riastrad else 3401 1.2 riastrad ret = vmw_fence_create(dev_priv->fman, sequence, p_fence); 3402 1.1 riastrad 3403 1.1 riastrad if (unlikely(ret != 0 && !synced)) { 3404 1.3 riastrad (void) vmw_fallback_wait(dev_priv, false, false, sequence, 3405 1.3 riastrad false, VMW_FENCE_WAIT_TIMEOUT); 3406 1.1 riastrad *p_fence = NULL; 3407 1.1 riastrad } 3408 1.1 riastrad 3409 1.3 riastrad return ret; 3410 1.1 riastrad } 3411 1.1 riastrad 3412 1.1 riastrad /** 3413 1.3 riastrad * vmw_execbuf_copy_fence_user - copy fence object information to user-space. 3414 1.1 riastrad * 3415 1.1 riastrad * @dev_priv: Pointer to a vmw_private struct. 3416 1.1 riastrad * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file. 3417 1.1 riastrad * @ret: Return value from fence object creation. 3418 1.3 riastrad * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which 3419 1.3 riastrad * the information should be copied. 3420 1.1 riastrad * @fence: Pointer to the fenc object. 3421 1.1 riastrad * @fence_handle: User-space fence handle. 3422 1.3 riastrad * @out_fence_fd: exported file descriptor for the fence. -1 if not used 3423 1.3 riastrad * @sync_file: Only used to clean up in case of an error in this function. 3424 1.3 riastrad * 3425 1.3 riastrad * This function copies fence information to user-space. If copying fails, the 3426 1.3 riastrad * user-space struct drm_vmw_fence_rep::error member is hopefully left 3427 1.3 riastrad * untouched, and if it's preloaded with an -EFAULT by user-space, the error 3428 1.3 riastrad * will hopefully be detected. 3429 1.1 riastrad * 3430 1.3 riastrad * Also if copying fails, user-space will be unable to signal the fence object 3431 1.3 riastrad * so we wait for it immediately, and then unreference the user-space reference. 3432 1.1 riastrad */ 3433 1.1 riastrad void 3434 1.1 riastrad vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, 3435 1.3 riastrad struct vmw_fpriv *vmw_fp, int ret, 3436 1.1 riastrad struct drm_vmw_fence_rep __user *user_fence_rep, 3437 1.3 riastrad struct vmw_fence_obj *fence, uint32_t fence_handle, 3438 1.3 riastrad int32_t out_fence_fd, struct sync_file *sync_file) 3439 1.1 riastrad { 3440 1.1 riastrad struct drm_vmw_fence_rep fence_rep; 3441 1.1 riastrad 3442 1.1 riastrad if (user_fence_rep == NULL) 3443 1.1 riastrad return; 3444 1.1 riastrad 3445 1.1 riastrad memset(&fence_rep, 0, sizeof(fence_rep)); 3446 1.1 riastrad 3447 1.1 riastrad fence_rep.error = ret; 3448 1.3 riastrad fence_rep.fd = out_fence_fd; 3449 1.1 riastrad if (ret == 0) { 3450 1.1 riastrad BUG_ON(fence == NULL); 3451 1.1 riastrad 3452 1.1 riastrad fence_rep.handle = fence_handle; 3453 1.2 riastrad fence_rep.seqno = fence->base.seqno; 3454 1.4 riastrad spin_lock(&dev_priv->fence_lock); 3455 1.1 riastrad vmw_update_seqno(dev_priv, &dev_priv->fifo); 3456 1.1 riastrad fence_rep.passed_seqno = dev_priv->last_read_seqno; 3457 1.4 riastrad spin_unlock(&dev_priv->fence_lock); 3458 1.1 riastrad } 3459 1.1 riastrad 3460 1.1 riastrad /* 3461 1.3 riastrad * copy_to_user errors will be detected by user space not seeing 3462 1.3 riastrad * fence_rep::error filled in. Typically user-space would have pre-set 3463 1.3 riastrad * that member to -EFAULT. 3464 1.1 riastrad */ 3465 1.1 riastrad ret = copy_to_user(user_fence_rep, &fence_rep, 3466 1.1 riastrad sizeof(fence_rep)); 3467 1.1 riastrad 3468 1.1 riastrad /* 3469 1.3 riastrad * User-space lost the fence object. We need to sync and unreference the 3470 1.3 riastrad * handle. 3471 1.1 riastrad */ 3472 1.1 riastrad if (unlikely(ret != 0) && (fence_rep.error == 0)) { 3473 1.5 riastrad #ifdef __NetBSD__ 3474 1.5 riastrad if (fd_getfile(fence_rep.fd)) 3475 1.5 riastrad (void)fd_close(fence_rep.fd); 3476 1.5 riastrad #else 3477 1.3 riastrad if (sync_file) 3478 1.3 riastrad fput(sync_file->file); 3479 1.3 riastrad 3480 1.3 riastrad if (fence_rep.fd != -1) { 3481 1.3 riastrad put_unused_fd(fence_rep.fd); 3482 1.3 riastrad fence_rep.fd = -1; 3483 1.3 riastrad } 3484 1.5 riastrad #endif 3485 1.3 riastrad 3486 1.3 riastrad ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle, 3487 1.3 riastrad TTM_REF_USAGE); 3488 1.3 riastrad VMW_DEBUG_USER("Fence copy error. Syncing.\n"); 3489 1.2 riastrad (void) vmw_fence_obj_wait(fence, false, false, 3490 1.1 riastrad VMW_FENCE_WAIT_TIMEOUT); 3491 1.1 riastrad } 3492 1.1 riastrad } 3493 1.1 riastrad 3494 1.2 riastrad /** 3495 1.3 riastrad * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo. 3496 1.2 riastrad * 3497 1.2 riastrad * @dev_priv: Pointer to a device private structure. 3498 1.2 riastrad * @kernel_commands: Pointer to the unpatched command batch. 3499 1.2 riastrad * @command_size: Size of the unpatched command batch. 3500 1.2 riastrad * @sw_context: Structure holding the relocation lists. 3501 1.2 riastrad * 3502 1.3 riastrad * Side effects: If this function returns 0, then the command batch pointed to 3503 1.3 riastrad * by @kernel_commands will have been modified. 3504 1.2 riastrad */ 3505 1.2 riastrad static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv, 3506 1.3 riastrad void *kernel_commands, u32 command_size, 3507 1.2 riastrad struct vmw_sw_context *sw_context) 3508 1.2 riastrad { 3509 1.2 riastrad void *cmd; 3510 1.2 riastrad 3511 1.2 riastrad if (sw_context->dx_ctx_node) 3512 1.3 riastrad cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size, 3513 1.3 riastrad sw_context->dx_ctx_node->ctx->id); 3514 1.2 riastrad else 3515 1.3 riastrad cmd = VMW_FIFO_RESERVE(dev_priv, command_size); 3516 1.3 riastrad 3517 1.3 riastrad if (!cmd) 3518 1.2 riastrad return -ENOMEM; 3519 1.2 riastrad 3520 1.2 riastrad vmw_apply_relocations(sw_context); 3521 1.2 riastrad memcpy(cmd, kernel_commands, command_size); 3522 1.2 riastrad vmw_resource_relocations_apply(cmd, &sw_context->res_relocations); 3523 1.2 riastrad vmw_resource_relocations_free(&sw_context->res_relocations); 3524 1.2 riastrad vmw_fifo_commit(dev_priv, command_size); 3525 1.2 riastrad 3526 1.2 riastrad return 0; 3527 1.2 riastrad } 3528 1.2 riastrad 3529 1.2 riastrad /** 3530 1.3 riastrad * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the 3531 1.3 riastrad * command buffer manager. 3532 1.2 riastrad * 3533 1.2 riastrad * @dev_priv: Pointer to a device private structure. 3534 1.2 riastrad * @header: Opaque handle to the command buffer allocation. 3535 1.2 riastrad * @command_size: Size of the unpatched command batch. 3536 1.2 riastrad * @sw_context: Structure holding the relocation lists. 3537 1.2 riastrad * 3538 1.3 riastrad * Side effects: If this function returns 0, then the command buffer represented 3539 1.3 riastrad * by @header will have been modified. 3540 1.2 riastrad */ 3541 1.2 riastrad static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv, 3542 1.2 riastrad struct vmw_cmdbuf_header *header, 3543 1.2 riastrad u32 command_size, 3544 1.2 riastrad struct vmw_sw_context *sw_context) 3545 1.2 riastrad { 3546 1.3 riastrad u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id : 3547 1.2 riastrad SVGA3D_INVALID_ID); 3548 1.3 riastrad void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false, 3549 1.3 riastrad header); 3550 1.2 riastrad 3551 1.2 riastrad vmw_apply_relocations(sw_context); 3552 1.2 riastrad vmw_resource_relocations_apply(cmd, &sw_context->res_relocations); 3553 1.2 riastrad vmw_resource_relocations_free(&sw_context->res_relocations); 3554 1.2 riastrad vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false); 3555 1.2 riastrad 3556 1.2 riastrad return 0; 3557 1.2 riastrad } 3558 1.2 riastrad 3559 1.2 riastrad /** 3560 1.2 riastrad * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for 3561 1.2 riastrad * submission using a command buffer. 3562 1.2 riastrad * 3563 1.2 riastrad * @dev_priv: Pointer to a device private structure. 3564 1.2 riastrad * @user_commands: User-space pointer to the commands to be submitted. 3565 1.2 riastrad * @command_size: Size of the unpatched command batch. 3566 1.2 riastrad * @header: Out parameter returning the opaque pointer to the command buffer. 3567 1.2 riastrad * 3568 1.2 riastrad * This function checks whether we can use the command buffer manager for 3569 1.3 riastrad * submission and if so, creates a command buffer of suitable size and copies 3570 1.3 riastrad * the user data into that buffer. 3571 1.2 riastrad * 3572 1.2 riastrad * On successful return, the function returns a pointer to the data in the 3573 1.2 riastrad * command buffer and *@header is set to non-NULL. 3574 1.3 riastrad * 3575 1.3 riastrad * If command buffers could not be used, the function will return the value of 3576 1.3 riastrad * @kernel_commands on function call. That value may be NULL. In that case, the 3577 1.3 riastrad * value of *@header will be set to NULL. 3578 1.3 riastrad * 3579 1.2 riastrad * If an error is encountered, the function will return a pointer error value. 3580 1.2 riastrad * If the function is interrupted by a signal while sleeping, it will return 3581 1.2 riastrad * -ERESTARTSYS casted to a pointer error value. 3582 1.2 riastrad */ 3583 1.2 riastrad static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv, 3584 1.2 riastrad void __user *user_commands, 3585 1.3 riastrad void *kernel_commands, u32 command_size, 3586 1.2 riastrad struct vmw_cmdbuf_header **header) 3587 1.2 riastrad { 3588 1.2 riastrad size_t cmdbuf_size; 3589 1.2 riastrad int ret; 3590 1.2 riastrad 3591 1.2 riastrad *header = NULL; 3592 1.2 riastrad if (command_size > SVGA_CB_MAX_SIZE) { 3593 1.3 riastrad VMW_DEBUG_USER("Command buffer is too large.\n"); 3594 1.2 riastrad return ERR_PTR(-EINVAL); 3595 1.2 riastrad } 3596 1.2 riastrad 3597 1.2 riastrad if (!dev_priv->cman || kernel_commands) 3598 1.2 riastrad return kernel_commands; 3599 1.2 riastrad 3600 1.2 riastrad /* If possible, add a little space for fencing. */ 3601 1.2 riastrad cmdbuf_size = command_size + 512; 3602 1.2 riastrad cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE); 3603 1.3 riastrad kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true, 3604 1.3 riastrad header); 3605 1.2 riastrad if (IS_ERR(kernel_commands)) 3606 1.2 riastrad return kernel_commands; 3607 1.2 riastrad 3608 1.3 riastrad ret = copy_from_user(kernel_commands, user_commands, command_size); 3609 1.2 riastrad if (ret) { 3610 1.3 riastrad VMW_DEBUG_USER("Failed copying commands.\n"); 3611 1.2 riastrad vmw_cmdbuf_header_free(*header); 3612 1.2 riastrad *header = NULL; 3613 1.2 riastrad return ERR_PTR(-EFAULT); 3614 1.2 riastrad } 3615 1.2 riastrad 3616 1.2 riastrad return kernel_commands; 3617 1.2 riastrad } 3618 1.2 riastrad 3619 1.2 riastrad static int vmw_execbuf_tie_context(struct vmw_private *dev_priv, 3620 1.2 riastrad struct vmw_sw_context *sw_context, 3621 1.2 riastrad uint32_t handle) 3622 1.2 riastrad { 3623 1.2 riastrad struct vmw_resource *res; 3624 1.2 riastrad int ret; 3625 1.3 riastrad unsigned int size; 3626 1.2 riastrad 3627 1.2 riastrad if (handle == SVGA3D_INVALID_ID) 3628 1.2 riastrad return 0; 3629 1.2 riastrad 3630 1.3 riastrad size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context); 3631 1.3 riastrad ret = vmw_validation_preload_res(sw_context->ctx, size); 3632 1.3 riastrad if (ret) 3633 1.2 riastrad return ret; 3634 1.3 riastrad 3635 1.3 riastrad res = vmw_user_resource_noref_lookup_handle 3636 1.3 riastrad (dev_priv, sw_context->fp->tfile, handle, 3637 1.3 riastrad user_context_converter); 3638 1.3 riastrad if (IS_ERR(res)) { 3639 1.3 riastrad VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n", 3640 1.3 riastrad (unsigned int) handle); 3641 1.3 riastrad return PTR_ERR(res); 3642 1.2 riastrad } 3643 1.2 riastrad 3644 1.3 riastrad ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET); 3645 1.2 riastrad if (unlikely(ret != 0)) 3646 1.3 riastrad return ret; 3647 1.2 riastrad 3648 1.3 riastrad sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res); 3649 1.2 riastrad sw_context->man = vmw_context_res_man(res); 3650 1.3 riastrad 3651 1.3 riastrad return 0; 3652 1.2 riastrad } 3653 1.2 riastrad 3654 1.1 riastrad int vmw_execbuf_process(struct drm_file *file_priv, 3655 1.1 riastrad struct vmw_private *dev_priv, 3656 1.3 riastrad void __user *user_commands, void *kernel_commands, 3657 1.3 riastrad uint32_t command_size, uint64_t throttle_us, 3658 1.2 riastrad uint32_t dx_context_handle, 3659 1.1 riastrad struct drm_vmw_fence_rep __user *user_fence_rep, 3660 1.3 riastrad struct vmw_fence_obj **out_fence, uint32_t flags) 3661 1.1 riastrad { 3662 1.1 riastrad struct vmw_sw_context *sw_context = &dev_priv->ctx; 3663 1.1 riastrad struct vmw_fence_obj *fence = NULL; 3664 1.2 riastrad struct vmw_cmdbuf_header *header; 3665 1.3 riastrad uint32_t handle = 0; 3666 1.1 riastrad int ret; 3667 1.5 riastrad #ifdef __NetBSD__ 3668 1.5 riastrad int out_fence_fd = -1; 3669 1.5 riastrad struct file *out_fence_fp = NULL; 3670 1.5 riastrad #else 3671 1.3 riastrad int32_t out_fence_fd = -1; 3672 1.5 riastrad #endif 3673 1.3 riastrad struct sync_file *sync_file = NULL; 3674 1.3 riastrad DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1); 3675 1.3 riastrad 3676 1.3 riastrad vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm); 3677 1.3 riastrad 3678 1.3 riastrad if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { 3679 1.5 riastrad #ifdef __NetBSD__ 3680 1.5 riastrad ret = -fd_allocfile(&out_fence_fp, &out_fence_fd); 3681 1.5 riastrad if (ret) 3682 1.5 riastrad return ret; 3683 1.5 riastrad #else 3684 1.3 riastrad out_fence_fd = get_unused_fd_flags(O_CLOEXEC); 3685 1.3 riastrad if (out_fence_fd < 0) { 3686 1.3 riastrad VMW_DEBUG_USER("Failed to get a fence fd.\n"); 3687 1.3 riastrad return out_fence_fd; 3688 1.3 riastrad } 3689 1.5 riastrad #endif 3690 1.3 riastrad } 3691 1.1 riastrad 3692 1.2 riastrad if (throttle_us) { 3693 1.2 riastrad ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, 3694 1.2 riastrad throttle_us); 3695 1.2 riastrad 3696 1.2 riastrad if (ret) 3697 1.3 riastrad goto out_free_fence_fd; 3698 1.2 riastrad } 3699 1.2 riastrad 3700 1.2 riastrad kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands, 3701 1.2 riastrad kernel_commands, command_size, 3702 1.2 riastrad &header); 3703 1.3 riastrad if (IS_ERR(kernel_commands)) { 3704 1.3 riastrad ret = PTR_ERR(kernel_commands); 3705 1.3 riastrad goto out_free_fence_fd; 3706 1.3 riastrad } 3707 1.2 riastrad 3708 1.1 riastrad ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); 3709 1.2 riastrad if (ret) { 3710 1.2 riastrad ret = -ERESTARTSYS; 3711 1.2 riastrad goto out_free_header; 3712 1.2 riastrad } 3713 1.1 riastrad 3714 1.2 riastrad sw_context->kernel = false; 3715 1.1 riastrad if (kernel_commands == NULL) { 3716 1.1 riastrad ret = vmw_resize_cmd_bounce(sw_context, command_size); 3717 1.1 riastrad if (unlikely(ret != 0)) 3718 1.1 riastrad goto out_unlock; 3719 1.1 riastrad 3720 1.3 riastrad ret = copy_from_user(sw_context->cmd_bounce, user_commands, 3721 1.3 riastrad command_size); 3722 1.1 riastrad if (unlikely(ret != 0)) { 3723 1.1 riastrad ret = -EFAULT; 3724 1.3 riastrad VMW_DEBUG_USER("Failed copying commands.\n"); 3725 1.1 riastrad goto out_unlock; 3726 1.1 riastrad } 3727 1.3 riastrad 3728 1.1 riastrad kernel_commands = sw_context->cmd_bounce; 3729 1.3 riastrad } else if (!header) { 3730 1.1 riastrad sw_context->kernel = true; 3731 1.3 riastrad } 3732 1.1 riastrad 3733 1.2 riastrad sw_context->fp = vmw_fpriv(file_priv); 3734 1.3 riastrad INIT_LIST_HEAD(&sw_context->ctx_list); 3735 1.1 riastrad sw_context->cur_query_bo = dev_priv->pinned_bo; 3736 1.1 riastrad sw_context->last_query_ctx = NULL; 3737 1.1 riastrad sw_context->needs_post_query_barrier = false; 3738 1.2 riastrad sw_context->dx_ctx_node = NULL; 3739 1.2 riastrad sw_context->dx_query_mob = NULL; 3740 1.2 riastrad sw_context->dx_query_ctx = NULL; 3741 1.1 riastrad memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache)); 3742 1.1 riastrad INIT_LIST_HEAD(&sw_context->res_relocations); 3743 1.3 riastrad INIT_LIST_HEAD(&sw_context->bo_relocations); 3744 1.3 riastrad 3745 1.2 riastrad if (sw_context->staged_bindings) 3746 1.2 riastrad vmw_binding_state_reset(sw_context->staged_bindings); 3747 1.2 riastrad 3748 1.1 riastrad if (!sw_context->res_ht_initialized) { 3749 1.1 riastrad ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); 3750 1.1 riastrad if (unlikely(ret != 0)) 3751 1.1 riastrad goto out_unlock; 3752 1.3 riastrad 3753 1.1 riastrad sw_context->res_ht_initialized = true; 3754 1.1 riastrad } 3755 1.3 riastrad 3756 1.2 riastrad INIT_LIST_HEAD(&sw_context->staged_cmd_res); 3757 1.3 riastrad sw_context->ctx = &val_ctx; 3758 1.2 riastrad ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle); 3759 1.3 riastrad if (unlikely(ret != 0)) 3760 1.2 riastrad goto out_err_nores; 3761 1.1 riastrad 3762 1.1 riastrad ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, 3763 1.1 riastrad command_size); 3764 1.1 riastrad if (unlikely(ret != 0)) 3765 1.2 riastrad goto out_err_nores; 3766 1.1 riastrad 3767 1.1 riastrad ret = vmw_resources_reserve(sw_context); 3768 1.1 riastrad if (unlikely(ret != 0)) 3769 1.2 riastrad goto out_err_nores; 3770 1.1 riastrad 3771 1.3 riastrad ret = vmw_validation_bo_reserve(&val_ctx, true); 3772 1.1 riastrad if (unlikely(ret != 0)) 3773 1.2 riastrad goto out_err_nores; 3774 1.1 riastrad 3775 1.3 riastrad ret = vmw_validation_bo_validate(&val_ctx, true); 3776 1.1 riastrad if (unlikely(ret != 0)) 3777 1.1 riastrad goto out_err; 3778 1.1 riastrad 3779 1.3 riastrad ret = vmw_validation_res_validate(&val_ctx, true); 3780 1.1 riastrad if (unlikely(ret != 0)) 3781 1.1 riastrad goto out_err; 3782 1.1 riastrad 3783 1.3 riastrad vmw_validation_drop_ht(&val_ctx); 3784 1.3 riastrad 3785 1.2 riastrad ret = mutex_lock_interruptible(&dev_priv->binding_mutex); 3786 1.2 riastrad if (unlikely(ret != 0)) { 3787 1.2 riastrad ret = -ERESTARTSYS; 3788 1.2 riastrad goto out_err; 3789 1.2 riastrad } 3790 1.1 riastrad 3791 1.2 riastrad if (dev_priv->has_mob) { 3792 1.2 riastrad ret = vmw_rebind_contexts(sw_context); 3793 1.1 riastrad if (unlikely(ret != 0)) 3794 1.2 riastrad goto out_unlock_binding; 3795 1.1 riastrad } 3796 1.1 riastrad 3797 1.2 riastrad if (!header) { 3798 1.2 riastrad ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands, 3799 1.2 riastrad command_size, sw_context); 3800 1.2 riastrad } else { 3801 1.2 riastrad ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size, 3802 1.2 riastrad sw_context); 3803 1.2 riastrad header = NULL; 3804 1.2 riastrad } 3805 1.2 riastrad mutex_unlock(&dev_priv->binding_mutex); 3806 1.2 riastrad if (ret) 3807 1.1 riastrad goto out_err; 3808 1.1 riastrad 3809 1.1 riastrad vmw_query_bo_switch_commit(dev_priv, sw_context); 3810 1.3 riastrad ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence, 3811 1.1 riastrad (user_fence_rep) ? &handle : NULL); 3812 1.1 riastrad /* 3813 1.1 riastrad * This error is harmless, because if fence submission fails, 3814 1.1 riastrad * vmw_fifo_send_fence will sync. The error will be propagated to 3815 1.1 riastrad * user-space in @fence_rep 3816 1.1 riastrad */ 3817 1.1 riastrad if (ret != 0) 3818 1.3 riastrad VMW_DEBUG_USER("Fence submission error. Syncing.\n"); 3819 1.1 riastrad 3820 1.3 riastrad vmw_execbuf_bindings_commit(sw_context, false); 3821 1.3 riastrad vmw_bind_dx_query_mob(sw_context); 3822 1.3 riastrad vmw_validation_res_unreserve(&val_ctx, false); 3823 1.2 riastrad 3824 1.3 riastrad vmw_validation_bo_fence(sw_context->ctx, fence); 3825 1.1 riastrad 3826 1.3 riastrad if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid)) 3827 1.1 riastrad __vmw_execbuf_release_pinned_bo(dev_priv, fence); 3828 1.1 riastrad 3829 1.3 riastrad /* 3830 1.3 riastrad * If anything fails here, give up trying to export the fence and do a 3831 1.3 riastrad * sync since the user mode will not be able to sync the fence itself. 3832 1.3 riastrad * This ensures we are still functionally correct. 3833 1.3 riastrad */ 3834 1.3 riastrad if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { 3835 1.3 riastrad 3836 1.5 riastrad #ifdef __NetBSD__ 3837 1.5 riastrad sync_file = sync_file_create(&fence->base, out_fence_fp); 3838 1.5 riastrad #else 3839 1.3 riastrad sync_file = sync_file_create(&fence->base); 3840 1.5 riastrad #endif 3841 1.3 riastrad if (!sync_file) { 3842 1.3 riastrad VMW_DEBUG_USER("Sync file create failed for fence\n"); 3843 1.5 riastrad #ifdef __NetBSD__ 3844 1.5 riastrad fd_abort(curproc, out_fence_fp, out_fence_fd); 3845 1.5 riastrad out_fence_fp = NULL; 3846 1.5 riastrad #else 3847 1.3 riastrad put_unused_fd(out_fence_fd); 3848 1.5 riastrad #endif 3849 1.3 riastrad out_fence_fd = -1; 3850 1.3 riastrad 3851 1.3 riastrad (void) vmw_fence_obj_wait(fence, false, false, 3852 1.3 riastrad VMW_FENCE_WAIT_TIMEOUT); 3853 1.3 riastrad } else { 3854 1.3 riastrad /* Link the fence with the FD created earlier */ 3855 1.5 riastrad #ifdef __NetBSD__ 3856 1.5 riastrad fd_affix(curproc, out_fence_fp, out_fence_fd); 3857 1.5 riastrad #else 3858 1.3 riastrad fd_install(out_fence_fd, sync_file->file); 3859 1.5 riastrad #endif 3860 1.3 riastrad } 3861 1.3 riastrad } 3862 1.3 riastrad 3863 1.1 riastrad vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, 3864 1.3 riastrad user_fence_rep, fence, handle, out_fence_fd, 3865 1.3 riastrad sync_file); 3866 1.1 riastrad 3867 1.1 riastrad /* Don't unreference when handing fence out */ 3868 1.1 riastrad if (unlikely(out_fence != NULL)) { 3869 1.1 riastrad *out_fence = fence; 3870 1.1 riastrad fence = NULL; 3871 1.1 riastrad } else if (likely(fence != NULL)) { 3872 1.1 riastrad vmw_fence_obj_unreference(&fence); 3873 1.1 riastrad } 3874 1.1 riastrad 3875 1.2 riastrad vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res); 3876 1.1 riastrad mutex_unlock(&dev_priv->cmdbuf_mutex); 3877 1.1 riastrad 3878 1.1 riastrad /* 3879 1.3 riastrad * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks 3880 1.3 riastrad * in resource destruction paths. 3881 1.1 riastrad */ 3882 1.3 riastrad vmw_validation_unref_lists(&val_ctx); 3883 1.1 riastrad 3884 1.1 riastrad return 0; 3885 1.1 riastrad 3886 1.2 riastrad out_unlock_binding: 3887 1.2 riastrad mutex_unlock(&dev_priv->binding_mutex); 3888 1.1 riastrad out_err: 3889 1.3 riastrad vmw_validation_bo_backoff(&val_ctx); 3890 1.2 riastrad out_err_nores: 3891 1.3 riastrad vmw_execbuf_bindings_commit(sw_context, true); 3892 1.3 riastrad vmw_validation_res_unreserve(&val_ctx, true); 3893 1.1 riastrad vmw_resource_relocations_free(&sw_context->res_relocations); 3894 1.1 riastrad vmw_free_relocations(sw_context); 3895 1.3 riastrad if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid)) 3896 1.1 riastrad __vmw_execbuf_release_pinned_bo(dev_priv, NULL); 3897 1.1 riastrad out_unlock: 3898 1.2 riastrad vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res); 3899 1.3 riastrad vmw_validation_drop_ht(&val_ctx); 3900 1.3 riastrad WARN_ON(!list_empty(&sw_context->ctx_list)); 3901 1.1 riastrad mutex_unlock(&dev_priv->cmdbuf_mutex); 3902 1.1 riastrad 3903 1.1 riastrad /* 3904 1.3 riastrad * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks 3905 1.3 riastrad * in resource destruction paths. 3906 1.1 riastrad */ 3907 1.3 riastrad vmw_validation_unref_lists(&val_ctx); 3908 1.2 riastrad out_free_header: 3909 1.2 riastrad if (header) 3910 1.2 riastrad vmw_cmdbuf_header_free(header); 3911 1.3 riastrad out_free_fence_fd: 3912 1.3 riastrad if (out_fence_fd >= 0) 3913 1.5 riastrad #ifdef __NetBSD__ 3914 1.5 riastrad fd_abort(curproc, out_fence_fp, out_fence_fd); 3915 1.5 riastrad #else 3916 1.3 riastrad put_unused_fd(out_fence_fd); 3917 1.5 riastrad #endif 3918 1.1 riastrad 3919 1.1 riastrad return ret; 3920 1.1 riastrad } 3921 1.1 riastrad 3922 1.1 riastrad /** 3923 1.1 riastrad * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer. 3924 1.1 riastrad * 3925 1.1 riastrad * @dev_priv: The device private structure. 3926 1.1 riastrad * 3927 1.3 riastrad * This function is called to idle the fifo and unpin the query buffer if the 3928 1.3 riastrad * normal way to do this hits an error, which should typically be extremely 3929 1.3 riastrad * rare. 3930 1.1 riastrad */ 3931 1.1 riastrad static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) 3932 1.1 riastrad { 3933 1.3 riastrad VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n"); 3934 1.1 riastrad 3935 1.1 riastrad (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ); 3936 1.2 riastrad vmw_bo_pin_reserved(dev_priv->pinned_bo, false); 3937 1.2 riastrad if (dev_priv->dummy_query_bo_pinned) { 3938 1.2 riastrad vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false); 3939 1.2 riastrad dev_priv->dummy_query_bo_pinned = false; 3940 1.2 riastrad } 3941 1.1 riastrad } 3942 1.1 riastrad 3943 1.1 riastrad 3944 1.1 riastrad /** 3945 1.3 riastrad * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query 3946 1.3 riastrad * bo. 3947 1.1 riastrad * 3948 1.1 riastrad * @dev_priv: The device private structure. 3949 1.3 riastrad * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a 3950 1.3 riastrad * query barrier that flushes all queries touching the current buffer pointed to 3951 1.3 riastrad * by @dev_priv->pinned_bo 3952 1.3 riastrad * 3953 1.3 riastrad * This function should be used to unpin the pinned query bo, or as a query 3954 1.3 riastrad * barrier when we need to make sure that all queries have finished before the 3955 1.3 riastrad * next fifo command. (For example on hardware context destructions where the 3956 1.3 riastrad * hardware may otherwise leak unfinished queries). 3957 1.1 riastrad * 3958 1.3 riastrad * This function does not return any failure codes, but make attempts to do safe 3959 1.3 riastrad * unpinning in case of errors. 3960 1.1 riastrad * 3961 1.3 riastrad * The function will synchronize on the previous query barrier, and will thus 3962 1.3 riastrad * not finish until that barrier has executed. 3963 1.1 riastrad * 3964 1.3 riastrad * the @dev_priv->cmdbuf_mutex needs to be held by the current thread before 3965 1.3 riastrad * calling this function. 3966 1.1 riastrad */ 3967 1.1 riastrad void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, 3968 1.1 riastrad struct vmw_fence_obj *fence) 3969 1.1 riastrad { 3970 1.1 riastrad int ret = 0; 3971 1.1 riastrad struct vmw_fence_obj *lfence = NULL; 3972 1.3 riastrad DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); 3973 1.1 riastrad 3974 1.1 riastrad if (dev_priv->pinned_bo == NULL) 3975 1.1 riastrad goto out_unlock; 3976 1.1 riastrad 3977 1.3 riastrad ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false, 3978 1.3 riastrad false); 3979 1.3 riastrad if (ret) 3980 1.3 riastrad goto out_no_reserve; 3981 1.1 riastrad 3982 1.3 riastrad ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false, 3983 1.3 riastrad false); 3984 1.3 riastrad if (ret) 3985 1.3 riastrad goto out_no_reserve; 3986 1.1 riastrad 3987 1.3 riastrad ret = vmw_validation_bo_reserve(&val_ctx, false); 3988 1.3 riastrad if (ret) 3989 1.1 riastrad goto out_no_reserve; 3990 1.1 riastrad 3991 1.1 riastrad if (dev_priv->query_cid_valid) { 3992 1.1 riastrad BUG_ON(fence != NULL); 3993 1.1 riastrad ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); 3994 1.3 riastrad if (ret) 3995 1.1 riastrad goto out_no_emit; 3996 1.1 riastrad dev_priv->query_cid_valid = false; 3997 1.1 riastrad } 3998 1.1 riastrad 3999 1.2 riastrad vmw_bo_pin_reserved(dev_priv->pinned_bo, false); 4000 1.2 riastrad if (dev_priv->dummy_query_bo_pinned) { 4001 1.2 riastrad vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false); 4002 1.2 riastrad dev_priv->dummy_query_bo_pinned = false; 4003 1.2 riastrad } 4004 1.1 riastrad if (fence == NULL) { 4005 1.1 riastrad (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence, 4006 1.1 riastrad NULL); 4007 1.1 riastrad fence = lfence; 4008 1.1 riastrad } 4009 1.3 riastrad vmw_validation_bo_fence(&val_ctx, fence); 4010 1.1 riastrad if (lfence != NULL) 4011 1.1 riastrad vmw_fence_obj_unreference(&lfence); 4012 1.1 riastrad 4013 1.3 riastrad vmw_validation_unref_lists(&val_ctx); 4014 1.3 riastrad vmw_bo_unreference(&dev_priv->pinned_bo); 4015 1.1 riastrad 4016 1.1 riastrad out_unlock: 4017 1.1 riastrad return; 4018 1.1 riastrad out_no_emit: 4019 1.3 riastrad vmw_validation_bo_backoff(&val_ctx); 4020 1.1 riastrad out_no_reserve: 4021 1.3 riastrad vmw_validation_unref_lists(&val_ctx); 4022 1.3 riastrad vmw_execbuf_unpin_panic(dev_priv); 4023 1.3 riastrad vmw_bo_unreference(&dev_priv->pinned_bo); 4024 1.1 riastrad } 4025 1.1 riastrad 4026 1.1 riastrad /** 4027 1.3 riastrad * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo. 4028 1.1 riastrad * 4029 1.1 riastrad * @dev_priv: The device private structure. 4030 1.1 riastrad * 4031 1.3 riastrad * This function should be used to unpin the pinned query bo, or as a query 4032 1.3 riastrad * barrier when we need to make sure that all queries have finished before the 4033 1.3 riastrad * next fifo command. (For example on hardware context destructions where the 4034 1.3 riastrad * hardware may otherwise leak unfinished queries). 4035 1.1 riastrad * 4036 1.3 riastrad * This function does not return any failure codes, but make attempts to do safe 4037 1.3 riastrad * unpinning in case of errors. 4038 1.1 riastrad * 4039 1.3 riastrad * The function will synchronize on the previous query barrier, and will thus 4040 1.3 riastrad * not finish until that barrier has executed. 4041 1.1 riastrad */ 4042 1.1 riastrad void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv) 4043 1.1 riastrad { 4044 1.1 riastrad mutex_lock(&dev_priv->cmdbuf_mutex); 4045 1.1 riastrad if (dev_priv->query_cid_valid) 4046 1.1 riastrad __vmw_execbuf_release_pinned_bo(dev_priv, NULL); 4047 1.1 riastrad mutex_unlock(&dev_priv->cmdbuf_mutex); 4048 1.1 riastrad } 4049 1.1 riastrad 4050 1.3 riastrad int vmw_execbuf_ioctl(struct drm_device *dev, void *data, 4051 1.3 riastrad struct drm_file *file_priv) 4052 1.1 riastrad { 4053 1.1 riastrad struct vmw_private *dev_priv = vmw_priv(dev); 4054 1.3 riastrad struct drm_vmw_execbuf_arg *arg = data; 4055 1.1 riastrad int ret; 4056 1.3 riastrad struct dma_fence *in_fence = NULL; 4057 1.1 riastrad 4058 1.1 riastrad /* 4059 1.3 riastrad * Extend the ioctl argument while maintaining backwards compatibility: 4060 1.3 riastrad * We take different code paths depending on the value of arg->version. 4061 1.3 riastrad * 4062 1.3 riastrad * Note: The ioctl argument is extended and zeropadded by core DRM. 4063 1.1 riastrad */ 4064 1.3 riastrad if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION || 4065 1.3 riastrad arg->version == 0)) { 4066 1.3 riastrad VMW_DEBUG_USER("Incorrect execbuf version.\n"); 4067 1.1 riastrad return -EINVAL; 4068 1.1 riastrad } 4069 1.1 riastrad 4070 1.3 riastrad switch (arg->version) { 4071 1.2 riastrad case 1: 4072 1.3 riastrad /* For v1 core DRM have extended + zeropadded the data */ 4073 1.3 riastrad arg->context_handle = (uint32_t) -1; 4074 1.2 riastrad break; 4075 1.2 riastrad case 2: 4076 1.3 riastrad default: 4077 1.3 riastrad /* For v2 and later core DRM would have correctly copied it */ 4078 1.3 riastrad break; 4079 1.3 riastrad } 4080 1.3 riastrad 4081 1.3 riastrad /* If imported a fence FD from elsewhere, then wait on it */ 4082 1.3 riastrad if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) { 4083 1.3 riastrad in_fence = sync_file_get_fence(arg->imported_fence_fd); 4084 1.3 riastrad 4085 1.3 riastrad if (!in_fence) { 4086 1.3 riastrad VMW_DEBUG_USER("Cannot get imported fence\n"); 4087 1.2 riastrad return -EINVAL; 4088 1.2 riastrad } 4089 1.3 riastrad 4090 1.3 riastrad ret = vmw_wait_dma_fence(dev_priv->fman, in_fence); 4091 1.3 riastrad if (ret) 4092 1.3 riastrad goto out; 4093 1.2 riastrad } 4094 1.2 riastrad 4095 1.2 riastrad ret = ttm_read_lock(&dev_priv->reservation_sem, true); 4096 1.1 riastrad if (unlikely(ret != 0)) 4097 1.1 riastrad return ret; 4098 1.1 riastrad 4099 1.1 riastrad ret = vmw_execbuf_process(file_priv, dev_priv, 4100 1.3 riastrad (void __user *)(unsigned long)arg->commands, 4101 1.3 riastrad NULL, arg->command_size, arg->throttle_us, 4102 1.3 riastrad arg->context_handle, 4103 1.3 riastrad (void __user *)(unsigned long)arg->fence_rep, 4104 1.3 riastrad NULL, arg->flags); 4105 1.3 riastrad 4106 1.2 riastrad ttm_read_unlock(&dev_priv->reservation_sem); 4107 1.1 riastrad if (unlikely(ret != 0)) 4108 1.3 riastrad goto out; 4109 1.1 riastrad 4110 1.1 riastrad vmw_kms_cursor_post_execbuf(dev_priv); 4111 1.1 riastrad 4112 1.3 riastrad out: 4113 1.3 riastrad if (in_fence) 4114 1.3 riastrad dma_fence_put(in_fence); 4115 1.3 riastrad return ret; 4116 1.1 riastrad } 4117