vmwgfx_execbuf.c revision 1.3 1 /* $NetBSD: vmwgfx_execbuf.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $ */
2
3 // SPDX-License-Identifier: GPL-2.0 OR MIT
4 /**************************************************************************
5 *
6 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
23 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
24 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
25 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
26 * USE OR OTHER DEALINGS IN THE SOFTWARE.
27 *
28 **************************************************************************/
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: vmwgfx_execbuf.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $");
31
32 #include <linux/sync_file.h>
33
34 #include "vmwgfx_drv.h"
35 #include "vmwgfx_reg.h"
36 #include <drm/ttm/ttm_bo_api.h>
37 #include <drm/ttm/ttm_placement.h>
38 #include "vmwgfx_so.h"
39 #include "vmwgfx_binding.h"
40
41 #define VMW_RES_HT_ORDER 12
42
43 /*
44 * Helper macro to get dx_ctx_node if available otherwise print an error
45 * message. This is for use in command verifier function where if dx_ctx_node
46 * is not set then command is invalid.
47 */
48 #define VMW_GET_CTX_NODE(__sw_context) \
49 ({ \
50 __sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({ \
51 VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
52 __sw_context->dx_ctx_node; \
53 }); \
54 })
55
56 #define VMW_DECLARE_CMD_VAR(__var, __type) \
57 struct { \
58 SVGA3dCmdHeader header; \
59 __type body; \
60 } __var
61
62 /**
63 * struct vmw_relocation - Buffer object relocation
64 *
65 * @head: List head for the command submission context's relocation list
66 * @vbo: Non ref-counted pointer to buffer object
67 * @mob_loc: Pointer to location for mob id to be modified
68 * @location: Pointer to location for guest pointer to be modified
69 */
70 struct vmw_relocation {
71 struct list_head head;
72 struct vmw_buffer_object *vbo;
73 union {
74 SVGAMobId *mob_loc;
75 SVGAGuestPtr *location;
76 };
77 };
78
79 /**
80 * enum vmw_resource_relocation_type - Relocation type for resources
81 *
82 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
83 * command stream is replaced with the actual id after validation.
84 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
85 * with a NOP.
86 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
87 * validation is -1, the command is replaced with a NOP. Otherwise no action.
88 */
89 enum vmw_resource_relocation_type {
90 vmw_res_rel_normal,
91 vmw_res_rel_nop,
92 vmw_res_rel_cond_nop,
93 vmw_res_rel_max
94 };
95
96 /**
97 * struct vmw_resource_relocation - Relocation info for resources
98 *
99 * @head: List head for the software context's relocation list.
100 * @res: Non-ref-counted pointer to the resource.
101 * @offset: Offset of single byte entries into the command buffer where the id
102 * that needs fixup is located.
103 * @rel_type: Type of relocation.
104 */
105 struct vmw_resource_relocation {
106 struct list_head head;
107 const struct vmw_resource *res;
108 u32 offset:29;
109 enum vmw_resource_relocation_type rel_type:3;
110 };
111
112 /**
113 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
114 *
115 * @head: List head of context list
116 * @ctx: The context resource
117 * @cur: The context's persistent binding state
118 * @staged: The binding state changes of this command buffer
119 */
120 struct vmw_ctx_validation_info {
121 struct list_head head;
122 struct vmw_resource *ctx;
123 struct vmw_ctx_binding_state *cur;
124 struct vmw_ctx_binding_state *staged;
125 };
126
127 /**
128 * struct vmw_cmd_entry - Describe a command for the verifier
129 *
130 * @user_allow: Whether allowed from the execbuf ioctl.
131 * @gb_disable: Whether disabled if guest-backed objects are available.
132 * @gb_enable: Whether enabled iff guest-backed objects are available.
133 */
134 struct vmw_cmd_entry {
135 int (*func) (struct vmw_private *, struct vmw_sw_context *,
136 SVGA3dCmdHeader *);
137 bool user_allow;
138 bool gb_disable;
139 bool gb_enable;
140 const char *cmd_name;
141 };
142
143 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
144 [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
145 (_gb_disable), (_gb_enable), #_cmd}
146
147 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
148 struct vmw_sw_context *sw_context,
149 struct vmw_resource *ctx);
150 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
151 struct vmw_sw_context *sw_context,
152 SVGAMobId *id,
153 struct vmw_buffer_object **vmw_bo_p);
154 /**
155 * vmw_ptr_diff - Compute the offset from a to b in bytes
156 *
157 * @a: A starting pointer.
158 * @b: A pointer offset in the same address space.
159 *
160 * Returns: The offset in bytes between the two pointers.
161 */
162 static size_t vmw_ptr_diff(void *a, void *b)
163 {
164 return (unsigned long) b - (unsigned long) a;
165 }
166
167 /**
168 * vmw_execbuf_bindings_commit - Commit modified binding state
169 *
170 * @sw_context: The command submission context
171 * @backoff: Whether this is part of the error path and binding state changes
172 * should be ignored
173 */
174 static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
175 bool backoff)
176 {
177 struct vmw_ctx_validation_info *entry;
178
179 list_for_each_entry(entry, &sw_context->ctx_list, head) {
180 if (!backoff)
181 vmw_binding_state_commit(entry->cur, entry->staged);
182
183 if (entry->staged != sw_context->staged_bindings)
184 vmw_binding_state_free(entry->staged);
185 else
186 sw_context->staged_bindings_inuse = false;
187 }
188
189 /* List entries are freed with the validation context */
190 INIT_LIST_HEAD(&sw_context->ctx_list);
191 }
192
193 /**
194 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
195 *
196 * @sw_context: The command submission context
197 */
198 static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
199 {
200 if (sw_context->dx_query_mob)
201 vmw_context_bind_dx_query(sw_context->dx_query_ctx,
202 sw_context->dx_query_mob);
203 }
204
205 /**
206 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
207 * the validate list.
208 *
209 * @dev_priv: Pointer to the device private:
210 * @sw_context: The command submission context
211 * @node: The validation node holding the context resource metadata
212 */
213 static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
214 struct vmw_sw_context *sw_context,
215 struct vmw_resource *res,
216 struct vmw_ctx_validation_info *node)
217 {
218 int ret;
219
220 ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
221 if (unlikely(ret != 0))
222 goto out_err;
223
224 if (!sw_context->staged_bindings) {
225 sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
226 if (IS_ERR(sw_context->staged_bindings)) {
227 ret = PTR_ERR(sw_context->staged_bindings);
228 sw_context->staged_bindings = NULL;
229 goto out_err;
230 }
231 }
232
233 if (sw_context->staged_bindings_inuse) {
234 node->staged = vmw_binding_state_alloc(dev_priv);
235 if (IS_ERR(node->staged)) {
236 ret = PTR_ERR(node->staged);
237 node->staged = NULL;
238 goto out_err;
239 }
240 } else {
241 node->staged = sw_context->staged_bindings;
242 sw_context->staged_bindings_inuse = true;
243 }
244
245 node->ctx = res;
246 node->cur = vmw_context_binding_state(res);
247 list_add_tail(&node->head, &sw_context->ctx_list);
248
249 return 0;
250
251 out_err:
252 return ret;
253 }
254
255 /**
256 * vmw_execbuf_res_size - calculate extra size fore the resource validation node
257 *
258 * @dev_priv: Pointer to the device private struct.
259 * @res_type: The resource type.
260 *
261 * Guest-backed contexts and DX contexts require extra size to store execbuf
262 * private information in the validation node. Typically the binding manager
263 * associated data structures.
264 *
265 * Returns: The extra size requirement based on resource type.
266 */
267 static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
268 enum vmw_res_type res_type)
269 {
270 return (res_type == vmw_res_dx_context ||
271 (res_type == vmw_res_context && dev_priv->has_mob)) ?
272 sizeof(struct vmw_ctx_validation_info) : 0;
273 }
274
275 /**
276 * vmw_execbuf_rcache_update - Update a resource-node cache entry
277 *
278 * @rcache: Pointer to the entry to update.
279 * @res: Pointer to the resource.
280 * @private: Pointer to the execbuf-private space in the resource validation
281 * node.
282 */
283 static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
284 struct vmw_resource *res,
285 void *private)
286 {
287 rcache->res = res;
288 rcache->private = private;
289 rcache->valid = 1;
290 rcache->valid_handle = 0;
291 }
292
293 /**
294 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
295 * rcu-protected pointer to the validation list.
296 *
297 * @sw_context: Pointer to the software context.
298 * @res: Unreferenced rcu-protected pointer to the resource.
299 * @dirty: Whether to change dirty status.
300 *
301 * Returns: 0 on success. Negative error code on failure. Typical error codes
302 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
303 */
304 static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
305 struct vmw_resource *res,
306 u32 dirty)
307 {
308 struct vmw_private *dev_priv = res->dev_priv;
309 int ret;
310 enum vmw_res_type res_type = vmw_res_type(res);
311 struct vmw_res_cache_entry *rcache;
312 struct vmw_ctx_validation_info *ctx_info;
313 bool first_usage;
314 unsigned int priv_size;
315
316 rcache = &sw_context->res_cache[res_type];
317 if (likely(rcache->valid && rcache->res == res)) {
318 if (dirty)
319 vmw_validation_res_set_dirty(sw_context->ctx,
320 rcache->private, dirty);
321 vmw_user_resource_noref_release();
322 return 0;
323 }
324
325 priv_size = vmw_execbuf_res_size(dev_priv, res_type);
326 ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
327 dirty, (void **)&ctx_info,
328 &first_usage);
329 vmw_user_resource_noref_release();
330 if (ret)
331 return ret;
332
333 if (priv_size && first_usage) {
334 ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
335 ctx_info);
336 if (ret) {
337 VMW_DEBUG_USER("Failed first usage context setup.\n");
338 return ret;
339 }
340 }
341
342 vmw_execbuf_rcache_update(rcache, res, ctx_info);
343 return 0;
344 }
345
346 /**
347 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
348 * validation list if it's not already on it
349 *
350 * @sw_context: Pointer to the software context.
351 * @res: Pointer to the resource.
352 * @dirty: Whether to change dirty status.
353 *
354 * Returns: Zero on success. Negative error code on failure.
355 */
356 static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
357 struct vmw_resource *res,
358 u32 dirty)
359 {
360 struct vmw_res_cache_entry *rcache;
361 enum vmw_res_type res_type = vmw_res_type(res);
362 void *ptr;
363 int ret;
364
365 rcache = &sw_context->res_cache[res_type];
366 if (likely(rcache->valid && rcache->res == res)) {
367 if (dirty)
368 vmw_validation_res_set_dirty(sw_context->ctx,
369 rcache->private, dirty);
370 return 0;
371 }
372
373 ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
374 &ptr, NULL);
375 if (ret)
376 return ret;
377
378 vmw_execbuf_rcache_update(rcache, res, ptr);
379
380 return 0;
381 }
382
383 /**
384 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
385 * validation list
386 *
387 * @sw_context: The software context holding the validation list.
388 * @view: Pointer to the view resource.
389 *
390 * Returns 0 if success, negative error code otherwise.
391 */
392 static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
393 struct vmw_resource *view)
394 {
395 int ret;
396
397 /*
398 * First add the resource the view is pointing to, otherwise it may be
399 * swapped out when the view is validated.
400 */
401 ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
402 vmw_view_dirtying(view));
403 if (ret)
404 return ret;
405
406 return vmw_execbuf_res_noctx_val_add(sw_context, view,
407 VMW_RES_DIRTY_NONE);
408 }
409
410 /**
411 * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
412 * to to the validation list.
413 *
414 * @sw_context: The software context holding the validation list.
415 * @view_type: The view type to look up.
416 * @id: view id of the view.
417 *
418 * The view is represented by a view id and the DX context it's created on, or
419 * scheduled for creation on. If there is no DX context set, the function will
420 * return an -EINVAL error pointer.
421 *
422 * Returns: Unreferenced pointer to the resource on success, negative error
423 * pointer on failure.
424 */
425 static struct vmw_resource *
426 vmw_view_id_val_add(struct vmw_sw_context *sw_context,
427 enum vmw_view_type view_type, u32 id)
428 {
429 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
430 struct vmw_resource *view;
431 int ret;
432
433 if (!ctx_node)
434 return ERR_PTR(-EINVAL);
435
436 view = vmw_view_lookup(sw_context->man, view_type, id);
437 if (IS_ERR(view))
438 return view;
439
440 ret = vmw_view_res_val_add(sw_context, view);
441 if (ret)
442 return ERR_PTR(ret);
443
444 return view;
445 }
446
447 /**
448 * vmw_resource_context_res_add - Put resources previously bound to a context on
449 * the validation list
450 *
451 * @dev_priv: Pointer to a device private structure
452 * @sw_context: Pointer to a software context used for this command submission
453 * @ctx: Pointer to the context resource
454 *
455 * This function puts all resources that were previously bound to @ctx on the
456 * resource validation list. This is part of the context state reemission
457 */
458 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
459 struct vmw_sw_context *sw_context,
460 struct vmw_resource *ctx)
461 {
462 struct list_head *binding_list;
463 struct vmw_ctx_bindinfo *entry;
464 int ret = 0;
465 struct vmw_resource *res;
466 u32 i;
467
468 /* Add all cotables to the validation list. */
469 if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
470 for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
471 res = vmw_context_cotable(ctx, i);
472 if (IS_ERR(res))
473 continue;
474
475 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
476 VMW_RES_DIRTY_SET);
477 if (unlikely(ret != 0))
478 return ret;
479 }
480 }
481
482 /* Add all resources bound to the context to the validation list */
483 mutex_lock(&dev_priv->binding_mutex);
484 binding_list = vmw_context_binding_list(ctx);
485
486 list_for_each_entry(entry, binding_list, ctx_list) {
487 if (vmw_res_type(entry->res) == vmw_res_view)
488 ret = vmw_view_res_val_add(sw_context, entry->res);
489 else
490 ret = vmw_execbuf_res_noctx_val_add
491 (sw_context, entry->res,
492 vmw_binding_dirtying(entry->bt));
493 if (unlikely(ret != 0))
494 break;
495 }
496
497 if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
498 struct vmw_buffer_object *dx_query_mob;
499
500 dx_query_mob = vmw_context_get_dx_query_mob(ctx);
501 if (dx_query_mob)
502 ret = vmw_validation_add_bo(sw_context->ctx,
503 dx_query_mob, true, false);
504 }
505
506 mutex_unlock(&dev_priv->binding_mutex);
507 return ret;
508 }
509
510 /**
511 * vmw_resource_relocation_add - Add a relocation to the relocation list
512 *
513 * @list: Pointer to head of relocation list.
514 * @res: The resource.
515 * @offset: Offset into the command buffer currently being parsed where the id
516 * that needs fixup is located. Granularity is one byte.
517 * @rel_type: Relocation type.
518 */
519 static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
520 const struct vmw_resource *res,
521 unsigned long offset,
522 enum vmw_resource_relocation_type
523 rel_type)
524 {
525 struct vmw_resource_relocation *rel;
526
527 rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
528 if (unlikely(!rel)) {
529 VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
530 return -ENOMEM;
531 }
532
533 rel->res = res;
534 rel->offset = offset;
535 rel->rel_type = rel_type;
536 list_add_tail(&rel->head, &sw_context->res_relocations);
537
538 return 0;
539 }
540
541 /**
542 * vmw_resource_relocations_free - Free all relocations on a list
543 *
544 * @list: Pointer to the head of the relocation list
545 */
546 static void vmw_resource_relocations_free(struct list_head *list)
547 {
548 /* Memory is validation context memory, so no need to free it */
549 INIT_LIST_HEAD(list);
550 }
551
552 /**
553 * vmw_resource_relocations_apply - Apply all relocations on a list
554 *
555 * @cb: Pointer to the start of the command buffer bein patch. This need not be
556 * the same buffer as the one being parsed when the relocation list was built,
557 * but the contents must be the same modulo the resource ids.
558 * @list: Pointer to the head of the relocation list.
559 */
560 static void vmw_resource_relocations_apply(uint32_t *cb,
561 struct list_head *list)
562 {
563 struct vmw_resource_relocation *rel;
564
565 /* Validate the struct vmw_resource_relocation member size */
566 BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
567 BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
568
569 list_for_each_entry(rel, list, head) {
570 u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
571 switch (rel->rel_type) {
572 case vmw_res_rel_normal:
573 *addr = rel->res->id;
574 break;
575 case vmw_res_rel_nop:
576 *addr = SVGA_3D_CMD_NOP;
577 break;
578 default:
579 if (rel->res->id == -1)
580 *addr = SVGA_3D_CMD_NOP;
581 break;
582 }
583 }
584 }
585
586 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
587 struct vmw_sw_context *sw_context,
588 SVGA3dCmdHeader *header)
589 {
590 return -EINVAL;
591 }
592
593 static int vmw_cmd_ok(struct vmw_private *dev_priv,
594 struct vmw_sw_context *sw_context,
595 SVGA3dCmdHeader *header)
596 {
597 return 0;
598 }
599
600 /**
601 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
602 * list.
603 *
604 * @sw_context: Pointer to the software context.
605 *
606 * Note that since vmware's command submission currently is protected by the
607 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
608 * only a single thread at once will attempt this.
609 */
610 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
611 {
612 int ret;
613
614 ret = vmw_validation_res_reserve(sw_context->ctx, true);
615 if (ret)
616 return ret;
617
618 if (sw_context->dx_query_mob) {
619 struct vmw_buffer_object *expected_dx_query_mob;
620
621 expected_dx_query_mob =
622 vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
623 if (expected_dx_query_mob &&
624 expected_dx_query_mob != sw_context->dx_query_mob) {
625 ret = -EINVAL;
626 }
627 }
628
629 return ret;
630 }
631
632 /**
633 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
634 * resource validate list unless it's already there.
635 *
636 * @dev_priv: Pointer to a device private structure.
637 * @sw_context: Pointer to the software context.
638 * @res_type: Resource type.
639 * @dirty: Whether to change dirty status.
640 * @converter: User-space visisble type specific information.
641 * @id_loc: Pointer to the location in the command buffer currently being parsed
642 * from where the user-space resource id handle is located.
643 * @p_val: Pointer to pointer to resource validalidation node. Populated on
644 * exit.
645 */
646 static int
647 vmw_cmd_res_check(struct vmw_private *dev_priv,
648 struct vmw_sw_context *sw_context,
649 enum vmw_res_type res_type,
650 u32 dirty,
651 const struct vmw_user_resource_conv *converter,
652 uint32_t *id_loc,
653 struct vmw_resource **p_res)
654 {
655 struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
656 struct vmw_resource *res;
657 int ret;
658
659 if (p_res)
660 *p_res = NULL;
661
662 if (*id_loc == SVGA3D_INVALID_ID) {
663 if (res_type == vmw_res_context) {
664 VMW_DEBUG_USER("Illegal context invalid id.\n");
665 return -EINVAL;
666 }
667 return 0;
668 }
669
670 if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
671 res = rcache->res;
672 if (dirty)
673 vmw_validation_res_set_dirty(sw_context->ctx,
674 rcache->private, dirty);
675 } else {
676 unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
677
678 ret = vmw_validation_preload_res(sw_context->ctx, size);
679 if (ret)
680 return ret;
681
682 res = vmw_user_resource_noref_lookup_handle
683 (dev_priv, sw_context->fp->tfile, *id_loc, converter);
684 if (IS_ERR(res)) {
685 VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
686 (unsigned int) *id_loc);
687 return PTR_ERR(res);
688 }
689
690 ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
691 if (unlikely(ret != 0))
692 return ret;
693
694 if (rcache->valid && rcache->res == res) {
695 rcache->valid_handle = true;
696 rcache->handle = *id_loc;
697 }
698 }
699
700 ret = vmw_resource_relocation_add(sw_context, res,
701 vmw_ptr_diff(sw_context->buf_start,
702 id_loc),
703 vmw_res_rel_normal);
704 if (p_res)
705 *p_res = res;
706
707 return 0;
708 }
709
710 /**
711 * vmw_rebind_dx_query - Rebind DX query associated with the context
712 *
713 * @ctx_res: context the query belongs to
714 *
715 * This function assumes binding_mutex is held.
716 */
717 static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
718 {
719 struct vmw_private *dev_priv = ctx_res->dev_priv;
720 struct vmw_buffer_object *dx_query_mob;
721 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
722
723 dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
724
725 if (!dx_query_mob || dx_query_mob->dx_query_ctx)
726 return 0;
727
728 cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
729 if (cmd == NULL)
730 return -ENOMEM;
731
732 cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
733 cmd->header.size = sizeof(cmd->body);
734 cmd->body.cid = ctx_res->id;
735 cmd->body.mobid = dx_query_mob->base.mem.start;
736 vmw_fifo_commit(dev_priv, sizeof(*cmd));
737
738 vmw_context_bind_dx_query(ctx_res, dx_query_mob);
739
740 return 0;
741 }
742
743 /**
744 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
745 * contexts.
746 *
747 * @sw_context: Pointer to the software context.
748 *
749 * Rebind context binding points that have been scrubbed because of eviction.
750 */
751 static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
752 {
753 struct vmw_ctx_validation_info *val;
754 int ret;
755
756 list_for_each_entry(val, &sw_context->ctx_list, head) {
757 ret = vmw_binding_rebind_all(val->cur);
758 if (unlikely(ret != 0)) {
759 if (ret != -ERESTARTSYS)
760 VMW_DEBUG_USER("Failed to rebind context.\n");
761 return ret;
762 }
763
764 ret = vmw_rebind_all_dx_query(val->ctx);
765 if (ret != 0) {
766 VMW_DEBUG_USER("Failed to rebind queries.\n");
767 return ret;
768 }
769 }
770
771 return 0;
772 }
773
774 /**
775 * vmw_view_bindings_add - Add an array of view bindings to a context binding
776 * state tracker.
777 *
778 * @sw_context: The execbuf state used for this command.
779 * @view_type: View type for the bindings.
780 * @binding_type: Binding type for the bindings.
781 * @shader_slot: The shader slot to user for the bindings.
782 * @view_ids: Array of view ids to be bound.
783 * @num_views: Number of view ids in @view_ids.
784 * @first_slot: The binding slot to be used for the first view id in @view_ids.
785 */
786 static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
787 enum vmw_view_type view_type,
788 enum vmw_ctx_binding_type binding_type,
789 uint32 shader_slot,
790 uint32 view_ids[], u32 num_views,
791 u32 first_slot)
792 {
793 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
794 u32 i;
795
796 if (!ctx_node)
797 return -EINVAL;
798
799 for (i = 0; i < num_views; ++i) {
800 struct vmw_ctx_bindinfo_view binding;
801 struct vmw_resource *view = NULL;
802
803 if (view_ids[i] != SVGA3D_INVALID_ID) {
804 view = vmw_view_id_val_add(sw_context, view_type,
805 view_ids[i]);
806 if (IS_ERR(view)) {
807 VMW_DEBUG_USER("View not found.\n");
808 return PTR_ERR(view);
809 }
810 }
811 binding.bi.ctx = ctx_node->ctx;
812 binding.bi.res = view;
813 binding.bi.bt = binding_type;
814 binding.shader_slot = shader_slot;
815 binding.slot = first_slot + i;
816 vmw_binding_add(ctx_node->staged, &binding.bi,
817 shader_slot, binding.slot);
818 }
819
820 return 0;
821 }
822
823 /**
824 * vmw_cmd_cid_check - Check a command header for valid context information.
825 *
826 * @dev_priv: Pointer to a device private structure.
827 * @sw_context: Pointer to the software context.
828 * @header: A command header with an embedded user-space context handle.
829 *
830 * Convenience function: Call vmw_cmd_res_check with the user-space context
831 * handle embedded in @header.
832 */
833 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
834 struct vmw_sw_context *sw_context,
835 SVGA3dCmdHeader *header)
836 {
837 VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
838 container_of(header, typeof(*cmd), header);
839
840 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
841 VMW_RES_DIRTY_SET, user_context_converter,
842 &cmd->body, NULL);
843 }
844
845 /**
846 * vmw_execbuf_info_from_res - Get the private validation metadata for a
847 * recently validated resource
848 *
849 * @sw_context: Pointer to the command submission context
850 * @res: The resource
851 *
852 * The resource pointed to by @res needs to be present in the command submission
853 * context's resource cache and hence the last resource of that type to be
854 * processed by the validation code.
855 *
856 * Return: a pointer to the private metadata of the resource, or NULL if it
857 * wasn't found
858 */
859 static struct vmw_ctx_validation_info *
860 vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
861 struct vmw_resource *res)
862 {
863 struct vmw_res_cache_entry *rcache =
864 &sw_context->res_cache[vmw_res_type(res)];
865
866 if (rcache->valid && rcache->res == res)
867 return rcache->private;
868
869 WARN_ON_ONCE(true);
870 return NULL;
871 }
872
873 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
874 struct vmw_sw_context *sw_context,
875 SVGA3dCmdHeader *header)
876 {
877 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
878 struct vmw_resource *ctx;
879 struct vmw_resource *res;
880 int ret;
881
882 cmd = container_of(header, typeof(*cmd), header);
883
884 if (cmd->body.type >= SVGA3D_RT_MAX) {
885 VMW_DEBUG_USER("Illegal render target type %u.\n",
886 (unsigned int) cmd->body.type);
887 return -EINVAL;
888 }
889
890 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
891 VMW_RES_DIRTY_SET, user_context_converter,
892 &cmd->body.cid, &ctx);
893 if (unlikely(ret != 0))
894 return ret;
895
896 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
897 VMW_RES_DIRTY_SET, user_surface_converter,
898 &cmd->body.target.sid, &res);
899 if (unlikely(ret))
900 return ret;
901
902 if (dev_priv->has_mob) {
903 struct vmw_ctx_bindinfo_view binding;
904 struct vmw_ctx_validation_info *node;
905
906 node = vmw_execbuf_info_from_res(sw_context, ctx);
907 if (!node)
908 return -EINVAL;
909
910 binding.bi.ctx = ctx;
911 binding.bi.res = res;
912 binding.bi.bt = vmw_ctx_binding_rt;
913 binding.slot = cmd->body.type;
914 vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
915 }
916
917 return 0;
918 }
919
920 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
921 struct vmw_sw_context *sw_context,
922 SVGA3dCmdHeader *header)
923 {
924 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
925 int ret;
926
927 cmd = container_of(header, typeof(*cmd), header);
928
929 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
930 VMW_RES_DIRTY_NONE, user_surface_converter,
931 &cmd->body.src.sid, NULL);
932 if (ret)
933 return ret;
934
935 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
936 VMW_RES_DIRTY_SET, user_surface_converter,
937 &cmd->body.dest.sid, NULL);
938 }
939
940 static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
941 struct vmw_sw_context *sw_context,
942 SVGA3dCmdHeader *header)
943 {
944 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
945 int ret;
946
947 cmd = container_of(header, typeof(*cmd), header);
948 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
949 VMW_RES_DIRTY_NONE, user_surface_converter,
950 &cmd->body.src, NULL);
951 if (ret != 0)
952 return ret;
953
954 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
955 VMW_RES_DIRTY_SET, user_surface_converter,
956 &cmd->body.dest, NULL);
957 }
958
959 static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
960 struct vmw_sw_context *sw_context,
961 SVGA3dCmdHeader *header)
962 {
963 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
964 int ret;
965
966 cmd = container_of(header, typeof(*cmd), header);
967 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
968 VMW_RES_DIRTY_NONE, user_surface_converter,
969 &cmd->body.srcSid, NULL);
970 if (ret != 0)
971 return ret;
972
973 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
974 VMW_RES_DIRTY_SET, user_surface_converter,
975 &cmd->body.dstSid, NULL);
976 }
977
978 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
979 struct vmw_sw_context *sw_context,
980 SVGA3dCmdHeader *header)
981 {
982 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
983 int ret;
984
985 cmd = container_of(header, typeof(*cmd), header);
986 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
987 VMW_RES_DIRTY_NONE, user_surface_converter,
988 &cmd->body.src.sid, NULL);
989 if (unlikely(ret != 0))
990 return ret;
991
992 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
993 VMW_RES_DIRTY_SET, user_surface_converter,
994 &cmd->body.dest.sid, NULL);
995 }
996
997 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
998 struct vmw_sw_context *sw_context,
999 SVGA3dCmdHeader *header)
1000 {
1001 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
1002 container_of(header, typeof(*cmd), header);
1003
1004 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1005 VMW_RES_DIRTY_NONE, user_surface_converter,
1006 &cmd->body.srcImage.sid, NULL);
1007 }
1008
1009 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
1010 struct vmw_sw_context *sw_context,
1011 SVGA3dCmdHeader *header)
1012 {
1013 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
1014 container_of(header, typeof(*cmd), header);
1015
1016 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1017 VMW_RES_DIRTY_NONE, user_surface_converter,
1018 &cmd->body.sid, NULL);
1019 }
1020
1021 /**
1022 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
1023 *
1024 * @dev_priv: The device private structure.
1025 * @new_query_bo: The new buffer holding query results.
1026 * @sw_context: The software context used for this command submission.
1027 *
1028 * This function checks whether @new_query_bo is suitable for holding query
1029 * results, and if another buffer currently is pinned for query results. If so,
1030 * the function prepares the state of @sw_context for switching pinned buffers
1031 * after successful submission of the current command batch.
1032 */
1033 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
1034 struct vmw_buffer_object *new_query_bo,
1035 struct vmw_sw_context *sw_context)
1036 {
1037 struct vmw_res_cache_entry *ctx_entry =
1038 &sw_context->res_cache[vmw_res_context];
1039 int ret;
1040
1041 BUG_ON(!ctx_entry->valid);
1042 sw_context->last_query_ctx = ctx_entry->res;
1043
1044 if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
1045
1046 if (unlikely(new_query_bo->base.num_pages > 4)) {
1047 VMW_DEBUG_USER("Query buffer too large.\n");
1048 return -EINVAL;
1049 }
1050
1051 if (unlikely(sw_context->cur_query_bo != NULL)) {
1052 sw_context->needs_post_query_barrier = true;
1053 ret = vmw_validation_add_bo(sw_context->ctx,
1054 sw_context->cur_query_bo,
1055 dev_priv->has_mob, false);
1056 if (unlikely(ret != 0))
1057 return ret;
1058 }
1059 sw_context->cur_query_bo = new_query_bo;
1060
1061 ret = vmw_validation_add_bo(sw_context->ctx,
1062 dev_priv->dummy_query_bo,
1063 dev_priv->has_mob, false);
1064 if (unlikely(ret != 0))
1065 return ret;
1066 }
1067
1068 return 0;
1069 }
1070
1071 /**
1072 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1073 *
1074 * @dev_priv: The device private structure.
1075 * @sw_context: The software context used for this command submission batch.
1076 *
1077 * This function will check if we're switching query buffers, and will then,
1078 * issue a dummy occlusion query wait used as a query barrier. When the fence
1079 * object following that query wait has signaled, we are sure that all preceding
1080 * queries have finished, and the old query buffer can be unpinned. However,
1081 * since both the new query buffer and the old one are fenced with that fence,
1082 * we can do an asynchronus unpin now, and be sure that the old query buffer
1083 * won't be moved until the fence has signaled.
1084 *
1085 * As mentioned above, both the new - and old query buffers need to be fenced
1086 * using a sequence emitted *after* calling this function.
1087 */
1088 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
1089 struct vmw_sw_context *sw_context)
1090 {
1091 /*
1092 * The validate list should still hold references to all
1093 * contexts here.
1094 */
1095 if (sw_context->needs_post_query_barrier) {
1096 struct vmw_res_cache_entry *ctx_entry =
1097 &sw_context->res_cache[vmw_res_context];
1098 struct vmw_resource *ctx;
1099 int ret;
1100
1101 BUG_ON(!ctx_entry->valid);
1102 ctx = ctx_entry->res;
1103
1104 ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
1105
1106 if (unlikely(ret != 0))
1107 VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
1108 }
1109
1110 if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1111 if (dev_priv->pinned_bo) {
1112 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
1113 vmw_bo_unreference(&dev_priv->pinned_bo);
1114 }
1115
1116 if (!sw_context->needs_post_query_barrier) {
1117 vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
1118
1119 /*
1120 * We pin also the dummy_query_bo buffer so that we
1121 * don't need to validate it when emitting dummy queries
1122 * in context destroy paths.
1123 */
1124 if (!dev_priv->dummy_query_bo_pinned) {
1125 vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1126 true);
1127 dev_priv->dummy_query_bo_pinned = true;
1128 }
1129
1130 BUG_ON(sw_context->last_query_ctx == NULL);
1131 dev_priv->query_cid = sw_context->last_query_ctx->id;
1132 dev_priv->query_cid_valid = true;
1133 dev_priv->pinned_bo =
1134 vmw_bo_reference(sw_context->cur_query_bo);
1135 }
1136 }
1137 }
1138
1139 /**
1140 * vmw_translate_mob_pointer - Prepare to translate a user-space buffer handle
1141 * to a MOB id.
1142 *
1143 * @dev_priv: Pointer to a device private structure.
1144 * @sw_context: The software context used for this command batch validation.
1145 * @id: Pointer to the user-space handle to be translated.
1146 * @vmw_bo_p: Points to a location that, on successful return will carry a
1147 * non-reference-counted pointer to the buffer object identified by the
1148 * user-space handle in @id.
1149 *
1150 * This function saves information needed to translate a user-space buffer
1151 * handle to a MOB id. The translation does not take place immediately, but
1152 * during a call to vmw_apply_relocations().
1153 *
1154 * This function builds a relocation list and a list of buffers to validate. The
1155 * former needs to be freed using either vmw_apply_relocations() or
1156 * vmw_free_relocations(). The latter needs to be freed using
1157 * vmw_clear_validations.
1158 */
1159 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
1160 struct vmw_sw_context *sw_context,
1161 SVGAMobId *id,
1162 struct vmw_buffer_object **vmw_bo_p)
1163 {
1164 struct vmw_buffer_object *vmw_bo;
1165 uint32_t handle = *id;
1166 struct vmw_relocation *reloc;
1167 int ret;
1168
1169 vmw_validation_preload_bo(sw_context->ctx);
1170 vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
1171 if (IS_ERR(vmw_bo)) {
1172 VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
1173 return PTR_ERR(vmw_bo);
1174 }
1175
1176 ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
1177 vmw_user_bo_noref_release();
1178 if (unlikely(ret != 0))
1179 return ret;
1180
1181 reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1182 if (!reloc)
1183 return -ENOMEM;
1184
1185 reloc->mob_loc = id;
1186 reloc->vbo = vmw_bo;
1187
1188 *vmw_bo_p = vmw_bo;
1189 list_add_tail(&reloc->head, &sw_context->bo_relocations);
1190
1191 return 0;
1192 }
1193
1194 /**
1195 * vmw_translate_guest_pointer - Prepare to translate a user-space buffer handle
1196 * to a valid SVGAGuestPtr
1197 *
1198 * @dev_priv: Pointer to a device private structure.
1199 * @sw_context: The software context used for this command batch validation.
1200 * @ptr: Pointer to the user-space handle to be translated.
1201 * @vmw_bo_p: Points to a location that, on successful return will carry a
1202 * non-reference-counted pointer to the DMA buffer identified by the user-space
1203 * handle in @id.
1204 *
1205 * This function saves information needed to translate a user-space buffer
1206 * handle to a valid SVGAGuestPtr. The translation does not take place
1207 * immediately, but during a call to vmw_apply_relocations().
1208 *
1209 * This function builds a relocation list and a list of buffers to validate.
1210 * The former needs to be freed using either vmw_apply_relocations() or
1211 * vmw_free_relocations(). The latter needs to be freed using
1212 * vmw_clear_validations.
1213 */
1214 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1215 struct vmw_sw_context *sw_context,
1216 SVGAGuestPtr *ptr,
1217 struct vmw_buffer_object **vmw_bo_p)
1218 {
1219 struct vmw_buffer_object *vmw_bo;
1220 uint32_t handle = ptr->gmrId;
1221 struct vmw_relocation *reloc;
1222 int ret;
1223
1224 vmw_validation_preload_bo(sw_context->ctx);
1225 vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
1226 if (IS_ERR(vmw_bo)) {
1227 VMW_DEBUG_USER("Could not find or use GMR region.\n");
1228 return PTR_ERR(vmw_bo);
1229 }
1230
1231 ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
1232 vmw_user_bo_noref_release();
1233 if (unlikely(ret != 0))
1234 return ret;
1235
1236 reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1237 if (!reloc)
1238 return -ENOMEM;
1239
1240 reloc->location = ptr;
1241 reloc->vbo = vmw_bo;
1242 *vmw_bo_p = vmw_bo;
1243 list_add_tail(&reloc->head, &sw_context->bo_relocations);
1244
1245 return 0;
1246 }
1247
1248 /**
1249 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
1250 *
1251 * @dev_priv: Pointer to a device private struct.
1252 * @sw_context: The software context used for this command submission.
1253 * @header: Pointer to the command header in the command stream.
1254 *
1255 * This function adds the new query into the query COTABLE
1256 */
1257 static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1258 struct vmw_sw_context *sw_context,
1259 SVGA3dCmdHeader *header)
1260 {
1261 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
1262 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
1263 struct vmw_resource *cotable_res;
1264 int ret;
1265
1266 if (!ctx_node)
1267 return -EINVAL;
1268
1269 cmd = container_of(header, typeof(*cmd), header);
1270
1271 if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
1272 cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
1273 return -EINVAL;
1274
1275 cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
1276 ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
1277
1278 return ret;
1279 }
1280
1281 /**
1282 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
1283 *
1284 * @dev_priv: Pointer to a device private struct.
1285 * @sw_context: The software context used for this command submission.
1286 * @header: Pointer to the command header in the command stream.
1287 *
1288 * The query bind operation will eventually associate the query ID with its
1289 * backing MOB. In this function, we take the user mode MOB ID and use
1290 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
1291 */
1292 static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1293 struct vmw_sw_context *sw_context,
1294 SVGA3dCmdHeader *header)
1295 {
1296 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
1297 struct vmw_buffer_object *vmw_bo;
1298 int ret;
1299
1300 cmd = container_of(header, typeof(*cmd), header);
1301
1302 /*
1303 * Look up the buffer pointed to by q.mobid, put it on the relocation
1304 * list so its kernel mode MOB ID can be filled in later
1305 */
1306 ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1307 &vmw_bo);
1308
1309 if (ret != 0)
1310 return ret;
1311
1312 sw_context->dx_query_mob = vmw_bo;
1313 sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
1314 return 0;
1315 }
1316
1317 /**
1318 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
1319 *
1320 * @dev_priv: Pointer to a device private struct.
1321 * @sw_context: The software context used for this command submission.
1322 * @header: Pointer to the command header in the command stream.
1323 */
1324 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1325 struct vmw_sw_context *sw_context,
1326 SVGA3dCmdHeader *header)
1327 {
1328 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
1329 container_of(header, typeof(*cmd), header);
1330
1331 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1332 VMW_RES_DIRTY_SET, user_context_converter,
1333 &cmd->body.cid, NULL);
1334 }
1335
1336 /**
1337 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
1338 *
1339 * @dev_priv: Pointer to a device private struct.
1340 * @sw_context: The software context used for this command submission.
1341 * @header: Pointer to the command header in the command stream.
1342 */
1343 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1344 struct vmw_sw_context *sw_context,
1345 SVGA3dCmdHeader *header)
1346 {
1347 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
1348 container_of(header, typeof(*cmd), header);
1349
1350 if (unlikely(dev_priv->has_mob)) {
1351 VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);
1352
1353 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1354
1355 gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1356 gb_cmd.header.size = cmd->header.size;
1357 gb_cmd.body.cid = cmd->body.cid;
1358 gb_cmd.body.type = cmd->body.type;
1359
1360 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1361 return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1362 }
1363
1364 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1365 VMW_RES_DIRTY_SET, user_context_converter,
1366 &cmd->body.cid, NULL);
1367 }
1368
1369 /**
1370 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
1371 *
1372 * @dev_priv: Pointer to a device private struct.
1373 * @sw_context: The software context used for this command submission.
1374 * @header: Pointer to the command header in the command stream.
1375 */
1376 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1377 struct vmw_sw_context *sw_context,
1378 SVGA3dCmdHeader *header)
1379 {
1380 struct vmw_buffer_object *vmw_bo;
1381 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
1382 int ret;
1383
1384 cmd = container_of(header, typeof(*cmd), header);
1385 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1386 if (unlikely(ret != 0))
1387 return ret;
1388
1389 ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1390 &vmw_bo);
1391 if (unlikely(ret != 0))
1392 return ret;
1393
1394 ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1395
1396 return ret;
1397 }
1398
1399 /**
1400 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
1401 *
1402 * @dev_priv: Pointer to a device private struct.
1403 * @sw_context: The software context used for this command submission.
1404 * @header: Pointer to the command header in the command stream.
1405 */
1406 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1407 struct vmw_sw_context *sw_context,
1408 SVGA3dCmdHeader *header)
1409 {
1410 struct vmw_buffer_object *vmw_bo;
1411 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
1412 int ret;
1413
1414 cmd = container_of(header, typeof(*cmd), header);
1415 if (dev_priv->has_mob) {
1416 VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);
1417
1418 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1419
1420 gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1421 gb_cmd.header.size = cmd->header.size;
1422 gb_cmd.body.cid = cmd->body.cid;
1423 gb_cmd.body.type = cmd->body.type;
1424 gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1425 gb_cmd.body.offset = cmd->body.guestResult.offset;
1426
1427 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1428 return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1429 }
1430
1431 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1432 if (unlikely(ret != 0))
1433 return ret;
1434
1435 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1436 &cmd->body.guestResult, &vmw_bo);
1437 if (unlikely(ret != 0))
1438 return ret;
1439
1440 ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1441
1442 return ret;
1443 }
1444
1445 /**
1446 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
1447 *
1448 * @dev_priv: Pointer to a device private struct.
1449 * @sw_context: The software context used for this command submission.
1450 * @header: Pointer to the command header in the command stream.
1451 */
1452 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1453 struct vmw_sw_context *sw_context,
1454 SVGA3dCmdHeader *header)
1455 {
1456 struct vmw_buffer_object *vmw_bo;
1457 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
1458 int ret;
1459
1460 cmd = container_of(header, typeof(*cmd), header);
1461 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1462 if (unlikely(ret != 0))
1463 return ret;
1464
1465 ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1466 &vmw_bo);
1467 if (unlikely(ret != 0))
1468 return ret;
1469
1470 return 0;
1471 }
1472
1473 /**
1474 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
1475 *
1476 * @dev_priv: Pointer to a device private struct.
1477 * @sw_context: The software context used for this command submission.
1478 * @header: Pointer to the command header in the command stream.
1479 */
1480 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1481 struct vmw_sw_context *sw_context,
1482 SVGA3dCmdHeader *header)
1483 {
1484 struct vmw_buffer_object *vmw_bo;
1485 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
1486 int ret;
1487
1488 cmd = container_of(header, typeof(*cmd), header);
1489 if (dev_priv->has_mob) {
1490 VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);
1491
1492 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1493
1494 gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1495 gb_cmd.header.size = cmd->header.size;
1496 gb_cmd.body.cid = cmd->body.cid;
1497 gb_cmd.body.type = cmd->body.type;
1498 gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1499 gb_cmd.body.offset = cmd->body.guestResult.offset;
1500
1501 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1502 return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1503 }
1504
1505 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1506 if (unlikely(ret != 0))
1507 return ret;
1508
1509 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1510 &cmd->body.guestResult, &vmw_bo);
1511 if (unlikely(ret != 0))
1512 return ret;
1513
1514 return 0;
1515 }
1516
1517 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1518 struct vmw_sw_context *sw_context,
1519 SVGA3dCmdHeader *header)
1520 {
1521 struct vmw_buffer_object *vmw_bo = NULL;
1522 struct vmw_surface *srf = NULL;
1523 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
1524 int ret;
1525 SVGA3dCmdSurfaceDMASuffix *suffix;
1526 uint32_t bo_size;
1527 bool dirty;
1528
1529 cmd = container_of(header, typeof(*cmd), header);
1530 suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
1531 header->size - sizeof(*suffix));
1532
1533 /* Make sure device and verifier stays in sync. */
1534 if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1535 VMW_DEBUG_USER("Invalid DMA suffix size.\n");
1536 return -EINVAL;
1537 }
1538
1539 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1540 &cmd->body.guest.ptr, &vmw_bo);
1541 if (unlikely(ret != 0))
1542 return ret;
1543
1544 /* Make sure DMA doesn't cross BO boundaries. */
1545 bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1546 if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
1547 VMW_DEBUG_USER("Invalid DMA offset.\n");
1548 return -EINVAL;
1549 }
1550
1551 bo_size -= cmd->body.guest.ptr.offset;
1552 if (unlikely(suffix->maximumOffset > bo_size))
1553 suffix->maximumOffset = bo_size;
1554
1555 dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
1556 VMW_RES_DIRTY_SET : 0;
1557 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1558 dirty, user_surface_converter,
1559 &cmd->body.host.sid, NULL);
1560 if (unlikely(ret != 0)) {
1561 if (unlikely(ret != -ERESTARTSYS))
1562 VMW_DEBUG_USER("could not find surface for DMA.\n");
1563 return ret;
1564 }
1565
1566 srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1567
1568 vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);
1569
1570 return 0;
1571 }
1572
1573 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1574 struct vmw_sw_context *sw_context,
1575 SVGA3dCmdHeader *header)
1576 {
1577 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
1578 SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1579 (unsigned long)header + sizeof(*cmd));
1580 SVGA3dPrimitiveRange *range;
1581 uint32_t i;
1582 uint32_t maxnum;
1583 int ret;
1584
1585 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1586 if (unlikely(ret != 0))
1587 return ret;
1588
1589 cmd = container_of(header, typeof(*cmd), header);
1590 maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1591
1592 if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1593 VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
1594 return -EINVAL;
1595 }
1596
1597 for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1598 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1599 VMW_RES_DIRTY_NONE,
1600 user_surface_converter,
1601 &decl->array.surfaceId, NULL);
1602 if (unlikely(ret != 0))
1603 return ret;
1604 }
1605
1606 maxnum = (header->size - sizeof(cmd->body) -
1607 cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1608 if (unlikely(cmd->body.numRanges > maxnum)) {
1609 VMW_DEBUG_USER("Illegal number of index ranges.\n");
1610 return -EINVAL;
1611 }
1612
1613 range = (SVGA3dPrimitiveRange *) decl;
1614 for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1615 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1616 VMW_RES_DIRTY_NONE,
1617 user_surface_converter,
1618 &range->indexArray.surfaceId, NULL);
1619 if (unlikely(ret != 0))
1620 return ret;
1621 }
1622 return 0;
1623 }
1624
1625 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1626 struct vmw_sw_context *sw_context,
1627 SVGA3dCmdHeader *header)
1628 {
1629 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
1630 SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1631 ((unsigned long) header + header->size + sizeof(header));
1632 SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1633 ((unsigned long) header + sizeof(*cmd));
1634 struct vmw_resource *ctx;
1635 struct vmw_resource *res;
1636 int ret;
1637
1638 cmd = container_of(header, typeof(*cmd), header);
1639
1640 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1641 VMW_RES_DIRTY_SET, user_context_converter,
1642 &cmd->body.cid, &ctx);
1643 if (unlikely(ret != 0))
1644 return ret;
1645
1646 for (; cur_state < last_state; ++cur_state) {
1647 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1648 continue;
1649
1650 if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1651 VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
1652 (unsigned int) cur_state->stage);
1653 return -EINVAL;
1654 }
1655
1656 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1657 VMW_RES_DIRTY_NONE,
1658 user_surface_converter,
1659 &cur_state->value, &res);
1660 if (unlikely(ret != 0))
1661 return ret;
1662
1663 if (dev_priv->has_mob) {
1664 struct vmw_ctx_bindinfo_tex binding;
1665 struct vmw_ctx_validation_info *node;
1666
1667 node = vmw_execbuf_info_from_res(sw_context, ctx);
1668 if (!node)
1669 return -EINVAL;
1670
1671 binding.bi.ctx = ctx;
1672 binding.bi.res = res;
1673 binding.bi.bt = vmw_ctx_binding_tex;
1674 binding.texture_stage = cur_state->stage;
1675 vmw_binding_add(node->staged, &binding.bi, 0,
1676 binding.texture_stage);
1677 }
1678 }
1679
1680 return 0;
1681 }
1682
1683 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1684 struct vmw_sw_context *sw_context,
1685 void *buf)
1686 {
1687 struct vmw_buffer_object *vmw_bo;
1688
1689 struct {
1690 uint32_t header;
1691 SVGAFifoCmdDefineGMRFB body;
1692 } *cmd = buf;
1693
1694 return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
1695 &vmw_bo);
1696 }
1697
1698 /**
1699 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1700 * switching
1701 *
1702 * @dev_priv: Pointer to a device private struct.
1703 * @sw_context: The software context being used for this batch.
1704 * @val_node: The validation node representing the resource.
1705 * @buf_id: Pointer to the user-space backup buffer handle in the command
1706 * stream.
1707 * @backup_offset: Offset of backup into MOB.
1708 *
1709 * This function prepares for registering a switch of backup buffers in the
1710 * resource metadata just prior to unreserving. It's basically a wrapper around
1711 * vmw_cmd_res_switch_backup with a different interface.
1712 */
1713 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1714 struct vmw_sw_context *sw_context,
1715 struct vmw_resource *res, uint32_t *buf_id,
1716 unsigned long backup_offset)
1717 {
1718 struct vmw_buffer_object *vbo;
1719 void *info;
1720 int ret;
1721
1722 info = vmw_execbuf_info_from_res(sw_context, res);
1723 if (!info)
1724 return -EINVAL;
1725
1726 ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1727 if (ret)
1728 return ret;
1729
1730 vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1731 backup_offset);
1732 return 0;
1733 }
1734
1735 /**
1736 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1737 *
1738 * @dev_priv: Pointer to a device private struct.
1739 * @sw_context: The software context being used for this batch.
1740 * @res_type: The resource type.
1741 * @converter: Information about user-space binding for this resource type.
1742 * @res_id: Pointer to the user-space resource handle in the command stream.
1743 * @buf_id: Pointer to the user-space backup buffer handle in the command
1744 * stream.
1745 * @backup_offset: Offset of backup into MOB.
1746 *
1747 * This function prepares for registering a switch of backup buffers in the
1748 * resource metadata just prior to unreserving. It's basically a wrapper around
1749 * vmw_cmd_res_switch_backup with a different interface.
1750 */
1751 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1752 struct vmw_sw_context *sw_context,
1753 enum vmw_res_type res_type,
1754 const struct vmw_user_resource_conv
1755 *converter, uint32_t *res_id, uint32_t *buf_id,
1756 unsigned long backup_offset)
1757 {
1758 struct vmw_resource *res;
1759 int ret;
1760
1761 ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1762 VMW_RES_DIRTY_NONE, converter, res_id, &res);
1763 if (ret)
1764 return ret;
1765
1766 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
1767 backup_offset);
1768 }
1769
1770 /**
1771 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1772 *
1773 * @dev_priv: Pointer to a device private struct.
1774 * @sw_context: The software context being used for this batch.
1775 * @header: Pointer to the command header in the command stream.
1776 */
1777 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1778 struct vmw_sw_context *sw_context,
1779 SVGA3dCmdHeader *header)
1780 {
1781 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1782 container_of(header, typeof(*cmd), header);
1783
1784 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1785 user_surface_converter, &cmd->body.sid,
1786 &cmd->body.mobid, 0);
1787 }
1788
1789 /**
1790 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1791 *
1792 * @dev_priv: Pointer to a device private struct.
1793 * @sw_context: The software context being used for this batch.
1794 * @header: Pointer to the command header in the command stream.
1795 */
1796 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1797 struct vmw_sw_context *sw_context,
1798 SVGA3dCmdHeader *header)
1799 {
1800 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1801 container_of(header, typeof(*cmd), header);
1802
1803 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1804 VMW_RES_DIRTY_NONE, user_surface_converter,
1805 &cmd->body.image.sid, NULL);
1806 }
1807
1808 /**
1809 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1810 *
1811 * @dev_priv: Pointer to a device private struct.
1812 * @sw_context: The software context being used for this batch.
1813 * @header: Pointer to the command header in the command stream.
1814 */
1815 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1816 struct vmw_sw_context *sw_context,
1817 SVGA3dCmdHeader *header)
1818 {
1819 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1820 container_of(header, typeof(*cmd), header);
1821
1822 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1823 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1824 &cmd->body.sid, NULL);
1825 }
1826
1827 /**
1828 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1829 *
1830 * @dev_priv: Pointer to a device private struct.
1831 * @sw_context: The software context being used for this batch.
1832 * @header: Pointer to the command header in the command stream.
1833 */
1834 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1835 struct vmw_sw_context *sw_context,
1836 SVGA3dCmdHeader *header)
1837 {
1838 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1839 container_of(header, typeof(*cmd), header);
1840
1841 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1842 VMW_RES_DIRTY_NONE, user_surface_converter,
1843 &cmd->body.image.sid, NULL);
1844 }
1845
1846 /**
1847 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1848 * command
1849 *
1850 * @dev_priv: Pointer to a device private struct.
1851 * @sw_context: The software context being used for this batch.
1852 * @header: Pointer to the command header in the command stream.
1853 */
1854 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1855 struct vmw_sw_context *sw_context,
1856 SVGA3dCmdHeader *header)
1857 {
1858 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
1859 container_of(header, typeof(*cmd), header);
1860
1861 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1862 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1863 &cmd->body.sid, NULL);
1864 }
1865
1866 /**
1867 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1868 * command
1869 *
1870 * @dev_priv: Pointer to a device private struct.
1871 * @sw_context: The software context being used for this batch.
1872 * @header: Pointer to the command header in the command stream.
1873 */
1874 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1875 struct vmw_sw_context *sw_context,
1876 SVGA3dCmdHeader *header)
1877 {
1878 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
1879 container_of(header, typeof(*cmd), header);
1880
1881 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1882 VMW_RES_DIRTY_NONE, user_surface_converter,
1883 &cmd->body.image.sid, NULL);
1884 }
1885
1886 /**
1887 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
1888 * command
1889 *
1890 * @dev_priv: Pointer to a device private struct.
1891 * @sw_context: The software context being used for this batch.
1892 * @header: Pointer to the command header in the command stream.
1893 */
1894 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1895 struct vmw_sw_context *sw_context,
1896 SVGA3dCmdHeader *header)
1897 {
1898 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
1899 container_of(header, typeof(*cmd), header);
1900
1901 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1902 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1903 &cmd->body.sid, NULL);
1904 }
1905
1906 /**
1907 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
1908 *
1909 * @dev_priv: Pointer to a device private struct.
1910 * @sw_context: The software context being used for this batch.
1911 * @header: Pointer to the command header in the command stream.
1912 */
1913 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1914 struct vmw_sw_context *sw_context,
1915 SVGA3dCmdHeader *header)
1916 {
1917 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
1918 int ret;
1919 size_t size;
1920 struct vmw_resource *ctx;
1921
1922 cmd = container_of(header, typeof(*cmd), header);
1923
1924 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1925 VMW_RES_DIRTY_SET, user_context_converter,
1926 &cmd->body.cid, &ctx);
1927 if (unlikely(ret != 0))
1928 return ret;
1929
1930 if (unlikely(!dev_priv->has_mob))
1931 return 0;
1932
1933 size = cmd->header.size - sizeof(cmd->body);
1934 ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
1935 cmd->body.shid, cmd + 1, cmd->body.type,
1936 size, &sw_context->staged_cmd_res);
1937 if (unlikely(ret != 0))
1938 return ret;
1939
1940 return vmw_resource_relocation_add(sw_context, NULL,
1941 vmw_ptr_diff(sw_context->buf_start,
1942 &cmd->header.id),
1943 vmw_res_rel_nop);
1944 }
1945
1946 /**
1947 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1948 *
1949 * @dev_priv: Pointer to a device private struct.
1950 * @sw_context: The software context being used for this batch.
1951 * @header: Pointer to the command header in the command stream.
1952 */
1953 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1954 struct vmw_sw_context *sw_context,
1955 SVGA3dCmdHeader *header)
1956 {
1957 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
1958 int ret;
1959 struct vmw_resource *ctx;
1960
1961 cmd = container_of(header, typeof(*cmd), header);
1962
1963 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1964 VMW_RES_DIRTY_SET, user_context_converter,
1965 &cmd->body.cid, &ctx);
1966 if (unlikely(ret != 0))
1967 return ret;
1968
1969 if (unlikely(!dev_priv->has_mob))
1970 return 0;
1971
1972 ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
1973 cmd->body.type, &sw_context->staged_cmd_res);
1974 if (unlikely(ret != 0))
1975 return ret;
1976
1977 return vmw_resource_relocation_add(sw_context, NULL,
1978 vmw_ptr_diff(sw_context->buf_start,
1979 &cmd->header.id),
1980 vmw_res_rel_nop);
1981 }
1982
1983 /**
1984 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
1985 *
1986 * @dev_priv: Pointer to a device private struct.
1987 * @sw_context: The software context being used for this batch.
1988 * @header: Pointer to the command header in the command stream.
1989 */
1990 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1991 struct vmw_sw_context *sw_context,
1992 SVGA3dCmdHeader *header)
1993 {
1994 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
1995 struct vmw_ctx_bindinfo_shader binding;
1996 struct vmw_resource *ctx, *res = NULL;
1997 struct vmw_ctx_validation_info *ctx_info;
1998 int ret;
1999
2000 cmd = container_of(header, typeof(*cmd), header);
2001
2002 if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2003 VMW_DEBUG_USER("Illegal shader type %u.\n",
2004 (unsigned int) cmd->body.type);
2005 return -EINVAL;
2006 }
2007
2008 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2009 VMW_RES_DIRTY_SET, user_context_converter,
2010 &cmd->body.cid, &ctx);
2011 if (unlikely(ret != 0))
2012 return ret;
2013
2014 if (!dev_priv->has_mob)
2015 return 0;
2016
2017 if (cmd->body.shid != SVGA3D_INVALID_ID) {
2018 /*
2019 * This is the compat shader path - Per device guest-backed
2020 * shaders, but user-space thinks it's per context host-
2021 * backed shaders.
2022 */
2023 res = vmw_shader_lookup(vmw_context_res_man(ctx),
2024 cmd->body.shid, cmd->body.type);
2025 if (!IS_ERR(res)) {
2026 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2027 VMW_RES_DIRTY_NONE);
2028 if (unlikely(ret != 0))
2029 return ret;
2030
2031 ret = vmw_resource_relocation_add
2032 (sw_context, res,
2033 vmw_ptr_diff(sw_context->buf_start,
2034 &cmd->body.shid),
2035 vmw_res_rel_normal);
2036 if (unlikely(ret != 0))
2037 return ret;
2038 }
2039 }
2040
2041 if (IS_ERR_OR_NULL(res)) {
2042 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
2043 VMW_RES_DIRTY_NONE,
2044 user_shader_converter, &cmd->body.shid,
2045 &res);
2046 if (unlikely(ret != 0))
2047 return ret;
2048 }
2049
2050 ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2051 if (!ctx_info)
2052 return -EINVAL;
2053
2054 binding.bi.ctx = ctx;
2055 binding.bi.res = res;
2056 binding.bi.bt = vmw_ctx_binding_shader;
2057 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2058 vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2059
2060 return 0;
2061 }
2062
2063 /**
2064 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2065 *
2066 * @dev_priv: Pointer to a device private struct.
2067 * @sw_context: The software context being used for this batch.
2068 * @header: Pointer to the command header in the command stream.
2069 */
2070 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2071 struct vmw_sw_context *sw_context,
2072 SVGA3dCmdHeader *header)
2073 {
2074 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2075 int ret;
2076
2077 cmd = container_of(header, typeof(*cmd), header);
2078
2079 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2080 VMW_RES_DIRTY_SET, user_context_converter,
2081 &cmd->body.cid, NULL);
2082 if (unlikely(ret != 0))
2083 return ret;
2084
2085 if (dev_priv->has_mob)
2086 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2087
2088 return 0;
2089 }
2090
2091 /**
2092 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2093 *
2094 * @dev_priv: Pointer to a device private struct.
2095 * @sw_context: The software context being used for this batch.
2096 * @header: Pointer to the command header in the command stream.
2097 */
2098 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2099 struct vmw_sw_context *sw_context,
2100 SVGA3dCmdHeader *header)
2101 {
2102 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2103 container_of(header, typeof(*cmd), header);
2104
2105 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2106 user_shader_converter, &cmd->body.shid,
2107 &cmd->body.mobid, cmd->body.offsetInBytes);
2108 }
2109
2110 /**
2111 * vmw_cmd_dx_set_single_constant_buffer - Validate
2112 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2113 *
2114 * @dev_priv: Pointer to a device private struct.
2115 * @sw_context: The software context being used for this batch.
2116 * @header: Pointer to the command header in the command stream.
2117 */
2118 static int
2119 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2120 struct vmw_sw_context *sw_context,
2121 SVGA3dCmdHeader *header)
2122 {
2123 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2124 struct vmw_resource *res = NULL;
2125 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2126 struct vmw_ctx_bindinfo_cb binding;
2127 int ret;
2128
2129 if (!ctx_node)
2130 return -EINVAL;
2131
2132 cmd = container_of(header, typeof(*cmd), header);
2133 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2134 VMW_RES_DIRTY_NONE, user_surface_converter,
2135 &cmd->body.sid, &res);
2136 if (unlikely(ret != 0))
2137 return ret;
2138
2139 binding.bi.ctx = ctx_node->ctx;
2140 binding.bi.res = res;
2141 binding.bi.bt = vmw_ctx_binding_cb;
2142 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2143 binding.offset = cmd->body.offsetInBytes;
2144 binding.size = cmd->body.sizeInBytes;
2145 binding.slot = cmd->body.slot;
2146
2147 if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2148 binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2149 VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2150 (unsigned int) cmd->body.type,
2151 (unsigned int) binding.slot);
2152 return -EINVAL;
2153 }
2154
2155 vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2156 binding.slot);
2157
2158 return 0;
2159 }
2160
2161 /**
2162 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2163 * command
2164 *
2165 * @dev_priv: Pointer to a device private struct.
2166 * @sw_context: The software context being used for this batch.
2167 * @header: Pointer to the command header in the command stream.
2168 */
2169 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2170 struct vmw_sw_context *sw_context,
2171 SVGA3dCmdHeader *header)
2172 {
2173 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2174 container_of(header, typeof(*cmd), header);
2175 u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2176 sizeof(SVGA3dShaderResourceViewId);
2177
2178 if ((u64) cmd->body.startView + (u64) num_sr_view >
2179 (u64) SVGA3D_DX_MAX_SRVIEWS ||
2180 cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2181 VMW_DEBUG_USER("Invalid shader binding.\n");
2182 return -EINVAL;
2183 }
2184
2185 return vmw_view_bindings_add(sw_context, vmw_view_sr,
2186 vmw_ctx_binding_sr,
2187 cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2188 (void *) &cmd[1], num_sr_view,
2189 cmd->body.startView);
2190 }
2191
2192 /**
2193 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2194 *
2195 * @dev_priv: Pointer to a device private struct.
2196 * @sw_context: The software context being used for this batch.
2197 * @header: Pointer to the command header in the command stream.
2198 */
2199 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2200 struct vmw_sw_context *sw_context,
2201 SVGA3dCmdHeader *header)
2202 {
2203 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2204 struct vmw_resource *res = NULL;
2205 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2206 struct vmw_ctx_bindinfo_shader binding;
2207 int ret = 0;
2208
2209 if (!ctx_node)
2210 return -EINVAL;
2211
2212 cmd = container_of(header, typeof(*cmd), header);
2213
2214 if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
2215 cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
2216 VMW_DEBUG_USER("Illegal shader type %u.\n",
2217 (unsigned int) cmd->body.type);
2218 return -EINVAL;
2219 }
2220
2221 if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2222 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2223 if (IS_ERR(res)) {
2224 VMW_DEBUG_USER("Could not find shader for binding.\n");
2225 return PTR_ERR(res);
2226 }
2227
2228 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2229 VMW_RES_DIRTY_NONE);
2230 if (ret)
2231 return ret;
2232 }
2233
2234 binding.bi.ctx = ctx_node->ctx;
2235 binding.bi.res = res;
2236 binding.bi.bt = vmw_ctx_binding_dx_shader;
2237 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2238
2239 vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2240
2241 return 0;
2242 }
2243
2244 /**
2245 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2246 * command
2247 *
2248 * @dev_priv: Pointer to a device private struct.
2249 * @sw_context: The software context being used for this batch.
2250 * @header: Pointer to the command header in the command stream.
2251 */
2252 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2253 struct vmw_sw_context *sw_context,
2254 SVGA3dCmdHeader *header)
2255 {
2256 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2257 struct vmw_ctx_bindinfo_vb binding;
2258 struct vmw_resource *res;
2259 struct {
2260 SVGA3dCmdHeader header;
2261 SVGA3dCmdDXSetVertexBuffers body;
2262 SVGA3dVertexBuffer buf[];
2263 } *cmd;
2264 int i, ret, num;
2265
2266 if (!ctx_node)
2267 return -EINVAL;
2268
2269 cmd = container_of(header, typeof(*cmd), header);
2270 num = (cmd->header.size - sizeof(cmd->body)) /
2271 sizeof(SVGA3dVertexBuffer);
2272 if ((u64)num + (u64)cmd->body.startBuffer >
2273 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2274 VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2275 return -EINVAL;
2276 }
2277
2278 for (i = 0; i < num; i++) {
2279 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2280 VMW_RES_DIRTY_NONE,
2281 user_surface_converter,
2282 &cmd->buf[i].sid, &res);
2283 if (unlikely(ret != 0))
2284 return ret;
2285
2286 binding.bi.ctx = ctx_node->ctx;
2287 binding.bi.bt = vmw_ctx_binding_vb;
2288 binding.bi.res = res;
2289 binding.offset = cmd->buf[i].offset;
2290 binding.stride = cmd->buf[i].stride;
2291 binding.slot = i + cmd->body.startBuffer;
2292
2293 vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2294 }
2295
2296 return 0;
2297 }
2298
2299 /**
2300 * vmw_cmd_dx_ia_set_vertex_buffers - Validate
2301 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
2302 *
2303 * @dev_priv: Pointer to a device private struct.
2304 * @sw_context: The software context being used for this batch.
2305 * @header: Pointer to the command header in the command stream.
2306 */
2307 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2308 struct vmw_sw_context *sw_context,
2309 SVGA3dCmdHeader *header)
2310 {
2311 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2312 struct vmw_ctx_bindinfo_ib binding;
2313 struct vmw_resource *res;
2314 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
2315 int ret;
2316
2317 if (!ctx_node)
2318 return -EINVAL;
2319
2320 cmd = container_of(header, typeof(*cmd), header);
2321 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2322 VMW_RES_DIRTY_NONE, user_surface_converter,
2323 &cmd->body.sid, &res);
2324 if (unlikely(ret != 0))
2325 return ret;
2326
2327 binding.bi.ctx = ctx_node->ctx;
2328 binding.bi.res = res;
2329 binding.bi.bt = vmw_ctx_binding_ib;
2330 binding.offset = cmd->body.offset;
2331 binding.format = cmd->body.format;
2332
2333 vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2334
2335 return 0;
2336 }
2337
2338 /**
2339 * vmw_cmd_dx_set_rendertarget - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2340 * command
2341 *
2342 * @dev_priv: Pointer to a device private struct.
2343 * @sw_context: The software context being used for this batch.
2344 * @header: Pointer to the command header in the command stream.
2345 */
2346 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2347 struct vmw_sw_context *sw_context,
2348 SVGA3dCmdHeader *header)
2349 {
2350 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2351 container_of(header, typeof(*cmd), header);
2352 u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2353 sizeof(SVGA3dRenderTargetViewId);
2354 int ret;
2355
2356 if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2357 VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2358 return -EINVAL;
2359 }
2360
2361 ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2362 0, &cmd->body.depthStencilViewId, 1, 0);
2363 if (ret)
2364 return ret;
2365
2366 return vmw_view_bindings_add(sw_context, vmw_view_rt,
2367 vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2368 num_rt_view, 0);
2369 }
2370
2371 /**
2372 * vmw_cmd_dx_clear_rendertarget_view - Validate
2373 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2374 *
2375 * @dev_priv: Pointer to a device private struct.
2376 * @sw_context: The software context being used for this batch.
2377 * @header: Pointer to the command header in the command stream.
2378 */
2379 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2380 struct vmw_sw_context *sw_context,
2381 SVGA3dCmdHeader *header)
2382 {
2383 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2384 container_of(header, typeof(*cmd), header);
2385 struct vmw_resource *ret;
2386
2387 ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
2388 cmd->body.renderTargetViewId);
2389
2390 return PTR_ERR_OR_ZERO(ret);
2391 }
2392
2393 /**
2394 * vmw_cmd_dx_clear_rendertarget_view - Validate
2395 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2396 *
2397 * @dev_priv: Pointer to a device private struct.
2398 * @sw_context: The software context being used for this batch.
2399 * @header: Pointer to the command header in the command stream.
2400 */
2401 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2402 struct vmw_sw_context *sw_context,
2403 SVGA3dCmdHeader *header)
2404 {
2405 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2406 container_of(header, typeof(*cmd), header);
2407 struct vmw_resource *ret;
2408
2409 ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
2410 cmd->body.depthStencilViewId);
2411
2412 return PTR_ERR_OR_ZERO(ret);
2413 }
2414
2415 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2416 struct vmw_sw_context *sw_context,
2417 SVGA3dCmdHeader *header)
2418 {
2419 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2420 struct vmw_resource *srf;
2421 struct vmw_resource *res;
2422 enum vmw_view_type view_type;
2423 int ret;
2424 /*
2425 * This is based on the fact that all affected define commands have the
2426 * same initial command body layout.
2427 */
2428 struct {
2429 SVGA3dCmdHeader header;
2430 uint32 defined_id;
2431 uint32 sid;
2432 } *cmd;
2433
2434 if (!ctx_node)
2435 return -EINVAL;
2436
2437 view_type = vmw_view_cmd_to_type(header->id);
2438 if (view_type == vmw_view_max)
2439 return -EINVAL;
2440
2441 cmd = container_of(header, typeof(*cmd), header);
2442 if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2443 VMW_DEBUG_USER("Invalid surface id.\n");
2444 return -EINVAL;
2445 }
2446 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2447 VMW_RES_DIRTY_NONE, user_surface_converter,
2448 &cmd->sid, &srf);
2449 if (unlikely(ret != 0))
2450 return ret;
2451
2452 res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2453 ret = vmw_cotable_notify(res, cmd->defined_id);
2454 if (unlikely(ret != 0))
2455 return ret;
2456
2457 return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2458 cmd->defined_id, header,
2459 header->size + sizeof(*header),
2460 &sw_context->staged_cmd_res);
2461 }
2462
2463 /**
2464 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2465 *
2466 * @dev_priv: Pointer to a device private struct.
2467 * @sw_context: The software context being used for this batch.
2468 * @header: Pointer to the command header in the command stream.
2469 */
2470 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2471 struct vmw_sw_context *sw_context,
2472 SVGA3dCmdHeader *header)
2473 {
2474 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2475 struct vmw_ctx_bindinfo_so binding;
2476 struct vmw_resource *res;
2477 struct {
2478 SVGA3dCmdHeader header;
2479 SVGA3dCmdDXSetSOTargets body;
2480 SVGA3dSoTarget targets[];
2481 } *cmd;
2482 int i, ret, num;
2483
2484 if (!ctx_node)
2485 return -EINVAL;
2486
2487 cmd = container_of(header, typeof(*cmd), header);
2488 num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2489
2490 if (num > SVGA3D_DX_MAX_SOTARGETS) {
2491 VMW_DEBUG_USER("Invalid DX SO binding.\n");
2492 return -EINVAL;
2493 }
2494
2495 for (i = 0; i < num; i++) {
2496 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2497 VMW_RES_DIRTY_SET,
2498 user_surface_converter,
2499 &cmd->targets[i].sid, &res);
2500 if (unlikely(ret != 0))
2501 return ret;
2502
2503 binding.bi.ctx = ctx_node->ctx;
2504 binding.bi.res = res;
2505 binding.bi.bt = vmw_ctx_binding_so,
2506 binding.offset = cmd->targets[i].offset;
2507 binding.size = cmd->targets[i].sizeInBytes;
2508 binding.slot = i;
2509
2510 vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2511 }
2512
2513 return 0;
2514 }
2515
2516 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2517 struct vmw_sw_context *sw_context,
2518 SVGA3dCmdHeader *header)
2519 {
2520 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2521 struct vmw_resource *res;
2522 /*
2523 * This is based on the fact that all affected define commands have
2524 * the same initial command body layout.
2525 */
2526 struct {
2527 SVGA3dCmdHeader header;
2528 uint32 defined_id;
2529 } *cmd;
2530 enum vmw_so_type so_type;
2531 int ret;
2532
2533 if (!ctx_node)
2534 return -EINVAL;
2535
2536 so_type = vmw_so_cmd_to_type(header->id);
2537 res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2538 cmd = container_of(header, typeof(*cmd), header);
2539 ret = vmw_cotable_notify(res, cmd->defined_id);
2540
2541 return ret;
2542 }
2543
2544 /**
2545 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2546 * command
2547 *
2548 * @dev_priv: Pointer to a device private struct.
2549 * @sw_context: The software context being used for this batch.
2550 * @header: Pointer to the command header in the command stream.
2551 */
2552 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2553 struct vmw_sw_context *sw_context,
2554 SVGA3dCmdHeader *header)
2555 {
2556 struct {
2557 SVGA3dCmdHeader header;
2558 union {
2559 SVGA3dCmdDXReadbackSubResource r_body;
2560 SVGA3dCmdDXInvalidateSubResource i_body;
2561 SVGA3dCmdDXUpdateSubResource u_body;
2562 SVGA3dSurfaceId sid;
2563 };
2564 } *cmd;
2565
2566 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2567 offsetof(typeof(*cmd), sid));
2568 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2569 offsetof(typeof(*cmd), sid));
2570 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2571 offsetof(typeof(*cmd), sid));
2572
2573 cmd = container_of(header, typeof(*cmd), header);
2574 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2575 VMW_RES_DIRTY_NONE, user_surface_converter,
2576 &cmd->sid, NULL);
2577 }
2578
2579 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2580 struct vmw_sw_context *sw_context,
2581 SVGA3dCmdHeader *header)
2582 {
2583 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2584
2585 if (!ctx_node)
2586 return -EINVAL;
2587
2588 return 0;
2589 }
2590
2591 /**
2592 * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view
2593 * resource for removal.
2594 *
2595 * @dev_priv: Pointer to a device private struct.
2596 * @sw_context: The software context being used for this batch.
2597 * @header: Pointer to the command header in the command stream.
2598 *
2599 * Check that the view exists, and if it was not created using this command
2600 * batch, conditionally make this command a NOP.
2601 */
2602 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2603 struct vmw_sw_context *sw_context,
2604 SVGA3dCmdHeader *header)
2605 {
2606 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2607 struct {
2608 SVGA3dCmdHeader header;
2609 union vmw_view_destroy body;
2610 } *cmd = container_of(header, typeof(*cmd), header);
2611 enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2612 struct vmw_resource *view;
2613 int ret;
2614
2615 if (!ctx_node)
2616 return -EINVAL;
2617
2618 ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2619 &sw_context->staged_cmd_res, &view);
2620 if (ret || !view)
2621 return ret;
2622
2623 /*
2624 * If the view wasn't created during this command batch, it might
2625 * have been removed due to a context swapout, so add a
2626 * relocation to conditionally make this command a NOP to avoid
2627 * device errors.
2628 */
2629 return vmw_resource_relocation_add(sw_context, view,
2630 vmw_ptr_diff(sw_context->buf_start,
2631 &cmd->header.id),
2632 vmw_res_rel_cond_nop);
2633 }
2634
2635 /**
2636 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2637 *
2638 * @dev_priv: Pointer to a device private struct.
2639 * @sw_context: The software context being used for this batch.
2640 * @header: Pointer to the command header in the command stream.
2641 */
2642 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2643 struct vmw_sw_context *sw_context,
2644 SVGA3dCmdHeader *header)
2645 {
2646 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2647 struct vmw_resource *res;
2648 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2649 container_of(header, typeof(*cmd), header);
2650 int ret;
2651
2652 if (!ctx_node)
2653 return -EINVAL;
2654
2655 res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2656 ret = vmw_cotable_notify(res, cmd->body.shaderId);
2657 if (ret)
2658 return ret;
2659
2660 return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2661 cmd->body.shaderId, cmd->body.type,
2662 &sw_context->staged_cmd_res);
2663 }
2664
2665 /**
2666 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2667 *
2668 * @dev_priv: Pointer to a device private struct.
2669 * @sw_context: The software context being used for this batch.
2670 * @header: Pointer to the command header in the command stream.
2671 */
2672 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2673 struct vmw_sw_context *sw_context,
2674 SVGA3dCmdHeader *header)
2675 {
2676 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2677 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2678 container_of(header, typeof(*cmd), header);
2679 int ret;
2680
2681 if (!ctx_node)
2682 return -EINVAL;
2683
2684 ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2685 &sw_context->staged_cmd_res);
2686
2687 return ret;
2688 }
2689
2690 /**
2691 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2692 *
2693 * @dev_priv: Pointer to a device private struct.
2694 * @sw_context: The software context being used for this batch.
2695 * @header: Pointer to the command header in the command stream.
2696 */
2697 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2698 struct vmw_sw_context *sw_context,
2699 SVGA3dCmdHeader *header)
2700 {
2701 struct vmw_resource *ctx;
2702 struct vmw_resource *res;
2703 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2704 container_of(header, typeof(*cmd), header);
2705 int ret;
2706
2707 if (cmd->body.cid != SVGA3D_INVALID_ID) {
2708 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2709 VMW_RES_DIRTY_SET,
2710 user_context_converter, &cmd->body.cid,
2711 &ctx);
2712 if (ret)
2713 return ret;
2714 } else {
2715 struct vmw_ctx_validation_info *ctx_node =
2716 VMW_GET_CTX_NODE(sw_context);
2717
2718 if (!ctx_node)
2719 return -EINVAL;
2720
2721 ctx = ctx_node->ctx;
2722 }
2723
2724 res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2725 if (IS_ERR(res)) {
2726 VMW_DEBUG_USER("Could not find shader to bind.\n");
2727 return PTR_ERR(res);
2728 }
2729
2730 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2731 VMW_RES_DIRTY_NONE);
2732 if (ret) {
2733 VMW_DEBUG_USER("Error creating resource validation node.\n");
2734 return ret;
2735 }
2736
2737 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2738 &cmd->body.mobid,
2739 cmd->body.offsetInBytes);
2740 }
2741
2742 /**
2743 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2744 *
2745 * @dev_priv: Pointer to a device private struct.
2746 * @sw_context: The software context being used for this batch.
2747 * @header: Pointer to the command header in the command stream.
2748 */
2749 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2750 struct vmw_sw_context *sw_context,
2751 SVGA3dCmdHeader *header)
2752 {
2753 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2754 container_of(header, typeof(*cmd), header);
2755 struct vmw_resource *ret;
2756
2757 ret = vmw_view_id_val_add(sw_context, vmw_view_sr,
2758 cmd->body.shaderResourceViewId);
2759
2760 return PTR_ERR_OR_ZERO(ret);
2761 }
2762
2763 /**
2764 * vmw_cmd_dx_transfer_from_buffer - Validate
2765 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2766 *
2767 * @dev_priv: Pointer to a device private struct.
2768 * @sw_context: The software context being used for this batch.
2769 * @header: Pointer to the command header in the command stream.
2770 */
2771 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2772 struct vmw_sw_context *sw_context,
2773 SVGA3dCmdHeader *header)
2774 {
2775 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2776 container_of(header, typeof(*cmd), header);
2777 int ret;
2778
2779 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2780 VMW_RES_DIRTY_NONE, user_surface_converter,
2781 &cmd->body.srcSid, NULL);
2782 if (ret != 0)
2783 return ret;
2784
2785 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2786 VMW_RES_DIRTY_SET, user_surface_converter,
2787 &cmd->body.destSid, NULL);
2788 }
2789
2790 /**
2791 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2792 *
2793 * @dev_priv: Pointer to a device private struct.
2794 * @sw_context: The software context being used for this batch.
2795 * @header: Pointer to the command header in the command stream.
2796 */
2797 static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2798 struct vmw_sw_context *sw_context,
2799 SVGA3dCmdHeader *header)
2800 {
2801 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2802 container_of(header, typeof(*cmd), header);
2803
2804 if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2805 return -EINVAL;
2806
2807 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2808 VMW_RES_DIRTY_SET, user_surface_converter,
2809 &cmd->body.surface.sid, NULL);
2810 }
2811
2812 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
2813 struct vmw_sw_context *sw_context,
2814 void *buf, uint32_t *size)
2815 {
2816 uint32_t size_remaining = *size;
2817 uint32_t cmd_id;
2818
2819 cmd_id = ((uint32_t *)buf)[0];
2820 switch (cmd_id) {
2821 case SVGA_CMD_UPDATE:
2822 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
2823 break;
2824 case SVGA_CMD_DEFINE_GMRFB:
2825 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
2826 break;
2827 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
2828 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
2829 break;
2830 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
2831 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
2832 break;
2833 default:
2834 VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
2835 return -EINVAL;
2836 }
2837
2838 if (*size > size_remaining) {
2839 VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
2840 cmd_id);
2841 return -EINVAL;
2842 }
2843
2844 if (unlikely(!sw_context->kernel)) {
2845 VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
2846 return -EPERM;
2847 }
2848
2849 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
2850 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
2851
2852 return 0;
2853 }
2854
2855 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
2856 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
2857 false, false, false),
2858 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
2859 false, false, false),
2860 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
2861 true, false, false),
2862 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
2863 true, false, false),
2864 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
2865 true, false, false),
2866 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
2867 false, false, false),
2868 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
2869 false, false, false),
2870 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
2871 true, false, false),
2872 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
2873 true, false, false),
2874 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
2875 true, false, false),
2876 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
2877 &vmw_cmd_set_render_target_check, true, false, false),
2878 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
2879 true, false, false),
2880 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
2881 true, false, false),
2882 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
2883 true, false, false),
2884 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
2885 true, false, false),
2886 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
2887 true, false, false),
2888 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
2889 true, false, false),
2890 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
2891 true, false, false),
2892 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
2893 false, false, false),
2894 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
2895 true, false, false),
2896 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
2897 true, false, false),
2898 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
2899 true, false, false),
2900 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
2901 true, false, false),
2902 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
2903 true, false, false),
2904 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
2905 true, false, false),
2906 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
2907 true, false, false),
2908 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
2909 true, false, false),
2910 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
2911 true, false, false),
2912 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
2913 true, false, false),
2914 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
2915 &vmw_cmd_blt_surf_screen_check, false, false, false),
2916 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
2917 false, false, false),
2918 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
2919 false, false, false),
2920 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
2921 false, false, false),
2922 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
2923 false, false, false),
2924 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
2925 false, false, false),
2926 VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
2927 false, false, false),
2928 VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
2929 false, false, false),
2930 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
2931 false, false, false),
2932 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
2933 false, false, false),
2934 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
2935 false, false, false),
2936 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
2937 false, false, false),
2938 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
2939 false, false, false),
2940 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
2941 false, false, false),
2942 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
2943 false, false, true),
2944 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
2945 false, false, true),
2946 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
2947 false, false, true),
2948 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
2949 false, false, true),
2950 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
2951 false, false, true),
2952 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
2953 false, false, true),
2954 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
2955 false, false, true),
2956 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
2957 false, false, true),
2958 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
2959 true, false, true),
2960 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
2961 false, false, true),
2962 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
2963 true, false, true),
2964 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
2965 &vmw_cmd_update_gb_surface, true, false, true),
2966 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
2967 &vmw_cmd_readback_gb_image, true, false, true),
2968 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
2969 &vmw_cmd_readback_gb_surface, true, false, true),
2970 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
2971 &vmw_cmd_invalidate_gb_image, true, false, true),
2972 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
2973 &vmw_cmd_invalidate_gb_surface, true, false, true),
2974 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
2975 false, false, true),
2976 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
2977 false, false, true),
2978 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
2979 false, false, true),
2980 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
2981 false, false, true),
2982 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
2983 false, false, true),
2984 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
2985 false, false, true),
2986 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
2987 true, false, true),
2988 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
2989 false, false, true),
2990 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
2991 false, false, false),
2992 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
2993 true, false, true),
2994 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
2995 true, false, true),
2996 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
2997 true, false, true),
2998 VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
2999 true, false, true),
3000 VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3001 true, false, true),
3002 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3003 false, false, true),
3004 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3005 false, false, true),
3006 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3007 false, false, true),
3008 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3009 false, false, true),
3010 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3011 false, false, true),
3012 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3013 false, false, true),
3014 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3015 false, false, true),
3016 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3017 false, false, true),
3018 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3019 false, false, true),
3020 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3021 false, false, true),
3022 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3023 true, false, true),
3024 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3025 false, false, true),
3026 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3027 false, false, true),
3028 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3029 false, false, true),
3030 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3031 false, false, true),
3032
3033 /* SM commands */
3034 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3035 false, false, true),
3036 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3037 false, false, true),
3038 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3039 false, false, true),
3040 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3041 false, false, true),
3042 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3043 false, false, true),
3044 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3045 &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3046 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3047 &vmw_cmd_dx_set_shader_res, true, false, true),
3048 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3049 true, false, true),
3050 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3051 true, false, true),
3052 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3053 true, false, true),
3054 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3055 true, false, true),
3056 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3057 true, false, true),
3058 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3059 &vmw_cmd_dx_cid_check, true, false, true),
3060 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3061 true, false, true),
3062 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3063 &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3064 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3065 &vmw_cmd_dx_set_index_buffer, true, false, true),
3066 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3067 &vmw_cmd_dx_set_rendertargets, true, false, true),
3068 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3069 true, false, true),
3070 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3071 &vmw_cmd_dx_cid_check, true, false, true),
3072 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3073 &vmw_cmd_dx_cid_check, true, false, true),
3074 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3075 true, false, true),
3076 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3077 true, false, true),
3078 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3079 true, false, true),
3080 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3081 &vmw_cmd_dx_cid_check, true, false, true),
3082 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3083 true, false, true),
3084 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3085 true, false, true),
3086 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3087 true, false, true),
3088 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3089 true, false, true),
3090 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3091 true, false, true),
3092 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3093 true, false, true),
3094 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3095 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3096 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3097 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3098 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3099 true, false, true),
3100 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3101 true, false, true),
3102 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3103 &vmw_cmd_dx_check_subresource, true, false, true),
3104 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3105 &vmw_cmd_dx_check_subresource, true, false, true),
3106 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3107 &vmw_cmd_dx_check_subresource, true, false, true),
3108 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3109 &vmw_cmd_dx_view_define, true, false, true),
3110 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3111 &vmw_cmd_dx_view_remove, true, false, true),
3112 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3113 &vmw_cmd_dx_view_define, true, false, true),
3114 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3115 &vmw_cmd_dx_view_remove, true, false, true),
3116 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3117 &vmw_cmd_dx_view_define, true, false, true),
3118 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3119 &vmw_cmd_dx_view_remove, true, false, true),
3120 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3121 &vmw_cmd_dx_so_define, true, false, true),
3122 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3123 &vmw_cmd_dx_cid_check, true, false, true),
3124 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3125 &vmw_cmd_dx_so_define, true, false, true),
3126 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3127 &vmw_cmd_dx_cid_check, true, false, true),
3128 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3129 &vmw_cmd_dx_so_define, true, false, true),
3130 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3131 &vmw_cmd_dx_cid_check, true, false, true),
3132 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3133 &vmw_cmd_dx_so_define, true, false, true),
3134 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3135 &vmw_cmd_dx_cid_check, true, false, true),
3136 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3137 &vmw_cmd_dx_so_define, true, false, true),
3138 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3139 &vmw_cmd_dx_cid_check, true, false, true),
3140 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3141 &vmw_cmd_dx_define_shader, true, false, true),
3142 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3143 &vmw_cmd_dx_destroy_shader, true, false, true),
3144 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3145 &vmw_cmd_dx_bind_shader, true, false, true),
3146 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3147 &vmw_cmd_dx_so_define, true, false, true),
3148 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3149 &vmw_cmd_dx_cid_check, true, false, true),
3150 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3151 true, false, true),
3152 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3153 &vmw_cmd_dx_set_so_targets, true, false, true),
3154 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3155 &vmw_cmd_dx_cid_check, true, false, true),
3156 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3157 &vmw_cmd_dx_cid_check, true, false, true),
3158 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3159 &vmw_cmd_buffer_copy_check, true, false, true),
3160 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3161 &vmw_cmd_pred_copy_check, true, false, true),
3162 VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3163 &vmw_cmd_dx_transfer_from_buffer,
3164 true, false, true),
3165 VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3166 true, false, true),
3167 };
3168
3169 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3170 {
3171 u32 cmd_id = ((u32 *) buf)[0];
3172
3173 if (cmd_id >= SVGA_CMD_MAX) {
3174 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3175 const struct vmw_cmd_entry *entry;
3176
3177 *size = header->size + sizeof(SVGA3dCmdHeader);
3178 cmd_id = header->id;
3179 if (cmd_id >= SVGA_3D_CMD_MAX)
3180 return false;
3181
3182 cmd_id -= SVGA_3D_CMD_BASE;
3183 entry = &vmw_cmd_entries[cmd_id];
3184 *cmd = entry->cmd_name;
3185 return true;
3186 }
3187
3188 switch (cmd_id) {
3189 case SVGA_CMD_UPDATE:
3190 *cmd = "SVGA_CMD_UPDATE";
3191 *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3192 break;
3193 case SVGA_CMD_DEFINE_GMRFB:
3194 *cmd = "SVGA_CMD_DEFINE_GMRFB";
3195 *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3196 break;
3197 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3198 *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3199 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3200 break;
3201 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3202 *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3203 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3204 break;
3205 default:
3206 *cmd = "UNKNOWN";
3207 *size = 0;
3208 return false;
3209 }
3210
3211 return true;
3212 }
3213
3214 static int vmw_cmd_check(struct vmw_private *dev_priv,
3215 struct vmw_sw_context *sw_context, void *buf,
3216 uint32_t *size)
3217 {
3218 uint32_t cmd_id;
3219 uint32_t size_remaining = *size;
3220 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3221 int ret;
3222 const struct vmw_cmd_entry *entry;
3223 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3224
3225 cmd_id = ((uint32_t *)buf)[0];
3226 /* Handle any none 3D commands */
3227 if (unlikely(cmd_id < SVGA_CMD_MAX))
3228 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3229
3230
3231 cmd_id = header->id;
3232 *size = header->size + sizeof(SVGA3dCmdHeader);
3233
3234 cmd_id -= SVGA_3D_CMD_BASE;
3235 if (unlikely(*size > size_remaining))
3236 goto out_invalid;
3237
3238 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3239 goto out_invalid;
3240
3241 entry = &vmw_cmd_entries[cmd_id];
3242 if (unlikely(!entry->func))
3243 goto out_invalid;
3244
3245 if (unlikely(!entry->user_allow && !sw_context->kernel))
3246 goto out_privileged;
3247
3248 if (unlikely(entry->gb_disable && gb))
3249 goto out_old;
3250
3251 if (unlikely(entry->gb_enable && !gb))
3252 goto out_new;
3253
3254 ret = entry->func(dev_priv, sw_context, header);
3255 if (unlikely(ret != 0)) {
3256 VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3257 cmd_id + SVGA_3D_CMD_BASE, ret);
3258 return ret;
3259 }
3260
3261 return 0;
3262 out_invalid:
3263 VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3264 cmd_id + SVGA_3D_CMD_BASE);
3265 return -EINVAL;
3266 out_privileged:
3267 VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3268 cmd_id + SVGA_3D_CMD_BASE);
3269 return -EPERM;
3270 out_old:
3271 VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3272 cmd_id + SVGA_3D_CMD_BASE);
3273 return -EINVAL;
3274 out_new:
3275 VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3276 cmd_id + SVGA_3D_CMD_BASE);
3277 return -EINVAL;
3278 }
3279
3280 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3281 struct vmw_sw_context *sw_context, void *buf,
3282 uint32_t size)
3283 {
3284 int32_t cur_size = size;
3285 int ret;
3286
3287 sw_context->buf_start = buf;
3288
3289 while (cur_size > 0) {
3290 size = cur_size;
3291 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3292 if (unlikely(ret != 0))
3293 return ret;
3294 buf = (void *)((unsigned long) buf + size);
3295 cur_size -= size;
3296 }
3297
3298 if (unlikely(cur_size != 0)) {
3299 VMW_DEBUG_USER("Command verifier out of sync.\n");
3300 return -EINVAL;
3301 }
3302
3303 return 0;
3304 }
3305
3306 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3307 {
3308 /* Memory is validation context memory, so no need to free it */
3309 INIT_LIST_HEAD(&sw_context->bo_relocations);
3310 }
3311
3312 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3313 {
3314 struct vmw_relocation *reloc;
3315 struct ttm_buffer_object *bo;
3316
3317 list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3318 bo = &reloc->vbo->base;
3319 switch (bo->mem.mem_type) {
3320 case TTM_PL_VRAM:
3321 reloc->location->offset += bo->offset;
3322 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3323 break;
3324 case VMW_PL_GMR:
3325 reloc->location->gmrId = bo->mem.start;
3326 break;
3327 case VMW_PL_MOB:
3328 *reloc->mob_loc = bo->mem.start;
3329 break;
3330 default:
3331 BUG();
3332 }
3333 }
3334 vmw_free_relocations(sw_context);
3335 }
3336
3337 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3338 uint32_t size)
3339 {
3340 if (likely(sw_context->cmd_bounce_size >= size))
3341 return 0;
3342
3343 if (sw_context->cmd_bounce_size == 0)
3344 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3345
3346 while (sw_context->cmd_bounce_size < size) {
3347 sw_context->cmd_bounce_size =
3348 PAGE_ALIGN(sw_context->cmd_bounce_size +
3349 (sw_context->cmd_bounce_size >> 1));
3350 }
3351
3352 vfree(sw_context->cmd_bounce);
3353 sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3354
3355 if (sw_context->cmd_bounce == NULL) {
3356 VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3357 sw_context->cmd_bounce_size = 0;
3358 return -ENOMEM;
3359 }
3360
3361 return 0;
3362 }
3363
3364 /**
3365 * vmw_execbuf_fence_commands - create and submit a command stream fence
3366 *
3367 * Creates a fence object and submits a command stream marker.
3368 * If this fails for some reason, We sync the fifo and return NULL.
3369 * It is then safe to fence buffers with a NULL pointer.
3370 *
3371 * If @p_handle is not NULL @file_priv must also not be NULL. Creates a
3372 * userspace handle if @p_handle is not NULL, otherwise not.
3373 */
3374
3375 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3376 struct vmw_private *dev_priv,
3377 struct vmw_fence_obj **p_fence,
3378 uint32_t *p_handle)
3379 {
3380 uint32_t sequence;
3381 int ret;
3382 bool synced = false;
3383
3384 /* p_handle implies file_priv. */
3385 BUG_ON(p_handle != NULL && file_priv == NULL);
3386
3387 ret = vmw_fifo_send_fence(dev_priv, &sequence);
3388 if (unlikely(ret != 0)) {
3389 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3390 synced = true;
3391 }
3392
3393 if (p_handle != NULL)
3394 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3395 sequence, p_fence, p_handle);
3396 else
3397 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3398
3399 if (unlikely(ret != 0 && !synced)) {
3400 (void) vmw_fallback_wait(dev_priv, false, false, sequence,
3401 false, VMW_FENCE_WAIT_TIMEOUT);
3402 *p_fence = NULL;
3403 }
3404
3405 return ret;
3406 }
3407
3408 /**
3409 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3410 *
3411 * @dev_priv: Pointer to a vmw_private struct.
3412 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3413 * @ret: Return value from fence object creation.
3414 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3415 * the information should be copied.
3416 * @fence: Pointer to the fenc object.
3417 * @fence_handle: User-space fence handle.
3418 * @out_fence_fd: exported file descriptor for the fence. -1 if not used
3419 * @sync_file: Only used to clean up in case of an error in this function.
3420 *
3421 * This function copies fence information to user-space. If copying fails, the
3422 * user-space struct drm_vmw_fence_rep::error member is hopefully left
3423 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
3424 * will hopefully be detected.
3425 *
3426 * Also if copying fails, user-space will be unable to signal the fence object
3427 * so we wait for it immediately, and then unreference the user-space reference.
3428 */
3429 void
3430 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3431 struct vmw_fpriv *vmw_fp, int ret,
3432 struct drm_vmw_fence_rep __user *user_fence_rep,
3433 struct vmw_fence_obj *fence, uint32_t fence_handle,
3434 int32_t out_fence_fd, struct sync_file *sync_file)
3435 {
3436 struct drm_vmw_fence_rep fence_rep;
3437
3438 if (user_fence_rep == NULL)
3439 return;
3440
3441 memset(&fence_rep, 0, sizeof(fence_rep));
3442
3443 fence_rep.error = ret;
3444 fence_rep.fd = out_fence_fd;
3445 if (ret == 0) {
3446 BUG_ON(fence == NULL);
3447
3448 fence_rep.handle = fence_handle;
3449 fence_rep.seqno = fence->base.seqno;
3450 vmw_update_seqno(dev_priv, &dev_priv->fifo);
3451 fence_rep.passed_seqno = dev_priv->last_read_seqno;
3452 }
3453
3454 /*
3455 * copy_to_user errors will be detected by user space not seeing
3456 * fence_rep::error filled in. Typically user-space would have pre-set
3457 * that member to -EFAULT.
3458 */
3459 ret = copy_to_user(user_fence_rep, &fence_rep,
3460 sizeof(fence_rep));
3461
3462 /*
3463 * User-space lost the fence object. We need to sync and unreference the
3464 * handle.
3465 */
3466 if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3467 if (sync_file)
3468 fput(sync_file->file);
3469
3470 if (fence_rep.fd != -1) {
3471 put_unused_fd(fence_rep.fd);
3472 fence_rep.fd = -1;
3473 }
3474
3475 ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
3476 TTM_REF_USAGE);
3477 VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3478 (void) vmw_fence_obj_wait(fence, false, false,
3479 VMW_FENCE_WAIT_TIMEOUT);
3480 }
3481 }
3482
3483 /**
3484 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3485 *
3486 * @dev_priv: Pointer to a device private structure.
3487 * @kernel_commands: Pointer to the unpatched command batch.
3488 * @command_size: Size of the unpatched command batch.
3489 * @sw_context: Structure holding the relocation lists.
3490 *
3491 * Side effects: If this function returns 0, then the command batch pointed to
3492 * by @kernel_commands will have been modified.
3493 */
3494 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3495 void *kernel_commands, u32 command_size,
3496 struct vmw_sw_context *sw_context)
3497 {
3498 void *cmd;
3499
3500 if (sw_context->dx_ctx_node)
3501 cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
3502 sw_context->dx_ctx_node->ctx->id);
3503 else
3504 cmd = VMW_FIFO_RESERVE(dev_priv, command_size);
3505
3506 if (!cmd)
3507 return -ENOMEM;
3508
3509 vmw_apply_relocations(sw_context);
3510 memcpy(cmd, kernel_commands, command_size);
3511 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3512 vmw_resource_relocations_free(&sw_context->res_relocations);
3513 vmw_fifo_commit(dev_priv, command_size);
3514
3515 return 0;
3516 }
3517
3518 /**
3519 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3520 * command buffer manager.
3521 *
3522 * @dev_priv: Pointer to a device private structure.
3523 * @header: Opaque handle to the command buffer allocation.
3524 * @command_size: Size of the unpatched command batch.
3525 * @sw_context: Structure holding the relocation lists.
3526 *
3527 * Side effects: If this function returns 0, then the command buffer represented
3528 * by @header will have been modified.
3529 */
3530 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3531 struct vmw_cmdbuf_header *header,
3532 u32 command_size,
3533 struct vmw_sw_context *sw_context)
3534 {
3535 u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3536 SVGA3D_INVALID_ID);
3537 void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3538 header);
3539
3540 vmw_apply_relocations(sw_context);
3541 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3542 vmw_resource_relocations_free(&sw_context->res_relocations);
3543 vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3544
3545 return 0;
3546 }
3547
3548 /**
3549 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3550 * submission using a command buffer.
3551 *
3552 * @dev_priv: Pointer to a device private structure.
3553 * @user_commands: User-space pointer to the commands to be submitted.
3554 * @command_size: Size of the unpatched command batch.
3555 * @header: Out parameter returning the opaque pointer to the command buffer.
3556 *
3557 * This function checks whether we can use the command buffer manager for
3558 * submission and if so, creates a command buffer of suitable size and copies
3559 * the user data into that buffer.
3560 *
3561 * On successful return, the function returns a pointer to the data in the
3562 * command buffer and *@header is set to non-NULL.
3563 *
3564 * If command buffers could not be used, the function will return the value of
3565 * @kernel_commands on function call. That value may be NULL. In that case, the
3566 * value of *@header will be set to NULL.
3567 *
3568 * If an error is encountered, the function will return a pointer error value.
3569 * If the function is interrupted by a signal while sleeping, it will return
3570 * -ERESTARTSYS casted to a pointer error value.
3571 */
3572 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3573 void __user *user_commands,
3574 void *kernel_commands, u32 command_size,
3575 struct vmw_cmdbuf_header **header)
3576 {
3577 size_t cmdbuf_size;
3578 int ret;
3579
3580 *header = NULL;
3581 if (command_size > SVGA_CB_MAX_SIZE) {
3582 VMW_DEBUG_USER("Command buffer is too large.\n");
3583 return ERR_PTR(-EINVAL);
3584 }
3585
3586 if (!dev_priv->cman || kernel_commands)
3587 return kernel_commands;
3588
3589 /* If possible, add a little space for fencing. */
3590 cmdbuf_size = command_size + 512;
3591 cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3592 kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
3593 header);
3594 if (IS_ERR(kernel_commands))
3595 return kernel_commands;
3596
3597 ret = copy_from_user(kernel_commands, user_commands, command_size);
3598 if (ret) {
3599 VMW_DEBUG_USER("Failed copying commands.\n");
3600 vmw_cmdbuf_header_free(*header);
3601 *header = NULL;
3602 return ERR_PTR(-EFAULT);
3603 }
3604
3605 return kernel_commands;
3606 }
3607
3608 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3609 struct vmw_sw_context *sw_context,
3610 uint32_t handle)
3611 {
3612 struct vmw_resource *res;
3613 int ret;
3614 unsigned int size;
3615
3616 if (handle == SVGA3D_INVALID_ID)
3617 return 0;
3618
3619 size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
3620 ret = vmw_validation_preload_res(sw_context->ctx, size);
3621 if (ret)
3622 return ret;
3623
3624 res = vmw_user_resource_noref_lookup_handle
3625 (dev_priv, sw_context->fp->tfile, handle,
3626 user_context_converter);
3627 if (IS_ERR(res)) {
3628 VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n",
3629 (unsigned int) handle);
3630 return PTR_ERR(res);
3631 }
3632
3633 ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
3634 if (unlikely(ret != 0))
3635 return ret;
3636
3637 sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
3638 sw_context->man = vmw_context_res_man(res);
3639
3640 return 0;
3641 }
3642
3643 int vmw_execbuf_process(struct drm_file *file_priv,
3644 struct vmw_private *dev_priv,
3645 void __user *user_commands, void *kernel_commands,
3646 uint32_t command_size, uint64_t throttle_us,
3647 uint32_t dx_context_handle,
3648 struct drm_vmw_fence_rep __user *user_fence_rep,
3649 struct vmw_fence_obj **out_fence, uint32_t flags)
3650 {
3651 struct vmw_sw_context *sw_context = &dev_priv->ctx;
3652 struct vmw_fence_obj *fence = NULL;
3653 struct vmw_cmdbuf_header *header;
3654 uint32_t handle = 0;
3655 int ret;
3656 int32_t out_fence_fd = -1;
3657 struct sync_file *sync_file = NULL;
3658 DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
3659
3660 vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
3661
3662 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3663 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
3664 if (out_fence_fd < 0) {
3665 VMW_DEBUG_USER("Failed to get a fence fd.\n");
3666 return out_fence_fd;
3667 }
3668 }
3669
3670 if (throttle_us) {
3671 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
3672 throttle_us);
3673
3674 if (ret)
3675 goto out_free_fence_fd;
3676 }
3677
3678 kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
3679 kernel_commands, command_size,
3680 &header);
3681 if (IS_ERR(kernel_commands)) {
3682 ret = PTR_ERR(kernel_commands);
3683 goto out_free_fence_fd;
3684 }
3685
3686 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
3687 if (ret) {
3688 ret = -ERESTARTSYS;
3689 goto out_free_header;
3690 }
3691
3692 sw_context->kernel = false;
3693 if (kernel_commands == NULL) {
3694 ret = vmw_resize_cmd_bounce(sw_context, command_size);
3695 if (unlikely(ret != 0))
3696 goto out_unlock;
3697
3698 ret = copy_from_user(sw_context->cmd_bounce, user_commands,
3699 command_size);
3700 if (unlikely(ret != 0)) {
3701 ret = -EFAULT;
3702 VMW_DEBUG_USER("Failed copying commands.\n");
3703 goto out_unlock;
3704 }
3705
3706 kernel_commands = sw_context->cmd_bounce;
3707 } else if (!header) {
3708 sw_context->kernel = true;
3709 }
3710
3711 sw_context->fp = vmw_fpriv(file_priv);
3712 INIT_LIST_HEAD(&sw_context->ctx_list);
3713 sw_context->cur_query_bo = dev_priv->pinned_bo;
3714 sw_context->last_query_ctx = NULL;
3715 sw_context->needs_post_query_barrier = false;
3716 sw_context->dx_ctx_node = NULL;
3717 sw_context->dx_query_mob = NULL;
3718 sw_context->dx_query_ctx = NULL;
3719 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
3720 INIT_LIST_HEAD(&sw_context->res_relocations);
3721 INIT_LIST_HEAD(&sw_context->bo_relocations);
3722
3723 if (sw_context->staged_bindings)
3724 vmw_binding_state_reset(sw_context->staged_bindings);
3725
3726 if (!sw_context->res_ht_initialized) {
3727 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
3728 if (unlikely(ret != 0))
3729 goto out_unlock;
3730
3731 sw_context->res_ht_initialized = true;
3732 }
3733
3734 INIT_LIST_HEAD(&sw_context->staged_cmd_res);
3735 sw_context->ctx = &val_ctx;
3736 ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
3737 if (unlikely(ret != 0))
3738 goto out_err_nores;
3739
3740 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
3741 command_size);
3742 if (unlikely(ret != 0))
3743 goto out_err_nores;
3744
3745 ret = vmw_resources_reserve(sw_context);
3746 if (unlikely(ret != 0))
3747 goto out_err_nores;
3748
3749 ret = vmw_validation_bo_reserve(&val_ctx, true);
3750 if (unlikely(ret != 0))
3751 goto out_err_nores;
3752
3753 ret = vmw_validation_bo_validate(&val_ctx, true);
3754 if (unlikely(ret != 0))
3755 goto out_err;
3756
3757 ret = vmw_validation_res_validate(&val_ctx, true);
3758 if (unlikely(ret != 0))
3759 goto out_err;
3760
3761 vmw_validation_drop_ht(&val_ctx);
3762
3763 ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
3764 if (unlikely(ret != 0)) {
3765 ret = -ERESTARTSYS;
3766 goto out_err;
3767 }
3768
3769 if (dev_priv->has_mob) {
3770 ret = vmw_rebind_contexts(sw_context);
3771 if (unlikely(ret != 0))
3772 goto out_unlock_binding;
3773 }
3774
3775 if (!header) {
3776 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
3777 command_size, sw_context);
3778 } else {
3779 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
3780 sw_context);
3781 header = NULL;
3782 }
3783 mutex_unlock(&dev_priv->binding_mutex);
3784 if (ret)
3785 goto out_err;
3786
3787 vmw_query_bo_switch_commit(dev_priv, sw_context);
3788 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
3789 (user_fence_rep) ? &handle : NULL);
3790 /*
3791 * This error is harmless, because if fence submission fails,
3792 * vmw_fifo_send_fence will sync. The error will be propagated to
3793 * user-space in @fence_rep
3794 */
3795 if (ret != 0)
3796 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3797
3798 vmw_execbuf_bindings_commit(sw_context, false);
3799 vmw_bind_dx_query_mob(sw_context);
3800 vmw_validation_res_unreserve(&val_ctx, false);
3801
3802 vmw_validation_bo_fence(sw_context->ctx, fence);
3803
3804 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
3805 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
3806
3807 /*
3808 * If anything fails here, give up trying to export the fence and do a
3809 * sync since the user mode will not be able to sync the fence itself.
3810 * This ensures we are still functionally correct.
3811 */
3812 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3813
3814 sync_file = sync_file_create(&fence->base);
3815 if (!sync_file) {
3816 VMW_DEBUG_USER("Sync file create failed for fence\n");
3817 put_unused_fd(out_fence_fd);
3818 out_fence_fd = -1;
3819
3820 (void) vmw_fence_obj_wait(fence, false, false,
3821 VMW_FENCE_WAIT_TIMEOUT);
3822 } else {
3823 /* Link the fence with the FD created earlier */
3824 fd_install(out_fence_fd, sync_file->file);
3825 }
3826 }
3827
3828 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
3829 user_fence_rep, fence, handle, out_fence_fd,
3830 sync_file);
3831
3832 /* Don't unreference when handing fence out */
3833 if (unlikely(out_fence != NULL)) {
3834 *out_fence = fence;
3835 fence = NULL;
3836 } else if (likely(fence != NULL)) {
3837 vmw_fence_obj_unreference(&fence);
3838 }
3839
3840 vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
3841 mutex_unlock(&dev_priv->cmdbuf_mutex);
3842
3843 /*
3844 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
3845 * in resource destruction paths.
3846 */
3847 vmw_validation_unref_lists(&val_ctx);
3848
3849 return 0;
3850
3851 out_unlock_binding:
3852 mutex_unlock(&dev_priv->binding_mutex);
3853 out_err:
3854 vmw_validation_bo_backoff(&val_ctx);
3855 out_err_nores:
3856 vmw_execbuf_bindings_commit(sw_context, true);
3857 vmw_validation_res_unreserve(&val_ctx, true);
3858 vmw_resource_relocations_free(&sw_context->res_relocations);
3859 vmw_free_relocations(sw_context);
3860 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
3861 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
3862 out_unlock:
3863 vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
3864 vmw_validation_drop_ht(&val_ctx);
3865 WARN_ON(!list_empty(&sw_context->ctx_list));
3866 mutex_unlock(&dev_priv->cmdbuf_mutex);
3867
3868 /*
3869 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
3870 * in resource destruction paths.
3871 */
3872 vmw_validation_unref_lists(&val_ctx);
3873 out_free_header:
3874 if (header)
3875 vmw_cmdbuf_header_free(header);
3876 out_free_fence_fd:
3877 if (out_fence_fd >= 0)
3878 put_unused_fd(out_fence_fd);
3879
3880 return ret;
3881 }
3882
3883 /**
3884 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
3885 *
3886 * @dev_priv: The device private structure.
3887 *
3888 * This function is called to idle the fifo and unpin the query buffer if the
3889 * normal way to do this hits an error, which should typically be extremely
3890 * rare.
3891 */
3892 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
3893 {
3894 VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
3895
3896 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
3897 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
3898 if (dev_priv->dummy_query_bo_pinned) {
3899 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
3900 dev_priv->dummy_query_bo_pinned = false;
3901 }
3902 }
3903
3904
3905 /**
3906 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
3907 * bo.
3908 *
3909 * @dev_priv: The device private structure.
3910 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
3911 * query barrier that flushes all queries touching the current buffer pointed to
3912 * by @dev_priv->pinned_bo
3913 *
3914 * This function should be used to unpin the pinned query bo, or as a query
3915 * barrier when we need to make sure that all queries have finished before the
3916 * next fifo command. (For example on hardware context destructions where the
3917 * hardware may otherwise leak unfinished queries).
3918 *
3919 * This function does not return any failure codes, but make attempts to do safe
3920 * unpinning in case of errors.
3921 *
3922 * The function will synchronize on the previous query barrier, and will thus
3923 * not finish until that barrier has executed.
3924 *
3925 * the @dev_priv->cmdbuf_mutex needs to be held by the current thread before
3926 * calling this function.
3927 */
3928 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
3929 struct vmw_fence_obj *fence)
3930 {
3931 int ret = 0;
3932 struct vmw_fence_obj *lfence = NULL;
3933 DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
3934
3935 if (dev_priv->pinned_bo == NULL)
3936 goto out_unlock;
3937
3938 ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
3939 false);
3940 if (ret)
3941 goto out_no_reserve;
3942
3943 ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
3944 false);
3945 if (ret)
3946 goto out_no_reserve;
3947
3948 ret = vmw_validation_bo_reserve(&val_ctx, false);
3949 if (ret)
3950 goto out_no_reserve;
3951
3952 if (dev_priv->query_cid_valid) {
3953 BUG_ON(fence != NULL);
3954 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
3955 if (ret)
3956 goto out_no_emit;
3957 dev_priv->query_cid_valid = false;
3958 }
3959
3960 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
3961 if (dev_priv->dummy_query_bo_pinned) {
3962 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
3963 dev_priv->dummy_query_bo_pinned = false;
3964 }
3965 if (fence == NULL) {
3966 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
3967 NULL);
3968 fence = lfence;
3969 }
3970 vmw_validation_bo_fence(&val_ctx, fence);
3971 if (lfence != NULL)
3972 vmw_fence_obj_unreference(&lfence);
3973
3974 vmw_validation_unref_lists(&val_ctx);
3975 vmw_bo_unreference(&dev_priv->pinned_bo);
3976
3977 out_unlock:
3978 return;
3979 out_no_emit:
3980 vmw_validation_bo_backoff(&val_ctx);
3981 out_no_reserve:
3982 vmw_validation_unref_lists(&val_ctx);
3983 vmw_execbuf_unpin_panic(dev_priv);
3984 vmw_bo_unreference(&dev_priv->pinned_bo);
3985 }
3986
3987 /**
3988 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
3989 *
3990 * @dev_priv: The device private structure.
3991 *
3992 * This function should be used to unpin the pinned query bo, or as a query
3993 * barrier when we need to make sure that all queries have finished before the
3994 * next fifo command. (For example on hardware context destructions where the
3995 * hardware may otherwise leak unfinished queries).
3996 *
3997 * This function does not return any failure codes, but make attempts to do safe
3998 * unpinning in case of errors.
3999 *
4000 * The function will synchronize on the previous query barrier, and will thus
4001 * not finish until that barrier has executed.
4002 */
4003 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4004 {
4005 mutex_lock(&dev_priv->cmdbuf_mutex);
4006 if (dev_priv->query_cid_valid)
4007 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4008 mutex_unlock(&dev_priv->cmdbuf_mutex);
4009 }
4010
4011 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
4012 struct drm_file *file_priv)
4013 {
4014 struct vmw_private *dev_priv = vmw_priv(dev);
4015 struct drm_vmw_execbuf_arg *arg = data;
4016 int ret;
4017 struct dma_fence *in_fence = NULL;
4018
4019 /*
4020 * Extend the ioctl argument while maintaining backwards compatibility:
4021 * We take different code paths depending on the value of arg->version.
4022 *
4023 * Note: The ioctl argument is extended and zeropadded by core DRM.
4024 */
4025 if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
4026 arg->version == 0)) {
4027 VMW_DEBUG_USER("Incorrect execbuf version.\n");
4028 return -EINVAL;
4029 }
4030
4031 switch (arg->version) {
4032 case 1:
4033 /* For v1 core DRM have extended + zeropadded the data */
4034 arg->context_handle = (uint32_t) -1;
4035 break;
4036 case 2:
4037 default:
4038 /* For v2 and later core DRM would have correctly copied it */
4039 break;
4040 }
4041
4042 /* If imported a fence FD from elsewhere, then wait on it */
4043 if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4044 in_fence = sync_file_get_fence(arg->imported_fence_fd);
4045
4046 if (!in_fence) {
4047 VMW_DEBUG_USER("Cannot get imported fence\n");
4048 return -EINVAL;
4049 }
4050
4051 ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
4052 if (ret)
4053 goto out;
4054 }
4055
4056 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
4057 if (unlikely(ret != 0))
4058 return ret;
4059
4060 ret = vmw_execbuf_process(file_priv, dev_priv,
4061 (void __user *)(unsigned long)arg->commands,
4062 NULL, arg->command_size, arg->throttle_us,
4063 arg->context_handle,
4064 (void __user *)(unsigned long)arg->fence_rep,
4065 NULL, arg->flags);
4066
4067 ttm_read_unlock(&dev_priv->reservation_sem);
4068 if (unlikely(ret != 0))
4069 goto out;
4070
4071 vmw_kms_cursor_post_execbuf(dev_priv);
4072
4073 out:
4074 if (in_fence)
4075 dma_fence_put(in_fence);
4076 return ret;
4077 }
4078