/*	$NetBSD: vmwgfx_context.c,v 1.1.1.2.28.1 2018/09/06 06:56:34 pgoyette Exp $	*/

/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_context.c,v 1.1.1.2.28.1 2018/09/06 06:56:34 pgoyette Exp $");

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "ttm/ttm_placement.h"

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state *cbs;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
	spinlock_t cotable_lock;
	struct vmw_dma_buffer *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
	.res_type = vmw_res_dx_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "dx contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_dx_context_create,
	.destroy = vmw_dx_context_destroy,
	.bind = vmw_dx_context_bind,
	.unbind = vmw_dx_context_unbind
};

/**
 * Context management:
 */

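/**
 * vmw_context_cotables_unref - Drop a context's references to its cotables
 *
 * @uctx: Pointer to the user context.
 *
 * Clears each cotable pointer under the cotable lock and drops the
 * corresponding resource reference outside of it.
 */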
static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
{
	struct vmw_resource *res;
	int i;

	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[i];
		uctx->cotables[i] = NULL;
		spin_unlock(&uctx->cotable_lock);

		if (res)
			vmw_resource_unreference(&res);
	}
}

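/**
 * vmw_hw_context_destroy - Destroy a hardware context
 *
 * @res: Pointer to the context resource.
 *
 * For guest-backed and DX contexts, tears down the command buffer resource
 * manager, kills all bindings and calls the resource type's destroy
 * callback. For legacy contexts, emits a SVGA_3D_CMD_CONTEXT_DESTROY
 * command to the device FIFO.
 */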
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	if (res->func->destroy == vmw_gb_context_destroy ||
	    res->func->destroy == vmw_dx_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		vmw_cmdbuf_res_man_destroy(uctx->man);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_state_kill(uctx->cbs);
		(void) res->func->destroy(res);
		mutex_unlock(&dev_priv->binding_mutex);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		vmw_context_cotables_unref(uctx);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_dec(dev_priv);
}

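/**
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource
 *
 * @dev_priv: Pointer to the device private structure.
 * @dx: Whether to initialize a DX context rather than a GB context.
 * @res: Pointer to the already allocated context resource.
 * @res_free: Destructor to call on error, or NULL to use kfree().
 *
 * Sets up the backup size, the command buffer resource manager (if the
 * device supports MOBs), the binding state and, for DX contexts, the
 * cotables. Returns 0 on success, negative error code on failure.
 */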
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       bool dx,
			       struct vmw_resource *res,
			       void (*res_free)(struct vmw_resource *res))
{
	int ret, i;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
			    SVGA3D_CONTEXT_DATA_SIZE);
	ret = vmw_resource_init(dev_priv, res, true,
				res_free,
				dx ? &vmw_dx_context_func :
				&vmw_gb_context_func);
	if (unlikely(ret != 0))
		goto out_err;

	if (dev_priv->has_mob) {
		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
		if (IS_ERR(uctx->man)) {
			ret = PTR_ERR(uctx->man);
			uctx->man = NULL;
			goto out_err;
		}
	}

	uctx->cbs = vmw_binding_state_alloc(dev_priv);
	if (IS_ERR(uctx->cbs)) {
		ret = PTR_ERR(uctx->cbs);
		goto out_err;
	}

	spin_lock_init(&uctx->cotable_lock);

	if (dx) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
							      &uctx->res, i);
			if (unlikely(uctx->cotables[i] == NULL)) {
				ret = -ENOMEM;
				goto out_cotables;
			}
		}
	}

	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_cotables:
	vmw_context_cotables_unref(uctx);
out_err:
	if (res_free)
		res_free(res);
	else
		kfree(res);
	return ret;
}

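/**
 * vmw_context_init - Initialize a context resource
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the already allocated context resource.
 * @res_free: Destructor to call on error, or NULL to use kfree().
 * @dx: Whether a DX context is requested.
 *
 * Dispatches to vmw_gb_context_init() on guest-backed hardware. Otherwise
 * initializes a legacy context and emits a SVGA_3D_CMD_CONTEXT_DEFINE
 * command to the device FIFO. Returns 0 on success, negative error code
 * on failure.
 */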
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free)(struct vmw_resource *res),
			    bool dx)
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, dx, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}

/*
 * GB context.
 */

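/**
 * vmw_gb_context_create - Create a hardware guest-backed context
 *
 * @res: Pointer to the context resource.
 *
 * Allocates a device context id and emits a SVGA_3D_CMD_DEFINE_GB_CONTEXT
 * command. A no-op if the context has already been created (res->id != -1).
 */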
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

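/**
 * vmw_gb_context_bind - Bind a guest-backed context to its backing MOB
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the reserved backing buffer object.
 *
 * Emits a SVGA_3D_CMD_BIND_GB_CONTEXT command pointing the context at the
 * MOB that backs it.
 */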
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}
	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

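/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its backing MOB
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to read the context contents back first.
 * @val_buf: Validation buffer holding the reserved backing buffer object.
 *
 * Scrubs all bindings, optionally reads the context state back into the
 * backing MOB, binds the context to SVGA3D_INVALID_ID and fences the
 * backup buffer.
 */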
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_binding_state_scrub(uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

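/**
 * vmw_gb_context_destroy - Destroy a hardware guest-backed context
 *
 * @res: Pointer to the context resource.
 *
 * Emits a SVGA_3D_CMD_DESTROY_GB_CONTEXT command and releases the device
 * context id. If the context was used for queries, the cached query
 * context id is invalidated.
 */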
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * DX context.
 */

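/**
 * vmw_dx_context_create - Create a hardware DX context
 *
 * @res: Pointer to the context resource.
 *
 * Allocates a device context id and emits a SVGA_3D_CMD_DX_DEFINE_CONTEXT
 * command. A no-op if the context has already been created.
 */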
static int vmw_dx_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

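/**
 * vmw_dx_context_bind - Bind a DX context to its backing MOB
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the reserved backing buffer object.
 *
 * Emits a SVGA_3D_CMD_DX_BIND_CONTEXT command pointing the context at the
 * MOB that backs it.
 */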
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * COtables must be unbound before their context, but unbinding requires
 * the backup buffer being reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so scrub all bindings first so
 * that doesn't have to be done later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
				   bool readback)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	int i;

	vmw_binding_state_scrub(uctx->cbs);
	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
		struct vmw_resource *res;

		/* Avoid racing with ongoing cotable destruction. */
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[vmw_cotable_scrub_order[i]];
		if (res)
			res = vmw_resource_reference_unless_doomed(res);
		spin_unlock(&uctx->cotable_lock);
		if (!res)
			continue;

		WARN_ON(vmw_cotable_scrub(res, readback));
		vmw_resource_unreference(&res);
	}
}

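/**
 * vmw_dx_context_unbind - Unbind a DX context from its backing MOB
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to read back context and query state before unbinding.
 * @val_buf: Validation buffer holding the reserved backing buffer object.
 *
 * Scrubs bindings and cotables, optionally reads back pending query
 * results and the context state, binds the context to SVGA3D_INVALID_ID
 * and fences the backup buffer.
 */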
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_dx_context_scrub_cotables(res, readback);

	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
	    readback) {
		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
		if (vmw_query_readback_all(uctx->dx_query_mob))
			DRM_ERROR("Failed to read back query states\n");
	}

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

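/**
 * vmw_dx_context_destroy - Destroy a hardware DX context
 *
 * @res: Pointer to the context resource.
 *
 * Emits a SVGA_3D_CMD_DX_DESTROY_CONTEXT command and releases the device
 * context id. If the context was used for queries, the cached query
 * context id is invalidated.
 */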
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/**
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}

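/**
 * vmw_user_context_free - Free all software state of a user context
 *
 * @res: Pointer to the embedded context resource.
 *
 * Frees the binding state, drops the query MOB association and returns
 * the accounted memory to the global memory accounting.
 */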
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	if (ctx->cbs)
		vmw_binding_state_free(ctx->cbs);

	(void) vmw_context_bind_dx_query(res, NULL);

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
		container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

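/**
 * vmw_context_destroy_ioctl - Ioctl to destroy a user context
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_context_arg identifying the
 * context by its handle.
 * @file_priv: Identifies the calling file.
 *
 * Drops the calling file's reference on the context base object; the
 * context is destroyed once the last reference is gone.
 */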
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

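/**
 * vmw_context_define - Create a new user context and its base object
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_context_arg in which the
 * new context handle is returned.
 * @file_priv: Identifies the calling file.
 * @dx: Whether to create a DX context.
 *
 * Accounts for and allocates a struct vmw_user_context, initializes the
 * embedded resource and registers it as a base object visible to user
 * space. Returns 0 on success, negative error code on failure.
 */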
static int vmw_context_define(struct drm_device *dev, void *data,
			      struct drm_file *file_priv, bool dx)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (!dev_priv->has_dx && dx) {
		DRM_ERROR("DX contexts not supported by device.\n");
		return -EINVAL;
	}

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of contexts anyway.
	 */

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(ctx == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	return vmw_context_define(dev, data, file_priv, false);
}

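/**
 * vmw_extended_context_define_ioctl - Ioctl to create a legacy or DX context
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a union drm_vmw_extended_context_arg selecting
 * the requested context type and receiving the new context handle.
 * @file_priv: Identifies the calling file.
 */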
int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
	struct drm_vmw_context_arg *rep = &arg->rep;

	switch (arg->req) {
	case drm_vmw_context_legacy:
		return vmw_context_define(dev, rep, file_priv, false);
	case drm_vmw_context_dx:
		return vmw_context_define(dev, rep, file_priv, true);
	default:
		break;
	}
	return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);

	return vmw_binding_state_list(uctx->cbs);
}

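/**
 * vmw_context_res_man - Return the context's command buffer resource manager
 *
 * @ctx: The context resource
 *
 * May return NULL for legacy contexts, for which no manager is created.
 */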
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->man;
}

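/**
 * vmw_context_cotable - Return a referenced cotable of a DX context
 *
 * @ctx: The context resource
 * @cotable_type: The cotable type to look up
 *
 * Returns a referenced pointer to the requested cotable, or an error
 * pointer if @cotable_type is out of range.
 */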
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
					 SVGACOTableType cotable_type)
{
	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
		return ERR_PTR(-EINVAL);

	return vmw_resource_reference
		(container_of(ctx, struct vmw_user_context, res)->
		 cotables[cotable_type]);
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query -
 * Sets query MOB for the context. If @mob is NULL, then this function will
 * remove the association between the MOB and the context. This function
 * assumes the binding_mutex is held.
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter. 0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
			      struct vmw_dma_buffer *mob)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	if (mob == NULL) {
		if (uctx->dx_query_mob) {
			uctx->dx_query_mob->dx_query_ctx = NULL;
			vmw_dmabuf_unreference(&uctx->dx_query_mob);
			uctx->dx_query_mob = NULL;
		}

		return 0;
	}

	/* Can only have one MOB per context for queries */
	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
		return -EINVAL;

	mob->dx_query_ctx = ctx_res;

	if (!uctx->dx_query_mob)
		uctx->dx_query_mob = vmw_dmabuf_reference(mob);

	return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	return uctx->dx_query_mob;
}