/*	$NetBSD: vmwgfx_mob.c,v 1.1.1.2 2018/08/27 01:35:00 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright 2012-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_mob.c,v 1.1.1.2 2018/08/27 01:35:00 riastradh Exp $");

#include "vmwgfx_drv.h"

/*
 * If we set up the screen target otable, screen objects stop working.
 */

#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))

#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2
#else
#define VMW_PPN_SIZE 4
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2
#endif

/*
 * struct vmw_mob - Structure containing page table and metadata for a
 * Guest Memory OBject.
 *
 * @pt_bo: Buffer object holding the page table.
 * @num_pages: Number of pages that make up the page table.
 * @pt_level: The indirection level of the page table. 0-2.
 * @pt_root_page: DMA address of the level 0 page of the page table.
 * @id: Device id of the mob.
 */
struct vmw_mob {
	struct ttm_buffer_object *pt_bo;
	unsigned long num_pages;
	unsigned pt_level;
	dma_addr_t pt_root_page;
	uint32_t id;
};

/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 * @enabled: Whether the table is set up on the device.
 */
static const struct vmw_otable pre_dx_tables[] = {
	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
};

static const struct vmw_otable dx_tables[] = {
	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
	{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
};

static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob);
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages);

/*
 * vmw_setup_otable_base - Issue an object table base setup command to
 * the device
 *
 * @dev_priv: Pointer to a device private structure
 * @type: Type of object table base
 * @otable_bo: Buffer object holding the object tables
 * @offset: Start of table offset into @otable_bo
 * @otable: Pointer to otable metadata
 *
 * This function returns -ENOMEM if it fails to reserve fifo space,
 * and may block waiting for fifo space.
 */
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
				 SVGAOTableType type,
				 struct ttm_buffer_object *otable_bo,
				 unsigned long offset,
				 struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase64 body;
	} *cmd;
	struct vmw_mob *mob;
	const struct vmw_sg_table *vsgt;
	struct vmw_piter iter;
	int ret;

	BUG_ON(otable->page_table != NULL);

	vsgt = vmw_bo_sg_table(otable_bo);
	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
	WARN_ON(!vmw_piter_next(&iter));

	mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
	if (unlikely(mob == NULL)) {
		DRM_ERROR("Failed creating OTable page table.\n");
		return -ENOMEM;
	}

	if (otable->size <= PAGE_SIZE) {
		/* Single page: point the device directly at the data page. */
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else if (vsgt->num_regions == 1) {
		/* One contiguous DMA region: no page table needed. */
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else {
		/* Scattered pages: build a real page table. */
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			goto out_no_populate;

		vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
		/* Translate the depth to the 64-bit format if applicable. */
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = otable->size;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = mob->pt_level;

	/*
	 * The device doesn't support this, but the otable size is
	 * determined at compile-time, so this BUG shouldn't trigger
	 * randomly.
	 */
	BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	otable->page_table = mob;

	return 0;

out_no_fifo:
out_no_populate:
	vmw_mob_destroy(mob);
	return ret;
}
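
/*
 * An illustrative walk through the addressing-mode selection in
 * vmw_setup_otable_base() above, assuming 4 KiB pages (the sizes here
 * are examples only): a 16 KiB otable whose four backing pages form a
 * single contiguous DMA region (vsgt->num_regions == 1) is handed to
 * the device as SVGA3D_MOBFMT_RANGE with baseAddress set to the PPN of
 * the first page; a real page table is built only when the backing
 * pages are scattered across multiple regions.
 */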

/*
 * vmw_takedown_otable_base - Issue an object table base takedown command
 * to the device
 *
 * @dev_priv: Pointer to a device private structure
 * @type: Type of object table base
 * @otable: Pointer to otable metadata
 */
static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
				     SVGAOTableType type,
				     struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase body;
	} *cmd;
	struct ttm_buffer_object *bo;

	if (otable->page_table == NULL)
		return;

	bo = otable->page_table->pt_bo;
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for OTable "
			  "takedown.\n");
		return;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = 0;
	cmd->body.sizeInBytes = 0;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	if (bo) {
		int ret;

		ret = ttm_bo_reserve(bo, false, true, false, NULL);
		BUG_ON(ret != 0);

		vmw_fence_single_bo(bo, NULL);
		ttm_bo_unreserve(bo);
	}

	vmw_mob_destroy(otable->page_table);
	otable->page_table = NULL;
}

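/*
 * vmw_otable_batch_setup - Set up a batch of object tables
 *
 * @dev_priv: Pointer to a device private structure
 * @batch: Pointer to the otable batch to set up
 *
 * Packs all enabled otables of the batch into a single buffer object at
 * page-aligned offsets, populates and DMA-maps that buffer, and then
 * issues a setup command for each table at its offset into the buffer.
 */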
static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
				  struct vmw_otable_batch *batch)
{
	unsigned long offset;
	unsigned long bo_size;
	struct vmw_otable *otables = batch->otables;
	SVGAOTableType i;
	int ret;

	bo_size = 0;
	for (i = 0; i < batch->num_otables; ++i) {
		if (!otables[i].enabled)
			continue;

		otables[i].size =
			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
		bo_size += otables[i].size;
	}

	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, NULL,
			    &batch->otable_bo);

	if (unlikely(ret != 0))
		goto out_no_bo;

	ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL);
	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(batch->otable_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(batch->otable_bo);

	offset = 0;
	for (i = 0; i < batch->num_otables; ++i) {
		if (!batch->otables[i].enabled)
			continue;

		ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
					    offset,
					    &otables[i]);
		if (unlikely(ret != 0))
			goto out_no_setup;
		offset += otables[i].size;
	}

	return 0;

out_unreserve:
	ttm_bo_unreserve(batch->otable_bo);
out_no_setup:
	for (i = 0; i < batch->num_otables; ++i) {
		if (batch->otables[i].enabled)
			vmw_takedown_otable_base(dev_priv, i,
						 &batch->otables[i]);
	}

	ttm_bo_unref(&batch->otable_bo);
out_no_bo:
	return ret;
}

/*
 * vmw_otables_setup - Set up guest backed memory object tables
 *
 * @dev_priv: Pointer to a device private structure
 *
 * Takes care of the device guest backed surface initialization by
 * setting up the guest backed memory object tables.
 * Returns 0 on success and various error codes on failure. A successful return
 * means the object tables can be taken down using the vmw_otables_takedown
 * function.
 */
int vmw_otables_setup(struct vmw_private *dev_priv)
{
	struct vmw_otable **otables = &dev_priv->otable_batch.otables;
	int ret;

	if (dev_priv->has_dx) {
		*otables = kmalloc(sizeof(dx_tables), GFP_KERNEL);
		if (*otables == NULL)
			return -ENOMEM;

		memcpy(*otables, dx_tables, sizeof(dx_tables));
		dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
	} else {
		*otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL);
		if (*otables == NULL)
			return -ENOMEM;

		memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables));
		dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
	}

	ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
	if (unlikely(ret != 0))
		goto out_setup;

	return 0;

out_setup:
	kfree(*otables);
	return ret;
}

static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
				      struct vmw_otable_batch *batch)
{
	SVGAOTableType i;
	struct ttm_buffer_object *bo = batch->otable_bo;
	int ret;

	for (i = 0; i < batch->num_otables; ++i)
		if (batch->otables[i].enabled)
			vmw_takedown_otable_base(dev_priv, i,
						 &batch->otables[i]);

	ret = ttm_bo_reserve(bo, false, true, false, NULL);
	BUG_ON(ret != 0);

	vmw_fence_single_bo(bo, NULL);
	ttm_bo_unreserve(bo);

	ttm_bo_unref(&batch->otable_bo);
}

/*
 * vmw_otables_takedown - Take down guest backed memory object tables
 *
 * @dev_priv: Pointer to a device private structure
 *
 * Take down the Guest Memory Object tables.
 */
void vmw_otables_takedown(struct vmw_private *dev_priv)
{
	vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
	kfree(dev_priv->otable_batch.otables);
}

/*
 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
 * needed for a guest backed memory object.
 *
 * @data_pages: Number of data pages in the memory object buffer.
 */
static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
{
	unsigned long data_size = data_pages * PAGE_SIZE;
	unsigned long tot_size = 0;

	while (likely(data_size > PAGE_SIZE)) {
		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
		data_size *= VMW_PPN_SIZE;
		tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
	}

	return tot_size >> PAGE_SHIFT;
}
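
/*
 * Illustrative arithmetic for vmw_mob_calculate_pt_pages(), assuming
 * 4 KiB pages and VMW_PPN_SIZE == 8 (the CONFIG_64BIT case):
 *
 *   A 1 GiB buffer has 262144 data pages.
 *   Pass 1: DIV_ROUND_UP(1 GiB, 4 KiB) * 8 = 2 MiB of level-1 entries,
 *           i.e. 512 page table pages.
 *   Pass 2: DIV_ROUND_UP(2 MiB, 4 KiB) * 8 = 4096 bytes, i.e. one
 *           level-2 page.
 *   4096 is not greater than PAGE_SIZE, so the loop stops, for a total
 *   of 513 page table pages.
 */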

/*
 * vmw_mob_create - Create a mob, but don't populate it.
 *
 * @data_pages: Number of data pages of the underlying buffer object.
 */
struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
	struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);

	if (unlikely(mob == NULL))
		return NULL;

	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);

	return mob;
}

/*
 * vmw_mob_pt_populate - Populate the mob pagetable
 *
 * @dev_priv: Pointer to a device private structure
 * @mob: Pointer to the mob whose pagetable we want to populate.
 *
 * This function allocates memory to be used for the pagetable, and
 * adjusts TTM memory accounting accordingly. Returns -ENOMEM if
 * memory resources aren't sufficient, and may cause TTM buffer objects
 * to be swapped out via the TTM memory accounting function.
 */
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob)
{
	int ret;

	BUG_ON(mob->pt_bo != NULL);

	ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, NULL, &mob->pt_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL);
	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(mob->pt_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(mob->pt_bo);

	return 0;

out_unreserve:
	ttm_bo_unreserve(mob->pt_bo);
	ttm_bo_unref(&mob->pt_bo);

	return ret;
}

/**
 * vmw_mob_assign_ppn - Assign a value to a page table entry
 *
 * @addr: Pointer to pointer to page table entry.
 * @val: The DMA address of the page whose PPN is entered.
 *
 * Assigns a value to a page table entry pointed to by *@addr and increments
 * *@addr according to the page table entry size.
 */
#if (VMW_PPN_SIZE == 8)
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
	*((u64 *) *addr) = val >> PAGE_SHIFT;
	*addr += 2;
}
#else
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
	*(*addr)++ = val >> PAGE_SHIFT;
}
#endif
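
/*
 * Note on the 64-bit variant above: each entry is a 64-bit PPN, so the
 * value is stored through a u64 cast and the u32 cursor advances by two
 * slots. The store assumes the CPU byte order matches the layout the
 * device expects for 64-bit entries, as on the little-endian x86 hosts
 * the SVGA device virtualizes.
 */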

/*
 * vmw_mob_build_pt - Build a pagetable
 *
 * @data_iter: Page iterator over the underlying buffer
 * object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 * @pt_iter: Page iterator over the page table pages.
 *
 * Returns the number of page table pages actually used.
 * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
 */
static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
				      unsigned long num_data_pages,
				      struct vmw_piter *pt_iter)
{
	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
	unsigned long pt_page;
	u32 *addr, *save_addr;
	unsigned long i;
	struct page *page;

	for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
		page = vmw_piter_page(pt_iter);

		save_addr = addr = kmap_atomic(page);

		for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
			vmw_mob_assign_ppn(&addr,
					   vmw_piter_dma_addr(data_iter));
			if (unlikely(--num_data_pages == 0))
				break;
			WARN_ON(!vmw_piter_next(data_iter));
		}
		kunmap_atomic(save_addr);
		vmw_piter_next(pt_iter);
	}

	return num_pt_pages;
}
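
/*
 * A worked example for vmw_mob_build_pt(), assuming 4 KiB pages and
 * VMW_PPN_SIZE == 8: each page table page holds PAGE_SIZE / VMW_PPN_SIZE
 * = 512 entries, so 100000 data pages are described by
 * DIV_ROUND_UP(100000 * 8, 4096) = 196 page table pages at the next
 * level.
 */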

/*
 * vmw_mob_pt_setup - Set up a multilevel mob pagetable
 *
 * @mob: Pointer to a mob whose page table needs setting up.
 * @data_iter: Page iterator over the buffer object's data
 * pages.
 * @num_data_pages: Number of buffer object data pages.
 *
 * Sets up a multilevel mob page table, one page table level per loop
 * iteration.
 */
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages)
{
	unsigned long num_pt_pages = 0;
	struct ttm_buffer_object *bo = mob->pt_bo;
	struct vmw_piter save_pt_iter;
	struct vmw_piter pt_iter;
	const struct vmw_sg_table *vsgt;
	int ret;

	ret = ttm_bo_reserve(bo, false, true, false, NULL);
	BUG_ON(ret != 0);

	vsgt = vmw_bo_sg_table(bo);
	vmw_piter_start(&pt_iter, vsgt, 0);
	BUG_ON(!vmw_piter_next(&pt_iter));
	mob->pt_level = 0;
	while (likely(num_data_pages > 1)) {
		++mob->pt_level;
		BUG_ON(mob->pt_level > 2);
		save_pt_iter = pt_iter;
		num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
						&pt_iter);
		data_iter = save_pt_iter;
		num_data_pages = num_pt_pages;
	}

	mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
	ttm_bo_unreserve(bo);
}
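
/*
 * Continuing the example above through vmw_mob_pt_setup(): the loop
 * runs once per level until a single page can address everything.
 * 100000 data pages -> 196 level-1 pages (pt_level 1) -> 1 level-2 page
 * (pt_level 2). save_pt_iter is left at the first page written in the
 * final pass, and its DMA address becomes pt_root_page.
 */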

/*
 * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
 *
 * @mob: Pointer to a mob to destroy.
 */
void vmw_mob_destroy(struct vmw_mob *mob)
{
	if (mob->pt_bo)
		ttm_bo_unref(&mob->pt_bo);
	kfree(mob);
}

/*
 * vmw_mob_unbind - Hide a mob from the device.
 *
 * @dev_priv: Pointer to a device private.
 * @mob: Pointer to the mob to unbind.
 */
void vmw_mob_unbind(struct vmw_private *dev_priv,
		    struct vmw_mob *mob)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBMob body;
	} *cmd;
	int ret;
	struct ttm_buffer_object *bo = mob->pt_bo;

	if (bo) {
		ret = ttm_bo_reserve(bo, false, true, false, NULL);
		/*
		 * No one else should be using this buffer.
		 */
		BUG_ON(ret != 0);
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for Memory "
			  "Object unbinding.\n");
	} else {
		cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.mobid = mob->id;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}
	if (bo) {
		vmw_fence_single_bo(bo, NULL);
		ttm_bo_unreserve(bo);
	}
	vmw_fifo_resource_dec(dev_priv);
}

/*
 * vmw_mob_bind - Make a mob visible to the device after first
 * populating it if necessary.
 *
 * @dev_priv: Pointer to a device private.
 * @mob: Pointer to the mob we're making visible.
 * @vsgt: Pointer to a struct vmw_sg_table describing the data pages of
 * the underlying buffer object.
 * @num_data_pages: Number of data pages of the underlying buffer
 * object.
 * @mob_id: Device id of the mob to bind.
 *
 * This function is intended to be interfaced with the ttm_tt backend
 * code.
 */
int vmw_mob_bind(struct vmw_private *dev_priv,
		 struct vmw_mob *mob,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_data_pages,
		 int32_t mob_id)
{
	int ret;
	bool pt_set_up = false;
	struct vmw_piter data_iter;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBMob64 body;
	} *cmd;

	mob->id = mob_id;
	vmw_piter_start(&data_iter, vsgt, 0);
	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;

	if (likely(num_data_pages == 1)) {
		/* Single page: point the device directly at the data page. */
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (vsgt->num_regions == 1) {
		/* One contiguous DMA region: no page table needed. */
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (unlikely(mob->pt_bo == NULL)) {
		/* Scattered pages: build a real page table. */
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			return ret;

		vmw_mob_pt_setup(mob, data_iter, num_data_pages);
		pt_set_up = true;
		/* Translate the depth to the 64-bit format if applicable. */
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	vmw_fifo_resource_inc(dev_priv);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for Memory "
			  "Object binding.\n");
		goto out_no_cmd_space;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.mobid = mob_id;
	cmd->body.ptDepth = mob->pt_level;
	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;

out_no_cmd_space:
	vmw_fifo_resource_dec(dev_priv);
	if (pt_set_up)
		ttm_bo_unref(&mob->pt_bo);

	return -ENOMEM;
}