/*	$NetBSD: vmwgfx_bo.c,v 1.1.1.1 2021/12/18 20:15:54 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011-2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_bo.c,v 1.1.1.1 2021/12/18 20:15:54 riastradh Exp $");

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "ttm_object.h"


/**
 * struct vmw_user_buffer_object - User-space-visible buffer object
 *
 * @prime: The prime object providing user visibility.
 * @vbo: The struct vmw_buffer_object
 */
struct vmw_user_buffer_object {
	struct ttm_prime_object prime;
	struct vmw_buffer_object vbo;
};


/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_buffer_object, base);
}


/**
 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_user_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
 * buffer object.
 */
static struct vmw_user_buffer_object *
vmw_user_buffer_object(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
}


/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @placement: The placement to pin it.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
			    struct vmw_buffer_object *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, &ctx);

	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);

err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}


/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->pin_count > 0) {
		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
		goto out_unreserve;
	}

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}


/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}


/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to pin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_buffer_object *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;
	uint32_t new_flags;

	place = vmw_vram_placement.placement[0];
	place.lpfn = bo->num_pages;
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0 &&
	    buf->pin_count == 0) {
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(&placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->offset != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}


/**
 * vmw_bo_unpin - Unpin the given buffer. Does not move the buffer.
 *
 * This function takes the reservation_sem in read mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to unpin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_buffer_object *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
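
/*
 * Minimal usage sketch (not part of the driver): pair one of the pin
 * helpers above with vmw_bo_unpin() once the device no longer needs the
 * buffer to stay resident. The function name is illustrative only; it
 * assumes the caller already holds a reference on @buf, and any
 * -ERESTARTSYS from an interrupted wait is passed straight back.
 */
static inline int vmw_bo_pin_unpin_example(struct vmw_private *dev_priv,
					   struct vmw_buffer_object *buf)
{
	int ret;

	/* Pin the buffer into VRAM, waiting interruptibly. */
	ret = vmw_bo_pin_in_vram(dev_priv, buf, true);
	if (ret)
		return ret;

	/* ... the device accesses the buffer here ... */

	/* Release the pin; the buffer itself is not moved. */
	return vmw_bo_unpin(dev_priv, buf, true);
}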

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->offset;
	} else {
		ptr->gmrId = bo->mem.start;
		ptr->offset = 0;
	}
}
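
/*
 * Illustrative sketch (not driver code): vmw_bo_get_guest_ptr() requires the
 * buffer to be pinned or reserved, so a caller holding neither would
 * typically bracket the call with a reservation as below. The function name
 * is hypothetical.
 */
static inline int vmw_bo_guest_ptr_example(struct vmw_buffer_object *buf,
					   SVGAGuestPtr *ptr)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0))
		return ret;

	/* Translate the current placement into a gmrId/offset pair. */
	vmw_bo_get_guest_ptr(bo, ptr);

	ttm_bo_unreserve(bo);
	return 0;
}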


/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (pin) {
		if (vbo->pin_count++ > 0)
			return;
	} else {
		WARN_ON(vbo->pin_count <= 0);
		if (--vbo->pin_count > 0)
			return;
	}

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
		| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
	if (pin)
		pl.flags |= TTM_PL_FLAG_NO_EVICT;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
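
/*
 * Illustrative sketch (not driver code): vmw_bo_pin_reserved() may only be
 * called with the buffer reserved, so an unlocked caller would wrap it as
 * below. Pin and unpin calls nest through the pin_count handling above.
 * The function name is hypothetical.
 */
static inline int vmw_bo_pin_reserved_example(struct vmw_buffer_object *vbo,
					      bool pin)
{
	struct ttm_buffer_object *bo = &vbo->base;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (unlikely(ret != 0))
		return ret;

	/* Pin or unpin in place; the buffer is never moved. */
	vmw_bo_pin_reserved(vbo, pin);

	ttm_bo_unreserve(bo);
	return 0;
}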


/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 *
 */
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	bool not_used;
	void *virtual;
	int ret;

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d.\n", ret);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}
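
/*
 * Illustrative sketch (not driver code): while the buffer is reserved (or
 * pinned), the cached kernel map returned by vmw_bo_map_and_cache() can be
 * written through directly; the map is torn down automatically on move,
 * swapout or destruction, so no explicit vmw_bo_unmap() is needed here.
 * The function name and the -ENOMEM choice on map failure are hypothetical.
 */
static inline int vmw_bo_map_example(struct vmw_buffer_object *vbo,
				     const void *src, size_t len)
{
	void *virtual;
	int ret;

	ret = ttm_bo_reserve(&vbo->base, true, false, NULL);
	if (unlikely(ret != 0))
		return ret;

	virtual = vmw_bo_map_and_cache(vbo);
	if (virtual)
		memcpy(virtual, src, len);

	ttm_bo_unreserve(&vbo->base);
	return virtual ? 0 : -ENOMEM;
}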


/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
	if (vbo->map.bo == NULL)
		return;

	ttm_bo_kunmap(&vbo->map);
}


/**
 * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
			      bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_buffer_object));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
			TTM_OBJ_EXTRA_SIZE;
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}


/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	WARN_ON(vmw_bo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
	vmw_bo_unmap(vmw_bo);
	kfree(vmw_bo);
}


/**
 * vmw_user_bo_destroy - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
	struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;

	WARN_ON(vbo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
	vmw_bo_unmap(vbo);
	ttm_prime_object_kfree(vmw_user_bo, prime);
}


/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptibly.
 * @bo_free: The buffer object destructor.
 * Return: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
		struct vmw_buffer_object *vmw_bo,
		size_t size, struct ttm_placement *placement,
		bool interruptible,
		void (*bo_free)(struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_bo_destroy);

	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));

	acc_size = vmw_bo_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->base.priority = 3;
	vmw_bo->res_tree = RB_ROOT;

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible, acc_size,
			  NULL, NULL, bo_free);
	return ret;
}
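
/*
 * Illustrative sketch (not driver code): allocating and initializing a
 * kernel-internal buffer object with vmw_bo_init(). As noted above,
 * vmw_bo_init() frees the object through @bo_free on failure, so the error
 * path must not kfree() it again. The function name is hypothetical.
 */
static inline int vmw_bo_create_example(struct vmw_private *dev_priv,
					size_t size,
					struct vmw_buffer_object **p_vbo)
{
	struct vmw_buffer_object *vbo;
	int ret;

	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_bo_init(dev_priv, vbo, size, &vmw_sys_placement,
			  true, vmw_bo_bo_free);
	if (unlikely(ret != 0))
		return ret;	/* vbo has already been freed by bo_free. */

	*p_vbo = vbo;
	return 0;
}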


/**
 * vmw_user_bo_release - TTM reference base object release callback for
 * vmw user buffer objects
 *
 * @p_base: The TTM base object pointer about to be unreferenced.
 *
 * Clears the TTM base object pointer and drops the reference the
 * base object has on the underlying struct vmw_buffer_object.
 */
static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_put(&vmw_user_bo->vbo.base);
}


/**
 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
 * for vmw user buffer objects
 *
 * @base: Pointer to the TTM base object
 * @ref_type: Reference type of the reference reaching zero.
 *
 * Called when user-space drops its last synccpu reference on the buffer
 * object, either explicitly or as part of a cleanup file close.
 */
static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
					enum ttm_ref_type ref_type)
{
	struct vmw_user_buffer_object *user_bo;

	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		atomic_dec(&user_bo->vbo.cpu_writers);
		break;
	default:
		WARN_ONCE(true, "Undefined buffer object reference release.\n");
	}
}


/**
 * vmw_user_bo_alloc - Allocate a user buffer object
 *
 * @dev_priv: Pointer to a struct vmw_private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the buffer object.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * should be assigned.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * placed, or NULL if no such pointer is required.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t size,
		      bool shareable,
		      uint32_t *handle,
		      struct vmw_buffer_object **p_vbo,
		      struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *user_bo;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(!user_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
			  (dev_priv->has_mob) ?
			  &vmw_sys_placement :
			  &vmw_vram_sys_placement, true,
			  &vmw_user_bo_destroy);
	if (unlikely(ret != 0))
		return ret;

	ttm_bo_get(&user_bo->vbo.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_bo_release,
				    &vmw_user_bo_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_put(&user_bo->vbo.base);
		goto out_no_base_object;
	}

	*p_vbo = &user_bo->vbo;
	if (p_base) {
		*p_base = &user_bo->prime.base;
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.handle;

out_no_base_object:
	return ret;
}


/**
 * vmw_user_bo_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
			      struct ttm_object_file *tfile)
{
	struct vmw_user_buffer_object *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_bo_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_buffer_object(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}


/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, Negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
				    struct ttm_object_file *tfile,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &user_bo->vbo.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		long lret;

		lret = dma_resv_wait_timeout_rcu
			(bo->base.resv, true, true,
			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&user_bo->vbo.cpu_writers);

	ttm_bo_unreserve(bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed, false);
	if (ret != 0 || existed)
		atomic_dec(&user_bo->vbo.cpu_writers);

	return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_bo_synccpu_release(uint32_t handle,
				       struct ttm_object_file *tfile,
				       uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}


/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_buffer_object *vbo;
	struct vmw_user_buffer_object *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_base_object *buffer_base;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
					 &buffer_base);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(vbo, struct vmw_user_buffer_object,
				       vbo);
		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_bo_unreference(&vbo);
		ttm_base_object_unref(&buffer_base);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}


/**
 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and allocates a
 * struct vmw_user_buffer_object bo.
 */
int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
		(union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_buffer_object *vbo;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				req->size, false, &handle, &vbo,
				NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_bo_unreference(&vbo);

out_no_bo:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}


/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
		(struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}


/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_buffer_object should be placed.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * placed, or NULL if no such pointer is required.
 * Return: Zero on success, Negative error code on error.
 *
 * Both the output base object pointer and the vmw buffer object pointer
 * will be refcounted.
 */
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
		       uint32_t handle, struct vmw_buffer_object **out,
		       struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_get(&vmw_user_bo->vbo.base);
	if (p_base)
		*p_base = base;
	else
		ttm_base_object_unref(&base);
	*out = &vmw_user_bo->vbo;

	return 0;
}
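
/*
 * Illustrative sketch (not driver code): translating a user-space handle
 * into a refcounted buffer object pointer and dropping the reference again
 * when done, mirroring the pattern used by the ioctl helpers in this file.
 * The function name is hypothetical.
 */
static inline int vmw_user_bo_lookup_example(struct ttm_object_file *tfile,
					     uint32_t handle)
{
	struct vmw_buffer_object *vbo;
	int ret;

	ret = vmw_user_bo_lookup(tfile, handle, &vbo, NULL);
	if (unlikely(ret != 0))
		return ret;

	/* ... use vbo; the reference keeps it alive here ... */

	vmw_bo_unreference(&vbo);
	return 0;
}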

/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 *
 * This function looks up a struct vmw_user_buffer_object and returns a
 * pointer to the struct vmw_buffer_object it derives from without
 * refcounting the pointer. The returned pointer is only valid until
 * vmw_user_bo_noref_release() is called, and the object pointed to by the
 * returned pointer may be doomed. Any persistent usage of the object
 * requires a refcount to be taken using ttm_bo_reference_unless_doomed().
 * Iff this function returns successfully it needs to be paired with
 * vmw_user_bo_noref_release() and no sleeping or scheduling functions may
 * be called in between these function calls.
 *
 * Return: A struct vmw_buffer_object pointer if successful or negative
 * error pointer on failure.
 */
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-ESRCH);
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_noref_release();
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	return &vmw_user_bo->vbo;
}
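
/*
 * Illustrative sketch (not driver code): the non-refcounting lookup must be
 * paired with vmw_user_bo_noref_release(), and no sleeping or scheduling
 * calls may happen in between, as described above. vmw_user_bo_noref_release()
 * is assumed here to be the release helper named in the comment above and
 * declared alongside this function. The function name is hypothetical.
 */
static inline int vmw_user_bo_noref_example(struct ttm_object_file *tfile,
					    u32 handle)
{
	struct vmw_buffer_object *vbo;

	vbo = vmw_user_bo_noref_lookup(tfile, handle);
	if (IS_ERR(vbo))
		return PTR_ERR(vbo);

	/* ... short, non-sleeping use of vbo only ... */

	vmw_user_bo_noref_release();
	return 0;
}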

/**
 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
 *
 * @tfile: The TTM object file to register the handle with.
 * @vbo: The embedded vmw buffer object.
 * @handle: Pointer to where the new handle should be placed.
 * Return: Zero on success, Negative error code on error.
 */
int vmw_user_bo_reference(struct ttm_object_file *tfile,
			  struct vmw_buffer_object *vbo,
			  uint32_t *handle)
{
	struct vmw_user_buffer_object *user_bo;

	if (vbo->base.destroy != vmw_user_bo_destroy)
		return -EINVAL;

	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);

	*handle = user_bo->prime.base.handle;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL, false);
}


/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;

	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
		dma_fence_put(&fence->base);
	} else
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
}
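
/*
 * Illustrative sketch (not driver code): fencing a single buffer after
 * command submission. vmw_bo_fence_single() requires the buffer to be
 * reserved and does not unreserve it, so the caller does that explicitly.
 * Passing a NULL fence asks the helper to insert one into the command
 * stream itself. The function name is hypothetical.
 */
static inline int vmw_bo_fence_example(struct ttm_buffer_object *bo)
{
	int ret;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0))
		return ret;

	/* ... submit commands that touch @bo here ... */

	vmw_bo_fence_single(bo, NULL);
	ttm_bo_unreserve(bo);
	return 0;
}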


/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_buffer_object *vbo;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				args->size, false, &args->handle,
				&vbo, NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	vmw_bo_unreference(&vbo);
out_no_bo:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}


/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_buffer_object *out_buf;
	int ret;

	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
	vmw_bo_unreference(&out_buf);
	return 0;
}


/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}


/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Is @bo embedded in a struct vmw_buffer_object? */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	/* Kill any cached kernel maps before swapout */
	vmw_bo_unmap(vmw_buffer_object(bo));
}


/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_mem_reg *mem)
{
	struct vmw_buffer_object *vbo;

	if (mem == NULL)
		return;

	/* Make sure @bo is embedded in a struct vmw_buffer_object? */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	vbo = container_of(bo, struct vmw_buffer_object, base);

	/*
	 * Kill any cached kernel maps before move to or from VRAM.
	 * With other types of moves, the underlying pages stay the same,
	 * and the map can be kept.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If we're moving a backup MOB out of MOB placement, then make sure we
	 * read back all resource content first, and unbind the MOB from
	 * the resource.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}