1 /* $NetBSD: vmwgfx_resource.c,v 1.1.1.2.28.1 2018/09/06 06:56:34 pgoyette Exp $ */
2
3 /**************************************************************************
4 *
5 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
23 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
24 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
25 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
26 * USE OR OTHER DEALINGS IN THE SOFTWARE.
27 *
28 **************************************************************************/
29
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: vmwgfx_resource.c,v 1.1.1.2.28.1 2018/09/06 06:56:34 pgoyette Exp $");
32
33 #include "vmwgfx_drv.h"
34 #include <drm/vmwgfx_drm.h>
35 #include <drm/ttm/ttm_object.h>
36 #include <drm/ttm/ttm_placement.h>
37 #include <drm/drmP.h>
38 #include "vmwgfx_resource_priv.h"
39 #include "vmwgfx_binding.h"
40
41 #define VMW_RES_EVICT_ERR_COUNT 10
42
43 struct vmw_user_dma_buffer {
44 struct ttm_prime_object prime;
45 struct vmw_dma_buffer dma;
46 };
47
48 struct vmw_bo_user_rep {
49 uint32_t handle;
50 uint64_t map_handle;
51 };
52
53 struct vmw_stream {
54 struct vmw_resource res;
55 uint32_t stream_id;
56 };
57
58 struct vmw_user_stream {
59 struct ttm_base_object base;
60 struct vmw_stream stream;
61 };
62
63
64 static uint64_t vmw_user_stream_size;
65
66 static const struct vmw_res_func vmw_stream_func = {
67 .res_type = vmw_res_stream,
68 .needs_backup = false,
69 .may_evict = false,
70 .type_name = "video streams",
71 .backup_placement = NULL,
72 .create = NULL,
73 .destroy = NULL,
74 .bind = NULL,
75 .unbind = NULL
76 };
77
78 static inline struct vmw_dma_buffer *
79 vmw_dma_buffer(struct ttm_buffer_object *bo)
80 {
81 return container_of(bo, struct vmw_dma_buffer, base);
82 }
83
84 static inline struct vmw_user_dma_buffer *
85 vmw_user_dma_buffer(struct ttm_buffer_object *bo)
86 {
87 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
88 return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
89 }
90
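/**
 * vmw_resource_reference - Take a reference on a resource.
 *
 * @res: The resource to reference.
 *
 * Returns @res with its reference count incremented.
 */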
91 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
92 {
93 kref_get(&res->kref);
94 return res;
95 }
96
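/**
 * vmw_resource_reference_unless_doomed - Take a reference on a resource
 * unless its reference count has already dropped to zero.
 *
 * @res: The resource to reference.
 *
 * Returns @res on success, NULL if the resource is already being destroyed.
 */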
97 struct vmw_resource *
98 vmw_resource_reference_unless_doomed(struct vmw_resource *res)
99 {
100 return kref_get_unless_zero(&res->kref) ? res : NULL;
101 }
102
103 /**
104 * vmw_resource_release_id - release a resource id to the id manager.
105 *
106 * @res: Pointer to the resource.
107 *
108 * Release the resource id to the resource id manager and set res->id to -1.
109 */
110 void vmw_resource_release_id(struct vmw_resource *res)
111 {
112 struct vmw_private *dev_priv = res->dev_priv;
113 struct idr *idr = &dev_priv->res_idr[res->func->res_type];
114
115 write_lock(&dev_priv->resource_lock);
116 if (res->id != -1)
117 idr_remove(idr, res->id);
118 res->id = -1;
119 write_unlock(&dev_priv->resource_lock);
120 }
121
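/**
 * vmw_resource_release - Kref release callback for resources.
 *
 * @kref: The embedded kref of the resource whose last reference was dropped.
 *
 * Unbinds and unreferences any backup buffer, destroys the hardware
 * resource, releases the resource id and finally invokes the resource
 * destructor (or kfree() if none was given).
 */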
122 static void vmw_resource_release(struct kref *kref)
123 {
124 struct vmw_resource *res =
125 container_of(kref, struct vmw_resource, kref);
126 struct vmw_private *dev_priv = res->dev_priv;
127 int id;
128 struct idr *idr = &dev_priv->res_idr[res->func->res_type];
129
130 write_lock(&dev_priv->resource_lock);
131 res->avail = false;
132 list_del_init(&res->lru_head);
133 write_unlock(&dev_priv->resource_lock);
134 if (res->backup) {
135 struct ttm_buffer_object *bo = &res->backup->base;
136
137 ttm_bo_reserve(bo, false, false, false, NULL);
138 if (!list_empty(&res->mob_head) &&
139 res->func->unbind != NULL) {
140 struct ttm_validate_buffer val_buf;
141
142 val_buf.bo = bo;
143 val_buf.shared = false;
144 res->func->unbind(res, false, &val_buf);
145 }
146 res->backup_dirty = false;
147 list_del_init(&res->mob_head);
148 ttm_bo_unreserve(bo);
149 vmw_dmabuf_unreference(&res->backup);
150 }
151
152 if (likely(res->hw_destroy != NULL)) {
153 mutex_lock(&dev_priv->binding_mutex);
154 vmw_binding_res_list_kill(&res->binding_head);
155 mutex_unlock(&dev_priv->binding_mutex);
156 res->hw_destroy(res);
157 }
158
159 id = res->id;
160 if (res->res_free != NULL)
161 res->res_free(res);
162 else
163 kfree(res);
164
165 write_lock(&dev_priv->resource_lock);
166 if (id != -1)
167 idr_remove(idr, id);
168 write_unlock(&dev_priv->resource_lock);
169 }
170
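/**
 * vmw_resource_unreference - Drop a reference on a resource and clear
 * the caller's pointer.
 *
 * @p_res: Pointer to the resource pointer. Set to NULL on return.
 */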
171 void vmw_resource_unreference(struct vmw_resource **p_res)
172 {
173 struct vmw_resource *res = *p_res;
174
175 *p_res = NULL;
176 kref_put(&res->kref, vmw_resource_release);
177 }
178
179
180 /**
181 * vmw_resource_alloc_id - allocate a resource id from the id manager.
182 *
183 * @res: Pointer to the resource.
184 *
185 * Allocate the lowest free resource id from the resource id manager, and set
186 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
187 */
188 int vmw_resource_alloc_id(struct vmw_resource *res)
189 {
190 struct vmw_private *dev_priv = res->dev_priv;
191 int ret;
192 struct idr *idr = &dev_priv->res_idr[res->func->res_type];
193
194 BUG_ON(res->id != -1);
195
196 idr_preload(GFP_KERNEL);
197 write_lock(&dev_priv->resource_lock);
198
199 ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
200 if (ret >= 0)
201 res->id = ret;
202
203 write_unlock(&dev_priv->resource_lock);
204 idr_preload_end();
205 return ret < 0 ? ret : 0;
206 }
207
208 /**
209 * vmw_resource_init - initialize a struct vmw_resource
210 *
211 * @dev_priv: Pointer to a device private struct.
212 * @res: The struct vmw_resource to initialize.
213 * @obj_type: Resource object type.
214 * @delay_id: Boolean whether to defer device id allocation until
215 * the first validation.
216 * @res_free: Resource destructor.
217 * @func: Resource function table.
218 */
219 int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
220 bool delay_id,
221 void (*res_free) (struct vmw_resource *res),
222 const struct vmw_res_func *func)
223 {
224 kref_init(&res->kref);
225 res->hw_destroy = NULL;
226 res->res_free = res_free;
227 res->avail = false;
228 res->dev_priv = dev_priv;
229 res->func = func;
230 INIT_LIST_HEAD(&res->lru_head);
231 INIT_LIST_HEAD(&res->mob_head);
232 INIT_LIST_HEAD(&res->binding_head);
233 res->id = -1;
234 res->backup = NULL;
235 res->backup_offset = 0;
236 res->backup_dirty = false;
237 res->res_dirty = false;
238 if (delay_id)
239 return 0;
240 else
241 return vmw_resource_alloc_id(res);
242 }
243
244 /**
245 * vmw_resource_activate - Activate a newly created resource.
246 *
247 * @res: Pointer to the newly created resource
248 * @hw_destroy: Destroy function. NULL if none.
249 *
250 * Activate a resource after the hardware has been made aware of it.
251 * Set the destroy function to @hw_destroy. Typically this frees the
252 * resource and destroys the hardware resources associated with it.
253 * Activate basically means that the function vmw_resource_lookup will
254 * find it.
255 */
256 void vmw_resource_activate(struct vmw_resource *res,
257 void (*hw_destroy) (struct vmw_resource *))
258 {
259 struct vmw_private *dev_priv = res->dev_priv;
260
261 write_lock(&dev_priv->resource_lock);
262 res->avail = true;
263 res->hw_destroy = hw_destroy;
264 write_unlock(&dev_priv->resource_lock);
265 }
266
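/**
 * vmw_resource_lookup - Look up a resource by its id.
 *
 * @dev_priv: Pointer to a device private struct.
 * @idr: The idr in which to look up the id.
 * @id: The resource id.
 *
 * Returns a referenced pointer to the resource if it exists and is
 * available, NULL otherwise.
 */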
267 static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
268 struct idr *idr, int id)
269 {
270 struct vmw_resource *res;
271
272 read_lock(&dev_priv->resource_lock);
273 res = idr_find(idr, id);
274 if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
275 res = NULL;
276
277 read_unlock(&dev_priv->resource_lock);
278
279 if (unlikely(res == NULL))
280 return NULL;
281
282 return res;
283 }
284
285 /**
286 * vmw_user_resource_lookup_handle - look up a struct vmw_resource from a
287 * TTM user-space handle and perform basic type checks
288 *
289 * @dev_priv: Pointer to a device private struct
290 * @tfile: Pointer to a struct ttm_object_file identifying the caller
291 * @handle: The TTM user-space handle
292 * @converter: Pointer to an object describing the resource type
293 * @p_res: On successful return the location pointed to will contain
294 * a pointer to a refcounted struct vmw_resource.
295 *
296 * If the handle can't be found or is associated with an incorrect resource
297 * type, -EINVAL will be returned.
298 */
299 int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
300 struct ttm_object_file *tfile,
301 uint32_t handle,
302 const struct vmw_user_resource_conv
303 *converter,
304 struct vmw_resource **p_res)
305 {
306 struct ttm_base_object *base;
307 struct vmw_resource *res;
308 int ret = -EINVAL;
309
310 base = ttm_base_object_lookup(tfile, handle);
311 if (unlikely(base == NULL))
312 return -EINVAL;
313
314 if (unlikely(ttm_base_object_type(base) != converter->object_type))
315 goto out_bad_resource;
316
317 res = converter->base_obj_to_res(base);
318
319 read_lock(&dev_priv->resource_lock);
320 if (!res->avail || res->res_free != converter->res_free) {
321 read_unlock(&dev_priv->resource_lock);
322 goto out_bad_resource;
323 }
324
325 kref_get(&res->kref);
326 read_unlock(&dev_priv->resource_lock);
327
328 *p_res = res;
329 ret = 0;
330
331 out_bad_resource:
332 ttm_base_object_unref(&base);
333
334 return ret;
335 }
336
337 /**
338 * Helper function that looks up either a surface or a dmabuf.
339 *
340 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
341 */
342 int vmw_user_lookup_handle(struct vmw_private *dev_priv,
343 struct ttm_object_file *tfile,
344 uint32_t handle,
345 struct vmw_surface **out_surf,
346 struct vmw_dma_buffer **out_buf)
347 {
348 struct vmw_resource *res;
349 int ret;
350
351 BUG_ON(*out_surf || *out_buf);
352
353 ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
354 user_surface_converter,
355 &res);
356 if (!ret) {
357 *out_surf = vmw_res_to_srf(res);
358 return 0;
359 }
360
361 *out_surf = NULL;
362 ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
363 return ret;
364 }
365
366 /**
367 * Buffer management.
368 */
369
370 /**
371 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
372 *
373 * @dev_priv: Pointer to a struct vmw_private identifying the device.
374 * @size: The requested buffer size.
375 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
376 */
377 static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
378 bool user)
379 {
380 static size_t struct_size, user_struct_size;
381 size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
382 size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
383
384 if (unlikely(struct_size == 0)) {
385 size_t backend_size = ttm_round_pot(vmw_tt_size);
386
387 struct_size = backend_size +
388 ttm_round_pot(sizeof(struct vmw_dma_buffer));
389 user_struct_size = backend_size +
390 ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
391 }
392
393 if (dev_priv->map_mode == vmw_dma_alloc_coherent)
394 page_array_size +=
395 ttm_round_pot(num_pages * sizeof(dma_addr_t));
396
397 return ((user) ? user_struct_size : struct_size) +
398 page_array_size;
399 }
400
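/**
 * vmw_dmabuf_bo_free - TTM destroy callback for ordinary dma buffers.
 *
 * @bo: The TTM buffer object embedded in a struct vmw_dma_buffer.
 */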
401 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
402 {
403 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
404
405 kfree(vmw_bo);
406 }
407
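/**
 * vmw_user_dmabuf_destroy - TTM destroy callback for user dma buffers.
 *
 * @bo: The TTM buffer object embedded in a struct vmw_user_dma_buffer.
 */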
408 static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
409 {
410 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
411
412 ttm_prime_object_kfree(vmw_user_bo, prime);
413 }
414
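/**
 * vmw_dmabuf_init - Initialize a struct vmw_dma_buffer.
 *
 * @dev_priv: Pointer to a device private struct.
 * @vmw_bo: The buffer to initialize.
 * @size: The requested buffer size in bytes.
 * @placement: The initial TTM placement.
 * @interruptible: Whether waits during initialization should be interruptible.
 * @bo_free: TTM destroy callback; either vmw_dmabuf_bo_free or
 * vmw_user_dmabuf_destroy.
 */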
415 int vmw_dmabuf_init(struct vmw_private *dev_priv,
416 struct vmw_dma_buffer *vmw_bo,
417 size_t size, struct ttm_placement *placement,
418 bool interruptible,
419 void (*bo_free) (struct ttm_buffer_object *bo))
420 {
421 struct ttm_bo_device *bdev = &dev_priv->bdev;
422 size_t acc_size;
423 int ret;
424 bool user = (bo_free == &vmw_user_dmabuf_destroy);
425
426 BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
427
428 acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
429 memset(vmw_bo, 0, sizeof(*vmw_bo));
430
431 INIT_LIST_HEAD(&vmw_bo->res_list);
432
433 ret = ttm_bo_init(bdev, &vmw_bo->base, size,
434 ttm_bo_type_device, placement,
435 0, interruptible,
436 NULL, acc_size, NULL, NULL, bo_free);
437 return ret;
438 }
439
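/**
 * vmw_user_dmabuf_release - TTM base object release callback for user
 * dma buffers.
 *
 * @p_base: Pointer to the base object pointer; set to NULL, and the
 * underlying buffer object is unreferenced.
 */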
440 static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
441 {
442 struct vmw_user_dma_buffer *vmw_user_bo;
443 struct ttm_base_object *base = *p_base;
444 struct ttm_buffer_object *bo;
445
446 *p_base = NULL;
447
448 if (unlikely(base == NULL))
449 return;
450
451 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
452 prime.base);
453 bo = &vmw_user_bo->dma.base;
454 ttm_bo_unref(&bo);
455 }
456
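/**
 * vmw_user_dmabuf_ref_obj_release - TTM ref object release callback for
 * user dma buffers.
 *
 * @base: The base object of the user dma buffer.
 * @ref_type: The type of the reference being released. Only
 * TTM_REF_SYNCCPU_WRITE is expected; it releases a previous synccpu
 * write grab.
 */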
457 static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
458 enum ttm_ref_type ref_type)
459 {
460 struct vmw_user_dma_buffer *user_bo;
461 user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
462
463 switch (ref_type) {
464 case TTM_REF_SYNCCPU_WRITE:
465 ttm_bo_synccpu_write_release(&user_bo->dma.base);
466 break;
467 default:
468 BUG();
469 }
470 }
471
472 /**
473 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
474 *
475 * @dev_priv: Pointer to a struct vmw_private.
476 * @tfile: Pointer to a struct ttm_object_file on which to register the user
477 * object.
478 * @size: Size of the dma buffer.
479 * @shareable: Boolean whether the buffer is shareable with other open files.
480 * @handle: Pointer to where the handle value should be assigned.
481 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
482 * should be assigned.
483 */
484 int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
485 struct ttm_object_file *tfile,
486 uint32_t size,
487 bool shareable,
488 uint32_t *handle,
489 struct vmw_dma_buffer **p_dma_buf,
490 struct ttm_base_object **p_base)
491 {
492 struct vmw_user_dma_buffer *user_bo;
493 struct ttm_buffer_object *tmp;
494 int ret;
495
496 user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
497 if (unlikely(user_bo == NULL)) {
498 DRM_ERROR("Failed to allocate a buffer.\n");
499 return -ENOMEM;
500 }
501
502 ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
503 (dev_priv->has_mob) ?
504 &vmw_sys_placement :
505 &vmw_vram_sys_placement, true,
506 &vmw_user_dmabuf_destroy);
507 if (unlikely(ret != 0))
508 return ret;
509
510 tmp = ttm_bo_reference(&user_bo->dma.base);
511 ret = ttm_prime_object_init(tfile,
512 size,
513 &user_bo->prime,
514 shareable,
515 ttm_buffer_type,
516 &vmw_user_dmabuf_release,
517 &vmw_user_dmabuf_ref_obj_release);
518 if (unlikely(ret != 0)) {
519 ttm_bo_unref(&tmp);
520 goto out_no_base_object;
521 }
522
523 *p_dma_buf = &user_bo->dma;
524 if (p_base) {
525 *p_base = &user_bo->prime.base;
526 kref_get(&(*p_base)->refcount);
527 }
528 *handle = user_bo->prime.base.hash.key;
529
530 out_no_base_object:
531 return ret;
532 }
533
534 /**
535 * vmw_user_dmabuf_verify_access - verify access permissions on this
536 * buffer object.
537 *
538 * @bo: Pointer to the buffer object being accessed
539 * @tfile: Identifying the caller.
540 */
541 int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
542 struct ttm_object_file *tfile)
543 {
544 struct vmw_user_dma_buffer *vmw_user_bo;
545
546 if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
547 return -EPERM;
548
549 vmw_user_bo = vmw_user_dma_buffer(bo);
550
551 /* Check that the caller has opened the object. */
552 if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
553 return 0;
554
555 DRM_ERROR("Could not grant buffer access.\n");
556 return -EPERM;
557 }
558
559 /**
560 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
561 * access, idling previous GPU operations on the buffer and optionally
562 * blocking it for further command submissions.
563 *
564 * @user_bo: Pointer to the buffer object being grabbed for CPU access
565 * @tfile: Identifying the caller.
566 * @flags: Flags indicating how the grab should be performed.
567 *
568 * A blocking grab will be automatically released when @tfile is closed.
569 */
570 static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
571 struct ttm_object_file *tfile,
572 uint32_t flags)
573 {
574 struct ttm_buffer_object *bo = &user_bo->dma.base;
575 bool existed;
576 int ret;
577
578 if (flags & drm_vmw_synccpu_allow_cs) {
579 bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
580 long lret;
581
582 if (nonblock)
583 return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;
584
585 lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
586 if (!lret)
587 return -EBUSY;
588 else if (lret < 0)
589 return lret;
590 return 0;
591 }
592
593 ret = ttm_bo_synccpu_write_grab
594 (bo, !!(flags & drm_vmw_synccpu_dontblock));
595 if (unlikely(ret != 0))
596 return ret;
597
598 ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
599 TTM_REF_SYNCCPU_WRITE, &existed, false);
600 if (ret != 0 || existed)
601 ttm_bo_synccpu_write_release(&user_bo->dma.base);
602
603 return ret;
604 }
605
606 /**
607 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
608 * and unblock command submission on the buffer if blocked.
609 *
610 * @handle: Handle identifying the buffer object.
611 * @tfile: Identifying the caller.
612 * @flags: Flags indicating the type of release.
613 */
614 static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
615 struct ttm_object_file *tfile,
616 uint32_t flags)
617 {
618 if (!(flags & drm_vmw_synccpu_allow_cs))
619 return ttm_ref_object_base_unref(tfile, handle,
620 TTM_REF_SYNCCPU_WRITE);
621
622 return 0;
623 }
624
625 /**
626 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
627 * functionality.
628 *
629 * @dev: Identifies the drm device.
630 * @data: Pointer to the ioctl argument.
631 * @file_priv: Identifies the caller.
632 *
633 * This function checks the ioctl arguments for validity and calls the
634 * relevant synccpu functions.
635 */
636 int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
637 struct drm_file *file_priv)
638 {
639 struct drm_vmw_synccpu_arg *arg =
640 (struct drm_vmw_synccpu_arg *) data;
641 struct vmw_dma_buffer *dma_buf;
642 struct vmw_user_dma_buffer *user_bo;
643 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
644 struct ttm_base_object *buffer_base;
645 int ret;
646
647 if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
648 || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
649 drm_vmw_synccpu_dontblock |
650 drm_vmw_synccpu_allow_cs)) != 0) {
651 DRM_ERROR("Illegal synccpu flags.\n");
652 return -EINVAL;
653 }
654
655 switch (arg->op) {
656 case drm_vmw_synccpu_grab:
657 ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
658 &buffer_base);
659 if (unlikely(ret != 0))
660 return ret;
661
662 user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
663 dma);
664 ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
665 vmw_dmabuf_unreference(&dma_buf);
666 ttm_base_object_unref(&buffer_base);
667 if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
668 ret != -EBUSY)) {
669 DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
670 (unsigned int) arg->handle);
671 return ret;
672 }
673 break;
674 case drm_vmw_synccpu_release:
675 ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
676 arg->flags);
677 if (unlikely(ret != 0)) {
678 DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
679 (unsigned int) arg->handle);
680 return ret;
681 }
682 break;
683 default:
684 DRM_ERROR("Invalid synccpu operation.\n");
685 return -EINVAL;
686 }
687
688 return 0;
689 }
690
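/**
 * vmw_dmabuf_alloc_ioctl - Ioctl function implementing the dma buffer
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * Allocates a user dma buffer and returns its handle, map offset and
 * GMR id to user space.
 */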
691 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
692 struct drm_file *file_priv)
693 {
694 struct vmw_private *dev_priv = vmw_priv(dev);
695 union drm_vmw_alloc_dmabuf_arg *arg =
696 (union drm_vmw_alloc_dmabuf_arg *)data;
697 struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
698 struct drm_vmw_dmabuf_rep *rep = &arg->rep;
699 struct vmw_dma_buffer *dma_buf;
700 uint32_t handle;
701 int ret;
702
703 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
704 if (unlikely(ret != 0))
705 return ret;
706
707 ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
708 req->size, false, &handle, &dma_buf,
709 NULL);
710 if (unlikely(ret != 0))
711 goto out_no_dmabuf;
712
713 rep->handle = handle;
714 rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
715 rep->cur_gmr_id = handle;
716 rep->cur_gmr_offset = 0;
717
718 vmw_dmabuf_unreference(&dma_buf);
719
720 out_no_dmabuf:
721 ttm_read_unlock(&dev_priv->reservation_sem);
722
723 return ret;
724 }
725
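/**
 * vmw_dmabuf_unref_ioctl - Ioctl function implementing the dma buffer
 * unreference functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 */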
726 int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
727 struct drm_file *file_priv)
728 {
729 struct drm_vmw_unref_dmabuf_arg *arg =
730 (struct drm_vmw_unref_dmabuf_arg *)data;
731
732 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
733 arg->handle,
734 TTM_REF_USAGE);
735 }
736
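/**
 * vmw_user_dmabuf_lookup - Look up a struct vmw_dma_buffer from a
 * user-space handle.
 *
 * @tfile: Identifying the caller.
 * @handle: The user-space handle.
 * @out: On successful return points to a referenced struct vmw_dma_buffer.
 * @p_base: If non-NULL, on successful return points to a referenced base
 * object that the caller must unreference when done.
 *
 * A minimal usage sketch (error handling omitted):
 *
 *	struct vmw_dma_buffer *buf;
 *
 *	if (vmw_user_dmabuf_lookup(tfile, handle, &buf, NULL) == 0) {
 *		... use buf ...
 *		vmw_dmabuf_unreference(&buf);
 *	}
 */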
737 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
738 uint32_t handle, struct vmw_dma_buffer **out,
739 struct ttm_base_object **p_base)
740 {
741 struct vmw_user_dma_buffer *vmw_user_bo;
742 struct ttm_base_object *base;
743
744 base = ttm_base_object_lookup(tfile, handle);
745 if (unlikely(base == NULL)) {
746 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
747 (unsigned long)handle);
748 return -ESRCH;
749 }
750
751 if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
752 ttm_base_object_unref(&base);
753 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
754 (unsigned long)handle);
755 return -EINVAL;
756 }
757
758 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
759 prime.base);
760 (void)ttm_bo_reference(&vmw_user_bo->dma.base);
761 if (p_base)
762 *p_base = base;
763 else
764 ttm_base_object_unref(&base);
765 *out = &vmw_user_bo->dma;
766
767 return 0;
768 }
769
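/**
 * vmw_user_dmabuf_reference - Add a TTM_REF_USAGE reference on a user
 * dma buffer and return its user-space handle.
 *
 * @tfile: Identifying the caller.
 * @dma_buf: The dma buffer; must be a user dma buffer, otherwise
 * -EINVAL is returned.
 * @handle: On successful return contains the user-space handle.
 */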
770 int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
771 struct vmw_dma_buffer *dma_buf,
772 uint32_t *handle)
773 {
774 struct vmw_user_dma_buffer *user_bo;
775
776 if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
777 return -EINVAL;
778
779 user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
780
781 *handle = user_bo->prime.base.hash.key;
782 return ttm_ref_object_add(tfile, &user_bo->prime.base,
783 TTM_REF_USAGE, NULL, false);
784 }
785
786 /*
787 * Stream management
788 */
789
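/**
 * vmw_stream_destroy - hw_destroy callback for stream resources.
 *
 * @res: The stream resource.
 *
 * Releases the overlay stream claimed in vmw_stream_init().
 */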
790 static void vmw_stream_destroy(struct vmw_resource *res)
791 {
792 struct vmw_private *dev_priv = res->dev_priv;
793 struct vmw_stream *stream;
794 int ret;
795
796 DRM_INFO("%s: unref\n", __func__);
797 stream = container_of(res, struct vmw_stream, res);
798
799 ret = vmw_overlay_unref(dev_priv, stream->stream_id);
800 WARN_ON(ret != 0);
801 }
802
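/**
 * vmw_stream_init - Initialize a stream resource and claim an overlay
 * stream for it.
 *
 * @dev_priv: Pointer to a device private struct.
 * @stream: The stream to initialize.
 * @res_free: Resource destructor, or NULL to use kfree().
 *
 * On failure the stream is freed using @res_free (or kfree() if NULL).
 */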
803 static int vmw_stream_init(struct vmw_private *dev_priv,
804 struct vmw_stream *stream,
805 void (*res_free) (struct vmw_resource *res))
806 {
807 struct vmw_resource *res = &stream->res;
808 int ret;
809
810 ret = vmw_resource_init(dev_priv, res, false, res_free,
811 &vmw_stream_func);
812
813 if (unlikely(ret != 0)) {
814 if (res_free == NULL)
815 kfree(stream);
816 else
817 res_free(&stream->res);
818 return ret;
819 }
820
821 ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
822 if (ret) {
823 vmw_resource_unreference(&res);
824 return ret;
825 }
826
827 DRM_INFO("%s: claimed\n", __func__);
828
829 vmw_resource_activate(&stream->res, vmw_stream_destroy);
830 return 0;
831 }
832
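/**
 * vmw_user_stream_free - Resource destructor for user streams.
 *
 * @res: The stream resource.
 *
 * Frees the user stream object and returns its accounted size to the
 * global memory accounting.
 */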
833 static void vmw_user_stream_free(struct vmw_resource *res)
834 {
835 struct vmw_user_stream *stream =
836 container_of(res, struct vmw_user_stream, stream.res);
837 struct vmw_private *dev_priv = res->dev_priv;
838
839 ttm_base_object_kfree(stream, base);
840 ttm_mem_global_free(vmw_mem_glob(dev_priv),
841 vmw_user_stream_size);
842 }
843
844 /**
845 * This function is called when user space has no more references on the
846 * base object. It releases the base-object's reference on the resource object.
847 */
848
849 static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
850 {
851 struct ttm_base_object *base = *p_base;
852 struct vmw_user_stream *stream =
853 container_of(base, struct vmw_user_stream, base);
854 struct vmw_resource *res = &stream->stream.res;
855
856 *p_base = NULL;
857 vmw_resource_unreference(&res);
858 }
859
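/**
 * vmw_stream_unref_ioctl - Ioctl function implementing the stream
 * unreference functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 */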
860 int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
861 struct drm_file *file_priv)
862 {
863 struct vmw_private *dev_priv = vmw_priv(dev);
864 struct vmw_resource *res;
865 struct vmw_user_stream *stream;
866 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
867 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
868 struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
869 int ret = 0;
870
871
872 res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
873 if (unlikely(res == NULL))
874 return -EINVAL;
875
876 if (res->res_free != &vmw_user_stream_free) {
877 ret = -EINVAL;
878 goto out;
879 }
880
881 stream = container_of(res, struct vmw_user_stream, stream.res);
882 if (stream->base.tfile != tfile) {
883 ret = -EINVAL;
884 goto out;
885 }
886
887 ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
888 out:
889 vmw_resource_unreference(&res);
890 return ret;
891 }
892
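/**
 * vmw_stream_claim_ioctl - Ioctl function implementing the stream
 * claim functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 */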
893 int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
894 struct drm_file *file_priv)
895 {
896 struct vmw_private *dev_priv = vmw_priv(dev);
897 struct vmw_user_stream *stream;
898 struct vmw_resource *res;
899 struct vmw_resource *tmp;
900 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
901 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
902 int ret;
903
904 /*
905 * Approximate idr memory usage with 128 bytes. It will be limited
906 * by the maximum number of streams anyway.
907 */
908
909 if (unlikely(vmw_user_stream_size == 0))
910 vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
911
912 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
913 if (unlikely(ret != 0))
914 return ret;
915
916 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
917 vmw_user_stream_size,
918 false, true);
919 ttm_read_unlock(&dev_priv->reservation_sem);
920 if (unlikely(ret != 0)) {
921 if (ret != -ERESTARTSYS)
922 DRM_ERROR("Out of graphics memory for stream"
923 " creation.\n");
924
925 goto out_ret;
926 }
927
928 stream = kmalloc(sizeof(*stream), GFP_KERNEL);
929 if (unlikely(stream == NULL)) {
930 ttm_mem_global_free(vmw_mem_glob(dev_priv),
931 vmw_user_stream_size);
932 ret = -ENOMEM;
933 goto out_ret;
934 }
935
936 res = &stream->stream.res;
937 stream->base.shareable = false;
938 stream->base.tfile = NULL;
939
940 /*
941 * From here on, the destructor takes over resource freeing.
942 */
943
944 ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
945 if (unlikely(ret != 0))
946 goto out_ret;
947
948 tmp = vmw_resource_reference(res);
949 ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
950 &vmw_user_stream_base_release, NULL);
951
952 if (unlikely(ret != 0)) {
953 vmw_resource_unreference(&tmp);
954 goto out_err;
955 }
956
957 arg->stream_id = res->id;
958 out_err:
959 vmw_resource_unreference(&res);
960 out_ret:
961 return ret;
962 }
963
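/**
 * vmw_user_stream_lookup - Look up a user stream by its resource id.
 *
 * @dev_priv: Pointer to a device private struct.
 * @tfile: Identifying the caller; must match the stream's owner.
 * @inout_id: On input the resource id, on successful return the
 * overlay stream id.
 * @out: On successful return points to a referenced stream resource.
 */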
964 int vmw_user_stream_lookup(struct vmw_private *dev_priv,
965 struct ttm_object_file *tfile,
966 uint32_t *inout_id, struct vmw_resource **out)
967 {
968 struct vmw_user_stream *stream;
969 struct vmw_resource *res;
970 int ret;
971
972 res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
973 *inout_id);
974 if (unlikely(res == NULL))
975 return -EINVAL;
976
977 if (res->res_free != &vmw_user_stream_free) {
978 ret = -EINVAL;
979 goto err_ref;
980 }
981
982 stream = container_of(res, struct vmw_user_stream, stream.res);
983 if (stream->base.tfile != tfile) {
984 ret = -EPERM;
985 goto err_ref;
986 }
987
988 *inout_id = stream->stream.stream_id;
989 *out = res;
990 return 0;
991 err_ref:
992 vmw_resource_unreference(&res);
993 return ret;
994 }
995
996
997 /**
998 * vmw_dumb_create - Create a dumb kms buffer
999 *
1000 * @file_priv: Pointer to a struct drm_file identifying the caller.
1001 * @dev: Pointer to the drm device.
1002 * @args: Pointer to a struct drm_mode_create_dumb structure
1003 *
1004 * This is a driver callback for the core drm create_dumb functionality.
1005 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
1006 * that the arguments have a different format.
1007 */
1008 int vmw_dumb_create(struct drm_file *file_priv,
1009 struct drm_device *dev,
1010 struct drm_mode_create_dumb *args)
1011 {
1012 struct vmw_private *dev_priv = vmw_priv(dev);
1013 struct vmw_dma_buffer *dma_buf;
1014 int ret;
1015
1016 args->pitch = args->width * ((args->bpp + 7) / 8);
1017 args->size = args->pitch * args->height;
1018
1019 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1020 if (unlikely(ret != 0))
1021 return ret;
1022
1023 ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
1024 args->size, false, &args->handle,
1025 &dma_buf, NULL);
1026 if (unlikely(ret != 0))
1027 goto out_no_dmabuf;
1028
1029 vmw_dmabuf_unreference(&dma_buf);
1030 out_no_dmabuf:
1031 ttm_read_unlock(&dev_priv->reservation_sem);
1032 return ret;
1033 }
1034
1035 /**
1036 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
1037 *
1038 * @file_priv: Pointer to a struct drm_file identifying the caller.
1039 * @dev: Pointer to the drm device.
1040 * @handle: Handle identifying the dumb buffer.
1041 * @offset: The address space offset returned.
1042 *
1043 * This is a driver callback for the core drm dumb_map_offset functionality.
1044 */
1045 int vmw_dumb_map_offset(struct drm_file *file_priv,
1046 struct drm_device *dev, uint32_t handle,
1047 uint64_t *offset)
1048 {
1049 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1050 struct vmw_dma_buffer *out_buf;
1051 int ret;
1052
1053 ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
1054 if (ret != 0)
1055 return -EINVAL;
1056
1057 *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
1058 vmw_dmabuf_unreference(&out_buf);
1059 return 0;
1060 }
1061
1062 /**
1063 * vmw_dumb_destroy - Destroy a dumb buffer
1064 *
1065 * @file_priv: Pointer to a struct drm_file identifying the caller.
1066 * @dev: Pointer to the drm device.
1067 * @handle: Handle identifying the dumb buffer.
1068 *
1069 * This is a driver callback for the core drm dumb_destroy functionality.
1070 */
1071 int vmw_dumb_destroy(struct drm_file *file_priv,
1072 struct drm_device *dev,
1073 uint32_t handle)
1074 {
1075 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1076 handle, TTM_REF_USAGE);
1077 }
1078
1079 /**
1080 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
1081 *
1082 * @res: The resource for which to allocate a backup buffer.
1083 * @interruptible: Whether any sleeps during allocation should be
1084 * performed while interruptible.
1085 */
1086 static int vmw_resource_buf_alloc(struct vmw_resource *res,
1087 bool interruptible)
1088 {
1089 unsigned long size =
1090 (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
1091 struct vmw_dma_buffer *backup;
1092 int ret;
1093
1094 if (likely(res->backup)) {
1095 BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
1096 return 0;
1097 }
1098
1099 backup = kzalloc(sizeof(*backup), GFP_KERNEL);
1100 if (unlikely(backup == NULL))
1101 return -ENOMEM;
1102
1103 ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
1104 res->func->backup_placement,
1105 interruptible,
1106 &vmw_dmabuf_bo_free);
1107 if (unlikely(ret != 0))
1108 goto out_no_dmabuf;
1109
1110 res->backup = backup;
1111
1112 out_no_dmabuf:
1113 return ret;
1114 }
1115
1116 /**
1117 * vmw_resource_do_validate - Make a resource up-to-date and visible
1118 * to the device.
1119 *
1120 * @res: The resource to make visible to the device.
1121 * @val_buf: Information about a buffer possibly
1122 * containing backup data if a bind operation is needed.
1123 *
1124 * On hardware resource shortage, this function returns -EBUSY and
1125 * should be retried once resources have been freed up.
1126 */
1127 static int vmw_resource_do_validate(struct vmw_resource *res,
1128 struct ttm_validate_buffer *val_buf)
1129 {
1130 int ret = 0;
1131 const struct vmw_res_func *func = res->func;
1132
1133 if (unlikely(res->id == -1)) {
1134 ret = func->create(res);
1135 if (unlikely(ret != 0))
1136 return ret;
1137 }
1138
1139 if (func->bind &&
1140 ((func->needs_backup && list_empty(&res->mob_head) &&
1141 val_buf->bo != NULL) ||
1142 (!func->needs_backup && val_buf->bo != NULL))) {
1143 ret = func->bind(res, val_buf);
1144 if (unlikely(ret != 0))
1145 goto out_bind_failed;
1146 if (func->needs_backup)
1147 list_add_tail(&res->mob_head, &res->backup->res_list);
1148 }
1149
1150 /*
1151 * Only do this on write operations, and move to
1152 * vmw_resource_unreserve if it can be called after
1153 * backup buffers have been unreserved. Otherwise
1154 * sort out locking.
1155 */
1156 res->res_dirty = true;
1157
1158 return 0;
1159
1160 out_bind_failed:
1161 func->destroy(res);
1162
1163 return ret;
1164 }
1165
1166 /**
1167 * vmw_resource_unreserve - Unreserve a resource previously reserved for
1168 * command submission.
1169 *
1170 * @res: Pointer to the struct vmw_resource to unreserve.
1171 * @switch_backup: Backup buffer has been switched.
1172 * @new_backup: Pointer to new backup buffer if command submission
1173 * switched. May be NULL.
1174 * @new_backup_offset: New backup offset if @switch_backup is true.
1175 *
1176 * Currently unreserving a resource means putting it back on the device's
1177 * resource lru list, so that it can be evicted if necessary.
1178 */
1179 void vmw_resource_unreserve(struct vmw_resource *res,
1180 bool switch_backup,
1181 struct vmw_dma_buffer *new_backup,
1182 unsigned long new_backup_offset)
1183 {
1184 struct vmw_private *dev_priv = res->dev_priv;
1185
1186 if (!list_empty(&res->lru_head))
1187 return;
1188
1189 if (switch_backup && new_backup != res->backup) {
1190 if (res->backup) {
1191 lockdep_assert_held(&res->backup->base.resv->lock.base);
1192 list_del_init(&res->mob_head);
1193 vmw_dmabuf_unreference(&res->backup);
1194 }
1195
1196 if (new_backup) {
1197 res->backup = vmw_dmabuf_reference(new_backup);
1198 lockdep_assert_held(&new_backup->base.resv->lock.base);
1199 list_add_tail(&res->mob_head, &new_backup->res_list);
1200 } else {
1201 res->backup = NULL;
1202 }
1203 }
1204 if (switch_backup)
1205 res->backup_offset = new_backup_offset;
1206
1207 if (!res->func->may_evict || res->id == -1 || res->pin_count)
1208 return;
1209
1210 write_lock(&dev_priv->resource_lock);
1211 list_add_tail(&res->lru_head,
1212 &res->dev_priv->res_lru[res->func->res_type]);
1213 write_unlock(&dev_priv->resource_lock);
1214 }
1215
1216 /**
1217 * vmw_resource_check_buffer - Check whether a backup buffer is needed
1218 * for a resource and in that case, allocate
1219 * one, reserve and validate it.
1220 *
1221 * @res: The resource for which to allocate a backup buffer.
1222 * @interruptible: Whether any sleeps during allocation should be
1223 * performed while interruptible.
1224 * @val_buf: On successful return contains data about the
1225 * reserved and validated backup buffer.
1226 */
1227 static int
1228 vmw_resource_check_buffer(struct vmw_resource *res,
1229 bool interruptible,
1230 struct ttm_validate_buffer *val_buf)
1231 {
1232 struct list_head val_list;
1233 bool backup_dirty = false;
1234 int ret;
1235
1236 if (unlikely(res->backup == NULL)) {
1237 ret = vmw_resource_buf_alloc(res, interruptible);
1238 if (unlikely(ret != 0))
1239 return ret;
1240 }
1241
1242 INIT_LIST_HEAD(&val_list);
1243 val_buf->bo = ttm_bo_reference(&res->backup->base);
1244 val_buf->shared = false;
1245 list_add_tail(&val_buf->head, &val_list);
1246 ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
1247 if (unlikely(ret != 0))
1248 goto out_no_reserve;
1249
1250 if (res->func->needs_backup && list_empty(&res->mob_head))
1251 return 0;
1252
1253 backup_dirty = res->backup_dirty;
1254 ret = ttm_bo_validate(&res->backup->base,
1255 res->func->backup_placement,
1256 true, false);
1257
1258 if (unlikely(ret != 0))
1259 goto out_no_validate;
1260
1261 return 0;
1262
1263 out_no_validate:
1264 ttm_eu_backoff_reservation(NULL, &val_list);
1265 out_no_reserve:
1266 ttm_bo_unref(&val_buf->bo);
1267 if (backup_dirty)
1268 vmw_dmabuf_unreference(&res->backup);
1269
1270 return ret;
1271 }
1272
1273 /**
1274 * vmw_resource_reserve - Reserve a resource for command submission
1275 *
1276 * @res: The resource to reserve.
1277 *
1278 * This function takes the resource off the LRU list and makes sure
1279 * a backup buffer is present for guest-backed resources. However,
1280 * the buffer may not be bound to the resource at this point.
1281 *
1282 */
1283 int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
1284 bool no_backup)
1285 {
1286 struct vmw_private *dev_priv = res->dev_priv;
1287 int ret;
1288
1289 write_lock(&dev_priv->resource_lock);
1290 list_del_init(&res->lru_head);
1291 write_unlock(&dev_priv->resource_lock);
1292
1293 if (res->func->needs_backup && res->backup == NULL &&
1294 !no_backup) {
1295 ret = vmw_resource_buf_alloc(res, interruptible);
1296 if (unlikely(ret != 0)) {
1297 DRM_ERROR("Failed to allocate a backup buffer "
1298 "of size %lu. bytes\n",
1299 (unsigned long) res->backup_size);
1300 return ret;
1301 }
1302 }
1303
1304 return 0;
1305 }
1306
1307 /**
1308 * vmw_resource_backoff_reservation - Unreserve and unreference a
1309 * backup buffer
1310 *
1311 * @val_buf: Backup buffer information.
1312 */
1313 static void
1314 vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
1315 {
1316 struct list_head val_list;
1317
1318 if (likely(val_buf->bo == NULL))
1319 return;
1320
1321 INIT_LIST_HEAD(&val_list);
1322 list_add_tail(&val_buf->head, &val_list);
1323 ttm_eu_backoff_reservation(NULL, &val_list);
1324 ttm_bo_unref(&val_buf->bo);
1325 }
1326
1327 /**
1328 * vmw_resource_do_evict - Evict a resource, and transfer its data
1329 * to a backup buffer.
1330 *
1331 * @res: The resource to evict.
1332 * @interruptible: Whether to wait interruptible.
1333 */
1334 static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
1335 {
1336 struct ttm_validate_buffer val_buf;
1337 const struct vmw_res_func *func = res->func;
1338 int ret;
1339
1340 BUG_ON(!func->may_evict);
1341
1342 val_buf.bo = NULL;
1343 val_buf.shared = false;
1344 ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
1345 if (unlikely(ret != 0))
1346 return ret;
1347
1348 if (unlikely(func->unbind != NULL &&
1349 (!func->needs_backup || !list_empty(&res->mob_head)))) {
1350 ret = func->unbind(res, res->res_dirty, &val_buf);
1351 if (unlikely(ret != 0))
1352 goto out_no_unbind;
1353 list_del_init(&res->mob_head);
1354 }
1355 ret = func->destroy(res);
1356 res->backup_dirty = true;
1357 res->res_dirty = false;
1358 out_no_unbind:
1359 vmw_resource_backoff_reservation(&val_buf);
1360
1361 return ret;
1362 }
1363
1364
1365 /**
1366 * vmw_resource_validate - Make a resource up-to-date and visible
1367 * to the device.
1368 *
1369 * @res: The resource to make visible to the device.
1370 *
1371 * On successful return, any backup DMA buffer pointed to by @res->backup will
1372 * be reserved and validated.
1373 * On hardware resource shortage, this function will repeatedly evict
1374 * resources of the same type until the validation succeeds.
1375 */
1376 int vmw_resource_validate(struct vmw_resource *res)
1377 {
1378 int ret;
1379 struct vmw_resource *evict_res;
1380 struct vmw_private *dev_priv = res->dev_priv;
1381 struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
1382 struct ttm_validate_buffer val_buf;
1383 unsigned err_count = 0;
1384
1385 if (!res->func->create)
1386 return 0;
1387
1388 val_buf.bo = NULL;
1389 val_buf.shared = false;
1390 if (res->backup)
1391 val_buf.bo = &res->backup->base;
1392 do {
1393 ret = vmw_resource_do_validate(res, &val_buf);
1394 if (likely(ret != -EBUSY))
1395 break;
1396
1397 write_lock(&dev_priv->resource_lock);
1398 if (list_empty(lru_list) || !res->func->may_evict) {
1399 DRM_ERROR("Out of device device resources "
1400 "for %s.\n", res->func->type_name);
1401 ret = -EBUSY;
1402 write_unlock(&dev_priv->resource_lock);
1403 break;
1404 }
1405
1406 evict_res = vmw_resource_reference
1407 (list_first_entry(lru_list, struct vmw_resource,
1408 lru_head));
1409 list_del_init(&evict_res->lru_head);
1410
1411 write_unlock(&dev_priv->resource_lock);
1412
1413 ret = vmw_resource_do_evict(evict_res, true);
1414 if (unlikely(ret != 0)) {
1415 write_lock(&dev_priv->resource_lock);
1416 list_add_tail(&evict_res->lru_head, lru_list);
1417 write_unlock(&dev_priv->resource_lock);
1418 if (ret == -ERESTARTSYS ||
1419 ++err_count > VMW_RES_EVICT_ERR_COUNT) {
1420 vmw_resource_unreference(&evict_res);
1421 goto out_no_validate;
1422 }
1423 }
1424
1425 vmw_resource_unreference(&evict_res);
1426 } while (1);
1427
1428 if (unlikely(ret != 0))
1429 goto out_no_validate;
1430 else if (!res->func->needs_backup && res->backup) {
1431 list_del_init(&res->mob_head);
1432 vmw_dmabuf_unreference(&res->backup);
1433 }
1434
1435 return 0;
1436
1437 out_no_validate:
1438 return ret;
1439 }
1440
1441 /**
1442 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
1443 * object without unreserving it.
1444 *
1445 * @bo: Pointer to the struct ttm_buffer_object to fence.
1446 * @fence: Pointer to the fence. If NULL, this function will
1447 * insert a fence into the command stream.
1448 *
1449 * Contrary to the ttm_eu version of this function, it takes only
1450 * a single buffer object instead of a list, and it also doesn't
1451 * unreserve the buffer object, which needs to be done separately.
1452 */
1453 void vmw_fence_single_bo(struct ttm_buffer_object *bo,
1454 struct vmw_fence_obj *fence)
1455 {
1456 struct ttm_bo_device *bdev = bo->bdev;
1457
1458 struct vmw_private *dev_priv =
1459 container_of(bdev, struct vmw_private, bdev);
1460
1461 if (fence == NULL) {
1462 vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1463 reservation_object_add_excl_fence(bo->resv, &fence->base);
1464 fence_put(&fence->base);
1465 } else
1466 reservation_object_add_excl_fence(bo->resv, &fence->base);
1467 }
1468
1469 /**
1470 * vmw_resource_move_notify - TTM move_notify_callback
1471 *
1472 * @bo: The TTM buffer object about to move.
1473 * @mem: The struct ttm_mem_reg indicating to what memory
1474 * region the move is taking place.
1475 *
1476 * Evicts the Guest Backed hardware resource if the backup
1477 * buffer is being moved out of MOB memory.
1478 * Note that this function should not race with the resource
1479 * validation code as long as it accesses only members of struct
1480 * resource that remain static while bo::res is !NULL and
1481 * while we have @bo reserved. struct resource::backup is *not* a
1482 * static member. The resource validation code will take care
1483 * to set @bo::res to NULL, while having @bo reserved when the
1484 * buffer is no longer bound to the resource, so @bo::res can be
1485 * used to determine whether there is a need to unbind and whether
1486 * it is safe to unbind.
1487 */
1488 void vmw_resource_move_notify(struct ttm_buffer_object *bo,
1489 struct ttm_mem_reg *mem)
1490 {
1491 struct vmw_dma_buffer *dma_buf;
1492
1493 if (mem == NULL)
1494 return;
1495
1496 if (bo->destroy != vmw_dmabuf_bo_free &&
1497 bo->destroy != vmw_user_dmabuf_destroy)
1498 return;
1499
1500 dma_buf = container_of(bo, struct vmw_dma_buffer, base);
1501
1502 if (mem->mem_type != VMW_PL_MOB) {
1503 struct vmw_resource *res, *n;
1504 struct ttm_validate_buffer val_buf;
1505
1506 val_buf.bo = bo;
1507 val_buf.shared = false;
1508
1509 list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
1510
1511 if (unlikely(res->func->unbind == NULL))
1512 continue;
1513
1514 (void) res->func->unbind(res, true, &val_buf);
1515 res->backup_dirty = true;
1516 res->res_dirty = false;
1517 list_del_init(&res->mob_head);
1518 }
1519
1520 (void) ttm_bo_wait(bo, false, false, false);
1521 }
1522 }
1523
1524
1525
1526 /**
1527 * vmw_query_readback_all - Read back cached query states
1528 *
1529 * @dx_query_mob: Buffer containing the DX query MOB
1530 *
1531 * Read back cached states from the device if they exist. This function
1532 * assumes binding_mutex is held.
1533 */
1534 int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
1535 {
1536 struct vmw_resource *dx_query_ctx;
1537 struct vmw_private *dev_priv;
1538 struct {
1539 SVGA3dCmdHeader header;
1540 SVGA3dCmdDXReadbackAllQuery body;
1541 } *cmd;
1542
1543
1544 /* No query bound, so do nothing */
1545 if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
1546 return 0;
1547
1548 dx_query_ctx = dx_query_mob->dx_query_ctx;
1549 dev_priv = dx_query_ctx->dev_priv;
1550
1551 cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
1552 if (unlikely(cmd == NULL)) {
1553 DRM_ERROR("Failed reserving FIFO space for "
1554 "query MOB read back.\n");
1555 return -ENOMEM;
1556 }
1557
1558 cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
1559 cmd->header.size = sizeof(cmd->body);
1560 cmd->body.cid = dx_query_ctx->id;
1561
1562 vmw_fifo_commit(dev_priv, sizeof(*cmd));
1563
1564 /* Triggers a rebind the next time affected context is bound */
1565 dx_query_mob->dx_query_ctx = NULL;
1566
1567 return 0;
1568 }
1569
1570
1571
1572 /**
1573 * vmw_query_move_notify - Read back cached query states
1574 *
1575 * @bo: The TTM buffer object about to move.
1576 * @mem: The memory region @bo is moving to.
1577 *
1578 * Called before the query MOB is swapped out to read back cached query
1579 * states from the device.
1580 */
1581 void vmw_query_move_notify(struct ttm_buffer_object *bo,
1582 struct ttm_mem_reg *mem)
1583 {
1584 struct vmw_dma_buffer *dx_query_mob;
1585 struct ttm_bo_device *bdev = bo->bdev;
1586 struct vmw_private *dev_priv;
1587
1588
1589 dev_priv = container_of(bdev, struct vmw_private, bdev);
1590
1591 mutex_lock(&dev_priv->binding_mutex);
1592
1593 dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
1594 if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
1595 mutex_unlock(&dev_priv->binding_mutex);
1596 return;
1597 }
1598
1599 /* If BO is being moved from MOB to system memory */
1600 if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
1601 struct vmw_fence_obj *fence;
1602
1603 (void) vmw_query_readback_all(dx_query_mob);
1604 mutex_unlock(&dev_priv->binding_mutex);
1605
1606 /* Create a fence and attach the BO to it */
1607 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1608 vmw_fence_single_bo(bo, fence);
1609
1610 if (fence != NULL)
1611 vmw_fence_obj_unreference(&fence);
1612
1613 (void) ttm_bo_wait(bo, false, false, false);
1614 } else
1615 mutex_unlock(&dev_priv->binding_mutex);
1616
1617 }
1618
1619 /**
1620 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
1621 *
1622 * @res: The resource being queried.
1623 */
1624 bool vmw_resource_needs_backup(const struct vmw_resource *res)
1625 {
1626 return res->func->needs_backup;
1627 }
1628
1629 /**
1630 * vmw_resource_evict_type - Evict all resources of a specific type
1631 *
1632 * @dev_priv: Pointer to a device private struct
1633 * @type: The resource type to evict
1634 *
1635 * To avoid thrashing or starvation, or as part of the hibernation sequence,
1636 * try to evict all evictable resources of a specific type.
1637 */
1638 static void vmw_resource_evict_type(struct vmw_private *dev_priv,
1639 enum vmw_res_type type)
1640 {
1641 struct list_head *lru_list = &dev_priv->res_lru[type];
1642 struct vmw_resource *evict_res;
1643 unsigned err_count = 0;
1644 int ret;
1645
1646 do {
1647 write_lock(&dev_priv->resource_lock);
1648
1649 if (list_empty(lru_list))
1650 goto out_unlock;
1651
1652 evict_res = vmw_resource_reference(
1653 list_first_entry(lru_list, struct vmw_resource,
1654 lru_head));
1655 list_del_init(&evict_res->lru_head);
1656 write_unlock(&dev_priv->resource_lock);
1657
1658 ret = vmw_resource_do_evict(evict_res, false);
1659 if (unlikely(ret != 0)) {
1660 write_lock(&dev_priv->resource_lock);
1661 list_add_tail(&evict_res->lru_head, lru_list);
1662 write_unlock(&dev_priv->resource_lock);
1663 if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
1664 vmw_resource_unreference(&evict_res);
1665 return;
1666 }
1667 }
1668
1669 vmw_resource_unreference(&evict_res);
1670 } while (1);
1671
1672 out_unlock:
1673 write_unlock(&dev_priv->resource_lock);
1674 }
1675
1676 /**
1677 * vmw_resource_evict_all - Evict all evictable resources
1678 *
1679 * @dev_priv: Pointer to a device private struct
1680 *
1681 * To avoid thrashing or starvation, or as part of the hibernation sequence,
1682 * evict all evictable resources. In particular this means that all
1683 * guest-backed resources that are registered with the device are
1684 * evicted and the OTable becomes clean.
1685 */
1686 void vmw_resource_evict_all(struct vmw_private *dev_priv)
1687 {
1688 enum vmw_res_type type;
1689
1690 mutex_lock(&dev_priv->cmdbuf_mutex);
1691
1692 for (type = 0; type < vmw_res_max; ++type)
1693 vmw_resource_evict_type(dev_priv, type);
1694
1695 mutex_unlock(&dev_priv->cmdbuf_mutex);
1696 }
1697
1698 /**
1699 * vmw_resource_pin - Add a pin reference on a resource
1700 *
1701 * @res: The resource to add a pin reference on
1702 *
1703 * This function adds a pin reference, and if needed validates the resource.
1704 * Having a pin reference means that the resource can never be evicted, and
1705 * its id will never change as long as there is a pin reference.
1706 * This function returns 0 on success and a negative error code on failure.
1707 */
1708 int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
1709 {
1710 struct vmw_private *dev_priv = res->dev_priv;
1711 int ret;
1712
1713 ttm_write_lock(&dev_priv->reservation_sem, interruptible);
1714 mutex_lock(&dev_priv->cmdbuf_mutex);
1715 ret = vmw_resource_reserve(res, interruptible, false);
1716 if (ret)
1717 goto out_no_reserve;
1718
1719 if (res->pin_count == 0) {
1720 struct vmw_dma_buffer *vbo = NULL;
1721
1722 if (res->backup) {
1723 vbo = res->backup;
1724
1725 ttm_bo_reserve(&vbo->base, interruptible, false, false,
1726 NULL);
1727 if (!vbo->pin_count) {
1728 ret = ttm_bo_validate
1729 (&vbo->base,
1730 res->func->backup_placement,
1731 interruptible, false);
1732 if (ret) {
1733 ttm_bo_unreserve(&vbo->base);
1734 goto out_no_validate;
1735 }
1736 }
1737
1738 /* Do we really need to pin the MOB as well? */
1739 vmw_bo_pin_reserved(vbo, true);
1740 }
1741 ret = vmw_resource_validate(res);
1742 if (vbo)
1743 ttm_bo_unreserve(&vbo->base);
1744 if (ret)
1745 goto out_no_validate;
1746 }
1747 res->pin_count++;
1748
1749 out_no_validate:
1750 vmw_resource_unreserve(res, false, NULL, 0UL);
1751 out_no_reserve:
1752 mutex_unlock(&dev_priv->cmdbuf_mutex);
1753 ttm_write_unlock(&dev_priv->reservation_sem);
1754
1755 return ret;
1756 }
1757
1758 /**
1759 * vmw_resource_unpin - Remove a pin reference from a resource
1760 *
1761 * @res: The resource to remove a pin reference from
1762 *
1763 * Having a pin reference means that the resource can never be evicted, and
1764 * its id will never change as long as there is a pin reference.
1765 */
1766 void vmw_resource_unpin(struct vmw_resource *res)
1767 {
1768 struct vmw_private *dev_priv = res->dev_priv;
1769 int ret;
1770
1771 ttm_read_lock(&dev_priv->reservation_sem, false);
1772 mutex_lock(&dev_priv->cmdbuf_mutex);
1773
1774 ret = vmw_resource_reserve(res, false, true);
1775 WARN_ON(ret);
1776
1777 WARN_ON(res->pin_count == 0);
1778 if (--res->pin_count == 0 && res->backup) {
1779 struct vmw_dma_buffer *vbo = res->backup;
1780
1781 ttm_bo_reserve(&vbo->base, false, false, false, NULL);
1782 vmw_bo_pin_reserved(vbo, false);
1783 ttm_bo_unreserve(&vbo->base);
1784 }
1785
1786 vmw_resource_unreserve(res, false, NULL, 0UL);
1787
1788 mutex_unlock(&dev_priv->cmdbuf_mutex);
1789 ttm_read_unlock(&dev_priv->reservation_sem);
1790 }
1791
1792 /**
1793 * vmw_res_type - Return the resource type
1794 *
1795 * @res: Pointer to the resource
1796 */
1797 enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1798 {
1799 return res->func->res_type;
1800 }
1801