/*	$NetBSD: vmwgfx_resource.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_resource.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $");

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;
	struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;

	dma_resv_assert_held(res->backup->base.base.resv);
	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
		res->func->prio;

	while (*new) {
		struct vmw_resource *this =
			container_of(*new, struct vmw_resource, mob_node);

		parent = *new;
		new = (res->backup_offset < this->backup_offset) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&res->mob_node, parent, new);
	rb_insert_color(&res->mob_node, &backup->res_tree);

	vmw_bo_prio_add(backup, res->used_prio);
}

/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	dma_resv_assert_held(backup->base.base.resv);
	if (vmw_resource_mob_attached(res)) {
		rb_erase(&res->mob_node, &backup->res_tree);
		RB_CLEAR_NODE(&res->mob_node);
		vmw_bo_prio_del(backup, res->used_prio);
	}
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}
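
/*
 * Illustrative sketch (not part of the driver): a caller holding only a
 * weak pointer, where the last reference may be dropped concurrently,
 * takes a reference only if the resource is not already being destroyed:
 *
 *	struct vmw_resource *res =
 *		vmw_resource_reference_unless_doomed(weak_res);
 *
 *	if (res) {
 *		... use res ...
 *		vmw_resource_unreference(&res);
 *	}
 *
 * "weak_res" is a hypothetical pointer still reachable from a lookup
 * structure after the refcount may have dropped to zero.
 */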

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
		container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, NULL);
		if (vmw_resource_mob_attached(res) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		vmw_resource_mob_detach(res);
		if (res->dirty)
			res->func->dirty_free(res);
		if (res->coherent)
			vmw_bo_dirty_release(res->backup);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}


/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
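
/*
 * Design note: the id is allocated under resource_lock, a spinlock, so
 * the allocation itself must not sleep. The idr_preload()/idr_preload_end()
 * pair preallocates idr nodes with GFP_KERNEL outside the lock, so the
 * locked idr_alloc() can use GFP_NOWAIT without failing merely because
 * memory reclaim would have been needed.
 */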

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The struct vmw_resource to initialize.
 * @delay_id: Boolean whether to defer device id allocation until
 * the first validation.
 * @res_free: Resource destructor.
 * @func: Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	RB_CLEAR_NODE(&res->mob_node);
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	res->coherent = false;
	res->used_prio = 3;
	res->dirty = NULL;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}
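
/*
 * Usage sketch (illustrative only; the my_* names are hypothetical
 * stand-ins for a resource type's wrapper struct, destructor and
 * function table): an implementation embeds struct vmw_resource and
 * initializes it with id allocation deferred to first validation:
 *
 *	ret = vmw_resource_init(dev_priv, &my_res->res, true,
 *				my_res_free, &my_res_func);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	my_res->res.hw_destroy = my_hw_destroy;
 */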


/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res: On successful return the location pointed to will contain
 * a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 *
 * Returns a pointer to the looked-up resource without taking a reference.
 * If the handle can't be found, ERR_PTR(-ESRCH) is returned; if it is
 * associated with an incorrect resource type, ERR_PTR(-EINVAL) is returned.
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv
				      *converter)
{
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base)
		return ERR_PTR(-ESRCH);

	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
		ttm_base_object_noref_release();
		return ERR_PTR(-EINVAL);
	}

	return converter->base_obj_to_res(base);
}
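
/*
 * Note: the noref variant returns the resource without taking a
 * reference, so the pointer is only valid as long as the base-object
 * lookup is outstanding. Callers release that lookup with
 * vmw_user_resource_noref_release() (a thin wrapper in vmwgfx_drv.h
 * around ttm_base_object_noref_release()) when done, or take a real
 * reference first.
 */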

/**
 * vmw_user_lookup_handle - Helper function that looks up either a surface
 * or a buffer object.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
	return ret;
}
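
/*
 * Usage sketch (illustrative only): exactly one of the out pointers is
 * filled in on success, so callers initialize both to NULL and test
 * which lookup succeeded:
 *
 *	struct vmw_surface *surf = NULL;
 *	struct vmw_buffer_object *buf = NULL;
 *
 *	ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &surf, &buf);
 *	if (ret)
 *		return ret;
 *	if (surf)
 *		... the handle named a surface ...
 *	else
 *		... the handle named a buffer object ...
 */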

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
			  res->func->backup_placement,
			  interruptible,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly
 * containing backup data if a bind operation is needed.
 * @dirtying: Pending GPU operation will dirty the resource.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf,
				    bool dirtying)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			vmw_resource_mob_attach(res);
	}

	/*
	 * Handle the case where the backup mob is marked coherent but
	 * the resource isn't.
	 */
	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
	    !res->coherent) {
		if (res->backup->dirty && !res->dirty) {
			ret = func->dirty_alloc(res);
			if (ret)
				return ret;
		} else if (!res->backup->dirty && res->dirty) {
			func->dirty_free(res);
		}
	}

	/*
	 * Transfer the dirty regions to the resource and update
	 * the resource.
	 */
	if (res->dirty) {
		if (dirtying && !res->res_dirty) {
			pgoff_t start = res->backup_offset >> PAGE_SHIFT;
			pgoff_t end = __KERNEL_DIV_ROUND_UP
				(res->backup_offset + res->backup_size,
				 PAGE_SIZE);

			vmw_bo_dirty_unmap(res->backup, start, end);
		}

		vmw_bo_dirty_transfer_to_res(res);
		return func->dirty_sync(res);
	}

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
 * @dirty_set: Change dirty status of the resource.
 * @dirty: When changing dirty status indicates the new status.
 * @switch_backup: Backup buffer has been switched.
 * @new_backup: Pointer to new backup buffer if command submission
 * switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool dirty_set,
			    bool dirty,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			vmw_resource_mob_detach(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->backup);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);

			/*
			 * The validation code should already have added a
			 * dirty tracker here.
			 */
			WARN_ON(res->coherent && !new_backup->dirty);

			vmw_resource_mob_attach(res);
		} else {
			res->backup = NULL;
		}
	} else if (switch_backup && res->coherent) {
		vmw_bo_dirty_release(res->backup);
	}

	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (dirty_set)
		res->res_dirty = dirty;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 * for a resource, and in that case allocate one, then reserve and
 * validate it.
 *
 * @ticket: The ww acquire context to use, or NULL if trylocking.
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 * @val_buf: On successful return contains data about the
 * reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->backup->base);
	val_buf->bo = &res->backup->base;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res: The resource to reserve.
 * @interruptible: Whether any sleeps should be performed while interruptible.
 * @no_backup: Do not allocate a backup buffer even if one is needed.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}
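
/*
 * Typical lifecycle sketch (illustrative only; reservation of the
 * backup buffer itself is handled by the caller's validation
 * machinery): command submission reserves the resource, validates it,
 * and unreserves it once the referencing commands have been queued:
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	if (ret)
 *		return ret;
 *	... reserve res->backup ...
 *	ret = vmw_resource_validate(res, true, false);
 *	...
 *	vmw_resource_unreserve(res, false, false, false, NULL, 0);
 */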

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 * backup buffer
 *
 * @ticket: The ww acquire ctx used for reservation.
 * @val_buf: Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 * to a backup buffer.
 *
 * @ticket: The ww acquire ticket to use, or NULL if trylocking.
 * @res: The resource to evict.
 * @interruptible: Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		vmw_resource_mob_detach(res);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}


/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 * @dirtying: Pending GPU operation will dirty the resource
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr,
			  bool dirtying)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		WARN_ON_ONCE(vmw_resource_mob_attached(res));
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}


/**
 * vmw_resource_unbind_list - Unbind all resources attached to a mob
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.num_shared = 0
	};

	dma_resv_assert_held(vbo->base.base.resv);
	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
		struct rb_node *node = vbo->res_tree.rb_node;
		struct vmw_resource *res =
			container_of(node, struct vmw_resource, mob_node);

		if (!WARN_ON_ONCE(!res->func->unbind))
			(void) res->func->unbind(res, res->res_dirty, &val_buf);

		res->backup_dirty = true;
		res->res_dirty = false;
		vmw_resource_mob_detach(res);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}


/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist. This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;


	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv = dx_query_ctx->dev_priv;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = dx_query_ctx->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time the affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}


/**
 * vmw_query_move_notify - Read back cached query states before a move
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;


	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);

}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv: Pointer to a device private struct
 * @type: The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

		/* Wait-lock backup buffers with a ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv: Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether any sleeps should be performed while interruptible
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible, true);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}
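
/*
 * Usage sketch (illustrative only): a scanout path keeps a resource
 * pinned while it is on screen and balances every successful pin with
 * an unpin:
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	... resource id and backing MOB are now stable ...
 *	vmw_resource_unpin(res);
 */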

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}

/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 * @res: The resource.
 * @start: The first page touched.
 * @end: The last page touched + 1.
 */
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end)
{
	if (res->dirty)
		res->func->dirty_range_add(res, start << PAGE_SHIFT,
					   end << PAGE_SHIFT);
}
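
/*
 * Example (illustrative only): @start and @end are page offsets, not
 * byte offsets, so a caller that touched bytes [offset, offset + size)
 * of the backing store would report
 *
 *	vmw_resource_dirty_update(res, offset >> PAGE_SHIFT,
 *	    (offset + size + PAGE_SIZE - 1) >> PAGE_SHIFT);
 */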

/**
 * vmw_resources_clean - Clean resources intersecting a mob range
 * @vbo: The mob buffer object
 * @start: The mob page offset starting the range
 * @end: The mob page offset ending the range
 * @num_prefault: Returns how many pages including the first have been
 * cleaned and are ok to prefault
 */
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault)
{
	struct rb_node *cur = vbo->res_tree.rb_node;
	struct vmw_resource *found = NULL;
	unsigned long res_start = start << PAGE_SHIFT;
	unsigned long res_end = end << PAGE_SHIFT;
	unsigned long last_cleaned = 0;

	/*
	 * Find the resource with lowest backup_offset that intersects the
	 * range.
	 */
	while (cur) {
		struct vmw_resource *cur_res =
			container_of(cur, struct vmw_resource, mob_node);

		if (cur_res->backup_offset >= res_end) {
			cur = cur->rb_left;
		} else if (cur_res->backup_offset + cur_res->backup_size <=
			   res_start) {
			cur = cur->rb_right;
		} else {
			found = cur_res;
			cur = cur->rb_left;
			/* Continue to look for resources with lower offsets */
		}
	}

	/*
	 * In order of increasing backup_offset, clean dirty resources
	 * intersecting the range.
	 */
	while (found) {
		if (found->res_dirty) {
			int ret;

			if (!found->func->clean)
				return -EINVAL;

			ret = found->func->clean(found);
			if (ret)
				return ret;

			found->res_dirty = false;
		}
		last_cleaned = found->backup_offset + found->backup_size;
		cur = rb_next(&found->mob_node);
		if (!cur)
			break;

		found = container_of(cur, struct vmw_resource, mob_node);
		if (found->backup_offset >= res_end)
			break;
	}

	/*
	 * Set number of pages allowed prefaulting and fence the buffer object
	 */
	*num_prefault = 1;
	if (last_cleaned > res_start) {
		struct ttm_buffer_object *bo = &vbo->base;

		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
						      PAGE_SIZE);
		vmw_bo_fence_single(bo, NULL);
		if (bo->moving)
			dma_fence_put(bo->moving);
		bo->moving = dma_fence_get
			(dma_resv_get_excl(bo->base.resv));
	}

	return 0;
}