/*	$NetBSD: ttm_execbuf_util.c,v 1.6 2021/12/18 23:45:44 riastradh Exp $	*/

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_execbuf_util.c,v 1.6 2021/12/18 23:45:44 riastradh Exp $");

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

/*
 * Unlock the reservations of every buffer on @list that was reserved
 * before @entry, in reverse acquisition order.
 */
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}

/*
 * Unreserve every buffer on @list, bumping each to the tail of the LRU,
 * and release @ticket if one was used.
 */
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	spin_lock(&ttm_bo_glob.lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&ttm_bo_glob.lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->num_shared)
				continue;

			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);
			if (!ret)
				continue;
		}

		/*
		 * We lost out: drop every reservation taken so far and
		 * try to reserve only this buffer, then start over if
		 * that succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK) {
			if (intr) {
				ret = dma_resv_lock_slow_interruptible(bo->base.resv,
								       ticket);
			} else {
				dma_resv_lock_slow(bo->base.resv, ticket);
				ret = 0;
			}
		}

		if (!ret && entry->num_shared)
			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);

		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/*
		 * Move this item to the front of the list so the loop
		 * starts over with the remaining buffers, without having
		 * to track where iteration should resume.
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

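/*
 * Illustrative caller-side sketch of the reserve/backoff pairing above.
 * This is not code from this file or any particular driver: the bos[]
 * array and my_validate_bo() are hypothetical stand-ins.
 *
 *	struct ww_acquire_ctx ticket;
 *	struct list_head list;
 *	struct ttm_validate_buffer val[2];
 *	int ret, i;
 *
 *	INIT_LIST_HEAD(&list);
 *	for (i = 0; i < 2; i++) {
 *		val[i].bo = bos[i];		// hypothetical BO array
 *		val[i].num_shared = 1;		// want one shared fence slot
 *		list_add_tail(&val[i].head, &list);
 *	}
 *
 *	// Take all reservations, sleeping interruptibly on contention;
 *	// on error nothing is left reserved and the ticket is finished.
 *	ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 *	if (ret)
 *		return ret;
 *
 *	ret = my_validate_bo(&list);		// hypothetical validation
 *	if (ret) {
 *		// On failure after a successful reserve, the caller must
 *		// drop all reservations itself.
 *		ttm_eu_backoff_reservation(&ticket, &list);
 *		return ret;
 *	}
 */
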
/*
 * Attach @fence to every buffer on @list (in a shared slot when
 * num_shared is nonzero, otherwise as the exclusive fence), move each
 * buffer to the tail of the LRU, and unreserve them all.
 */
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	spin_lock(&ttm_bo_glob.lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->num_shared)
			dma_resv_add_shared_fence(bo->base.resv, fence);
		else
			dma_resv_add_excl_fence(bo->base.resv, fence);
		ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&ttm_bo_glob.lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);

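/*
 * Continuing the sketch above: after validation succeeds and the job is
 * submitted, a single fence publishes completion on every buffer and
 * drops all reservations in one call.  my_submit_job() is a hypothetical
 * stand-in for a driver's submission path.
 *
 *	struct dma_fence *fence = my_submit_job(&list);	// hypothetical
 *
 *	// Adds the fence to each buffer's reservation object, bumps the
 *	// LRU, unlocks, and finishes the ww_acquire ticket.
 *	ttm_eu_fence_buffer_objects(&ticket, &list, fence);
 *	dma_fence_put(fence);	// drop the caller's own reference
 */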