/* $NetBSD: ttm_execbuf_util.c,v 1.2.30.1 2019/06/10 22:08:28 christos Exp $ */

/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_execbuf_util.c,v 1.2.30.1 2019/06/10 22:08:28 christos Exp $");

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/export.h>

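/*
 * ttm_eu_backoff_reservation_reverse - walk the list backwards from
 * @entry (exclusive) and unreserve every buffer object reserved so far.
 * Used to unwind a partially completed reservation pass when reserving
 * @entry itself failed.
 */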
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					      struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		__ttm_bo_unreserve(bo);
	}
}

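/*
 * ttm_eu_del_from_lru_locked - take every buffer on the list off the
 * LRU lists so that eviction cannot select it while it is reserved.
 * The caller must hold the global LRU lock; the references held by the
 * LRU lists are dropped via ttm_bo_list_ref_sub().
 */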
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		unsigned put_count = ttm_bo_del_from_lru(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);
	}
}

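/*
 * ttm_eu_backoff_reservation - undo a successful reservation pass: put
 * every buffer back on the LRU, drop its reservation, and release the
 * ww_mutex acquire context, if one was used.
 */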
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
				       ticket);
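		/*
		 * A buffer marked for CPU access cannot be handed to the
		 * GPU; drop the reservation and report -EBUSY so the
		 * caller backs off until the buffer is free again.
		 */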
		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			__ttm_bo_unreserve(bo);
			ret = -EBUSY;
		} else if (ret == -EALREADY && dups) {
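			/*
			 * -EALREADY: the buffer is already reserved in
			 * this context, i.e. it appears twice on the
			 * list.  Park the duplicate on @dups and keep
			 * iterating from its predecessor.
			 */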
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

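		/*
		 * Reserved successfully.  If this buffer will receive a
		 * shared fence, make sure a shared slot is available in
		 * its reservation object while we can still back off
		 * cleanly.
		 */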
		if (!ret) {
			if (!entry->shared)
				continue;

			ret = reservation_object_reserve_shared(bo->resv);
			if (!ret)
				continue;
		}

		/*
		 * We raced and lost this buffer.  Drop every reservation
		 * taken so far, then sleep until just this buffer can be
		 * reserved, and restart the loop with it held.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

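		/*
		 * -EDEADLK: we lost the ww_mutex arbitration to an older
		 * acquire context.  With all other reservations dropped
		 * it is safe to block on this lock via the ww_mutex slow
		 * path.
		 */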
		if (ret == -EDEADLK && intr) {
			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
							       ticket);
		} else if (ret == -EDEADLK) {
			ww_mutex_lock_slow(&bo->resv->lock, ticket);
			ret = 0;
		}

		if (!ret && entry->shared)
			ret = reservation_object_reserve_shared(bo->resv);

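		/*
		 * Still failing: map -EINTR from the interruptible slow
		 * path to -ERESTARTSYS and tear down the acquire context
		 * before bailing out.
		 */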
		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/*
		 * Move this buffer to the front of the list; iteration
		 * then restarts from the beginning with this buffer
		 * already reserved, without any extra bookkeeping.
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

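	/*
	 * Every buffer is now reserved.  Mark the acquire context done
	 * (no further locks will be taken with this ticket) and pull the
	 * buffers off the LRU so they cannot be chosen for eviction
	 * while they are validated and fenced.
	 */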
	if (ticket)
		ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

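/*
 * ttm_eu_fence_buffer_objects - attach @fence to every buffer on the
 * list, as a shared or exclusive fence according to each entry, then
 * return the buffers to the LRU, drop their reservations, and release
 * the acquire context, if one was used.
 */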
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list, struct fence *fence)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver __unused;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		if (entry->shared)
			reservation_object_add_shared_fence(bo->resv, fence);
		else
			reservation_object_add_excl_fence(bo->resv, fence);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);