/*	$NetBSD: drm_wait_netbsd.h,v 1.9 2015/02/28 18:25:39 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _DRM_DRM_WAIT_NETBSD_H_
#define _DRM_DRM_WAIT_NETBSD_H_

#include <sys/param.h>
#include <sys/condvar.h>
#if DIAGNOSTIC
#include <sys/cpu.h>		/* cpu_intr_p */
#endif
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <linux/mutex.h>
#include <linux/spinlock.h>

typedef kcondvar_t drm_waitqueue_t;

#define	DRM_HZ	hz		/* XXX Hurk...  */

#define	DRM_UDELAY	DELAY

static inline void
DRM_INIT_WAITQUEUE(drm_waitqueue_t *q, const char *name)
{
	cv_init(q, name);
}

static inline void
DRM_DESTROY_WAITQUEUE(drm_waitqueue_t *q)
{
	cv_destroy(q);
}

static inline bool
DRM_WAITERS_P(drm_waitqueue_t *q, struct mutex *interlock)
{
	KASSERT(mutex_is_locked(interlock));
	return cv_has_waiters(q);
}

static inline void
DRM_WAKEUP_ONE(drm_waitqueue_t *q, struct mutex *interlock)
{
	KASSERT(mutex_is_locked(interlock));
	cv_signal(q);
}

static inline void
DRM_WAKEUP_ALL(drm_waitqueue_t *q, struct mutex *interlock)
{
	KASSERT(mutex_is_locked(interlock));
	cv_broadcast(q);
}

static inline bool
DRM_SPIN_WAITERS_P(drm_waitqueue_t *q, spinlock_t *interlock)
{
	KASSERT(spin_is_locked(interlock));
	return cv_has_waiters(q);
}

static inline void
DRM_SPIN_WAKEUP_ONE(drm_waitqueue_t *q, spinlock_t *interlock)
{
	KASSERT(spin_is_locked(interlock));
	cv_signal(q);
}

static inline void
DRM_SPIN_WAKEUP_ALL(drm_waitqueue_t *q, spinlock_t *interlock)
{
	KASSERT(spin_is_locked(interlock));
	cv_broadcast(q);
}

/*
 * DRM_SPIN_WAIT_ON is a replacement for the legacy DRM_WAIT_ON
 * portability macro.  It requires a spin interlock, which may require
 * changes to the surrounding code so that the waits actually are
 * interlocked by a spin lock.  It also polls the condition at every
 * tick, which masks missing wakeups.  Since DRM_WAIT_ON is going away,
 * in favour of Linux's native wait_event* API, waits in new code
 * should be written to use the DRM_*WAIT*_UNTIL macros below.
 *
 * Like the legacy DRM_WAIT_ON, DRM_SPIN_WAIT_ON returns
 *
 * . -EBUSY if timed out (yes, -EBUSY, not -ETIMEDOUT or -EWOULDBLOCK),
 * . -EINTR/-ERESTART if interrupted by a signal, or
 * . 0 if the condition was true before or just after the timeout.
 *
 * Note that cv_timedwait* return -EWOULDBLOCK, not -EBUSY, on timeout.
 */

#define	DRM_SPIN_WAIT_ON(RET, Q, INTERLOCK, TICKS, CONDITION) do \
{ \
	extern int hardclock_ticks; \
	const int _dswo_start = hardclock_ticks; \
	const int _dswo_end = _dswo_start + (TICKS); \
\
	KASSERT(spin_is_locked((INTERLOCK))); \
	KASSERT(!cpu_intr_p()); \
	KASSERT(!cpu_softintr_p()); \
	KASSERT(!cold); \
\
	for (;;) { \
		if (CONDITION) { \
			(RET) = 0; \
			break; \
		} \
		const int _dswo_now = hardclock_ticks; \
		if (_dswo_end < _dswo_now) { \
			(RET) = -EBUSY;	/* Match Linux...  */ \
			break; \
		} \
		/* XXX errno NetBSD->Linux */ \
		(RET) = -cv_timedwait_sig((Q), &(INTERLOCK)->sl_lock, \
		    (_dswo_end - _dswo_now)); \
		if (RET) { \
			if ((RET) == -EWOULDBLOCK) \
				(RET) = (CONDITION) ? 0 : -EBUSY; \
\
			break; \
		} \
	} \
} while (0)
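
/*
 * Illustrative sketch (hypothetical, for the legacy convention only; new
 * code should prefer the DRM_*WAIT*_UNTIL macros below): a caller waiting
 * under a spin lock with DRM_SPIN_WAIT_ON.  example_softc, sc_lock,
 * sc_queue, and sc_done are made-up names; sc_done is assumed to be set
 * under sc_lock and signalled with DRM_SPIN_WAKEUP_ONE/ALL.
 *
 *	static int
 *	example_legacy_wait(struct example_softc *sc, int ticks)
 *	{
 *		int ret;
 *
 *		spin_lock(&sc->sc_lock);
 *		DRM_SPIN_WAIT_ON(ret, &sc->sc_queue, &sc->sc_lock, ticks,
 *		    sc->sc_done);
 *		spin_unlock(&sc->sc_lock);
 *
 *		return ret;
 *	}
 *
 * Here ret is zero on success, -EBUSY on timeout, or -EINTR/-ERESTART if
 * interrupted by a signal.
 */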

/*
 * The DRM_*WAIT*_UNTIL macros are replacements for the Linux
 * wait_event* macros.  Like DRM_SPIN_WAIT_ON, they add an interlock,
 * and so may require some changes to the surrounding code.  They have
 * a different return value convention from DRM_SPIN_WAIT_ON and from
 * cv_*wait*.
 *
 * The untimed DRM_*WAIT*_UNTIL macros return
 *
 * . -EINTR/-ERESTART if interrupted by a signal, or
 * . zero if the condition evaluated to true.
 *
 * The timed DRM_*TIMED_WAIT*_UNTIL macros return
 *
 * . -EINTR/-ERESTART if interrupted by a signal,
 * . 0 if the condition was false after the timeout,
 * . 1 if the condition was true just after the timeout, or
 * . the number of ticks remaining if the condition was true before the
 *   timeout.
 *
 * Contrast DRM_SPIN_WAIT_ON, which returns -EINTR/-ERESTART on signal,
 * -EBUSY on timeout, and zero on success; and cv_*wait*, which return
 * -EINTR/-ERESTART on signal, -EWOULDBLOCK on timeout, and zero on
 * success.
 *
 * XXX In retrospect, giving the timed and untimed macros a different
 * return convention from one another to match Linux may have been a
 * bad idea.  All of this inconsistent timeout return convention logic
 * has been a consistent source of bugs.
 */

#define	_DRM_WAIT_UNTIL(RET, WAIT, Q, INTERLOCK, CONDITION) do \
{ \
	KASSERT(mutex_is_locked((INTERLOCK))); \
	ASSERT_SLEEPABLE(); \
	KASSERT(!cold); \
	for (;;) { \
		if (CONDITION) { \
			(RET) = 0; \
			break; \
		} \
		/* XXX errno NetBSD->Linux */ \
		(RET) = -WAIT((Q), &(INTERLOCK)->mtx_lock); \
		if (RET) \
			break; \
	} \
} while (0)

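/*
 * cv_wait_nointr adapts cv_wait, which has no return value, to the
 * WAIT(Q, LOCK) shape that _DRM_WAIT_UNTIL expects: it always evaluates
 * to zero, i.e. `not interrupted'.
 */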
#define	cv_wait_nointr(Q, I)	(cv_wait((Q), (I)), 0)

#define	DRM_WAIT_NOINTR_UNTIL(RET, Q, I, C) \
	_DRM_WAIT_UNTIL(RET, cv_wait_nointr, Q, I, C)

#define	DRM_WAIT_UNTIL(RET, Q, I, C) \
	_DRM_WAIT_UNTIL(RET, cv_wait_sig, Q, I, C)
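
/*
 * Illustrative sketch (hypothetical): waiting under a struct mutex for a
 * flag with the untimed, interruptible variant.  example_softc, sc_mutex,
 * sc_queue, and sc_flag are made-up names; sc_flag is assumed to be
 * protected by sc_mutex and signalled with DRM_WAKEUP_ONE/ALL.
 *
 *	static int
 *	example_wait(struct example_softc *sc)
 *	{
 *		int ret;
 *
 *		mutex_lock(&sc->sc_mutex);
 *		DRM_WAIT_UNTIL(ret, &sc->sc_queue, &sc->sc_mutex,
 *		    sc->sc_flag);
 *		mutex_unlock(&sc->sc_mutex);
 *
 *		return ret;
 *	}
 *
 * Here ret is zero once sc_flag is true, or -EINTR/-ERESTART if a signal
 * arrived first.
 */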

#define	_DRM_TIMED_WAIT_UNTIL(RET, WAIT, Q, INTERLOCK, TICKS, CONDITION) do \
{ \
	extern int hardclock_ticks; \
	int _dtwu_start = hardclock_ticks; \
	int _dtwu_ticks = (TICKS); \
	KASSERT(mutex_is_locked((INTERLOCK))); \
	ASSERT_SLEEPABLE(); \
	KASSERT(!cold); \
	for (;;) { \
		if (CONDITION) { \
			(RET) = _dtwu_ticks; \
			break; \
		} \
		/* XXX errno NetBSD->Linux */ \
		(RET) = -WAIT((Q), &(INTERLOCK)->mtx_lock, \
		    _dtwu_ticks); \
		if (RET) { \
			if ((RET) == -EWOULDBLOCK) \
				(RET) = (CONDITION) ? 1 : 0; \
			break; \
		} \
		const int _dtwu_now = hardclock_ticks; \
		KASSERT(_dtwu_start <= _dtwu_now); \
		if ((_dtwu_now - _dtwu_start) < _dtwu_ticks) { \
			_dtwu_ticks -= (_dtwu_now - _dtwu_start); \
			/* Advance start so elapsed ticks aren't deducted twice.  */ \
			_dtwu_start = _dtwu_now; \
		} else { \
			(RET) = (CONDITION) ? 1 : 0; \
			break; \
		} \
	} \
} while (0)

#define	DRM_TIMED_WAIT_NOINTR_UNTIL(RET, Q, I, T, C) \
	_DRM_TIMED_WAIT_UNTIL(RET, cv_timedwait, Q, I, T, C)

#define	DRM_TIMED_WAIT_UNTIL(RET, Q, I, T, C) \
	_DRM_TIMED_WAIT_UNTIL(RET, cv_timedwait_sig, Q, I, T, C)
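
/*
 * Illustrative sketch (hypothetical, not used by this header): one way a
 * caller might fold the timed return convention above into a Linux-style
 * zero-or-negative-errno result.  The names example_softc, sc_mutex,
 * sc_queue, and sc_flag, and the choice of -ETIMEDOUT, are assumptions
 * made for the example only.
 *
 *	static int
 *	example_timed_wait(struct example_softc *sc, int ticks)
 *	{
 *		int ret;
 *
 *		mutex_lock(&sc->sc_mutex);
 *		DRM_TIMED_WAIT_UNTIL(ret, &sc->sc_queue, &sc->sc_mutex,
 *		    ticks, sc->sc_flag);
 *		mutex_unlock(&sc->sc_mutex);
 *
 *		if (ret < 0)
 *			return ret;
 *		if (ret == 0)
 *			return -ETIMEDOUT;
 *		return 0;
 *	}
 *
 * Negative ret is -EINTR/-ERESTART, zero means the condition was still
 * false at the timeout, and a positive value means it became true.
 */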

/*
 * XXX Can't assert sleepable here because we hold a spin lock.  At
 * least we can assert that we're not in (soft) interrupt context, and
 * hope that nobody tries to use these with a sometimes quickly
 * satisfied condition while holding a different spin lock.
 */

#define	_DRM_SPIN_WAIT_UNTIL(RET, WAIT, Q, INTERLOCK, CONDITION) do \
{ \
	KASSERT(spin_is_locked((INTERLOCK))); \
	KASSERT(!cpu_intr_p()); \
	KASSERT(!cpu_softintr_p()); \
	KASSERT(!cold); \
	(RET) = 0; \
	while (!(CONDITION)) { \
		/* XXX errno NetBSD->Linux */ \
		(RET) = -WAIT((Q), &(INTERLOCK)->sl_lock); \
		if (RET) \
			break; \
	} \
} while (0)

#define	DRM_SPIN_WAIT_NOINTR_UNTIL(RET, Q, I, C) \
	_DRM_SPIN_WAIT_UNTIL(RET, cv_wait_nointr, Q, I, C)

#define	DRM_SPIN_WAIT_UNTIL(RET, Q, I, C) \
	_DRM_SPIN_WAIT_UNTIL(RET, cv_wait_sig, Q, I, C)

#define	_DRM_SPIN_TIMED_WAIT_UNTIL(RET, WAIT, Q, INTERLOCK, TICKS, CONDITION) \
do \
{ \
	extern int hardclock_ticks; \
	int _dstwu_start = hardclock_ticks; \
	int _dstwu_ticks = (TICKS); \
	KASSERT(spin_is_locked((INTERLOCK))); \
	KASSERT(!cpu_intr_p()); \
	KASSERT(!cpu_softintr_p()); \
	KASSERT(!cold); \
	for (;;) { \
		if (CONDITION) { \
			(RET) = _dstwu_ticks; \
			break; \
		} \
		/* XXX errno NetBSD->Linux */ \
		(RET) = -WAIT((Q), &(INTERLOCK)->sl_lock, \
		    _dstwu_ticks); \
		if (RET) { \
			if ((RET) == -EWOULDBLOCK) \
				(RET) = (CONDITION) ? 1 : 0; \
			break; \
		} \
		const int _dstwu_now = hardclock_ticks; \
		KASSERT(_dstwu_start <= _dstwu_now); \
		if ((_dstwu_now - _dstwu_start) < _dstwu_ticks) { \
			_dstwu_ticks -= (_dstwu_now - _dstwu_start); \
			/* Advance start so elapsed ticks aren't deducted twice.  */ \
			_dstwu_start = _dstwu_now; \
		} else { \
			(RET) = (CONDITION) ? 1 : 0; \
			break; \
		} \
	} \
} while (0)

#define	DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(RET, Q, I, T, C) \
	_DRM_SPIN_TIMED_WAIT_UNTIL(RET, cv_timedwait, Q, I, T, C)

#define	DRM_SPIN_TIMED_WAIT_UNTIL(RET, Q, I, T, C) \
	_DRM_SPIN_TIMED_WAIT_UNTIL(RET, cv_timedwait_sig, Q, I, T, C)
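
/*
 * Illustrative sketch (hypothetical): the spin-interlocked timed wait, as
 * might be used for a condition that an interrupt handler sets while
 * holding the same spin lock and then signals with DRM_SPIN_WAKEUP_ONE.
 * example_softc, sc_spinlock, sc_queue, and sc_irq_done, and the mapping
 * to -ETIMEDOUT, are assumptions for the example only.
 *
 *	static int
 *	example_spin_timed_wait(struct example_softc *sc, int ticks)
 *	{
 *		int ret;
 *
 *		spin_lock(&sc->sc_spinlock);
 *		DRM_SPIN_TIMED_WAIT_UNTIL(ret, &sc->sc_queue,
 *		    &sc->sc_spinlock, ticks, sc->sc_irq_done);
 *		spin_unlock(&sc->sc_spinlock);
 *
 *		return (ret > 0 ? 0 : (ret == 0 ? -ETIMEDOUT : ret));
 *	}
 */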

#endif	/* _DRM_DRM_WAIT_NETBSD_H_ */