/*	$NetBSD: drm_wait_netbsd.h,v 1.11 2015/02/28 21:30:22 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _DRM_DRM_WAIT_NETBSD_H_
#define _DRM_DRM_WAIT_NETBSD_H_

#include <sys/param.h>
#include <sys/condvar.h>
#if DIAGNOSTIC
#include <sys/cpu.h>	/* cpu_intr_p */
#endif
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <linux/mutex.h>
#include <linux/spinlock.h>

typedef kcondvar_t drm_waitqueue_t;

#define DRM_HZ hz	/* XXX Hurk... */

#define DRM_UDELAY DELAY

static inline void
DRM_INIT_WAITQUEUE(drm_waitqueue_t *q, const char *name)
{
	cv_init(q, name);
}

static inline void
DRM_DESTROY_WAITQUEUE(drm_waitqueue_t *q)
{
	cv_destroy(q);
}

static inline bool
DRM_WAITERS_P(drm_waitqueue_t *q, struct mutex *interlock)
{
	KASSERT(mutex_is_locked(interlock));
	return cv_has_waiters(q);
}

static inline void
DRM_WAKEUP_ONE(drm_waitqueue_t *q, struct mutex *interlock)
{
	KASSERT(mutex_is_locked(interlock));
	cv_signal(q);
}

static inline void
DRM_WAKEUP_ALL(drm_waitqueue_t *q, struct mutex *interlock)
{
	KASSERT(mutex_is_locked(interlock));
	cv_broadcast(q);
}

static inline bool
DRM_SPIN_WAITERS_P(drm_waitqueue_t *q, spinlock_t *interlock)
{
	KASSERT(spin_is_locked(interlock));
	return cv_has_waiters(q);
}

static inline void
DRM_SPIN_WAKEUP_ONE(drm_waitqueue_t *q, spinlock_t *interlock)
{
	KASSERT(spin_is_locked(interlock));
	cv_signal(q);
}

static inline void
DRM_SPIN_WAKEUP_ALL(drm_waitqueue_t *q, spinlock_t *interlock)
{
	KASSERT(spin_is_locked(interlock));
	cv_broadcast(q);
}

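/*
 * Illustrative sketch only, not part of this header: a wakeup side
 * that signals waiters under the spin interlock.  The softc and its
 * sc_lock, sc_wq, and sc_done members are hypothetical.
 *
 *	struct my_softc {
 *		spinlock_t	sc_lock;
 *		drm_waitqueue_t	sc_wq;
 *		bool		sc_done;
 *	};
 *
 *	DRM_INIT_WAITQUEUE(&sc->sc_wq, "mywq");
 *	...
 *	spin_lock(&sc->sc_lock);
 *	sc->sc_done = true;
 *	DRM_SPIN_WAKEUP_ALL(&sc->sc_wq, &sc->sc_lock);
 *	spin_unlock(&sc->sc_lock);
 *	...
 *	DRM_DESTROY_WAITQUEUE(&sc->sc_wq);
 */
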
/*
 * DRM_SPIN_WAIT_ON is a replacement for the legacy DRM_WAIT_ON
 * portability macro.  It requires a spin interlock, which may require
 * changes to the surrounding code so that the waits actually are
 * interlocked by a spin lock.  It also polls the condition at every
 * tick, which masks missing wakeups.  Since DRM_WAIT_ON is going away,
 * in favour of Linux's native wait_event* API, waits in new code
 * should be written to use the DRM_*WAIT*_UNTIL macros below.
 *
 * Like the legacy DRM_WAIT_ON, DRM_SPIN_WAIT_ON returns
 *
 * . -EBUSY if timed out (yes, -EBUSY, not -ETIMEDOUT or -EWOULDBLOCK),
 * . -EINTR/-ERESTART if interrupted by a signal, or
 * . 0 if the condition was true before or just after the timeout.
 *
 * Note that cv_timedwait* return EWOULDBLOCK, not EBUSY, on timeout;
 * the macro negates the error, re-tests the condition on -EWOULDBLOCK,
 * and otherwise converts it to -EBUSY to match the legacy convention.
 */

#define DRM_SPIN_WAIT_ON(RET, Q, INTERLOCK, TICKS, CONDITION) do \
{ \
	extern int hardclock_ticks; \
	const int _dswo_start = hardclock_ticks; \
	const int _dswo_end = _dswo_start + (TICKS); \
 \
	KASSERT(spin_is_locked((INTERLOCK))); \
	KASSERT(!cpu_intr_p()); \
	KASSERT(!cpu_softintr_p()); \
	KASSERT(!cold); \
 \
	for (;;) { \
		if (CONDITION) { \
			(RET) = 0; \
			break; \
		} \
		const int _dswo_now = hardclock_ticks; \
		if (_dswo_end < _dswo_now) { \
			(RET) = -EBUSY; /* Match Linux... */ \
			break; \
		} \
		/* XXX errno NetBSD->Linux */ \
		(RET) = -cv_timedwait_sig((Q), &(INTERLOCK)->sl_lock, 1); \
		if (RET) { \
			if ((RET) == -EWOULDBLOCK) \
				(RET) = (CONDITION) ? 0 : -EBUSY; \
			break; \
		} \
	} \
} while (0)

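/*
 * Illustrative sketch of a DRM_SPIN_WAIT_ON call, not part of this
 * header: wait up to three seconds for a hypothetical sc->sc_irq_done
 * flag under a hypothetical sc->sc_lock spin interlock.  ret is -EBUSY
 * on timeout and -EINTR/-ERESTART on signal, as described above.  New
 * code should prefer the DRM_*WAIT*_UNTIL macros below.
 *
 *	int ret;
 *
 *	spin_lock(&sc->sc_lock);
 *	DRM_SPIN_WAIT_ON(ret, &sc->sc_wq, &sc->sc_lock, 3 * DRM_HZ,
 *	    sc->sc_irq_done);
 *	spin_unlock(&sc->sc_lock);
 *	if (ret)
 *		return ret;
 */
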
/*
 * The DRM_*WAIT*_UNTIL macros are replacements for the Linux
 * wait_event* macros.  Like DRM_SPIN_WAIT_ON, they add an interlock,
 * and so may require some changes to the surrounding code.  They have
 * a different return value convention from DRM_SPIN_WAIT_ON and a
 * different return value convention from cv_*wait*.
 *
 * The untimed DRM_*WAIT*_UNTIL macros return
 *
 * . -EINTR/-ERESTART if interrupted by a signal, or
 * . zero if the condition evaluated to true.
 *
 * The timed DRM_*TIMED_WAIT*_UNTIL macros return
 *
 * . -EINTR/-ERESTART if interrupted by a signal,
 * . 0 if the condition was false after the timeout,
 * . 1 if the condition was true just after the timeout, or
 * . the number of ticks remaining if the condition was true before the
 *   timeout.
 *
 * Contrast DRM_SPIN_WAIT_ON, which returns -EINTR/-ERESTART on signal,
 * -EBUSY on timeout, and zero on success; and cv_*wait*, which return
 * the positive errnos EINTR/ERESTART on signal and EWOULDBLOCK on
 * timeout, and zero on success.
 *
 * XXX In retrospect, giving the timed and untimed macros a different
 * return convention from one another to match Linux may have been a
 * bad idea.  All of this inconsistent timeout return convention logic
 * has been a consistent source of bugs.
 */

#define _DRM_WAIT_UNTIL(RET, WAIT, Q, INTERLOCK, CONDITION) do \
{ \
	KASSERT(mutex_is_locked((INTERLOCK))); \
	ASSERT_SLEEPABLE(); \
	KASSERT(!cold); \
	for (;;) { \
		if (CONDITION) { \
			(RET) = 0; \
			break; \
		} \
		/* XXX errno NetBSD->Linux */ \
		(RET) = -WAIT((Q), &(INTERLOCK)->mtx_lock); \
		if (RET) \
			break; \
	} \
} while (0)

#define cv_wait_nointr(Q, I) (cv_wait((Q), (I)), 0)

#define DRM_WAIT_NOINTR_UNTIL(RET, Q, I, C) \
	_DRM_WAIT_UNTIL(RET, cv_wait_nointr, Q, I, C)

#define DRM_WAIT_UNTIL(RET, Q, I, C) \
	_DRM_WAIT_UNTIL(RET, cv_wait_sig, Q, I, C)

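/*
 * Illustrative sketch of DRM_WAIT_UNTIL, not part of this header: wait
 * interruptibly for a hypothetical sc->sc_ready flag under a
 * hypothetical sc->sc_mutex interlock.  ret is zero once the condition
 * is true, or -EINTR/-ERESTART if a signal arrives first.
 *
 *	int ret;
 *
 *	mutex_lock(&sc->sc_mutex);
 *	DRM_WAIT_UNTIL(ret, &sc->sc_wq, &sc->sc_mutex, sc->sc_ready);
 *	mutex_unlock(&sc->sc_mutex);
 *	if (ret)
 *		return ret;
 */
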
#define _DRM_TIMED_WAIT_UNTIL(RET, WAIT, Q, INTERLOCK, TICKS, CONDITION) do \
{ \
	extern int hardclock_ticks; \
	int _dtwu_start = hardclock_ticks; \
	int _dtwu_ticks = (TICKS); \
	KASSERT(mutex_is_locked((INTERLOCK))); \
	ASSERT_SLEEPABLE(); \
	KASSERT(!cold); \
	for (;;) { \
		if (CONDITION) { \
			(RET) = _dtwu_ticks; \
			break; \
		} \
		/* XXX errno NetBSD->Linux */ \
		(RET) = -WAIT((Q), &(INTERLOCK)->mtx_lock, \
		    _dtwu_ticks); \
		if (RET) { \
			if ((RET) == -EWOULDBLOCK) \
				(RET) = (CONDITION) ? 1 : 0; \
			break; \
		} \
		/* Charge only the ticks since the last wakeup.  */ \
		const int _dtwu_now = hardclock_ticks; \
		KASSERT(_dtwu_start <= _dtwu_now); \
		if ((_dtwu_now - _dtwu_start) < _dtwu_ticks) { \
			_dtwu_ticks -= (_dtwu_now - _dtwu_start); \
			_dtwu_start = _dtwu_now; \
		} else { \
			(RET) = (CONDITION) ? 1 : 0; \
			break; \
		} \
	} \
} while (0)

#define DRM_TIMED_WAIT_NOINTR_UNTIL(RET, Q, I, T, C) \
	_DRM_TIMED_WAIT_UNTIL(RET, cv_timedwait, Q, I, T, C)

#define DRM_TIMED_WAIT_UNTIL(RET, Q, I, T, C) \
	_DRM_TIMED_WAIT_UNTIL(RET, cv_timedwait_sig, Q, I, T, C)

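/*
 * Illustrative sketch of DRM_TIMED_WAIT_UNTIL, not part of this
 * header, showing how the timed return convention is typically
 * consumed; sc_mutex, sc_wq, and sc_fence_done are hypothetical, and
 * mapping a timeout to -ETIMEDOUT is just one caller's choice.
 * ret > 0 means the condition became true (1, or the ticks
 * remaining), 0 means the wait timed out, and -EINTR/-ERESTART means
 * a signal arrived first.
 *
 *	int ret;
 *
 *	mutex_lock(&sc->sc_mutex);
 *	DRM_TIMED_WAIT_UNTIL(ret, &sc->sc_wq, &sc->sc_mutex, 5 * DRM_HZ,
 *	    sc->sc_fence_done);
 *	mutex_unlock(&sc->sc_mutex);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 */
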
/*
 * XXX Can't assert sleepable here because we hold a spin lock.  At
 * least we can assert that we're not in (soft) interrupt context, and
 * hope that nobody tries to use these with a sometimes quickly
 * satisfied condition while holding a different spin lock.
 */

#define _DRM_SPIN_WAIT_UNTIL(RET, WAIT, Q, INTERLOCK, CONDITION) do \
{ \
	KASSERT(spin_is_locked((INTERLOCK))); \
	KASSERT(!cpu_intr_p()); \
	KASSERT(!cpu_softintr_p()); \
	KASSERT(!cold); \
	(RET) = 0; \
	while (!(CONDITION)) { \
		/* XXX errno NetBSD->Linux */ \
		(RET) = -WAIT((Q), &(INTERLOCK)->sl_lock); \
		if (RET) \
			break; \
	} \
} while (0)

#define DRM_SPIN_WAIT_NOINTR_UNTIL(RET, Q, I, C) \
	_DRM_SPIN_WAIT_UNTIL(RET, cv_wait_nointr, Q, I, C)

#define DRM_SPIN_WAIT_UNTIL(RET, Q, I, C) \
	_DRM_SPIN_WAIT_UNTIL(RET, cv_wait_sig, Q, I, C)

#define _DRM_SPIN_TIMED_WAIT_UNTIL(RET, WAIT, Q, INTERLOCK, TICKS, CONDITION) \
do \
{ \
	extern int hardclock_ticks; \
	int _dstwu_start = hardclock_ticks; \
	int _dstwu_ticks = (TICKS); \
	KASSERT(spin_is_locked((INTERLOCK))); \
	KASSERT(!cpu_intr_p()); \
	KASSERT(!cpu_softintr_p()); \
	KASSERT(!cold); \
	for (;;) { \
		if (CONDITION) { \
			(RET) = _dstwu_ticks; \
			break; \
		} \
		/* XXX errno NetBSD->Linux */ \
		(RET) = -WAIT((Q), &(INTERLOCK)->sl_lock, \
		    _dstwu_ticks); \
		if (RET) { \
			if ((RET) == -EWOULDBLOCK) \
				(RET) = (CONDITION) ? 1 : 0; \
			break; \
		} \
		/* Charge only the ticks since the last wakeup.  */ \
		const int _dstwu_now = hardclock_ticks; \
		KASSERT(_dstwu_start <= _dstwu_now); \
		if ((_dstwu_now - _dstwu_start) < _dstwu_ticks) { \
			_dstwu_ticks -= (_dstwu_now - _dstwu_start); \
			_dstwu_start = _dstwu_now; \
		} else { \
			(RET) = (CONDITION) ? 1 : 0; \
			break; \
		} \
	} \
} while (0)

#define DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(RET, Q, I, T, C) \
	_DRM_SPIN_TIMED_WAIT_UNTIL(RET, cv_timedwait, Q, I, T, C)

#define DRM_SPIN_TIMED_WAIT_UNTIL(RET, Q, I, T, C) \
	_DRM_SPIN_TIMED_WAIT_UNTIL(RET, cv_timedwait_sig, Q, I, T, C)

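/*
 * Illustrative sketch of DRM_SPIN_TIMED_WAIT_UNTIL, not part of this
 * header: wait up to one second for a hypothetical sc->sc_vblank_done
 * flag under a hypothetical sc->sc_lock spin interlock.  The return
 * convention is the same as DRM_TIMED_WAIT_UNTIL above; mapping a
 * timeout to -EBUSY is just one caller's choice.
 *
 *	int ret;
 *
 *	spin_lock(&sc->sc_lock);
 *	DRM_SPIN_TIMED_WAIT_UNTIL(ret, &sc->sc_wq, &sc->sc_lock,
 *	    DRM_HZ, sc->sc_vblank_done);
 *	spin_unlock(&sc->sc_lock);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == 0)
 *		return -EBUSY;
 */
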
#endif	/* _DRM_DRM_WAIT_NETBSD_H_ */