/* $NetBSD: drm_wait_netbsd.h,v 1.5.2.1 2015/04/06 15:18:17 skrll Exp $ */

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _DRM_DRM_WAIT_NETBSD_H_
#define _DRM_DRM_WAIT_NETBSD_H_

#include <sys/param.h>
#include <sys/condvar.h>
#if DIAGNOSTIC
#include <sys/cpu.h>	/* cpu_intr_p */
#endif
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <linux/mutex.h>
#include <linux/spinlock.h>

typedef kcondvar_t drm_waitqueue_t;

#define DRM_HZ hz	/* XXX Hurk... */

#define DRM_UDELAY DELAY

static inline void
DRM_INIT_WAITQUEUE(drm_waitqueue_t *q, const char *name)
{
	cv_init(q, name);
}

static inline void
DRM_DESTROY_WAITQUEUE(drm_waitqueue_t *q)
{
	cv_destroy(q);
}

static inline bool
DRM_WAITERS_P(drm_waitqueue_t *q, struct mutex *interlock)
{
	KASSERT(mutex_is_locked(interlock));
	return cv_has_waiters(q);
}

static inline void
DRM_WAKEUP_ONE(drm_waitqueue_t *q, struct mutex *interlock)
{
	KASSERT(mutex_is_locked(interlock));
	cv_signal(q);
}

static inline void
DRM_WAKEUP_ALL(drm_waitqueue_t *q, struct mutex *interlock)
{
	KASSERT(mutex_is_locked(interlock));
	cv_broadcast(q);
}

static inline bool
DRM_SPIN_WAITERS_P(drm_waitqueue_t *q, spinlock_t *interlock)
{
	KASSERT(spin_is_locked(interlock));
	return cv_has_waiters(q);
}

static inline void
DRM_SPIN_WAKEUP_ONE(drm_waitqueue_t *q, spinlock_t *interlock)
{
	KASSERT(spin_is_locked(interlock));
	cv_signal(q);
}

static inline void
DRM_SPIN_WAKEUP_ALL(drm_waitqueue_t *q, spinlock_t *interlock)
{
	KASSERT(spin_is_locked(interlock));
	cv_broadcast(q);
}
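
/*
 * Illustrative sketch of the waitqueue lifecycle, using a hypothetical
 * softc (the names and the condition are made up, not taken from any
 * real driver):
 *
 *	struct example_softc {
 *		struct mutex	sc_lock;
 *		drm_waitqueue_t	sc_wq;
 *		bool		sc_done;
 *	};
 *
 * Initialize once, e.g. at attach time:
 *
 *	DRM_INIT_WAITQUEUE(&sc->sc_wq, "exwait");
 *
 * Mark the condition true and wake waiters, under the interlock:
 *
 *	mutex_lock(&sc->sc_lock);
 *	sc->sc_done = true;
 *	DRM_WAKEUP_ALL(&sc->sc_wq, &sc->sc_lock);
 *	mutex_unlock(&sc->sc_lock);
 *
 * Destroy once there can be no more waiters, e.g. at detach time:
 *
 *	DRM_DESTROY_WAITQUEUE(&sc->sc_wq);
 */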

/*
 * DRM_SPIN_WAIT_ON is a replacement for the legacy DRM_WAIT_ON
 * portability macro. It requires a spin interlock, which may require
 * changes to the surrounding code so that the waits actually are
 * interlocked by a spin lock. It also polls the condition at every
 * tick, which masks missing wakeups. Since DRM_WAIT_ON is going away,
 * in favour of Linux's native wait_event* API, waits in new code
 * should be written to use the DRM_*WAIT*_UNTIL macros below.
 *
 * Like the legacy DRM_WAIT_ON, DRM_SPIN_WAIT_ON returns
 *
 * . -EBUSY if timed out (yes, -EBUSY, not -ETIMEDOUT or -EWOULDBLOCK),
 * . -EINTR/-ERESTART if interrupted by a signal, or
 * . 0 if the condition was true before or just after the timeout.
 *
 * Note that cv_timedwait* return -EWOULDBLOCK, not -EBUSY, on timeout.
 */

#define DRM_SPIN_WAIT_ON(RET, Q, INTERLOCK, TICKS, CONDITION) do \
{ \
	extern int hardclock_ticks; \
	const int _dswo_start = hardclock_ticks; \
	const int _dswo_end = _dswo_start + (TICKS); \
	\
	KASSERT(spin_is_locked((INTERLOCK))); \
	KASSERT(!cpu_intr_p()); \
	KASSERT(!cpu_softintr_p()); \
	KASSERT(!cold); \
	\
	for (;;) { \
		if (CONDITION) { \
			(RET) = 0; \
			break; \
		} \
		const int _dswo_now = hardclock_ticks; \
		if (_dswo_end < _dswo_now) { \
			(RET) = -EBUSY; /* Match Linux... */ \
			break; \
		} \
		/* XXX errno NetBSD->Linux */ \
		(RET) = -cv_timedwait_sig((Q), &(INTERLOCK)->sl_lock, 1); \
		if (RET) { \
			if ((RET) == -EWOULDBLOCK) \
				/* Waited only one tick. */ \
				continue; \
			break; \
		} \
	} \
} while (0)
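
/*
 * Illustrative sketch of DRM_SPIN_WAIT_ON, assuming a hypothetical
 * softc whose sc_done flag is protected by the Linux spinlock
 * sc_intr_lock (the names are made up). On return, ret is 0 if
 * sc_done became true, -EBUSY on timeout, or -EINTR/-ERESTART if a
 * signal arrived:
 *
 *	int ret;
 *
 *	spin_lock(&sc->sc_intr_lock);
 *	DRM_SPIN_WAIT_ON(ret, &sc->sc_wq, &sc->sc_intr_lock, DRM_HZ,
 *	    sc->sc_done);
 *	spin_unlock(&sc->sc_intr_lock);
 *	if (ret)
 *		return ret;
 */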

/*
 * The DRM_*WAIT*_UNTIL macros are replacements for the Linux
 * wait_event* macros. Like DRM_SPIN_WAIT_ON, they add an interlock,
 * and so may require some changes to the surrounding code. They have
 * a different return value convention from DRM_SPIN_WAIT_ON and a
 * different return value convention from cv_*wait*.
 *
 * The untimed DRM_*WAIT*_UNTIL macros return
 *
 * . -EINTR/-ERESTART if interrupted by a signal, or
 * . zero if the condition evaluated to true.
 *
 * The timed DRM_*TIMED_WAIT*_UNTIL macros return
 *
 * . -EINTR/-ERESTART if interrupted by a signal,
 * . 0 if the condition was false after the timeout,
 * . 1 if the condition was true just after the timeout, or
 * . the number of ticks remaining if the condition was true before the
 *   timeout.
 *
 * Contrast DRM_SPIN_WAIT_ON, which returns -EINTR/-ERESTART on signal,
 * -EBUSY on timeout, and zero on success; and cv_*wait*, which return
 * -EINTR/-ERESTART on signal, -EWOULDBLOCK on timeout, and zero on
 * success.
 *
 * XXX In retrospect, giving the timed and untimed macros a different
 * return convention from one another to match Linux may have been a
 * bad idea. All of this inconsistent timeout return convention logic
 * has been a consistent source of bugs.
 */

#define _DRM_WAIT_UNTIL(RET, WAIT, Q, INTERLOCK, CONDITION) do \
{ \
	KASSERT(mutex_is_locked((INTERLOCK))); \
	ASSERT_SLEEPABLE(); \
	KASSERT(!cold); \
	for (;;) { \
		if (CONDITION) { \
			(RET) = 0; \
			break; \
		} \
		/* XXX errno NetBSD->Linux */ \
		(RET) = -WAIT((Q), &(INTERLOCK)->mtx_lock); \
		if (RET) \
			break; \
	} \
} while (0)

#define cv_wait_nointr(Q, I) (cv_wait((Q), (I)), 0)

#define DRM_WAIT_NOINTR_UNTIL(RET, Q, I, C) \
	_DRM_WAIT_UNTIL(RET, cv_wait_nointr, Q, I, C)

#define DRM_WAIT_UNTIL(RET, Q, I, C) \
	_DRM_WAIT_UNTIL(RET, cv_wait_sig, Q, I, C)
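
/*
 * Illustrative sketch of the untimed wait, with the same hypothetical
 * softc as above; sc_lock is a Linux struct mutex interlocking both
 * sc_wq and sc_done. ret is 0 once sc_done is true, or
 * -EINTR/-ERESTART if interrupted by a signal:
 *
 *	int ret;
 *
 *	mutex_lock(&sc->sc_lock);
 *	DRM_WAIT_UNTIL(ret, &sc->sc_wq, &sc->sc_lock, sc->sc_done);
 *	mutex_unlock(&sc->sc_lock);
 *	if (ret)
 *		return ret;
 */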

#define _DRM_TIMED_WAIT_UNTIL(RET, WAIT, Q, INTERLOCK, TICKS, CONDITION) do \
{ \
	extern int hardclock_ticks; \
	const int _dtwu_start = hardclock_ticks; \
	int _dtwu_ticks = (TICKS); \
	KASSERT(mutex_is_locked((INTERLOCK))); \
	ASSERT_SLEEPABLE(); \
	KASSERT(!cold); \
	for (;;) { \
		if (CONDITION) { \
			(RET) = _dtwu_ticks; \
			break; \
		} \
		/* XXX errno NetBSD->Linux */ \
		(RET) = -WAIT((Q), &(INTERLOCK)->mtx_lock, \
		    _dtwu_ticks); \
		if (RET) { \
			if ((RET) == -EWOULDBLOCK) \
				(RET) = (CONDITION) ? 1 : 0; \
			break; \
		} \
		const int _dtwu_now = hardclock_ticks; \
		KASSERT(_dtwu_start <= _dtwu_now); \
		if ((_dtwu_now - _dtwu_start) < _dtwu_ticks) { \
			_dtwu_ticks -= (_dtwu_now - _dtwu_start); \
		} else { \
			(RET) = (CONDITION) ? 1 : 0; \
			break; \
		} \
	} \
} while (0)

#define DRM_TIMED_WAIT_NOINTR_UNTIL(RET, Q, I, T, C) \
	_DRM_TIMED_WAIT_UNTIL(RET, cv_timedwait, Q, I, T, C)

#define DRM_TIMED_WAIT_UNTIL(RET, Q, I, T, C) \
	_DRM_TIMED_WAIT_UNTIL(RET, cv_timedwait_sig, Q, I, T, C)
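
/*
 * Illustrative sketch of the timed wait and its return convention,
 * again with a hypothetical softc; the conversion of 0 (timeout) to
 * -ETIMEDOUT is just one plausible policy, not required by the macro:
 *
 *	int ret;
 *
 *	mutex_lock(&sc->sc_lock);
 *	DRM_TIMED_WAIT_UNTIL(ret, &sc->sc_wq, &sc->sc_lock, DRM_HZ,
 *	    sc->sc_done);
 *	mutex_unlock(&sc->sc_lock);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *
 * A positive ret means sc_done was true: it is the number of ticks
 * that had not yet elapsed, or 1 if the condition became true just as
 * the timeout expired.
 */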

/*
 * XXX Can't assert sleepable here because we hold a spin lock. At
 * least we can assert that we're not in (soft) interrupt context, and
 * hope that nobody tries to use these with a sometimes quickly
 * satisfied condition while holding a different spin lock.
 */

#define _DRM_SPIN_WAIT_UNTIL(RET, WAIT, Q, INTERLOCK, CONDITION) do \
{ \
	KASSERT(spin_is_locked((INTERLOCK))); \
	KASSERT(!cpu_intr_p()); \
	KASSERT(!cpu_softintr_p()); \
	KASSERT(!cold); \
	(RET) = 0; \
	while (!(CONDITION)) { \
		/* XXX errno NetBSD->Linux */ \
		(RET) = -WAIT((Q), &(INTERLOCK)->sl_lock); \
		if (RET) \
			break; \
	} \
} while (0)

#define DRM_SPIN_WAIT_NOINTR_UNTIL(RET, Q, I, C) \
	_DRM_SPIN_WAIT_UNTIL(RET, cv_wait_nointr, Q, I, C)

#define DRM_SPIN_WAIT_UNTIL(RET, Q, I, C) \
	_DRM_SPIN_WAIT_UNTIL(RET, cv_wait_sig, Q, I, C)

#define _DRM_SPIN_TIMED_WAIT_UNTIL(RET, WAIT, Q, INTERLOCK, TICKS, CONDITION) \
do \
{ \
	extern int hardclock_ticks; \
	const int _dstwu_start = hardclock_ticks; \
	int _dstwu_ticks = (TICKS); \
	KASSERT(spin_is_locked((INTERLOCK))); \
	KASSERT(!cpu_intr_p()); \
	KASSERT(!cpu_softintr_p()); \
	KASSERT(!cold); \
	for (;;) { \
		if (CONDITION) { \
			(RET) = _dstwu_ticks; \
			break; \
		} \
		/* XXX errno NetBSD->Linux */ \
		(RET) = -WAIT((Q), &(INTERLOCK)->sl_lock, \
		    _dstwu_ticks); \
		if (RET) { \
			if ((RET) == -EWOULDBLOCK) \
				(RET) = (CONDITION) ? 1 : 0; \
			break; \
		} \
		const int _dstwu_now = hardclock_ticks; \
		KASSERT(_dstwu_start <= _dstwu_now); \
		if ((_dstwu_now - _dstwu_start) < _dstwu_ticks) { \
			_dstwu_ticks -= (_dstwu_now - _dstwu_start); \
		} else { \
			(RET) = (CONDITION) ? 1 : 0; \
			break; \
		} \
	} \
} while (0)

#define DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(RET, Q, I, T, C) \
	_DRM_SPIN_TIMED_WAIT_UNTIL(RET, cv_timedwait, Q, I, T, C)

#define DRM_SPIN_TIMED_WAIT_UNTIL(RET, Q, I, T, C) \
	_DRM_SPIN_TIMED_WAIT_UNTIL(RET, cv_timedwait_sig, Q, I, T, C)
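
/*
 * Illustrative sketch of the spin-interlocked timed wait, e.g. for a
 * condition set from an interrupt handler that holds the same
 * hypothetical sc_intr_lock and calls DRM_SPIN_WAKEUP_ONE/ALL. The
 * return convention is the same as for DRM_TIMED_WAIT_UNTIL:
 *
 *	int ret;
 *
 *	spin_lock(&sc->sc_intr_lock);
 *	DRM_SPIN_TIMED_WAIT_UNTIL(ret, &sc->sc_wq, &sc->sc_intr_lock,
 *	    DRM_HZ, sc->sc_done);
 *	spin_unlock(&sc->sc_intr_lock);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 */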

#endif /* _DRM_DRM_WAIT_NETBSD_H_ */