kern_turnstile.c revision 1.1.18.1 1 1.1.18.1 yamt /* $NetBSD: kern_turnstile.c,v 1.1.18.1 2007/02/26 09:11:13 yamt Exp $ */
2 1.1.18.1 yamt
3 1.1.18.1 yamt /*-
4 1.1.18.1 yamt * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
5 1.1.18.1 yamt * All rights reserved.
6 1.1.18.1 yamt *
7 1.1.18.1 yamt * This code is derived from software contributed to The NetBSD Foundation
8 1.1.18.1 yamt * by Jason R. Thorpe and Andrew Doran.
9 1.1.18.1 yamt *
10 1.1.18.1 yamt * Redistribution and use in source and binary forms, with or without
11 1.1.18.1 yamt * modification, are permitted provided that the following conditions
12 1.1.18.1 yamt * are met:
13 1.1.18.1 yamt * 1. Redistributions of source code must retain the above copyright
14 1.1.18.1 yamt * notice, this list of conditions and the following disclaimer.
15 1.1.18.1 yamt * 2. Redistributions in binary form must reproduce the above copyright
16 1.1.18.1 yamt * notice, this list of conditions and the following disclaimer in the
17 1.1.18.1 yamt * documentation and/or other materials provided with the distribution.
18 1.1.18.1 yamt * 3. All advertising materials mentioning features or use of this software
19 1.1.18.1 yamt * must display the following acknowledgement:
20 1.1.18.1 yamt * This product includes software developed by the NetBSD
21 1.1.18.1 yamt * Foundation, Inc. and its contributors.
22 1.1.18.1 yamt * 4. Neither the name of The NetBSD Foundation nor the names of its
23 1.1.18.1 yamt * contributors may be used to endorse or promote products derived
24 1.1.18.1 yamt * from this software without specific prior written permission.
25 1.1.18.1 yamt *
26 1.1.18.1 yamt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 1.1.18.1 yamt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 1.1.18.1 yamt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 1.1.18.1 yamt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 1.1.18.1 yamt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 1.1.18.1 yamt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 1.1.18.1 yamt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 1.1.18.1 yamt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 1.1.18.1 yamt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 1.1.18.1 yamt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 1.1.18.1 yamt * POSSIBILITY OF SUCH DAMAGE.
37 1.1.18.1 yamt */
38 1.1.18.1 yamt
39 1.1.18.1 yamt /*
40 1.1.18.1 yamt * Turnstiles are described in detail in:
41 1.1.18.1 yamt *
42 1.1.18.1 yamt * Solaris Internals: Core Kernel Architecture, Jim Mauro and
43 1.1.18.1 yamt * Richard McDougall.
44 1.1.18.1 yamt *
45 1.1.18.1 yamt * Turnstiles are kept in a hash table. There are likely to be many more
46 1.1.18.1 yamt * synchronisation objects than there are threads. Since a thread can block
47 1.1.18.1 yamt * on only one lock at a time, we only need one turnstile per thread, and
48 1.1.18.1 yamt * so they are allocated at thread creation time.
49 1.1.18.1 yamt *
50 1.1.18.1 yamt * When a thread decides it needs to block on a lock, it looks up the
51 1.1.18.1 yamt * active turnstile for that lock. If no active turnstile exists, then
52 1.1.18.1 yamt * the process lends its turnstile to the lock. If there is already an
53 1.1.18.1 yamt * active turnstile for the lock, the thread places its turnstile on a
54 1.1.18.1 yamt * list of free turnstiles, and references the active one instead.
55 1.1.18.1 yamt *
56 1.1.18.1 yamt * The act of looking up the turnstile acquires an interlock on the sleep
57 1.1.18.1 yamt * queue. If a thread decides it doesn't need to block after all, then this
58 1.1.18.1 yamt * interlock must be released by explicitly aborting the turnstile
59 1.1.18.1 yamt * operation.
60 1.1.18.1 yamt *
61 1.1.18.1 yamt * When a thread is awakened, it needs to get its turnstile back. If there
 62 1.1.18.1 yamt * are still other threads waiting in the active turnstile, the thread
63 1.1.18.1 yamt * grabs a free turnstile off the free list. Otherwise, it can take back
64 1.1.18.1 yamt * the active turnstile from the lock (thus deactivating the turnstile).
65 1.1.18.1 yamt *
 66 1.1.18.1 yamt * Turnstiles are the place to do priority inheritance. However, we do
67 1.1.18.1 yamt * not currently implement that.
68 1.1.18.1 yamt */
69 1.1.18.1 yamt
70 1.1.18.1 yamt #include <sys/cdefs.h>
71 1.1.18.1 yamt __KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.1.18.1 2007/02/26 09:11:13 yamt Exp $");
72 1.1.18.1 yamt
73 1.1.18.1 yamt #include "opt_lockdebug.h"
74 1.1.18.1 yamt #include "opt_multiprocessor.h"
75 1.1.18.1 yamt #include "opt_ktrace.h"
76 1.1.18.1 yamt #include "opt_ddb.h"
77 1.1.18.1 yamt
78 1.1.18.1 yamt #include <sys/param.h>
79 1.1.18.1 yamt #include <sys/lock.h>
80 1.1.18.1 yamt #include <sys/pool.h>
81 1.1.18.1 yamt #include <sys/proc.h>
82 1.1.18.1 yamt #include <sys/sleepq.h>
83 1.1.18.1 yamt #include <sys/systm.h>
84 1.1.18.1 yamt
85 1.1.18.1 yamt #include <uvm/uvm_extern.h>
86 1.1.18.1 yamt
87 1.1.18.1 yamt #define TS_HASH_SIZE 64
88 1.1.18.1 yamt #define TS_HASH_MASK (TS_HASH_SIZE - 1)
89 1.1.18.1 yamt #define TS_HASH(obj) (((uintptr_t)(obj) >> 3) & TS_HASH_MASK)
90 1.1.18.1 yamt
91 1.1.18.1 yamt tschain_t turnstile_tab[TS_HASH_SIZE];
92 1.1.18.1 yamt
93 1.1.18.1 yamt struct pool turnstile_pool;
94 1.1.18.1 yamt struct pool_cache turnstile_cache;
95 1.1.18.1 yamt
96 1.1.18.1 yamt int turnstile_ctor(void *, void *, int);
97 1.1.18.1 yamt void turnstile_unsleep(struct lwp *);
98 1.1.18.1 yamt void turnstile_changepri(struct lwp *, int);
99 1.1.18.1 yamt
100 1.1.18.1 yamt extern turnstile_t turnstile0;
101 1.1.18.1 yamt
102 1.1.18.1 yamt syncobj_t turnstile_syncobj = {
103 1.1.18.1 yamt SOBJ_SLEEPQ_FIFO,
104 1.1.18.1 yamt turnstile_unsleep,
105 1.1.18.1 yamt turnstile_changepri
106 1.1.18.1 yamt };
107 1.1.18.1 yamt
108 1.1.18.1 yamt /*
109 1.1.18.1 yamt * turnstile_init:
110 1.1.18.1 yamt *
111 1.1.18.1 yamt * Initialize the turnstile mechanism.
112 1.1.18.1 yamt */
113 1.1.18.1 yamt void
114 1.1.18.1 yamt turnstile_init(void)
115 1.1.18.1 yamt {
116 1.1.18.1 yamt tschain_t *tc;
117 1.1.18.1 yamt int i;
118 1.1.18.1 yamt
119 1.1.18.1 yamt for (i = 0; i < TS_HASH_SIZE; i++) {
120 1.1.18.1 yamt tc = &turnstile_tab[i];
121 1.1.18.1 yamt LIST_INIT(&tc->tc_chain);
122 1.1.18.1 yamt #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
123 1.1.18.1 yamt mutex_init(&tc->tc_mutexstore, MUTEX_SPIN, IPL_SCHED);
124 1.1.18.1 yamt tc->tc_mutex = &tc->tc_mutexstore;
125 1.1.18.1 yamt #else
126 1.1.18.1 yamt tc->tc_mutex = &sched_mutex;
127 1.1.18.1 yamt #endif
128 1.1.18.1 yamt }
129 1.1.18.1 yamt
130 1.1.18.1 yamt pool_init(&turnstile_pool, sizeof(turnstile_t), 0, 0, 0,
131 1.1.18.1 yamt "tstilepl", &pool_allocator_nointr);
132 1.1.18.1 yamt pool_cache_init(&turnstile_cache, &turnstile_pool,
133 1.1.18.1 yamt turnstile_ctor, NULL, NULL);
134 1.1.18.1 yamt
135 1.1.18.1 yamt (void)turnstile_ctor(NULL, &turnstile0, 0);
136 1.1.18.1 yamt }
137 1.1.18.1 yamt
138 1.1.18.1 yamt /*
139 1.1.18.1 yamt * turnstile_ctor:
140 1.1.18.1 yamt *
141 1.1.18.1 yamt * Constructor for turnstiles.
142 1.1.18.1 yamt */
143 1.1.18.1 yamt int
144 1.1.18.1 yamt turnstile_ctor(void *arg, void *obj, int flags)
145 1.1.18.1 yamt {
146 1.1.18.1 yamt turnstile_t *ts = obj;
147 1.1.18.1 yamt
148 1.1.18.1 yamt memset(ts, 0, sizeof(*ts));
149 1.1.18.1 yamt sleepq_init(&ts->ts_sleepq[TS_READER_Q], NULL);
150 1.1.18.1 yamt sleepq_init(&ts->ts_sleepq[TS_WRITER_Q], NULL);
151 1.1.18.1 yamt return (0);
152 1.1.18.1 yamt }
153 1.1.18.1 yamt
154 1.1.18.1 yamt /*
155 1.1.18.1 yamt * turnstile_remove:
156 1.1.18.1 yamt *
157 1.1.18.1 yamt * Remove an LWP from a turnstile sleep queue and wake it.
158 1.1.18.1 yamt */
159 1.1.18.1 yamt static inline int
160 1.1.18.1 yamt turnstile_remove(turnstile_t *ts, struct lwp *l, sleepq_t *sq)
161 1.1.18.1 yamt {
162 1.1.18.1 yamt turnstile_t *nts;
163 1.1.18.1 yamt
164 1.1.18.1 yamt KASSERT(l->l_ts == ts);
165 1.1.18.1 yamt
166 1.1.18.1 yamt /*
167 1.1.18.1 yamt * This process is no longer using the active turnstile.
168 1.1.18.1 yamt * Find an inactive one on the free list to give to it.
169 1.1.18.1 yamt */
170 1.1.18.1 yamt if ((nts = ts->ts_free) != NULL) {
171 1.1.18.1 yamt KASSERT(TS_ALL_WAITERS(ts) > 1);
172 1.1.18.1 yamt l->l_ts = nts;
173 1.1.18.1 yamt ts->ts_free = nts->ts_free;
174 1.1.18.1 yamt nts->ts_free = NULL;
175 1.1.18.1 yamt } else {
176 1.1.18.1 yamt /*
177 1.1.18.1 yamt * If the free list is empty, this is the last
178 1.1.18.1 yamt * waiter.
179 1.1.18.1 yamt */
180 1.1.18.1 yamt KASSERT(TS_ALL_WAITERS(ts) == 1);
181 1.1.18.1 yamt LIST_REMOVE(ts, ts_chain);
182 1.1.18.1 yamt }
183 1.1.18.1 yamt
184 1.1.18.1 yamt return sleepq_remove(sq, l);
185 1.1.18.1 yamt }
186 1.1.18.1 yamt
187 1.1.18.1 yamt /*
188 1.1.18.1 yamt * turnstile_lookup:
189 1.1.18.1 yamt *
190 1.1.18.1 yamt * Look up the turnstile for the specified lock. This acquires and
191 1.1.18.1 yamt * holds the turnstile chain lock (sleep queue interlock).
192 1.1.18.1 yamt */
193 1.1.18.1 yamt turnstile_t *
194 1.1.18.1 yamt turnstile_lookup(wchan_t obj)
195 1.1.18.1 yamt {
196 1.1.18.1 yamt turnstile_t *ts;
197 1.1.18.1 yamt tschain_t *tc;
198 1.1.18.1 yamt
199 1.1.18.1 yamt tc = &turnstile_tab[TS_HASH(obj)];
200 1.1.18.1 yamt mutex_spin_enter(tc->tc_mutex);
201 1.1.18.1 yamt
202 1.1.18.1 yamt LIST_FOREACH(ts, &tc->tc_chain, ts_chain)
203 1.1.18.1 yamt if (ts->ts_obj == obj)
204 1.1.18.1 yamt return (ts);
205 1.1.18.1 yamt
206 1.1.18.1 yamt /*
207 1.1.18.1 yamt * No turnstile yet for this lock. No problem, turnstile_block()
208 1.1.18.1 yamt * handles this by fetching the turnstile from the blocking thread.
209 1.1.18.1 yamt */
210 1.1.18.1 yamt return (NULL);
211 1.1.18.1 yamt }
212 1.1.18.1 yamt
213 1.1.18.1 yamt /*
214 1.1.18.1 yamt * turnstile_exit:
215 1.1.18.1 yamt *
216 1.1.18.1 yamt * Abort a turnstile operation.
217 1.1.18.1 yamt */
218 1.1.18.1 yamt void
219 1.1.18.1 yamt turnstile_exit(wchan_t obj)
220 1.1.18.1 yamt {
221 1.1.18.1 yamt tschain_t *tc;
222 1.1.18.1 yamt
223 1.1.18.1 yamt tc = &turnstile_tab[TS_HASH(obj)];
224 1.1.18.1 yamt mutex_spin_exit(tc->tc_mutex);
225 1.1.18.1 yamt }
226 1.1.18.1 yamt
227 1.1.18.1 yamt /*
228 1.1.18.1 yamt * turnstile_block:
229 1.1.18.1 yamt *
230 1.1.18.1 yamt * Enter an object into the turnstile chain and prepare the current
231 1.1.18.1 yamt * LWP for sleep.
232 1.1.18.1 yamt */
233 1.1.18.1 yamt void
234 1.1.18.1 yamt turnstile_block(turnstile_t *ts, int q, wchan_t obj)
235 1.1.18.1 yamt {
236 1.1.18.1 yamt struct lwp *l;
237 1.1.18.1 yamt turnstile_t *ots;
238 1.1.18.1 yamt tschain_t *tc;
239 1.1.18.1 yamt sleepq_t *sq;
240 1.1.18.1 yamt
241 1.1.18.1 yamt tc = &turnstile_tab[TS_HASH(obj)];
242 1.1.18.1 yamt l = curlwp;
243 1.1.18.1 yamt
244 1.1.18.1 yamt KASSERT(q == TS_READER_Q || q == TS_WRITER_Q);
245 1.1.18.1 yamt KASSERT(mutex_owned(tc->tc_mutex));
246 1.1.18.1 yamt KASSERT(l != NULL && l->l_ts != NULL);
247 1.1.18.1 yamt
248 1.1.18.1 yamt if (ts == NULL) {
249 1.1.18.1 yamt /*
250 1.1.18.1 yamt * We are the first thread to wait for this object;
251 1.1.18.1 yamt * lend our turnstile to it.
252 1.1.18.1 yamt */
253 1.1.18.1 yamt ts = l->l_ts;
254 1.1.18.1 yamt KASSERT(TS_ALL_WAITERS(ts) == 0);
255 1.1.18.1 yamt KASSERT(TAILQ_EMPTY(&ts->ts_sleepq[TS_READER_Q].sq_queue) &&
256 1.1.18.1 yamt TAILQ_EMPTY(&ts->ts_sleepq[TS_WRITER_Q].sq_queue));
257 1.1.18.1 yamt ts->ts_obj = obj;
258 1.1.18.1 yamt ts->ts_sleepq[TS_READER_Q].sq_mutex = tc->tc_mutex;
259 1.1.18.1 yamt ts->ts_sleepq[TS_WRITER_Q].sq_mutex = tc->tc_mutex;
260 1.1.18.1 yamt LIST_INSERT_HEAD(&tc->tc_chain, ts, ts_chain);
261 1.1.18.1 yamt } else {
262 1.1.18.1 yamt /*
263 1.1.18.1 yamt * Object already has a turnstile. Put our turnstile
264 1.1.18.1 yamt * onto the free list, and reference the existing
265 1.1.18.1 yamt * turnstile instead.
266 1.1.18.1 yamt */
267 1.1.18.1 yamt ots = l->l_ts;
268 1.1.18.1 yamt ots->ts_free = ts->ts_free;
269 1.1.18.1 yamt ts->ts_free = ots;
270 1.1.18.1 yamt l->l_ts = ts;
271 1.1.18.1 yamt
272 1.1.18.1 yamt KASSERT(TS_ALL_WAITERS(ts) != 0);
273 1.1.18.1 yamt KASSERT(!TAILQ_EMPTY(&ts->ts_sleepq[TS_READER_Q].sq_queue) ||
274 1.1.18.1 yamt !TAILQ_EMPTY(&ts->ts_sleepq[TS_WRITER_Q].sq_queue));
275 1.1.18.1 yamt }
276 1.1.18.1 yamt
277 1.1.18.1 yamt sq = &ts->ts_sleepq[q];
278 1.1.18.1 yamt sleepq_enter(sq, l);
279 1.1.18.1 yamt sleepq_block(sq, sched_kpri(l), obj, "tstile", 0, 0,
280 1.1.18.1 yamt &turnstile_syncobj);
281 1.1.18.1 yamt }
282 1.1.18.1 yamt
283 1.1.18.1 yamt /*
284 1.1.18.1 yamt * turnstile_wakeup:
285 1.1.18.1 yamt *
286 1.1.18.1 yamt * Wake up the specified number of threads that are blocked
287 1.1.18.1 yamt * in a turnstile.
288 1.1.18.1 yamt */
289 1.1.18.1 yamt void
290 1.1.18.1 yamt turnstile_wakeup(turnstile_t *ts, int q, int count, struct lwp *nl)
291 1.1.18.1 yamt {
292 1.1.18.1 yamt sleepq_t *sq;
293 1.1.18.1 yamt tschain_t *tc;
294 1.1.18.1 yamt struct lwp *l;
295 1.1.18.1 yamt int swapin;
296 1.1.18.1 yamt
297 1.1.18.1 yamt tc = &turnstile_tab[TS_HASH(ts->ts_obj)];
298 1.1.18.1 yamt sq = &ts->ts_sleepq[q];
299 1.1.18.1 yamt swapin = 0;
300 1.1.18.1 yamt
301 1.1.18.1 yamt KASSERT(q == TS_READER_Q || q == TS_WRITER_Q);
302 1.1.18.1 yamt KASSERT(count > 0 && count <= TS_WAITERS(ts, q));
303 1.1.18.1 yamt KASSERT(mutex_owned(tc->tc_mutex) && sq->sq_mutex == tc->tc_mutex);
304 1.1.18.1 yamt
305 1.1.18.1 yamt if (nl != NULL) {
306 1.1.18.1 yamt #if defined(DEBUG) || defined(LOCKDEBUG)
307 1.1.18.1 yamt TAILQ_FOREACH(l, &sq->sq_queue, l_sleepchain) {
308 1.1.18.1 yamt if (l == nl)
309 1.1.18.1 yamt break;
310 1.1.18.1 yamt }
311 1.1.18.1 yamt if (l == NULL)
312 1.1.18.1 yamt panic("turnstile_wakeup: nl not on sleepq");
313 1.1.18.1 yamt #endif
314 1.1.18.1 yamt swapin |= turnstile_remove(ts, nl, sq);
315 1.1.18.1 yamt } else {
316 1.1.18.1 yamt while (count-- > 0) {
317 1.1.18.1 yamt l = TAILQ_FIRST(&sq->sq_queue);
318 1.1.18.1 yamt KASSERT(l != NULL);
319 1.1.18.1 yamt swapin |= turnstile_remove(ts, l, sq);
320 1.1.18.1 yamt }
321 1.1.18.1 yamt }
322 1.1.18.1 yamt mutex_spin_exit(tc->tc_mutex);
323 1.1.18.1 yamt
324 1.1.18.1 yamt /*
325 1.1.18.1 yamt * If there are newly awakend threads that need to be swapped in,
326 1.1.18.1 yamt * then kick the swapper into action.
327 1.1.18.1 yamt */
328 1.1.18.1 yamt if (swapin)
329 1.1.18.1 yamt uvm_kick_scheduler();
330 1.1.18.1 yamt }
331 1.1.18.1 yamt
332 1.1.18.1 yamt /*
333 1.1.18.1 yamt * turnstile_unsleep:
334 1.1.18.1 yamt *
335 1.1.18.1 yamt * Remove an LWP from the turnstile. This is called when the LWP has
336 1.1.18.1 yamt * not been awoken normally but instead interrupted: for example, if it
337 1.1.18.1 yamt * has received a signal. It's not a valid action for turnstiles,
338 1.1.18.1 yamt * since LWPs blocking on a turnstile are not interruptable.
339 1.1.18.1 yamt */
/*
 * turnstile_unsleep:
 *
 *	Remove an LWP from the turnstile because it was interrupted
 *	rather than awoken normally (for example by a signal).  LWPs
 *	blocking on a turnstile are not interruptable, so reaching
 *	this function is always a fatal error.
 */
void
turnstile_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("turnstile_unsleep");
}
347 1.1.18.1 yamt
348 1.1.18.1 yamt /*
349 1.1.18.1 yamt * turnstile_changepri:
350 1.1.18.1 yamt *
351 1.1.18.1 yamt * Adjust the priority of an LWP residing on a turnstile. Since we do
352 1.1.18.1 yamt * not yet do priority inheritance, we mostly ignore this action.
353 1.1.18.1 yamt */
354 1.1.18.1 yamt void
355 1.1.18.1 yamt turnstile_changepri(struct lwp *l, int pri)
356 1.1.18.1 yamt {
357 1.1.18.1 yamt
358 1.1.18.1 yamt /* LWPs on turnstiles always have kernel priority. */
359 1.1.18.1 yamt l->l_usrpri = pri;
360 1.1.18.1 yamt l->l_priority = sched_kpri(l);
361 1.1.18.1 yamt }
362 1.1.18.1 yamt
#if defined(LOCKDEBUG)
/*
 * turnstile_print:
 *
 *	Given the address of a lock object, print the contents of its
 *	turnstile via "pr".  LOCKDEBUG/debugger helper; does not take
 *	the chain lock.
 */
void
turnstile_print(volatile void *obj, void (*pr)(const char *, ...))
{
	turnstile_t *ts;
	tschain_t *tc;
	sleepq_t *rsq, *wsq;
	struct lwp *l;

	tc = &turnstile_tab[TS_HASH(obj)];

	LIST_FOREACH(ts, &tc->tc_chain, ts_chain)
		if (ts->ts_obj == obj)
			break;

	(*pr)("Turnstile chain at %p with tc_mutex at %p.\n", tc, tc->tc_mutex);
	if (ts == NULL) {
		(*pr)("=> No active turnstile for this lock.\n");
		return;
	}

	rsq = &ts->ts_sleepq[TS_READER_Q];
	wsq = &ts->ts_sleepq[TS_WRITER_Q];

	/*
	 * Bug fix: the arguments were previously passed as (rsq, wsq),
	 * printing the reader queue under the "wrq" label and the
	 * writer queue under "rdq".  Pass them in label order.
	 */
	(*pr)("=> Turnstile at %p (wrq=%p, rdq=%p).\n", ts, wsq, rsq);

	(*pr)("=> %d waiting readers:", rsq->sq_waiters);
	TAILQ_FOREACH(l, &rsq->sq_queue, l_sleepchain) {
		(*pr)(" %p", l);
	}
	(*pr)("\n");

	(*pr)("=> %d waiting writers:", wsq->sq_waiters);
	TAILQ_FOREACH(l, &wsq->sq_queue, l_sleepchain) {
		(*pr)(" %p", l);
	}
	(*pr)("\n");
}
#endif	/* LOCKDEBUG */
408