kern_turnstile.c revision 1.1.18.2 1 1.1.18.2 yamt /* $NetBSD: kern_turnstile.c,v 1.1.18.2 2007/09/03 14:40:58 yamt Exp $ */
2 1.1.18.1 yamt
3 1.1.18.1 yamt /*-
4 1.1.18.1 yamt * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
5 1.1.18.1 yamt * All rights reserved.
6 1.1.18.1 yamt *
7 1.1.18.1 yamt * This code is derived from software contributed to The NetBSD Foundation
8 1.1.18.1 yamt * by Jason R. Thorpe and Andrew Doran.
9 1.1.18.1 yamt *
10 1.1.18.1 yamt * Redistribution and use in source and binary forms, with or without
11 1.1.18.1 yamt * modification, are permitted provided that the following conditions
12 1.1.18.1 yamt * are met:
13 1.1.18.1 yamt * 1. Redistributions of source code must retain the above copyright
14 1.1.18.1 yamt * notice, this list of conditions and the following disclaimer.
15 1.1.18.1 yamt * 2. Redistributions in binary form must reproduce the above copyright
16 1.1.18.1 yamt * notice, this list of conditions and the following disclaimer in the
17 1.1.18.1 yamt * documentation and/or other materials provided with the distribution.
18 1.1.18.1 yamt * 3. All advertising materials mentioning features or use of this software
19 1.1.18.1 yamt * must display the following acknowledgement:
20 1.1.18.1 yamt * This product includes software developed by the NetBSD
21 1.1.18.1 yamt * Foundation, Inc. and its contributors.
22 1.1.18.1 yamt * 4. Neither the name of The NetBSD Foundation nor the names of its
23 1.1.18.1 yamt * contributors may be used to endorse or promote products derived
24 1.1.18.1 yamt * from this software without specific prior written permission.
25 1.1.18.1 yamt *
26 1.1.18.1 yamt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 1.1.18.1 yamt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 1.1.18.1 yamt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 1.1.18.1 yamt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 1.1.18.1 yamt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 1.1.18.1 yamt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 1.1.18.1 yamt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 1.1.18.1 yamt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 1.1.18.1 yamt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 1.1.18.1 yamt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 1.1.18.1 yamt * POSSIBILITY OF SUCH DAMAGE.
37 1.1.18.1 yamt */
38 1.1.18.1 yamt
39 1.1.18.1 yamt /*
40 1.1.18.1 yamt * Turnstiles are described in detail in:
41 1.1.18.1 yamt *
42 1.1.18.1 yamt * Solaris Internals: Core Kernel Architecture, Jim Mauro and
43 1.1.18.1 yamt * Richard McDougall.
44 1.1.18.1 yamt *
45 1.1.18.1 yamt * Turnstiles are kept in a hash table. There are likely to be many more
46 1.1.18.1 yamt * synchronisation objects than there are threads. Since a thread can block
47 1.1.18.1 yamt * on only one lock at a time, we only need one turnstile per thread, and
48 1.1.18.1 yamt * so they are allocated at thread creation time.
49 1.1.18.1 yamt *
50 1.1.18.1 yamt * When a thread decides it needs to block on a lock, it looks up the
51 1.1.18.1 yamt * active turnstile for that lock. If no active turnstile exists, then
52 1.1.18.1 yamt * the process lends its turnstile to the lock. If there is already an
53 1.1.18.1 yamt * active turnstile for the lock, the thread places its turnstile on a
54 1.1.18.1 yamt * list of free turnstiles, and references the active one instead.
55 1.1.18.1 yamt *
56 1.1.18.1 yamt * The act of looking up the turnstile acquires an interlock on the sleep
57 1.1.18.1 yamt * queue. If a thread decides it doesn't need to block after all, then this
58 1.1.18.1 yamt * interlock must be released by explicitly aborting the turnstile
59 1.1.18.1 yamt * operation.
60 1.1.18.1 yamt *
61 1.1.18.1 yamt * When a thread is awakened, it needs to get its turnstile back. If there
 62 1.1.18.1 yamt  * are still other threads waiting in the active turnstile, the thread
63 1.1.18.1 yamt * grabs a free turnstile off the free list. Otherwise, it can take back
64 1.1.18.1 yamt * the active turnstile from the lock (thus deactivating the turnstile).
65 1.1.18.1 yamt *
 66 1.1.18.2 yamt  * Turnstiles are the place to do priority inheritance.
67 1.1.18.1 yamt */
68 1.1.18.1 yamt
69 1.1.18.1 yamt #include <sys/cdefs.h>
70 1.1.18.2 yamt __KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.1.18.2 2007/09/03 14:40:58 yamt Exp $");
71 1.1.18.1 yamt
72 1.1.18.1 yamt #include <sys/param.h>
73 1.1.18.1 yamt #include <sys/lock.h>
74 1.1.18.2 yamt #include <sys/lockdebug.h>
75 1.1.18.1 yamt #include <sys/pool.h>
76 1.1.18.1 yamt #include <sys/proc.h>
77 1.1.18.1 yamt #include <sys/sleepq.h>
78 1.1.18.1 yamt #include <sys/systm.h>
79 1.1.18.1 yamt
80 1.1.18.1 yamt #include <uvm/uvm_extern.h>
81 1.1.18.1 yamt
/*
 * Turnstile hash table parameters.  Objects are hashed by address;
 * the >> 3 shift discards low-order bits that are constant due to
 * the alignment of lock objects.
 */
#define	TS_HASH_SIZE	64
#define	TS_HASH_MASK	(TS_HASH_SIZE - 1)
#define	TS_HASH(obj)	(((uintptr_t)(obj) >> 3) & TS_HASH_MASK)

/* Hash table of turnstile chains; each chain has its own spin mutex. */
tschain_t	turnstile_tab[TS_HASH_SIZE];

/* Backing pool and cache from which per-LWP turnstiles are allocated. */
struct pool turnstile_pool;
struct pool_cache turnstile_cache;

int	turnstile_ctor(void *, void *, int);

/* Statically-allocated turnstile for the first LWP (defined elsewhere). */
extern turnstile_t turnstile0;
94 1.1.18.1 yamt
95 1.1.18.1 yamt /*
96 1.1.18.1 yamt * turnstile_init:
97 1.1.18.1 yamt *
98 1.1.18.1 yamt * Initialize the turnstile mechanism.
99 1.1.18.1 yamt */
100 1.1.18.1 yamt void
101 1.1.18.1 yamt turnstile_init(void)
102 1.1.18.1 yamt {
103 1.1.18.1 yamt tschain_t *tc;
104 1.1.18.1 yamt int i;
105 1.1.18.1 yamt
106 1.1.18.1 yamt for (i = 0; i < TS_HASH_SIZE; i++) {
107 1.1.18.1 yamt tc = &turnstile_tab[i];
108 1.1.18.1 yamt LIST_INIT(&tc->tc_chain);
109 1.1.18.2 yamt mutex_init(&tc->tc_mutex, MUTEX_SPIN, IPL_SCHED);
110 1.1.18.1 yamt }
111 1.1.18.1 yamt
112 1.1.18.1 yamt pool_init(&turnstile_pool, sizeof(turnstile_t), 0, 0, 0,
113 1.1.18.2 yamt "tstilepl", &pool_allocator_nointr, IPL_NONE);
114 1.1.18.1 yamt pool_cache_init(&turnstile_cache, &turnstile_pool,
115 1.1.18.1 yamt turnstile_ctor, NULL, NULL);
116 1.1.18.1 yamt
117 1.1.18.1 yamt (void)turnstile_ctor(NULL, &turnstile0, 0);
118 1.1.18.1 yamt }
119 1.1.18.1 yamt
120 1.1.18.1 yamt /*
121 1.1.18.1 yamt * turnstile_ctor:
122 1.1.18.1 yamt *
123 1.1.18.1 yamt * Constructor for turnstiles.
124 1.1.18.1 yamt */
125 1.1.18.1 yamt int
126 1.1.18.1 yamt turnstile_ctor(void *arg, void *obj, int flags)
127 1.1.18.1 yamt {
128 1.1.18.1 yamt turnstile_t *ts = obj;
129 1.1.18.1 yamt
130 1.1.18.1 yamt memset(ts, 0, sizeof(*ts));
131 1.1.18.1 yamt sleepq_init(&ts->ts_sleepq[TS_READER_Q], NULL);
132 1.1.18.1 yamt sleepq_init(&ts->ts_sleepq[TS_WRITER_Q], NULL);
133 1.1.18.1 yamt return (0);
134 1.1.18.1 yamt }
135 1.1.18.1 yamt
136 1.1.18.1 yamt /*
137 1.1.18.1 yamt * turnstile_remove:
138 1.1.18.1 yamt *
139 1.1.18.1 yamt * Remove an LWP from a turnstile sleep queue and wake it.
140 1.1.18.1 yamt */
static inline void
turnstile_remove(turnstile_t *ts, lwp_t *l, sleepq_t *sq)
{
	turnstile_t *nts;

	/* The LWP must currently reference the active turnstile. */
	KASSERT(l->l_ts == ts);

	/*
	 * This process is no longer using the active turnstile.
	 * Find an inactive one on the free list to give to it.
	 */
	if ((nts = ts->ts_free) != NULL) {
		/* Free entries exist, so at least one other LWP waits. */
		KASSERT(TS_ALL_WAITERS(ts) > 1);
		l->l_ts = nts;
		ts->ts_free = nts->ts_free;
		nts->ts_free = NULL;
	} else {
		/*
		 * If the free list is empty, this is the last
		 * waiter.  The LWP keeps the active turnstile, which
		 * is removed from the chain (deactivated).
		 */
		KASSERT(TS_ALL_WAITERS(ts) == 1);
		LIST_REMOVE(ts, ts_chain);
	}

	/* Dequeue and wake the LWP; the return value is not needed here. */
	(void)sleepq_remove(sq, l);
}
168 1.1.18.1 yamt
169 1.1.18.1 yamt /*
170 1.1.18.1 yamt * turnstile_lookup:
171 1.1.18.1 yamt *
172 1.1.18.1 yamt * Look up the turnstile for the specified lock. This acquires and
173 1.1.18.1 yamt * holds the turnstile chain lock (sleep queue interlock).
174 1.1.18.1 yamt */
turnstile_t *
turnstile_lookup(wchan_t obj)
{
	turnstile_t *ts;
	tschain_t *tc;

	tc = &turnstile_tab[TS_HASH(obj)];
	/*
	 * Acquire the chain mutex.  Note that it is held on BOTH return
	 * paths: the caller must release it via turnstile_exit() (abort)
	 * or by proceeding to turnstile_block()/turnstile_wakeup().
	 */
	mutex_spin_enter(&tc->tc_mutex);

	LIST_FOREACH(ts, &tc->tc_chain, ts_chain)
		if (ts->ts_obj == obj)
			return (ts);

	/*
	 * No turnstile yet for this lock.  No problem, turnstile_block()
	 * handles this by fetching the turnstile from the blocking thread.
	 */
	return (NULL);
}
194 1.1.18.1 yamt
195 1.1.18.1 yamt /*
196 1.1.18.1 yamt * turnstile_exit:
197 1.1.18.1 yamt *
198 1.1.18.1 yamt * Abort a turnstile operation.
199 1.1.18.1 yamt */
200 1.1.18.1 yamt void
201 1.1.18.1 yamt turnstile_exit(wchan_t obj)
202 1.1.18.1 yamt {
203 1.1.18.1 yamt tschain_t *tc;
204 1.1.18.1 yamt
205 1.1.18.1 yamt tc = &turnstile_tab[TS_HASH(obj)];
206 1.1.18.2 yamt mutex_spin_exit(&tc->tc_mutex);
207 1.1.18.1 yamt }
208 1.1.18.1 yamt
209 1.1.18.1 yamt /*
210 1.1.18.1 yamt * turnstile_block:
211 1.1.18.1 yamt *
212 1.1.18.1 yamt * Enter an object into the turnstile chain and prepare the current
213 1.1.18.1 yamt * LWP for sleep.
214 1.1.18.1 yamt */
void
turnstile_block(turnstile_t *ts, int q, wchan_t obj, syncobj_t *sobj)
{
	lwp_t *l;
	lwp_t *cur;	/* cached curlwp */
	lwp_t *owner;
	turnstile_t *ots;
	tschain_t *tc;
	sleepq_t *sq;
	pri_t prio;

	tc = &turnstile_tab[TS_HASH(obj)];
	l = cur = curlwp;

	/* Caller holds the chain mutex (from turnstile_lookup()). */
	KASSERT(q == TS_READER_Q || q == TS_WRITER_Q);
	KASSERT(mutex_owned(&tc->tc_mutex));
	KASSERT(l != NULL && l->l_ts != NULL);

	if (ts == NULL) {
		/*
		 * We are the first thread to wait for this object;
		 * lend our turnstile to it.
		 */
		ts = l->l_ts;
		KASSERT(TS_ALL_WAITERS(ts) == 0);
		KASSERT(TAILQ_EMPTY(&ts->ts_sleepq[TS_READER_Q].sq_queue) &&
			TAILQ_EMPTY(&ts->ts_sleepq[TS_WRITER_Q].sq_queue));
		ts->ts_obj = obj;
		ts->ts_inheritor = NULL;
		/* Both sleep queues are interlocked by the chain mutex. */
		ts->ts_sleepq[TS_READER_Q].sq_mutex = &tc->tc_mutex;
		ts->ts_sleepq[TS_WRITER_Q].sq_mutex = &tc->tc_mutex;
		LIST_INSERT_HEAD(&tc->tc_chain, ts, ts_chain);
	} else {
		/*
		 * Object already has a turnstile.  Put our turnstile
		 * onto the free list, and reference the existing
		 * turnstile instead.
		 */
		ots = l->l_ts;
		ots->ts_free = ts->ts_free;
		ts->ts_free = ots;
		l->l_ts = ts;

		KASSERT(ts->ts_obj == obj);
		KASSERT(TS_ALL_WAITERS(ts) != 0);
		KASSERT(!TAILQ_EMPTY(&ts->ts_sleepq[TS_READER_Q].sq_queue) ||
			!TAILQ_EMPTY(&ts->ts_sleepq[TS_WRITER_Q].sq_queue));
	}

	sq = &ts->ts_sleepq[q];
	sleepq_enter(sq, l);
	LOCKDEBUG_BARRIER(&tc->tc_mutex, 1);
	/* Boost to kernel priority while blocked on a kernel lock. */
	l->l_priority = sched_kpri(l);
	prio = lwp_eprio(l);
	sleepq_enqueue(sq, prio, obj, "tstile", sobj);

	/*
	 * lend our priority to lwps on the blocking chain.
	 *
	 * Walk from the current LWP through successive lock owners,
	 * lending `prio' to each owner whose effective priority is
	 * lower.  NOTE: at this revision a numerically SMALLER pri_t
	 * is a higher priority (see MAXPRI handling in
	 * turnstile_wakeup()), so `prio < eprio' means "we outrank".
	 */

	for (;;) {
		bool dolock;

		/* Chain ends when this LWP is not blocked... */
		if (l->l_wchan == NULL)
			break;

		/* ...or when the object it waits on has no owner. */
		owner = (*l->l_syncobj->sobj_owner)(l->l_wchan);
		if (owner == NULL)
			break;

		/* A deadlock (cycle back to us) would trip these. */
		KASSERT(l != owner);
		KASSERT(cur != owner);

		/* Only take the owner's lock if it differs from ours. */
		if (l->l_mutex != owner->l_mutex)
			dolock = true;
		else
			dolock = false;
		if (dolock && !lwp_trylock(owner)) {
			/*
			 * restart from curlwp.
			 *
			 * Trylock failed: back off to avoid lock-order
			 * deadlock, and re-walk the whole chain.
			 */
			lwp_unlock(l);
			l = cur;
			lwp_lock(l);
			prio = lwp_eprio(l);
			continue;
		}
		/* Owner already runs at our priority or better: done. */
		if (prio >= lwp_eprio(owner)) {
			if (dolock)
				lwp_unlock(owner);
			break;
		}
		ts = l->l_ts;
		KASSERT(ts->ts_inheritor == owner || ts->ts_inheritor == NULL);
		if (ts->ts_inheritor == NULL) {
			/* First loan via this turnstile: record it on the
			 * owner's list of priority lenders. */
			ts->ts_inheritor = owner;
			ts->ts_eprio = prio;
			SLIST_INSERT_HEAD(&owner->l_pi_lenders, ts, ts_pichain);
			lwp_lendpri(owner, prio);
		} else if (prio < ts->ts_eprio) {
			/* Raise an existing loan to the higher priority. */
			ts->ts_eprio = prio;
			lwp_lendpri(owner, prio);
		}
		/* Advance down the chain, keeping the owner locked. */
		if (dolock)
			lwp_unlock(l);
		l = owner;
	}
	LOCKDEBUG_BARRIER(l->l_mutex, 1);
	/* Re-lock ourselves before sleeping if the walk left us elsewhere. */
	if (cur->l_mutex != l->l_mutex) {
		lwp_unlock(l);
		lwp_lock(cur);
	}
	LOCKDEBUG_BARRIER(cur->l_mutex, 1);

	/* Sleep without timeout; turnstile sleeps are uninterruptible. */
	sleepq_block(0, false);
}
331 1.1.18.1 yamt
332 1.1.18.1 yamt /*
333 1.1.18.1 yamt * turnstile_wakeup:
334 1.1.18.1 yamt *
335 1.1.18.1 yamt * Wake up the specified number of threads that are blocked
336 1.1.18.1 yamt * in a turnstile.
337 1.1.18.1 yamt */
void
turnstile_wakeup(turnstile_t *ts, int q, int count, lwp_t *nl)
{
	sleepq_t *sq;
	tschain_t *tc;
	lwp_t *l;

	tc = &turnstile_tab[TS_HASH(ts->ts_obj)];
	sq = &ts->ts_sleepq[q];

	KASSERT(q == TS_READER_Q || q == TS_WRITER_Q);
	KASSERT(count > 0 && count <= TS_WAITERS(ts, q));
	/* Caller holds the chain mutex; released before return. */
	KASSERT(mutex_owned(&tc->tc_mutex) && sq->sq_mutex == &tc->tc_mutex);
	/* Only the priority inheritor (the lock releaser) may wake. */
	KASSERT(ts->ts_inheritor == curlwp || ts->ts_inheritor == NULL);

	/*
	 * restore inherited priority if necessary.
	 */

	if (ts->ts_inheritor != NULL) {
		turnstile_t *iter;
		turnstile_t *next;
		turnstile_t *prev = NULL;
		pri_t prio;
		bool dolock;

		ts->ts_inheritor = NULL;
		l = curlwp;

		/* Lock ourselves unless we already hold the run-queue
		 * lock that covers this LWP. */
		dolock = l->l_mutex == &l->l_cpu->ci_schedstate.spc_lwplock;
		if (dolock) {
			lwp_lock(l);
		}

		/*
		 * the following loop does two things.
		 *
		 * - remove ts from the list.
		 *
		 * - from the rest of the list, find the highest priority.
		 */

		/* MAXPRI is the numerically-largest (lowest) priority;
		 * the loop below tracks the minimum ts_eprio. */
		prio = MAXPRI;
		KASSERT(!SLIST_EMPTY(&l->l_pi_lenders));
		for (iter = SLIST_FIRST(&l->l_pi_lenders);
		    iter != NULL; iter = next) {
			KASSERT(lwp_eprio(l) <= ts->ts_eprio);
			next = SLIST_NEXT(iter, ts_pichain);
			if (iter == ts) {
				/* Unlink our turnstile from the lender
				 * list (head vs. middle cases). */
				if (prev == NULL) {
					SLIST_REMOVE_HEAD(&l->l_pi_lenders,
					    ts_pichain);
				} else {
					SLIST_REMOVE_AFTER(prev, ts_pichain);
				}
			} else if (prio > iter->ts_eprio) {
				prio = iter->ts_eprio;
			}
			prev = iter;
		}

		/* Drop to the best remaining loaned priority (or MAXPRI
		 * if no loans remain, i.e. back to our own priority). */
		lwp_lendpri(l, prio);

		if (dolock) {
			lwp_unlock(l);
		}
	}

	if (nl != NULL) {
		/* Wake the one specified LWP (used for direct handoff). */
#if defined(DEBUG) || defined(LOCKDEBUG)
		TAILQ_FOREACH(l, &sq->sq_queue, l_sleepchain) {
			if (l == nl)
				break;
		}
		if (l == NULL)
			panic("turnstile_wakeup: nl not on sleepq");
#endif
		turnstile_remove(ts, nl, sq);
	} else {
		/* Wake up to `count' LWPs from the head of the queue. */
		while (count-- > 0) {
			l = TAILQ_FIRST(&sq->sq_queue);
			KASSERT(l != NULL);
			turnstile_remove(ts, l, sq);
		}
	}
	mutex_spin_exit(&tc->tc_mutex);
}
425 1.1.18.1 yamt
426 1.1.18.1 yamt /*
427 1.1.18.1 yamt * turnstile_unsleep:
428 1.1.18.1 yamt *
429 1.1.18.1 yamt * Remove an LWP from the turnstile. This is called when the LWP has
430 1.1.18.1 yamt * not been awoken normally but instead interrupted: for example, if it
431 1.1.18.1 yamt * has received a signal. It's not a valid action for turnstiles,
 432 1.1.18.1 yamt  * since LWPs blocking on a turnstile are not interruptible.
433 1.1.18.1 yamt */
void
turnstile_unsleep(lwp_t *l)
{

	/* Must never be reached: turnstile sleeps are uninterruptible. */
	lwp_unlock(l);
	panic("turnstile_unsleep");
}
441 1.1.18.1 yamt
442 1.1.18.1 yamt /*
443 1.1.18.1 yamt * turnstile_changepri:
444 1.1.18.1 yamt *
445 1.1.18.2 yamt * Adjust the priority of an LWP residing on a turnstile.
446 1.1.18.1 yamt */
void
turnstile_changepri(lwp_t *l, pri_t pri)
{

	/* XXX priority inheritance */
	/* Delegate to the generic sleep queue re-prioritization; does
	 * not (yet) re-propagate the change along the blocking chain. */
	sleepq_changepri(l, pri);
}
454 1.1.18.1 yamt
455 1.1.18.1 yamt #if defined(LOCKDEBUG)
456 1.1.18.1 yamt /*
457 1.1.18.1 yamt * turnstile_print:
458 1.1.18.1 yamt *
459 1.1.18.1 yamt * Given the address of a lock object, print the contents of a
460 1.1.18.1 yamt * turnstile.
461 1.1.18.1 yamt */
462 1.1.18.1 yamt void
463 1.1.18.1 yamt turnstile_print(volatile void *obj, void (*pr)(const char *, ...))
464 1.1.18.1 yamt {
465 1.1.18.1 yamt turnstile_t *ts;
466 1.1.18.1 yamt tschain_t *tc;
467 1.1.18.1 yamt sleepq_t *rsq, *wsq;
468 1.1.18.2 yamt lwp_t *l;
469 1.1.18.1 yamt
470 1.1.18.1 yamt tc = &turnstile_tab[TS_HASH(obj)];
471 1.1.18.1 yamt
472 1.1.18.1 yamt LIST_FOREACH(ts, &tc->tc_chain, ts_chain)
473 1.1.18.1 yamt if (ts->ts_obj == obj)
474 1.1.18.1 yamt break;
475 1.1.18.1 yamt
476 1.1.18.2 yamt (*pr)("Turnstile chain at %p.\n", tc);
477 1.1.18.1 yamt if (ts == NULL) {
478 1.1.18.1 yamt (*pr)("=> No active turnstile for this lock.\n");
479 1.1.18.1 yamt return;
480 1.1.18.1 yamt }
481 1.1.18.1 yamt
482 1.1.18.1 yamt rsq = &ts->ts_sleepq[TS_READER_Q];
483 1.1.18.1 yamt wsq = &ts->ts_sleepq[TS_WRITER_Q];
484 1.1.18.1 yamt
485 1.1.18.1 yamt (*pr)("=> Turnstile at %p (wrq=%p, rdq=%p).\n", ts, rsq, wsq);
486 1.1.18.1 yamt
487 1.1.18.1 yamt (*pr)("=> %d waiting readers:", rsq->sq_waiters);
488 1.1.18.1 yamt TAILQ_FOREACH(l, &rsq->sq_queue, l_sleepchain) {
489 1.1.18.1 yamt (*pr)(" %p", l);
490 1.1.18.1 yamt }
491 1.1.18.1 yamt (*pr)("\n");
492 1.1.18.1 yamt
493 1.1.18.1 yamt (*pr)("=> %d waiting writers:", wsq->sq_waiters);
494 1.1.18.1 yamt TAILQ_FOREACH(l, &wsq->sq_queue, l_sleepchain) {
495 1.1.18.1 yamt (*pr)(" %p", l);
496 1.1.18.1 yamt }
497 1.1.18.1 yamt (*pr)("\n");
498 1.1.18.1 yamt }
499 1.1.18.1 yamt #endif /* LOCKDEBUG */
500