/*	$NetBSD: kern_sleepq.c,v 1.16 2007/10/13 00:13:05 rmind Exp $	*/

/*-
 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Sleep queue implementation, used by turnstiles and general sleep/wakeup
 * interfaces.
 */
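
/*
 * The overall shape of a sleep/wakeup cycle, as a hedged sketch ("chan"
 * is a hypothetical wait channel; sleeptab_lookup() and sleepq_enter()
 * are the helpers referenced by the comments below, with sleeptab_lookup()
 * assumed to return the queue locked):
 *
 *	Sleeping side, with l the current LWP:
 *		sq = sleeptab_lookup(&sleeptab, chan);
 *		sleepq_enter(sq, l);
 *		sleepq_enqueue(sq, pri, chan, "example", &sleep_syncobj);
 *		error = sleepq_block(timo, true);
 *
 *	Waking side:
 *		sq = sleeptab_lookup(&sleeptab, chan);
 *		sleepq_wake(sq, chan, 1);
 *
 * sleep_syncobj here stands for the generic sleep synchronisation
 * object; any syncobj with the appropriate flags would do.
 */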

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.16 2007/10/13 00:13:05 rmind Exp $");

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/systm.h>
#include <sys/sleepq.h>
#include <sys/ktrace.h>

#include <uvm/uvm_extern.h>

int	sleepq_sigtoerror(lwp_t *, int);

/* General purpose sleep table, used by ltsleep() and condition variables. */
sleeptab_t	sleeptab;

/*
 * sleeptab_init:
 *
 *	Initialize a sleep table.
 */
void
sleeptab_init(sleeptab_t *st)
{
	sleepq_t *sq;
	int i;

	for (i = 0; i < SLEEPTAB_HASH_SIZE; i++) {
		sq = &st->st_queues[i].st_queue;
		mutex_init(&st->st_queues[i].st_mutex, MUTEX_SPIN, IPL_SCHED);
		sleepq_init(sq, &st->st_queues[i].st_mutex);
	}
}
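
/*
 * The wait channel chooses the queue: sleeptab_lookup() (in sys/sleepq.h)
 * hashes the channel's address into st_queues[].  A sketch of the idea,
 * not the exact macro:
 *
 *	hash = ((uintptr_t)wchan >> 8) & (SLEEPTAB_HASH_SIZE - 1);
 *	sq = &st->st_queues[hash].st_queue;
 *
 * Low bits are discarded because wait channels are typically the
 * addresses of aligned kernel objects.
 */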

/*
 * sleepq_init:
 *
 *	Prepare a sleep queue for use.
 */
void
sleepq_init(sleepq_t *sq, kmutex_t *mtx)
{

	sq->sq_waiters = 0;
	sq->sq_mutex = mtx;
	TAILQ_INIT(&sq->sq_queue);
}

/*
 * sleepq_remove:
 *
 *	Remove an LWP from a sleep queue and wake it up.  Return non-zero if
 *	the LWP is swapped out; if so the caller needs to awaken the swapper
 *	to bring the LWP into memory.
 */
int
sleepq_remove(sleepq_t *sq, lwp_t *l)
{
	struct schedstate_percpu *spc;
	struct cpu_info *ci;
	pri_t pri;

	KASSERT(lwp_locked(l, sq->sq_mutex));
	KASSERT(sq->sq_waiters > 0);

	sq->sq_waiters--;
	TAILQ_REMOVE(&sq->sq_queue, l, l_sleepchain);

#ifdef DIAGNOSTIC
	if (sq->sq_waiters == 0)
		KASSERT(TAILQ_FIRST(&sq->sq_queue) == NULL);
	else
		KASSERT(TAILQ_FIRST(&sq->sq_queue) != NULL);
#endif

	l->l_syncobj = &sched_syncobj;
	l->l_wchan = NULL;
	l->l_sleepq = NULL;
	l->l_flag &= ~LW_SINTR;

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;

	/*
	 * If not sleeping, the LWP must have been suspended.  Let whoever
	 * holds it stopped set it running again.
	 */
	if (l->l_stat != LSSLEEP) {
		KASSERT(l->l_stat == LSSTOP || l->l_stat == LSSUSPENDED);
		lwp_setlock(l, &spc->spc_lwplock);
		return 0;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_flag & LW_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_setlock(l, &spc->spc_lwplock);
		return 0;
	}

	/*
	 * Call the wake-up handler of the scheduler.  It may migrate
	 * this LWP to another CPU.
	 */
	sched_wakeup(l);
	ci = l->l_cpu;
	spc = &ci->ci_schedstate;

	/*
	 * Set it running.  We'll try to get the last CPU that ran
	 * this LWP to pick it up again.
	 */
	spc_lock(ci);
	lwp_setlock(l, spc->spc_mutex);
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;
	if ((l->l_flag & LW_INMEM) != 0) {
		sched_enqueue(l, false);
		pri = lwp_eprio(l);
		/* XXX This test is not good enough! */
		if ((pri < spc->spc_curpriority && pri < PUSER) ||
#ifdef MULTIPROCESSOR
		    ci->ci_curlwp == ci->ci_data.cpu_idlelwp) {
#else
		    curlwp == ci->ci_data.cpu_idlelwp) {
#endif
			cpu_need_resched(ci, RESCHED_IMMED);
		}
		spc_unlock(ci);
		return 0;
	}
	spc_unlock(ci);
	return 1;
}

/*
 * sleepq_insert:
 *
 *	Insert an LWP into the sleep queue, optionally sorting by priority.
 */
inline void
sleepq_insert(sleepq_t *sq, lwp_t *l, syncobj_t *sobj)
{
	lwp_t *l2;
	const int pri = lwp_eprio(l);

	if ((sobj->sobj_flag & SOBJ_SLEEPQ_SORTED) != 0) {
		TAILQ_FOREACH(l2, &sq->sq_queue, l_sleepchain) {
			if (lwp_eprio(l2) > pri) {
				TAILQ_INSERT_BEFORE(l2, l, l_sleepchain);
				return;
			}
		}
	}

	if ((sobj->sobj_flag & SOBJ_SLEEPQ_LIFO) != 0)
		TAILQ_INSERT_HEAD(&sq->sq_queue, l, l_sleepchain);
	else
		TAILQ_INSERT_TAIL(&sq->sq_queue, l, l_sleepchain);
}
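
/*
 * The queueing discipline comes from the synchronisation object: a
 * syncobj that wants priority-ordered release sets SOBJ_SLEEPQ_SORTED
 * (turnstiles, for instance, need the most eligible waiter released
 * first), while one that prefers cache-warm wakeups can set
 * SOBJ_SLEEPQ_LIFO.  As an illustration only (field layout assumed to
 * follow sys/syncobj.h of this vintage):
 *
 *	syncobj_t example_syncobj = {
 *		SOBJ_SLEEPQ_SORTED,
 *		sleepq_unsleep,
 *		sleepq_changepri,
 *		sleepq_lendpri,
 *		syncobj_noowner,
 *	};
 */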

/*
 * sleepq_enqueue:
 *
 *	Enter an LWP into the sleep queue and prepare for sleep.  The sleep
 *	queue must already be locked, and any interlock (such as the kernel
 *	lock) must have been released (see sleeptab_lookup(), sleepq_enter()).
 */
void
sleepq_enqueue(sleepq_t *sq, pri_t pri, wchan_t wchan, const char *wmesg,
    syncobj_t *sobj)
{
	lwp_t *l = curlwp;

	KASSERT(mutex_owned(sq->sq_mutex));
	KASSERT(l->l_stat == LSONPROC);
	KASSERT(l->l_wchan == NULL && l->l_sleepq == NULL);

	l->l_syncobj = sobj;
	l->l_wchan = wchan;
	l->l_sleepq = sq;
	l->l_wmesg = wmesg;
	l->l_slptime = 0;
	l->l_priority = pri;
	l->l_stat = LSSLEEP;
	l->l_sleeperr = 0;

	sq->sq_waiters++;
	sleepq_insert(sq, l, sobj);
	sched_slept(l);
}

/*
 * sleepq_block:
 *
 *	After any intermediate step such as releasing an interlock, switch
 *	away and block until awoken.  sleepq_block() may return early under
 *	exceptional conditions, for example if the LWP's containing process
 *	is exiting.
 */
int
sleepq_block(int timo, bool catch)
{
	int error = 0, sig;
	struct proc *p;
	lwp_t *l = curlwp;
	bool early = false;

	ktrcsw(1, 0);

	/*
	 * If sleeping interruptibly, check for pending signals, exits or
	 * core dump events.
	 */
	if (catch) {
		l->l_flag |= LW_SINTR;
		if ((l->l_flag & (LW_CANCELLED|LW_WEXIT|LW_WCORE)) != 0) {
			l->l_flag &= ~LW_CANCELLED;
			error = EINTR;
			early = true;
		} else if ((l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0))
			early = true;
	}

	if (early) {
		/* lwp_unsleep() will release the lock */
		lwp_unsleep(l);
	} else {
		if (timo)
			callout_schedule(&l->l_timeout_ch, timo);
		mi_switch(l);

		/* The LWP and sleep queue are now unlocked. */
		if (timo) {
			/*
			 * Even if the callout appears to have fired, we
			 * need to stop it in order to synchronise with
			 * other CPUs.
			 */
			if (callout_stop(&l->l_timeout_ch))
				error = EWOULDBLOCK;
		}
	}

	if (catch && error == 0) {
		p = l->l_proc;
		if ((l->l_flag & (LW_CANCELLED | LW_WEXIT | LW_WCORE)) != 0)
			error = EINTR;
		else if ((l->l_flag & LW_PENDSIG) != 0) {
			KERNEL_LOCK(1, l);	/* XXXSMP pool_put() */
			mutex_enter(&p->p_smutex);
			if ((sig = issignal(l)) != 0)
				error = sleepq_sigtoerror(l, sig);
			mutex_exit(&p->p_smutex);
			KERNEL_UNLOCK_LAST(l);
		}
	}

	ktrcsw(0, 0);

	KERNEL_LOCK(l->l_biglocks, l);
	return error;
}
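
/*
 * Putting enqueue and block together, a caller of this pair typically
 * looks something like the following sketch ("chan" and the syncobj are
 * placeholders; sleeptab_lookup() is assumed to return the queue locked):
 *
 *	sq = sleeptab_lookup(&sleeptab, chan);
 *	... release any interlock here ...
 *	sleepq_enter(sq, l);
 *	sleepq_enqueue(sq, pri, chan, "waitmsg", &sleep_syncobj);
 *	error = sleepq_block(hz, true);
 *
 * On return, 0 means a normal wakeup, EWOULDBLOCK means the timeout
 * (here hz ticks, about one second) fired, and EINTR/ERESTART report a
 * signal or cancellation.
 */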

/*
 * sleepq_wake:
 *
 *	Wake zero or more LWPs blocked on a single wait channel.
 */
lwp_t *
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected)
{
	lwp_t *l, *next;
	int swapin = 0;

	KASSERT(mutex_owned(sq->sq_mutex));

	for (l = TAILQ_FIRST(&sq->sq_queue); l != NULL; l = next) {
		KASSERT(l->l_sleepq == sq);
		next = TAILQ_NEXT(l, l_sleepchain);
		if (l->l_wchan != wchan)
			continue;
		swapin |= sleepq_remove(sq, l);
		if (--expected == 0)
			break;
	}

	sleepq_unlock(sq);

	/*
	 * If there are newly awakened threads that need to be swapped in,
	 * then kick the swapper into action.
	 */
	if (swapin)
		uvm_kick_scheduler();

	return l;
}
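
/*
 * A waker thus only needs the channel; a wakeup(9)-style helper might do
 * roughly this ("chan" again being a placeholder):
 *
 *	sq = sleeptab_lookup(&sleeptab, chan);	(returns with sq locked)
 *	sleepq_wake(sq, chan, (u_int)-1);	(wake everyone; unlocks sq)
 *
 * Since sleepq_wake() drops sq_mutex itself, the caller must not unlock
 * the queue again afterwards.
 */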

/*
 * sleepq_unsleep:
 *
 *	Remove an LWP from its sleep queue and set it runnable again.
 *	sleepq_unsleep() is called with the LWP's mutex held, and will
 *	always release it.
 */
void
sleepq_unsleep(lwp_t *l)
{
	sleepq_t *sq = l->l_sleepq;
	int swapin;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_wchan != NULL);
	KASSERT(l->l_mutex == sq->sq_mutex);

	swapin = sleepq_remove(sq, l);
	sleepq_unlock(sq);

	if (swapin)
		uvm_kick_scheduler();
}

/*
 * sleepq_timeout:
 *
 *	Entered via the callout(9) subsystem to time out an LWP that is on a
 *	sleep queue.
 */
void
sleepq_timeout(void *arg)
{
	lwp_t *l = arg;

	/*
	 * Lock the LWP.  Assuming it's still on the sleep queue, its
	 * current mutex will also be the sleep queue mutex.
	 */
	lwp_lock(l);

	if (l->l_wchan == NULL) {
		/* Somebody beat us to it. */
		lwp_unlock(l);
		return;
	}

	lwp_unsleep(l);
}
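
/*
 * For the timeout in sleepq_block() to work, each LWP's l_timeout_ch
 * callout is presumably bound to this handler once, early in the LWP's
 * life, so that callout_schedule() above only has to arm it.  Something
 * like (a sketch; the real initialization lives with the LWP code):
 *
 *	callout_setfunc(&l->l_timeout_ch, sleepq_timeout, l);
 */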

/*
 * sleepq_sigtoerror:
 *
 *	Given a signal number, interpret and return an error code.
 */
int
sleepq_sigtoerror(lwp_t *l, int sig)
{
	struct proc *p = l->l_proc;
	int error;

	KASSERT(mutex_owned(&p->p_smutex));

	/*
	 * If this sleep was canceled, don't let the syscall restart.
	 */
	if ((SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
		error = EINTR;
	else
		error = ERESTART;

	return error;
}

/*
 * sleepq_abort:
 *
 *	After a panic or during autoconfiguration, lower the interrupt
 *	priority level to give pending interrupts a chance to run, and
 *	then return.  Called if sleepq_dontsleep() returns non-zero, and
 *	always returns zero.
 */
int
sleepq_abort(kmutex_t *mtx, int unlock)
{
	extern int safepri;
	int s;

	s = splhigh();
	splx(safepri);
	splx(s);
	if (mtx != NULL && unlock != 0)
		mutex_exit(mtx);

	return 0;
}
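
/*
 * The expected pattern in a sleep entry point is to test
 * sleepq_dontsleep() first and bail out through here; a sketch of the
 * call site:
 *
 *	if (sleepq_dontsleep(l))
 *		return sleepq_abort(mtx, unlock);
 */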

/*
 * sleepq_changepri:
 *
 *	Adjust the priority of an LWP residing on a sleepq.  This method
 *	will only alter the user priority; the effective priority is
 *	assumed to have been fixed at the time of insertion into the queue.
 */
void
sleepq_changepri(lwp_t *l, pri_t pri)
{

	KASSERT(lwp_locked(l, l->l_sleepq->sq_mutex));
	l->l_usrpri = pri;
}

void
sleepq_lendpri(lwp_t *l, pri_t pri)
{
	sleepq_t *sq = l->l_sleepq;
	pri_t opri;

	KASSERT(lwp_locked(l, sq->sq_mutex));

	opri = lwp_eprio(l);
	l->l_inheritedprio = pri;

	if (lwp_eprio(l) != opri &&
	    (l->l_syncobj->sobj_flag & SOBJ_SLEEPQ_SORTED) != 0) {
		TAILQ_REMOVE(&sq->sq_queue, l, l_sleepchain);
		sleepq_insert(sq, l, l->l_syncobj);
	}
}
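
/*
 * sleepq_lendpri() is the sleep queue's side of priority inheritance:
 * when, say, a turnstile lends a sleeping LWP a priority, the call is
 * presumably dispatched here via the LWP's syncobj, and the re-insertion
 * above keeps a sorted queue consistent with the new effective priority.
 * A sketch of a lender:
 *
 *	lwp_lock(l);
 *	lwp_lendpri(l, pri);	(routed here for LWPs on a sleep queue)
 *	lwp_unlock(l);
 */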