/*	$NetBSD: kern_sleepq.c,v 1.15 2007/10/09 19:00:14 rmind Exp $	*/

/*-
 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Sleep queue implementation, used by turnstiles and general sleep/wakeup
 * interfaces.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.15 2007/10/09 19:00:14 rmind Exp $");

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/systm.h>
#include <sys/sleepq.h>
#include <sys/ktrace.h>

#include <uvm/uvm_extern.h>

int	sleepq_sigtoerror(lwp_t *, int);

/* General purpose sleep table, used by ltsleep() and condition variables. */
sleeptab_t	sleeptab;
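
/*
 * A wait channel is hashed to one of the table's queues before sleeping
 * or waking.  An illustrative sketch of the mapping, assuming the usual
 * shift-and-mask SLEEPTAB_HASH() from <sys/sleepq.h> (the exact macro
 * definition is an assumption here, not part of this file):
 *
 *	sleepq_t *
 *	example_lookup(sleeptab_t *st, wchan_t wchan)
 *	{
 *		int hash = SLEEPTAB_HASH(wchan);
 *
 *		mutex_spin_enter(&st->st_queues[hash].st_mutex);
 *		return &st->st_queues[hash].st_queue;
 *	}
 *
 * Real callers use sleeptab_lookup(), which returns the queue locked.
 */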

/*
 * sleeptab_init:
 *
 *	Initialize a sleep table.
 */
void
sleeptab_init(sleeptab_t *st)
{
	sleepq_t *sq;
	int i;

	for (i = 0; i < SLEEPTAB_HASH_SIZE; i++) {
		sq = &st->st_queues[i].st_queue;
		mutex_init(&st->st_queues[i].st_mutex, MUTEX_SPIN, IPL_SCHED);
		sleepq_init(sq, &st->st_queues[i].st_mutex);
	}
}
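
/*
 * The global table above is expected to be initialized exactly once,
 * early in boot and before anything can sleep (a sketch; the actual
 * call site is elsewhere in the kernel):
 *
 *	sleeptab_init(&sleeptab);
 */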

/*
 * sleepq_init:
 *
 *	Prepare a sleep queue for use.
 */
void
sleepq_init(sleepq_t *sq, kmutex_t *mtx)
{

	sq->sq_waiters = 0;
	sq->sq_mutex = mtx;
	TAILQ_INIT(&sq->sq_queue);
}
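
/*
 * Users with private queues (e.g. turnstiles) supply their own spin
 * mutex at IPL_SCHED, mirroring what sleeptab_init() does above
 * (hypothetical names, for illustration only):
 *
 *	static kmutex_t example_mtx;
 *	static sleepq_t example_sq;
 *
 *	mutex_init(&example_mtx, MUTEX_SPIN, IPL_SCHED);
 *	sleepq_init(&example_sq, &example_mtx);
 */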

/*
 * sleepq_remove:
 *
 *	Remove an LWP from a sleep queue and wake it up.  Return non-zero if
 *	the LWP is swapped out; if so the caller needs to awaken the swapper
 *	to bring the LWP into memory.
 */
int
sleepq_remove(sleepq_t *sq, lwp_t *l)
{
	struct schedstate_percpu *spc;
	struct cpu_info *ci;
	pri_t pri;

	KASSERT(lwp_locked(l, sq->sq_mutex));
	KASSERT(sq->sq_waiters > 0);

	sq->sq_waiters--;
	TAILQ_REMOVE(&sq->sq_queue, l, l_sleepchain);

#ifdef DIAGNOSTIC
	if (sq->sq_waiters == 0)
		KASSERT(TAILQ_FIRST(&sq->sq_queue) == NULL);
	else
		KASSERT(TAILQ_FIRST(&sq->sq_queue) != NULL);
#endif

	l->l_syncobj = &sched_syncobj;
	l->l_wchan = NULL;
	l->l_sleepq = NULL;
	l->l_flag &= ~LW_SINTR;

	/*
	 * Call the scheduler's wake-up handler; it may change the CPU
	 * on which this thread runs.
	 */
	sched_wakeup(l);

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;

	/*
	 * If not sleeping, the LWP must have been suspended.  Let whoever
	 * holds it stopped set it running again.
	 */
	if (l->l_stat != LSSLEEP) {
		KASSERT(l->l_stat == LSSTOP || l->l_stat == LSSUSPENDED);
		lwp_setlock(l, &spc->spc_lwplock);
		return 0;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_flag & LW_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_setlock(l, &spc->spc_lwplock);
		return 0;
	}

	/*
	 * Set it running.  We'll try to get the last CPU that ran
	 * this LWP to pick it up again.
	 */
	spc_lock(ci);
	lwp_setlock(l, spc->spc_mutex);
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;
	if ((l->l_flag & LW_INMEM) != 0) {
		sched_enqueue(l, false);
		pri = lwp_eprio(l);
		/* XXX This test is not good enough! */
		if ((pri < spc->spc_curpriority && pri < PUSER) ||
#ifdef MULTIPROCESSOR
		    ci->ci_curlwp == ci->ci_data.cpu_idlelwp) {
#else
		    curlwp == ci->ci_data.cpu_idlelwp) {
#endif
			cpu_need_resched(ci, RESCHED_IMMED);
		}
		spc_unlock(ci);
		return 0;
	}
	spc_unlock(ci);
	return 1;
}
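
/*
 * A non-zero return from sleepq_remove() must be acted on once the
 * queue lock is dropped; this is the pattern used by sleepq_wake()
 * and sleepq_unsleep() below:
 *
 *	swapin |= sleepq_remove(sq, l);
 *	...
 *	sleepq_unlock(sq);
 *	if (swapin)
 *		uvm_kick_scheduler();
 */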

/*
 * sleepq_insert:
 *
 *	Insert an LWP into the sleep queue, optionally sorting by priority.
 */
inline void
sleepq_insert(sleepq_t *sq, lwp_t *l, syncobj_t *sobj)
{
	lwp_t *l2;
	const int pri = lwp_eprio(l);

	if ((sobj->sobj_flag & SOBJ_SLEEPQ_SORTED) != 0) {
		TAILQ_FOREACH(l2, &sq->sq_queue, l_sleepchain) {
			if (lwp_eprio(l2) > pri) {
				TAILQ_INSERT_BEFORE(l2, l, l_sleepchain);
				return;
			}
		}
	}

	if ((sobj->sobj_flag & SOBJ_SLEEPQ_LIFO) != 0)
		TAILQ_INSERT_HEAD(&sq->sq_queue, l, l_sleepchain);
	else
		TAILQ_INSERT_TAIL(&sq->sq_queue, l, l_sleepchain);
}
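
/*
 * For SOBJ_SLEEPQ_SORTED objects the queue is kept in nondecreasing
 * lwp_eprio() order: a new LWP goes before the first waiter with a
 * strictly greater value, so equal-priority waiters stay FIFO.  For
 * example, inserting an LWP with eprio 10 into a queue holding eprios
 * { 5, 10, 20 } yields { 5, 10, 10 (new), 20 }.
 */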

/*
 * sleepq_enqueue:
 *
 *	Enter an LWP into the sleep queue and prepare for sleep.  The sleep
 *	queue must already be locked, and any interlock (such as the kernel
 *	lock) must have been released (see sleeptab_lookup(), sleepq_enter()).
 */
void
sleepq_enqueue(sleepq_t *sq, pri_t pri, wchan_t wchan, const char *wmesg,
    syncobj_t *sobj)
{
	lwp_t *l = curlwp;

	KASSERT(mutex_owned(sq->sq_mutex));
	KASSERT(l->l_stat == LSONPROC);
	KASSERT(l->l_wchan == NULL && l->l_sleepq == NULL);

	l->l_syncobj = sobj;
	l->l_wchan = wchan;
	l->l_sleepq = sq;
	l->l_wmesg = wmesg;
	l->l_slptime = 0;
	l->l_priority = pri;
	l->l_stat = LSSLEEP;
	l->l_sleeperr = 0;

	sq->sq_waiters++;
	sleepq_insert(sq, l, sobj);
	sched_slept(l);
}

/*
 * sleepq_block:
 *
 *	After any intermediate step such as releasing an interlock, switch
 *	away.  sleepq_block() may return early under exceptional conditions,
 *	for example if the LWP's containing process is exiting.
 */
int
sleepq_block(int timo, bool catch)
{
	int error = 0, sig;
	struct proc *p;
	lwp_t *l = curlwp;
	bool early = false;

	ktrcsw(1, 0);

	/*
	 * If sleeping interruptibly, check for pending signals, exits or
	 * core dump events.
	 */
	if (catch) {
		l->l_flag |= LW_SINTR;
		if ((l->l_flag & (LW_CANCELLED|LW_WEXIT|LW_WCORE)) != 0) {
			l->l_flag &= ~LW_CANCELLED;
			error = EINTR;
			early = true;
		} else if ((l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0))
			early = true;
	}

	if (early) {
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l);
	} else {
		if (timo)
			callout_schedule(&l->l_timeout_ch, timo);
		mi_switch(l);

		/* The LWP and sleep queue are now unlocked. */
		if (timo) {
			/*
			 * Even if the callout appears to have fired, we
			 * need to stop it in order to synchronise with
			 * other CPUs.
			 */
			if (callout_stop(&l->l_timeout_ch))
				error = EWOULDBLOCK;
		}
	}

	if (catch && error == 0) {
		p = l->l_proc;
		if ((l->l_flag & (LW_CANCELLED | LW_WEXIT | LW_WCORE)) != 0)
			error = EINTR;
		else if ((l->l_flag & LW_PENDSIG) != 0) {
			KERNEL_LOCK(1, l);	/* XXXSMP pool_put() */
			mutex_enter(&p->p_smutex);
			if ((sig = issignal(l)) != 0)
				error = sleepq_sigtoerror(l, sig);
			mutex_exit(&p->p_smutex);
			KERNEL_UNLOCK_LAST(l);
		}
	}

	ktrcsw(0, 0);

	KERNEL_LOCK(l->l_biglocks, l);
	return error;
}
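
/*
 * Taken together, sleepq_enqueue() and sleepq_block() implement the
 * usual wait-channel sleep.  A sketch of a typical caller, assuming
 * the sleeptab_lookup()/sleepq_enter() helpers referenced above (their
 * exact semantics live in <sys/sleepq.h>) and a caller-supplied
 * syncobj_t "sobj":
 *
 *	lwp_t *l = curlwp;
 *	sleepq_t *sq;
 *	int error;
 *
 *	sq = sleeptab_lookup(&sleeptab, wchan);	// queue returned locked
 *	sleepq_enter(sq, l);			// lock LWP onto the queue
 *	sleepq_enqueue(sq, pri, wchan, wmesg, sobj);
 *	error = sleepq_block(timo, true);	// 0, EINTR, ERESTART or
 *						// EWOULDBLOCK
 */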

/*
 * sleepq_wake:
 *
 *	Wake zero or more LWPs blocked on a single wait channel.
 */
lwp_t *
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected)
{
	lwp_t *l, *next;
	int swapin = 0;

	KASSERT(mutex_owned(sq->sq_mutex));

	for (l = TAILQ_FIRST(&sq->sq_queue); l != NULL; l = next) {
		KASSERT(l->l_sleepq == sq);
		next = TAILQ_NEXT(l, l_sleepchain);
		if (l->l_wchan != wchan)
			continue;
		swapin |= sleepq_remove(sq, l);
		if (--expected == 0)
			break;
	}

	sleepq_unlock(sq);

	/*
	 * If there are newly awakened threads that need to be swapped in,
	 * then kick the swapper into action.
	 */
	if (swapin)
		uvm_kick_scheduler();

	return l;
}
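
/*
 * The "expected" argument bounds how many matching LWPs are woken.
 * Each call must be made with the queue locked and returns with it
 * unlocked, e.g. (two alternative calls, not a sequence):
 *
 *	sleepq_wake(sq, wchan, 1);		// wake a single waiter
 *	sleepq_wake(sq, wchan, (u_int)-1);	// wake all waiters
 */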

/*
 * sleepq_unsleep:
 *
 *	Remove an LWP from its sleep queue and set it runnable again.
 *	sleepq_unsleep() is called with the LWP's mutex held, and will
 *	always release it.
 */
void
sleepq_unsleep(lwp_t *l)
{
	sleepq_t *sq = l->l_sleepq;
	int swapin;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_wchan != NULL);
	KASSERT(l->l_mutex == sq->sq_mutex);

	swapin = sleepq_remove(sq, l);
	sleepq_unlock(sq);

	if (swapin)
		uvm_kick_scheduler();
}

/*
 * sleepq_timeout:
 *
 *	Entered via the callout(9) subsystem to time out an LWP that is on a
 *	sleep queue.
 */
void
sleepq_timeout(void *arg)
{
	lwp_t *l = arg;

	/*
	 * Lock the LWP.  Assuming it's still on the sleep queue, its
	 * current mutex will also be the sleep queue mutex.
	 */
	lwp_lock(l);

	if (l->l_wchan == NULL) {
		/* Somebody beat us to it. */
		lwp_unlock(l);
		return;
	}

	lwp_unsleep(l);
}
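
/*
 * sleepq_timeout() is armed via the per-LWP callout in sleepq_block();
 * the callout is presumed to have been bound to this handler when the
 * LWP was set up, along these lines (sketch; done elsewhere, e.g. at
 * LWP creation):
 *
 *	callout_init(&l->l_timeout_ch);
 *	callout_setfunc(&l->l_timeout_ch, sleepq_timeout, l);
 */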

/*
 * sleepq_sigtoerror:
 *
 *	Given a signal number, interpret and return an error code.
 */
int
sleepq_sigtoerror(lwp_t *l, int sig)
{
	struct proc *p = l->l_proc;
	int error;

	KASSERT(mutex_owned(&p->p_smutex));

	/*
	 * If this sleep was canceled, don't let the syscall restart.
	 */
	if ((SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
		error = EINTR;
	else
		error = ERESTART;

	return error;
}

/*
 * sleepq_abort:
 *
 *	After a panic or during autoconfiguration, lower the interrupt
 *	priority level to give pending interrupts a chance to run, and
 *	then return.  Called if sleepq_dontsleep() returns non-zero, and
 *	always returns zero.
 */
int
sleepq_abort(kmutex_t *mtx, int unlock)
{
	extern int safepri;
	int s;

	s = splhigh();
	splx(safepri);
	splx(s);
	if (mtx != NULL && unlock != 0)
		mutex_exit(mtx);

	return 0;
}
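
/*
 * Sketch of the guard pattern described above, as used by code that
 * must not sleep after a panic (sleepq_dontsleep() is declared in
 * <sys/sleepq.h>):
 *
 *	if (sleepq_dontsleep(l))
 *		return sleepq_abort(mtx, unlock);
 */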

/*
 * sleepq_changepri:
 *
 *	Adjust the priority of an LWP residing on a sleepq.  This method
 *	will only alter the user priority; the effective priority is
 *	assumed to have been fixed at the time of insertion into the queue.
 */
void
sleepq_changepri(lwp_t *l, pri_t pri)
{

	KASSERT(lwp_locked(l, l->l_sleepq->sq_mutex));
	l->l_usrpri = pri;
}

/*
 * sleepq_lendpri:
 *
 *	Lend a new inherited priority to an LWP residing on a sleepq, and
 *	re-sort the queue if the effective priority has changed.
 */
void
sleepq_lendpri(lwp_t *l, pri_t pri)
{
	sleepq_t *sq = l->l_sleepq;
	pri_t opri;

	KASSERT(lwp_locked(l, sq->sq_mutex));

	opri = lwp_eprio(l);
	l->l_inheritedprio = pri;

	if (lwp_eprio(l) != opri &&
	    (l->l_syncobj->sobj_flag & SOBJ_SLEEPQ_SORTED) != 0) {
		TAILQ_REMOVE(&sq->sq_queue, l, l_sleepchain);
		sleepq_insert(sq, l, l->l_syncobj);
	}
}
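
/*
 * sleepq_lendpri() is the sleep queue half of priority inheritance:
 * when a turnstile detects that a sleeping lock owner holds up a
 * higher-priority LWP, it lends that priority to the owner, whose
 * queue position must then be recomputed.  A hypothetical caller's
 * side (the real logic lives in the turnstile code):
 *
 *	lwp_lock(owner);
 *	lwp_lendpri(owner, lwp_eprio(l));	// may dispatch here via
 *	lwp_unlock(owner);			// the owner's syncobj
 */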