/*	$NetBSD: kern_sleepq.c,v 1.57 2020/01/08 17:38:42 ad Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Sleep queue implementation, used by turnstiles and general sleep/wakeup
 * interfaces.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.57 2020/01/08 17:38:42 ad Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/systm.h>
#include <sys/sleepq.h>
#include <sys/ktrace.h>

/*
 * for sleepq_abort:
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (IPL_SAFEPRI) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
#ifndef IPL_SAFEPRI
#define	IPL_SAFEPRI	0
#endif

static int	sleepq_sigtoerror(lwp_t *, int);

/* General purpose sleep table, used by mtsleep() and condition variables. */
sleeptab_t	sleeptab __cacheline_aligned;
sleepqlock_t	sleepq_locks[SLEEPTAB_HASH_SIZE] __cacheline_aligned;

/*
 * sleeptab_init:
 *
 *	Initialize a sleep table.
 */
void
sleeptab_init(sleeptab_t *st)
{
	static bool again;
	int i;

	for (i = 0; i < SLEEPTAB_HASH_SIZE; i++) {
		if (!again) {
			mutex_init(&sleepq_locks[i].lock, MUTEX_DEFAULT,
			    IPL_SCHED);
		}
		sleepq_init(&st->st_queue[i]);
	}
	again = true;
}
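
/*
 * Example (a minimal sketch, not compiled into this file): how a wait
 * channel address maps to a queue/lock pair in the general sleep table.
 * This mirrors what sleeptab_lookup() in sys/sleepq.h does; SLEEPTAB_HASH()
 * is assumed to be the hash macro from that header.
 */
#if 0
static sleepq_t *
example_lookup(sleeptab_t *st, wchan_t wchan, kmutex_t **mp)
{
	const u_int hash = SLEEPTAB_HASH(wchan);

	/* The queue and its lock live in matching hash buckets. */
	*mp = &sleepq_locks[hash].lock;
	mutex_spin_enter(*mp);
	return &st->st_queue[hash];
}
#endif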

/*
 * sleepq_init:
 *
 *	Prepare a sleep queue for use.
 */
void
sleepq_init(sleepq_t *sq)
{

	TAILQ_INIT(sq);
}

/*
 * sleepq_remove:
 *
 *	Remove an LWP from a sleep queue and wake it up.
 */
void
sleepq_remove(sleepq_t *sq, lwp_t *l)
{
	struct schedstate_percpu *spc;
	struct cpu_info *ci;

	KASSERT(lwp_locked(l, NULL));

	TAILQ_REMOVE(sq, l, l_sleepchain);
	l->l_syncobj = &sched_syncobj;
	l->l_wchan = NULL;
	l->l_sleepq = NULL;
	l->l_flag &= ~LW_SINTR;

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;

	/*
	 * If not sleeping, the LWP must have been suspended.  Let whoever
	 * holds it stopped set it running again.
	 */
	if (l->l_stat != LSSLEEP) {
		KASSERT(l->l_stat == LSSTOP || l->l_stat == LSSUSPENDED);
		lwp_setlock(l, spc->spc_lwplock);
		return;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_flag & LW_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_setlock(l, spc->spc_lwplock);
		return;
	}

	/* Update the sleep time delta and call the scheduler's wake-up hook. */
	l->l_slpticksum += (hardclock_ticks - l->l_slpticks);
	sched_wakeup(l);

	/* Pick a CPU for the LWP to run on. */
	l->l_cpu = sched_takecpu(l);
	ci = l->l_cpu;
	spc = &ci->ci_schedstate;

	/*
	 * Set it running.
	 */
	spc_lock(ci);
	lwp_setlock(l, spc->spc_mutex);
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;
	sched_enqueue(l);
	sched_resched_lwp(l, true);
	/* LWP & SPC now unlocked, but we still hold the sleep queue lock. */
}

/*
 * sleepq_insert:
 *
 *	Insert an LWP into the sleep queue, optionally sorting by priority.
 */
static void
sleepq_insert(sleepq_t *sq, lwp_t *l, syncobj_t *sobj)
{

	if ((sobj->sobj_flag & SOBJ_SLEEPQ_SORTED) != 0) {
		lwp_t *l2;
		const int pri = lwp_eprio(l);

		TAILQ_FOREACH(l2, sq, l_sleepchain) {
			if (lwp_eprio(l2) < pri) {
				TAILQ_INSERT_BEFORE(l2, l, l_sleepchain);
				return;
			}
		}
	}

	if ((sobj->sobj_flag & SOBJ_SLEEPQ_LIFO) != 0)
		TAILQ_INSERT_HEAD(sq, l, l_sleepchain);
	else
		TAILQ_INSERT_TAIL(sq, l, l_sleepchain);
}
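
/*
 * For example, with a SOBJ_SLEEPQ_SORTED queue holding LWPs of effective
 * priority 50, 30 and 10 from head to tail, an LWP of priority 40 is
 * inserted before the 30; an LWP of priority 10 finds no lower-priority
 * entry, so it falls through to the tail: equal priorities queue FIFO.
 */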

/*
 * sleepq_enqueue:
 *
 *	Enter an LWP into the sleep queue and prepare for sleep.  The sleep
 *	queue must already be locked, and any interlock (such as the kernel
 *	lock) must have been released (see sleeptab_lookup(), sleepq_enter()).
 */
void
sleepq_enqueue(sleepq_t *sq, wchan_t wchan, const char *wmesg, syncobj_t *sobj)
{
	lwp_t *l = curlwp;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_stat == LSONPROC);
	KASSERT(l->l_wchan == NULL && l->l_sleepq == NULL);

	l->l_syncobj = sobj;
	l->l_wchan = wchan;
	l->l_sleepq = sq;
	l->l_wmesg = wmesg;
	l->l_slptime = 0;
	l->l_stat = LSSLEEP;
	l->l_sleeperr = 0;

	sleepq_insert(sq, l, sobj);

	/* Record the time at which the LWP went to sleep. */
	l->l_slpticks = hardclock_ticks;
	sched_slept(l);
}
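
/*
 * Example (a minimal caller-side sketch, not compiled into this file):
 * the canonical sleep path, as followed by callers such as the condition
 * variable code.  `wchan', `wmesg' and `interlock' are caller-supplied;
 * sleep_syncobj is the general purpose sync object from kern_synch.c.
 */
#if 0
	kmutex_t *mp;
	sleepq_t *sq;
	int error;

	/* Look up and lock the hashed queue, lend its lock to the LWP. */
	sq = sleeptab_lookup(&sleeptab, wchan, &mp);
	sleepq_enter(sq, curlwp, mp);
	sleepq_enqueue(sq, wchan, wmesg, &sleep_syncobj);

	/* Release any interlock before switching away. */
	mutex_exit(interlock);
	error = sleepq_block(0, true);
#endif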

/*
 * sleepq_block:
 *
 *	After any intermediate step such as releasing an interlock, switch.
 *	sleepq_block() may return early under exceptional conditions, for
 *	example if the LWP's containing process is exiting.
 *
 *	timo is a timeout in ticks.  timo = 0 specifies an infinite timeout.
 */
int
sleepq_block(int timo, bool catch_p)
{
	int error = 0, sig;
	struct proc *p;
	lwp_t *l = curlwp;
	bool early = false;
	int biglocks = l->l_biglocks;

	ktrcsw(1, 0);

	/*
	 * If sleeping interruptibly, check for pending signals, exits or
	 * core dump events.
	 */
	if (catch_p) {
		l->l_flag |= LW_SINTR;
		if ((l->l_flag & (LW_CANCELLED|LW_WEXIT|LW_WCORE)) != 0) {
			l->l_flag &= ~LW_CANCELLED;
			error = EINTR;
			early = true;
		} else if ((l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0))
			early = true;
	}

	if (early) {
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l, true);
	} else {
		if (timo) {
			callout_schedule(&l->l_timeout_ch, timo);
		}
		spc_lock(l->l_cpu);
		mi_switch(l);

		/* The LWP and sleep queue are now unlocked. */
		if (timo) {
			/*
			 * Even if the callout appears to have fired, we
			 * need to stop it in order to synchronise with
			 * other CPUs.  It's important that we do this in
			 * this LWP's context, and not during wakeup, in
			 * order to keep the callout & its cache lines
			 * co-located on the CPU with the LWP.
			 */
			if (callout_halt(&l->l_timeout_ch, NULL))
				error = EWOULDBLOCK;
		}
	}

	if (catch_p && error == 0) {
		p = l->l_proc;
		if ((l->l_flag & (LW_CANCELLED | LW_WEXIT | LW_WCORE)) != 0)
			error = EINTR;
		else if ((l->l_flag & LW_PENDSIG) != 0) {
			/*
			 * Acquiring p_lock may cause us to recurse
			 * through the sleep path and back into this
			 * routine, but is safe because LWPs sleeping
			 * on locks are non-interruptible.  We will
			 * not recurse again.
			 */
			mutex_enter(p->p_lock);
			if (((sig = sigispending(l, 0)) != 0 &&
			    (sigprop[sig] & SA_STOP) == 0) ||
			    (sig = issignal(l)) != 0)
				error = sleepq_sigtoerror(l, sig);
			mutex_exit(p->p_lock);
		}
	}

	ktrcsw(0, 0);
	if (__predict_false(biglocks != 0)) {
		KERNEL_LOCK(biglocks, NULL);
	}
	return error;
}
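
/*
 * Example (illustrative only): interpreting sleepq_block()'s return value
 * after sleeping with a 100ms timeout; mstohz() converts milliseconds to
 * ticks.
 */
#if 0
	error = sleepq_block(mstohz(100), true);
	switch (error) {
	case 0:			/* awoken via sleepq_wake()/sleepq_unsleep() */
		break;
	case EWOULDBLOCK:	/* the timeout expired first */
		break;
	case EINTR:		/* signalled; do not restart the syscall */
	case ERESTART:		/* signalled; restart the syscall */
		break;
	}
#endif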

/*
 * sleepq_wake:
 *
 *	Wake zero or more LWPs blocked on a single wait channel.
 */
void
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
{
	lwp_t *l, *next;

	KASSERT(mutex_owned(mp));

	for (l = TAILQ_FIRST(sq); l != NULL; l = next) {
		KASSERT(l->l_sleepq == sq);
		KASSERT(l->l_mutex == mp);
		next = TAILQ_NEXT(l, l_sleepchain);
		if (l->l_wchan != wchan)
			continue;
		sleepq_remove(sq, l);
		if (--expected == 0)
			break;
	}

	mutex_spin_exit(mp);
}
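
/*
 * Example (a minimal caller-side sketch, not compiled into this file):
 * the wake-up side for the general sleep table.  sleepq_hashlock() is
 * assumed to be the sys/sleepq.h helper that finds and acquires the
 * hashed lock; sleepq_wake() releases it before returning, as above.
 */
#if 0
	kmutex_t *mp;
	sleepq_t *sq;

	mp = sleepq_hashlock(wchan);
	sq = &sleeptab.st_queue[SLEEPTAB_HASH(wchan)];
	sleepq_wake(sq, wchan, 1, mp);		/* wake at most one LWP */
#endif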

/*
 * sleepq_unsleep:
 *
 *	Remove an LWP from its sleep queue and set it runnable again.
 *	sleepq_unsleep() is called with the LWP's mutex held, and will
 *	release it if "unlock" is true.
 */
void
sleepq_unsleep(lwp_t *l, bool unlock)
{
	sleepq_t *sq = l->l_sleepq;
	kmutex_t *mp = l->l_mutex;

	KASSERT(lwp_locked(l, mp));
	KASSERT(l->l_wchan != NULL);

	sleepq_remove(sq, l);
	if (unlock) {
		mutex_spin_exit(mp);
	}
}

/*
 * sleepq_timeout:
 *
 *	Entered via the callout(9) subsystem to time out an LWP that is on a
 *	sleep queue.
 */
void
sleepq_timeout(void *arg)
{
	lwp_t *l = arg;

	/*
	 * Lock the LWP.  Assuming it's still on the sleep queue, its
	 * current mutex will also be the sleep queue mutex.
	 */
	lwp_lock(l);

	if (l->l_wchan == NULL) {
		/* Somebody beat us to it. */
		lwp_unlock(l);
		return;
	}

	lwp_unsleep(l, true);
}

/*
 * sleepq_sigtoerror:
 *
 *	Given a signal number, interpret and return an error code.
 */
static int
sleepq_sigtoerror(lwp_t *l, int sig)
{
	struct proc *p = l->l_proc;
	int error;

	KASSERT(mutex_owned(p->p_lock));

	/*
	 * If this sleep was canceled, don't let the syscall restart.
	 */
	if ((SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
		error = EINTR;
	else
		error = ERESTART;

	return error;
}

/*
 * sleepq_abort:
 *
 *	After a panic or during autoconfiguration, lower the interrupt
 *	priority level to give pending interrupts a chance to run, and
 *	then return.  Called if sleepq_dontsleep() returns non-zero, and
 *	always returns zero.
 */
int
sleepq_abort(kmutex_t *mtx, int unlock)
{
	int s;

	s = splhigh();
	splx(IPL_SAFEPRI);
	splx(s);
	if (mtx != NULL && unlock != 0)
		mutex_exit(mtx);

	return 0;
}
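
/*
 * Example (illustrative only): the guard that sleep interfaces place in
 * front of the sleep path, per the comment above.  `mtx' and `unlock'
 * stand for the caller's interlock arguments.
 */
#if 0
	if (sleepq_dontsleep(curlwp))
		return sleepq_abort(mtx, unlock);
#endif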

/*
 * sleepq_reinsert:
 *
 *	Move the LWP within the sleep queue after a possible change of
 *	its effective priority.
 */
static void
sleepq_reinsert(sleepq_t *sq, lwp_t *l)
{

	KASSERT(l->l_sleepq == sq);
	if ((l->l_syncobj->sobj_flag & SOBJ_SLEEPQ_SORTED) == 0) {
		return;
	}

	/*
	 * Don't let the sleep queue become empty, even briefly.
	 * cv_signal() and cv_broadcast() inspect it without the
	 * sleep queue lock held and need to see a non-empty queue
	 * head if there are waiters.
	 */
	if (TAILQ_FIRST(sq) == l && TAILQ_NEXT(l, l_sleepchain) == NULL) {
		return;
	}
	TAILQ_REMOVE(sq, l, l_sleepchain);
	sleepq_insert(sq, l, l->l_syncobj);
}

/*
 * sleepq_changepri:
 *
 *	Adjust the priority of an LWP residing on a sleepq.
 */
void
sleepq_changepri(lwp_t *l, pri_t pri)
{
	sleepq_t *sq = l->l_sleepq;

	KASSERT(lwp_locked(l, NULL));

	l->l_priority = pri;
	sleepq_reinsert(sq, l);
}

/*
 * sleepq_lendpri:
 *
 *	Adjust the lent priority of an LWP residing on a sleepq.
 */
void
sleepq_lendpri(lwp_t *l, pri_t pri)
{
	sleepq_t *sq = l->l_sleepq;

	KASSERT(lwp_locked(l, NULL));

	l->l_inheritedprio = pri;
	l->l_auxprio = MAX(l->l_inheritedprio, l->l_protectprio);
	sleepq_reinsert(sq, l);
}
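
/*
 * For example, lending priority 80 to an LWP whose l_protectprio is 0
 * yields l_auxprio = MAX(80, 0) = 80; if the same LWP already held a
 * protect priority of 90, the lend would leave l_auxprio at 90.
 */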