/*	$NetBSD: kern_sleepq.c,v 1.76 2023/09/23 20:23:07 ad Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008, 2009, 2019, 2020, 2023
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Sleep queue implementation, used by turnstiles and general sleep/wakeup
 * interfaces.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.76 2023/09/23 20:23:07 ad Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/systm.h>
#include <sys/sleepq.h>
#include <sys/ktrace.h>

/*
 * for sleepq_abort:
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (IPL_SAFEPRI) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
#ifndef	IPL_SAFEPRI
#define	IPL_SAFEPRI	0
#endif

static int	sleepq_sigtoerror(lwp_t *, int);

/* General purpose sleep table, used by mtsleep() and condition variables. */
sleeptab_t	sleeptab __cacheline_aligned;
sleepqlock_t	sleepq_locks[SLEEPTAB_HASH_SIZE] __cacheline_aligned;

/*
 * sleeptab_init:
 *
 *	Initialize a sleep table.
 */
void
sleeptab_init(sleeptab_t *st)
{
	static bool again;
	int i;

	for (i = 0; i < SLEEPTAB_HASH_SIZE; i++) {
		if (!again) {
			mutex_init(&sleepq_locks[i].lock, MUTEX_DEFAULT,
			    IPL_SCHED);
		}
		sleepq_init(&st->st_queue[i]);
	}
	again = true;
}
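
/*
 * Illustrative sketch, not part of the original file: how a wait channel
 * address is paired with a queue and a lock.  Wait channels hash into
 * SLEEPTAB_HASH_SIZE buckets; each bucket pairs a queue in the sleep
 * table with a spin mutex in sleepq_locks[], so all sleepers and wakers
 * on channels that hash to the same bucket serialize on the same lock.
 * The sleeptab_lookup() helper is assumed to come from <sys/sleepq.h>
 * (it is referenced by the comments below); it returns the queue and
 * acquires and returns the bucket lock:
 *
 *	kmutex_t *mp;
 *	sleepq_t *sq = sleeptab_lookup(&sleeptab, wchan, &mp);
 *	...
 *	mutex_spin_exit(mp);
 *
 * Keeping the locks in their own cache-line aligned array, separate from
 * the queues, avoids false sharing between buckets.
 */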

/*
 * sleepq_init:
 *
 *	Prepare a sleep queue for use.
 */
void
sleepq_init(sleepq_t *sq)
{

	LIST_INIT(sq);
}

/*
 * sleepq_remove:
 *
 *	Remove an LWP from a sleep queue and wake it up.
 */
void
sleepq_remove(sleepq_t *sq, lwp_t *l)
{
	struct schedstate_percpu *spc;
	struct cpu_info *ci;

	KASSERT(lwp_locked(l, NULL));

	if ((l->l_syncobj->sobj_flag & SOBJ_SLEEPQ_NULL) == 0) {
		KASSERT(sq != NULL);
		LIST_REMOVE(l, l_sleepchain);
	} else {
		KASSERT(sq == NULL);
	}

	l->l_syncobj = &sched_syncobj;
	l->l_wchan = NULL;
	l->l_sleepq = NULL;
	l->l_flag &= ~LW_SINTR;

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;

	/*
	 * If not sleeping, the LWP must have been suspended.  Let whoever
	 * holds it stopped set it running again.
	 */
	if (l->l_stat != LSSLEEP) {
		KASSERT(l->l_stat == LSSTOP || l->l_stat == LSSUSPENDED);
		lwp_setlock(l, spc->spc_lwplock);
		return;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_pflag & LP_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_setlock(l, spc->spc_lwplock);
		return;
	}

	/* Update sleep time delta, call the wake-up handler of scheduler */
	l->l_slpticksum += (getticks() - l->l_slpticks);
	sched_wakeup(l);

	/* Look for a CPU to wake up */
	l->l_cpu = sched_takecpu(l);
	ci = l->l_cpu;
	spc = &ci->ci_schedstate;

	/*
	 * Set it running.
	 */
	spc_lock(ci);
	lwp_setlock(l, spc->spc_mutex);
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;
	sched_enqueue(l);
	sched_resched_lwp(l, true);
	/* LWP & SPC now unlocked, but we still hold sleep queue lock. */
}

/*
 * sleepq_insert:
 *
 *	Insert an LWP into the sleep queue, optionally sorting by priority.
 */
static void
sleepq_insert(sleepq_t *sq, lwp_t *l, syncobj_t *sobj)
{

	if ((sobj->sobj_flag & SOBJ_SLEEPQ_NULL) != 0) {
		KASSERT(sq == NULL);
		return;
	}
	KASSERT(sq != NULL);

	if ((sobj->sobj_flag & SOBJ_SLEEPQ_SORTED) != 0) {
		lwp_t *l2, *l_last = NULL;
		const pri_t pri = lwp_eprio(l);

		LIST_FOREACH(l2, sq, l_sleepchain) {
			l_last = l2;
			if (lwp_eprio(l2) < pri) {
				LIST_INSERT_BEFORE(l2, l, l_sleepchain);
				return;
			}
		}
		/*
		 * Ensure FIFO ordering if no waiters are of lower priority.
		 */
		if (l_last != NULL) {
			LIST_INSERT_AFTER(l_last, l, l_sleepchain);
			return;
		}
	}

	LIST_INSERT_HEAD(sq, l, l_sleepchain);
}

/*
 * sleepq_enter:
 *
 *	Prepare to block on a sleep queue, after which any interlock can be
 *	safely released.
 */
void
sleepq_enter(sleepq_t *sq, lwp_t *l, kmutex_t *mp)
{

	/*
	 * Acquire the per-LWP mutex and lend it our sleep queue lock.
	 * Once interlocked, we can release the kernel lock.
	 */
	lwp_lock(l);
	lwp_unlock_to(l, mp);
	KERNEL_UNLOCK_ALL(NULL, &l->l_biglocks);
}
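
/*
 * Illustrative sketch, not part of the original file: the canonical
 * blocking sequence as used by callers such as mtsleep() or the
 * condition variable code.  sleeptab_lookup() is assumed to come from
 * <sys/sleepq.h>, and "interlock", "wchan", "sobj" and "timo" are
 * hypothetical caller state:
 *
 *	kmutex_t *mp;
 *	sleepq_t *sq = sleeptab_lookup(&sleeptab, wchan, &mp);
 *
 *	sleepq_enter(sq, curlwp, mp);	// interlock the LWP against wakeup
 *	sleepq_enqueue(sq, wchan, "example", &sobj, true);
 *	mutex_exit(interlock);		// now safe: wakeups find us queued
 *	error = sleepq_block(timo, true, &sobj);
 *
 * The caller's interlock may only be dropped once sleepq_enter() has
 * lent the LWP the sleep queue lock; a wakeup issued after that point
 * will block on the queue lock until the sleeper is fully enqueued.
 */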

/*
 * sleepq_enqueue:
 *
 *	Enter an LWP into the sleep queue and prepare for sleep.  The sleep
 *	queue must already be locked, and any interlock (such as the kernel
 *	lock) must have been released (see sleeptab_lookup(), sleepq_enter()).
 */
void
sleepq_enqueue(sleepq_t *sq, wchan_t wchan, const char *wmesg, syncobj_t *sobj,
    bool catch_p)
{
	lwp_t *l = curlwp;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_stat == LSONPROC);
	KASSERT(l->l_wchan == NULL);
	KASSERT(l->l_sleepq == NULL);
	KASSERT((l->l_flag & LW_SINTR) == 0);

	l->l_syncobj = sobj;
	l->l_wchan = wchan;
	l->l_sleepq = sq;
	l->l_wmesg = wmesg;
	l->l_slptime = 0;
	l->l_stat = LSSLEEP;
	if (catch_p)
		l->l_flag |= LW_SINTR;

	sleepq_insert(sq, l, sobj);

	/* Record the time at which the thread went to sleep. */
	l->l_slpticks = getticks();
	sched_slept(l);
}

/*
 * sleepq_transfer:
 *
 *	Move an LWP from one sleep queue to another.  Both sleep queues
 *	must already be locked.
 *
 *	The LWP will be updated with the new sleepq, wchan, wmesg,
 *	sobj, and mutex.  The interruptible flag will also be updated.
 */
void
sleepq_transfer(lwp_t *l, sleepq_t *from_sq, sleepq_t *sq, wchan_t wchan,
    const char *wmesg, syncobj_t *sobj, kmutex_t *mp, bool catch_p)
{

	KASSERT(l->l_sleepq == from_sq);

	LIST_REMOVE(l, l_sleepchain);
	l->l_syncobj = sobj;
	l->l_wchan = wchan;
	l->l_sleepq = sq;
	l->l_wmesg = wmesg;

	if (catch_p)
		l->l_flag |= LW_SINTR | LW_CATCHINTR;
	else
		l->l_flag &= ~(LW_SINTR | LW_CATCHINTR);

	/*
	 * This allows the transfer from one sleepq to another where
	 * it is known that they're both protected by the same lock.
	 */
	if (mp != NULL)
		lwp_setlock(l, mp);

	sleepq_insert(sq, l, sobj);
}

/*
 * sleepq_uncatch:
 *
 *	Mark the LWP as no longer sleeping interruptibly.
 */
void
sleepq_uncatch(lwp_t *l)
{

	l->l_flag &= ~(LW_SINTR | LW_CATCHINTR);
}

/*
 * sleepq_block:
 *
 *	After any intermediate step such as releasing an interlock, switch.
 *	sleepq_block() may return early under exceptional conditions, for
 *	example if the LWP's containing process is exiting.
 *
 *	timo is a timeout in ticks.  timo = 0 specifies an infinite timeout.
 */
int
sleepq_block(int timo, bool catch_p, syncobj_t *syncobj)
{
	int error = 0, sig;
	struct proc *p;
	lwp_t *l = curlwp;
	bool early = false;
	int biglocks = l->l_biglocks;

	ktrcsw(1, 0, syncobj);

	/*
	 * If sleeping interruptibly, check for pending signals, exits or
	 * core dump events.
	 *
	 * Note the usage of LW_CATCHINTR.  This expresses our intent
	 * to catch or not catch sleep interruptions, which might change
	 * while we are sleeping.  It is independent from LW_SINTR because
	 * we don't want to leave LW_SINTR set when the LWP is not asleep.
	 */
	if (catch_p) {
		if ((l->l_flag & (LW_CANCELLED|LW_WEXIT|LW_WCORE)) != 0) {
			l->l_flag &= ~LW_CANCELLED;
			error = EINTR;
			early = true;
		} else if ((l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0))
			early = true;
		l->l_flag |= LW_CATCHINTR;
	} else
		l->l_flag &= ~LW_CATCHINTR;

	if (early) {
		/* lwp_unsleep() will release the lock */
		lwp_unsleep(l, true);
	} else {
		/*
		 * The LWP may have already been awoken if the caller
		 * dropped the sleep queue lock between sleepq_enqueue() and
		 * sleepq_block().  If that happens l_stat will be LSONPROC
		 * and mi_switch() will treat this as a preemption.  No need
		 * to do anything special here.
		 */
		if (timo) {
			l->l_flag &= ~LW_STIMO;
			callout_schedule(&l->l_timeout_ch, timo);
		}
		l->l_boostpri = l->l_syncobj->sobj_boostpri;
		spc_lock(l->l_cpu);
		mi_switch(l);

		/* The LWP and sleep queue are now unlocked. */
		if (timo) {
			/*
			 * Even if the callout appears to have fired, we
			 * need to stop it in order to synchronise with
			 * other CPUs.  It's important that we do this in
			 * this LWP's context, and not during wakeup, in
			 * order to keep the callout & its cache lines
			 * co-located on the CPU with the LWP.
			 */
			(void)callout_halt(&l->l_timeout_ch, NULL);
			error = (l->l_flag & LW_STIMO) ? EWOULDBLOCK : 0;
		}
	}

	/*
	 * LW_CATCHINTR is only modified in this function OR when we
	 * are asleep (with the sleepq locked).  We can therefore safely
	 * test it unlocked here as it is guaranteed to be stable by
	 * virtue of us running.
	 *
	 * We do not bother clearing it if set; that would require us
	 * to take the LWP lock, and it doesn't seem worth the hassle
	 * considering it is only meaningful here inside this function,
	 * and is set to reflect intent upon entry.
	 */
	if ((l->l_flag & LW_CATCHINTR) != 0 && error == 0) {
		p = l->l_proc;
		if ((l->l_flag & (LW_CANCELLED | LW_WEXIT | LW_WCORE)) != 0)
			error = EINTR;
		else if ((l->l_flag & LW_PENDSIG) != 0) {
			/*
			 * Acquiring p_lock may cause us to recurse
			 * through the sleep path and back into this
			 * routine, but is safe because LWPs sleeping
			 * on locks are non-interruptible and we will
			 * not recurse again.
			 */
			mutex_enter(p->p_lock);
			if (((sig = sigispending(l, 0)) != 0 &&
			    (sigprop[sig] & SA_STOP) == 0) ||
			    (sig = issignal(l)) != 0)
				error = sleepq_sigtoerror(l, sig);
			mutex_exit(p->p_lock);
		}
	}

	ktrcsw(0, 0, syncobj);
	if (__predict_false(biglocks != 0)) {
		KERNEL_LOCK(biglocks, NULL);
	}
	return error;
}
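
/*
 * Illustrative sketch, not part of the original file: how a caller
 * might fold sleepq_block()'s return value into its own error
 * handling.  The mstohz() conversion and "sobj" are assumptions; only
 * the error codes are taken from the function above.
 *
 *	error = sleepq_block(mstohz(100), true, &sobj);
 *	switch (error) {
 *	case 0:			// awoken via sleepq_remove()/sleepq_wake()
 *		break;
 *	case EWOULDBLOCK:	// timeout fired; LW_STIMO was set
 *		break;
 *	case EINTR:		// signal delivered, or sleep cancelled
 *	case ERESTART:		// signal delivered, syscall should restart
 *		break;
 *	}
 */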

/*
 * sleepq_wake:
 *
 *	Wake zero or more LWPs blocked on a single wait channel.
 */
void
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
{
	lwp_t *l, *next;

	KASSERT(mutex_owned(mp));

	for (l = LIST_FIRST(sq); l != NULL; l = next) {
		KASSERT(l->l_sleepq == sq);
		KASSERT(l->l_mutex == mp);
		next = LIST_NEXT(l, l_sleepchain);
		if (l->l_wchan != wchan)
			continue;
		sleepq_remove(sq, l);
		if (--expected == 0)
			break;
	}

	mutex_spin_exit(mp);
}
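
/*
 * Illustrative sketch, not part of the original file: the wakeup side.
 * The bucket lock must be held on entry and sleepq_wake() drops it on
 * return.  sleeptab_lookup() is assumed from <sys/sleepq.h>:
 *
 *	kmutex_t *mp;
 *	sleepq_t *sq = sleeptab_lookup(&sleeptab, wchan, &mp);
 *
 *	sleepq_wake(sq, wchan, 1, mp);		// wake at most one waiter
 *
 * Passing an "expected" count that cannot be reached, for example
 * (u_int)-1, wakes every LWP sleeping on the wait channel, since the
 * loop above only stops early once the count is exhausted.
 */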

/*
 * sleepq_unsleep:
 *
 *	Remove an LWP from its sleep queue and set it runnable again.
 *	sleepq_unsleep() is called with the LWP's mutex held, and will
 *	release it if "unlock" is true.
 */
void
sleepq_unsleep(lwp_t *l, bool unlock)
{
	sleepq_t *sq = l->l_sleepq;
	kmutex_t *mp = l->l_mutex;

	KASSERT(lwp_locked(l, mp));
	KASSERT(l->l_wchan != NULL);

	sleepq_remove(sq, l);
	if (unlock) {
		mutex_spin_exit(mp);
	}
}

/*
 * sleepq_timeout:
 *
 *	Entered via the callout(9) subsystem to time out an LWP that is on a
 *	sleep queue.
 */
void
sleepq_timeout(void *arg)
{
	lwp_t *l = arg;

	/*
	 * Lock the LWP.  Assuming it's still on the sleep queue, its
	 * current mutex will also be the sleep queue mutex.
	 */
	lwp_lock(l);

	if (l->l_wchan == NULL) {
		/* Somebody beat us to it. */
		lwp_unlock(l);
		return;
	}

	l->l_flag |= LW_STIMO;
	lwp_unsleep(l, true);
}
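
/*
 * Note on the timeout handshake, drawn from the logic in this file:
 * sleepq_block() arms l_timeout_ch before switching away and always
 * runs callout_halt() after waking, so the LW_STIMO flag set by
 * sleepq_timeout() is the only channel through which a timeout is
 * reported:
 *
 *	callout_schedule(&l->l_timeout_ch, timo);	// in sleepq_block()
 *	mi_switch(l);					// sleep
 *	(void)callout_halt(&l->l_timeout_ch, NULL);	// sync with callout
 *	error = (l->l_flag & LW_STIMO) ? EWOULDBLOCK : 0;
 *
 * callout_halt() is what makes checking LW_STIMO safe: after it
 * returns, sleepq_timeout() can no longer be running on another CPU.
 */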

/*
 * sleepq_sigtoerror:
 *
 *	Given a signal number, interpret and return an error code.
 */
static int
sleepq_sigtoerror(lwp_t *l, int sig)
{
	struct proc *p = l->l_proc;
	int error;

	KASSERT(mutex_owned(p->p_lock));

	/*
	 * If this sleep was canceled, don't let the syscall restart.
	 */
	if ((SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
		error = EINTR;
	else
		error = ERESTART;

	return error;
}

/*
 * sleepq_abort:
 *
 *	After a panic or during autoconfiguration, lower the interrupt
 *	priority level to give pending interrupts a chance to run, and
 *	then return.  Called if sleepq_dontsleep() returns non-zero, and
 *	always returns zero.
 */
int
sleepq_abort(kmutex_t *mtx, int unlock)
{
	int s;

	s = splhigh();
	splx(IPL_SAFEPRI);
	splx(s);
	if (mtx != NULL && unlock != 0)
		mutex_exit(mtx);

	return 0;
}

/*
 * sleepq_reinsert:
 *
 *	Move the position of the lwp in the sleep queue after a possible
 *	change of the lwp's effective priority.
 */
static void
sleepq_reinsert(sleepq_t *sq, lwp_t *l)
{

	KASSERT(l->l_sleepq == sq);
	if ((l->l_syncobj->sobj_flag & SOBJ_SLEEPQ_SORTED) == 0) {
		return;
	}

	/*
	 * Don't let the sleep queue become empty, even briefly.
	 * cv_signal() and cv_broadcast() inspect it without the
	 * sleep queue lock held and need to see a non-empty queue
	 * head if there are waiters.
	 */
	if (LIST_FIRST(sq) == l && LIST_NEXT(l, l_sleepchain) == NULL) {
		return;
	}
	LIST_REMOVE(l, l_sleepchain);
	sleepq_insert(sq, l, l->l_syncobj);
}

/*
 * sleepq_changepri:
 *
 *	Adjust the priority of an LWP residing on a sleepq.
 */
void
sleepq_changepri(lwp_t *l, pri_t pri)
{
	sleepq_t *sq = l->l_sleepq;

	KASSERT(lwp_locked(l, NULL));

	l->l_priority = pri;
	sleepq_reinsert(sq, l);
}

/*
 * sleepq_lendpri:
 *
 *	Adjust the lent priority of an LWP residing on a sleepq.
 */
void
sleepq_lendpri(lwp_t *l, pri_t pri)
{
	sleepq_t *sq = l->l_sleepq;

	KASSERT(lwp_locked(l, NULL));

	l->l_inheritedprio = pri;
	l->l_auxprio = MAX(l->l_inheritedprio, l->l_protectprio);
	sleepq_reinsert(sq, l);
}
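
/*
 * Illustrative note, not part of the original file: sleepq_lendpri()
 * underlies priority inheritance on turnstiles.  When a high-priority
 * LWP blocks on a lock, the turnstile code is expected to lend that
 * priority to the lock owner; if the owner is itself asleep on a
 * sorted sleep queue, the lend re-sorts it via sleepq_reinsert().
 * A hedged sketch of the caller's side, with "owner" and
 * "blocking_lwp" hypothetical:
 *
 *	lwp_lock(owner);
 *	lwp_lendpri(owner, lwp_eprio(blocking_lwp));
 *	lwp_unlock(owner);
 *
 * lwp_lendpri() dispatches through the owner's syncobj, reaching this
 * function when the owner resides on a sleep queue.
 */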