/*	$NetBSD: kern_sleepq.c,v 1.87 2023/11/02 10:31:55 martin Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008, 2009, 2019, 2020, 2023
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Sleep queue implementation, used by turnstiles and general sleep/wakeup
 * interfaces.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.87 2023/11/02 10:31:55 martin Exp $");

#include <sys/param.h>

#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/ktrace.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sleepq.h>
#include <sys/syncobj.h>
#include <sys/systm.h>

/*
 * for sleepq_abort:
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (IPL_SAFEPRI) is machine-dependent, thus this value is initialized
 * and maintained in the machine-dependent layers.  This priority will
 * typically be 0, or the lowest priority that is safe for use on the
 * interrupt stack; it can be made higher to block network software
 * interrupts after panics.
 */
#ifndef IPL_SAFEPRI
#define IPL_SAFEPRI     0
#endif

static int      sleepq_sigtoerror(lwp_t *, int);
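
/*
 * Illustrative blocking sequence (a sketch only, not taken from a real
 * caller; the actual users are the higher level primitives built on this
 * file, e.g. condition variables, turnstiles and sleep(9)).  "wchan",
 * "interlock", "some_syncobj" and "timo" below are hypothetical names.
 *
 *      // Look up the sleep queue sq and its spin lock mp for the wait
 *      // channel (see sleeptab_lookup() in <sys/sleepq.h>) and lock mp.
 *      nlocks = sleepq_enter(sq, curlwp, mp);  // lend sq's lock to the LWP
 *      sleepq_enqueue(sq, wchan, "example", &some_syncobj, true);
 *      mutex_exit(interlock);                  // interlock now safe to drop
 *      error = sleepq_block(timo, true, &some_syncobj, nlocks);
 *
 * Wakeup side, with the sleep queue lock held on entry (sleepq_wake()
 * drops it before returning):
 *
 *      mutex_spin_enter(mp);
 *      sleepq_wake(sq, wchan, 1, mp);
 */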

/* General purpose sleep table, used by mtsleep() and condition variables. */
sleeptab_t      sleeptab __cacheline_aligned;
sleepqlock_t    sleepq_locks[SLEEPTAB_HASH_SIZE] __cacheline_aligned;

/*
 * sleeptab_init:
 *
 *      Initialize a sleep table.
 */
void
sleeptab_init(sleeptab_t *st)
{
        static bool again;
        int i;

        for (i = 0; i < SLEEPTAB_HASH_SIZE; i++) {
                if (!again) {
                        mutex_init(&sleepq_locks[i].lock, MUTEX_DEFAULT,
                            IPL_SCHED);
                }
                sleepq_init(&st->st_queue[i]);
        }
        again = true;
}

/*
 * sleepq_init:
 *
 *      Prepare a sleep queue for use.
 */
void
sleepq_init(sleepq_t *sq)
{

        LIST_INIT(sq);
}

/*
 * sleepq_remove:
 *
 *      Remove an LWP from a sleep queue and wake it up.  Distinguish
 *      between deliberate wakeups (which carry valuable information) and
 *      "unsleep" (where an out-of-band action must be taken).
 *
 *      For wakeup, convert any interruptible wait into a non-interruptible
 *      one before waking the LWP.  Otherwise, if only one LWP is awoken it
 *      could fail to do something useful with the wakeup due to an error
 *      return and the caller of e.g. cv_signal() may not expect this.
 */
void
sleepq_remove(sleepq_t *sq, lwp_t *l, bool wakeup)
{
        struct schedstate_percpu *spc;
        struct cpu_info *ci;

        KASSERT(lwp_locked(l, NULL));

        if ((l->l_syncobj->sobj_flag & SOBJ_SLEEPQ_NULL) == 0) {
                KASSERT(sq != NULL);
                LIST_REMOVE(l, l_sleepchain);
        } else {
                KASSERT(sq == NULL);
        }

        l->l_syncobj = &sched_syncobj;
        l->l_wchan = NULL;
        l->l_sleepq = NULL;
        l->l_flag &= wakeup ? ~(LW_SINTR|LW_CATCHINTR|LW_STIMO) : ~LW_SINTR;

        ci = l->l_cpu;
        spc = &ci->ci_schedstate;

        /*
         * If not sleeping, the LWP must have been suspended.  Let whoever
         * holds it stopped set it running again.
         */
        if (l->l_stat != LSSLEEP) {
                KASSERT(l->l_stat == LSSTOP || l->l_stat == LSSUSPENDED);
                lwp_setlock(l, spc->spc_lwplock);
                return;
        }

        /*
         * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
         * about to call mi_switch(), in which case it will yield.
         */
        if ((l->l_pflag & LP_RUNNING) != 0) {
                l->l_stat = LSONPROC;
                l->l_slptime = 0;
                lwp_setlock(l, spc->spc_lwplock);
                return;
        }

        /* Update sleep time delta, call the wake-up handler of scheduler */
        l->l_slpticksum += (getticks() - l->l_slpticks);
        sched_wakeup(l);

        /* Look for a CPU to wake up */
        l->l_cpu = sched_takecpu(l);
        ci = l->l_cpu;
        spc = &ci->ci_schedstate;

        /*
         * Set it running.
         */
        spc_lock(ci);
        lwp_setlock(l, spc->spc_mutex);
        sched_setrunnable(l);
        l->l_stat = LSRUN;
        l->l_slptime = 0;
        sched_enqueue(l);
        sched_resched_lwp(l, true);
        /* LWP & SPC now unlocked, but we still hold sleep queue lock. */
}

/*
 * sleepq_insert:
 *
 *      Insert an LWP into the sleep queue, optionally sorting by priority.
 */
static void
sleepq_insert(sleepq_t *sq, lwp_t *l, syncobj_t *sobj)
{

        if ((sobj->sobj_flag & SOBJ_SLEEPQ_NULL) != 0) {
                KASSERT(sq == NULL);
                return;
        }
        KASSERT(sq != NULL);

        if ((sobj->sobj_flag & SOBJ_SLEEPQ_SORTED) != 0) {
                lwp_t *l2, *l_last = NULL;
                const pri_t pri = lwp_eprio(l);

                LIST_FOREACH(l2, sq, l_sleepchain) {
                        l_last = l2;
                        if (lwp_eprio(l2) < pri) {
                                LIST_INSERT_BEFORE(l2, l, l_sleepchain);
                                return;
                        }
                }
                /*
                 * Ensure FIFO ordering if no waiters are of lower priority.
                 */
                if (l_last != NULL) {
                        LIST_INSERT_AFTER(l_last, l, l_sleepchain);
                        return;
                }
        }

        LIST_INSERT_HEAD(sq, l, l_sleepchain);
}

/*
 * sleepq_enter:
 *
 *      Prepare to block on a sleep queue, after which any interlock can be
 *      safely released.
 */
int
sleepq_enter(sleepq_t *sq, lwp_t *l, kmutex_t *mp)
{
        int nlocks;

        KASSERT((sq != NULL) == (mp != NULL));

        /*
         * Acquire the per-LWP mutex and lend it our sleep queue lock.
         * Once interlocked, we can release the kernel lock.
         */
        lwp_lock(l);
        if (mp != NULL) {
                lwp_unlock_to(l, mp);
        }
        if (__predict_false((nlocks = l->l_blcnt) != 0)) {
                KERNEL_UNLOCK_ALL(NULL, NULL);
        }
        return nlocks;
}

/*
 * sleepq_enqueue:
 *
 *      Enter an LWP into the sleep queue and prepare for sleep.  The sleep
 *      queue must already be locked, and any interlock (such as the kernel
 *      lock) must have been released (see sleeptab_lookup(), sleepq_enter()).
 */
void
sleepq_enqueue(sleepq_t *sq, wchan_t wchan, const char *wmesg, syncobj_t *sobj,
    bool catch_p)
{
        lwp_t *l = curlwp;

        KASSERT(lwp_locked(l, NULL));
        KASSERT(l->l_stat == LSONPROC);
        KASSERT(l->l_wchan == NULL);
        KASSERT(l->l_sleepq == NULL);
        KASSERT((l->l_flag & LW_SINTR) == 0);

        l->l_syncobj = sobj;
        l->l_wchan = wchan;
        l->l_sleepq = sq;
        l->l_wmesg = wmesg;
        l->l_slptime = 0;
        l->l_stat = LSSLEEP;
        if (catch_p)
                l->l_flag |= LW_SINTR;

        sleepq_insert(sq, l, sobj);

        /* Save the time when thread has slept */
        l->l_slpticks = getticks();
        sched_slept(l);
}

/*
 * sleepq_transfer:
 *
 *      Move an LWP from one sleep queue to another.  Both sleep queues
 *      must already be locked.
 *
 *      The LWP will be updated with the new sleepq, wchan, wmesg,
 *      sobj, and mutex.  The interruptible flag will also be updated.
 */
void
sleepq_transfer(lwp_t *l, sleepq_t *from_sq, sleepq_t *sq, wchan_t wchan,
    const char *wmesg, syncobj_t *sobj, kmutex_t *mp, bool catch_p)
{

        KASSERT(l->l_sleepq == from_sq);

        LIST_REMOVE(l, l_sleepchain);
        l->l_syncobj = sobj;
        l->l_wchan = wchan;
        l->l_sleepq = sq;
        l->l_wmesg = wmesg;

        if (catch_p)
                l->l_flag |= LW_SINTR | LW_CATCHINTR;
        else
                l->l_flag &= ~(LW_SINTR | LW_CATCHINTR);

        /*
         * This allows the transfer from one sleepq to another where
         * it is known that they're both protected by the same lock.
         */
        if (mp != NULL)
                lwp_setlock(l, mp);

        sleepq_insert(sq, l, sobj);
}

/*
 * sleepq_uncatch:
 *
 *      Mark the LWP as no longer sleeping interruptibly.
 */
void
sleepq_uncatch(lwp_t *l)
{

        l->l_flag &= ~(LW_SINTR | LW_CATCHINTR | LW_STIMO);
}
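
/*
 * Illustrative requeue loop (a sketch with hypothetical caller, wait
 * channel and syncobj names): moving every waiter on old_wchan from one
 * queue to another with sleepq_transfer().  Both queues are assumed to
 * hang off the same spin lock mp, in which case NULL may be passed and
 * the LWP's lock is left alone, per the comment in sleepq_transfer().
 *
 *      lwp_t *l, *next;
 *
 *      mutex_spin_enter(mp);
 *      LIST_FOREACH_SAFE(l, from_sq, l_sleepchain, next) {
 *              if (l->l_wchan != old_wchan)
 *                      continue;
 *              sleepq_transfer(l, from_sq, to_sq, new_wchan, wmesg,
 *                  &some_syncobj, NULL, true);
 *      }
 *      mutex_spin_exit(mp);
 */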

/*
 * sleepq_block:
 *
 *      After any intermediate step such as releasing an interlock, switch.
 *      sleepq_block() may return early under exceptional conditions, for
 *      example if the LWP's containing process is exiting.
 *
 *      timo is a timeout in ticks.  timo = 0 specifies an infinite timeout.
 */
int
sleepq_block(int timo, bool catch_p, syncobj_t *syncobj, int nlocks)
{
        const int mask = LW_CANCELLED|LW_WEXIT|LW_WCORE|LW_PENDSIG;
        int error = 0, sig, flag;
        struct proc *p;
        lwp_t *l = curlwp;
        bool early = false;

        ktrcsw(1, 0, syncobj);

        /*
         * If sleeping interruptibly, check for pending signals, exits or
         * core dump events.
         *
         * Note the usage of LW_CATCHINTR.  This expresses our intent
         * to catch or not catch sleep interruptions, which might change
         * while we are sleeping.  It is independent from LW_SINTR because
         * we don't want to leave LW_SINTR set when the LWP is not asleep.
         */
        if (catch_p) {
                if ((l->l_flag & (LW_CANCELLED|LW_WEXIT|LW_WCORE)) != 0) {
                        l->l_flag &= ~LW_CANCELLED;
                        error = EINTR;
                        early = true;
                } else if ((l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0))
                        early = true;
                l->l_flag |= LW_CATCHINTR;
        } else
                l->l_flag &= ~LW_CATCHINTR;

        if (early) {
                /* lwp_unsleep() will release the lock */
                lwp_unsleep(l, true);
        } else {
                /*
                 * The LWP may have already been awoken if the caller
                 * dropped the sleep queue lock between sleepq_enqueue() and
                 * sleepq_block().  If that happens l_stat will be LSONPROC
                 * and mi_switch() will treat this as a preemption.  No need
                 * to do anything special here.
                 */
                if (timo) {
                        l->l_flag &= ~LW_STIMO;
                        callout_schedule(&l->l_timeout_ch, timo);
                }
                l->l_boostpri = l->l_syncobj->sobj_boostpri;
                spc_lock(l->l_cpu);
                mi_switch(l);

                /* The LWP and sleep queue are now unlocked. */
                if (timo) {
                        /*
                         * Even if the callout appears to have fired, we
                         * need to stop it in order to synchronise with
                         * other CPUs.  It's important that we do this in
                         * this LWP's context, and not during wakeup, in
                         * order to keep the callout & its cache lines
                         * co-located on the CPU with the LWP.
                         */
                        (void)callout_halt(&l->l_timeout_ch, NULL);
                        error = (l->l_flag & LW_STIMO) ? EWOULDBLOCK : 0;
                }
        }

        /*
         * LW_CATCHINTR is only modified in this function OR when we
         * are asleep (with the sleepq locked).  We can therefore safely
         * test it unlocked here as it is guaranteed to be stable by
         * virtue of us running.
         *
         * We do not bother clearing it if set; that would require us
         * to take the LWP lock, and it doesn't seem worth the hassle
         * considering it is only meaningful here inside this function,
         * and is set to reflect intent upon entry.
         */
        flag = atomic_load_relaxed(&l->l_flag);
        if (__predict_false((flag & mask) != 0)) {
                if ((flag & LW_CATCHINTR) == 0 || error != 0)
                        /* nothing */;
                else if ((flag & (LW_CANCELLED | LW_WEXIT | LW_WCORE)) != 0)
                        error = EINTR;
                else if ((flag & LW_PENDSIG) != 0) {
                        /*
                         * Acquiring p_lock may cause us to recurse
                         * through the sleep path and back into this
                         * routine, but is safe because LWPs sleeping
                         * on locks are non-interruptible and we will
                         * not recurse again.
                         */
                        p = l->l_proc;
                        mutex_enter(p->p_lock);
                        if (((sig = sigispending(l, 0)) != 0 &&
                            (sigprop[sig] & SA_STOP) == 0) ||
                            (sig = issignal(l)) != 0)
                                error = sleepq_sigtoerror(l, sig);
                        mutex_exit(p->p_lock);
                }
        }

        ktrcsw(0, 0, syncobj);
        if (__predict_false(nlocks != 0)) {
                KERNEL_LOCK(nlocks, NULL);
        }
        return error;
}
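
/*
 * Caller-side sketch (illustrative only) of interpreting sleepq_block()'s
 * return value, matching the values produced above: 0 for a deliberate
 * wakeup, EWOULDBLOCK when the timeout callout fired (LW_STIMO), and
 * EINTR or ERESTART when an interruptible sleep was broken by a signal,
 * cancellation, exit or core dump.
 *
 *      error = sleepq_block(timo, true, &some_syncobj, nlocks);
 *      switch (error) {
 *      case 0:                 // woken deliberately; recheck the condition
 *              break;
 *      case EWOULDBLOCK:       // timed out
 *              break;
 *      case EINTR:             // interrupted; do not restart the syscall
 *      case ERESTART:          // interrupted; the syscall may be restarted
 *              break;
 *      }
 */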

/*
 * sleepq_wake:
 *
 *      Wake zero or more LWPs blocked on a single wait channel.
 */
void
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
{
        lwp_t *l, *next;

        KASSERT(mutex_owned(mp));

        for (l = LIST_FIRST(sq); l != NULL; l = next) {
                KASSERT(l->l_sleepq == sq);
                KASSERT(l->l_mutex == mp);
                next = LIST_NEXT(l, l_sleepchain);
                if (l->l_wchan != wchan)
                        continue;
                sleepq_remove(sq, l, true);
                if (--expected == 0)
                        break;
        }

        mutex_spin_exit(mp);
}

/*
 * sleepq_unsleep:
 *
 *      Remove an LWP from its sleep queue and set it runnable again.
 *      sleepq_unsleep() is called with the LWP's mutex held, and will
 *      release it if "unlock" is true.
 */
void
sleepq_unsleep(lwp_t *l, bool unlock)
{
        sleepq_t *sq = l->l_sleepq;
        kmutex_t *mp = l->l_mutex;

        KASSERT(lwp_locked(l, mp));
        KASSERT(l->l_wchan != NULL);

        sleepq_remove(sq, l, false);
        if (unlock) {
                mutex_spin_exit(mp);
        }
}

/*
 * sleepq_timeout:
 *
 *      Entered via the callout(9) subsystem to time out an LWP that is on a
 *      sleep queue.
 */
void
sleepq_timeout(void *arg)
{
        lwp_t *l = arg;

        /*
         * Lock the LWP.  Assuming it's still on the sleep queue, its
         * current mutex will also be the sleep queue mutex.
         */
        lwp_lock(l);

        if (l->l_wchan == NULL || l->l_syncobj == &callout_syncobj) {
                /*
                 * Somebody beat us to it, or the LWP is blocked in
                 * callout_halt() waiting for us to finish here.  In
                 * neither case should the LWP produce EWOULDBLOCK.
                 */
                lwp_unlock(l);
                return;
        }

        l->l_flag |= LW_STIMO;
        lwp_unsleep(l, true);
}
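
/*
 * Sketch of the timeout round trip as implemented in this file (the
 * dispatch from lwp_unsleep() through the syncobj's unsleep hook to
 * sleepq_unsleep() is wired up outside this file and is stated here as
 * an assumption):
 *
 *      sleepq_block(timo, ...)
 *          callout_schedule(&l->l_timeout_ch, timo)    arm the timer
 *          mi_switch(l)                                sleep
 *      ...
 *      sleepq_timeout(l)                               callout fires
 *          l->l_flag |= LW_STIMO
 *          lwp_unsleep(l, true)                        -> sobj_unsleep
 *              sleepq_unsleep(l, true)                 make the LWP runnable
 *      ...
 *      sleepq_block() resumes
 *          callout_halt(&l->l_timeout_ch, NULL)        synchronise w/ callout
 *          LW_STIMO set  =>  error = EWOULDBLOCK
 */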

/*
 * sleepq_sigtoerror:
 *
 *      Given a signal number, interpret and return an error code.
 */
static int
sleepq_sigtoerror(lwp_t *l, int sig)
{
        struct proc *p = l->l_proc;
        int error;

        KASSERT(mutex_owned(p->p_lock));

        /*
         * If this sleep was canceled, don't let the syscall restart.
         */
        if ((SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
                error = EINTR;
        else
                error = ERESTART;

        return error;
}

/*
 * sleepq_abort:
 *
 *      After a panic or during autoconfiguration, lower the interrupt
 *      priority level to give pending interrupts a chance to run, and
 *      then return.  Called if sleepq_dontsleep() returns non-zero, and
 *      always returns zero.
 */
int
sleepq_abort(kmutex_t *mtx, int unlock)
{
        int s;

        s = splhigh();
        splx(IPL_SAFEPRI);
        splx(s);
        if (mtx != NULL && unlock != 0)
                mutex_exit(mtx);

        return 0;
}

/*
 * sleepq_reinsert:
 *
 *      Move the position of the lwp in the sleep queue after a possible
 *      change of the lwp's effective priority.
 */
static void
sleepq_reinsert(sleepq_t *sq, lwp_t *l)
{

        KASSERT(l->l_sleepq == sq);
        if ((l->l_syncobj->sobj_flag & SOBJ_SLEEPQ_SORTED) == 0) {
                return;
        }

        /*
         * Don't let the sleep queue become empty, even briefly.
         * cv_signal() and cv_broadcast() inspect it without the
         * sleep queue lock held and need to see a non-empty queue
         * head if there are waiters.
         */
        if (LIST_FIRST(sq) == l && LIST_NEXT(l, l_sleepchain) == NULL) {
                return;
        }
        LIST_REMOVE(l, l_sleepchain);
        sleepq_insert(sq, l, l->l_syncobj);
}

/*
 * sleepq_changepri:
 *
 *      Adjust the priority of an LWP residing on a sleepq.
 */
void
sleepq_changepri(lwp_t *l, pri_t pri)
{
        sleepq_t *sq = l->l_sleepq;

        KASSERT(lwp_locked(l, NULL));

        l->l_priority = pri;
        sleepq_reinsert(sq, l);
}

/*
 * sleepq_lendpri:
 *
 *      Adjust the lent priority of an LWP residing on a sleepq.
 */
void
sleepq_lendpri(lwp_t *l, pri_t pri)
{
        sleepq_t *sq = l->l_sleepq;

        KASSERT(lwp_locked(l, NULL));

        l->l_inheritedprio = pri;
        l->l_auxprio = MAX(l->l_inheritedprio, l->l_protectprio);
        sleepq_reinsert(sq, l);
}
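
/*
 * How the two priority hooks above are reached is outside this file; the
 * sketch below is an assumption based on the syncobj mechanism in
 * <sys/syncobj.h>: the sleep syncobj points its sobj_changepri and
 * sobj_lendpri members at these functions, and generic code dispatches
 * through the LWP's current syncobj, e.g.
 *
 *      (*l->l_syncobj->sobj_changepri)(l, pri);   // -> sleepq_changepri()
 *      (*l->l_syncobj->sobj_lendpri)(l, pri);     // -> sleepq_lendpri()
 *
 * which is how priority inheritance keeps a SORTED sleep queue ordered
 * when a blocked LWP's effective priority changes (see sleepq_reinsert()
 * above).
 */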