1 1.79 ad /* $NetBSD: kern_timeout.c,v 1.79 2023/10/08 13:23:05 ad Exp $ */ 2 1.1 thorpej 3 1.1 thorpej /*- 4 1.78 ad * Copyright (c) 2003, 2006, 2007, 2008, 2009, 2019, 2023 5 1.78 ad * The NetBSD Foundation, Inc. 6 1.1 thorpej * All rights reserved. 7 1.1 thorpej * 8 1.1 thorpej * This code is derived from software contributed to The NetBSD Foundation 9 1.22 ad * by Jason R. Thorpe, and by Andrew Doran. 10 1.1 thorpej * 11 1.1 thorpej * Redistribution and use in source and binary forms, with or without 12 1.1 thorpej * modification, are permitted provided that the following conditions 13 1.1 thorpej * are met: 14 1.1 thorpej * 1. Redistributions of source code must retain the above copyright 15 1.1 thorpej * notice, this list of conditions and the following disclaimer. 16 1.1 thorpej * 2. Redistributions in binary form must reproduce the above copyright 17 1.1 thorpej * notice, this list of conditions and the following disclaimer in the 18 1.1 thorpej * documentation and/or other materials provided with the distribution. 19 1.1 thorpej * 20 1.1 thorpej * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 1.1 thorpej * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 1.1 thorpej * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 1.1 thorpej * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 1.1 thorpej * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 1.1 thorpej * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 1.1 thorpej * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 1.1 thorpej * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 1.1 thorpej * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 1.1 thorpej * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 1.1 thorpej * POSSIBILITY OF SUCH DAMAGE. 31 1.1 thorpej */ 32 1.1 thorpej 33 1.1 thorpej /* 34 1.1 thorpej * Copyright (c) 2001 Thomas Nordin <nordin (at) openbsd.org> 35 1.1 thorpej * Copyright (c) 2000-2001 Artur Grabowski <art (at) openbsd.org> 36 1.14 perry * All rights reserved. 37 1.14 perry * 38 1.14 perry * Redistribution and use in source and binary forms, with or without 39 1.14 perry * modification, are permitted provided that the following conditions 40 1.14 perry * are met: 41 1.1 thorpej * 42 1.14 perry * 1. Redistributions of source code must retain the above copyright 43 1.14 perry * notice, this list of conditions and the following disclaimer. 44 1.14 perry * 2. Redistributions in binary form must reproduce the above copyright 45 1.14 perry * notice, this list of conditions and the following disclaimer in the 46 1.14 perry * documentation and/or other materials provided with the distribution. 47 1.1 thorpej * 3. The name of the author may not be used to endorse or promote products 48 1.14 perry * derived from this software without specific prior written permission. 49 1.1 thorpej * 50 1.1 thorpej * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, 51 1.1 thorpej * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY 52 1.1 thorpej * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 53 1.1 thorpej * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 54 1.1 thorpej * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 55 1.1 thorpej * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 56 1.1 thorpej * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 57 1.1 thorpej * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 58 1.1 thorpej * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 59 1.14 perry * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 1.1 thorpej */ 61 1.7 lukem 62 1.7 lukem #include <sys/cdefs.h> 63 1.79 ad __KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.79 2023/10/08 13:23:05 ad Exp $"); 64 1.1 thorpej 65 1.1 thorpej /* 66 1.22 ad * Timeouts are kept in a hierarchical timing wheel. The c_time is the 67 1.36 ad * value of c_cpu->cc_ticks when the timeout should be called. There are 68 1.36 ad * four levels with 256 buckets each. See 'Scheme 7' in "Hashed and 69 1.36 ad * Hierarchical Timing Wheels: Efficient Data Structures for Implementing 70 1.36 ad * a Timer Facility" by George Varghese and Tony Lauck. 71 1.22 ad * 72 1.22 ad * Some of the "math" in here is a bit tricky. We have to beware of 73 1.22 ad * wrapping ints. 74 1.22 ad * 75 1.22 ad * We use the fact that any element added to the queue must be added with 76 1.22 ad * a positive time. That means that any element `to' on the queue cannot 77 1.22 ad * be scheduled to timeout further in time than INT_MAX, but c->c_time can 78 1.71 riastrad * be positive or negative so comparing it with anything is dangerous. 79 1.22 ad * The only way we can use the c->c_time value in any predictable way is 80 1.22 ad * when we calculate how far in the future `to' will timeout - "c->c_time 81 1.36 ad * - c->c_cpu->cc_ticks". The result will always be positive for future 82 1.22 ad * timeouts and 0 or negative for due timeouts. 83 1.1 thorpej */ 84 1.1 thorpej 85 1.24 ad #define _CALLOUT_PRIVATE 86 1.24 ad 87 1.1 thorpej #include <sys/param.h> 88 1.1 thorpej #include <sys/systm.h> 89 1.1 thorpej #include <sys/kernel.h> 90 1.1 thorpej #include <sys/callout.h> 91 1.45 rmind #include <sys/lwp.h> 92 1.20 ad #include <sys/mutex.h> 93 1.22 ad #include <sys/proc.h> 94 1.22 ad #include <sys/sleepq.h> 95 1.22 ad #include <sys/syncobj.h> 96 1.22 ad #include <sys/evcnt.h> 97 1.27 ad #include <sys/intr.h> 98 1.33 ad #include <sys/cpu.h> 99 1.36 ad #include <sys/kmem.h> 100 1.72 riastrad #include <sys/sdt.h> 101 1.1 thorpej 102 1.1 thorpej #ifdef DDB 103 1.1 thorpej #include <machine/db_machdep.h> 104 1.1 thorpej #include <ddb/db_interface.h> 105 1.1 thorpej #include <ddb/db_access.h> 106 1.49 christos #include <ddb/db_cpu.h> 107 1.1 thorpej #include <ddb/db_sym.h> 108 1.1 thorpej #include <ddb/db_output.h> 109 1.1 thorpej #endif 110 1.1 thorpej 111 1.22 ad #define BUCKETS 1024 112 1.22 ad #define WHEELSIZE 256 113 1.22 ad #define WHEELMASK 255 114 1.22 ad #define WHEELBITS 8 115 1.22 ad 116 1.1 thorpej #define MASKWHEEL(wheel, time) (((time) >> ((wheel)*WHEELBITS)) & WHEELMASK) 117 1.1 thorpej 118 1.36 ad #define BUCKET(cc, rel, abs) \ 119 1.1 thorpej (((rel) <= (1 << (2*WHEELBITS))) \ 120 1.1 thorpej ? ((rel) <= (1 << WHEELBITS)) \ 121 1.36 ad ? &(cc)->cc_wheel[MASKWHEEL(0, (abs))] \ 122 1.36 ad : &(cc)->cc_wheel[MASKWHEEL(1, (abs)) + WHEELSIZE] \ 123 1.1 thorpej : ((rel) <= (1 << (3*WHEELBITS))) \ 124 1.36 ad ? &(cc)->cc_wheel[MASKWHEEL(2, (abs)) + 2*WHEELSIZE] \ 125 1.36 ad : &(cc)->cc_wheel[MASKWHEEL(3, (abs)) + 3*WHEELSIZE]) 126 1.1 thorpej 127 1.36 ad #define MOVEBUCKET(cc, wheel, time) \ 128 1.36 ad CIRCQ_APPEND(&(cc)->cc_todo, \ 129 1.36 ad &(cc)->cc_wheel[MASKWHEEL((wheel), (time)) + (wheel)*WHEELSIZE]) 130 1.1 thorpej 131 1.1 thorpej /* 132 1.1 thorpej * Circular queue definitions. 133 1.1 thorpej */ 134 1.1 thorpej 135 1.11 scw #define CIRCQ_INIT(list) \ 136 1.1 thorpej do { \ 137 1.11 scw (list)->cq_next_l = (list); \ 138 1.11 scw (list)->cq_prev_l = (list); \ 139 1.1 thorpej } while (/*CONSTCOND*/0) 140 1.1 thorpej 141 1.1 thorpej #define CIRCQ_INSERT(elem, list) \ 142 1.1 thorpej do { \ 143 1.11 scw (elem)->cq_prev_e = (list)->cq_prev_e; \ 144 1.11 scw (elem)->cq_next_l = (list); \ 145 1.11 scw (list)->cq_prev_l->cq_next_l = (elem); \ 146 1.11 scw (list)->cq_prev_l = (elem); \ 147 1.1 thorpej } while (/*CONSTCOND*/0) 148 1.1 thorpej 149 1.1 thorpej #define CIRCQ_APPEND(fst, snd) \ 150 1.1 thorpej do { \ 151 1.1 thorpej if (!CIRCQ_EMPTY(snd)) { \ 152 1.11 scw (fst)->cq_prev_l->cq_next_l = (snd)->cq_next_l; \ 153 1.11 scw (snd)->cq_next_l->cq_prev_l = (fst)->cq_prev_l; \ 154 1.11 scw (snd)->cq_prev_l->cq_next_l = (fst); \ 155 1.11 scw (fst)->cq_prev_l = (snd)->cq_prev_l; \ 156 1.1 thorpej CIRCQ_INIT(snd); \ 157 1.1 thorpej } \ 158 1.1 thorpej } while (/*CONSTCOND*/0) 159 1.1 thorpej 160 1.1 thorpej #define CIRCQ_REMOVE(elem) \ 161 1.1 thorpej do { \ 162 1.11 scw (elem)->cq_next_l->cq_prev_e = (elem)->cq_prev_e; \ 163 1.11 scw (elem)->cq_prev_l->cq_next_e = (elem)->cq_next_e; \ 164 1.1 thorpej } while (/*CONSTCOND*/0) 165 1.1 thorpej 166 1.11 scw #define CIRCQ_FIRST(list) ((list)->cq_next_e) 167 1.11 scw #define CIRCQ_NEXT(elem) ((elem)->cq_next_e) 168 1.11 scw #define CIRCQ_LAST(elem,list) ((elem)->cq_next_l == (list)) 169 1.11 scw #define CIRCQ_EMPTY(list) ((list)->cq_next_l == (list)) 170 1.1 thorpej 171 1.36 ad struct callout_cpu { 172 1.44 ad kmutex_t *cc_lock; 173 1.36 ad sleepq_t cc_sleepq; 174 1.36 ad u_int cc_nwait; 175 1.36 ad u_int cc_ticks; 176 1.36 ad lwp_t *cc_lwp; 177 1.36 ad callout_impl_t *cc_active; 178 1.36 ad struct evcnt cc_ev_late; 179 1.36 ad struct evcnt cc_ev_block; 180 1.36 ad struct callout_circq cc_todo; /* Worklist */ 181 1.36 ad struct callout_circq cc_wheel[BUCKETS]; /* Queues of timeouts */ 182 1.36 ad char cc_name1[12]; 183 1.36 ad char cc_name2[12]; 184 1.72 riastrad struct cpu_info *cc_cpu; 185 1.36 ad }; 186 1.36 ad 187 1.65 rin #ifdef DDB 188 1.65 rin static struct callout_cpu ccb; 189 1.65 rin #endif 190 1.49 christos 191 1.65 rin #ifndef CRASH /* _KERNEL */ 192 1.49 christos static void callout_softclock(void *); 193 1.57 ad static void callout_wait(callout_impl_t *, void *, kmutex_t *); 194 1.57 ad 195 1.57 ad static struct callout_cpu callout_cpu0 __cacheline_aligned; 196 1.57 ad static void *callout_sih __read_mostly; 197 1.36 ad 198 1.72 riastrad SDT_PROBE_DEFINE2(sdt, kernel, callout, init, 199 1.72 riastrad "struct callout *"/*ch*/, 200 1.72 riastrad "unsigned"/*flags*/); 201 1.72 riastrad SDT_PROBE_DEFINE1(sdt, kernel, callout, destroy, 202 1.72 riastrad "struct callout *"/*ch*/); 203 1.72 riastrad SDT_PROBE_DEFINE4(sdt, kernel, callout, setfunc, 204 1.72 riastrad "struct callout *"/*ch*/, 205 1.72 riastrad "void (*)(void *)"/*func*/, 206 1.72 riastrad "void *"/*arg*/, 207 1.72 riastrad "unsigned"/*flags*/); 208 1.72 riastrad SDT_PROBE_DEFINE5(sdt, kernel, callout, schedule, 209 1.72 riastrad "struct callout *"/*ch*/, 210 1.72 riastrad "void (*)(void *)"/*func*/, 211 1.72 riastrad "void *"/*arg*/, 212 1.72 riastrad "unsigned"/*flags*/, 213 1.72 riastrad "int"/*ticks*/); 214 1.72 riastrad SDT_PROBE_DEFINE6(sdt, kernel, callout, migrate, 215 1.72 riastrad "struct callout *"/*ch*/, 216 1.72 riastrad "void (*)(void *)"/*func*/, 217 1.72 riastrad "void *"/*arg*/, 218 1.72 riastrad "unsigned"/*flags*/, 219 1.72 riastrad "struct cpu_info *"/*ocpu*/, 220 1.72 riastrad "struct cpu_info *"/*ncpu*/); 221 1.72 riastrad SDT_PROBE_DEFINE4(sdt, kernel, callout, entry, 222 1.72 riastrad "struct callout *"/*ch*/, 223 1.72 riastrad "void (*)(void *)"/*func*/, 224 1.72 riastrad "void *"/*arg*/, 225 1.72 riastrad "unsigned"/*flags*/); 226 1.72 riastrad SDT_PROBE_DEFINE4(sdt, kernel, callout, return, 227 1.72 riastrad "struct callout *"/*ch*/, 228 1.72 riastrad "void (*)(void *)"/*func*/, 229 1.72 riastrad "void *"/*arg*/, 230 1.72 riastrad "unsigned"/*flags*/); 231 1.72 riastrad SDT_PROBE_DEFINE5(sdt, kernel, callout, stop, 232 1.72 riastrad "struct callout *"/*ch*/, 233 1.72 riastrad "void (*)(void *)"/*func*/, 234 1.72 riastrad "void *"/*arg*/, 235 1.72 riastrad "unsigned"/*flags*/, 236 1.72 riastrad "bool"/*expired*/); 237 1.72 riastrad SDT_PROBE_DEFINE4(sdt, kernel, callout, halt, 238 1.72 riastrad "struct callout *"/*ch*/, 239 1.72 riastrad "void (*)(void *)"/*func*/, 240 1.72 riastrad "void *"/*arg*/, 241 1.72 riastrad "unsigned"/*flags*/); 242 1.72 riastrad SDT_PROBE_DEFINE5(sdt, kernel, callout, halt__done, 243 1.72 riastrad "struct callout *"/*ch*/, 244 1.72 riastrad "void (*)(void *)"/*func*/, 245 1.72 riastrad "void *"/*arg*/, 246 1.72 riastrad "unsigned"/*flags*/, 247 1.72 riastrad "bool"/*expired*/); 248 1.72 riastrad 249 1.79 ad syncobj_t callout_syncobj = { 250 1.79 ad .sobj_name = "callout", 251 1.79 ad .sobj_flag = SOBJ_SLEEPQ_SORTED, 252 1.79 ad .sobj_boostpri = PRI_KERNEL, 253 1.79 ad .sobj_unsleep = sleepq_unsleep, 254 1.79 ad .sobj_changepri = sleepq_changepri, 255 1.79 ad .sobj_lendpri = sleepq_lendpri, 256 1.79 ad .sobj_owner = syncobj_noowner, 257 1.79 ad }; 258 1.79 ad 259 1.36 ad static inline kmutex_t * 260 1.36 ad callout_lock(callout_impl_t *c) 261 1.36 ad { 262 1.44 ad struct callout_cpu *cc; 263 1.36 ad kmutex_t *lock; 264 1.36 ad 265 1.36 ad for (;;) { 266 1.44 ad cc = c->c_cpu; 267 1.44 ad lock = cc->cc_lock; 268 1.36 ad mutex_spin_enter(lock); 269 1.44 ad if (__predict_true(cc == c->c_cpu)) 270 1.36 ad return lock; 271 1.36 ad mutex_spin_exit(lock); 272 1.36 ad } 273 1.36 ad } 274 1.5 thorpej 275 1.1 thorpej /* 276 1.75 pho * Check if the callout is currently running on an LWP that isn't curlwp. 277 1.75 pho */ 278 1.75 pho static inline bool 279 1.75 pho callout_running_somewhere_else(callout_impl_t *c, struct callout_cpu *cc) 280 1.75 pho { 281 1.75 pho KASSERT(c->c_cpu == cc); 282 1.75 pho 283 1.75 pho return cc->cc_active == c && cc->cc_lwp != curlwp; 284 1.75 pho } 285 1.75 pho 286 1.75 pho /* 287 1.1 thorpej * callout_startup: 288 1.1 thorpej * 289 1.1 thorpej * Initialize the callout facility, called at system startup time. 290 1.36 ad * Do just enough to allow callouts to be safely registered. 291 1.1 thorpej */ 292 1.1 thorpej void 293 1.1 thorpej callout_startup(void) 294 1.1 thorpej { 295 1.36 ad struct callout_cpu *cc; 296 1.1 thorpej int b; 297 1.1 thorpej 298 1.36 ad KASSERT(curcpu()->ci_data.cpu_callout == NULL); 299 1.22 ad 300 1.36 ad cc = &callout_cpu0; 301 1.44 ad cc->cc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED); 302 1.36 ad CIRCQ_INIT(&cc->cc_todo); 303 1.1 thorpej for (b = 0; b < BUCKETS; b++) 304 1.36 ad CIRCQ_INIT(&cc->cc_wheel[b]); 305 1.36 ad curcpu()->ci_data.cpu_callout = cc; 306 1.22 ad } 307 1.22 ad 308 1.22 ad /* 309 1.36 ad * callout_init_cpu: 310 1.22 ad * 311 1.36 ad * Per-CPU initialization. 312 1.22 ad */ 313 1.47 martin CTASSERT(sizeof(callout_impl_t) <= sizeof(callout_t)); 314 1.47 martin 315 1.22 ad void 316 1.36 ad callout_init_cpu(struct cpu_info *ci) 317 1.22 ad { 318 1.36 ad struct callout_cpu *cc; 319 1.36 ad int b; 320 1.22 ad 321 1.36 ad if ((cc = ci->ci_data.cpu_callout) == NULL) { 322 1.36 ad cc = kmem_zalloc(sizeof(*cc), KM_SLEEP); 323 1.44 ad cc->cc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED); 324 1.36 ad CIRCQ_INIT(&cc->cc_todo); 325 1.36 ad for (b = 0; b < BUCKETS; b++) 326 1.36 ad CIRCQ_INIT(&cc->cc_wheel[b]); 327 1.36 ad } else { 328 1.36 ad /* Boot CPU, one time only. */ 329 1.36 ad callout_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE, 330 1.36 ad callout_softclock, NULL); 331 1.36 ad if (callout_sih == NULL) 332 1.36 ad panic("callout_init_cpu (2)"); 333 1.36 ad } 334 1.36 ad 335 1.40 ad sleepq_init(&cc->cc_sleepq); 336 1.36 ad 337 1.36 ad snprintf(cc->cc_name1, sizeof(cc->cc_name1), "late/%u", 338 1.36 ad cpu_index(ci)); 339 1.36 ad evcnt_attach_dynamic(&cc->cc_ev_late, EVCNT_TYPE_MISC, 340 1.36 ad NULL, "callout", cc->cc_name1); 341 1.36 ad 342 1.36 ad snprintf(cc->cc_name2, sizeof(cc->cc_name2), "wait/%u", 343 1.36 ad cpu_index(ci)); 344 1.36 ad evcnt_attach_dynamic(&cc->cc_ev_block, EVCNT_TYPE_MISC, 345 1.36 ad NULL, "callout", cc->cc_name2); 346 1.36 ad 347 1.72 riastrad cc->cc_cpu = ci; 348 1.36 ad ci->ci_data.cpu_callout = cc; 349 1.1 thorpej } 350 1.1 thorpej 351 1.1 thorpej /* 352 1.1 thorpej * callout_init: 353 1.1 thorpej * 354 1.36 ad * Initialize a callout structure. This must be quick, so we fill 355 1.36 ad * only the minimum number of fields. 356 1.1 thorpej */ 357 1.1 thorpej void 358 1.22 ad callout_init(callout_t *cs, u_int flags) 359 1.1 thorpej { 360 1.22 ad callout_impl_t *c = (callout_impl_t *)cs; 361 1.36 ad struct callout_cpu *cc; 362 1.22 ad 363 1.22 ad KASSERT((flags & ~CALLOUT_FLAGMASK) == 0); 364 1.1 thorpej 365 1.72 riastrad SDT_PROBE2(sdt, kernel, callout, init, cs, flags); 366 1.72 riastrad 367 1.36 ad cc = curcpu()->ci_data.cpu_callout; 368 1.36 ad c->c_func = NULL; 369 1.22 ad c->c_magic = CALLOUT_MAGIC; 370 1.36 ad if (__predict_true((flags & CALLOUT_MPSAFE) != 0 && cc != NULL)) { 371 1.36 ad c->c_flags = flags; 372 1.36 ad c->c_cpu = cc; 373 1.36 ad return; 374 1.36 ad } 375 1.36 ad c->c_flags = flags | CALLOUT_BOUND; 376 1.36 ad c->c_cpu = &callout_cpu0; 377 1.22 ad } 378 1.22 ad 379 1.22 ad /* 380 1.22 ad * callout_destroy: 381 1.22 ad * 382 1.22 ad * Destroy a callout structure. The callout must be stopped. 383 1.22 ad */ 384 1.22 ad void 385 1.22 ad callout_destroy(callout_t *cs) 386 1.22 ad { 387 1.22 ad callout_impl_t *c = (callout_impl_t *)cs; 388 1.22 ad 389 1.72 riastrad SDT_PROBE1(sdt, kernel, callout, destroy, cs); 390 1.72 riastrad 391 1.53 christos KASSERTMSG(c->c_magic == CALLOUT_MAGIC, 392 1.53 christos "callout %p: c_magic (%#x) != CALLOUT_MAGIC (%#x)", 393 1.53 christos c, c->c_magic, CALLOUT_MAGIC); 394 1.22 ad /* 395 1.22 ad * It's not necessary to lock in order to see the correct value 396 1.22 ad * of c->c_flags. If the callout could potentially have been 397 1.22 ad * running, the current thread should have stopped it. 398 1.22 ad */ 399 1.48 martin KASSERTMSG((c->c_flags & CALLOUT_PENDING) == 0, 400 1.59 ad "pending callout %p: c_func (%p) c_flags (%#x) destroyed from %p", 401 1.59 ad c, c->c_func, c->c_flags, __builtin_return_address(0)); 402 1.75 pho KASSERTMSG(!callout_running_somewhere_else(c, c->c_cpu), 403 1.59 ad "running callout %p: c_func (%p) c_flags (%#x) destroyed from %p", 404 1.48 martin c, c->c_func, c->c_flags, __builtin_return_address(0)); 405 1.22 ad c->c_magic = 0; 406 1.1 thorpej } 407 1.1 thorpej 408 1.1 thorpej /* 409 1.29 joerg * callout_schedule_locked: 410 1.1 thorpej * 411 1.29 joerg * Schedule a callout to run. The function and argument must 412 1.29 joerg * already be set in the callout structure. Must be called with 413 1.29 joerg * callout_lock. 414 1.1 thorpej */ 415 1.29 joerg static void 416 1.36 ad callout_schedule_locked(callout_impl_t *c, kmutex_t *lock, int to_ticks) 417 1.1 thorpej { 418 1.36 ad struct callout_cpu *cc, *occ; 419 1.20 ad int old_time; 420 1.1 thorpej 421 1.72 riastrad SDT_PROBE5(sdt, kernel, callout, schedule, 422 1.72 riastrad c, c->c_func, c->c_arg, c->c_flags, to_ticks); 423 1.72 riastrad 424 1.1 thorpej KASSERT(to_ticks >= 0); 425 1.29 joerg KASSERT(c->c_func != NULL); 426 1.1 thorpej 427 1.1 thorpej /* Initialize the time here, it won't change. */ 428 1.36 ad occ = c->c_cpu; 429 1.43 ad c->c_flags &= ~(CALLOUT_FIRED | CALLOUT_INVOKING); 430 1.1 thorpej 431 1.1 thorpej /* 432 1.1 thorpej * If this timeout is already scheduled and now is moved 433 1.36 ad * earlier, reschedule it now. Otherwise leave it in place 434 1.1 thorpej * and let it be rescheduled later. 435 1.1 thorpej */ 436 1.22 ad if ((c->c_flags & CALLOUT_PENDING) != 0) { 437 1.36 ad /* Leave on existing CPU. */ 438 1.36 ad old_time = c->c_time; 439 1.36 ad c->c_time = to_ticks + occ->cc_ticks; 440 1.4 yamt if (c->c_time - old_time < 0) { 441 1.1 thorpej CIRCQ_REMOVE(&c->c_list); 442 1.36 ad CIRCQ_INSERT(&c->c_list, &occ->cc_todo); 443 1.1 thorpej } 444 1.36 ad mutex_spin_exit(lock); 445 1.36 ad return; 446 1.36 ad } 447 1.36 ad 448 1.36 ad cc = curcpu()->ci_data.cpu_callout; 449 1.36 ad if ((c->c_flags & CALLOUT_BOUND) != 0 || cc == occ || 450 1.44 ad !mutex_tryenter(cc->cc_lock)) { 451 1.36 ad /* Leave on existing CPU. */ 452 1.36 ad c->c_time = to_ticks + occ->cc_ticks; 453 1.36 ad c->c_flags |= CALLOUT_PENDING; 454 1.36 ad CIRCQ_INSERT(&c->c_list, &occ->cc_todo); 455 1.1 thorpej } else { 456 1.36 ad /* Move to this CPU. */ 457 1.36 ad c->c_cpu = cc; 458 1.36 ad c->c_time = to_ticks + cc->cc_ticks; 459 1.1 thorpej c->c_flags |= CALLOUT_PENDING; 460 1.36 ad CIRCQ_INSERT(&c->c_list, &cc->cc_todo); 461 1.44 ad mutex_spin_exit(cc->cc_lock); 462 1.72 riastrad SDT_PROBE6(sdt, kernel, callout, migrate, 463 1.72 riastrad c, c->c_func, c->c_arg, c->c_flags, 464 1.72 riastrad occ->cc_cpu, cc->cc_cpu); 465 1.1 thorpej } 466 1.36 ad mutex_spin_exit(lock); 467 1.29 joerg } 468 1.29 joerg 469 1.29 joerg /* 470 1.29 joerg * callout_reset: 471 1.29 joerg * 472 1.29 joerg * Reset a callout structure with a new function and argument, and 473 1.29 joerg * schedule it to run. 474 1.29 joerg */ 475 1.29 joerg void 476 1.29 joerg callout_reset(callout_t *cs, int to_ticks, void (*func)(void *), void *arg) 477 1.29 joerg { 478 1.29 joerg callout_impl_t *c = (callout_impl_t *)cs; 479 1.36 ad kmutex_t *lock; 480 1.29 joerg 481 1.29 joerg KASSERT(c->c_magic == CALLOUT_MAGIC); 482 1.42 rmind KASSERT(func != NULL); 483 1.29 joerg 484 1.36 ad lock = callout_lock(c); 485 1.72 riastrad SDT_PROBE4(sdt, kernel, callout, setfunc, cs, func, arg, c->c_flags); 486 1.29 joerg c->c_func = func; 487 1.29 joerg c->c_arg = arg; 488 1.36 ad callout_schedule_locked(c, lock, to_ticks); 489 1.1 thorpej } 490 1.1 thorpej 491 1.1 thorpej /* 492 1.1 thorpej * callout_schedule: 493 1.1 thorpej * 494 1.1 thorpej * Schedule a callout to run. The function and argument must 495 1.1 thorpej * already be set in the callout structure. 496 1.1 thorpej */ 497 1.1 thorpej void 498 1.22 ad callout_schedule(callout_t *cs, int to_ticks) 499 1.1 thorpej { 500 1.22 ad callout_impl_t *c = (callout_impl_t *)cs; 501 1.36 ad kmutex_t *lock; 502 1.1 thorpej 503 1.22 ad KASSERT(c->c_magic == CALLOUT_MAGIC); 504 1.1 thorpej 505 1.36 ad lock = callout_lock(c); 506 1.36 ad callout_schedule_locked(c, lock, to_ticks); 507 1.1 thorpej } 508 1.1 thorpej 509 1.1 thorpej /* 510 1.1 thorpej * callout_stop: 511 1.1 thorpej * 512 1.36 ad * Try to cancel a pending callout. It may be too late: the callout 513 1.36 ad * could be running on another CPU. If called from interrupt context, 514 1.36 ad * the callout could already be in progress at a lower priority. 515 1.1 thorpej */ 516 1.22 ad bool 517 1.22 ad callout_stop(callout_t *cs) 518 1.1 thorpej { 519 1.22 ad callout_impl_t *c = (callout_impl_t *)cs; 520 1.36 ad kmutex_t *lock; 521 1.22 ad bool expired; 522 1.22 ad 523 1.22 ad KASSERT(c->c_magic == CALLOUT_MAGIC); 524 1.1 thorpej 525 1.36 ad lock = callout_lock(c); 526 1.20 ad 527 1.22 ad if ((c->c_flags & CALLOUT_PENDING) != 0) 528 1.1 thorpej CIRCQ_REMOVE(&c->c_list); 529 1.32 ad expired = ((c->c_flags & CALLOUT_FIRED) != 0); 530 1.32 ad c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED); 531 1.32 ad 532 1.72 riastrad SDT_PROBE5(sdt, kernel, callout, stop, 533 1.72 riastrad c, c->c_func, c->c_arg, c->c_flags, expired); 534 1.72 riastrad 535 1.36 ad mutex_spin_exit(lock); 536 1.32 ad 537 1.32 ad return expired; 538 1.32 ad } 539 1.32 ad 540 1.32 ad /* 541 1.32 ad * callout_halt: 542 1.32 ad * 543 1.32 ad * Cancel a pending callout. If in-flight, block until it completes. 544 1.36 ad * May not be called from a hard interrupt handler. If the callout 545 1.36 ad * can take locks, the caller of callout_halt() must not hold any of 546 1.37 ad * those locks, otherwise the two could deadlock. If 'interlock' is 547 1.37 ad * non-NULL and we must wait for the callout to complete, it will be 548 1.37 ad * released and re-acquired before returning. 549 1.32 ad */ 550 1.32 ad bool 551 1.38 ad callout_halt(callout_t *cs, void *interlock) 552 1.32 ad { 553 1.32 ad callout_impl_t *c = (callout_impl_t *)cs; 554 1.57 ad kmutex_t *lock; 555 1.32 ad 556 1.32 ad KASSERT(c->c_magic == CALLOUT_MAGIC); 557 1.32 ad KASSERT(!cpu_intr_p()); 558 1.54 ozaki KASSERT(interlock == NULL || mutex_owned(interlock)); 559 1.32 ad 560 1.57 ad /* Fast path. */ 561 1.36 ad lock = callout_lock(c); 562 1.72 riastrad SDT_PROBE4(sdt, kernel, callout, halt, 563 1.72 riastrad c, c->c_func, c->c_arg, c->c_flags); 564 1.74 pho if ((c->c_flags & CALLOUT_PENDING) != 0) 565 1.57 ad CIRCQ_REMOVE(&c->c_list); 566 1.74 pho c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED); 567 1.75 pho if (__predict_false(callout_running_somewhere_else(c, c->c_cpu))) { 568 1.57 ad callout_wait(c, interlock, lock); 569 1.57 ad return true; 570 1.57 ad } 571 1.72 riastrad SDT_PROBE5(sdt, kernel, callout, halt__done, 572 1.72 riastrad c, c->c_func, c->c_arg, c->c_flags, /*expired*/false); 573 1.57 ad mutex_spin_exit(lock); 574 1.57 ad return false; 575 1.57 ad } 576 1.1 thorpej 577 1.57 ad /* 578 1.57 ad * callout_wait: 579 1.57 ad * 580 1.57 ad * Slow path for callout_halt(). Deliberately marked __noinline to 581 1.57 ad * prevent unneeded overhead in the caller. 582 1.57 ad */ 583 1.57 ad static void __noinline 584 1.57 ad callout_wait(callout_impl_t *c, void *interlock, kmutex_t *lock) 585 1.57 ad { 586 1.57 ad struct callout_cpu *cc; 587 1.57 ad struct lwp *l; 588 1.57 ad kmutex_t *relock; 589 1.78 ad int nlocks; 590 1.1 thorpej 591 1.32 ad l = curlwp; 592 1.57 ad relock = NULL; 593 1.36 ad for (;;) { 594 1.58 ad /* 595 1.58 ad * At this point we know the callout is not pending, but it 596 1.58 ad * could be running on a CPU somewhere. That can be curcpu 597 1.58 ad * in a few cases: 598 1.58 ad * 599 1.58 ad * - curlwp is a higher priority soft interrupt 600 1.58 ad * - the callout blocked on a lock and is currently asleep 601 1.58 ad * - the callout itself has called callout_halt() (nice!) 602 1.58 ad */ 603 1.36 ad cc = c->c_cpu; 604 1.75 pho if (__predict_true(!callout_running_somewhere_else(c, cc))) 605 1.36 ad break; 606 1.58 ad 607 1.58 ad /* It's running - need to wait for it to complete. */ 608 1.37 ad if (interlock != NULL) { 609 1.37 ad /* 610 1.37 ad * Avoid potential scheduler lock order problems by 611 1.37 ad * dropping the interlock without the callout lock 612 1.58 ad * held; then retry. 613 1.37 ad */ 614 1.37 ad mutex_spin_exit(lock); 615 1.37 ad mutex_exit(interlock); 616 1.37 ad relock = interlock; 617 1.37 ad interlock = NULL; 618 1.37 ad } else { 619 1.37 ad /* XXX Better to do priority inheritance. */ 620 1.37 ad KASSERT(l->l_wchan == NULL); 621 1.37 ad cc->cc_nwait++; 622 1.37 ad cc->cc_ev_block.ev_count++; 623 1.78 ad nlocks = sleepq_enter(&cc->cc_sleepq, l, cc->cc_lock); 624 1.37 ad sleepq_enqueue(&cc->cc_sleepq, cc, "callout", 625 1.79 ad &callout_syncobj, false); 626 1.79 ad sleepq_block(0, false, &callout_syncobj, nlocks); 627 1.37 ad } 628 1.58 ad 629 1.58 ad /* 630 1.71 riastrad * Re-lock the callout and check the state of play again. 631 1.58 ad * It's a common design pattern for callouts to re-schedule 632 1.58 ad * themselves so put a stop to it again if needed. 633 1.58 ad */ 634 1.36 ad lock = callout_lock(c); 635 1.58 ad if ((c->c_flags & CALLOUT_PENDING) != 0) 636 1.58 ad CIRCQ_REMOVE(&c->c_list); 637 1.58 ad c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED); 638 1.32 ad } 639 1.32 ad 640 1.72 riastrad SDT_PROBE5(sdt, kernel, callout, halt__done, 641 1.72 riastrad c, c->c_func, c->c_arg, c->c_flags, /*expired*/true); 642 1.72 riastrad 643 1.36 ad mutex_spin_exit(lock); 644 1.37 ad if (__predict_false(relock != NULL)) 645 1.37 ad mutex_enter(relock); 646 1.22 ad } 647 1.22 ad 648 1.36 ad #ifdef notyet 649 1.36 ad /* 650 1.36 ad * callout_bind: 651 1.36 ad * 652 1.36 ad * Bind a callout so that it will only execute on one CPU. 653 1.36 ad * The callout must be stopped, and must be MPSAFE. 654 1.36 ad * 655 1.36 ad * XXX Disabled for now until it is decided how to handle 656 1.36 ad * offlined CPUs. We may want weak+strong binding. 657 1.36 ad */ 658 1.36 ad void 659 1.36 ad callout_bind(callout_t *cs, struct cpu_info *ci) 660 1.36 ad { 661 1.36 ad callout_impl_t *c = (callout_impl_t *)cs; 662 1.36 ad struct callout_cpu *cc; 663 1.36 ad kmutex_t *lock; 664 1.36 ad 665 1.36 ad KASSERT((c->c_flags & CALLOUT_PENDING) == 0); 666 1.36 ad KASSERT(c->c_cpu->cc_active != c); 667 1.36 ad KASSERT(c->c_magic == CALLOUT_MAGIC); 668 1.36 ad KASSERT((c->c_flags & CALLOUT_MPSAFE) != 0); 669 1.36 ad 670 1.36 ad lock = callout_lock(c); 671 1.36 ad cc = ci->ci_data.cpu_callout; 672 1.36 ad c->c_flags |= CALLOUT_BOUND; 673 1.36 ad if (c->c_cpu != cc) { 674 1.36 ad /* 675 1.36 ad * Assigning c_cpu effectively unlocks the callout 676 1.36 ad * structure, as we don't hold the new CPU's lock. 677 1.36 ad * Issue memory barrier to prevent accesses being 678 1.36 ad * reordered. 679 1.36 ad */ 680 1.36 ad membar_exit(); 681 1.36 ad c->c_cpu = cc; 682 1.36 ad } 683 1.36 ad mutex_spin_exit(lock); 684 1.36 ad } 685 1.36 ad #endif 686 1.36 ad 687 1.22 ad void 688 1.22 ad callout_setfunc(callout_t *cs, void (*func)(void *), void *arg) 689 1.22 ad { 690 1.22 ad callout_impl_t *c = (callout_impl_t *)cs; 691 1.36 ad kmutex_t *lock; 692 1.22 ad 693 1.22 ad KASSERT(c->c_magic == CALLOUT_MAGIC); 694 1.42 rmind KASSERT(func != NULL); 695 1.22 ad 696 1.36 ad lock = callout_lock(c); 697 1.72 riastrad SDT_PROBE4(sdt, kernel, callout, setfunc, cs, func, arg, c->c_flags); 698 1.22 ad c->c_func = func; 699 1.22 ad c->c_arg = arg; 700 1.36 ad mutex_spin_exit(lock); 701 1.22 ad } 702 1.22 ad 703 1.22 ad bool 704 1.22 ad callout_expired(callout_t *cs) 705 1.22 ad { 706 1.22 ad callout_impl_t *c = (callout_impl_t *)cs; 707 1.36 ad kmutex_t *lock; 708 1.22 ad bool rv; 709 1.22 ad 710 1.22 ad KASSERT(c->c_magic == CALLOUT_MAGIC); 711 1.22 ad 712 1.36 ad lock = callout_lock(c); 713 1.22 ad rv = ((c->c_flags & CALLOUT_FIRED) != 0); 714 1.36 ad mutex_spin_exit(lock); 715 1.22 ad 716 1.22 ad return rv; 717 1.22 ad } 718 1.22 ad 719 1.22 ad bool 720 1.22 ad callout_active(callout_t *cs) 721 1.22 ad { 722 1.22 ad callout_impl_t *c = (callout_impl_t *)cs; 723 1.36 ad kmutex_t *lock; 724 1.22 ad bool rv; 725 1.22 ad 726 1.22 ad KASSERT(c->c_magic == CALLOUT_MAGIC); 727 1.22 ad 728 1.36 ad lock = callout_lock(c); 729 1.22 ad rv = ((c->c_flags & (CALLOUT_PENDING|CALLOUT_FIRED)) != 0); 730 1.36 ad mutex_spin_exit(lock); 731 1.22 ad 732 1.22 ad return rv; 733 1.22 ad } 734 1.22 ad 735 1.22 ad bool 736 1.22 ad callout_pending(callout_t *cs) 737 1.22 ad { 738 1.22 ad callout_impl_t *c = (callout_impl_t *)cs; 739 1.36 ad kmutex_t *lock; 740 1.22 ad bool rv; 741 1.22 ad 742 1.22 ad KASSERT(c->c_magic == CALLOUT_MAGIC); 743 1.22 ad 744 1.36 ad lock = callout_lock(c); 745 1.22 ad rv = ((c->c_flags & CALLOUT_PENDING) != 0); 746 1.36 ad mutex_spin_exit(lock); 747 1.22 ad 748 1.22 ad return rv; 749 1.22 ad } 750 1.22 ad 751 1.22 ad bool 752 1.22 ad callout_invoking(callout_t *cs) 753 1.22 ad { 754 1.22 ad callout_impl_t *c = (callout_impl_t *)cs; 755 1.36 ad kmutex_t *lock; 756 1.22 ad bool rv; 757 1.22 ad 758 1.22 ad KASSERT(c->c_magic == CALLOUT_MAGIC); 759 1.22 ad 760 1.36 ad lock = callout_lock(c); 761 1.22 ad rv = ((c->c_flags & CALLOUT_INVOKING) != 0); 762 1.36 ad mutex_spin_exit(lock); 763 1.22 ad 764 1.22 ad return rv; 765 1.22 ad } 766 1.22 ad 767 1.22 ad void 768 1.22 ad callout_ack(callout_t *cs) 769 1.22 ad { 770 1.22 ad callout_impl_t *c = (callout_impl_t *)cs; 771 1.36 ad kmutex_t *lock; 772 1.22 ad 773 1.22 ad KASSERT(c->c_magic == CALLOUT_MAGIC); 774 1.22 ad 775 1.36 ad lock = callout_lock(c); 776 1.22 ad c->c_flags &= ~CALLOUT_INVOKING; 777 1.36 ad mutex_spin_exit(lock); 778 1.1 thorpej } 779 1.1 thorpej 780 1.1 thorpej /* 781 1.36 ad * callout_hardclock: 782 1.36 ad * 783 1.36 ad * Called from hardclock() once every tick. We schedule a soft 784 1.36 ad * interrupt if there is work to be done. 785 1.1 thorpej */ 786 1.22 ad void 787 1.1 thorpej callout_hardclock(void) 788 1.1 thorpej { 789 1.36 ad struct callout_cpu *cc; 790 1.36 ad int needsoftclock, ticks; 791 1.1 thorpej 792 1.36 ad cc = curcpu()->ci_data.cpu_callout; 793 1.44 ad mutex_spin_enter(cc->cc_lock); 794 1.1 thorpej 795 1.36 ad ticks = ++cc->cc_ticks; 796 1.36 ad 797 1.36 ad MOVEBUCKET(cc, 0, ticks); 798 1.36 ad if (MASKWHEEL(0, ticks) == 0) { 799 1.36 ad MOVEBUCKET(cc, 1, ticks); 800 1.36 ad if (MASKWHEEL(1, ticks) == 0) { 801 1.36 ad MOVEBUCKET(cc, 2, ticks); 802 1.36 ad if (MASKWHEEL(2, ticks) == 0) 803 1.36 ad MOVEBUCKET(cc, 3, ticks); 804 1.1 thorpej } 805 1.1 thorpej } 806 1.1 thorpej 807 1.36 ad needsoftclock = !CIRCQ_EMPTY(&cc->cc_todo); 808 1.44 ad mutex_spin_exit(cc->cc_lock); 809 1.1 thorpej 810 1.22 ad if (needsoftclock) 811 1.36 ad softint_schedule(callout_sih); 812 1.1 thorpej } 813 1.1 thorpej 814 1.36 ad /* 815 1.36 ad * callout_softclock: 816 1.36 ad * 817 1.36 ad * Soft interrupt handler, scheduled above if there is work to 818 1.36 ad * be done. Callouts are made in soft interrupt context. 819 1.36 ad */ 820 1.22 ad static void 821 1.22 ad callout_softclock(void *v) 822 1.1 thorpej { 823 1.22 ad callout_impl_t *c; 824 1.36 ad struct callout_cpu *cc; 825 1.1 thorpej void (*func)(void *); 826 1.1 thorpej void *arg; 827 1.68 riastrad int mpsafe, count, ticks, delta; 828 1.73 riastrad u_int flags __unused; 829 1.22 ad lwp_t *l; 830 1.1 thorpej 831 1.22 ad l = curlwp; 832 1.36 ad KASSERT(l->l_cpu == curcpu()); 833 1.36 ad cc = l->l_cpu->ci_data.cpu_callout; 834 1.1 thorpej 835 1.44 ad mutex_spin_enter(cc->cc_lock); 836 1.36 ad cc->cc_lwp = l; 837 1.36 ad while (!CIRCQ_EMPTY(&cc->cc_todo)) { 838 1.36 ad c = CIRCQ_FIRST(&cc->cc_todo); 839 1.22 ad KASSERT(c->c_magic == CALLOUT_MAGIC); 840 1.22 ad KASSERT(c->c_func != NULL); 841 1.36 ad KASSERT(c->c_cpu == cc); 842 1.26 ad KASSERT((c->c_flags & CALLOUT_PENDING) != 0); 843 1.26 ad KASSERT((c->c_flags & CALLOUT_FIRED) == 0); 844 1.1 thorpej CIRCQ_REMOVE(&c->c_list); 845 1.1 thorpej 846 1.1 thorpej /* If due run it, otherwise insert it into the right bucket. */ 847 1.36 ad ticks = cc->cc_ticks; 848 1.56 kre delta = (int)((unsigned)c->c_time - (unsigned)ticks); 849 1.56 kre if (delta > 0) { 850 1.36 ad CIRCQ_INSERT(&c->c_list, BUCKET(cc, delta, c->c_time)); 851 1.36 ad continue; 852 1.36 ad } 853 1.56 kre if (delta < 0) 854 1.36 ad cc->cc_ev_late.ev_count++; 855 1.36 ad 856 1.43 ad c->c_flags = (c->c_flags & ~CALLOUT_PENDING) | 857 1.43 ad (CALLOUT_FIRED | CALLOUT_INVOKING); 858 1.36 ad mpsafe = (c->c_flags & CALLOUT_MPSAFE); 859 1.36 ad func = c->c_func; 860 1.36 ad arg = c->c_arg; 861 1.36 ad cc->cc_active = c; 862 1.72 riastrad flags = c->c_flags; 863 1.36 ad 864 1.44 ad mutex_spin_exit(cc->cc_lock); 865 1.42 rmind KASSERT(func != NULL); 866 1.72 riastrad SDT_PROBE4(sdt, kernel, callout, entry, c, func, arg, flags); 867 1.44 ad if (__predict_false(!mpsafe)) { 868 1.36 ad KERNEL_LOCK(1, NULL); 869 1.36 ad (*func)(arg); 870 1.36 ad KERNEL_UNLOCK_ONE(NULL); 871 1.36 ad } else 872 1.36 ad (*func)(arg); 873 1.72 riastrad SDT_PROBE4(sdt, kernel, callout, return, c, func, arg, flags); 874 1.69 riastrad KASSERTMSG(l->l_blcnt == 0, 875 1.69 riastrad "callout %p func %p leaked %d biglocks", 876 1.69 riastrad c, func, l->l_blcnt); 877 1.44 ad mutex_spin_enter(cc->cc_lock); 878 1.36 ad 879 1.36 ad /* 880 1.36 ad * We can't touch 'c' here because it might be 881 1.36 ad * freed already. If LWPs waiting for callout 882 1.36 ad * to complete, awaken them. 883 1.36 ad */ 884 1.36 ad cc->cc_active = NULL; 885 1.36 ad if ((count = cc->cc_nwait) != 0) { 886 1.36 ad cc->cc_nwait = 0; 887 1.36 ad /* sleepq_wake() drops the lock. */ 888 1.44 ad sleepq_wake(&cc->cc_sleepq, cc, count, cc->cc_lock); 889 1.44 ad mutex_spin_enter(cc->cc_lock); 890 1.1 thorpej } 891 1.1 thorpej } 892 1.36 ad cc->cc_lwp = NULL; 893 1.44 ad mutex_spin_exit(cc->cc_lock); 894 1.1 thorpej } 895 1.63 rin #endif /* !CRASH */ 896 1.1 thorpej 897 1.1 thorpej #ifdef DDB 898 1.1 thorpej static void 899 1.51 christos db_show_callout_bucket(struct callout_cpu *cc, struct callout_circq *kbucket, 900 1.51 christos struct callout_circq *bucket) 901 1.1 thorpej { 902 1.49 christos callout_impl_t *c, ci; 903 1.1 thorpej db_expr_t offset; 904 1.15 christos const char *name; 905 1.15 christos static char question[] = "?"; 906 1.36 ad int b; 907 1.1 thorpej 908 1.51 christos if (CIRCQ_LAST(bucket, kbucket)) 909 1.11 scw return; 910 1.11 scw 911 1.51 christos for (c = CIRCQ_FIRST(bucket); /*nothing*/; c = CIRCQ_NEXT(&c->c_list)) { 912 1.49 christos db_read_bytes((db_addr_t)c, sizeof(ci), (char *)&ci); 913 1.49 christos c = &ci; 914 1.10 scw db_find_sym_and_offset((db_addr_t)(intptr_t)c->c_func, &name, 915 1.10 scw &offset); 916 1.15 christos name = name ? name : question; 917 1.36 ad b = (bucket - cc->cc_wheel); 918 1.36 ad if (b < 0) 919 1.36 ad b = -WHEELSIZE; 920 1.36 ad db_printf("%9d %2d/%-4d %16lx %s\n", 921 1.36 ad c->c_time - cc->cc_ticks, b / WHEELSIZE, b, 922 1.36 ad (u_long)c->c_arg, name); 923 1.51 christos if (CIRCQ_LAST(&c->c_list, kbucket)) 924 1.11 scw break; 925 1.1 thorpej } 926 1.1 thorpej } 927 1.1 thorpej 928 1.1 thorpej void 929 1.21 matt db_show_callout(db_expr_t addr, bool haddr, db_expr_t count, const char *modif) 930 1.1 thorpej { 931 1.64 rin struct callout_cpu *cc; 932 1.64 rin struct cpu_info *ci; 933 1.1 thorpej int b; 934 1.1 thorpej 935 1.49 christos #ifndef CRASH 936 1.60 maxv db_printf("hardclock_ticks now: %d\n", getticks()); 937 1.49 christos #endif 938 1.1 thorpej db_printf(" ticks wheel arg func\n"); 939 1.1 thorpej 940 1.1 thorpej /* 941 1.1 thorpej * Don't lock the callwheel; all the other CPUs are paused 942 1.1 thorpej * anyhow, and we might be called in a circumstance where 943 1.1 thorpej * some other CPU was paused while holding the lock. 944 1.1 thorpej */ 945 1.49 christos for (ci = db_cpu_first(); ci != NULL; ci = db_cpu_next(ci)) { 946 1.66 rin db_read_bytes((db_addr_t)ci + 947 1.66 rin offsetof(struct cpu_info, ci_data.cpu_callout), 948 1.66 rin sizeof(cc), (char *)&cc); 949 1.64 rin db_read_bytes((db_addr_t)cc, sizeof(ccb), (char *)&ccb); 950 1.64 rin db_show_callout_bucket(&ccb, &cc->cc_todo, &ccb.cc_todo); 951 1.36 ad } 952 1.36 ad for (b = 0; b < BUCKETS; b++) { 953 1.49 christos for (ci = db_cpu_first(); ci != NULL; ci = db_cpu_next(ci)) { 954 1.66 rin db_read_bytes((db_addr_t)ci + 955 1.66 rin offsetof(struct cpu_info, ci_data.cpu_callout), 956 1.66 rin sizeof(cc), (char *)&cc); 957 1.64 rin db_read_bytes((db_addr_t)cc, sizeof(ccb), (char *)&ccb); 958 1.64 rin db_show_callout_bucket(&ccb, &cc->cc_wheel[b], 959 1.64 rin &ccb.cc_wheel[b]); 960 1.36 ad } 961 1.36 ad } 962 1.1 thorpej } 963 1.1 thorpej #endif /* DDB */ 964