1 1.10 rillig /* $NetBSD: cyclic.c,v 1.10 2024/09/08 09:36:46 rillig Exp $ */ 2 1.2 darran 3 1.1 darran /* 4 1.1 darran * CDDL HEADER START 5 1.1 darran * 6 1.1 darran * The contents of this file are subject to the terms of the 7 1.1 darran * Common Development and Distribution License, Version 1.0 only 8 1.1 darran * (the "License"). You may not use this file except in compliance 9 1.1 darran * with the License. 10 1.1 darran * 11 1.1 darran * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 12 1.1 darran * or http://www.opensolaris.org/os/licensing. 13 1.1 darran * See the License for the specific language governing permissions 14 1.1 darran * and limitations under the License. 15 1.1 darran * 16 1.1 darran * When distributing Covered Code, include this CDDL HEADER in each 17 1.1 darran * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 18 1.1 darran * If applicable, add the following below this CDDL HEADER, with the 19 1.1 darran * fields enclosed by brackets "[]" replaced with your own identifying 20 1.1 darran * information: Portions Copyright [yyyy] [name of copyright owner] 21 1.1 darran * 22 1.1 darran * CDDL HEADER END 23 1.1 darran * 24 1.1 darran * Portions Copyright 2008 John Birrell <jb (at) freebsd.org> 25 1.1 darran * 26 1.8 chs * $FreeBSD: head/sys/cddl/dev/cyclic/cyclic.c 227293 2011-11-07 06:44:47Z ed $ 27 1.1 darran * 28 1.1 darran * This is a simplified version of the cyclic timer subsystem from 29 1.1 darran * OpenSolaris. In the FreeBSD version, we don't use interrupt levels. 30 1.1 darran */ 31 1.1 darran 32 1.1 darran /* 33 1.1 darran * Copyright 2004 Sun Microsystems, Inc. All rights reserved. 34 1.1 darran * Use is subject to license terms. 
35 1.1 darran */ 36 1.1 darran 37 1.1 darran /* 38 1.1 darran * The Cyclic Subsystem 39 1.1 darran * -------------------- 40 1.1 darran * 41 1.1 darran * Prehistory 42 1.1 darran * 43 1.1 darran * Historically, most computer architectures have specified interval-based 44 1.1 darran * timer parts (e.g. SPARCstation's counter/timer; Intel's i8254). While 45 1.1 darran * these parts deal in relative (i.e. not absolute) time values, they are 46 1.1 darran * typically used by the operating system to implement the abstraction of 47 1.1 darran * absolute time. As a result, these parts cannot typically be reprogrammed 48 1.1 darran * without introducing error in the system's notion of time. 49 1.1 darran * 50 1.1 darran * Starting in about 1994, chip architectures began specifying high resolution 51 1.1 darran * timestamp registers. As of this writing (1999), all major chip families 52 1.1 darran * (UltraSPARC, PentiumPro, MIPS, PowerPC, Alpha) have high resolution 53 1.1 darran * timestamp registers, and two (UltraSPARC and MIPS) have added the capacity 54 1.1 darran * to interrupt based on timestamp values. These timestamp-compare registers 55 1.1 darran * present a time-based interrupt source which can be reprogrammed arbitrarily 56 1.1 darran * often without introducing error. Given the low cost of implementing such a 57 1.1 darran * timestamp-compare register (and the tangible benefit of eliminating 58 1.1 darran * discrete timer parts), it is reasonable to expect that future chip 59 1.1 darran * architectures will adopt this feature. 60 1.1 darran * 61 1.1 darran * The cyclic subsystem has been designed to take advantage of chip 62 1.1 darran * architectures with the capacity to interrupt based on absolute, high 63 1.1 darran * resolution values of time. 
64 1.1 darran * 65 1.1 darran * Subsystem Overview 66 1.1 darran * 67 1.1 darran * The cyclic subsystem is a low-level kernel subsystem designed to provide 68 1.1 darran * arbitrarily high resolution, per-CPU interval timers (to avoid colliding 69 1.1 darran * with existing terms, we dub such an interval timer a "cyclic"). 70 1.1 darran * Alternatively, a cyclic may be specified to be "omnipresent", denoting 71 1.1 darran * firing on all online CPUs. 72 1.1 darran * 73 1.1 darran * Cyclic Subsystem Interface Overview 74 1.1 darran * ----------------------------------- 75 1.1 darran * 76 1.1 darran * The cyclic subsystem has interfaces with the kernel at-large, with other 77 1.1 darran * kernel subsystems (e.g. the processor management subsystem, the checkpoint 78 1.1 darran * resume subsystem) and with the platform (the cyclic backend). Each 79 1.1 darran * of these interfaces is given a brief synopsis here, and is described 80 1.1 darran * in full above the interface's implementation. 81 1.1 darran * 82 1.1 darran * The following diagram displays the cyclic subsystem's interfaces to 83 1.1 darran * other kernel components. The arrows denote a "calls" relationship, with 84 1.1 darran * the large arrow indicating the cyclic subsystem's consumer interface. 85 1.1 darran * Each arrow is labeled with the section in which the corresponding 86 1.1 darran * interface is described. 
87 1.1 darran * 88 1.1 darran * Kernel at-large consumers 89 1.1 darran * -----------++------------ 90 1.1 darran * || 91 1.1 darran * || 92 1.1 darran * _||_ 93 1.1 darran * \ / 94 1.1 darran * \/ 95 1.1 darran * +---------------------+ 96 1.1 darran * | | 97 1.1 darran * | Cyclic subsystem |<----------- Other kernel subsystems 98 1.1 darran * | | 99 1.1 darran * +---------------------+ 100 1.1 darran * ^ | 101 1.1 darran * | | 102 1.1 darran * | | 103 1.1 darran * | v 104 1.1 darran * +---------------------+ 105 1.1 darran * | | 106 1.1 darran * | Cyclic backend | 107 1.1 darran * | (platform specific) | 108 1.1 darran * | | 109 1.1 darran * +---------------------+ 110 1.1 darran * 111 1.1 darran * 112 1.1 darran * Kernel At-Large Interfaces 113 1.1 darran * 114 1.1 darran * cyclic_add() <-- Creates a cyclic 115 1.1 darran * cyclic_add_omni() <-- Creates an omnipresent cyclic 116 1.1 darran * cyclic_remove() <-- Removes a cyclic 117 1.1 darran * 118 1.1 darran * Backend Interfaces 119 1.1 darran * 120 1.1 darran * cyclic_init() <-- Initializes the cyclic subsystem 121 1.1 darran * cyclic_fire() <-- Interrupt entry point 122 1.1 darran * 123 1.1 darran * The backend-supplied interfaces (through the cyc_backend structure) are 124 1.1 darran * documented in detail in <sys/cyclic_impl.h> 125 1.1 darran * 126 1.1 darran * 127 1.1 darran * Cyclic Subsystem Implementation Overview 128 1.1 darran * ---------------------------------------- 129 1.1 darran * 130 1.1 darran * The cyclic subsystem is designed to minimize interference between cyclics 131 1.1 darran * on different CPUs. Thus, all of the cyclic subsystem's data structures 132 1.1 darran * hang off of a per-CPU structure, cyc_cpu. 133 1.1 darran * 134 1.1 darran * Each cyc_cpu has a power-of-two sized array of cyclic structures (the 135 1.1 darran * cyp_cyclics member of the cyc_cpu structure). 
If cyclic_add() is called 136 1.1 darran * and there does not exist a free slot in the cyp_cyclics array, the size of 137 1.1 darran * the array will be doubled. The array will never shrink. Cyclics are 138 1.1 darran * referred to by their index in the cyp_cyclics array, which is of type 139 1.1 darran * cyc_index_t. 140 1.1 darran * 141 1.1 darran * The cyclics are kept sorted by expiration time in the cyc_cpu's heap. The 142 1.1 darran * heap is keyed by cyclic expiration time, with parents expiring earlier 143 1.1 darran * than their children. 144 1.1 darran * 145 1.1 darran * Heap Management 146 1.1 darran * 147 1.1 darran * The heap is managed primarily by cyclic_fire(). Upon entry, cyclic_fire() 148 1.1 darran * compares the root cyclic's expiration time to the current time. If the 149 1.1 darran * expiration time is in the past, cyclic_expire() is called on the root 150 1.1 darran * cyclic. Upon return from cyclic_expire(), the cyclic's new expiration time 151 1.1 darran * is derived by adding its interval to its old expiration time, and a 152 1.1 darran * downheap operation is performed. After the downheap, cyclic_fire() 153 1.1 darran * examines the (potentially changed) root cyclic, repeating the 154 1.1 darran * cyclic_expire()/add interval/cyclic_downheap() sequence until the root 155 1.1 darran * cyclic has an expiration time in the future. This expiration time 156 1.1 darran * (guaranteed to be the earliest in the heap) is then communicated to the 157 1.1 darran * backend via cyb_reprogram. Optimal backends will next call cyclic_fire() 158 1.1 darran * shortly after the root cyclic's expiration time. 159 1.1 darran * 160 1.1 darran * To allow efficient, deterministic downheap operations, we implement the 161 1.1 darran * heap as an array (the cyp_heap member of the cyc_cpu structure), with each 162 1.1 darran * element containing an index into the CPU's cyp_cyclics array. 
163 1.1 darran * 164 1.1 darran * The heap is laid out in the array according to the following: 165 1.1 darran * 166 1.1 darran * 1. The root of the heap is always in the 0th element of the heap array 167 1.1 darran * 2. The left and right children of the nth element are element 168 1.1 darran * (((n + 1) << 1) - 1) and element ((n + 1) << 1), respectively. 169 1.1 darran * 170 1.1 darran * This layout is standard (see, e.g., Cormen's "Algorithms"); the proof 171 1.1 darran * that these constraints correctly lay out a heap (or indeed, any binary 172 1.1 darran * tree) is trivial and left to the reader. 173 1.1 darran * 174 1.1 darran * To see the heap by example, assume our cyclics array has the following 175 1.1 darran * members (at time t): 176 1.1 darran * 177 1.1 darran * cy_handler cy_expire 178 1.1 darran * --------------------------------------------- 179 1.1 darran * [ 0] clock() t+10000000 180 1.1 darran * [ 1] deadman() t+1000000000 181 1.1 darran * [ 2] clock_highres_fire() t+100 182 1.1 darran * [ 3] clock_highres_fire() t+1000 183 1.1 darran * [ 4] clock_highres_fire() t+500 184 1.1 darran * [ 5] (free) -- 185 1.1 darran * [ 6] (free) -- 186 1.1 darran * [ 7] (free) -- 187 1.1 darran * 188 1.1 darran * The heap array could be: 189 1.1 darran * 190 1.1 darran * [0] [1] [2] [3] [4] [5] [6] [7] 191 1.1 darran * +-----+-----+-----+-----+-----+-----+-----+-----+ 192 1.1 darran * | | | | | | | | | 193 1.1 darran * | 2 | 3 | 4 | 0 | 1 | x | x | x | 194 1.1 darran * | | | | | | | | | 195 1.1 darran * +-----+-----+-----+-----+-----+-----+-----+-----+ 196 1.1 darran * 197 1.1 darran * Graphically, this array corresponds to the following (excuse the ASCII art): 198 1.1 darran * 199 1.1 darran * 2 200 1.1 darran * | 201 1.1 darran * +------------------+------------------+ 202 1.1 darran * 3 4 203 1.1 darran * | 204 1.1 darran * +---------+--------+ 205 1.1 darran * 0 1 206 1.1 darran * 207 1.1 darran * Note that the heap is laid out by layer: all nodes at a given 
depth are 208 1.1 darran * stored in consecutive elements of the array. Moreover, layers of 209 1.1 darran * consecutive depths are in adjacent element ranges. This property 210 1.1 darran * guarantees high locality of reference during downheap operations. 211 1.1 darran * Specifically, we are guaranteed that we can downheap to a depth of 212 1.1 darran * 213 1.1 darran * lg (cache_line_size / sizeof (cyc_index_t)) 214 1.1 darran * 215 1.1 darran * nodes with at most one cache miss. On UltraSPARC (64 byte e-cache line 216 1.1 darran * size), this corresponds to a depth of four nodes. Thus, if there are 217 1.1 darran * fewer than sixteen cyclics in the heap, downheaps on UltraSPARC miss at 218 1.1 darran * most once in the e-cache. 219 1.1 darran * 220 1.1 darran * Downheaps are required to compare siblings as they proceed down the 221 1.1 darran * heap. For downheaps proceeding beyond the one-cache-miss depth, every 222 1.1 darran * access to a left child could potentially miss in the cache. However, 223 1.1 darran * if we assume 224 1.1 darran * 225 1.1 darran * (cache_line_size / sizeof (cyc_index_t)) > 2, 226 1.1 darran * 227 1.1 darran * then all siblings are guaranteed to be on the same cache line. Thus, the 228 1.1 darran * miss on the left child will guarantee a hit on the right child; downheaps 229 1.1 darran * will incur at most one cache miss per layer beyond the one-cache-miss 230 1.1 darran * depth. The total number of cache misses for heap management during a 231 1.1 darran * downheap operation is thus bounded by 232 1.1 darran * 233 1.1 darran * lg (n) - lg (cache_line_size / sizeof (cyc_index_t)) 234 1.1 darran * 235 1.1 darran * Traditional pointer-based heaps are implemented without regard to 236 1.1 darran * locality. Downheaps can thus incur two cache misses per layer (one for 237 1.1 darran * each child), but at most one cache miss at the root. 
This yields a bound
 * of
 *
 *	2 * lg (n) - 1
 *
 * on the total cache misses.
 *
 * This difference may seem theoretically trivial (the difference is, after
 * all, constant), but can become substantial in practice -- especially for
 * caches with very large cache lines and high miss penalties (e.g. TLBs).
 *
 * Heaps must always be full, balanced trees.  Heap management must therefore
 * track the next point-of-insertion into the heap.  In pointer-based heaps,
 * recomputing this point takes O(lg (n)).  Given the layout of the
 * array-based implementation, however, the next point-of-insertion is
 * always:
 *
 *	heap[number_of_elements]
 *
 * We exploit this property by implementing the free-list in the unused
 * heap elements.  Heap insertion, therefore, consists only of filling in
 * the cyclic at cyp_cyclics[cyp_heap[number_of_elements]], incrementing
 * the number of elements, and performing an upheap.  Heap deletion consists
 * of decrementing the number of elements, swapping the to-be-deleted element
 * with the element at cyp_heap[number_of_elements], and downheaping.
262 1.1 darran * 263 1.1 darran * Filling in more details in our earlier example: 264 1.1 darran * 265 1.1 darran * +--- free list head 266 1.1 darran * | 267 1.1 darran * V 268 1.1 darran * 269 1.1 darran * [0] [1] [2] [3] [4] [5] [6] [7] 270 1.1 darran * +-----+-----+-----+-----+-----+-----+-----+-----+ 271 1.1 darran * | | | | | | | | | 272 1.1 darran * | 2 | 3 | 4 | 0 | 1 | 5 | 6 | 7 | 273 1.1 darran * | | | | | | | | | 274 1.1 darran * +-----+-----+-----+-----+-----+-----+-----+-----+ 275 1.1 darran * 276 1.1 darran * To insert into this heap, we would just need to fill in the cyclic at 277 1.1 darran * cyp_cyclics[5], bump the number of elements (from 5 to 6) and perform 278 1.1 darran * an upheap. 279 1.1 darran * 280 1.1 darran * If we wanted to remove, say, cyp_cyclics[3], we would first scan for it 281 1.1 darran * in the cyp_heap, and discover it at cyp_heap[1]. We would then decrement 282 1.1 darran * the number of elements (from 5 to 4), swap cyp_heap[1] with cyp_heap[4], 283 1.1 darran * and perform a downheap from cyp_heap[1]. The linear scan is required 284 1.1 darran * because the cyclic does not keep a backpointer into the heap. This makes 285 1.1 darran * heap manipulation (e.g. downheaps) faster at the expense of removal 286 1.1 darran * operations. 287 1.1 darran * 288 1.1 darran * Expiry processing 289 1.1 darran * 290 1.1 darran * As alluded to above, cyclic_expire() is called by cyclic_fire() to expire 291 1.1 darran * a cyclic. Cyclic subsystem consumers are guaranteed that for an arbitrary 292 1.1 darran * time t in the future, their cyclic handler will have been called 293 1.1 darran * (t - cyt_when) / cyt_interval times. cyclic_expire() simply needs to call 294 1.1 darran * the handler. 295 1.1 darran * 296 1.1 darran * Resizing 297 1.1 darran * 298 1.1 darran * All of the discussion thus far has assumed a static number of cyclics. 
 * Obviously, static limitations are not practical; we need the capacity
 * to resize our data structures dynamically.
 *
 * We resize our data structures lazily, and only on a per-CPU basis.
 * The size of the data structures always doubles and never shrinks.  We
 * serialize adds (and thus resizes) on cpu_lock; we never need to deal
 * with concurrent resizes.  Resizes should be rare; they may induce jitter
 * on the CPU being resized, but should not affect cyclic operation on other
 * CPUs.
 *
 * Two key cyc_cpu data structures need to be resized: the cyclics array,
 * and the heap array.  Resizing is relatively straightforward:
 *
 *   1.  The new, larger arrays are allocated in cyclic_expand() (called
 *       from cyclic_add()).
 *   2.  The contents of the old arrays are copied into the new arrays.
 *   3.  The old cyclics array is bzero()'d
 *   4.  The pointers are updated.
 *
 * Removals
 *
 * Cyclic removals should be rare.  To simplify the implementation (and to
 * allow optimization for the cyclic_fire()/cyclic_expire()
 * path), we force removals and adds to serialize on cpu_lock.
323 1.1 darran * 324 1.1 darran */ 325 1.1 darran #include <sys/cdefs.h> 326 1.1 darran #include <sys/param.h> 327 1.1 darran #include <sys/conf.h> 328 1.1 darran #include <sys/kernel.h> 329 1.4 chs #ifdef __FreeBSD___ 330 1.1 darran #include <sys/lock.h> 331 1.1 darran #include <sys/sx.h> 332 1.4 chs #endif 333 1.1 darran #include <sys/cyclic_impl.h> 334 1.1 darran #include <sys/module.h> 335 1.1 darran #include <sys/systm.h> 336 1.1 darran #include <sys/atomic.h> 337 1.1 darran #include <sys/kmem.h> 338 1.1 darran #include <sys/cmn_err.h> 339 1.7 chs #include <sys/dtrace_bsd.h> 340 1.4 chs #ifdef __FreeBSD__ 341 1.1 darran #include <machine/cpu.h> 342 1.4 chs #endif 343 1.4 chs 344 1.4 chs #ifdef __NetBSD__ 345 1.4 chs #include <sys/cpu.h> 346 1.4 chs #include <sys/malloc.h> 347 1.4 chs #include <sys/xcall.h> 348 1.4 chs 349 1.4 chs #undef mutex_init 350 1.9 ad #define mtx_init(m, d, p, f) mutex_init(m, MUTEX_DEFAULT, IPL_HIGH) 351 1.4 chs #define mtx_lock_spin(x) mutex_spin_enter(x) 352 1.4 chs #define mtx_unlock_spin(x) mutex_spin_exit(x) 353 1.4 chs #define mtx_destroy(x) mutex_destroy(x) 354 1.4 chs 355 1.4 chs #define SYSINIT(a1, a2, a3, a4, a5) 356 1.4 chs #define SYSUNINIT(a1, a2, a3, a4, a5) 357 1.4 chs #define CPU_FOREACH(var) \ 358 1.4 chs CPU_INFO_ITERATOR cii; \ 359 1.4 chs struct cpu_info *ci; \ 360 1.4 chs for (CPU_INFO_FOREACH(cii, ci)) 361 1.4 chs #define MAXCPU MAXCPUS 362 1.4 chs #define TRAPF_USERMODE(x) CLKF_USERMODE(x) 363 1.4 chs #define TRAPF_PC(x) CLKF_PC(x) 364 1.4 chs #endif 365 1.1 darran 366 1.1 darran static kmem_cache_t *cyclic_id_cache; 367 1.1 darran static cyc_id_t *cyclic_id_head; 368 1.1 darran static cyc_backend_t cyclic_backend; 369 1.1 darran 370 1.1 darran MALLOC_DEFINE(M_CYCLIC, "cyclic", "Cyclic timer subsystem"); 371 1.1 darran 372 1.1 darran /* 373 1.1 darran * Returns 1 if the upheap propagated to the root, 0 if it did not. 
This 374 1.1 darran * allows the caller to reprogram the backend only when the root has been 375 1.1 darran * modified. 376 1.1 darran */ 377 1.1 darran static int 378 1.1 darran cyclic_upheap(cyc_cpu_t *cpu, cyc_index_t ndx) 379 1.1 darran { 380 1.1 darran cyclic_t *cyclics; 381 1.1 darran cyc_index_t *heap; 382 1.1 darran cyc_index_t heap_parent, heap_current = ndx; 383 1.1 darran cyc_index_t parent, current; 384 1.1 darran 385 1.1 darran if (heap_current == 0) 386 1.1 darran return (1); 387 1.1 darran 388 1.1 darran heap = cpu->cyp_heap; 389 1.1 darran cyclics = cpu->cyp_cyclics; 390 1.1 darran heap_parent = CYC_HEAP_PARENT(heap_current); 391 1.1 darran 392 1.1 darran for (;;) { 393 1.1 darran current = heap[heap_current]; 394 1.1 darran parent = heap[heap_parent]; 395 1.1 darran 396 1.1 darran /* 397 1.1 darran * We have an expiration time later than our parent; we're 398 1.1 darran * done. 399 1.1 darran */ 400 1.1 darran if (cyclics[current].cy_expire >= cyclics[parent].cy_expire) 401 1.1 darran return (0); 402 1.1 darran 403 1.1 darran /* 404 1.1 darran * We need to swap with our parent, and continue up the heap. 405 1.1 darran */ 406 1.1 darran heap[heap_parent] = current; 407 1.1 darran heap[heap_current] = parent; 408 1.1 darran 409 1.1 darran /* 410 1.1 darran * If we just reached the root, we're done. 
411 1.1 darran */ 412 1.1 darran if (heap_parent == 0) 413 1.1 darran return (1); 414 1.1 darran 415 1.1 darran heap_current = heap_parent; 416 1.1 darran heap_parent = CYC_HEAP_PARENT(heap_current); 417 1.1 darran } 418 1.1 darran } 419 1.1 darran 420 1.1 darran static void 421 1.1 darran cyclic_downheap(cyc_cpu_t *cpu, cyc_index_t ndx) 422 1.1 darran { 423 1.1 darran cyclic_t *cyclics = cpu->cyp_cyclics; 424 1.1 darran cyc_index_t *heap = cpu->cyp_heap; 425 1.1 darran 426 1.1 darran cyc_index_t heap_left, heap_right, heap_me = ndx; 427 1.1 darran cyc_index_t left, right, me; 428 1.1 darran cyc_index_t nelems = cpu->cyp_nelems; 429 1.1 darran 430 1.1 darran for (;;) { 431 1.1 darran /* 432 1.1 darran * If we don't have a left child (i.e., we're a leaf), we're 433 1.1 darran * done. 434 1.1 darran */ 435 1.1 darran if ((heap_left = CYC_HEAP_LEFT(heap_me)) >= nelems) 436 1.1 darran return; 437 1.1 darran 438 1.1 darran left = heap[heap_left]; 439 1.1 darran me = heap[heap_me]; 440 1.1 darran 441 1.1 darran heap_right = CYC_HEAP_RIGHT(heap_me); 442 1.1 darran 443 1.1 darran /* 444 1.1 darran * Even if we don't have a right child, we still need to compare 445 1.1 darran * our expiration time against that of our left child. 446 1.1 darran */ 447 1.1 darran if (heap_right >= nelems) 448 1.1 darran goto comp_left; 449 1.1 darran 450 1.1 darran right = heap[heap_right]; 451 1.1 darran 452 1.1 darran /* 453 1.1 darran * We have both a left and a right child. We need to compare 454 1.1 darran * the expiration times of the children to determine which 455 1.1 darran * expires earlier. 456 1.1 darran */ 457 1.1 darran if (cyclics[right].cy_expire < cyclics[left].cy_expire) { 458 1.1 darran /* 459 1.1 darran * Our right child is the earlier of our children. 460 1.1 darran * We'll now compare our expiration time to its; if 461 1.1 darran * ours is the earlier, we're done. 
462 1.1 darran */ 463 1.1 darran if (cyclics[me].cy_expire <= cyclics[right].cy_expire) 464 1.1 darran return; 465 1.1 darran 466 1.1 darran /* 467 1.1 darran * Our right child expires earlier than we do; swap 468 1.1 darran * with our right child, and descend right. 469 1.1 darran */ 470 1.1 darran heap[heap_right] = me; 471 1.1 darran heap[heap_me] = right; 472 1.1 darran heap_me = heap_right; 473 1.1 darran continue; 474 1.1 darran } 475 1.1 darran 476 1.1 darran comp_left: 477 1.1 darran /* 478 1.1 darran * Our left child is the earlier of our children (or we have 479 1.1 darran * no right child). We'll now compare our expiration time 480 1.1 darran * to its; if ours is the earlier, we're done. 481 1.1 darran */ 482 1.1 darran if (cyclics[me].cy_expire <= cyclics[left].cy_expire) 483 1.1 darran return; 484 1.1 darran 485 1.1 darran /* 486 1.1 darran * Our left child expires earlier than we do; swap with our 487 1.1 darran * left child, and descend left. 488 1.1 darran */ 489 1.1 darran heap[heap_left] = me; 490 1.1 darran heap[heap_me] = left; 491 1.1 darran heap_me = heap_left; 492 1.1 darran } 493 1.1 darran } 494 1.1 darran 495 1.1 darran static void 496 1.1 darran cyclic_expire(cyc_cpu_t *cpu, cyc_index_t ndx, cyclic_t *cyclic) 497 1.1 darran { 498 1.1 darran cyc_func_t handler = cyclic->cy_handler; 499 1.1 darran void *arg = cyclic->cy_arg; 500 1.1 darran 501 1.1 darran (*handler)(arg); 502 1.1 darran } 503 1.1 darran 504 1.1 darran /* 505 1.1 darran * cyclic_fire(cpu_t *) 506 1.1 darran * 507 1.1 darran * Overview 508 1.1 darran * 509 1.1 darran * cyclic_fire() is the cyclic subsystem's interrupt handler. 510 1.1 darran * Called by the cyclic backend. 511 1.1 darran * 512 1.1 darran * Arguments and notes 513 1.1 darran * 514 1.1 darran * The only argument is the CPU on which the interrupt is executing; 515 1.1 darran * backends must call into cyclic_fire() on the specified CPU. 
 *
 *	cyclic_fire() may be called spuriously without ill effect.  Optimal
 *	backends will call into cyclic_fire() at or shortly after the time
 *	requested via cyb_reprogram().  However, calling cyclic_fire()
 *	arbitrarily late will only manifest latency bubbles; the correctness
 *	of the cyclic subsystem does not rely on the timeliness of the backend.
 *
 *	cyclic_fire() is wait-free; it will not block or spin.
 *
 * Return values
 *
 *	None.
 *
 */
static void
cyclic_fire(cpu_t *c)
{
	cyc_cpu_t *cpu = c->cpu_cyclic;
	cyc_backend_t *be = cpu->cyp_backend;
	cyc_index_t *heap = cpu->cyp_heap;
	cyclic_t *cyclic, *cyclics = cpu->cyp_cyclics;
	void *arg = be->cyb_arg;
	hrtime_t now = gethrtime();
	hrtime_t exp;

	if (cpu->cyp_nelems == 0) {
		/* This is a spurious fire. */
		return;
	}

	/*
	 * Repeatedly expire the root (earliest) cyclic and downheap until
	 * the root's expiration time lies in the future.
	 */
	for (;;) {
		cyc_index_t ndx = heap[0];

		cyclic = &cyclics[ndx];

		ASSERT(!(cyclic->cy_flags & CYF_FREE));

		if ((exp = cyclic->cy_expire) > now)
			break;

		cyclic_expire(cpu, ndx, cyclic);

		/*
		 * If this cyclic will be set to next expire in the distant
		 * past, we have one of two situations:
		 *
		 *   a)	This is the first firing of a cyclic which had
		 *	cy_expire set to 0.
		 *
		 *   b)	We are tragically late for a cyclic -- most likely
		 *	due to being in the debugger.
		 *
		 * In either case, we set the new expiration time to be the
		 * next interval boundary.  This assures that the
		 * expiration time modulo the interval is invariant.
		 *
		 * We arbitrarily define "distant" to be one second (one second
		 * is chosen because it's shorter than any foray to the
		 * debugger while still being longer than any legitimate
		 * stretch).
		 */
		exp += cyclic->cy_interval;

		if (now - exp > NANOSEC) {
			hrtime_t interval = cyclic->cy_interval;

			/* Round up to the next interval boundary after now. */
			exp += ((now - exp) / interval + 1) * interval;
		}

		cyclic->cy_expire = exp;
		cyclic_downheap(cpu, 0);
	}

	/*
	 * Now we have a cyclic in the root slot which isn't in the past;
	 * reprogram the interrupt source.
	 */
	be->cyb_reprogram(arg, exp);
}

/*
 * Cross-call target for cyclic_expand(): runs on the CPU being resized and
 * swaps in the pre-allocated, larger heap and cyclics arrays.
 */
static void
cyclic_expand_xcall(cyc_xcallarg_t *arg)
{
	cyc_cpu_t *cpu = arg->cyx_cpu;
	cyc_index_t new_size = arg->cyx_size, size = cpu->cyp_size, i;
	cyc_index_t *new_heap = arg->cyx_heap;
	cyclic_t *cyclics = cpu->cyp_cyclics, *new_cyclics = arg->cyx_cyclics;

	/* Disable preemption and interrupts. */
	mtx_lock_spin(&cpu->cyp_mtx);

	/*
	 * Assert that the new size is a power of 2.
	 */
	ASSERT((new_size & (new_size - 1)) == 0);
	ASSERT(new_size == (size << 1));
	ASSERT(cpu->cyp_heap != NULL && cpu->cyp_cyclics != NULL);

	bcopy(cpu->cyp_heap, new_heap, sizeof (cyc_index_t) * size);
	bcopy(cyclics, new_cyclics, sizeof (cyclic_t) * size);

	/*
	 * Set up the free list, and set all of the new cyclics to be CYF_FREE.
	 */
	for (i = size; i < new_size; i++) {
		new_heap[i] = i;
		new_cyclics[i].cy_flags = CYF_FREE;
	}

	/*
	 * We can go ahead and plow the value of cyp_heap and cyp_cyclics;
	 * cyclic_expand() has kept a copy.
	 */
	cpu->cyp_heap = new_heap;
	cpu->cyp_cyclics = new_cyclics;
	cpu->cyp_size = new_size;
	mtx_unlock_spin(&cpu->cyp_mtx);
}

/*
 * cyclic_expand() will cross call onto the CPU to perform the actual
 * expand operation.
 */
static void
cyclic_expand(cyc_cpu_t *cpu)
{
	cyc_index_t new_size, old_size;
	cyc_index_t *new_heap, *old_heap;
	cyclic_t *new_cyclics, *old_cyclics;
	cyc_xcallarg_t arg;
	cyc_backend_t *be = cpu->cyp_backend;

	/* Adds (and thus resizes) are serialized on cpu_lock. */
	ASSERT(MUTEX_HELD(&cpu_lock));

	old_heap = cpu->cyp_heap;
	old_cyclics = cpu->cyp_cyclics;

	/* Double the current size; a size of 0 means first-time setup. */
	if ((new_size = ((old_size = cpu->cyp_size) << 1)) == 0) {
		new_size = CY_DEFAULT_PERCPU;
		ASSERT(old_heap == NULL && old_cyclics == NULL);
	}

	/*
	 * Check that the new_size is a power of 2.
	 */
	ASSERT(((new_size - 1) & new_size) == 0);

	new_heap = malloc(sizeof(cyc_index_t) * new_size, M_CYCLIC, M_WAITOK);
	new_cyclics = malloc(sizeof(cyclic_t) * new_size, M_CYCLIC, M_ZERO | M_WAITOK);

	arg.cyx_cpu = cpu;
	arg.cyx_heap = new_heap;
	arg.cyx_cyclics = new_cyclics;
	arg.cyx_size = new_size;

	/* Swap the new arrays in on the target CPU, then free the old ones. */
	be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu,
	    (cyc_func_t)cyclic_expand_xcall, &arg);

	if (old_cyclics != NULL) {
		ASSERT(old_heap != NULL);
		ASSERT(old_size != 0);
		free(old_cyclics, M_CYCLIC);
		free(old_heap, M_CYCLIC);
	}
}

/*
 * Cross-call target for cyclic_add_here(): runs on the target CPU and
 * installs the new cyclic in the first free slot, upheaping as needed.
 * The chosen index is returned through arg->cyx_ndx.
 */
static void
cyclic_add_xcall(cyc_xcallarg_t *arg)
{
	cyc_cpu_t *cpu = arg->cyx_cpu;
	cyc_handler_t *hdlr = arg->cyx_hdlr;
	cyc_time_t *when = arg->cyx_when;
	cyc_backend_t *be = cpu->cyp_backend;
	cyc_index_t ndx, nelems;
	cyb_arg_t bar = be->cyb_arg;
	cyclic_t *cyclic;

	/* cyclic_add_here() guarantees a free slot before cross-calling. */
	ASSERT(cpu->cyp_nelems < cpu->cyp_size);

	/* Disable preemption and interrupts. */
	mtx_lock_spin(&cpu->cyp_mtx);
	nelems = cpu->cyp_nelems++;

	if (nelems == 0) {
		/*
		 * If this is the first element, we need to enable the
		 * backend on this CPU.
		 */
		be->cyb_enable(bar);
	}

	/* The free list lives in the heap beyond cyp_nelems. */
	ndx = cpu->cyp_heap[nelems];
	cyclic = &cpu->cyp_cyclics[ndx];

	ASSERT(cyclic->cy_flags == CYF_FREE);
	cyclic->cy_interval = when->cyt_interval;

	if (when->cyt_when == 0) {
		/*
		 * If a start time hasn't been explicitly specified, we'll
		 * start on the next interval boundary.
		 */
		cyclic->cy_expire = (gethrtime() / cyclic->cy_interval + 1) *
		    cyclic->cy_interval;
	} else {
		cyclic->cy_expire = when->cyt_when;
	}

	cyclic->cy_handler = hdlr->cyh_func;
	cyclic->cy_arg = hdlr->cyh_arg;
	cyclic->cy_flags = arg->cyx_flags;

	if (cyclic_upheap(cpu, nelems)) {
		hrtime_t exp = cyclic->cy_expire;

		/*
		 * If our upheap propagated to the root, we need to
		 * reprogram the interrupt source.
		 */
		be->cyb_reprogram(bar, exp);
	}
	mtx_unlock_spin(&cpu->cyp_mtx);

	/* Report the slot we used back to cyclic_add_here(). */
	arg->cyx_ndx = ndx;
}

/*
 * Add a cyclic to the given (online) CPU, expanding its arrays first if
 * they are full.  Returns the index of the newly added cyclic.  Callers
 * must hold cpu_lock.
 */
static cyc_index_t
cyclic_add_here(cyc_cpu_t *cpu, cyc_handler_t *hdlr,
    cyc_time_t *when, uint16_t flags)
{
	cyc_backend_t *be = cpu->cyp_backend;
	cyb_arg_t bar = be->cyb_arg;
	cyc_xcallarg_t arg;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(!(cpu->cyp_cpu->cpu_flags & CPU_OFFLINE));
	ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0);

	if (cpu->cyp_nelems == cpu->cyp_size) {
		/*
		 * This is expensive; it will cross call onto the other
		 * CPU to perform the expansion.
		 */
		cyclic_expand(cpu);
		ASSERT(cpu->cyp_nelems < cpu->cyp_size);
	}

	/*
	 * By now, we know that we're going to be able to successfully
	 * perform the add.  Now cross call over to the CPU of interest to
	 * actually add our cyclic.
	 */
	arg.cyx_cpu = cpu;
	arg.cyx_hdlr = hdlr;
	arg.cyx_when = when;
	arg.cyx_flags = flags;

	be->cyb_xcall(bar, cpu->cyp_cpu, (cyc_func_t)cyclic_add_xcall, &arg);

	return (arg.cyx_ndx);
}

/*
 * Cross-call target for cyclic removal: runs on the cyclic's CPU, frees
 * its slot, and repairs the heap.  If arg->cyx_when is non-NULL the old
 * expiration time/interval are saved there (for juggling to another CPU).
 */
static void
cyclic_remove_xcall(cyc_xcallarg_t *arg)
{
	cyc_cpu_t *cpu = arg->cyx_cpu;
	cyc_backend_t *be = cpu->cyp_backend;
	cyb_arg_t bar = be->cyb_arg;
	cyc_index_t ndx = arg->cyx_ndx, nelems = cpu->cyp_nelems, i;
	cyc_index_t *heap = cpu->cyp_heap, last;
	cyclic_t *cyclic;

	ASSERT(nelems > 0);

	/* Disable preemption and interrupts. */
	mtx_lock_spin(&cpu->cyp_mtx);
	cyclic = &cpu->cyp_cyclics[ndx];

	/*
	 * Grab the current expiration time.  If this cyclic is being
	 * removed as part of a juggling operation, the expiration time
	 * will be used when the cyclic is added to the new CPU.
	 */
	if (arg->cyx_when != NULL) {
		arg->cyx_when->cyt_when = cyclic->cy_expire;
		arg->cyx_when->cyt_interval = cyclic->cy_interval;
	}

	/*
	 * Now set the flags to CYF_FREE.  We don't need a membar_enter()
	 * between zeroing pend and setting the flags because we're at
	 * CY_HIGH_LEVEL (that is, the zeroing of pend and the setting
	 * of cy_flags appear atomic to softints).
809 1.3 chs */ 810 1.1 darran cyclic->cy_flags = CYF_FREE; 811 1.1 darran 812 1.1 darran for (i = 0; i < nelems; i++) { 813 1.1 darran if (heap[i] == ndx) 814 1.1 darran break; 815 1.1 darran } 816 1.1 darran 817 1.1 darran if (i == nelems) 818 1.1 darran panic("attempt to remove non-existent cyclic"); 819 1.1 darran 820 1.1 darran cpu->cyp_nelems = --nelems; 821 1.1 darran 822 1.3 chs if (nelems == 0) { 823 1.1 darran /* 824 1.1 darran * If we just removed the last element, then we need to 825 1.1 darran * disable the backend on this CPU. 826 1.1 darran */ 827 1.3 chs be->cyb_disable(bar); 828 1.3 chs } 829 1.1 darran 830 1.3 chs if (i == nelems) { 831 1.1 darran /* 832 1.1 darran * If we just removed the last element of the heap, then 833 1.1 darran * we don't have to downheap. 834 1.1 darran */ 835 1.3 chs goto out; 836 1.3 chs } 837 1.1 darran 838 1.1 darran /* 839 1.1 darran * Swap the last element of the heap with the one we want to 840 1.1 darran * remove, and downheap (this has the implicit effect of putting 841 1.1 darran * the newly freed element on the free list). 842 1.1 darran */ 843 1.1 darran heap[i] = (last = heap[nelems]); 844 1.1 darran heap[nelems] = ndx; 845 1.1 darran 846 1.3 chs if (i == 0) { 847 1.1 darran cyclic_downheap(cpu, 0); 848 1.3 chs } else { 849 1.1 darran if (cyclic_upheap(cpu, i) == 0) { 850 1.1 darran /* 851 1.1 darran * The upheap didn't propagate to the root; if it 852 1.1 darran * didn't propagate at all, we need to downheap. 853 1.1 darran */ 854 1.3 chs if (heap[i] == last) { 855 1.1 darran cyclic_downheap(cpu, i); 856 1.3 chs } 857 1.3 chs goto out; 858 1.1 darran } 859 1.1 darran } 860 1.1 darran 861 1.1 darran /* 862 1.1 darran * We're here because we changed the root; we need to reprogram 863 1.1 darran * the clock source. 
864 1.1 darran */ 865 1.1 darran cyclic = &cpu->cyp_cyclics[heap[0]]; 866 1.1 darran 867 1.1 darran ASSERT(nelems != 0); 868 1.3 chs be->cyb_reprogram(bar, cyclic->cy_expire); 869 1.3 chs out: 870 1.3 chs mtx_unlock_spin(&cpu->cyp_mtx); 871 1.3 chs } 872 1.3 chs 873 1.3 chs static int 874 1.3 chs cyclic_remove_here(cyc_cpu_t *cpu, cyc_index_t ndx, cyc_time_t *when, int wait) 875 1.3 chs { 876 1.3 chs cyc_backend_t *be = cpu->cyp_backend; 877 1.3 chs cyc_xcallarg_t arg; 878 1.3 chs 879 1.3 chs ASSERT(MUTEX_HELD(&cpu_lock)); 880 1.3 chs ASSERT(wait == CY_WAIT || wait == CY_NOWAIT); 881 1.3 chs 882 1.3 chs arg.cyx_ndx = ndx; 883 1.3 chs arg.cyx_cpu = cpu; 884 1.3 chs arg.cyx_when = when; 885 1.3 chs arg.cyx_wait = wait; 886 1.1 darran 887 1.3 chs be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu, 888 1.3 chs (cyc_func_t)cyclic_remove_xcall, &arg); 889 1.1 darran 890 1.1 darran return (1); 891 1.1 darran } 892 1.1 darran 893 1.1 darran static void 894 1.1 darran cyclic_configure(cpu_t *c) 895 1.1 darran { 896 1.1 darran cyc_cpu_t *cpu = malloc(sizeof(cyc_cpu_t), M_CYCLIC, M_ZERO | M_WAITOK); 897 1.1 darran cyc_backend_t *nbe = malloc(sizeof(cyc_backend_t), M_CYCLIC, M_ZERO | M_WAITOK); 898 1.1 darran 899 1.1 darran ASSERT(MUTEX_HELD(&cpu_lock)); 900 1.1 darran 901 1.1 darran if (cyclic_id_cache == NULL) 902 1.4 chs cyclic_id_cache = kmem_cache_create(__UNCONST("cyclic_id_cache"), 903 1.1 darran sizeof (cyc_id_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 904 1.1 darran 905 1.1 darran cpu->cyp_cpu = c; 906 1.1 darran 907 1.1 darran cpu->cyp_size = 1; 908 1.1 darran cpu->cyp_heap = malloc(sizeof(cyc_index_t), M_CYCLIC, M_ZERO | M_WAITOK); 909 1.1 darran cpu->cyp_cyclics = malloc(sizeof(cyclic_t), M_CYCLIC, M_ZERO | M_WAITOK); 910 1.1 darran cpu->cyp_cyclics->cy_flags = CYF_FREE; 911 1.1 darran 912 1.1 darran mtx_init(&cpu->cyp_mtx, "cyclic cpu", NULL, MTX_SPIN); 913 1.1 darran 914 1.1 darran /* 915 1.1 darran * Setup the backend for this CPU. 
916 1.1 darran */ 917 1.1 darran bcopy(&cyclic_backend, nbe, sizeof (cyc_backend_t)); 918 1.1 darran if (nbe->cyb_configure != NULL) 919 1.1 darran nbe->cyb_arg = nbe->cyb_configure(c); 920 1.1 darran cpu->cyp_backend = nbe; 921 1.1 darran 922 1.1 darran /* 923 1.1 darran * On platforms where stray interrupts may be taken during startup, 924 1.1 darran * the CPU's cpu_cyclic pointer serves as an indicator that the 925 1.1 darran * cyclic subsystem for this CPU is prepared to field interrupts. 926 1.1 darran */ 927 1.1 darran membar_producer(); 928 1.1 darran 929 1.1 darran c->cpu_cyclic = cpu; 930 1.1 darran } 931 1.1 darran 932 1.1 darran static void 933 1.1 darran cyclic_unconfigure(cpu_t *c) 934 1.1 darran { 935 1.1 darran cyc_cpu_t *cpu = c->cpu_cyclic; 936 1.1 darran cyc_backend_t *be = cpu->cyp_backend; 937 1.1 darran cyb_arg_t bar = be->cyb_arg; 938 1.1 darran 939 1.1 darran ASSERT(MUTEX_HELD(&cpu_lock)); 940 1.1 darran 941 1.1 darran c->cpu_cyclic = NULL; 942 1.1 darran 943 1.1 darran /* 944 1.1 darran * Let the backend know that the CPU is being yanked, and free up 945 1.1 darran * the backend structure. 946 1.1 darran */ 947 1.1 darran if (be->cyb_unconfigure != NULL) 948 1.1 darran be->cyb_unconfigure(bar); 949 1.1 darran free(be, M_CYCLIC); 950 1.1 darran cpu->cyp_backend = NULL; 951 1.1 darran 952 1.1 darran mtx_destroy(&cpu->cyp_mtx); 953 1.1 darran 954 1.1 darran /* Finally, clean up our remaining dynamic structures. 
*/ 955 1.1 darran free(cpu->cyp_cyclics, M_CYCLIC); 956 1.1 darran free(cpu->cyp_heap, M_CYCLIC); 957 1.1 darran free(cpu, M_CYCLIC); 958 1.1 darran } 959 1.1 darran 960 1.1 darran static void 961 1.1 darran cyclic_omni_start(cyc_id_t *idp, cyc_cpu_t *cpu) 962 1.1 darran { 963 1.1 darran cyc_omni_handler_t *omni = &idp->cyi_omni_hdlr; 964 1.1 darran cyc_omni_cpu_t *ocpu = malloc(sizeof(cyc_omni_cpu_t), M_CYCLIC , M_WAITOK); 965 1.1 darran cyc_handler_t hdlr; 966 1.1 darran cyc_time_t when; 967 1.1 darran 968 1.1 darran ASSERT(MUTEX_HELD(&cpu_lock)); 969 1.1 darran ASSERT(idp->cyi_cpu == NULL); 970 1.1 darran 971 1.1 darran hdlr.cyh_func = NULL; 972 1.1 darran hdlr.cyh_arg = NULL; 973 1.1 darran 974 1.1 darran when.cyt_when = 0; 975 1.1 darran when.cyt_interval = 0; 976 1.1 darran 977 1.1 darran omni->cyo_online(omni->cyo_arg, cpu->cyp_cpu, &hdlr, &when); 978 1.1 darran 979 1.1 darran ASSERT(hdlr.cyh_func != NULL); 980 1.1 darran ASSERT(when.cyt_when >= 0 && when.cyt_interval > 0); 981 1.1 darran 982 1.1 darran ocpu->cyo_cpu = cpu; 983 1.1 darran ocpu->cyo_arg = hdlr.cyh_arg; 984 1.1 darran ocpu->cyo_ndx = cyclic_add_here(cpu, &hdlr, &when, 0); 985 1.1 darran ocpu->cyo_next = idp->cyi_omni_list; 986 1.1 darran idp->cyi_omni_list = ocpu; 987 1.1 darran } 988 1.1 darran 989 1.1 darran static void 990 1.1 darran cyclic_omni_stop(cyc_id_t *idp, cyc_cpu_t *cpu) 991 1.1 darran { 992 1.1 darran cyc_omni_handler_t *omni = &idp->cyi_omni_hdlr; 993 1.1 darran cyc_omni_cpu_t *ocpu = idp->cyi_omni_list, *prev = NULL; 994 1.1 darran 995 1.1 darran ASSERT(MUTEX_HELD(&cpu_lock)); 996 1.1 darran ASSERT(idp->cyi_cpu == NULL); 997 1.1 darran ASSERT(ocpu != NULL); 998 1.1 darran 999 1.1 darran while (ocpu != NULL && ocpu->cyo_cpu != cpu) { 1000 1.1 darran prev = ocpu; 1001 1.1 darran ocpu = ocpu->cyo_next; 1002 1.1 darran } 1003 1.1 darran 1004 1.1 darran /* 1005 1.10 rillig * We _must_ have found a cyc_omni_cpu which corresponds to this 1006 1.1 darran * CPU -- the definition of an 
omnipresent cyclic is that it runs 1007 1.1 darran * on all online CPUs. 1008 1.1 darran */ 1009 1.1 darran ASSERT(ocpu != NULL); 1010 1.1 darran 1011 1.1 darran if (prev == NULL) { 1012 1.1 darran idp->cyi_omni_list = ocpu->cyo_next; 1013 1.1 darran } else { 1014 1.1 darran prev->cyo_next = ocpu->cyo_next; 1015 1.1 darran } 1016 1.1 darran 1017 1.1 darran (void) cyclic_remove_here(ocpu->cyo_cpu, ocpu->cyo_ndx, NULL, CY_WAIT); 1018 1.1 darran 1019 1.1 darran /* 1020 1.1 darran * The cyclic has been removed from this CPU; time to call the 1021 1.1 darran * omnipresent offline handler. 1022 1.1 darran */ 1023 1.1 darran if (omni->cyo_offline != NULL) 1024 1.1 darran omni->cyo_offline(omni->cyo_arg, cpu->cyp_cpu, ocpu->cyo_arg); 1025 1.1 darran 1026 1.1 darran free(ocpu, M_CYCLIC); 1027 1.1 darran } 1028 1.1 darran 1029 1.1 darran static cyc_id_t * 1030 1.1 darran cyclic_new_id(void) 1031 1.1 darran { 1032 1.1 darran cyc_id_t *idp; 1033 1.1 darran 1034 1.1 darran ASSERT(MUTEX_HELD(&cpu_lock)); 1035 1.1 darran 1036 1.1 darran idp = kmem_cache_alloc(cyclic_id_cache, KM_SLEEP); 1037 1.1 darran 1038 1.1 darran /* 1039 1.1 darran * The cyi_cpu field of the cyc_id_t structure tracks the CPU 1040 1.1 darran * associated with the cyclic. If and only if this field is NULL, the 1041 1.1 darran * cyc_id_t is an omnipresent cyclic. Note that cyi_omni_list may be 1042 1.1 darran * NULL for an omnipresent cyclic while the cyclic is being created 1043 1.1 darran * or destroyed. 
1044 1.1 darran */ 1045 1.1 darran idp->cyi_cpu = NULL; 1046 1.1 darran idp->cyi_ndx = 0; 1047 1.1 darran 1048 1.1 darran idp->cyi_next = cyclic_id_head; 1049 1.1 darran idp->cyi_prev = NULL; 1050 1.1 darran idp->cyi_omni_list = NULL; 1051 1.1 darran 1052 1.1 darran if (cyclic_id_head != NULL) { 1053 1.1 darran ASSERT(cyclic_id_head->cyi_prev == NULL); 1054 1.1 darran cyclic_id_head->cyi_prev = idp; 1055 1.1 darran } 1056 1.1 darran 1057 1.1 darran cyclic_id_head = idp; 1058 1.1 darran 1059 1.1 darran return (idp); 1060 1.1 darran } 1061 1.1 darran 1062 1.1 darran /* 1063 1.1 darran * cyclic_id_t cyclic_add(cyc_handler_t *, cyc_time_t *) 1064 1.1 darran * 1065 1.1 darran * Overview 1066 1.1 darran * 1067 1.1 darran * cyclic_add() will create an unbound cyclic with the specified handler and 1068 1.1 darran * interval. The cyclic will run on a CPU which both has interrupts enabled 1069 1.1 darran * and is in the system CPU partition. 1070 1.1 darran * 1071 1.1 darran * Arguments and notes 1072 1.1 darran * 1073 1.1 darran * As its first argument, cyclic_add() takes a cyc_handler, which has the 1074 1.1 darran * following members: 1075 1.1 darran * 1076 1.1 darran * cyc_func_t cyh_func <-- Cyclic handler 1077 1.1 darran * void *cyh_arg <-- Argument to cyclic handler 1078 1.1 darran * 1079 1.1 darran * In addition to a cyc_handler, cyclic_add() takes a cyc_time, which 1080 1.1 darran * has the following members: 1081 1.1 darran * 1082 1.1 darran * hrtime_t cyt_when <-- Absolute time, in nanoseconds since boot, at 1083 1.1 darran * which to start firing 1084 1.1 darran * hrtime_t cyt_interval <-- Length of interval, in nanoseconds 1085 1.1 darran * 1086 1.1 darran * gethrtime() is the time source for nanoseconds since boot. If cyt_when 1087 1.1 darran * is set to 0, the cyclic will start to fire when cyt_interval next 1088 1.1 darran * divides the number of nanoseconds since boot. 
1089 1.1 darran * 1090 1.1 darran * The cyt_interval field _must_ be filled in by the caller; one-shots are 1091 1.1 darran * _not_ explicitly supported by the cyclic subsystem (cyclic_add() will 1092 1.1 darran * assert that cyt_interval is non-zero). The maximum value for either 1093 1.1 darran * field is INT64_MAX; the caller is responsible for assuring that 1094 1.1 darran * cyt_when + cyt_interval <= INT64_MAX. Neither field may be negative. 1095 1.1 darran * 1096 1.1 darran * For an arbitrary time t in the future, the cyclic handler is guaranteed 1097 1.1 darran * to have been called (t - cyt_when) / cyt_interval times. This will 1098 1.1 darran * be true even if interrupts have been disabled for periods greater than 1099 1.1 darran * cyt_interval nanoseconds. In order to compensate for such periods, 1100 1.1 darran * the cyclic handler may be called a finite number of times with an 1101 1.1 darran * arbitrarily small interval. 1102 1.1 darran * 1103 1.1 darran * The cyclic subsystem will not enforce any lower bound on the interval; 1104 1.1 darran * if the interval is less than the time required to process an interrupt, 1105 1.1 darran * the CPU will wedge. It's the responsibility of the caller to assure that 1106 1.1 darran * either the value of the interval is sane, or that its caller has 1107 1.1 darran * sufficient privilege to deny service (i.e. its caller is root). 1108 1.1 darran * 1109 1.1 darran * Return value 1110 1.1 darran * 1111 1.1 darran * cyclic_add() returns a cyclic_id_t, which is guaranteed to be a value 1112 1.1 darran * other than CYCLIC_NONE. cyclic_add() cannot fail. 1113 1.1 darran * 1114 1.1 darran * Caller's context 1115 1.1 darran * 1116 1.1 darran * cpu_lock must be held by the caller, and the caller must not be in 1117 1.1 darran * interrupt context. cyclic_add() will perform a KM_SLEEP kernel 1118 1.1 darran * memory allocation, so the usual rules (e.g. p_lock cannot be held) 1119 1.1 darran * apply. 
A cyclic may be added even in the presence of CPUs that have 1120 1.1 darran * not been configured with respect to the cyclic subsystem, but only 1121 1.1 darran * configured CPUs will be eligible to run the new cyclic. 1122 1.1 darran * 1123 1.1 darran * Cyclic handler's context 1124 1.1 darran * 1125 1.1 darran * Cyclic handlers will be executed in the interrupt context corresponding 1126 1.1 darran * to the specified level (i.e. either high, lock or low level). The 1127 1.1 darran * usual context rules apply. 1128 1.1 darran * 1129 1.1 darran * A cyclic handler may not grab ANY locks held by the caller of any of 1130 1.1 darran * cyclic_add() or cyclic_remove(); the implementation of these functions 1131 1.1 darran * may require blocking on cyclic handler completion. 1132 1.1 darran * Moreover, cyclic handlers may not make any call back into the cyclic 1133 1.1 darran * subsystem. 1134 1.1 darran */ 1135 1.1 darran cyclic_id_t 1136 1.1 darran cyclic_add(cyc_handler_t *hdlr, cyc_time_t *when) 1137 1.1 darran { 1138 1.1 darran cyc_id_t *idp = cyclic_new_id(); 1139 1.4 chs solaris_cpu_t *c = &solaris_cpu[cpu_number()]; 1140 1.1 darran 1141 1.1 darran ASSERT(MUTEX_HELD(&cpu_lock)); 1142 1.1 darran ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0); 1143 1.1 darran 1144 1.1 darran idp->cyi_cpu = c->cpu_cyclic; 1145 1.1 darran idp->cyi_ndx = cyclic_add_here(idp->cyi_cpu, hdlr, when, 0); 1146 1.1 darran 1147 1.1 darran return ((uintptr_t)idp); 1148 1.1 darran } 1149 1.1 darran 1150 1.1 darran /* 1151 1.1 darran * cyclic_id_t cyclic_add_omni(cyc_omni_handler_t *) 1152 1.1 darran * 1153 1.1 darran * Overview 1154 1.1 darran * 1155 1.1 darran * cyclic_add_omni() will create an omnipresent cyclic with the specified 1156 1.1 darran * online and offline handlers. Omnipresent cyclics run on all online 1157 1.1 darran * CPUs, including CPUs which have unbound interrupts disabled. 
1158 1.1 darran * 1159 1.1 darran * Arguments 1160 1.1 darran * 1161 1.1 darran * As its only argument, cyclic_add_omni() takes a cyc_omni_handler, which 1162 1.1 darran * has the following members: 1163 1.1 darran * 1164 1.1 darran * void (*cyo_online)() <-- Online handler 1165 1.1 darran * void (*cyo_offline)() <-- Offline handler 1166 1.1 darran * void *cyo_arg <-- Argument to be passed to on/offline handlers 1167 1.1 darran * 1168 1.1 darran * Online handler 1169 1.1 darran * 1170 1.1 darran * The cyo_online member is a pointer to a function which has the following 1171 1.1 darran * four arguments: 1172 1.1 darran * 1173 1.1 darran * void * <-- Argument (cyo_arg) 1174 1.1 darran * cpu_t * <-- Pointer to CPU about to be onlined 1175 1.1 darran * cyc_handler_t * <-- Pointer to cyc_handler_t; must be filled in 1176 1.1 darran * by omni online handler 1177 1.1 darran * cyc_time_t * <-- Pointer to cyc_time_t; must be filled in by 1178 1.1 darran * omni online handler 1179 1.1 darran * 1180 1.1 darran * The omni cyclic online handler is always called _before_ the omni 1181 1.1 darran * cyclic begins to fire on the specified CPU. As the above argument 1182 1.1 darran * description implies, the online handler must fill in the two structures 1183 1.1 darran * passed to it: the cyc_handler_t and the cyc_time_t. These are the 1184 1.1 darran * same two structures passed to cyclic_add(), outlined above. This 1185 1.1 darran * allows the omni cyclic to have maximum flexibility; different CPUs may 1186 1.1 darran * optionally 1187 1.1 darran * 1188 1.1 darran * (a) have different intervals 1189 1.1 darran * (b) be explicitly in or out of phase with one another 1190 1.1 darran * (c) have different handlers 1191 1.1 darran * (d) have different handler arguments 1192 1.1 darran * (e) fire at different levels 1193 1.1 darran * 1194 1.1 darran * Of these, (e) seems somewhat dubious, but is nonetheless allowed. 
1195 1.1 darran * 1196 1.1 darran * The omni online handler is called in the same context as cyclic_add(), 1197 1.1 darran * and has the same liberties: omni online handlers may perform KM_SLEEP 1198 1.1 darran * kernel memory allocations, and may grab locks which are also acquired 1199 1.1 darran * by cyclic handlers. However, omni cyclic online handlers may _not_ 1200 1.1 darran * call back into the cyclic subsystem, and should be generally careful 1201 1.1 darran * about calling into arbitrary kernel subsystems. 1202 1.1 darran * 1203 1.1 darran * Offline handler 1204 1.1 darran * 1205 1.1 darran * The cyo_offline member is a pointer to a function which has the following 1206 1.1 darran * three arguments: 1207 1.1 darran * 1208 1.1 darran * void * <-- Argument (cyo_arg) 1209 1.1 darran * cpu_t * <-- Pointer to CPU about to be offlined 1210 1.1 darran * void * <-- CPU's cyclic argument (that is, value 1211 1.1 darran * to which cyh_arg member of the cyc_handler_t 1212 1.1 darran * was set in the omni online handler) 1213 1.1 darran * 1214 1.1 darran * The omni cyclic offline handler is always called _after_ the omni 1215 1.1 darran * cyclic has ceased firing on the specified CPU. Its purpose is to 1216 1.1 darran * allow cleanup of any resources dynamically allocated in the omni cyclic 1217 1.1 darran * online handler. The context of the offline handler is identical to 1218 1.1 darran * that of the online handler; the same constraints and liberties apply. 1219 1.1 darran * 1220 1.1 darran * The offline handler is optional; it may be NULL. 1221 1.1 darran * 1222 1.1 darran * Return value 1223 1.1 darran * 1224 1.1 darran * cyclic_add_omni() returns a cyclic_id_t, which is guaranteed to be a 1225 1.1 darran * value other than CYCLIC_NONE. cyclic_add_omni() cannot fail. 1226 1.1 darran * 1227 1.1 darran * Caller's context 1228 1.1 darran * 1229 1.1 darran * The caller's context is identical to that of cyclic_add(), specified 1230 1.1 darran * above. 
1231 1.1 darran */ 1232 1.1 darran cyclic_id_t 1233 1.1 darran cyclic_add_omni(cyc_omni_handler_t *omni) 1234 1.1 darran { 1235 1.1 darran cyc_id_t *idp = cyclic_new_id(); 1236 1.1 darran cyc_cpu_t *cpu; 1237 1.1 darran cpu_t *c; 1238 1.1 darran int i; 1239 1.1 darran 1240 1.1 darran ASSERT(MUTEX_HELD(&cpu_lock)); 1241 1.1 darran ASSERT(omni != NULL && omni->cyo_online != NULL); 1242 1.1 darran 1243 1.1 darran idp->cyi_omni_hdlr = *omni; 1244 1.1 darran 1245 1.3 chs CPU_FOREACH(i) { 1246 1.4 chs i = cpu_index(ci); 1247 1.1 darran c = &solaris_cpu[i]; 1248 1.1 darran if ((cpu = c->cpu_cyclic) == NULL) 1249 1.1 darran continue; 1250 1.1 darran cyclic_omni_start(idp, cpu); 1251 1.1 darran } 1252 1.1 darran 1253 1.1 darran /* 1254 1.1 darran * We must have found at least one online CPU on which to run 1255 1.1 darran * this cyclic. 1256 1.1 darran */ 1257 1.1 darran ASSERT(idp->cyi_omni_list != NULL); 1258 1.1 darran ASSERT(idp->cyi_cpu == NULL); 1259 1.1 darran 1260 1.1 darran return ((uintptr_t)idp); 1261 1.1 darran } 1262 1.1 darran 1263 1.1 darran /* 1264 1.1 darran * void cyclic_remove(cyclic_id_t) 1265 1.1 darran * 1266 1.1 darran * Overview 1267 1.1 darran * 1268 1.1 darran * cyclic_remove() will remove the specified cyclic from the system. 1269 1.1 darran * 1270 1.1 darran * Arguments and notes 1271 1.1 darran * 1272 1.1 darran * The only argument is a cyclic_id returned from either cyclic_add() or 1273 1.1 darran * cyclic_add_omni(). 1274 1.1 darran * 1275 1.1 darran * By the time cyclic_remove() returns, the caller is guaranteed that the 1276 1.1 darran * removed cyclic handler has completed execution (this is the same 1277 1.1 darran * semantic that untimeout() provides). As a result, cyclic_remove() may 1278 1.1 darran * need to block, waiting for the removed cyclic to complete execution. 
1279 1.1 darran * This leads to an important constraint on the caller: no lock may be 1280 1.1 darran * held across cyclic_remove() that also may be acquired by a cyclic 1281 1.1 darran * handler. 1282 1.1 darran * 1283 1.1 darran * Return value 1284 1.1 darran * 1285 1.1 darran * None; cyclic_remove() always succeeds. 1286 1.1 darran * 1287 1.1 darran * Caller's context 1288 1.1 darran * 1289 1.1 darran * cpu_lock must be held by the caller, and the caller must not be in 1290 1.1 darran * interrupt context. The caller may not hold any locks which are also 1291 1.1 darran * grabbed by any cyclic handler. See "Arguments and notes", above. 1292 1.1 darran */ 1293 1.1 darran void 1294 1.1 darran cyclic_remove(cyclic_id_t id) 1295 1.1 darran { 1296 1.1 darran cyc_id_t *idp = (cyc_id_t *)id; 1297 1.1 darran cyc_id_t *prev = idp->cyi_prev, *next = idp->cyi_next; 1298 1.1 darran cyc_cpu_t *cpu = idp->cyi_cpu; 1299 1.1 darran 1300 1.1 darran ASSERT(MUTEX_HELD(&cpu_lock)); 1301 1.1 darran 1302 1.1 darran if (cpu != NULL) { 1303 1.1 darran (void) cyclic_remove_here(cpu, idp->cyi_ndx, NULL, CY_WAIT); 1304 1.1 darran } else { 1305 1.1 darran ASSERT(idp->cyi_omni_list != NULL); 1306 1.1 darran while (idp->cyi_omni_list != NULL) 1307 1.1 darran cyclic_omni_stop(idp, idp->cyi_omni_list->cyo_cpu); 1308 1.1 darran } 1309 1.1 darran 1310 1.1 darran if (prev != NULL) { 1311 1.1 darran ASSERT(cyclic_id_head != idp); 1312 1.1 darran prev->cyi_next = next; 1313 1.1 darran } else { 1314 1.1 darran ASSERT(cyclic_id_head == idp); 1315 1.1 darran cyclic_id_head = next; 1316 1.1 darran } 1317 1.1 darran 1318 1.1 darran if (next != NULL) 1319 1.1 darran next->cyi_prev = prev; 1320 1.1 darran 1321 1.1 darran kmem_cache_free(cyclic_id_cache, idp); 1322 1.1 darran } 1323 1.1 darran 1324 1.1 darran static void 1325 1.1 darran cyclic_init(cyc_backend_t *be) 1326 1.1 darran { 1327 1.1 darran ASSERT(MUTEX_HELD(&cpu_lock)); 1328 1.1 darran 1329 1.1 darran /* 1330 1.1 darran * Copy the passed 
cyc_backend into the backend template. This must 1331 1.1 darran * be done before the CPU can be configured. 1332 1.1 darran */ 1333 1.1 darran bcopy(be, &cyclic_backend, sizeof (cyc_backend_t)); 1334 1.1 darran 1335 1.4 chs cyclic_configure(&solaris_cpu[cpu_number()]); 1336 1.1 darran } 1337 1.1 darran 1338 1.1 darran /* 1339 1.1 darran * It is assumed that cyclic_mp_init() is called some time after cyclic 1340 1.1 darran * init (and therefore, after cpu0 has been initialized). We grab cpu_lock, 1341 1.1 darran * find the already initialized CPU, and initialize every other CPU with the 1342 1.1 darran * same backend. 1343 1.1 darran */ 1344 1.1 darran static void 1345 1.1 darran cyclic_mp_init(void) 1346 1.1 darran { 1347 1.1 darran cpu_t *c; 1348 1.1 darran int i; 1349 1.1 darran 1350 1.4 chs #ifndef __NetBSD__ 1351 1.1 darran mutex_enter(&cpu_lock); 1352 1.4 chs #endif 1353 1.1 darran 1354 1.3 chs CPU_FOREACH(i) { 1355 1.4 chs i = cpu_index(ci); 1356 1.1 darran c = &solaris_cpu[i]; 1357 1.1 darran if (c->cpu_cyclic == NULL) 1358 1.1 darran cyclic_configure(c); 1359 1.1 darran } 1360 1.1 darran 1361 1.4 chs #ifndef __NetBSD__ 1362 1.1 darran mutex_exit(&cpu_lock); 1363 1.4 chs #endif 1364 1.1 darran } 1365 1.1 darran 1366 1.1 darran static void 1367 1.1 darran cyclic_uninit(void) 1368 1.1 darran { 1369 1.1 darran cpu_t *c; 1370 1.1 darran int id; 1371 1.1 darran 1372 1.3 chs CPU_FOREACH(id) { 1373 1.4 chs id = cpu_index(ci); 1374 1.1 darran c = &solaris_cpu[id]; 1375 1.1 darran if (c->cpu_cyclic == NULL) 1376 1.1 darran continue; 1377 1.1 darran cyclic_unconfigure(c); 1378 1.1 darran } 1379 1.1 darran 1380 1.1 darran if (cyclic_id_cache != NULL) 1381 1.1 darran kmem_cache_destroy(cyclic_id_cache); 1382 1.1 darran } 1383 1.1 darran 1384 1.1 darran #include "cyclic_machdep.c" 1385 1.1 darran 1386 1.1 darran /* 1387 1.1 darran * Cyclic subsystem initialisation. 
1388 1.1 darran */ 1389 1.1 darran static void 1390 1.1 darran cyclic_load(void *dummy) 1391 1.1 darran { 1392 1.1 darran mutex_enter(&cpu_lock); 1393 1.1 darran 1394 1.1 darran /* Initialise the machine-dependent backend. */ 1395 1.1 darran cyclic_machdep_init(); 1396 1.1 darran 1397 1.1 darran mutex_exit(&cpu_lock); 1398 1.1 darran } 1399 1.1 darran 1400 1.1 darran SYSINIT(cyclic_register, SI_SUB_CYCLIC, SI_ORDER_SECOND, cyclic_load, NULL); 1401 1.1 darran 1402 1.1 darran static void 1403 1.1 darran cyclic_unload(void) 1404 1.1 darran { 1405 1.1 darran mutex_enter(&cpu_lock); 1406 1.1 darran 1407 1.1 darran /* Uninitialise the machine-dependent backend. */ 1408 1.1 darran cyclic_machdep_uninit(); 1409 1.1 darran 1410 1.1 darran mutex_exit(&cpu_lock); 1411 1.1 darran } 1412 1.1 darran 1413 1.1 darran SYSUNINIT(cyclic_unregister, SI_SUB_CYCLIC, SI_ORDER_SECOND, cyclic_unload, NULL); 1414 1.1 darran 1415 1.4 chs #ifdef __FreeBSD__ 1416 1.1 darran /* ARGSUSED */ 1417 1.1 darran static int 1418 1.1 darran cyclic_modevent(module_t mod __unused, int type, void *data __unused) 1419 1.1 darran { 1420 1.1 darran int error = 0; 1421 1.1 darran 1422 1.1 darran switch (type) { 1423 1.1 darran case MOD_LOAD: 1424 1.1 darran break; 1425 1.1 darran 1426 1.1 darran case MOD_UNLOAD: 1427 1.1 darran break; 1428 1.1 darran 1429 1.1 darran case MOD_SHUTDOWN: 1430 1.1 darran break; 1431 1.1 darran 1432 1.1 darran default: 1433 1.1 darran error = EOPNOTSUPP; 1434 1.1 darran break; 1435 1.1 darran 1436 1.1 darran } 1437 1.1 darran return (error); 1438 1.1 darran } 1439 1.1 darran 1440 1.1 darran DEV_MODULE(cyclic, cyclic_modevent, NULL); 1441 1.1 darran MODULE_VERSION(cyclic, 1); 1442 1.1 darran MODULE_DEPEND(cyclic, opensolaris, 1, 1, 1); 1443 1.4 chs #endif 1444 1.4 chs 1445 1.4 chs #ifdef __NetBSD__ 1446 1.4 chs static int 1447 1.4 chs cyclic_modcmd(modcmd_t cmd, void *data) 1448 1.4 chs { 1449 1.4 chs switch (cmd) { 1450 1.4 chs case MODULE_CMD_INIT: 1451 1.4 chs cyclic_load(NULL); 
1452 1.4 chs return 0; 1453 1.4 chs 1454 1.4 chs case MODULE_CMD_FINI: 1455 1.4 chs cyclic_unload(); 1456 1.4 chs return 0; 1457 1.5 riastrad 1458 1.5 riastrad case MODULE_CMD_AUTOUNLOAD: 1459 1.5 riastrad if (cyclic_id_head != NULL) 1460 1.5 riastrad return EBUSY; 1461 1.5 riastrad return 0; 1462 1.5 riastrad 1463 1.4 chs default: 1464 1.4 chs return ENOTTY; 1465 1.4 chs } 1466 1.4 chs } 1467 1.4 chs 1468 1.6 chs MODULE(MODULE_CLASS_MISC, cyclic, "solaris"); 1469 1.4 chs #endif 1470