/*	$NetBSD: kern_softint.c,v 1.3.4.9 2008/03/24 09:39:02 yamt Exp $	*/

/*-
 * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Generic software interrupt framework.
 *
 * Overview
 *
 *	The soft interrupt framework provides a mechanism to schedule a
 *	low priority callback that runs with thread context.  It allows
 *	for dynamic registration of software interrupts, and for fair
 *	queueing and prioritization of those interrupts.  The callbacks
 *	can be scheduled to run from nearly any point in the kernel: by
 *	code running with thread context, by code running from a
 *	hardware interrupt handler, and at any interrupt priority
 *	level.
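 *
 *	A typical usage sketch (the handler mydrv_softrx and its
 *	argument sc are hypothetical, for illustration only):
 *
 *		sih = softint_establish(SOFTINT_NET, mydrv_softrx, sc);
 *
 *	then, from the driver's hardware interrupt handler:
 *
 *		softint_schedule(sih);
 *
 *	after which mydrv_softrx(sc) runs at PRI_SOFTNET in thread
 *	context.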
 *
 * Priority levels
 *
 *	Since soft interrupt dispatch can be tied to the underlying
 *	architecture's interrupt dispatch code, it can be limited
 *	both by the capabilities of the hardware and the capabilities
 *	of the interrupt dispatch code itself.  The number of priority
 *	levels is restricted to four.  In order of priority (lowest to
 *	highest) the levels are: clock, bio, net, serial.
 *
 *	The names are symbolic and in isolation do not have any direct
 *	connection with a particular kind of device activity: they are
 *	only meant as a guide.
 *
 *	The four priority levels map directly to scheduler priority
 *	levels, and where the architecture implements 'fast' software
 *	interrupts, they also map onto interrupt priorities.  The
 *	interrupt priorities are intended to be hidden from machine
 *	independent code, which should use thread-safe mechanisms to
 *	synchronize with software interrupts (for example: mutexes).
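 *
 *	For example, state shared between thread context and a soft
 *	interrupt can be protected by a mutex initialized at the
 *	matching level (a sketch; sc_lock is illustrative):
 *
 *		mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTNET);
 *
 *	with mutex_enter()/mutex_exit() used from both contexts.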
 *
 * Capabilities
 *
 *	Software interrupts run with limited machine context.  In
 *	particular, they do not possess any address space context.  They
 *	should not try to operate on user space addresses, or to use
 *	virtual memory facilities other than those noted as interrupt
 *	safe.
 *
 *	Unlike hardware interrupts, software interrupts do have thread
 *	context.  They may block on synchronization objects, sleep, and
 *	resume execution at a later time.
 *
 *	Since software interrupts are a limited resource and run with
 *	higher priority than most other LWPs in the system, all
 *	block-and-resume activity by a software interrupt must be kept
 *	short to allow further processing at that level to continue.  By
 *	extension, code running with process context must take care to
 *	ensure that any lock that may be taken from a software interrupt
 *	can not be held for more than a short period of time.
 *
 *	The kernel does not allow software interrupts to use facilities
 *	or perform actions that may block for a significant amount of
 *	time.  This means that it's not valid for a software interrupt
 *	to sleep on condition variables or wait for resources to become
 *	available (for example, memory).
 *
 * Per-CPU operation
 *
 *	If a soft interrupt is triggered on a CPU, it can only be
 *	dispatched on the same CPU.  Each LWP dedicated to handling a
 *	soft interrupt is bound to its home CPU, so if the LWP blocks
 *	and needs to run again, it can only run there.  Nearly all data
 *	structures used to manage software interrupts are per-CPU.
 *
 *	The per-CPU requirement is intended to reduce "ping-pong" of
 *	cache lines between CPUs: lines occupied by data structures
 *	used to manage the soft interrupts, and lines occupied by data
 *	items being passed down to the soft interrupt.  As a positive
 *	side effect, this also means that the soft interrupt dispatch
 *	code does not need to use spinlocks to synchronize.
 *
 * Generic implementation
 *
 *	A generic, low performance implementation is provided that
 *	works across all architectures, with no machine-dependent
 *	modifications needed.  This implementation uses the scheduler,
 *	and so has a number of restrictions:
 *
 *	1) The software interrupts are not currently preemptive, so
 *	must wait for the currently executing LWP to yield the CPU.
 *	This can introduce latency.
 *
 *	2) An expensive context switch is required for a software
 *	interrupt to be handled.
 *
 * 'Fast' software interrupts
 *
 *	If an architecture defines __HAVE_FAST_SOFTINTS, it implements
 *	the fast mechanism.  Threads running either in the kernel or in
 *	userspace will be interrupted, but will not be preempted.  When
 *	the soft interrupt completes execution, the interrupted LWP
 *	is resumed.  Interrupt dispatch code must provide the minimum
 *	level of context necessary for the soft interrupt to block and
 *	be resumed at a later time.  The machine-dependent dispatch
 *	path looks something like the following:
 *
 *	softintr()
 *	{
 *		go to IPL_HIGH if necessary for switch;
 *		save any necessary registers in a format that can be
 *		restored by cpu_switchto if the softint blocks;
 *		arrange for cpu_switchto() to restore into the
 *		trampoline function;
 *		identify LWP to handle this interrupt;
 *		switch to the LWP's stack;
 *		switch register stacks, if necessary;
 *		assign new value of curlwp;
 *		call MI softint_dispatch, passing old curlwp and IPL
 *		to execute interrupt at;
 *		switch back to old stack;
 *		switch back to old register stack, if necessary;
 *		restore curlwp;
 *		return to interrupted LWP;
 *	}
 *
 *	If the soft interrupt blocks, a trampoline function is returned
 *	to in the context of the interrupted LWP, as arranged for by
 *	softintr():
 *
 *	softint_ret()
 *	{
 *		unlock soft interrupt LWP;
 *		resume interrupt processing, likely returning to
 *		interrupted LWP or dispatching another, different
 *		interrupt;
 *	}
 *
 *	Once the soft interrupt has fired (and even if it has blocked),
 *	no further soft interrupts at that level will be triggered by
 *	MI code until the soft interrupt handler has ceased execution.
 *	If a soft interrupt handler blocks and is resumed, it resumes
 *	execution as a normal LWP (kthread) and gains VM context.  Only
 *	when it has completed and is ready to fire again will it
 *	interrupt other threads.
 *
 * Future directions
 *
 *	Provide a cheap way to direct software interrupts to remote
 *	CPUs.  Provide a way to enqueue work items into the handler
 *	record, removing additional spl calls (see subr_workqueue.c).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.3.4.9 2008/03/24 09:39:02 yamt Exp $");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/intr.h>
#include <sys/mutex.h>
#include <sys/kthread.h>
#include <sys/evcnt.h>
#include <sys/cpu.h>

#include <net/netisr.h>

#include <uvm/uvm_extern.h>

/* This could overlap with signal info in struct lwp. */
typedef struct softint {
	SIMPLEQ_HEAD(, softhand) si_q;
	struct lwp *si_lwp;
	struct cpu_info *si_cpu;
	uintptr_t si_machdep;
	struct evcnt si_evcnt;
	struct evcnt si_evcnt_block;
	int si_active;
	char si_name[8];
	char si_name_block[8+6];
} softint_t;

typedef struct softhand {
	SIMPLEQ_ENTRY(softhand) sh_q;
	void (*sh_func)(void *);
	void *sh_arg;
	softint_t *sh_isr;
	u_int sh_pending;
	u_int sh_flags;
} softhand_t;

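/*
 * The softcpu block is allocated with room for softint_max handler
 * records after the structure itself; sc_hand[1] marks the start of
 * that variable-length array.  A handler's byte offset within the
 * block is the same on every CPU and serves as its opaque handle.
 */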
typedef struct softcpu {
	struct cpu_info *sc_cpu;
	softint_t sc_int[SOFTINT_COUNT];
	softhand_t sc_hand[1];
} softcpu_t;

static void softint_thread(void *);

u_int softint_bytes = 8192;
u_int softint_timing;
static u_int softint_max;
static kmutex_t softint_lock;
static void *softint_netisrs[32];

/*
 * softint_init_isr:
 *
 *	Initialize a single interrupt level for a single CPU.
 */
static void
softint_init_isr(softcpu_t *sc, const char *desc, pri_t pri, u_int level)
{
	struct cpu_info *ci;
	softint_t *si;
	int error;

	si = &sc->sc_int[level];
	ci = sc->sc_cpu;
	si->si_cpu = ci;

	SIMPLEQ_INIT(&si->si_q);

	error = kthread_create(pri, KTHREAD_MPSAFE | KTHREAD_INTR |
	    KTHREAD_IDLE, ci, softint_thread, si, &si->si_lwp,
	    "soft%s/%u", desc, ci->ci_index);
	if (error != 0)
		panic("softint_init_isr: error %d", error);

	snprintf(si->si_name, sizeof(si->si_name), "%s/%u", desc,
	    ci->ci_index);
	evcnt_attach_dynamic(&si->si_evcnt, EVCNT_TYPE_INTR, NULL,
	    "softint", si->si_name);
	snprintf(si->si_name_block, sizeof(si->si_name_block), "%s block/%u",
	    desc, ci->ci_index);
	evcnt_attach_dynamic(&si->si_evcnt_block, EVCNT_TYPE_INTR, NULL,
	    "softint", si->si_name_block);

	si->si_lwp->l_private = si;
	softint_init_md(si->si_lwp, level, &si->si_machdep);
}

/*
 * softint_init:
 *
 *	Initialize per-CPU data structures.  Called from mi_cpu_attach().
 */
void
softint_init(struct cpu_info *ci)
{
	static struct cpu_info *first;
	softcpu_t *sc, *scfirst;
	softhand_t *sh, *shmax;

	if (first == NULL) {
		/* Boot CPU. */
		first = ci;
		mutex_init(&softint_lock, MUTEX_DEFAULT, IPL_NONE);
		softint_bytes = round_page(softint_bytes);
		softint_max = (softint_bytes - sizeof(softcpu_t)) /
		    sizeof(softhand_t);
	}

	sc = (softcpu_t *)uvm_km_alloc(kernel_map, softint_bytes, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	if (sc == NULL)
		panic("softint_init_cpu: cannot allocate memory");

	ci->ci_data.cpu_softcpu = sc;
	ci->ci_data.cpu_softints = 0;
	sc->sc_cpu = ci;

	softint_init_isr(sc, "net", PRI_SOFTNET, SOFTINT_NET);
	softint_init_isr(sc, "bio", PRI_SOFTBIO, SOFTINT_BIO);
	softint_init_isr(sc, "clk", PRI_SOFTCLOCK, SOFTINT_CLOCK);
	softint_init_isr(sc, "ser", PRI_SOFTSERIAL, SOFTINT_SERIAL);

	if (first != ci) {
		mutex_enter(&softint_lock);
		scfirst = first->ci_data.cpu_softcpu;
		sh = sc->sc_hand;
		memcpy(sh, scfirst->sc_hand, sizeof(*sh) * softint_max);
		/* Update pointers for this CPU. */
		for (shmax = sh + softint_max; sh < shmax; sh++) {
			if (sh->sh_func == NULL)
				continue;
			sh->sh_isr =
			    &sc->sc_int[sh->sh_flags & SOFTINT_LVLMASK];
		}
		mutex_exit(&softint_lock);
	} else {
		/*
		 * Establish handlers for legacy net interrupts.
		 * XXX Needs to go away.
		 */
#define DONETISR(n, f)							\
	softint_netisrs[(n)] =						\
	    softint_establish(SOFTINT_NET, (void (*)(void *))(f), NULL)
#include <net/netisr_dispatch.h>
	}
}

/*
 * softint_establish:
 *
 *	Register a software interrupt handler.
 */
void *
softint_establish(u_int flags, void (*func)(void *), void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	softcpu_t *sc;
	softhand_t *sh;
	u_int level, index;

	level = (flags & SOFTINT_LVLMASK);
	KASSERT(level < SOFTINT_COUNT);

	mutex_enter(&softint_lock);

	/* Find a free slot. */
	sc = curcpu()->ci_data.cpu_softcpu;
	for (index = 1; index < softint_max; index++)
		if (sc->sc_hand[index].sh_func == NULL)
			break;
	if (index == softint_max) {
		mutex_exit(&softint_lock);
		printf("WARNING: softint_establish: table full, "
		    "increase softint_bytes\n");
		return NULL;
	}

	/* Set up the handler on each CPU. */
	if (ncpu < 2) {
		/* XXX hack for machines with no CPU_INFO_FOREACH() early on */
		sc = curcpu()->ci_data.cpu_softcpu;
		sh = &sc->sc_hand[index];
		sh->sh_isr = &sc->sc_int[level];
		sh->sh_func = func;
		sh->sh_arg = arg;
		sh->sh_flags = flags;
		sh->sh_pending = 0;
	} else for (CPU_INFO_FOREACH(cii, ci)) {
		sc = ci->ci_data.cpu_softcpu;
		sh = &sc->sc_hand[index];
		sh->sh_isr = &sc->sc_int[level];
		sh->sh_func = func;
		sh->sh_arg = arg;
		sh->sh_flags = flags;
		sh->sh_pending = 0;
	}

	mutex_exit(&softint_lock);

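	/*
	 * The handle returned to the caller is the byte offset of the
	 * handler record within the softcpu block, valid on any CPU.
	 */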
	return (void *)((uint8_t *)&sc->sc_hand[index] - (uint8_t *)sc);
}

/*
 * softint_disestablish:
 *
 *	Unregister a software interrupt handler.
 */
void
softint_disestablish(void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	softcpu_t *sc;
	softhand_t *sh;
	uintptr_t offset;

	offset = (uintptr_t)arg;
	KASSERT(offset != 0 && offset < softint_bytes);

	mutex_enter(&softint_lock);

	/* Clear the handler on each CPU. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		sc = ci->ci_data.cpu_softcpu;
		sh = (softhand_t *)((uint8_t *)sc + offset);
		KASSERT(sh->sh_func != NULL);
		KASSERT(sh->sh_pending == 0);
		sh->sh_func = NULL;
	}

	mutex_exit(&softint_lock);
}

/*
 * softint_schedule:
 *
 *	Trigger a software interrupt.  Must be called from a hardware
 *	interrupt handler, or with preemption disabled (since we are
 *	using the value of curcpu()).
 */
void
softint_schedule(void *arg)
{
	softhand_t *sh;
	softint_t *si;
	uintptr_t offset;
	int s;

	/* Find the handler record for this CPU. */
	offset = (uintptr_t)arg;
	KASSERT(offset != 0 && offset < softint_bytes);
	sh = (softhand_t *)((uint8_t *)curcpu()->ci_data.cpu_softcpu + offset);

	/* If it's already pending there's nothing to do. */
	if (sh->sh_pending)
		return;

	/*
	 * Enqueue the handler into the LWP's pending list.
	 * If the LWP is completely idle, then make it run.
	 * Re-check sh_pending at splhigh, since an interrupt may
	 * have scheduled the handler in the interim.
	 */
	s = splhigh();
	if (!sh->sh_pending) {
		si = sh->sh_isr;
		sh->sh_pending = 1;
		SIMPLEQ_INSERT_TAIL(&si->si_q, sh, sh_q);
		if (si->si_active == 0) {
			si->si_active = 1;
			softint_trigger(si->si_machdep);
		}
	}
	splx(s);
}

/*
 * softint_execute:
 *
 *	Invoke handlers for the specified soft interrupt.
 *	Must be entered at splhigh.  Will drop the priority
 *	to the level specified, but returns at splhigh.
 */
static inline void
softint_execute(softint_t *si, lwp_t *l, int s)
{
	softhand_t *sh;
	bool havelock;

#ifdef __HAVE_FAST_SOFTINTS
	KASSERT(si->si_lwp == curlwp);
#else
	/* May be running in user context. */
#endif
	KASSERT(si->si_cpu == curcpu());
	KASSERT(si->si_lwp->l_wchan == NULL);
	KASSERT(si->si_active);

	havelock = false;

	/*
	 * Note: due to priority inheritance we may have interrupted a
	 * higher priority LWP.  Since the soft interrupt must be quick
	 * and is non-preemptable, we don't bother yielding.
	 */

	while (!SIMPLEQ_EMPTY(&si->si_q)) {
		/*
		 * Pick the longest waiting handler to run.  We block
		 * interrupts but do not lock in order to do this, as
		 * we are protecting against the local CPU only.
		 */
		sh = SIMPLEQ_FIRST(&si->si_q);
		SIMPLEQ_REMOVE_HEAD(&si->si_q, sh_q);
		sh->sh_pending = 0;
		splx(s);

		/* Run the handler. */
		if ((sh->sh_flags & SOFTINT_MPSAFE) == 0 && !havelock) {
			KERNEL_LOCK(1, l);
			havelock = true;
		}
		(*sh->sh_func)(sh->sh_arg);

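		/* Return to splhigh before examining the queue again. */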
		(void)splhigh();
	}

	if (havelock) {
		KERNEL_UNLOCK_ONE(l);
	}

	/*
	 * Unlocked, but only for statistics.
	 * Should be per-CPU to prevent cache ping-pong.
	 */
	uvmexp.softs++;

	KASSERT(si->si_cpu == curcpu());
	KASSERT(si->si_lwp->l_wchan == NULL);
	KASSERT(si->si_active);
	si->si_evcnt.ev_count++;
	si->si_active = 0;
}

/*
 * softint_block:
 *
 *	Update statistics when the soft interrupt blocks.
 */
void
softint_block(lwp_t *l)
{
	softint_t *si = l->l_private;

	KASSERT((l->l_pflag & LP_INTR) != 0);
	si->si_evcnt_block.ev_count++;
}

/*
 * schednetisr:
 *
 *	Trigger a legacy network interrupt.  XXX Needs to go away.
 */
void
schednetisr(int isr)
{

	softint_schedule(softint_netisrs[isr]);
}

#ifndef __HAVE_FAST_SOFTINTS

/*
 * softint_init_md:
 *
 *	Slow path: perform machine-dependent initialization.
 */
void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
{
	softint_t *si;

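	/* The slow path tracks pending levels as bits in cpu_softints. */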
	*machdep = (1 << level);
	si = l->l_private;

	lwp_lock(l);
	lwp_unlock_to(l, l->l_cpu->ci_schedstate.spc_mutex);
	lwp_lock(l);
	/* Cheat and make the KASSERT in softint_thread() happy. */
	si->si_active = 1;
	l->l_stat = LSRUN;
	sched_enqueue(l, false);
	lwp_unlock(l);
}

/*
 * softint_trigger:
 *
 *	Slow path: cause a soft interrupt handler to begin executing.
 *	Called at IPL_HIGH.
 */
void
softint_trigger(uintptr_t machdep)
{
	struct cpu_info *ci;
	lwp_t *l;

	l = curlwp;
	ci = l->l_cpu;
	ci->ci_data.cpu_softints |= machdep;
	if (l == ci->ci_data.cpu_idlelwp) {
		cpu_need_resched(ci, 0);
	} else {
		/* MI equivalent of aston() */
		cpu_signotify(l);
	}
}

/*
 * softint_thread:
 *
 *	Slow path: MI software interrupt dispatch.
 */
void
softint_thread(void *cookie)
{
	softint_t *si;
	lwp_t *l;
	int s;

	l = curlwp;
	si = l->l_private;

	for (;;) {
		/*
		 * Clear pending status and run it.  We must drop the
		 * spl before mi_switch(), since IPL_HIGH may be higher
		 * than IPL_SCHED (and it is not safe to switch at a
		 * higher level).
		 */
		s = splhigh();
		l->l_cpu->ci_data.cpu_softints &= ~si->si_machdep;
		softint_execute(si, l, s);
		splx(s);

		lwp_lock(l);
		l->l_stat = LSIDL;
		mi_switch(l);
	}
}

/*
 * softint_picklwp:
 *
 *	Slow path: called from mi_switch() to pick the highest priority
 *	soft interrupt LWP that needs to run.
 */
lwp_t *
softint_picklwp(void)
{
	struct cpu_info *ci;
	u_int mask;
	softint_t *si;
	lwp_t *l;

	ci = curcpu();
	si = ((softcpu_t *)ci->ci_data.cpu_softcpu)->sc_int;
	mask = ci->ci_data.cpu_softints;

	if ((mask & (1 << SOFTINT_SERIAL)) != 0) {
		l = si[SOFTINT_SERIAL].si_lwp;
	} else if ((mask & (1 << SOFTINT_NET)) != 0) {
		l = si[SOFTINT_NET].si_lwp;
	} else if ((mask & (1 << SOFTINT_BIO)) != 0) {
		l = si[SOFTINT_BIO].si_lwp;
	} else if ((mask & (1 << SOFTINT_CLOCK)) != 0) {
		l = si[SOFTINT_CLOCK].si_lwp;
	} else {
		panic("softint_picklwp");
	}

	return l;
}

/*
 * softint_overlay:
 *
 *	Slow path: called from lwp_userret() to run a soft interrupt
 *	within the context of a user thread.
 */
void
softint_overlay(void)
{
	struct cpu_info *ci;
	u_int softints;
	softint_t *si;
	pri_t obase;
	lwp_t *l;
	int s;

	l = curlwp;
	ci = l->l_cpu;
	si = ((softcpu_t *)ci->ci_data.cpu_softcpu)->sc_int;

	KASSERT((l->l_pflag & LP_INTR) == 0);

	/* Arrange to elevate priority if the LWP blocks. */
	obase = l->l_kpribase;
	l->l_kpribase = PRI_KERNEL_RT;
	l->l_pflag |= LP_INTR;
	s = splhigh();
	while ((softints = ci->ci_data.cpu_softints) != 0) {
		if ((softints & (1 << SOFTINT_SERIAL)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_SERIAL);
			softint_execute(&si[SOFTINT_SERIAL], l, s);
			continue;
		}
		if ((softints & (1 << SOFTINT_NET)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_NET);
			softint_execute(&si[SOFTINT_NET], l, s);
			continue;
		}
		if ((softints & (1 << SOFTINT_BIO)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_BIO);
			softint_execute(&si[SOFTINT_BIO], l, s);
			continue;
		}
		if ((softints & (1 << SOFTINT_CLOCK)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_CLOCK);
			softint_execute(&si[SOFTINT_CLOCK], l, s);
			continue;
		}
	}
	splx(s);
	l->l_pflag &= ~LP_INTR;
	l->l_kpribase = obase;
}

#else /* !__HAVE_FAST_SOFTINTS */

/*
 * softint_thread:
 *
 *	Fast path: the LWP is switched to without restoring any state,
 *	so we should not arrive here - there is a direct handoff between
 *	the interrupt stub and softint_dispatch().
 */
void
softint_thread(void *cookie)
{

	panic("softint_thread");
}

/*
 * softint_dispatch:
 *
 *	Fast path: entry point from machine-dependent code.
 */
void
softint_dispatch(lwp_t *pinned, int s)
{
	struct bintime now;
	softint_t *si;
	u_int timing;
	lwp_t *l;

	l = curlwp;
	si = l->l_private;

	/*
	 * Note the interrupted LWP, and mark the current LWP as running
	 * before proceeding.  Although this must as a rule be done with
	 * the LWP locked, at this point no external agents will want to
	 * modify the interrupt LWP's state.
	 */
	timing = (softint_timing ? LW_TIMEINTR : 0);
	l->l_switchto = pinned;
	l->l_stat = LSONPROC;
	l->l_flag |= (LW_RUNNING | timing);

	/*
	 * Dispatch the interrupt.  If softints are being timed, charge
	 * for it.
	 */
	if (timing)
		binuptime(&l->l_stime);
	softint_execute(si, l, s);
	if (timing) {
		binuptime(&now);
		updatertime(l, &now);
		l->l_flag &= ~LW_TIMEINTR;
	}

	/*
	 * If we blocked while handling the interrupt, the pinned LWP is
	 * gone so switch to the idle LWP.  It will select a new LWP to
	 * run.
	 *
	 * We must drop the priority level as switching at IPL_HIGH could
	 * deadlock the system.  We have already set si->si_active = 0,
	 * which means another interrupt at this level can be triggered.
	 * That's not a problem: we are lowering to level 's' which will
	 * prevent softint_dispatch() from being reentered at level 's',
	 * until the priority is finally dropped to IPL_NONE on entry to
	 * the idle loop.
	 */
	l->l_stat = LSIDL;
	if (l->l_switchto == NULL) {
		splx(s);
		pmap_deactivate(l);
		lwp_exit_switchaway(l);
		/* NOTREACHED */
	}
	l->l_switchto = NULL;
	l->l_flag &= ~LW_RUNNING;
}

#endif /* !__HAVE_FAST_SOFTINTS */