gicv3.c revision 1.4 1 /* $NetBSD: gicv3.c,v 1.4 2018/11/05 11:50:15 jmcneill Exp $ */
2
3 /*-
4 * Copyright (c) 2018 Jared McNeill <jmcneill (at) invisible.ca>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include "opt_multiprocessor.h"
30
31 #define _INTR_PRIVATE
32
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: gicv3.c,v 1.4 2018/11/05 11:50:15 jmcneill Exp $");
35
36 #include <sys/param.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/device.h>
40 #include <sys/intr.h>
41 #include <sys/systm.h>
42 #include <sys/cpu.h>
43
44 #include <arm/locore.h>
45 #include <arm/armreg.h>
46
47 #include <arm/cortex/gicv3.h>
48 #include <arm/cortex/gic_reg.h>
49
50 #define PICTOSOFTC(pic) \
51 ((void *)((uintptr_t)(pic) - offsetof(struct gicv3_softc, sc_pic)))
52
53 #define IPL_TO_PRIORITY(ipl) ((IPL_HIGH - (ipl)) << 4)
54
55 static struct gicv3_softc *gicv3_softc;
56
57 static inline uint32_t
58 gicd_read_4(struct gicv3_softc *sc, bus_size_t reg)
59 {
60 return bus_space_read_4(sc->sc_bst, sc->sc_bsh_d, reg);
61 }
62
63 static inline void
64 gicd_write_4(struct gicv3_softc *sc, bus_size_t reg, uint32_t val)
65 {
66 bus_space_write_4(sc->sc_bst, sc->sc_bsh_d, reg, val);
67 }
68
69 static inline void
70 gicd_write_8(struct gicv3_softc *sc, bus_size_t reg, uint64_t val)
71 {
72 bus_space_write_8(sc->sc_bst, sc->sc_bsh_d, reg, val);
73 }
74
75 static inline uint32_t
76 gicr_read_4(struct gicv3_softc *sc, u_int index, bus_size_t reg)
77 {
78 KASSERT(index < sc->sc_bsh_r_count);
79 return bus_space_read_4(sc->sc_bst, sc->sc_bsh_r[index], reg);
80 }
81
82 static inline void
83 gicr_write_4(struct gicv3_softc *sc, u_int index, bus_size_t reg, uint32_t val)
84 {
85 KASSERT(index < sc->sc_bsh_r_count);
86 bus_space_write_4(sc->sc_bst, sc->sc_bsh_r[index], reg, val);
87 }
88
89 static inline uint64_t
90 gicr_read_8(struct gicv3_softc *sc, u_int index, bus_size_t reg)
91 {
92 KASSERT(index < sc->sc_bsh_r_count);
93 return bus_space_read_8(sc->sc_bst, sc->sc_bsh_r[index], reg);
94 }
95
96 static inline void
97 gicr_write_8(struct gicv3_softc *sc, u_int index, bus_size_t reg, uint64_t val)
98 {
99 KASSERT(index < sc->sc_bsh_r_count);
100 bus_space_write_8(sc->sc_bst, sc->sc_bsh_r[index], reg, val);
101 }
102
103 static void
104 gicv3_unblock_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
105 {
106 struct gicv3_softc * const sc = PICTOSOFTC(pic);
107 struct cpu_info * const ci = curcpu();
108 const u_int group = irqbase / 32;
109
110 if (group == 0) {
111 sc->sc_enabled_sgippi |= mask;
112 gicr_write_4(sc, ci->ci_gic_redist, GICR_ISENABLER0, mask);
113 while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTRL) & GICR_CTRL_RWP)
114 ;
115 } else {
116 gicd_write_4(sc, GICD_ISENABLERn(group), mask);
117 while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
118 ;
119 }
120 }
121
122 static void
123 gicv3_block_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
124 {
125 struct gicv3_softc * const sc = PICTOSOFTC(pic);
126 struct cpu_info * const ci = curcpu();
127 const u_int group = irqbase / 32;
128
129 if (group == 0) {
130 sc->sc_enabled_sgippi &= ~mask;
131 gicr_write_4(sc, ci->ci_gic_redist, GICR_ICENABLER0, mask);
132 while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTRL) & GICR_CTRL_RWP)
133 ;
134 } else {
135 gicd_write_4(sc, GICD_ICENABLERn(group), mask);
136 while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
137 ;
138 }
139 }
140
141 static void
142 gicv3_establish_irq(struct pic_softc *pic, struct intrsource *is)
143 {
144 struct gicv3_softc * const sc = PICTOSOFTC(pic);
145 const u_int group = is->is_irq / 32;
146 uint32_t ipriority, icfg;
147 uint64_t irouter;
148 u_int n;
149
150 const u_int ipriority_val = 0x80 | IPL_TO_PRIORITY(is->is_ipl);
151 const u_int ipriority_shift = (is->is_irq & 0x3) * 8;
152 const u_int icfg_shift = (is->is_irq & 0xf) * 2;
153
154 if (group == 0) {
155 /* SGIs and PPIs are always MP-safe */
156 is->is_mpsafe = true;
157
158 /* Update interrupt configuration and priority on all redistributors */
159 for (n = 0; n < sc->sc_bsh_r_count; n++) {
160 icfg = gicr_read_4(sc, n, GICR_ICFGRn(is->is_irq / 16));
161 if (is->is_type == IST_LEVEL)
162 icfg &= ~(0x2 << icfg_shift);
163 if (is->is_type == IST_EDGE)
164 icfg |= (0x2 << icfg_shift);
165 gicr_write_4(sc, n, GICR_ICFGRn(is->is_irq / 16), icfg);
166
167 ipriority = gicr_read_4(sc, n, GICR_IPRIORITYRn(is->is_irq / 4));
168 ipriority &= ~(0xff << ipriority_shift);
169 ipriority |= (ipriority_val << ipriority_shift);
170 gicr_write_4(sc, n, GICR_IPRIORITYRn(is->is_irq / 4), ipriority);
171 }
172 } else {
173 if (is->is_mpsafe) {
174 /* Route MP-safe interrupts to all participating PEs */
175 irouter = GICD_IROUTER_Interrupt_Routing_mode;
176 } else {
177 /* Route non-MP-safe interrupts to the primary PE only */
178 irouter = sc->sc_default_irouter;
179 }
180 gicd_write_8(sc, GICD_IROUTER(is->is_irq), irouter);
181
182 /* Update interrupt configuration */
183 icfg = gicd_read_4(sc, GICD_ICFGRn(is->is_irq / 16));
184 if (is->is_type == IST_LEVEL)
185 icfg &= ~(0x2 << icfg_shift);
186 if (is->is_type == IST_EDGE)
187 icfg |= (0x2 << icfg_shift);
188 gicd_write_4(sc, GICD_ICFGRn(is->is_irq / 16), icfg);
189
190 /* Update interrupt priority */
191 ipriority = gicd_read_4(sc, GICD_IPRIORITYRn(is->is_irq / 4));
192 ipriority &= ~(0xff << ipriority_shift);
193 ipriority |= (ipriority_val << ipriority_shift);
194 gicd_write_4(sc, GICD_IPRIORITYRn(is->is_irq / 4), ipriority);
195 }
196 }
197
198 static void
199 gicv3_set_priority(struct pic_softc *pic, int ipl)
200 {
201 icc_pmr_write(IPL_TO_PRIORITY(ipl) << 1);
202 }
203
204 static void
205 gicv3_dist_enable(struct gicv3_softc *sc)
206 {
207 uint32_t gicd_ctrl;
208 u_int n;
209
210 /* Disable the distributor */
211 gicd_write_4(sc, GICD_CTRL, 0);
212
213 /* Wait for register write to complete */
214 while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
215 ;
216
217 /* Clear all INTID enable bits */
218 for (n = 32; n < sc->sc_pic.pic_maxsources; n += 32)
219 gicd_write_4(sc, GICD_ICENABLERn(n / 32), ~0);
220
221 /* Set default priorities to lowest */
222 for (n = 32; n < sc->sc_pic.pic_maxsources; n += 4)
223 gicd_write_4(sc, GICD_IPRIORITYRn(n / 4), ~0);
224
225 /* Set all interrupts to G1NS */
226 for (n = 32; n < sc->sc_pic.pic_maxsources; n += 32) {
227 gicd_write_4(sc, GICD_IGROUPRn(n / 32), ~0);
228 gicd_write_4(sc, GICD_IGRPMODRn(n / 32), 0);
229 }
230
231 /* Set all interrupts level-sensitive by default */
232 for (n = 32; n < sc->sc_pic.pic_maxsources; n += 16)
233 gicd_write_4(sc, GICD_ICFGRn(n / 16), 0);
234
235 /* Wait for register writes to complete */
236 while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
237 ;
238
239 /* Enable Affinity routing and G1NS interrupts */
240 gicd_ctrl = GICD_CTRL_EnableGrp1NS | GICD_CTRL_Enable | GICD_CTRL_ARE_NS;
241 gicd_write_4(sc, GICD_CTRL, gicd_ctrl);
242 }
243
244 static void
245 gicv3_redist_enable(struct gicv3_softc *sc, struct cpu_info *ci)
246 {
247 uint32_t icfg;
248 u_int n, o;
249
250 /* Clear INTID enable bits */
251 gicr_write_4(sc, ci->ci_gic_redist, GICR_ICENABLER0, ~0);
252
253 /* Wait for register write to complete */
254 while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTRL) & GICR_CTRL_RWP)
255 ;
256
257 /* Set default priorities */
258 for (n = 0; n < 32; n += 4) {
259 uint32_t priority = 0;
260 size_t byte_shift = 0;
261 for (o = 0; o < 4; o++, byte_shift += 8) {
262 struct intrsource * const is = sc->sc_pic.pic_sources[n + o];
263 if (is == NULL)
264 priority |= 0xff << byte_shift;
265 else {
266 const u_int ipriority_val = 0x80 | IPL_TO_PRIORITY(is->is_ipl);
267 priority |= ipriority_val << byte_shift;
268 }
269 }
270 gicr_write_4(sc, ci->ci_gic_redist, GICR_IPRIORITYRn(n / 4), priority);
271 }
272
273 /* Set all interrupts to G1NS */
274 gicr_write_4(sc, ci->ci_gic_redist, GICR_IGROUPR0, ~0);
275 gicr_write_4(sc, ci->ci_gic_redist, GICR_IGRPMODR0, 0);
276
277 /* Restore PPI configs */
278 for (n = 0, icfg = 0; n < 16; n++) {
279 struct intrsource * const is = sc->sc_pic.pic_sources[16 + n];
280 if (is != NULL && is->is_type == IST_EDGE)
281 icfg |= (0x2 << (n * 2));
282 }
283 gicr_write_4(sc, ci->ci_gic_redist, GICR_ICFGRn(1), icfg);
284
285 /* Restore current enable bits */
286 gicr_write_4(sc, ci->ci_gic_redist, GICR_ISENABLER0, sc->sc_enabled_sgippi);
287
288 /* Wait for register write to complete */
289 while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTRL) & GICR_CTRL_RWP)
290 ;
291 }
292
293 static uint64_t
294 gicv3_cpu_identity(void)
295 {
296 u_int aff3, aff2, aff1, aff0;
297
298 #ifdef __aarch64__
299 const register_t mpidr = reg_mpidr_el1_read();
300 aff0 = __SHIFTOUT(mpidr, MPIDR_AFF0);
301 aff1 = __SHIFTOUT(mpidr, MPIDR_AFF1);
302 aff2 = __SHIFTOUT(mpidr, MPIDR_AFF2);
303 aff3 = __SHIFTOUT(mpidr, MPIDR_AFF3);
304 #else
305 const register_t mpidr = armreg_mpidr_read();
306 aff0 = __SHIFTOUT(mpidr, MPIDR_AFF0);
307 aff1 = __SHIFTOUT(mpidr, MPIDR_AFF1);
308 aff2 = __SHIFTOUT(mpidr, MPIDR_AFF2);
309 aff3 = 0;
310 #endif
311
312 return __SHIFTIN(aff0, GICR_TYPER_Affinity_Value_Aff0) |
313 __SHIFTIN(aff1, GICR_TYPER_Affinity_Value_Aff1) |
314 __SHIFTIN(aff2, GICR_TYPER_Affinity_Value_Aff2) |
315 __SHIFTIN(aff3, GICR_TYPER_Affinity_Value_Aff3);
316 }
317
318 static u_int
319 gicv3_find_redist(struct gicv3_softc *sc)
320 {
321 uint64_t gicr_typer;
322 u_int n;
323
324 const uint64_t cpu_identity = gicv3_cpu_identity();
325
326 for (n = 0; n < sc->sc_bsh_r_count; n++) {
327 gicr_typer = gicr_read_8(sc, n, GICR_TYPER);
328 if ((gicr_typer & GICR_TYPER_Affinity_Value) == cpu_identity)
329 return n;
330 }
331
332 const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
333 const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
334 const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
335 const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);
336
337 panic("%s: could not find GICv3 redistributor for cpu %d.%d.%d.%d",
338 cpu_name(curcpu()), aff3, aff2, aff1, aff0);
339 }
340
341 static uint64_t
342 gicv3_sgir(struct gicv3_softc *sc)
343 {
344 const uint64_t cpu_identity = gicv3_cpu_identity();
345
346 const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
347 const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
348 const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
349 const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);
350
351 return __SHIFTIN(__BIT(aff0), ICC_SGIR_EL1_TargetList) |
352 __SHIFTIN(aff1, ICC_SGIR_EL1_Aff1) |
353 __SHIFTIN(aff2, ICC_SGIR_EL1_Aff2) |
354 __SHIFTIN(aff3, ICC_SGIR_EL1_Aff3);
355 }
356
/*
 * Per-CPU GIC initialization: locate this CPU's redistributor, wake it,
 * program the CPU interface (SRE, PMR, BPR, group enable, EOI mode) and
 * finally enable the redistributor and unmask IRQs.  Runs on each CPU,
 * including the boot CPU (called from gicv3_init).
 */
static void
gicv3_cpu_init(struct pic_softc *pic, struct cpu_info *ci)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	uint32_t icc_sre, icc_ctlr, gicr_waker;

	/* Cache this CPU's redistributor index and self-SGI value. */
	ci->ci_gic_redist = gicv3_find_redist(sc);
	ci->ci_gic_sgir = gicv3_sgir(sc);

	if (CPU_IS_PRIMARY(ci)) {
		/* Store route to primary CPU for non-MPSAFE SPIs */
		const uint64_t cpu_identity = gicv3_cpu_identity();
		const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
		const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
		const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
		const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);
		sc->sc_default_irouter =
		    __SHIFTIN(aff0, GICD_IROUTER_Aff0) |
		    __SHIFTIN(aff1, GICD_IROUTER_Aff1) |
		    __SHIFTIN(aff2, GICD_IROUTER_Aff2) |
		    __SHIFTIN(aff3, GICD_IROUTER_Aff3);
	}

	/* Enable System register access and disable IRQ/FIQ bypass */
	icc_sre = ICC_SRE_EL1_SRE | ICC_SRE_EL1_DFB | ICC_SRE_EL1_DIB;
	icc_sre_write(icc_sre);

	/* Mark the connected PE as being awake */
	gicr_waker = gicr_read_4(sc, ci->ci_gic_redist, GICR_WAKER);
	gicr_waker &= ~GICR_WAKER_ProcessorSleep;
	gicr_write_4(sc, ci->ci_gic_redist, GICR_WAKER, gicr_waker);
	/* Spin until the redistributor reports the PE interface is up. */
	while (gicr_read_4(sc, ci->ci_gic_redist, GICR_WAKER) & GICR_WAKER_ChildrenAsleep)
		;

	/* Set initial priority mask */
	gicv3_set_priority(pic, IPL_HIGH);

	/* Disable preemption (single priority group, no binary point split) */
	const uint32_t icc_bpr = __SHIFTIN(0x7, ICC_BPR_EL1_BinaryPoint);
	icc_bpr1_write(icc_bpr);

	/* Enable group 1 interrupt signaling */
	icc_igrpen1_write(ICC_IGRPEN_EL1_Enable);

	/* Set EOI mode: EOImode=0, so EOI both drops priority and deactivates */
	icc_ctlr = icc_ctlr_read();
	icc_ctlr &= ~ICC_CTLR_EL1_EOImode;
	icc_ctlr_write(icc_ctlr);

	/* Enable redistributor */
	gicv3_redist_enable(sc, ci);

	/* Allow IRQ exceptions */
	cpsie(I32_bit);
}
412
#ifdef MULTIPROCESSOR
/*
 * Send an IPI (SGI 'ipi') to the CPUs named in 'kcp', or to all other
 * CPUs when kcp is NULL.
 *
 * Targeted sends are batched: CPUs sharing the same Aff3.Aff2.Aff1
 * cluster are accumulated into one target list and flushed in a single
 * ICC_SGI1R_EL1 write whenever the cluster changes (and once at the
 * end).  NOTE(review): if CPU iteration order interleaves clusters,
 * this still delivers correctly but issues more SGI writes than the
 * minimum — presumably CPU_INFO_FOREACH groups clusters contiguously.
 */
static void
gicv3_ipi_send(struct pic_softc *pic, const kcpuset_t *kcp, u_long ipi)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	uint64_t intid, aff, targets;

	intid = __SHIFTIN(ipi, ICC_SGIR_EL1_INTID);
	if (kcp == NULL) {
		/* Interrupts routed to all PEs, excluding "self" */
		if (ncpu == 1)
			return;	/* no other PE to interrupt */
		icc_sgi1r_write(intid | ICC_SGIR_EL1_IRM);
	} else {
		/* Interrupts routed to specific PEs */
		aff = 0;
		targets = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (!kcpuset_isset(kcp, cpu_index(ci)))
				continue;
			if ((ci->ci_gic_sgir & ICC_SGIR_EL1_Aff) != aff) {
				/* New cluster: flush what we batched so far. */
				if (targets != 0) {
					icc_sgi1r_write(intid | aff | targets);
					targets = 0;
				}
				aff = (ci->ci_gic_sgir & ICC_SGIR_EL1_Aff);
			}
			targets |= (ci->ci_gic_sgir & ICC_SGIR_EL1_TargetList);
		}
		/* Flush the final batch. */
		if (targets != 0)
			icc_sgi1r_write(intid | aff | targets);
	}
}
#endif
448
/* PIC operations vector registered with the MI interrupt framework. */
static const struct pic_ops gicv3_picops = {
	.pic_unblock_irqs = gicv3_unblock_irqs,
	.pic_block_irqs = gicv3_block_irqs,
	.pic_establish_irq = gicv3_establish_irq,
	.pic_set_priority = gicv3_set_priority,
#ifdef MULTIPROCESSOR
	.pic_cpu_init = gicv3_cpu_init,
	.pic_ipi_send = gicv3_ipi_send,
#endif
};
459
/*
 * Main IRQ entry point.  Repeatedly acknowledges the highest-priority
 * pending Group 1 interrupt, raises the CPU priority to the source's
 * IPL, dispatches the handler with IRQs re-enabled, then signals EOI.
 * Restores the entry IPL before returning.
 */
void
gicv3_irq_handler(void *frame)
{
	struct cpu_info * const ci = curcpu();
	struct gicv3_softc * const sc = gicv3_softc;
	const int oldipl = ci->ci_cpl;

	ci->ci_data.cpu_nintr++;

	for (;;) {
		/* Acknowledge; this activates the interrupt. */
		const uint32_t iar = icc_iar1_read();
		const uint32_t irq = __SHIFTOUT(iar, ICC_IAR_INTID);
		if (irq == ICC_IAR_INTID_SPURIOUS)
			break;

		/*
		 * NOTE(review): an out-of-range INTID is skipped without
		 * writing EOI, leaving it active — presumably this cannot
		 * happen for a sanely configured GIC; confirm upstream.
		 */
		if (irq >= sc->sc_pic.pic_maxsources)
			continue;

		struct intrsource * const is = sc->sc_pic.pic_sources[irq];
		KASSERT(is != NULL);

		/* Raise IPL to the source's level before dispatching. */
		const int ipl = is->is_ipl;
		if (ci->ci_cpl < ipl)
			pic_set_priority(ci, ipl);

		/* Run the handler with IRQs enabled to allow preemption
		 * by higher-priority interrupts. */
		cpsie(I32_bit);
		pic_dispatch(is, frame);
		cpsid(I32_bit);

		/* EOImode=0 (see gicv3_cpu_init): drops priority and
		 * deactivates in one write. */
		icc_eoi1r_write(iar);
	}

	/* Restore the IPL we entered with. */
	if (ci->ci_cpl != oldipl)
		pic_set_priority(ci, oldipl);
}
495
/*
 * Attach-time initialization, run once on the primary CPU: register the
 * PIC, enable the distributor, initialize this CPU's interface, and
 * establish the software-interrupt and IPI sources.  Returns 0.
 */
int
gicv3_init(struct gicv3_softc *sc)
{
	const uint32_t gicd_typer = gicd_read_4(sc, GICD_TYPER);

	KASSERT(CPU_IS_PRIMARY(curcpu()));

	sc->sc_pic.pic_ops = &gicv3_picops;
	/* Number of supported INTIDs comes from GICD_TYPER.ITLinesNumber. */
	sc->sc_pic.pic_maxsources = GICD_TYPER_LINES(gicd_typer);
	snprintf(sc->sc_pic.pic_name, sizeof(sc->sc_pic.pic_name), "gicv3");
#ifdef MULTIPROCESSOR
	sc->sc_pic.pic_cpus = kcpuset_running;
#endif
	pic_add(&sc->sc_pic, 0);

	/* Only one GICv3 instance is supported (used by gicv3_irq_handler). */
	KASSERT(gicv3_softc == NULL);
	gicv3_softc = sc;

	/* Report each redistributor's affinity for debugging. */
	for (int i = 0; i < sc->sc_bsh_r_count; i++) {
		const uint64_t gicr_typer = gicr_read_8(sc, i, GICR_TYPER);
		const u_int aff0 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff0);
		const u_int aff1 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff1);
		const u_int aff2 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff2);
		const u_int aff3 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff3);

		aprint_debug_dev(sc->sc_dev, "redist %d: cpu %d.%d.%d.%d\n",
		    i, aff3, aff2, aff1, aff0);
	}

	/* Distributor first, then this CPU's interface/redistributor. */
	gicv3_dist_enable(sc);

	gicv3_cpu_init(&sc->sc_pic, curcpu());

#ifdef __HAVE_PIC_FAST_SOFTINTS
	/* Fast soft interrupts are delivered via per-CPU SGIs. */
	intr_establish(SOFTINT_BIO, IPL_SOFTBIO, IST_MPSAFE | IST_EDGE, pic_handle_softint, (void *)SOFTINT_BIO);
	intr_establish(SOFTINT_CLOCK, IPL_SOFTCLOCK, IST_MPSAFE | IST_EDGE, pic_handle_softint, (void *)SOFTINT_CLOCK);
	intr_establish(SOFTINT_NET, IPL_SOFTNET, IST_MPSAFE | IST_EDGE, pic_handle_softint, (void *)SOFTINT_NET);
	intr_establish(SOFTINT_SERIAL, IPL_SOFTSERIAL, IST_MPSAFE | IST_EDGE, pic_handle_softint, (void *)SOFTINT_SERIAL);
#endif

#ifdef MULTIPROCESSOR
	/* Inter-processor interrupts, one SGI per IPI type. */
	intr_establish(IPI_AST, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_ast, (void *)-1);
	intr_establish(IPI_XCALL, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_xcall, (void *)-1);
	intr_establish(IPI_GENERIC, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_generic, (void *)-1);
	intr_establish(IPI_NOP, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_nop, (void *)-1);
	intr_establish(IPI_SHOOTDOWN, IPL_SCHED, IST_MPSAFE | IST_EDGE, pic_ipi_shootdown, (void *)-1);
#ifdef DDB
	intr_establish(IPI_DDB, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_ddb, NULL);
#endif
#ifdef __HAVE_PREEMPTION
	intr_establish(IPI_KPREEMPT, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_kpreempt, (void *)-1);
#endif
#endif

	return 0;
}
552