/*	$NetBSD: pic.c,v 1.52 2019/12/24 20:40:09 skrll Exp $	*/
/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE
#include "opt_ddb.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.52 2019/12/24 20:40:09 skrll Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/interrupt.h>
#include <sys/xcall.h>
#include <sys/ipi.h>

#include <arm/armreg.h>
#include <arm/cpufunc.h>
#include <arm/locore.h>	/* for compat aarch64 */

#ifdef DDB
#include <arm/db_machdep.h>
#endif

#include <arm/pic/picvar.h>

#if defined(__HAVE_PIC_PENDING_INTRS)
/*
 * This implementation of pending interrupts on a MULTIPROCESSOR system makes
 * the assumption that a PIC (pic_softc) shall only have all its interrupts
 * come from the same CPU.  In other words, interrupts from a single PIC will
 * not be distributed among multiple CPUs.
 */
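/*
 * Note: pending_pics and blocked_pics below are bitmasks indexed by
 * pic_id, and pending_ipls is a mask of __BIT(ipl) values summarizing
 * every pic's own pic_pending_ipls (see pic_mark_pending_source()).
 */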
struct pic_pending {
	volatile uint32_t blocked_pics;
	volatile uint32_t pending_pics;
	volatile uint32_t pending_ipls;
};
static uint32_t
	pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int);
static struct pic_softc *
	pic_list_find_pic_by_pending_ipl(struct pic_pending *, uint32_t);
static void
	pic_deliver_irqs(struct pic_pending *, struct pic_softc *, int, void *);
static void
	pic_list_deliver_irqs(struct pic_pending *, register_t, int, void *);

#ifdef MULTIPROCESSOR
percpu_t *pic_pending_percpu;
#else
struct pic_pending pic_pending;
#endif /* MULTIPROCESSOR */
#endif /* __HAVE_PIC_PENDING_INTRS */

struct pic_softc *pic_list[PIC_MAXPICS];
#if PIC_MAXPICS > 32
#error PIC_MAXPICS > 32 not supported
#endif
struct intrsource *pic_sources[PIC_MAXMAXSOURCES];
struct intrsource *pic__iplsources[PIC_MAXMAXSOURCES];
struct intrsource **pic_iplsource[NIPL] = {
	[0 ... NIPL-1] = pic__iplsources,
};
size_t pic_ipl_offset[NIPL+1];
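
/*
 * pic__iplsources is kept sorted by ipl: the sources for a given ipl
 * occupy slots pic_ipl_offset[ipl] .. pic_ipl_offset[ipl+1] - 1.
 * pic_establish_intr() below maintains this invariant.
 */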

static kmutex_t pic_lock;
static size_t pic_sourcebase;
static int pic_lastbase;
static struct evcnt pic_deferral_ev =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "deferred", "intr");
EVCNT_ATTACH_STATIC(pic_deferral_ev);

static int pic_init(void);

#ifdef __HAVE_PIC_SET_PRIORITY
void
pic_set_priority(struct cpu_info *ci, int newipl)
{
	register_t psw = cpsid(I32_bit);
	if (pic_list[0] != NULL)
		(pic_list[0]->pic_ops->pic_set_priority)(pic_list[0], newipl);
	ci->ci_cpl = newipl;
	if ((psw & I32_bit) == 0)
		cpsie(I32_bit);
}
#endif
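
/*
 * Only the pic in slot 0 may supply a pic_set_priority method (see the
 * KASSERT in pic_add()), so changing the IPL above touches at most one
 * piece of interrupt-controller hardware plus ci->ci_cpl.
 */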

#ifdef MULTIPROCESSOR
int
pic_ipi_ast(void *arg)
{
	setsoftast(curcpu());
	return 1;
}

int
pic_ipi_nop(void *arg)
{
	/* do nothing */
	return 1;
}

int
pic_ipi_xcall(void *arg)
{
	xc_ipi_handler();
	return 1;
}

int
pic_ipi_generic(void *arg)
{
	ipi_cpu_handler();
	return 1;
}

#ifdef DDB
int
pic_ipi_ddb(void *arg)
{
//	printf("%s: %s: tf=%p\n", __func__, curcpu()->ci_cpuname, arg);
	kdb_trap(-1, arg);
	return 1;
}
#endif /* DDB */

#ifdef __HAVE_PREEMPTION
int
pic_ipi_kpreempt(void *arg)
{
	atomic_or_uint(&curcpu()->ci_astpending, __BIT(1));
	return 1;
}
#endif /* __HAVE_PREEMPTION */

void
intr_cpu_init(struct cpu_info *ci)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_ops->pic_cpu_init != NULL) {
			(*pic->pic_ops->pic_cpu_init)(pic, ci);
		}
	}
}

typedef void (*pic_ipi_send_func_t)(struct pic_softc *, u_long);

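/*
 * Usage sketch (IPI_* names as defined by the port): a NULL kcp
 * broadcasts, e.g. intr_ipi_send(NULL, IPI_NOP), hitting every
 * IPI-capable pic except the sender's own; a non-NULL kcpuset targets
 * only the pics whose pic_cpus intersect it.
 */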
void
intr_ipi_send(const kcpuset_t *kcp, u_long ipi)
{
	struct cpu_info * const ci = curcpu();
	KASSERT(ipi < NIPI);
	bool __diagused sent_p = false;
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_cpus == NULL)
			continue;
		if (kcp == NULL || kcpuset_intersecting_p(kcp, pic->pic_cpus)) {
			// never send to ourself
			if (pic->pic_cpus == ci->ci_kcpuset)
				continue;

			(*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi);
			// If we were targeting a single CPU or this pic
			// handles all cpus, we're done.
			if (kcp != NULL || pic->pic_cpus == kcpuset_running)
				return;
			sent_p = true;
		}
	}
	KASSERT(cold || sent_p || ncpu <= 1);
}
#endif /* MULTIPROCESSOR */

#ifdef __HAVE_PIC_FAST_SOFTINTS
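/*
 * pic_handle_softint() expects its arg to encode the softint level used
 * to index ci_softlwps; decrementing ci_intr_depth keeps the switch to
 * the softint lwp from being counted as a nested interrupt.
 */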
int
pic_handle_softint(void *arg)
{
	void softint_switch(lwp_t *, int);
	struct cpu_info * const ci = curcpu();
	const size_t softint = (size_t) arg;
	int s = splhigh();
	ci->ci_intr_depth--;	// don't count these as interrupts
	softint_switch(ci->ci_softlwps[softint], s);
	ci->ci_intr_depth++;
	splx(s);
	return 1;
}
#endif

int
pic_handle_intr(void *arg)
{
	struct pic_softc * const pic = arg;
	int rv;

	rv = (*pic->pic_ops->pic_find_pending_irqs)(pic);

	return rv > 0;
}

#if defined(__HAVE_PIC_PENDING_INTRS)
void
pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is)
{
	const uint32_t ipl_mask = __BIT(is->is_ipl);

	atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5],
	    __BIT(is->is_irq & 0x1f));

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
#ifdef MULTIPROCESSOR
	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
#else
	struct pic_pending *pend = &pic_pending;
#endif
	atomic_or_32(&pend->pending_ipls, ipl_mask);
	atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
#ifdef MULTIPROCESSOR
	percpu_putref(pic_pending_percpu);
#endif
}
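
/*
 * A note on ordering above (an inference, not documented here): the
 * per-pic pending word and pic_pending_ipls are set before the summary
 * bits in struct pic_pending, so a consumer that observes a summary bit
 * should also observe the per-pic detail behind it.
 */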

void
pic_mark_pending(struct pic_softc *pic, int irq)
{
	struct intrsource * const is = pic->pic_sources[irq];

	KASSERT(irq < pic->pic_maxsources);
	KASSERT(is != NULL);

	pic_mark_pending_source(pic, is);
}

uint32_t
pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base,
    uint32_t pending)
{
	struct intrsource ** const isbase = &pic->pic_sources[irq_base];
	struct intrsource *is;
	volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5];
	uint32_t ipl_mask = 0;

	if (pending == 0)
		return ipl_mask;

	KASSERT((irq_base & 31) == 0);

	(*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending);

	atomic_or_32(ipending, pending);
	while (pending != 0) {
		int n = ffs(pending);
		if (n-- == 0)
			break;
		is = isbase[n];
		KASSERT(is != NULL);
		KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32);
		pending &= ~__BIT(n);
		ipl_mask |= __BIT(is->is_ipl);
	}

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
#ifdef MULTIPROCESSOR
	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
#else
	struct pic_pending *pend = &pic_pending;
#endif
	atomic_or_32(&pend->pending_ipls, ipl_mask);
	atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
#ifdef MULTIPROCESSOR
	percpu_putref(pic_pending_percpu);
#endif
	return ipl_mask;
}
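
/*
 * Typical use (hedged): a pic driver's pic_find_pending_irqs hook reads
 * a 32-bit pending snapshot from hardware and hands it to
 * pic_mark_pending_sources() with the matching irq_base; the returned
 * ipl mask is usually what the hook passes back, which pic_handle_intr()
 * only tests for being non-zero.
 */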

uint32_t
pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base,
    uint32_t pending, int ipl)
{
	uint32_t ipl_irq_mask = 0;
	uint32_t irq_mask;

	for (;;) {
		int irq = ffs(pending);
		if (irq-- == 0)
			return ipl_irq_mask;

		irq_mask = __BIT(irq);
#if 1
		KASSERTMSG(pic->pic_sources[irq_base + irq] != NULL,
		    "%s: irq_base %zu irq %d\n", __func__, irq_base, irq);
#else
		if (pic->pic_sources[irq_base + irq] == NULL) {
			aprint_error("stray interrupt? irq_base=%zu irq=%d\n",
			    irq_base, irq);
		} else
#endif
		if (pic->pic_sources[irq_base + irq]->is_ipl == ipl)
			ipl_irq_mask |= irq_mask;

		pending &= ~irq_mask;
	}
}
#endif /* __HAVE_PIC_PENDING_INTRS */

void
pic_dispatch(struct intrsource *is, void *frame)
{
	int (*func)(void *) = is->is_func;
	void *arg = is->is_arg;

	if (__predict_false(arg == NULL)) {
		if (__predict_false(frame == NULL)) {
			pic_deferral_ev.ev_count++;
			return;
		}
		arg = frame;
	}

#ifdef MULTIPROCESSOR
	if (!is->is_mpsafe) {
		KERNEL_LOCK(1, NULL);
		const u_int ci_blcnt __diagused = curcpu()->ci_biglock_count;
		const u_int l_blcnt __diagused = curlwp->l_blcnt;
		(void)(*func)(arg);
		KASSERT(ci_blcnt == curcpu()->ci_biglock_count);
		KASSERT(l_blcnt == curlwp->l_blcnt);
		KERNEL_UNLOCK_ONE(NULL);
	} else
#endif
		(void)(*func)(arg);

	struct pic_percpu * const pcpu = percpu_getref(is->is_pic->pic_percpu);
	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	pcpu->pcpu_evs[is->is_irq].ev_count++;
	percpu_putref(is->is_pic->pic_percpu);
}

#if defined(__HAVE_PIC_PENDING_INTRS)
void
pic_deliver_irqs(struct pic_pending *pend, struct pic_softc *pic, int ipl,
    void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct intrsource *is;
	volatile uint32_t *ipending = pic->pic_pending_irqs;
	volatile uint32_t *iblocked = pic->pic_blocked_irqs;
	size_t irq_base;
#if PIC_MAXSOURCES > 32
	size_t irq_count;
	int poi = 0;		/* Possibility of interrupting */
#endif
	uint32_t pending_irqs;
	uint32_t blocked_irqs;
	int irq;
	bool progress __diagused = false;

	KASSERT(pic->pic_pending_ipls & ipl_mask);

	irq_base = 0;
#if PIC_MAXSOURCES > 32
	irq_count = 0;
#endif

	for (;;) {
		pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base,
		    *ipending, ipl);
		KASSERT((pending_irqs & *ipending) == pending_irqs);
		KASSERT((pending_irqs & ~(*ipending)) == 0);
		if (pending_irqs == 0) {
#if PIC_MAXSOURCES > 32
			irq_count += 32;
			if (__predict_true(irq_count >= pic->pic_maxsources)) {
				if (!poi)
					/* Interrupt at this level was handled. */
					break;
				irq_base = 0;
				irq_count = 0;
				poi = 0;
				ipending = pic->pic_pending_irqs;
				iblocked = pic->pic_blocked_irqs;
			} else {
				irq_base += 32;
				ipending++;
				iblocked++;
				KASSERT(irq_base <= pic->pic_maxsources);
			}
			continue;
#else
			break;
#endif
		}
		progress = true;
		blocked_irqs = 0;
		do {
			irq = ffs(pending_irqs) - 1;
			KASSERT(irq >= 0);

			atomic_and_32(ipending, ~__BIT(irq));
			is = pic->pic_sources[irq_base + irq];
			if (is != NULL) {
				cpsie(I32_bit);
				pic_dispatch(is, frame);
				cpsid(I32_bit);
#if PIC_MAXSOURCES > 32
				/*
				 * There is a possibility of interrupting
				 * from cpsie() to cpsid().
				 */
				poi = 1;
#endif
				blocked_irqs |= __BIT(irq);
			} else {
				KASSERT(0);
			}
			pending_irqs = pic_find_pending_irqs_by_ipl(pic,
			    irq_base, *ipending, ipl);
		} while (pending_irqs);
		if (blocked_irqs) {
			atomic_or_32(iblocked, blocked_irqs);
			atomic_or_32(&pend->blocked_pics, __BIT(pic->pic_id));
		}
	}

	KASSERT(progress);
	/*
	 * Since interrupts are disabled, we don't have to be too careful
	 * about these.
	 */
	if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
		atomic_and_32(&pend->pending_pics, ~__BIT(pic->pic_id));
}
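
/*
 * Summary of the loop above: interrupts are re-enabled around
 * pic_dispatch(), so new sources can become pending while handlers run;
 * with more than 32 sources, the poi flag forces one more pass over all
 * the pending words whenever that window was open.
 */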

static void
pic_list_unblock_irqs(struct pic_pending *pend)
{
	uint32_t blocked_pics = pend->blocked_pics;

	pend->blocked_pics = 0;

	for (;;) {
		struct pic_softc *pic;
#if PIC_MAXSOURCES > 32
		volatile uint32_t *iblocked;
		uint32_t blocked;
		size_t irq_base;
#endif

		int pic_id = ffs(blocked_pics);
		if (pic_id-- == 0)
			return;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
#if PIC_MAXSOURCES > 32
		for (irq_base = 0, iblocked = pic->pic_blocked_irqs;
		    irq_base < pic->pic_maxsources;
		    irq_base += 32, iblocked++) {
			if ((blocked = *iblocked) != 0) {
				(*pic->pic_ops->pic_unblock_irqs)(pic,
				    irq_base, blocked);
				atomic_and_32(iblocked, ~blocked);
			}
		}
#else
		KASSERT(pic->pic_blocked_irqs[0] != 0);
		(*pic->pic_ops->pic_unblock_irqs)(pic,
		    0, pic->pic_blocked_irqs[0]);
		pic->pic_blocked_irqs[0] = 0;
#endif
		blocked_pics &= ~__BIT(pic_id);
	}
}

struct pic_softc *
pic_list_find_pic_by_pending_ipl(struct pic_pending *pend, uint32_t ipl_mask)
{
	uint32_t pending_pics = pend->pending_pics;
	struct pic_softc *pic;

	for (;;) {
		int pic_id = ffs(pending_pics);
		if (pic_id-- == 0)
			return NULL;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
		if (pic->pic_pending_ipls & ipl_mask)
			return pic;
		pending_pics &= ~__BIT(pic_id);
	}
}

void
pic_list_deliver_irqs(struct pic_pending *pend, register_t psw, int ipl,
    void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct pic_softc *pic;

	while ((pic = pic_list_find_pic_by_pending_ipl(pend, ipl_mask)) != NULL) {
		pic_deliver_irqs(pend, pic, ipl, frame);
		KASSERT((pic->pic_pending_ipls & ipl_mask) == 0);
	}
	atomic_and_32(&pend->pending_ipls, ~ipl_mask);
}
#endif /* __HAVE_PIC_PENDING_INTRS */

void
pic_do_pending_ints(register_t psw, int newipl, void *frame)
{
	struct cpu_info * const ci = curcpu();
	if (__predict_false(newipl == IPL_HIGH)) {
		KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl %d", ci->ci_cpl);
		return;
	}
#if defined(__HAVE_PIC_PENDING_INTRS)
#ifdef MULTIPROCESSOR
	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
#else
	struct pic_pending *pend = &pic_pending;
#endif
	while ((pend->pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
		KASSERT(pend->pending_ipls < __BIT(NIPL));
		for (;;) {
			int ipl = 31 - __builtin_clz(pend->pending_ipls);
			KASSERT(ipl < NIPL);
			if (ipl <= newipl)
				break;

			pic_set_priority(ci, ipl);
			pic_list_deliver_irqs(pend, psw, ipl, frame);
			pic_list_unblock_irqs(pend);
		}
	}
#ifdef MULTIPROCESSOR
	percpu_putref(pic_pending_percpu);
#endif
#endif /* __HAVE_PIC_PENDING_INTRS */
#ifdef __HAVE_PREEMPTION
	if (newipl == IPL_NONE && (ci->ci_astpending & __BIT(1))) {
		pic_set_priority(ci, IPL_SCHED);
		kpreempt(0);
	}
#endif
	if (ci->ci_cpl != newipl)
		pic_set_priority(ci, newipl);
}
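
/*
 * pic_do_pending_ints() is the replay path for lowering the IPL (called,
 * e.g., from the splx() path): it drains pending ipls from highest down
 * to just above newipl, delivering then unblocking, and finally handles
 * any deferred kpreemption before dropping ci_cpl.
 */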

static void
pic_percpu_allocate(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct pic_softc * const pic = v1;

	pcpu->pcpu_evs = kmem_zalloc(pic->pic_maxsources * sizeof(pcpu->pcpu_evs[0]),
	    KM_SLEEP);
	KASSERT(pcpu->pcpu_evs != NULL);

#define	PCPU_NAMELEN	32
#ifdef DIAGNOSTIC
	const size_t namelen = strlen(pic->pic_name) + 4 + strlen(ci->ci_data.cpu_name);
#endif

	KASSERT(namelen < PCPU_NAMELEN);
	pcpu->pcpu_name = kmem_alloc(PCPU_NAMELEN, KM_SLEEP);
#ifdef MULTIPROCESSOR
	snprintf(pcpu->pcpu_name, PCPU_NAMELEN,
	    "%s (%s)", pic->pic_name, ci->ci_data.cpu_name);
#else
	strlcpy(pcpu->pcpu_name, pic->pic_name, PCPU_NAMELEN);
#endif
	pcpu->pcpu_magic = PICPERCPU_MAGIC;
#if 0
	printf("%s: %s %s: <%s>\n",
	    __func__, ci->ci_data.cpu_name, pic->pic_name,
	    pcpu->pcpu_name);
#endif
}

#if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR)
static void
pic_pending_zero(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_pending * const p = v0;
	memset(p, 0, sizeof(*p));
}
#endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */

static int
pic_init(void)
{

	mutex_init(&pic_lock, MUTEX_DEFAULT, IPL_HIGH);

	return 0;
}

int
pic_add(struct pic_softc *pic, int irqbase)
{
	int slot, maybe_slot = -1;
	size_t sourcebase;
	static ONCE_DECL(pic_once);

	RUN_ONCE(&pic_once, pic_init);

	KASSERT(strlen(pic->pic_name) > 0);

#if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR)
	if (__predict_false(pic_pending_percpu == NULL)) {
		pic_pending_percpu = percpu_alloc(sizeof(struct pic_pending));

		/*
		 * Now zero the per-cpu pending data.
		 */
		percpu_foreach(pic_pending_percpu, pic_pending_zero, NULL);
	}
#endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */

	mutex_enter(&pic_lock);
	if (irqbase == PIC_IRQBASE_ALLOC) {
		irqbase = pic_lastbase;
	}
	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const xpic = pic_list[slot];
		if (xpic == NULL) {
			if (maybe_slot < 0)
				maybe_slot = slot;
			if (irqbase < 0)
				break;
			continue;
		}
		if (irqbase < 0 || xpic->pic_irqbase < 0)
			continue;
		if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources)
			continue;
		if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase)
			continue;
		panic("pic_add: pic %s (%zu sources @ irq %u) conflicts"
		    " with pic %s (%zu sources @ irq %u)",
		    pic->pic_name, pic->pic_maxsources, irqbase,
		    xpic->pic_name, xpic->pic_maxsources, xpic->pic_irqbase);
	}
	slot = maybe_slot;
#if 0
	printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n",
	    pic->pic_name, pic_sourcebase, pic->pic_maxsources);
#endif
	KASSERTMSG(pic->pic_maxsources <= PIC_MAXSOURCES, "%zu",
	    pic->pic_maxsources);
	KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES);
	sourcebase = pic_sourcebase;
	pic_sourcebase += pic->pic_maxsources;
	if (pic_lastbase < irqbase + pic->pic_maxsources)
		pic_lastbase = irqbase + pic->pic_maxsources;
	mutex_exit(&pic_lock);

	/*
	 * Allocate a pointer to each cpu's evcnts and then, for each cpu,
	 * allocate its evcnts and then attach an evcnt for each pin.
	 * We can't allocate the evcnt structures directly since
	 * percpu will move the contents of percpu memory around and
	 * corrupt the pointers in the evcnts themselves.  Remember, any
	 * problem can be solved with sufficient indirection.
	 */
	pic->pic_percpu = percpu_alloc(sizeof(struct pic_percpu));

	/*
	 * Now allocate the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_allocate, pic);

	pic->pic_sources = &pic_sources[sourcebase];
	pic->pic_irqbase = irqbase;
	pic->pic_id = slot;
#ifdef __HAVE_PIC_SET_PRIORITY
	KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL));
#endif
#ifdef MULTIPROCESSOR
	KASSERT((pic->pic_cpus != NULL) == (pic->pic_ops->pic_ipi_send != NULL));
#endif
	pic_list[slot] = pic;

	return irqbase;
}
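
/*
 * Attachment sketch (hypothetical "sc" driver softc): fill in pic_ops,
 * pic_maxsources and pic_name, then
 *
 *	sc->sc_irqbase = pic_add(&sc->sc_pic, PIC_IRQBASE_ALLOC);
 *
 * Passing PIC_IRQBASE_ALLOC lets pic_add() assign the next free global
 * irq base, which is returned to the caller.
 */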

int
pic_alloc_irq(struct pic_softc *pic)
{
	int irq;

	for (irq = 0; irq < pic->pic_maxsources; irq++) {
		if (pic->pic_sources[irq] == NULL)
			return irq;
	}

	return -1;
}

static void
pic_percpu_evcnt_attach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_attach_dynamic(&pcpu->pcpu_evs[is->is_irq], EVCNT_TYPE_INTR, NULL,
	    pcpu->pcpu_name, is->is_source);
}

void *
pic_establish_intr(struct pic_softc *pic, int irq, int ipl, int type,
    int (*func)(void *), void *arg, const char *xname)
{
	struct intrsource *is;
	int off, nipl;

	if (pic->pic_sources[irq]) {
		printf("pic_establish_intr: pic %s irq %d already present\n",
		    pic->pic_name, irq);
		return NULL;
	}

	is = kmem_zalloc(sizeof(*is), KM_SLEEP);
	is->is_pic = pic;
	is->is_irq = irq;
	is->is_ipl = ipl;
	is->is_type = type & 0xff;
	is->is_func = func;
	is->is_arg = arg;
#ifdef MULTIPROCESSOR
	is->is_mpsafe = (type & IST_MPSAFE) || ipl != IPL_VM;
#endif

	if (pic->pic_ops->pic_source_name)
		(*pic->pic_ops->pic_source_name)(pic, irq, is->is_source,
		    sizeof(is->is_source));
	else
		snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);

	/*
	 * Now attach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_attach, is);

	pic->pic_sources[irq] = is;

	/*
	 * First try to use an existing slot which is empty.
	 */
	for (off = pic_ipl_offset[ipl]; off < pic_ipl_offset[ipl+1]; off++) {
		if (pic__iplsources[off] == NULL) {
			is->is_iplidx = off - pic_ipl_offset[ipl];
			pic__iplsources[off] = is;
			goto unblock;
		}
	}

	/*
	 * Move up all the sources by one.
	 */
	if (ipl < NIPL) {
		off = pic_ipl_offset[ipl+1];
		memmove(&pic__iplsources[off+1], &pic__iplsources[off],
		    sizeof(pic__iplsources[0]) * (pic_ipl_offset[NIPL] - off));
	}

	/*
	 * Advance the offset of all IPLs higher than this.  Include an
	 * extra one as well.  Thus the number of sources per ipl is
	 * pic_ipl_offset[ipl+1] - pic_ipl_offset[ipl].
	 */
	for (nipl = ipl + 1; nipl <= NIPL; nipl++)
		pic_ipl_offset[nipl]++;

	/*
	 * Insert into the previously made position at the end of this IPL's
	 * sources.
	 */
	off = pic_ipl_offset[ipl + 1] - 1;
	is->is_iplidx = off - pic_ipl_offset[ipl];
	pic__iplsources[off] = is;

	(*pic->pic_ops->pic_establish_irq)(pic, is);

unblock:
	(*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
	    __BIT(is->is_irq & 0x1f));

	if (xname) {
		if (is->is_xname == NULL)
			is->is_xname = kmem_zalloc(INTRDEVNAMEBUF, KM_SLEEP);
		if (is->is_xname[0] != '\0')
			strlcat(is->is_xname, ", ", INTRDEVNAMEBUF);
		strlcat(is->is_xname, xname, INTRDEVNAMEBUF);
	}

	/* We're done. */
	return is;
}

static void
pic_percpu_evcnt_deattach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_detach(&pcpu->pcpu_evs[is->is_irq]);
}

void
pic_disestablish_source(struct intrsource *is)
{
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	KASSERT(is == pic->pic_sources[irq]);

	(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
	pic->pic_sources[irq] = NULL;
	pic__iplsources[pic_ipl_offset[is->is_ipl] + is->is_iplidx] = NULL;
	if (is->is_xname != NULL) {
		kmem_free(is->is_xname, INTRDEVNAMEBUF);
		is->is_xname = NULL;
	}
	/*
	 * Now detach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_deattach, is);

	kmem_free(is, sizeof(*is));
}

void *
intr_establish(int irq, int ipl, int type, int (*func)(void *), void *arg)
{
	return intr_establish_xname(irq, ipl, type, func, arg, NULL);
}

void *
intr_establish_xname(int irq, int ipl, int type, int (*func)(void *), void *arg,
    const char *xname)
{
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			return pic_establish_intr(pic, irq - pic->pic_irqbase,
			    ipl, type, func, arg, xname);
		}
	}

	return NULL;
}
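
/*
 * Establishment sketch (hypothetical handler and softc names):
 *
 *	void *ih = intr_establish_xname(irq, IPL_VM,
 *	    IST_LEVEL | IST_MPSAFE, foo_intr, sc, device_xname(self));
 *
 * The irq argument is a global number; the loop above maps it onto the
 * owning pic via pic_irqbase.  pic_establish_intr() turns IST_MPSAFE
 * into is_mpsafe.
 */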

void
intr_disestablish(void *ih)
{
	struct intrsource * const is = ih;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	pic_disestablish_source(is);
}

void
intr_mask(void *ih)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	if (atomic_inc_32_nv(&is->is_mask_count) == 1)
		(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
}

void
intr_unmask(void *ih)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	if (atomic_dec_32_nv(&is->is_mask_count) == 0)
		(*pic->pic_ops->pic_unblock_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
}
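
/*
 * intr_mask()/intr_unmask() above nest: is_mask_count is a reference
 * count, so the irq is blocked on the first mask and unblocked again
 * only when the final mask is released.
 */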

const char *
intr_string(intr_handle_t irq, char *buf, size_t len)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			struct intrsource * const is = pic->pic_sources[irq - pic->pic_irqbase];
			snprintf(buf, len, "%s %s", pic->pic_name, is->is_source);
			return buf;
		}
	}

	return NULL;
}

static struct intrsource *
intr_get_source(const char *intrid)
{
	struct intrsource *is;
	intrid_t buf;
	size_t slot;
	int irq;

	KASSERT(mutex_owned(&cpu_lock));

	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		for (irq = 0; irq < pic->pic_maxsources; irq++) {
			is = pic->pic_sources[irq];
			if (is == NULL || is->is_source[0] == '\0')
				continue;

			snprintf(buf, sizeof(buf), "%s %s", pic->pic_name, is->is_source);
			if (strcmp(buf, intrid) == 0)
				return is;
		}
	}

	return NULL;
}

struct intrids_handler *
interrupt_construct_intrids(const kcpuset_t *cpuset)
{
	struct intrids_handler *iih;
	struct intrsource *is;
	int count, irq, n;
	size_t slot;

	if (kcpuset_iszero(cpuset))
		return NULL;

	count = 0;
	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_irqbase >= 0) {
			for (irq = 0; irq < pic->pic_maxsources; irq++) {
				is = pic->pic_sources[irq];
				if (is && is->is_source[0] != '\0')
					count++;
			}
		}
	}

	iih = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count, KM_SLEEP);
	iih->iih_nids = count;

	for (n = 0, slot = 0; n < count && slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		for (irq = 0; irq < pic->pic_maxsources; irq++) {
			is = pic->pic_sources[irq];
			if (is == NULL || is->is_source[0] == '\0')
				continue;

			snprintf(iih->iih_intrids[n++], sizeof(intrid_t), "%s %s",
			    pic->pic_name, is->is_source);
		}
	}

	return iih;
}
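
/*
 * The iih allocation above is sized sizeof(int) + count * sizeof(intrid_t);
 * interrupt_destruct_intrids() below frees it using the same formula, so
 * the two must stay in sync.
 */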

void
interrupt_destruct_intrids(struct intrids_handler *iih)
{
	if (iih == NULL)
		return;

	kmem_free(iih, sizeof(int) + sizeof(intrid_t) * iih->iih_nids);
}

void
interrupt_get_available(kcpuset_t *cpuset)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
			kcpuset_set(cpuset, cpu_index(ci));
	}
	mutex_exit(&cpu_lock);
}

void
interrupt_get_devname(const char *intrid, char *buf, size_t len)
{
	struct intrsource *is;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is == NULL || is->is_xname == NULL)
		buf[0] = '\0';
	else
		strlcpy(buf, is->is_xname, len);
	mutex_exit(&cpu_lock);
}

struct interrupt_get_count_arg {
	struct intrsource *is;
	uint64_t count;
	u_int cpu_idx;
};

static void
interrupt_get_count_cb(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct interrupt_get_count_arg * const arg = v1;

	if (arg->cpu_idx != cpu_index(ci))
		return;

	arg->count = pcpu->pcpu_evs[arg->is->is_irq].ev_count;
}

uint64_t
interrupt_get_count(const char *intrid, u_int cpu_idx)
{
	struct interrupt_get_count_arg arg;
	struct intrsource *is;
	uint64_t count;

	count = 0;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is != NULL && is->is_pic != NULL) {
		arg.is = is;
		arg.count = 0;
		arg.cpu_idx = cpu_idx;
		percpu_foreach(is->is_pic->pic_percpu, interrupt_get_count_cb, &arg);
		count = arg.count;
	}
	mutex_exit(&cpu_lock);

	return count;
}

#ifdef MULTIPROCESSOR
void
interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
	struct intrsource *is;
	struct pic_softc *pic;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is != NULL) {
		pic = is->is_pic;
		if (pic && pic->pic_ops->pic_get_affinity)
			pic->pic_ops->pic_get_affinity(pic, is->is_irq, cpuset);
	}
	mutex_exit(&cpu_lock);
}

int
interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
    kcpuset_t *oldset)
{
	struct intrsource *is;
	int error;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is == NULL) {
		error = ENOENT;
	} else {
		error = interrupt_distribute(is, newset, oldset);
	}
	mutex_exit(&cpu_lock);

	return error;
}

int
interrupt_distribute(void *ih, const kcpuset_t *newset, kcpuset_t *oldset)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;

	if (pic == NULL)
		return EOPNOTSUPP;
	if (pic->pic_ops->pic_set_affinity == NULL ||
	    pic->pic_ops->pic_get_affinity == NULL)
		return EOPNOTSUPP;

	if (!is->is_mpsafe)
		return EINVAL;

	if (oldset != NULL)
		pic->pic_ops->pic_get_affinity(pic, is->is_irq, oldset);

	return pic->pic_ops->pic_set_affinity(pic, is->is_irq, newset);
}
#endif