/*	$NetBSD: pic.c,v 1.84 2022/10/29 15:13:27 riastradh Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE
#include "opt_ddb.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.84 2022/10/29 15:13:27 riastradh Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/interrupt.h>
#include <sys/intr.h>
#include <sys/ipi.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/xcall.h>

#include <arm/armreg.h>
#include <arm/cpufunc.h>
#include <arm/locore.h>	/* for compat aarch64 */

#ifdef DDB
#include <arm/db_machdep.h>
#endif

#include <arm/pic/picvar.h>

#if defined(__HAVE_PIC_PENDING_INTRS)
/*
 * This implementation of pending interrupts on a MULTIPROCESSOR system makes
 * the assumption that a PIC (pic_softc) shall only have all its interrupts
 * come from the same CPU.  In other words, interrupts from a single PIC will
 * not be distributed among multiple CPUs.
 */
static uint32_t
	pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int);
static struct pic_softc *
	pic_list_find_pic_by_pending_ipl(struct cpu_info *, uint32_t);
static void
	pic_deliver_irqs(struct cpu_info *, struct pic_softc *, int, void *);
static void
	pic_list_deliver_irqs(struct cpu_info *, register_t, int, void *);

#endif /* __HAVE_PIC_PENDING_INTRS */

struct pic_softc *pic_list[PIC_MAXPICS];
#if PIC_MAXPICS > 32
#error PIC_MAXPICS > 32 not supported
#endif
struct intrsource *pic_sources[PIC_MAXMAXSOURCES];
struct intrsource *pic__iplsources[PIC_MAXMAXSOURCES];
size_t pic_ipl_offset[NIPL + 1];

static kmutex_t pic_lock;
static size_t pic_sourcebase;
static int pic_lastbase;
static struct evcnt pic_deferral_ev =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "deferred", "intr");
EVCNT_ATTACH_STATIC(pic_deferral_ev);

static int pic_init(void);

#ifdef __HAVE_PIC_SET_PRIORITY
void
pic_set_priority(struct cpu_info *ci, int newipl)
{
	if (__predict_false(pic_list[0] == NULL)) {
		ci->ci_cpl = newipl;
		return;
	}

	pic_list[0]->pic_ops->pic_set_priority(pic_list[0], newipl);
}
#endif

#ifdef MULTIPROCESSOR
int
pic_ipi_ast(void *arg)
{
	setsoftast(curcpu());
	return 1;
}

int
pic_ipi_nop(void *arg)
{
	/* do nothing */
	return 1;
}

int
pic_ipi_xcall(void *arg)
{
	xc_ipi_handler();
	return 1;
}

int
pic_ipi_generic(void *arg)
{
	ipi_cpu_handler();
	return 1;
}

#ifdef DDB
int
pic_ipi_ddb(void *arg)
{
//	printf("%s: %s: tf=%p\n", __func__, curcpu()->ci_cpuname, arg);
	kdb_trap(-1, arg);
	return 1;
}
#endif /* DDB */

#ifdef __HAVE_PREEMPTION
int
pic_ipi_kpreempt(void *arg)
{
	struct lwp * const l = curlwp;

	l->l_md.md_astpending |= __BIT(1);
	return 1;
}
#endif /* __HAVE_PREEMPTION */

void
intr_cpu_init(struct cpu_info *ci)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_ops->pic_cpu_init != NULL) {
			(*pic->pic_ops->pic_cpu_init)(pic, ci);
		}
	}
}

typedef void (*pic_ipi_send_func_t)(struct pic_softc *, u_long);

void
intr_ipi_send(const kcpuset_t *kcp, u_long ipi)
{
	struct cpu_info * const ci = curcpu();
	KASSERT(ipi < NIPI);
	KASSERT(kcp == NULL || kcpuset_countset(kcp) == 1);
	bool __diagused sent_p = false;
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_cpus == NULL)
			continue;
		if (kcp == NULL || kcpuset_intersecting_p(kcp, pic->pic_cpus)) {
			/*
			 * Never send to ourself.
			 *
			 * This test uses pointer comparison for systems
			 * that have a pic per cpu, e.g. RPI[23].  GIC sets
			 * pic_cpus to kcpuset_running and handles "not for
			 * self" internally.
			 */
			if (pic->pic_cpus == ci->ci_kcpuset)
				continue;

			(*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi);

			/*
			 * If we were targeting a single CPU or this pic
			 * handles all cpus, we're done.
			 */
			if (kcp != NULL || pic->pic_cpus == kcpuset_running)
				return;
			sent_p = true;
		}
	}
	KASSERTMSG(cold || sent_p || ncpu <= 1, "cold %d sent_p %d ncpu %d",
	    cold, sent_p, ncpu);
}
#endif /* MULTIPROCESSOR */

#ifdef __HAVE_PIC_FAST_SOFTINTS
int
pic_handle_softint(void *arg)
{
	void softint_switch(lwp_t *, int);
	struct cpu_info * const ci = curcpu();
	const size_t softint = (size_t) arg;
	int s = splhigh();
	ci->ci_intr_depth--;	// don't count these as interrupts
	softint_switch(ci->ci_softlwps[softint], s);
	ci->ci_intr_depth++;
	splx(s);
	return 1;
}
#endif

int
pic_handle_intr(void *arg)
{
	struct pic_softc * const pic = arg;
	int rv;

	rv = (*pic->pic_ops->pic_find_pending_irqs)(pic);

	return rv > 0;
}

#if defined(__HAVE_PIC_PENDING_INTRS)
void
pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is)
{
	const uint32_t ipl_mask = __BIT(is->is_ipl);
	struct cpu_info * const ci = curcpu();

	atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5],
	    __BIT(is->is_irq & 0x1f));

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
	ci->ci_pending_ipls |= ipl_mask;
	ci->ci_pending_pics |= __BIT(pic->pic_id);
}
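
/*
 * Worked example of the bookkeeping above (illustrative note, not part of
 * the original source): for a source with is_irq == 37, the first
 * atomic_or_32() sets bit 37 & 0x1f == 5 in pic_pending_irqs[37 >> 5] ==
 * pic_pending_irqs[1].  If the source was established at IPL_VM,
 * __BIT(IPL_VM) is or'ed into both the pic's pic_pending_ipls and the
 * current cpu's ci_pending_ipls, and the pic's id bit is set in
 * ci_pending_pics so that pic_do_pending_ints() can find the work later.
 */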

void
pic_mark_pending(struct pic_softc *pic, int irq)
{
	struct intrsource * const is = pic->pic_sources[irq];

	KASSERT(irq < pic->pic_maxsources);
	KASSERT(is != NULL);

	pic_mark_pending_source(pic, is);
}

uint32_t
pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base,
    uint32_t pending)
{
	struct intrsource ** const isbase = &pic->pic_sources[irq_base];
	struct cpu_info * const ci = curcpu();
	struct intrsource *is;
	volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5];
	uint32_t ipl_mask = 0;

	if (pending == 0)
		return ipl_mask;

	KASSERT((irq_base & 31) == 0);

	(*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending);

	atomic_or_32(ipending, pending);
	while (pending != 0) {
		int n = ffs(pending);
		if (n-- == 0)
			break;
		is = isbase[n];
		KASSERT(is != NULL);
		KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32);
		pending &= ~__BIT(n);
		ipl_mask |= __BIT(is->is_ipl);
	}

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
	ci->ci_pending_ipls |= ipl_mask;
	ci->ci_pending_pics |= __BIT(pic->pic_id);

	return ipl_mask;
}

static uint32_t
pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base,
    uint32_t pending, int ipl)
{
	uint32_t ipl_irq_mask = 0;
	uint32_t irq_mask;

	for (;;) {
		int irq = ffs(pending);
		if (irq-- == 0)
			return ipl_irq_mask;

		irq_mask = __BIT(irq);
#if 1
		KASSERTMSG(pic->pic_sources[irq_base + irq] != NULL,
		    "%s: irq_base %zu irq %d\n", __func__, irq_base, irq);
#else
		if (pic->pic_sources[irq_base + irq] == NULL) {
			aprint_error("stray interrupt? irq_base=%zu irq=%d\n",
			    irq_base, irq);
		} else
#endif
		if (pic->pic_sources[irq_base + irq]->is_ipl == ipl)
			ipl_irq_mask |= irq_mask;

		pending &= ~irq_mask;
	}
}
#endif /* __HAVE_PIC_PENDING_INTRS */

void
pic_dispatch(struct intrsource *is, void *frame)
{
	int (*func)(void *) = is->is_func;
	void *arg = is->is_arg;
	int ocpl, ncpl;

	if (__predict_false(arg == NULL)) {
		if (__predict_false(frame == NULL)) {
			pic_deferral_ev.ev_count++;
			return;
		}
		arg = frame;
	}

	ocpl = curcpu()->ci_cpl;
#ifdef MULTIPROCESSOR
	const bool mpsafe = is->is_mpsafe;
#else
	const bool mpsafe = true;
#endif
	if (!mpsafe) {
		KERNEL_LOCK(1, NULL);
		const u_int ci_blcnt __diagused = curcpu()->ci_biglock_count;
		const u_int l_blcnt __diagused = curlwp->l_blcnt;
		(void)(*func)(arg);
		KASSERT(ci_blcnt == curcpu()->ci_biglock_count);
		KASSERT(l_blcnt == curlwp->l_blcnt);
		KERNEL_UNLOCK_ONE(NULL);
	} else {
		(void)(*func)(arg);
	}
	ncpl = curcpu()->ci_cpl;
	KASSERTMSG(ocpl <= ncpl, "pic %s irq %u intrsource %s:"
	    " cpl slipped %d -> %d",
	    is->is_pic->pic_name, is->is_irq, is->is_source,
	    ocpl, ncpl);

	struct pic_percpu * const pcpu = percpu_getref(is->is_pic->pic_percpu);
	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	pcpu->pcpu_evs[is->is_irq].ev_count++;
	percpu_putref(is->is_pic->pic_percpu);
}

#if defined(__HAVE_PIC_PENDING_INTRS)
static void
pic_deliver_irqs(struct cpu_info *ci, struct pic_softc *pic, int ipl,
    void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct intrsource *is;
	volatile uint32_t *ipending = pic->pic_pending_irqs;
	volatile uint32_t *iblocked = pic->pic_blocked_irqs;
	size_t irq_base;
#if PIC_MAXSOURCES > 32
	size_t irq_count;
	int poi = 0;		/* Possibility of interrupting */
#endif
	uint32_t pending_irqs;
	uint32_t blocked_irqs;
	int irq;
	bool progress __diagused = false;

	KASSERT(pic->pic_pending_ipls & ipl_mask);

	irq_base = 0;
#if PIC_MAXSOURCES > 32
	irq_count = 0;
#endif

	for (;;) {
		pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base,
		    *ipending, ipl);
		KASSERT((pending_irqs & *ipending) == pending_irqs);
		KASSERT((pending_irqs & ~(*ipending)) == 0);
		if (pending_irqs == 0) {
#if PIC_MAXSOURCES > 32
			irq_count += 32;
			if (__predict_true(irq_count >= pic->pic_maxsources)) {
				if (!poi)
					/* Interrupt at this level was handled. */
					break;
				irq_base = 0;
				irq_count = 0;
				poi = 0;
				ipending = pic->pic_pending_irqs;
				iblocked = pic->pic_blocked_irqs;
			} else {
				irq_base += 32;
				ipending++;
				iblocked++;
				KASSERT(irq_base <= pic->pic_maxsources);
			}
			continue;
#else
			break;
#endif
		}
		progress = true;
		blocked_irqs = 0;
		do {
			irq = ffs(pending_irqs) - 1;
			KASSERT(irq >= 0);

			atomic_and_32(ipending, ~__BIT(irq));
			is = pic->pic_sources[irq_base + irq];
			if (is != NULL) {
				ENABLE_INTERRUPT();
				pic_dispatch(is, frame);
				DISABLE_INTERRUPT();
#if PIC_MAXSOURCES > 32
				/*
				 * There is a possibility of interrupting
				 * from ENABLE_INTERRUPT() to
				 * DISABLE_INTERRUPT().
				 */
				poi = 1;
#endif
				blocked_irqs |= __BIT(irq);
			} else {
				KASSERT(0);
			}
			pending_irqs = pic_find_pending_irqs_by_ipl(pic,
			    irq_base, *ipending, ipl);
		} while (pending_irqs);
		if (blocked_irqs) {
			atomic_or_32(iblocked, blocked_irqs);
			ci->ci_blocked_pics |= __BIT(pic->pic_id);
		}
	}

	KASSERT(progress);
	/*
	 * Since interrupts are disabled, we don't have to be too careful
	 * about these.
	 */
	if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
		ci->ci_pending_pics &= ~__BIT(pic->pic_id);
}

static void
pic_list_unblock_irqs(struct cpu_info *ci)
{
	uint32_t blocked_pics = ci->ci_blocked_pics;

	ci->ci_blocked_pics = 0;

	for (;;) {
		struct pic_softc *pic;
#if PIC_MAXSOURCES > 32
		volatile uint32_t *iblocked;
		uint32_t blocked;
		size_t irq_base;
#endif

		int pic_id = ffs(blocked_pics);
		if (pic_id-- == 0)
			return;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
#if PIC_MAXSOURCES > 32
		for (irq_base = 0, iblocked = pic->pic_blocked_irqs;
		     irq_base < pic->pic_maxsources;
		     irq_base += 32, iblocked++) {
			if ((blocked = *iblocked) != 0) {
				(*pic->pic_ops->pic_unblock_irqs)(pic,
				    irq_base, blocked);
				atomic_and_32(iblocked, ~blocked);
			}
		}
#else
		KASSERT(pic->pic_blocked_irqs[0] != 0);
		(*pic->pic_ops->pic_unblock_irqs)(pic,
		    0, pic->pic_blocked_irqs[0]);
		pic->pic_blocked_irqs[0] = 0;
#endif
		blocked_pics &= ~__BIT(pic_id);
	}
}

static struct pic_softc *
pic_list_find_pic_by_pending_ipl(struct cpu_info *ci, uint32_t ipl_mask)
{
	uint32_t pending_pics = ci->ci_pending_pics;
	struct pic_softc *pic;

	for (;;) {
		int pic_id = ffs(pending_pics);
		if (pic_id-- == 0)
			return NULL;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
		if (pic->pic_pending_ipls & ipl_mask)
			return pic;
		pending_pics &= ~__BIT(pic_id);
	}
}

static void
pic_list_deliver_irqs(struct cpu_info *ci, register_t psw, int ipl,
    void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct pic_softc *pic;

	while ((pic = pic_list_find_pic_by_pending_ipl(ci, ipl_mask)) != NULL) {
		pic_deliver_irqs(ci, pic, ipl, frame);
		KASSERT((pic->pic_pending_ipls & ipl_mask) == 0);
	}
	ci->ci_pending_ipls &= ~ipl_mask;
}
#endif /* __HAVE_PIC_PENDING_INTRS */

void
pic_do_pending_ints(register_t psw, int newipl, void *frame)
{
	struct cpu_info * const ci = curcpu();
	if (__predict_false(newipl == IPL_HIGH)) {
		KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl %d", ci->ci_cpl);
		return;
	}
#if defined(__HAVE_PIC_PENDING_INTRS)
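	/*
	 * Deliver any pending interrupts whose IPL is above newipl.  The
	 * loop condition below is a bit trick: after masking off newipl's
	 * own bit, any remaining value greater than __BIT(newipl) must
	 * contain a bit above newipl, i.e. there is still work pending at
	 * a higher IPL.
	 */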
	while ((ci->ci_pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
		KASSERT(ci->ci_pending_ipls < __BIT(NIPL));
		for (;;) {
			int ipl = 31 - __builtin_clz(ci->ci_pending_ipls);
			KASSERT(ipl < NIPL);
			if (ipl <= newipl)
				break;

			pic_set_priority(ci, ipl);
			pic_list_deliver_irqs(ci, psw, ipl, frame);
			pic_list_unblock_irqs(ci);
		}
	}
#endif /* __HAVE_PIC_PENDING_INTRS */
#ifdef __HAVE_PREEMPTION
	struct lwp * const l = curlwp;
	if (newipl == IPL_NONE && (l->l_md.md_astpending & __BIT(1))) {
		pic_set_priority(ci, IPL_SCHED);
		kpreempt(0);
	}
#endif
	if (ci->ci_cpl != newipl)
		pic_set_priority(ci, newipl);
}

static void
pic_percpu_allocate(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct pic_softc * const pic = v1;

	pcpu->pcpu_evs = kmem_zalloc(pic->pic_maxsources * sizeof(pcpu->pcpu_evs[0]),
	    KM_SLEEP);
	KASSERT(pcpu->pcpu_evs != NULL);

#define	PCPU_NAMELEN	32
	const size_t namelen = strlen(pic->pic_name) + 4 + strlen(ci->ci_data.cpu_name);

	KASSERT(namelen < PCPU_NAMELEN);
	pcpu->pcpu_name = kmem_alloc(PCPU_NAMELEN, KM_SLEEP);
#ifdef MULTIPROCESSOR
	snprintf(pcpu->pcpu_name, PCPU_NAMELEN,
	    "%s (%s)", pic->pic_name, ci->ci_data.cpu_name);
#else
	strlcpy(pcpu->pcpu_name, pic->pic_name, PCPU_NAMELEN);
#endif
	pcpu->pcpu_magic = PICPERCPU_MAGIC;
#if 0
	printf("%s: %s %s: <%s>\n",
	    __func__, ci->ci_data.cpu_name, pic->pic_name,
	    pcpu->pcpu_name);
#endif
}

static int
pic_init(void)
{

	mutex_init(&pic_lock, MUTEX_DEFAULT, IPL_HIGH);

	return 0;
}

int
pic_add(struct pic_softc *pic, int irqbase)
{
	int slot, maybe_slot = -1;
	size_t sourcebase;
	static ONCE_DECL(pic_once);

	ASSERT_SLEEPABLE();

	RUN_ONCE(&pic_once, pic_init);

	KASSERT(strlen(pic->pic_name) > 0);

	mutex_enter(&pic_lock);
	if (irqbase == PIC_IRQBASE_ALLOC) {
		irqbase = pic_lastbase;
	}
	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const xpic = pic_list[slot];
		if (xpic == NULL) {
			if (maybe_slot < 0)
				maybe_slot = slot;
			if (irqbase < 0)
				break;
			continue;
		}
		if (irqbase < 0 || xpic->pic_irqbase < 0)
			continue;
		if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources)
			continue;
		if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase)
			continue;
		panic("pic_add: pic %s (%zu sources @ irq %u) conflicts"
		    " with pic %s (%zu sources @ irq %u)",
		    pic->pic_name, pic->pic_maxsources, irqbase,
		    xpic->pic_name, xpic->pic_maxsources, xpic->pic_irqbase);
	}
	slot = maybe_slot;
#if 0
	printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n",
	    pic->pic_name, pic_sourcebase, pic->pic_maxsources);
#endif
	KASSERTMSG(pic->pic_maxsources <= PIC_MAXSOURCES, "%zu",
	    pic->pic_maxsources);
	KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES);
	sourcebase = pic_sourcebase;
	pic_sourcebase += pic->pic_maxsources;
	if (pic_lastbase < irqbase + pic->pic_maxsources)
		pic_lastbase = irqbase + pic->pic_maxsources;
	mutex_exit(&pic_lock);

	/*
	 * Allocate a pointer to each cpu's evcnts and then, for each cpu,
	 * allocate its evcnts and then attach an evcnt for each pin.
	 * We can't allocate the evcnt structures directly since
	 * percpu will move the contents of percpu memory around and
	 * corrupt the pointers in the evcnts themselves.  Remember, any
	 * problem can be solved with sufficient indirection.
	 */
	pic->pic_percpu = percpu_create(sizeof(struct pic_percpu),
	    pic_percpu_allocate, NULL, pic);

	pic->pic_sources = &pic_sources[sourcebase];
	pic->pic_irqbase = irqbase;
	pic->pic_id = slot;
#ifdef __HAVE_PIC_SET_PRIORITY
	KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL));
#endif
#ifdef MULTIPROCESSOR
	KASSERT((pic->pic_cpus != NULL) == (pic->pic_ops->pic_ipi_send != NULL));
#endif
	pic_list[slot] = pic;

	return irqbase;
}
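
/*
 * Attachment sketch (hypothetical driver code; the softc contents, ops
 * vector, and names are illustrative only, not from this file):
 *
 *	static struct pic_softc mypic = {
 *		.pic_ops = &mypic_picops,
 *		.pic_maxsources = 32,
 *		.pic_name = "mypic",
 *	};
 *
 *	sc->sc_irqbase = pic_add(&mypic, PIC_IRQBASE_ALLOC);
 *
 * pic_add() returns the global irq base assigned to the pic; passing
 * PIC_IRQBASE_ALLOC lets the framework pick the next free range.
 */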

int
pic_alloc_irq(struct pic_softc *pic)
{
	int irq;

	for (irq = 0; irq < pic->pic_maxsources; irq++) {
		if (pic->pic_sources[irq] == NULL)
			return irq;
	}

	return -1;
}

static void
pic_percpu_evcnt_attach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_attach_dynamic(&pcpu->pcpu_evs[is->is_irq], EVCNT_TYPE_INTR, NULL,
	    pcpu->pcpu_name, is->is_source);
}

static void
pic_unblock_percpu(void *arg1, void *arg2)
{
	struct pic_softc *pic = arg1;
	struct intrsource *is = arg2;

	(*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
	    __BIT(is->is_irq & 0x1f));
}

void *
pic_establish_intr(struct pic_softc *pic, int irq, int ipl, int type,
    int (*func)(void *), void *arg, const char *xname)
{
	struct intrsource *is;
	int off, nipl;

	if (pic->pic_sources[irq]) {
		printf("pic_establish_intr: pic %s irq %d already present\n",
		    pic->pic_name, irq);
		return NULL;
	}

	is = kmem_zalloc(sizeof(*is), KM_SLEEP);
	is->is_pic = pic;
	is->is_irq = irq;
	is->is_ipl = ipl;
	is->is_type = type & 0xff;
	is->is_func = func;
	is->is_arg = arg;
#ifdef MULTIPROCESSOR
	is->is_mpsafe = (type & IST_MPSAFE) || ipl != IPL_VM;
#endif

	if (pic->pic_ops->pic_source_name)
		(*pic->pic_ops->pic_source_name)(pic, irq, is->is_source,
		    sizeof(is->is_source));
	else
		snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);

	/*
	 * Now attach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_attach, is);

	pic->pic_sources[irq] = is;

	/*
	 * First try to use an existing slot which is empty.
	 */
	bool found = false;
	for (off = pic_ipl_offset[ipl]; off < pic_ipl_offset[ipl + 1]; off++) {
		if (pic__iplsources[off] == NULL) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 * Move up all the sources by one.
		 */
		if (ipl < NIPL) {
			off = pic_ipl_offset[ipl + 1];
			memmove(&pic__iplsources[off + 1], &pic__iplsources[off],
			    sizeof(pic__iplsources[0]) * (pic_ipl_offset[NIPL] - off));
		}

		/*
		 * Advance the offset of all IPLs higher than this.  Include an
		 * extra one as well.  Thus the number of sources per ipl is
		 * pic_ipl_offset[ipl + 1] - pic_ipl_offset[ipl].
		 */
		for (nipl = ipl + 1; nipl <= NIPL; nipl++)
			pic_ipl_offset[nipl]++;

		off = pic_ipl_offset[ipl + 1] - 1;
	}

	/*
	 * Insert into the 'found' or the just made slot position at the end
	 * of this IPL's sources.
	 */
	is->is_iplidx = off - pic_ipl_offset[ipl];
	pic__iplsources[off] = is;

	(*pic->pic_ops->pic_establish_irq)(pic, is);

	if (!mp_online || !is->is_mpsafe || !is->is_percpu) {
		(*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
		    __BIT(is->is_irq & 0x1f));
	} else {
		uint64_t xc = xc_broadcast(0, pic_unblock_percpu, pic, is);
		xc_wait(xc);
	}

	if (xname) {
		if (is->is_xname == NULL)
			is->is_xname = kmem_zalloc(INTRDEVNAMEBUF, KM_SLEEP);
		if (is->is_xname[0] != '\0')
			strlcat(is->is_xname, ", ", INTRDEVNAMEBUF);
		strlcat(is->is_xname, xname, INTRDEVNAMEBUF);
	}

	/* We're done. */
	return is;
}

static void
pic_percpu_evcnt_deattach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_detach(&pcpu->pcpu_evs[is->is_irq]);
}

void
pic_disestablish_source(struct intrsource *is)
{
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	KASSERT(is == pic->pic_sources[irq]);

	(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
	pic->pic_sources[irq] = NULL;
	pic__iplsources[pic_ipl_offset[is->is_ipl] + is->is_iplidx] = NULL;
	if (is->is_xname != NULL) {
		kmem_free(is->is_xname, INTRDEVNAMEBUF);
		is->is_xname = NULL;
	}
	/*
	 * Now detach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_deattach, is);

	kmem_free(is, sizeof(*is));
}

void *
intr_establish(int irq, int ipl, int type, int (*func)(void *), void *arg)
{
	return intr_establish_xname(irq, ipl, type, func, arg, NULL);
}

void *
intr_establish_xname(int irq, int ipl, int type, int (*func)(void *), void *arg,
    const char *xname)
{
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			return pic_establish_intr(pic, irq - pic->pic_irqbase,
			    ipl, type, func, arg, xname);
		}
	}

	return NULL;
}
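
/*
 * Establishment sketch (the handler, softc, and device are hypothetical,
 * for illustration only):
 *
 *	sc->sc_ih = intr_establish_xname(sc->sc_irq, IPL_VM,
 *	    IST_LEVEL | IST_MPSAFE, mydev_intr, sc, device_xname(self));
 *	if (sc->sc_ih == NULL)
 *		aprint_error_dev(self, "couldn't establish interrupt\n");
 *
 * The irq argument is a global number; the loop above maps it onto the
 * owning pic and that pic's local source index.
 */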

void
intr_disestablish(void *ih)
{
	struct intrsource * const is = ih;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	pic_disestablish_source(is);
}

void
intr_mask(void *ih)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	if (atomic_inc_32_nv(&is->is_mask_count) == 1)
		(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
}

void
intr_unmask(void *ih)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	if (atomic_dec_32_nv(&is->is_mask_count) == 0)
		(*pic->pic_ops->pic_unblock_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
}
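
/*
 * intr_mask()/intr_unmask() calls nest: is_mask_count is a reference
 * count, so the source is blocked on the 0 -> 1 transition and unblocked
 * only when the count returns to 0.  For example (illustrative):
 *
 *	intr_mask(ih);		count 1, source blocked
 *	intr_mask(ih);		count 2
 *	intr_unmask(ih);	count 1, still blocked
 *	intr_unmask(ih);	count 0, source unblocked
 */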

const char *
intr_string(intr_handle_t irq, char *buf, size_t len)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			struct intrsource * const is = pic->pic_sources[irq - pic->pic_irqbase];
			snprintf(buf, len, "%s %s", pic->pic_name, is->is_source);
			return buf;
		}
	}

	return NULL;
}

static struct intrsource *
intr_get_source(const char *intrid)
{
	struct intrsource *is;
	intrid_t buf;
	size_t slot;
	int irq;

	KASSERT(mutex_owned(&cpu_lock));

	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		for (irq = 0; irq < pic->pic_maxsources; irq++) {
			is = pic->pic_sources[irq];
			if (is == NULL || is->is_source[0] == '\0')
				continue;

			snprintf(buf, sizeof(buf), "%s %s", pic->pic_name, is->is_source);
			if (strcmp(buf, intrid) == 0)
				return is;
		}
	}

	return NULL;
}

struct intrids_handler *
interrupt_construct_intrids(const kcpuset_t *cpuset)
{
	struct intrids_handler *iih;
	struct intrsource *is;
	int count, irq, n;
	size_t slot;

	if (kcpuset_iszero(cpuset))
		return NULL;

	count = 0;
	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_irqbase >= 0) {
			for (irq = 0; irq < pic->pic_maxsources; irq++) {
				is = pic->pic_sources[irq];
				if (is && is->is_source[0] != '\0')
					count++;
			}
		}
	}

	iih = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count, KM_SLEEP);
	iih->iih_nids = count;

	for (n = 0, slot = 0; n < count && slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		for (irq = 0; irq < pic->pic_maxsources; irq++) {
			is = pic->pic_sources[irq];
			if (is == NULL || is->is_source[0] == '\0')
				continue;

			snprintf(iih->iih_intrids[n++], sizeof(intrid_t), "%s %s",
			    pic->pic_name, is->is_source);
		}
	}

	return iih;
}

void
interrupt_destruct_intrids(struct intrids_handler *iih)
{
	if (iih == NULL)
		return;

	kmem_free(iih, sizeof(int) + sizeof(intrid_t) * iih->iih_nids);
}

void
interrupt_get_available(kcpuset_t *cpuset)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
			kcpuset_set(cpuset, cpu_index(ci));
	}
	mutex_exit(&cpu_lock);
}

void
interrupt_get_devname(const char *intrid, char *buf, size_t len)
{
	struct intrsource *is;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is == NULL || is->is_xname == NULL)
		buf[0] = '\0';
	else
		strlcpy(buf, is->is_xname, len);
	mutex_exit(&cpu_lock);
}

struct interrupt_get_count_arg {
	struct intrsource *is;
	uint64_t count;
	u_int cpu_idx;
};

static void
interrupt_get_count_cb(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct interrupt_get_count_arg * const arg = v1;

	if (arg->cpu_idx != cpu_index(ci))
		return;

	arg->count = pcpu->pcpu_evs[arg->is->is_irq].ev_count;
}

uint64_t
interrupt_get_count(const char *intrid, u_int cpu_idx)
{
	struct interrupt_get_count_arg arg;
	struct intrsource *is;
	uint64_t count;

	count = 0;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is != NULL && is->is_pic != NULL) {
		arg.is = is;
		arg.count = 0;
		arg.cpu_idx = cpu_idx;
		percpu_foreach(is->is_pic->pic_percpu, interrupt_get_count_cb, &arg);
		count = arg.count;
	}
	mutex_exit(&cpu_lock);

	return count;
}
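
/*
 * Query sketch (the intrid string shown is hypothetical; real ids have
 * the form "<pic name> <source name>", as built by intr_get_source()):
 *
 *	uint64_t n = interrupt_get_count("gic irq 27", cpu_index(curcpu()));
 */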

#ifdef MULTIPROCESSOR
void
interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
	struct intrsource *is;
	struct pic_softc *pic;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is != NULL) {
		pic = is->is_pic;
		if (pic && pic->pic_ops->pic_get_affinity)
			pic->pic_ops->pic_get_affinity(pic, is->is_irq, cpuset);
	}
	mutex_exit(&cpu_lock);
}

int
interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
    kcpuset_t *oldset)
{
	struct intrsource *is;
	int error;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is == NULL) {
		error = ENOENT;
	} else {
		error = interrupt_distribute(is, newset, oldset);
	}
	mutex_exit(&cpu_lock);

	return error;
}

int
interrupt_distribute(void *ih, const kcpuset_t *newset, kcpuset_t *oldset)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;

	if (pic == NULL)
		return EOPNOTSUPP;
	if (pic->pic_ops->pic_set_affinity == NULL ||
	    pic->pic_ops->pic_get_affinity == NULL)
		return EOPNOTSUPP;

	if (!is->is_mpsafe)
		return EINVAL;

	if (oldset != NULL)
		pic->pic_ops->pic_get_affinity(pic, is->is_irq, oldset);

	return pic->pic_ops->pic_set_affinity(pic, is->is_irq, newset);
}
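
/*
 * Affinity sketch (hypothetical caller code; assumes the pic provides
 * both pic_set_affinity and pic_get_affinity and the handler is MP-safe):
 *
 *	kcpuset_t *kcp;
 *
 *	kcpuset_create(&kcp, true);
 *	kcpuset_set(kcp, cpu_index(ci));
 *	error = interrupt_distribute(ih, kcp, NULL);
 *	kcpuset_destroy(kcp);
 */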
#endif