/*	$NetBSD: pic.c,v 1.76 2021/12/21 06:51:16 skrll Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE
#include "opt_ddb.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.76 2021/12/21 06:51:16 skrll Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/interrupt.h>
#include <sys/intr.h>
#include <sys/ipi.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/xcall.h>

#include <arm/armreg.h>
#include <arm/cpufunc.h>
#include <arm/locore.h>	/* for compat aarch64 */

#ifdef DDB
#include <arm/db_machdep.h>
#endif

#include <arm/pic/picvar.h>

#if defined(__HAVE_PIC_PENDING_INTRS)
/*
 * This implementation of pending interrupts on a MULTIPROCESSOR system makes
 * the assumption that a PIC (pic_softc) shall only have all its interrupts
 * come from the same CPU.  In other words, interrupts from a single PIC will
 * not be distributed among multiple CPUs.
 */
static uint32_t
	pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int);
static struct pic_softc *
	pic_list_find_pic_by_pending_ipl(struct cpu_info *, uint32_t);
static void
	pic_deliver_irqs(struct cpu_info *, struct pic_softc *, int, void *);
static void
	pic_list_deliver_irqs(struct cpu_info *, register_t, int, void *);

#endif /* __HAVE_PIC_PENDING_INTRS */

struct pic_softc *pic_list[PIC_MAXPICS];
#if PIC_MAXPICS > 32
#error PIC_MAXPICS > 32 not supported
#endif
struct intrsource *pic_sources[PIC_MAXMAXSOURCES];
struct intrsource *pic__iplsources[PIC_MAXMAXSOURCES];
struct intrsource **pic_iplsource[NIPL] = {
	[0 ... NIPL - 1] = pic__iplsources,
};
size_t pic_ipl_offset[NIPL + 1];

static kmutex_t pic_lock;
static size_t pic_sourcebase;
static int pic_lastbase;
static struct evcnt pic_deferral_ev =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "deferred", "intr");
EVCNT_ATTACH_STATIC(pic_deferral_ev);

static int pic_init(void);

#ifdef __HAVE_PIC_SET_PRIORITY
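/*
 * Change the current IPL both in software (ci_cpl) and, via the root
 * PIC's pic_set_priority op, in the interrupt controller hardware.
 * Interrupts are disabled around the update; if they were enabled on
 * entry, they are re-enabled before returning.
 */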
void
pic_set_priority(struct cpu_info *ci, int newipl)
{
	register_t psw = DISABLE_INTERRUPT_SAVE();
	if (pic_list[0] != NULL)
		(pic_list[0]->pic_ops->pic_set_priority)(pic_list[0], newipl);
	ci->ci_cpl = newipl;
	if ((psw & I32_bit) == 0) {
		ENABLE_INTERRUPT();
	}
}
#endif

#ifdef MULTIPROCESSOR
int
pic_ipi_ast(void *arg)
{
	setsoftast(curcpu());
	return 1;
}

int
pic_ipi_nop(void *arg)
{
	/* do nothing */
	return 1;
}

int
pic_ipi_xcall(void *arg)
{
	xc_ipi_handler();
	return 1;
}

int
pic_ipi_generic(void *arg)
{
	ipi_cpu_handler();
	return 1;
}

#ifdef DDB
int
pic_ipi_ddb(void *arg)
{
//	printf("%s: %s: tf=%p\n", __func__, curcpu()->ci_cpuname, arg);
	kdb_trap(-1, arg);
	return 1;
}
#endif /* DDB */

#ifdef __HAVE_PREEMPTION
int
pic_ipi_kpreempt(void *arg)
{
	atomic_or_uint(&curcpu()->ci_astpending, __BIT(1));
	return 1;
}
#endif /* __HAVE_PREEMPTION */

void
intr_cpu_init(struct cpu_info *ci)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_ops->pic_cpu_init != NULL) {
			(*pic->pic_ops->pic_cpu_init)(pic, ci);
		}
	}
}

typedef void (*pic_ipi_send_func_t)(struct pic_softc *, u_long);

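/*
 * Send an IPI to the CPUs named by kcp (or, if kcp is NULL, to every
 * CPU but the sender) by walking the PIC list for PICs that have
 * pic_cpus set and invoking their pic_ipi_send op.
 */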
void
intr_ipi_send(const kcpuset_t *kcp, u_long ipi)
{
	struct cpu_info * const ci = curcpu();
	KASSERT(ipi < NIPI);
	KASSERT(kcp == NULL || kcpuset_countset(kcp) == 1);
	bool __diagused sent_p = false;
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_cpus == NULL)
			continue;
		if (kcp == NULL || kcpuset_intersecting_p(kcp, pic->pic_cpus)) {
			/*
			 * Never send to ourself.
			 *
			 * This test uses pointer comparison for systems
			 * that have a pic per cpu, e.g. RPI[23].  GIC sets
			 * pic_cpus to kcpuset_running and handles "not for
			 * self" internally.
			 */
			if (pic->pic_cpus == ci->ci_kcpuset)
				continue;

			(*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi);

			/*
			 * If we were targeting a single CPU or this pic
			 * handles all cpus, we're done.
			 */
			if (kcp != NULL || pic->pic_cpus == kcpuset_running)
				return;
			sent_p = true;
		}
	}
	KASSERTMSG(cold || sent_p || ncpu <= 1, "cold %d sent_p %d ncpu %d",
	    cold, sent_p, ncpu);
}
#endif /* MULTIPROCESSOR */

#ifdef __HAVE_PIC_FAST_SOFTINTS
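/*
 * Fast soft interrupt dispatch: switch to the soft-interrupt lwp for
 * the level encoded in arg.  The interrupt depth is temporarily
 * decremented so the softint is not counted as a nested hard
 * interrupt.
 */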
int
pic_handle_softint(void *arg)
{
	void softint_switch(lwp_t *, int);
	struct cpu_info * const ci = curcpu();
	const size_t softint = (size_t) arg;
	int s = splhigh();
	ci->ci_intr_depth--;	// don't count these as interrupts
	softint_switch(ci->ci_softlwps[softint], s);
	ci->ci_intr_depth++;
	splx(s);
	return 1;
}
#endif

int
pic_handle_intr(void *arg)
{
	struct pic_softc * const pic = arg;
	int rv;

	rv = (*pic->pic_ops->pic_find_pending_irqs)(pic);

	return rv > 0;
}

#if defined(__HAVE_PIC_PENDING_INTRS)
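/*
 * Record a single interrupt source as pending: set its bit in the
 * PIC's pending-irq and pending-ipl words, and note on the current
 * CPU that this PIC and this IPL have work outstanding.
 */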
void
pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is)
{
	const uint32_t ipl_mask = __BIT(is->is_ipl);
	struct cpu_info * const ci = curcpu();

	atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5],
	    __BIT(is->is_irq & 0x1f));

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
	ci->ci_pending_ipls |= ipl_mask;
	ci->ci_pending_pics |= __BIT(pic->pic_id);
}

void
pic_mark_pending(struct pic_softc *pic, int irq)
{
	struct intrsource * const is = pic->pic_sources[irq];

	KASSERT(irq < pic->pic_maxsources);
	KASSERT(is != NULL);

	pic_mark_pending_source(pic, is);
}

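/*
 * Mark a 32-bit group of interrupt sources (starting at irq_base,
 * which must be 32-aligned) as pending.  The sources are blocked at
 * the PIC before being recorded, and the union of their IPL bits is
 * returned so the caller can update the pending-IPL state.
 */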
uint32_t
pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base,
    uint32_t pending)
{
	struct intrsource ** const isbase = &pic->pic_sources[irq_base];
	struct cpu_info * const ci = curcpu();
	struct intrsource *is;
	volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5];
	uint32_t ipl_mask = 0;

	if (pending == 0)
		return ipl_mask;

	KASSERT((irq_base & 31) == 0);

	(*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending);

	atomic_or_32(ipending, pending);
	while (pending != 0) {
		int n = ffs(pending);
		if (n-- == 0)
			break;
		is = isbase[n];
		KASSERT(is != NULL);
		KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32);
		pending &= ~__BIT(n);
		ipl_mask |= __BIT(is->is_ipl);
	}

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
	ci->ci_pending_ipls |= ipl_mask;
	ci->ci_pending_pics |= __BIT(pic->pic_id);

	return ipl_mask;
}

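/*
 * From the 32-bit pending mask for the group at irq_base, pick out
 * the bits whose sources are registered at exactly the given IPL and
 * return them as a mask.
 */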
uint32_t
pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base,
    uint32_t pending, int ipl)
{
	uint32_t ipl_irq_mask = 0;
	uint32_t irq_mask;

	for (;;) {
		int irq = ffs(pending);
		if (irq-- == 0)
			return ipl_irq_mask;

		irq_mask = __BIT(irq);
#if 1
		KASSERTMSG(pic->pic_sources[irq_base + irq] != NULL,
		    "%s: irq_base %zu irq %d\n", __func__, irq_base, irq);
#else
		if (pic->pic_sources[irq_base + irq] == NULL) {
			aprint_error("stray interrupt? irq_base=%zu irq=%d\n",
			    irq_base, irq);
		} else
#endif
		if (pic->pic_sources[irq_base + irq]->is_ipl == ipl)
			ipl_irq_mask |= irq_mask;

		pending &= ~irq_mask;
	}
}
#endif /* __HAVE_PIC_PENDING_INTRS */

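/*
 * Call an interrupt source's handler.  Handlers established with a
 * NULL argument receive the trap frame instead; if no frame is
 * available either, the call is deferred and counted.  Non-MPSAFE
 * handlers run under the big kernel lock.  A per-cpu event counter
 * is bumped on each dispatch.
 */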
void
pic_dispatch(struct intrsource *is, void *frame)
{
	int (*func)(void *) = is->is_func;
	void *arg = is->is_arg;

	if (__predict_false(arg == NULL)) {
		if (__predict_false(frame == NULL)) {
			pic_deferral_ev.ev_count++;
			return;
		}
		arg = frame;
	}

#ifdef MULTIPROCESSOR
	if (!is->is_mpsafe) {
		KERNEL_LOCK(1, NULL);
		const u_int ci_blcnt __diagused = curcpu()->ci_biglock_count;
		const u_int l_blcnt __diagused = curlwp->l_blcnt;
		(void)(*func)(arg);
		KASSERT(ci_blcnt == curcpu()->ci_biglock_count);
		KASSERT(l_blcnt == curlwp->l_blcnt);
		KERNEL_UNLOCK_ONE(NULL);
	} else
#endif
		(void)(*func)(arg);

	struct pic_percpu * const pcpu = percpu_getref(is->is_pic->pic_percpu);
	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	pcpu->pcpu_evs[is->is_irq].ev_count++;
	percpu_putref(is->is_pic->pic_percpu);
}

#if defined(__HAVE_PIC_PENDING_INTRS)
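/*
 * Deliver every interrupt pending on this PIC at exactly the given
 * IPL.  Interrupts are re-enabled around each handler, so new
 * interrupts may become pending while we run; sources that have been
 * dispatched are recorded as blocked and are unblocked later by
 * pic_list_unblock_irqs().
 */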
void
pic_deliver_irqs(struct cpu_info *ci, struct pic_softc *pic, int ipl,
    void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct intrsource *is;
	volatile uint32_t *ipending = pic->pic_pending_irqs;
	volatile uint32_t *iblocked = pic->pic_blocked_irqs;
	size_t irq_base;
#if PIC_MAXSOURCES > 32
	size_t irq_count;
	int poi = 0;		/* Possibility of interrupting */
#endif
	uint32_t pending_irqs;
	uint32_t blocked_irqs;
	int irq;
	bool progress __diagused = false;

	KASSERT(pic->pic_pending_ipls & ipl_mask);

	irq_base = 0;
#if PIC_MAXSOURCES > 32
	irq_count = 0;
#endif

	for (;;) {
		pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base,
		    *ipending, ipl);
		KASSERT((pending_irqs & *ipending) == pending_irqs);
		KASSERT((pending_irqs & ~(*ipending)) == 0);
		if (pending_irqs == 0) {
#if PIC_MAXSOURCES > 32
			irq_count += 32;
			if (__predict_true(irq_count >= pic->pic_maxsources)) {
				if (!poi)
					/* Interrupt at this level was handled. */
					break;
				irq_base = 0;
				irq_count = 0;
				poi = 0;
				ipending = pic->pic_pending_irqs;
				iblocked = pic->pic_blocked_irqs;
			} else {
				irq_base += 32;
				ipending++;
				iblocked++;
				KASSERT(irq_base <= pic->pic_maxsources);
			}
			continue;
#else
			break;
#endif
		}
		progress = true;
		blocked_irqs = 0;
		do {
			irq = ffs(pending_irqs) - 1;
			KASSERT(irq >= 0);

			atomic_and_32(ipending, ~__BIT(irq));
			is = pic->pic_sources[irq_base + irq];
			if (is != NULL) {
				ENABLE_INTERRUPT();
				pic_dispatch(is, frame);
				DISABLE_INTERRUPT();
#if PIC_MAXSOURCES > 32
				/*
				 * There is a possibility of interrupting
				 * from ENABLE_INTERRUPT() to
				 * DISABLE_INTERRUPT().
				 */
				poi = 1;
#endif
				blocked_irqs |= __BIT(irq);
			} else {
				KASSERT(0);
			}
			pending_irqs = pic_find_pending_irqs_by_ipl(pic,
			    irq_base, *ipending, ipl);
		} while (pending_irqs);
		if (blocked_irqs) {
			atomic_or_32(iblocked, blocked_irqs);
			ci->ci_blocked_pics |= __BIT(pic->pic_id);
		}
	}

	KASSERT(progress);
	/*
	 * Since interrupts are disabled, we don't have to be too careful
	 * about these.
	 */
	if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
		ci->ci_pending_pics &= ~__BIT(pic->pic_id);
}

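/*
 * Re-enable, at their PICs, all interrupt sources this CPU blocked
 * while delivering them, then clear the per-cpu record of blocked
 * PICs.
 */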
static void
pic_list_unblock_irqs(struct cpu_info *ci)
{
	uint32_t blocked_pics = ci->ci_blocked_pics;

	ci->ci_blocked_pics = 0;

	for (;;) {
		struct pic_softc *pic;
#if PIC_MAXSOURCES > 32
		volatile uint32_t *iblocked;
		uint32_t blocked;
		size_t irq_base;
#endif

		int pic_id = ffs(blocked_pics);
		if (pic_id-- == 0)
			return;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
#if PIC_MAXSOURCES > 32
		for (irq_base = 0, iblocked = pic->pic_blocked_irqs;
		     irq_base < pic->pic_maxsources;
		     irq_base += 32, iblocked++) {
			if ((blocked = *iblocked) != 0) {
				(*pic->pic_ops->pic_unblock_irqs)(pic,
				    irq_base, blocked);
				atomic_and_32(iblocked, ~blocked);
			}
		}
#else
		KASSERT(pic->pic_blocked_irqs[0] != 0);
		(*pic->pic_ops->pic_unblock_irqs)(pic,
		    0, pic->pic_blocked_irqs[0]);
		pic->pic_blocked_irqs[0] = 0;
#endif
		blocked_pics &= ~__BIT(pic_id);
	}
}

struct pic_softc *
pic_list_find_pic_by_pending_ipl(struct cpu_info *ci, uint32_t ipl_mask)
{
	uint32_t pending_pics = ci->ci_pending_pics;
	struct pic_softc *pic;

	for (;;) {
		int pic_id = ffs(pending_pics);
		if (pic_id-- == 0)
			return NULL;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
		if (pic->pic_pending_ipls & ipl_mask)
			return pic;
		pending_pics &= ~__BIT(pic_id);
	}
}

void
pic_list_deliver_irqs(struct cpu_info *ci, register_t psw, int ipl,
    void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct pic_softc *pic;

	while ((pic = pic_list_find_pic_by_pending_ipl(ci, ipl_mask)) != NULL) {
		pic_deliver_irqs(ci, pic, ipl, frame);
		KASSERT((pic->pic_pending_ipls & ipl_mask) == 0);
	}
	ci->ci_pending_ipls &= ~ipl_mask;
}
#endif /* __HAVE_PIC_PENDING_INTRS */

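/*
 * Run all interrupts pending above newipl, highest IPL first, then
 * drop the current priority to newipl.  Called when the IPL is being
 * lowered, e.g. on the way out of an interrupt or from splx().
 */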
void
pic_do_pending_ints(register_t psw, int newipl, void *frame)
{
	struct cpu_info * const ci = curcpu();
	if (__predict_false(newipl == IPL_HIGH)) {
		KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl %d", ci->ci_cpl);
		return;
	}
#if defined(__HAVE_PIC_PENDING_INTRS)
	while ((ci->ci_pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
		KASSERT(ci->ci_pending_ipls < __BIT(NIPL));
		for (;;) {
			int ipl = 31 - __builtin_clz(ci->ci_pending_ipls);
			KASSERT(ipl < NIPL);
			if (ipl <= newipl)
				break;

			pic_set_priority(ci, ipl);
			pic_list_deliver_irqs(ci, psw, ipl, frame);
			pic_list_unblock_irqs(ci);
		}
	}
#endif /* __HAVE_PIC_PENDING_INTRS */
#ifdef __HAVE_PREEMPTION
	if (newipl == IPL_NONE && (ci->ci_astpending & __BIT(1))) {
		pic_set_priority(ci, IPL_SCHED);
		kpreempt(0);
	}
#endif
	if (ci->ci_cpl != newipl)
		pic_set_priority(ci, newipl);
}

static void
pic_percpu_allocate(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct pic_softc * const pic = v1;

	pcpu->pcpu_evs = kmem_zalloc(pic->pic_maxsources * sizeof(pcpu->pcpu_evs[0]),
	    KM_SLEEP);
	KASSERT(pcpu->pcpu_evs != NULL);

#define	PCPU_NAMELEN	32
#ifdef DIAGNOSTIC
	const size_t namelen = strlen(pic->pic_name) + 4 + strlen(ci->ci_data.cpu_name);
#endif

	KASSERT(namelen < PCPU_NAMELEN);
	pcpu->pcpu_name = kmem_alloc(PCPU_NAMELEN, KM_SLEEP);
#ifdef MULTIPROCESSOR
	snprintf(pcpu->pcpu_name, PCPU_NAMELEN,
	    "%s (%s)", pic->pic_name, ci->ci_data.cpu_name);
#else
	strlcpy(pcpu->pcpu_name, pic->pic_name, PCPU_NAMELEN);
#endif
	pcpu->pcpu_magic = PICPERCPU_MAGIC;
#if 0
	printf("%s: %s %s: <%s>\n",
	    __func__, ci->ci_data.cpu_name, pic->pic_name,
	    pcpu->pcpu_name);
#endif
}

static int
pic_init(void)
{

	mutex_init(&pic_lock, MUTEX_DEFAULT, IPL_HIGH);

	return 0;
}

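/*
 * Register a PIC with the framework, assigning it a free slot in
 * pic_list and a range of global IRQ numbers.  Pass PIC_IRQBASE_ALLOC
 * to have an irqbase allocated; otherwise the requested range must
 * not overlap any already-registered PIC.  Returns the irqbase that
 * was assigned.
 *
 * A minimal sketch of a caller, from a hypothetical interrupt
 * controller driver's attach routine (names are illustrative only):
 *
 *	sc->sc_pic.pic_ops = &myintc_picops;
 *	sc->sc_pic.pic_maxsources = 32;
 *	snprintf(sc->sc_pic.pic_name, sizeof(sc->sc_pic.pic_name),
 *	    "myintc");
 *	sc->sc_irqbase = pic_add(&sc->sc_pic, PIC_IRQBASE_ALLOC);
 */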
int
pic_add(struct pic_softc *pic, int irqbase)
{
	int slot, maybe_slot = -1;
	size_t sourcebase;
	static ONCE_DECL(pic_once);

	ASSERT_SLEEPABLE();

	RUN_ONCE(&pic_once, pic_init);

	KASSERT(strlen(pic->pic_name) > 0);

	mutex_enter(&pic_lock);
	if (irqbase == PIC_IRQBASE_ALLOC) {
		irqbase = pic_lastbase;
	}
	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const xpic = pic_list[slot];
		if (xpic == NULL) {
			if (maybe_slot < 0)
				maybe_slot = slot;
			if (irqbase < 0)
				break;
			continue;
		}
		if (irqbase < 0 || xpic->pic_irqbase < 0)
			continue;
		if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources)
			continue;
		if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase)
			continue;
		panic("pic_add: pic %s (%zu sources @ irq %u) conflicts"
		    " with pic %s (%zu sources @ irq %u)",
		    pic->pic_name, pic->pic_maxsources, irqbase,
		    xpic->pic_name, xpic->pic_maxsources, xpic->pic_irqbase);
	}
	slot = maybe_slot;
#if 0
	printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n",
	    pic->pic_name, pic_sourcebase, pic->pic_maxsources);
#endif
	KASSERTMSG(pic->pic_maxsources <= PIC_MAXSOURCES, "%zu",
	    pic->pic_maxsources);
	KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES);
	sourcebase = pic_sourcebase;
	pic_sourcebase += pic->pic_maxsources;
	if (pic_lastbase < irqbase + pic->pic_maxsources)
		pic_lastbase = irqbase + pic->pic_maxsources;
	mutex_exit(&pic_lock);

	/*
	 * Allocate a pointer to each cpu's evcnts and then, for each cpu,
	 * allocate its evcnts and then attach an evcnt for each pin.
	 * We can't allocate the evcnt structures directly since
	 * percpu will move the contents of percpu memory around and
	 * corrupt the pointers in the evcnts themselves.  Remember, any
	 * problem can be solved with sufficient indirection.
	 */
	pic->pic_percpu = percpu_create(sizeof(struct pic_percpu),
	    pic_percpu_allocate, NULL, pic);

	pic->pic_sources = &pic_sources[sourcebase];
	pic->pic_irqbase = irqbase;
	pic->pic_id = slot;
#ifdef __HAVE_PIC_SET_PRIORITY
	KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL));
#endif
#ifdef MULTIPROCESSOR
	KASSERT((pic->pic_cpus != NULL) == (pic->pic_ops->pic_ipi_send != NULL));
#endif
	pic_list[slot] = pic;

	return irqbase;
}

int
pic_alloc_irq(struct pic_softc *pic)
{
	int irq;

	for (irq = 0; irq < pic->pic_maxsources; irq++) {
		if (pic->pic_sources[irq] == NULL)
			return irq;
	}

	return -1;
}

static void
pic_percpu_evcnt_attach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_attach_dynamic(&pcpu->pcpu_evs[is->is_irq], EVCNT_TYPE_INTR, NULL,
	    pcpu->pcpu_name, is->is_source);
}

static void
pic_unblock_percpu(void *arg1, void *arg2)
{
	struct pic_softc *pic = arg1;
	struct intrsource *is = arg2;

	(*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
	    __BIT(is->is_irq & 0x1f));
}

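/*
 * Attach a handler to a (PIC-relative) irq pin: allocate and fill in
 * the intrsource, attach its per-cpu evcnts, link it into the
 * IPL-sorted pic__iplsources table (growing that IPL's region when no
 * free slot exists), program the PIC, and finally unblock the pin.
 * Returns an opaque handle, or NULL if the pin is already in use.
 */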
void *
pic_establish_intr(struct pic_softc *pic, int irq, int ipl, int type,
    int (*func)(void *), void *arg, const char *xname)
{
	struct intrsource *is;
	int off, nipl;

	if (pic->pic_sources[irq]) {
		printf("pic_establish_intr: pic %s irq %d already present\n",
		    pic->pic_name, irq);
		return NULL;
	}

	is = kmem_zalloc(sizeof(*is), KM_SLEEP);
	is->is_pic = pic;
	is->is_irq = irq;
	is->is_ipl = ipl;
	is->is_type = type & 0xff;
	is->is_func = func;
	is->is_arg = arg;
#ifdef MULTIPROCESSOR
	is->is_mpsafe = (type & IST_MPSAFE) || ipl != IPL_VM;
#endif

	if (pic->pic_ops->pic_source_name)
		(*pic->pic_ops->pic_source_name)(pic, irq, is->is_source,
		    sizeof(is->is_source));
	else
		snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);

	/*
	 * Now attach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_attach, is);

	pic->pic_sources[irq] = is;

	/*
	 * First try to use an existing slot which is empty.
	 */
	for (off = pic_ipl_offset[ipl]; off < pic_ipl_offset[ipl + 1]; off++) {
		if (pic__iplsources[off] == NULL) {
			is->is_iplidx = off - pic_ipl_offset[ipl];
			pic__iplsources[off] = is;
			goto unblock;
		}
	}

	/*
	 * Move up all the sources by one.
	 */
	if (ipl < NIPL) {
		off = pic_ipl_offset[ipl + 1];
		memmove(&pic__iplsources[off + 1], &pic__iplsources[off],
		    sizeof(pic__iplsources[0]) * (pic_ipl_offset[NIPL] - off));
	}

	/*
	 * Advance the offset of all IPLs higher than this.  Include an
	 * extra one as well.  Thus the number of sources per ipl is
	 * pic_ipl_offset[ipl + 1] - pic_ipl_offset[ipl].
	 */
	for (nipl = ipl + 1; nipl <= NIPL; nipl++)
		pic_ipl_offset[nipl]++;

	/*
	 * Insert into the previously made position at the end of this IPL's
	 * sources.
	 */
	off = pic_ipl_offset[ipl + 1] - 1;
	is->is_iplidx = off - pic_ipl_offset[ipl];
	pic__iplsources[off] = is;

	(*pic->pic_ops->pic_establish_irq)(pic, is);

unblock:
	if (!mp_online || !is->is_mpsafe || !is->is_percpu) {
		(*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
		    __BIT(is->is_irq & 0x1f));
	} else {
		uint64_t xc = xc_broadcast(0, pic_unblock_percpu, pic, is);
		xc_wait(xc);
	}

	if (xname) {
		if (is->is_xname == NULL)
			is->is_xname = kmem_zalloc(INTRDEVNAMEBUF, KM_SLEEP);
		if (is->is_xname[0] != '\0')
			strlcat(is->is_xname, ", ", INTRDEVNAMEBUF);
		strlcat(is->is_xname, xname, INTRDEVNAMEBUF);
	}

	/* We're done. */
	return is;
}

static void
pic_percpu_evcnt_deattach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_detach(&pcpu->pcpu_evs[is->is_irq]);
}

void
pic_disestablish_source(struct intrsource *is)
{
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	KASSERT(is == pic->pic_sources[irq]);

	(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
	pic->pic_sources[irq] = NULL;
	pic__iplsources[pic_ipl_offset[is->is_ipl] + is->is_iplidx] = NULL;
	if (is->is_xname != NULL) {
		kmem_free(is->is_xname, INTRDEVNAMEBUF);
		is->is_xname = NULL;
	}
	/*
	 * Now detach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_deattach, is);

	kmem_free(is, sizeof(*is));
}

void *
intr_establish(int irq, int ipl, int type, int (*func)(void *), void *arg)
{
	return intr_establish_xname(irq, ipl, type, func, arg, NULL);
}

void *
intr_establish_xname(int irq, int ipl, int type, int (*func)(void *), void *arg,
    const char *xname)
{
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			return pic_establish_intr(pic, irq - pic->pic_irqbase,
			    ipl, type, func, arg, xname);
		}
	}

	return NULL;
}

void
intr_disestablish(void *ih)
{
	struct intrsource * const is = ih;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	pic_disestablish_source(is);
}

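/*
 * intr_mask()/intr_unmask() keep a per-source nesting count so the
 * calls may be stacked; the pin is blocked at the PIC on the first
 * mask and unblocked again only when the last mask is removed.
 */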
void
intr_mask(void *ih)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	if (atomic_inc_32_nv(&is->is_mask_count) == 1)
		(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
}

void
intr_unmask(void *ih)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	if (atomic_dec_32_nv(&is->is_mask_count) == 0)
		(*pic->pic_ops->pic_unblock_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
}

const char *
intr_string(intr_handle_t irq, char *buf, size_t len)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			struct intrsource * const is = pic->pic_sources[irq - pic->pic_irqbase];
			snprintf(buf, len, "%s %s", pic->pic_name, is->is_source);
			return buf;
		}
	}

	return NULL;
}

static struct intrsource *
intr_get_source(const char *intrid)
{
	struct intrsource *is;
	intrid_t buf;
	size_t slot;
	int irq;

	KASSERT(mutex_owned(&cpu_lock));

	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		for (irq = 0; irq < pic->pic_maxsources; irq++) {
			is = pic->pic_sources[irq];
			if (is == NULL || is->is_source[0] == '\0')
				continue;

			snprintf(buf, sizeof(buf), "%s %s", pic->pic_name, is->is_source);
			if (strcmp(buf, intrid) == 0)
				return is;
		}
	}

	return NULL;
}

struct intrids_handler *
interrupt_construct_intrids(const kcpuset_t *cpuset)
{
	struct intrids_handler *iih;
	struct intrsource *is;
	int count, irq, n;
	size_t slot;

	if (kcpuset_iszero(cpuset))
		return NULL;

	count = 0;
	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_irqbase >= 0) {
			for (irq = 0; irq < pic->pic_maxsources; irq++) {
				is = pic->pic_sources[irq];
				if (is && is->is_source[0] != '\0')
					count++;
			}
		}
	}

	iih = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count, KM_SLEEP);
	iih->iih_nids = count;

	for (n = 0, slot = 0; n < count && slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		for (irq = 0; irq < pic->pic_maxsources; irq++) {
			is = pic->pic_sources[irq];
			if (is == NULL || is->is_source[0] == '\0')
				continue;

			snprintf(iih->iih_intrids[n++], sizeof(intrid_t), "%s %s",
			    pic->pic_name, is->is_source);
		}
	}

	return iih;
}

void
interrupt_destruct_intrids(struct intrids_handler *iih)
{
	if (iih == NULL)
		return;

	kmem_free(iih, sizeof(int) + sizeof(intrid_t) * iih->iih_nids);
}

void
interrupt_get_available(kcpuset_t *cpuset)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
			kcpuset_set(cpuset, cpu_index(ci));
	}
	mutex_exit(&cpu_lock);
}

void
interrupt_get_devname(const char *intrid, char *buf, size_t len)
{
	struct intrsource *is;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is == NULL || is->is_xname == NULL)
		buf[0] = '\0';
	else
		strlcpy(buf, is->is_xname, len);
	mutex_exit(&cpu_lock);
}

struct interrupt_get_count_arg {
	struct intrsource *is;
	uint64_t count;
	u_int cpu_idx;
};

static void
interrupt_get_count_cb(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct interrupt_get_count_arg * const arg = v1;

	if (arg->cpu_idx != cpu_index(ci))
		return;

	arg->count = pcpu->pcpu_evs[arg->is->is_irq].ev_count;
}

uint64_t
interrupt_get_count(const char *intrid, u_int cpu_idx)
{
	struct interrupt_get_count_arg arg;
	struct intrsource *is;
	uint64_t count;

	count = 0;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is != NULL && is->is_pic != NULL) {
		arg.is = is;
		arg.count = 0;
		arg.cpu_idx = cpu_idx;
		percpu_foreach(is->is_pic->pic_percpu, interrupt_get_count_cb, &arg);
		count = arg.count;
	}
	mutex_exit(&cpu_lock);

	return count;
}

#ifdef MULTIPROCESSOR
void
interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
	struct intrsource *is;
	struct pic_softc *pic;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is != NULL) {
		pic = is->is_pic;
		if (pic && pic->pic_ops->pic_get_affinity)
			pic->pic_ops->pic_get_affinity(pic, is->is_irq, cpuset);
	}
	mutex_exit(&cpu_lock);
}

int
interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
    kcpuset_t *oldset)
{
	struct intrsource *is;
	int error;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is == NULL) {
		error = ENOENT;
	} else {
		error = interrupt_distribute(is, newset, oldset);
	}
	mutex_exit(&cpu_lock);

	return error;
}

int
interrupt_distribute(void *ih, const kcpuset_t *newset, kcpuset_t *oldset)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;

	if (pic == NULL)
		return EOPNOTSUPP;
	if (pic->pic_ops->pic_set_affinity == NULL ||
	    pic->pic_ops->pic_get_affinity == NULL)
		return EOPNOTSUPP;

	if (!is->is_mpsafe)
		return EINVAL;

	if (oldset != NULL)
		pic->pic_ops->pic_get_affinity(pic, is->is_irq, oldset);

	return pic->pic_ops->pic_set_affinity(pic, is->is_irq, newset);
}
#endif