/*	$NetBSD: pic.c,v 1.83 2022/07/28 10:26:26 riastradh Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE
#include "opt_ddb.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.83 2022/07/28 10:26:26 riastradh Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/interrupt.h>
#include <sys/intr.h>
#include <sys/ipi.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/xcall.h>

#include <arm/armreg.h>
#include <arm/cpufunc.h>
#include <arm/locore.h>	/* for compat aarch64 */

#ifdef DDB
#include <arm/db_machdep.h>
#endif

#include <arm/pic/picvar.h>

#if defined(__HAVE_PIC_PENDING_INTRS)
/*
 * This implementation of pending interrupts on a MULTIPROCESSOR system
 * assumes that all of a PIC's (pic_softc) interrupts are taken on the
 * same CPU.  In other words, interrupts from a single PIC will not be
 * distributed among multiple CPUs.
 */
static uint32_t
	pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int);
static struct pic_softc *
	pic_list_find_pic_by_pending_ipl(struct cpu_info *, uint32_t);
static void
	pic_deliver_irqs(struct cpu_info *, struct pic_softc *, int, void *);
static void
	pic_list_deliver_irqs(struct cpu_info *, register_t, int, void *);

#endif /* __HAVE_PIC_PENDING_INTRS */
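
/*
 * Illustrative sketch (not part of this file): how a PIC back-end's
 * pic_find_pending_irqs op might defer interrupts via
 * pic_mark_pending_sources().  The register-read helper and the
 * assumption that all pending bits live in IRQs 0..31 are hypothetical.
 */
#if 0
static int
example_pic_find_pending_irqs(struct pic_softc *pic)
{
	/* Hypothetical read of a 32-bit hardware pending register. */
	const uint32_t pending = example_read_pending_reg(pic);

	/*
	 * Block the pending IRQs in hardware and record them so that
	 * pic_do_pending_ints() can deliver them once the IPL drops;
	 * the returned value is the mask of affected IPLs.
	 */
	return pic_mark_pending_sources(pic, 0, pending);
}
#endif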

struct pic_softc *pic_list[PIC_MAXPICS];
#if PIC_MAXPICS > 32
#error PIC_MAXPICS > 32 not supported
#endif
struct intrsource *pic_sources[PIC_MAXMAXSOURCES];
struct intrsource *pic__iplsources[PIC_MAXMAXSOURCES];
size_t pic_ipl_offset[NIPL + 1];

static kmutex_t pic_lock;
static size_t pic_sourcebase;
static int pic_lastbase;
static struct evcnt pic_deferral_ev =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "deferred", "intr");
EVCNT_ATTACH_STATIC(pic_deferral_ev);

static int pic_init(void);

#ifdef __HAVE_PIC_SET_PRIORITY
void
pic_set_priority(struct cpu_info *ci, int newipl)
{
	if (__predict_false(pic_list[0] == NULL)) {
		ci->ci_cpl = newipl;
		return;
	}

	pic_list[0]->pic_ops->pic_set_priority(pic_list[0], newipl);
}
#endif

#ifdef MULTIPROCESSOR
int
pic_ipi_ast(void *arg)
{
	setsoftast(curcpu());
	return 1;
}

int
pic_ipi_nop(void *arg)
{
	/* do nothing */
	return 1;
}

int
pic_ipi_xcall(void *arg)
{
	xc_ipi_handler();
	return 1;
}

int
pic_ipi_generic(void *arg)
{
	ipi_cpu_handler();
	return 1;
}

#ifdef DDB
int
pic_ipi_ddb(void *arg)
{
//	printf("%s: %s: tf=%p\n", __func__, curcpu()->ci_cpuname, arg);
	kdb_trap(-1, arg);
	return 1;
}
#endif /* DDB */

#ifdef __HAVE_PREEMPTION
int
pic_ipi_kpreempt(void *arg)
{
	struct lwp * const l = curlwp;

	l->l_md.md_astpending |= __BIT(1);
	return 1;
}
#endif /* __HAVE_PREEMPTION */

void
intr_cpu_init(struct cpu_info *ci)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_ops->pic_cpu_init != NULL) {
			(*pic->pic_ops->pic_cpu_init)(pic, ci);
		}
	}
}

typedef void (*pic_ipi_send_func_t)(struct pic_softc *, u_long);

void
intr_ipi_send(const kcpuset_t *kcp, u_long ipi)
{
	struct cpu_info * const ci = curcpu();
	KASSERT(ipi < NIPI);
	KASSERT(kcp == NULL || kcpuset_countset(kcp) == 1);
	bool __diagused sent_p = false;
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_cpus == NULL)
			continue;
		if (kcp == NULL || kcpuset_intersecting_p(kcp, pic->pic_cpus)) {
			/*
			 * Never send to ourself.
			 *
			 * This test uses pointer comparison for systems
			 * that have a pic per cpu, e.g. RPI[23].  GIC sets
			 * pic_cpus to kcpuset_running and handles "not for
			 * self" internally.
			 */
			if (pic->pic_cpus == ci->ci_kcpuset)
				continue;

			(*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi);

			/*
			 * If we were targeting a single CPU or this pic
			 * handles all cpus, we're done.
			 */
			if (kcp != NULL || pic->pic_cpus == kcpuset_running)
				return;
			sent_p = true;
		}
	}
	KASSERTMSG(cold || sent_p || ncpu <= 1, "cold %d sent_p %d ncpu %d",
	    cold, sent_p, ncpu);
}
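
/*
 * Illustrative sketch (not part of this file): sending a NOP IPI to a
 * single CPU via intr_ipi_send().  The caller builds a kcpuset holding
 * exactly one CPU, as the KASSERT above requires.  "target_ci" is a
 * hypothetical target, and IPI_NOP is assumed to be one of this port's
 * IPI numbers (pic_ipi_nop() above is its handler).
 */
#if 0
	kcpuset_t *kcp;

	kcpuset_create(&kcp, true);
	kcpuset_set(kcp, cpu_index(target_ci));
	intr_ipi_send(kcp, IPI_NOP);
	kcpuset_destroy(kcp);
#endif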
#endif /* MULTIPROCESSOR */

#ifdef __HAVE_PIC_FAST_SOFTINTS
int
pic_handle_softint(void *arg)
{
	void softint_switch(lwp_t *, int);
	struct cpu_info * const ci = curcpu();
	const size_t softint = (size_t) arg;
	int s = splhigh();
	ci->ci_intr_depth--;	// don't count these as interrupts
	softint_switch(ci->ci_softlwps[softint], s);
	ci->ci_intr_depth++;
	splx(s);
	return 1;
}
#endif

int
pic_handle_intr(void *arg)
{
	struct pic_softc * const pic = arg;
	int rv;

	rv = (*pic->pic_ops->pic_find_pending_irqs)(pic);

	return rv > 0;
}

#if defined(__HAVE_PIC_PENDING_INTRS)
void
pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is)
{
	const uint32_t ipl_mask = __BIT(is->is_ipl);
	struct cpu_info * const ci = curcpu();

	atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5],
	    __BIT(is->is_irq & 0x1f));

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
	ci->ci_pending_ipls |= ipl_mask;
	ci->ci_pending_pics |= __BIT(pic->pic_id);
}

void
pic_mark_pending(struct pic_softc *pic, int irq)
{
	struct intrsource * const is = pic->pic_sources[irq];

	KASSERT(irq < pic->pic_maxsources);
	KASSERT(is != NULL);

	pic_mark_pending_source(pic, is);
}

uint32_t
pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base,
    uint32_t pending)
{
	struct intrsource ** const isbase = &pic->pic_sources[irq_base];
	struct cpu_info * const ci = curcpu();
	struct intrsource *is;
	volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5];
	uint32_t ipl_mask = 0;

	if (pending == 0)
		return ipl_mask;

	KASSERT((irq_base & 31) == 0);

	(*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending);

	atomic_or_32(ipending, pending);
	while (pending != 0) {
		int n = ffs(pending);
		if (n-- == 0)
			break;
		is = isbase[n];
		KASSERT(is != NULL);
		KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32);
		pending &= ~__BIT(n);
		ipl_mask |= __BIT(is->is_ipl);
	}

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
	ci->ci_pending_ipls |= ipl_mask;
	ci->ci_pending_pics |= __BIT(pic->pic_id);

	return ipl_mask;
}

static uint32_t
pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base,
    uint32_t pending, int ipl)
{
	uint32_t ipl_irq_mask = 0;
	uint32_t irq_mask;

	for (;;) {
		int irq = ffs(pending);
		if (irq-- == 0)
			return ipl_irq_mask;

		irq_mask = __BIT(irq);
#if 1
		KASSERTMSG(pic->pic_sources[irq_base + irq] != NULL,
		    "%s: irq_base %zu irq %d\n", __func__, irq_base, irq);
#else
		if (pic->pic_sources[irq_base + irq] == NULL) {
			aprint_error("stray interrupt? irq_base=%zu irq=%d\n",
			    irq_base, irq);
		} else
#endif
		if (pic->pic_sources[irq_base + irq]->is_ipl == ipl)
			ipl_irq_mask |= irq_mask;

		pending &= ~irq_mask;
	}
}
#endif /* __HAVE_PIC_PENDING_INTRS */

void
pic_dispatch(struct intrsource *is, void *frame)
{
	int (*func)(void *) = is->is_func;
	void *arg = is->is_arg;
	int ocpl, ncpl;

	if (__predict_false(arg == NULL)) {
		if (__predict_false(frame == NULL)) {
			pic_deferral_ev.ev_count++;
			return;
		}
		arg = frame;
	}

	ocpl = curcpu()->ci_cpl;
#ifdef MULTIPROCESSOR
	if (!is->is_mpsafe) {
		KERNEL_LOCK(1, NULL);
		const u_int ci_blcnt __diagused = curcpu()->ci_biglock_count;
		const u_int l_blcnt __diagused = curlwp->l_blcnt;
		(void)(*func)(arg);
		KASSERT(ci_blcnt == curcpu()->ci_biglock_count);
		KASSERT(l_blcnt == curlwp->l_blcnt);
		KERNEL_UNLOCK_ONE(NULL);
	} else
#endif
		(void)(*func)(arg);
	ncpl = curcpu()->ci_cpl;
	KASSERTMSG(ocpl <= ncpl, "pic %s irq %u intrsource %s:"
	    " cpl slipped %d -> %d",
	    is->is_pic->pic_name, is->is_irq, is->is_source,
	    ocpl, ncpl);

	struct pic_percpu * const pcpu = percpu_getref(is->is_pic->pic_percpu);
	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	pcpu->pcpu_evs[is->is_irq].ev_count++;
	percpu_putref(is->is_pic->pic_percpu);
}

#if defined(__HAVE_PIC_PENDING_INTRS)
static void
pic_deliver_irqs(struct cpu_info *ci, struct pic_softc *pic, int ipl,
    void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct intrsource *is;
	volatile uint32_t *ipending = pic->pic_pending_irqs;
	volatile uint32_t *iblocked = pic->pic_blocked_irqs;
	size_t irq_base;
#if PIC_MAXSOURCES > 32
	size_t irq_count;
	int poi = 0;		/* Possibility of interrupting */
#endif
	uint32_t pending_irqs;
	uint32_t blocked_irqs;
	int irq;
	bool progress __diagused = false;

	KASSERT(pic->pic_pending_ipls & ipl_mask);

	irq_base = 0;
#if PIC_MAXSOURCES > 32
	irq_count = 0;
#endif

	for (;;) {
		pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base,
		    *ipending, ipl);
		KASSERT((pending_irqs & *ipending) == pending_irqs);
		KASSERT((pending_irqs & ~(*ipending)) == 0);
		if (pending_irqs == 0) {
#if PIC_MAXSOURCES > 32
			irq_count += 32;
			if (__predict_true(irq_count >= pic->pic_maxsources)) {
				if (!poi)
					/* Interrupt at this level was handled. */
					break;
				irq_base = 0;
				irq_count = 0;
				poi = 0;
				ipending = pic->pic_pending_irqs;
				iblocked = pic->pic_blocked_irqs;
			} else {
				irq_base += 32;
				ipending++;
				iblocked++;
				KASSERT(irq_base <= pic->pic_maxsources);
			}
			continue;
#else
			break;
#endif
		}
		progress = true;
		blocked_irqs = 0;
		do {
			irq = ffs(pending_irqs) - 1;
			KASSERT(irq >= 0);

			atomic_and_32(ipending, ~__BIT(irq));
			is = pic->pic_sources[irq_base + irq];
			if (is != NULL) {
				ENABLE_INTERRUPT();
				pic_dispatch(is, frame);
				DISABLE_INTERRUPT();
#if PIC_MAXSOURCES > 32
				/*
				 * An interrupt may have been taken while
				 * interrupts were enabled between
				 * ENABLE_INTERRUPT() and DISABLE_INTERRUPT(),
				 * so rescan from the first word.
				 */
				poi = 1;
#endif
				blocked_irqs |= __BIT(irq);
			} else {
				KASSERT(0);
			}
			pending_irqs = pic_find_pending_irqs_by_ipl(pic,
			    irq_base, *ipending, ipl);
		} while (pending_irqs);
		if (blocked_irqs) {
			atomic_or_32(iblocked, blocked_irqs);
			ci->ci_blocked_pics |= __BIT(pic->pic_id);
		}
	}

	KASSERT(progress);
	/*
	 * Since interrupts are disabled, we don't have to be too careful
	 * about these.
	 */
	if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
		ci->ci_pending_pics &= ~__BIT(pic->pic_id);
}

static void
pic_list_unblock_irqs(struct cpu_info *ci)
{
	uint32_t blocked_pics = ci->ci_blocked_pics;

	ci->ci_blocked_pics = 0;

	for (;;) {
		struct pic_softc *pic;
#if PIC_MAXSOURCES > 32
		volatile uint32_t *iblocked;
		uint32_t blocked;
		size_t irq_base;
#endif

		int pic_id = ffs(blocked_pics);
		if (pic_id-- == 0)
			return;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
#if PIC_MAXSOURCES > 32
		for (irq_base = 0, iblocked = pic->pic_blocked_irqs;
		     irq_base < pic->pic_maxsources;
		     irq_base += 32, iblocked++) {
			if ((blocked = *iblocked) != 0) {
				(*pic->pic_ops->pic_unblock_irqs)(pic,
				    irq_base, blocked);
				atomic_and_32(iblocked, ~blocked);
			}
		}
#else
		KASSERT(pic->pic_blocked_irqs[0] != 0);
		(*pic->pic_ops->pic_unblock_irqs)(pic,
		    0, pic->pic_blocked_irqs[0]);
		pic->pic_blocked_irqs[0] = 0;
#endif
		blocked_pics &= ~__BIT(pic_id);
	}
}

static struct pic_softc *
pic_list_find_pic_by_pending_ipl(struct cpu_info *ci, uint32_t ipl_mask)
{
	uint32_t pending_pics = ci->ci_pending_pics;
	struct pic_softc *pic;

	for (;;) {
		int pic_id = ffs(pending_pics);
		if (pic_id-- == 0)
			return NULL;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
		if (pic->pic_pending_ipls & ipl_mask)
			return pic;
		pending_pics &= ~__BIT(pic_id);
	}
}

static void
pic_list_deliver_irqs(struct cpu_info *ci, register_t psw, int ipl,
    void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct pic_softc *pic;

	while ((pic = pic_list_find_pic_by_pending_ipl(ci, ipl_mask)) != NULL) {
		pic_deliver_irqs(ci, pic, ipl, frame);
		KASSERT((pic->pic_pending_ipls & ipl_mask) == 0);
	}
	ci->ci_pending_ipls &= ~ipl_mask;
}
#endif /* __HAVE_PIC_PENDING_INTRS */

void
pic_do_pending_ints(register_t psw, int newipl, void *frame)
{
	struct cpu_info * const ci = curcpu();
	if (__predict_false(newipl == IPL_HIGH)) {
		KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl %d", ci->ci_cpl);
		return;
	}
#if defined(__HAVE_PIC_PENDING_INTRS)
	while ((ci->ci_pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
		KASSERT(ci->ci_pending_ipls < __BIT(NIPL));
		for (;;) {
			int ipl = 31 - __builtin_clz(ci->ci_pending_ipls);
			KASSERT(ipl < NIPL);
			if (ipl <= newipl)
				break;

			pic_set_priority(ci, ipl);
			pic_list_deliver_irqs(ci, psw, ipl, frame);
			pic_list_unblock_irqs(ci);
		}
	}
#endif /* __HAVE_PIC_PENDING_INTRS */
#ifdef __HAVE_PREEMPTION
	struct lwp * const l = curlwp;
	if (newipl == IPL_NONE && (l->l_md.md_astpending & __BIT(1))) {
		pic_set_priority(ci, IPL_SCHED);
		kpreempt(0);
	}
#endif
	if (ci->ci_cpl != newipl)
		pic_set_priority(ci, newipl);
}

static void
pic_percpu_allocate(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct pic_softc * const pic = v1;

	pcpu->pcpu_evs = kmem_zalloc(pic->pic_maxsources
	    * sizeof(pcpu->pcpu_evs[0]), KM_SLEEP);
	KASSERT(pcpu->pcpu_evs != NULL);

#define	PCPU_NAMELEN	32
	const size_t namelen =
	    strlen(pic->pic_name) + 4 + strlen(ci->ci_data.cpu_name);

	KASSERT(namelen < PCPU_NAMELEN);
	pcpu->pcpu_name = kmem_alloc(PCPU_NAMELEN, KM_SLEEP);
#ifdef MULTIPROCESSOR
	snprintf(pcpu->pcpu_name, PCPU_NAMELEN,
	    "%s (%s)", pic->pic_name, ci->ci_data.cpu_name);
#else
	strlcpy(pcpu->pcpu_name, pic->pic_name, PCPU_NAMELEN);
#endif
	pcpu->pcpu_magic = PICPERCPU_MAGIC;
#if 0
	printf("%s: %s %s: <%s>\n",
	    __func__, ci->ci_data.cpu_name, pic->pic_name,
	    pcpu->pcpu_name);
#endif
}

static int
pic_init(void)
{

	mutex_init(&pic_lock, MUTEX_DEFAULT, IPL_HIGH);

	return 0;
}

int
pic_add(struct pic_softc *pic, int irqbase)
{
	int slot, maybe_slot = -1;
	size_t sourcebase;
	static ONCE_DECL(pic_once);

	ASSERT_SLEEPABLE();

	RUN_ONCE(&pic_once, pic_init);

	KASSERT(strlen(pic->pic_name) > 0);

	mutex_enter(&pic_lock);
	if (irqbase == PIC_IRQBASE_ALLOC) {
		irqbase = pic_lastbase;
	}
	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const xpic = pic_list[slot];
		if (xpic == NULL) {
			if (maybe_slot < 0)
				maybe_slot = slot;
			if (irqbase < 0)
				break;
			continue;
		}
		if (irqbase < 0 || xpic->pic_irqbase < 0)
			continue;
		if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources)
			continue;
		if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase)
			continue;
		panic("pic_add: pic %s (%zu sources @ irq %u) conflicts"
		    " with pic %s (%zu sources @ irq %u)",
		    pic->pic_name, pic->pic_maxsources, irqbase,
		    xpic->pic_name, xpic->pic_maxsources, xpic->pic_irqbase);
	}
	slot = maybe_slot;
#if 0
	printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n",
	    pic->pic_name, pic_sourcebase, pic->pic_maxsources);
#endif
	KASSERTMSG(pic->pic_maxsources <= PIC_MAXSOURCES, "%zu",
	    pic->pic_maxsources);
	KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES);
	sourcebase = pic_sourcebase;
	pic_sourcebase += pic->pic_maxsources;
	if (pic_lastbase < irqbase + pic->pic_maxsources)
		pic_lastbase = irqbase + pic->pic_maxsources;
	mutex_exit(&pic_lock);

	/*
	 * Allocate a pointer to each cpu's evcnts and then, for each cpu,
	 * allocate its evcnts and then attach an evcnt for each pin.
	 * We can't allocate the evcnt structures directly since
	 * percpu will move the contents of percpu memory around and
	 * corrupt the pointers in the evcnts themselves.  Remember, any
	 * problem can be solved with sufficient indirection.
	 */
	pic->pic_percpu = percpu_create(sizeof(struct pic_percpu),
	    pic_percpu_allocate, NULL, pic);

	pic->pic_sources = &pic_sources[sourcebase];
	pic->pic_irqbase = irqbase;
	pic->pic_id = slot;
#ifdef __HAVE_PIC_SET_PRIORITY
	KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL));
#endif
#ifdef MULTIPROCESSOR
	KASSERT((pic->pic_cpus != NULL) == (pic->pic_ops->pic_ipi_send != NULL));
#endif
	pic_list[slot] = pic;

	return irqbase;
}
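
/*
 * Illustrative sketch (not part of this file): a controller driver
 * registering itself with pic_add().  The ops table, callback names,
 * and softc layout are hypothetical; PIC_IRQBASE_ALLOC asks pic_add()
 * to pick the next free IRQ base, which is then returned.
 */
#if 0
static const struct pic_ops example_picops = {
	.pic_unblock_irqs = example_unblock_irqs,
	.pic_block_irqs = example_block_irqs,
	.pic_find_pending_irqs = example_find_pending_irqs,
	.pic_establish_irq = example_establish_irq,
};

static struct pic_softc example_pic = {
	.pic_ops = &example_picops,
	.pic_maxsources = 32,
	.pic_name = "examplepic",
};

	/* In the driver's attach function: */
	sc->sc_irqbase = pic_add(&example_pic, PIC_IRQBASE_ALLOC);
#endif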

int
pic_alloc_irq(struct pic_softc *pic)
{
	int irq;

	for (irq = 0; irq < pic->pic_maxsources; irq++) {
		if (pic->pic_sources[irq] == NULL)
			return irq;
	}

	return -1;
}

static void
pic_percpu_evcnt_attach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_attach_dynamic(&pcpu->pcpu_evs[is->is_irq], EVCNT_TYPE_INTR, NULL,
	    pcpu->pcpu_name, is->is_source);
}

static void
pic_unblock_percpu(void *arg1, void *arg2)
{
	struct pic_softc *pic = arg1;
	struct intrsource *is = arg2;

	(*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
	    __BIT(is->is_irq & 0x1f));
}

void *
pic_establish_intr(struct pic_softc *pic, int irq, int ipl, int type,
    int (*func)(void *), void *arg, const char *xname)
{
	struct intrsource *is;
	int off, nipl;

	if (pic->pic_sources[irq]) {
		printf("pic_establish_intr: pic %s irq %d already present\n",
		    pic->pic_name, irq);
		return NULL;
	}

	is = kmem_zalloc(sizeof(*is), KM_SLEEP);
	is->is_pic = pic;
	is->is_irq = irq;
	is->is_ipl = ipl;
	is->is_type = type & 0xff;
	is->is_func = func;
	is->is_arg = arg;
#ifdef MULTIPROCESSOR
	is->is_mpsafe = (type & IST_MPSAFE) || ipl != IPL_VM;
#endif

	if (pic->pic_ops->pic_source_name)
		(*pic->pic_ops->pic_source_name)(pic, irq, is->is_source,
		    sizeof(is->is_source));
	else
		snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);

	/*
	 * Now attach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_attach, is);

	pic->pic_sources[irq] = is;

	/*
	 * First try to use an existing slot which is empty.
	 */
	bool found = false;
	for (off = pic_ipl_offset[ipl]; off < pic_ipl_offset[ipl + 1]; off++) {
		if (pic__iplsources[off] == NULL) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 * Move up all the sources by one.
		 */
		if (ipl < NIPL) {
			off = pic_ipl_offset[ipl + 1];
			memmove(&pic__iplsources[off + 1],
			    &pic__iplsources[off],
			    sizeof(pic__iplsources[0]) *
			    (pic_ipl_offset[NIPL] - off));
		}

		/*
		 * Advance the offset of all IPLs higher than this.  Include
		 * an extra one as well.  Thus the number of sources per ipl
		 * is pic_ipl_offset[ipl + 1] - pic_ipl_offset[ipl].
		 */
		for (nipl = ipl + 1; nipl <= NIPL; nipl++)
			pic_ipl_offset[nipl]++;

		off = pic_ipl_offset[ipl + 1] - 1;
	}

	/*
	 * Insert into the slot we found, or into the slot just created at
	 * the end of this IPL's sources.
	 */
	is->is_iplidx = off - pic_ipl_offset[ipl];
	pic__iplsources[off] = is;

	(*pic->pic_ops->pic_establish_irq)(pic, is);

	if (!mp_online || !is->is_mpsafe || !is->is_percpu) {
		(*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
		    __BIT(is->is_irq & 0x1f));
	} else {
		uint64_t xc = xc_broadcast(0, pic_unblock_percpu, pic, is);
		xc_wait(xc);
	}

	if (xname) {
		if (is->is_xname == NULL)
			is->is_xname = kmem_zalloc(INTRDEVNAMEBUF, KM_SLEEP);
		if (is->is_xname[0] != '\0')
			strlcat(is->is_xname, ", ", INTRDEVNAMEBUF);
		strlcat(is->is_xname, xname, INTRDEVNAMEBUF);
	}

	/* We're done. */
	return is;
}
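
/*
 * Illustrative sketch (not part of this file): the invariant the
 * pic_ipl_offset[] bookkeeping above maintains.  pic__iplsources[]
 * holds all sources sorted by IPL, and each level's sources occupy
 * [pic_ipl_offset[ipl], pic_ipl_offset[ipl + 1]).
 */
#if 0
	KASSERT(pic_ipl_offset[0] == 0);
	for (int q = 0; q < NIPL; q++)
		KASSERT(pic_ipl_offset[q] <= pic_ipl_offset[q + 1]);
#endif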

static void
pic_percpu_evcnt_deattach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_detach(&pcpu->pcpu_evs[is->is_irq]);
}

void
pic_disestablish_source(struct intrsource *is)
{
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	KASSERT(is == pic->pic_sources[irq]);

	(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
	pic->pic_sources[irq] = NULL;
	pic__iplsources[pic_ipl_offset[is->is_ipl] + is->is_iplidx] = NULL;
	if (is->is_xname != NULL) {
		kmem_free(is->is_xname, INTRDEVNAMEBUF);
		is->is_xname = NULL;
	}
	/*
	 * Now detach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_deattach, is);

	kmem_free(is, sizeof(*is));
}

void *
intr_establish(int irq, int ipl, int type, int (*func)(void *), void *arg)
{
	return intr_establish_xname(irq, ipl, type, func, arg, NULL);
}

void *
intr_establish_xname(int irq, int ipl, int type, int (*func)(void *), void *arg,
    const char *xname)
{
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			return pic_establish_intr(pic, irq - pic->pic_irqbase,
			    ipl, type, func, arg, xname);
		}
	}

	return NULL;
}
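
/*
 * Illustrative sketch (not part of this file): a device driver
 * establishing an MP-safe, level-triggered handler.  "sc", "sc_irq",
 * "example_intr", and the device_t "self" are hypothetical driver
 * state.
 */
#if 0
	sc->sc_ih = intr_establish_xname(sc->sc_irq, IPL_VM,
	    IST_LEVEL | IST_MPSAFE, example_intr, sc, device_xname(self));
	if (sc->sc_ih == NULL)
		aprint_error_dev(self, "couldn't establish interrupt\n");
#endif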

void
intr_disestablish(void *ih)
{
	struct intrsource * const is = ih;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	pic_disestablish_source(is);
}

void
intr_mask(void *ih)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	if (atomic_inc_32_nv(&is->is_mask_count) == 1)
		(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f,
		    __BIT(irq & 0x1f));
}

void
intr_unmask(void *ih)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	if (atomic_dec_32_nv(&is->is_mask_count) == 0)
		(*pic->pic_ops->pic_unblock_irqs)(pic, irq & ~0x1f,
		    __BIT(irq & 0x1f));
}
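
/*
 * Illustrative sketch (not part of this file): intr_mask()/intr_unmask()
 * reference-count is_mask_count, so nested pairs are safe; the IRQ is
 * blocked on the first mask and unblocked only on the last unmask.
 * "sc" is hypothetical driver state.
 */
#if 0
	intr_mask(sc->sc_ih);	/* count 0 -> 1: blocks the IRQ */
	intr_mask(sc->sc_ih);	/* count 1 -> 2: no hardware change */
	intr_unmask(sc->sc_ih);	/* count 2 -> 1: still blocked */
	intr_unmask(sc->sc_ih);	/* count 1 -> 0: unblocks the IRQ */
#endif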

const char *
intr_string(intr_handle_t irq, char *buf, size_t len)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			struct intrsource * const is =
			    pic->pic_sources[irq - pic->pic_irqbase];
			snprintf(buf, len, "%s %s", pic->pic_name,
			    is->is_source);
			return buf;
		}
	}

	return NULL;
}

static struct intrsource *
intr_get_source(const char *intrid)
{
	struct intrsource *is;
	intrid_t buf;
	size_t slot;
	int irq;

	KASSERT(mutex_owned(&cpu_lock));

	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		for (irq = 0; irq < pic->pic_maxsources; irq++) {
			is = pic->pic_sources[irq];
			if (is == NULL || is->is_source[0] == '\0')
				continue;

			snprintf(buf, sizeof(buf), "%s %s", pic->pic_name,
			    is->is_source);
			if (strcmp(buf, intrid) == 0)
				return is;
		}
	}

	return NULL;
}

struct intrids_handler *
interrupt_construct_intrids(const kcpuset_t *cpuset)
{
	struct intrids_handler *iih;
	struct intrsource *is;
	int count, irq, n;
	size_t slot;

	if (kcpuset_iszero(cpuset))
		return NULL;

	count = 0;
	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_irqbase >= 0) {
			for (irq = 0; irq < pic->pic_maxsources; irq++) {
				is = pic->pic_sources[irq];
				if (is && is->is_source[0] != '\0')
					count++;
			}
		}
	}

	iih = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count, KM_SLEEP);
	iih->iih_nids = count;

	for (n = 0, slot = 0; n < count && slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		for (irq = 0; irq < pic->pic_maxsources; irq++) {
			is = pic->pic_sources[irq];
			if (is == NULL || is->is_source[0] == '\0')
				continue;

			snprintf(iih->iih_intrids[n++], sizeof(intrid_t),
			    "%s %s", pic->pic_name, is->is_source);
		}
	}

	return iih;
}

void
interrupt_destruct_intrids(struct intrids_handler *iih)
{
	if (iih == NULL)
		return;

	kmem_free(iih, sizeof(int) + sizeof(intrid_t) * iih->iih_nids);
}

void
interrupt_get_available(kcpuset_t *cpuset)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
			kcpuset_set(cpuset, cpu_index(ci));
	}
	mutex_exit(&cpu_lock);
}

void
interrupt_get_devname(const char *intrid, char *buf, size_t len)
{
	struct intrsource *is;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is == NULL || is->is_xname == NULL)
		buf[0] = '\0';
	else
		strlcpy(buf, is->is_xname, len);
	mutex_exit(&cpu_lock);
}

struct interrupt_get_count_arg {
	struct intrsource *is;
	uint64_t count;
	u_int cpu_idx;
};

static void
interrupt_get_count_cb(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct interrupt_get_count_arg * const arg = v1;

	if (arg->cpu_idx != cpu_index(ci))
		return;

	arg->count = pcpu->pcpu_evs[arg->is->is_irq].ev_count;
}

uint64_t
interrupt_get_count(const char *intrid, u_int cpu_idx)
{
	struct interrupt_get_count_arg arg;
	struct intrsource *is;
	uint64_t count;

	count = 0;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is != NULL && is->is_pic != NULL) {
		arg.is = is;
		arg.count = 0;
		arg.cpu_idx = cpu_idx;
		percpu_foreach(is->is_pic->pic_percpu,
		    interrupt_get_count_cb, &arg);
		count = arg.count;
	}
	mutex_exit(&cpu_lock);

	return count;
}
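
/*
 * Illustrative sketch (not part of this file): querying the per-CPU
 * dispatch count for one source.  The intrid is the "<pic> <source>"
 * string produced by intr_string()/interrupt_construct_intrids();
 * "examplepic irq 5" is a hypothetical example.
 */
#if 0
	uint64_t n = interrupt_get_count("examplepic irq 5", cpu_index(ci));
#endif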

#ifdef MULTIPROCESSOR
void
interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
	struct intrsource *is;
	struct pic_softc *pic;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is != NULL) {
		pic = is->is_pic;
		if (pic && pic->pic_ops->pic_get_affinity)
			pic->pic_ops->pic_get_affinity(pic, is->is_irq,
			    cpuset);
	}
	mutex_exit(&cpu_lock);
}

int
interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
    kcpuset_t *oldset)
{
	struct intrsource *is;
	int error;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is == NULL) {
		error = ENOENT;
	} else {
		error = interrupt_distribute(is, newset, oldset);
	}
	mutex_exit(&cpu_lock);

	return error;
}

int
interrupt_distribute(void *ih, const kcpuset_t *newset, kcpuset_t *oldset)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;

	if (pic == NULL)
		return EOPNOTSUPP;
	if (pic->pic_ops->pic_set_affinity == NULL ||
	    pic->pic_ops->pic_get_affinity == NULL)
		return EOPNOTSUPP;

	if (!is->is_mpsafe)
		return EINVAL;

	if (oldset != NULL)
		pic->pic_ops->pic_get_affinity(pic, is->is_irq, oldset);

	return pic->pic_ops->pic_set_affinity(pic, is->is_irq, newset);
}
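
/*
 * Illustrative sketch (not part of this file): moving an MP-safe
 * handler to CPU index 1.  interrupt_distribute() requires the pic to
 * implement both affinity ops and the source to be MP-safe; "sc" is
 * hypothetical driver state.
 */
#if 0
	kcpuset_t *newset;
	int error;

	kcpuset_create(&newset, true);
	kcpuset_set(newset, 1);		/* cpu index 1 */
	error = interrupt_distribute(sc->sc_ih, newset, NULL);
	kcpuset_destroy(newset);
#endif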
#endif /* MULTIPROCESSOR */