/*	$NetBSD: pic.c,v 1.35 2015/04/18 14:09:32 skrll Exp $	*/
/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE
#include "opt_ddb.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.35 2015/04/18 14:09:32 skrll Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/xcall.h>
#include <sys/ipi.h>

#if defined(__arm__)
#include <arm/armreg.h>
#include <arm/cpufunc.h>
#elif defined(__aarch64__)
#include <aarch64/locore.h>
#define I32_bit	DAIF_I
#define F32_bit	DAIF_F
#endif

#ifdef DDB
#include <arm/db_machdep.h>
#endif

#include <arm/pic/picvar.h>

#if defined(__HAVE_PIC_PENDING_INTRS)
/*
 * This implementation of pending interrupts on a MULTIPROCESSOR system
 * assumes that all of a PIC's (pic_softc) interrupts are taken on the same
 * CPU.  In other words, interrupts from a single PIC will not be distributed
 * among multiple CPUs.
 */
struct pic_pending {
	volatile uint32_t blocked_pics;
	volatile uint32_t pending_pics;
	volatile uint32_t pending_ipls;
};
static uint32_t
	pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int);
static struct pic_softc *
	pic_list_find_pic_by_pending_ipl(struct pic_pending *, uint32_t);
static void
	pic_deliver_irqs(struct pic_pending *, struct pic_softc *, int, void *);
static void
	pic_list_deliver_irqs(struct pic_pending *, register_t, int, void *);

#ifdef MULTIPROCESSOR
percpu_t *pic_pending_percpu;
#else
struct pic_pending pic_pending;
#endif /* MULTIPROCESSOR */
#endif /* __HAVE_PIC_PENDING_INTRS */

struct pic_softc *pic_list[PIC_MAXPICS];
#if PIC_MAXPICS > 32
#error PIC_MAXPICS > 32 not supported
#endif
struct intrsource *pic_sources[PIC_MAXMAXSOURCES];
struct intrsource *pic__iplsources[PIC_MAXMAXSOURCES];
struct intrsource **pic_iplsource[NIPL] = {
	[0 ... NIPL-1] = pic__iplsources,
};
size_t pic_ipl_offset[NIPL+1];

static kmutex_t pic_lock;
size_t pic_sourcebase;
static struct evcnt pic_deferral_ev =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "deferred", "intr");
EVCNT_ATTACH_STATIC(pic_deferral_ev);

static int pic_init(void);

#ifdef __HAVE_PIC_SET_PRIORITY
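/*
 * Change the current IPL on this CPU and, if the primary PIC (slot 0)
 * implements hardware priority masking, propagate the new level to it.
 * IRQs are kept disabled across the update so that ci_cpl and the
 * hardware priority cannot be observed out of sync.
 */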
void
pic_set_priority(struct cpu_info *ci, int newipl)
{
	register_t psw = cpsid(I32_bit);
	if (pic_list[0] != NULL)
		(pic_list[0]->pic_ops->pic_set_priority)(pic_list[0], newipl);
	ci->ci_cpl = newipl;
	if ((psw & I32_bit) == 0)
		cpsie(I32_bit);
}
#endif

#ifdef MULTIPROCESSOR
int
pic_ipi_ast(void *arg)
{
	setsoftast(curcpu());
	return 1;
}

int
pic_ipi_nop(void *arg)
{
	/* do nothing */
	return 1;
}

int
pic_ipi_xcall(void *arg)
{
	xc_ipi_handler();
	return 1;
}

int
pic_ipi_generic(void *arg)
{
	ipi_cpu_handler();
	return 1;
}

#ifdef DDB
int
pic_ipi_ddb(void *arg)
{
//	printf("%s: %s: tf=%p\n", __func__, curcpu()->ci_cpuname, arg);
	kdb_trap(-1, arg);
	return 1;
}
}
#endif /* DDB */

#ifdef __HAVE_PREEMPTION
int
pic_ipi_kpreempt(void *arg)
{
	atomic_or_uint(&curcpu()->ci_astpending, __BIT(1));
	return 1;
}
#endif /* __HAVE_PREEMPTION */

void
intr_cpu_init(struct cpu_info *ci)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_ops->pic_cpu_init != NULL) {
			(*pic->pic_ops->pic_cpu_init)(pic, ci);
		}
	}
}

typedef void (*pic_ipi_send_func_t)(struct pic_softc *, u_long);

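/*
 * Send an IPI to the CPUs named by kcp, or to all CPUs but the sender
 * when kcp is NULL, using every PIC whose pic_cpus set intersects the
 * target set.
 */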
void
intr_ipi_send(const kcpuset_t *kcp, u_long ipi)
{
	struct cpu_info * const ci = curcpu();
	KASSERT(ipi < NIPI);
	bool __diagused sent_p = false;
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_cpus == NULL)
			continue;
		if (kcp == NULL || kcpuset_intersecting_p(kcp, pic->pic_cpus)) {
			// never send to ourselves
			if (pic->pic_cpus == ci->ci_kcpuset)
				continue;

			(*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi);
			// If we were targeting a single CPU or this pic
			// handles all cpus, we're done.
			if (kcp != NULL || pic->pic_cpus == kcpuset_running)
				return;
			sent_p = true;
		}
	}
	KASSERT(cold || sent_p);
}
#endif /* MULTIPROCESSOR */

#ifdef __HAVE_PIC_FAST_SOFTINTS
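/*
 * Fast soft interrupt dispatch: hand off to the softint lwp for this
 * level via softint_switch().  The interrupt depth is temporarily
 * decremented so the softint is not accounted as a nested hard
 * interrupt.
 */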
int
pic_handle_softint(void *arg)
{
	void softint_switch(lwp_t *, int);
	struct cpu_info * const ci = curcpu();
	const size_t softint = (size_t) arg;
	int s = splhigh();
	ci->ci_intr_depth--;	// don't count these as interrupts
	softint_switch(ci->ci_softlwps[softint], s);
	ci->ci_intr_depth++;
	splx(s);
	return 1;
}
#endif

int
pic_handle_intr(void *arg)
{
	struct pic_softc * const pic = arg;
	int rv;

	rv = (*pic->pic_ops->pic_find_pending_irqs)(pic);

	return rv > 0;
}

#if defined(__HAVE_PIC_PENDING_INTRS)
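/*
 * Mark a single interrupt source as pending, both in the PIC's own
 * pending words and in the global (or per-CPU, on MULTIPROCESSOR)
 * pic_pending summary state.
 */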
void
pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is)
{
	const uint32_t ipl_mask = __BIT(is->is_ipl);

	atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5],
	    __BIT(is->is_irq & 0x1f));

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
#ifdef MULTIPROCESSOR
	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
#else
	struct pic_pending *pend = &pic_pending;
#endif
	atomic_or_32(&pend->pending_ipls, ipl_mask);
	atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
#ifdef MULTIPROCESSOR
	percpu_putref(pic_pending_percpu);
#endif
}

void
pic_mark_pending(struct pic_softc *pic, int irq)
{
	struct intrsource * const is = pic->pic_sources[irq];

	KASSERT(irq < pic->pic_maxsources);
	KASSERT(is != NULL);

	pic_mark_pending_source(pic, is);
}

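/*
 * Mark a 32-source group (starting at irq_base) as pending and block
 * those sources at the PIC until they are delivered.  Returns the mask
 * of IPLs that now have work pending.
 */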
uint32_t
pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base,
	uint32_t pending)
{
	struct intrsource ** const isbase = &pic->pic_sources[irq_base];
	struct intrsource *is;
	volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5];
	uint32_t ipl_mask = 0;

	if (pending == 0)
		return ipl_mask;

	KASSERT((irq_base & 31) == 0);

	(*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending);

	atomic_or_32(ipending, pending);
	while (pending != 0) {
		int n = ffs(pending);
		if (n-- == 0)
			break;
		is = isbase[n];
		KASSERT(is != NULL);
		KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32);
		pending &= ~__BIT(n);
		ipl_mask |= __BIT(is->is_ipl);
	}

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
#ifdef MULTIPROCESSOR
	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
#else
	struct pic_pending *pend = &pic_pending;
#endif
	atomic_or_32(&pend->pending_ipls, ipl_mask);
	atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
#ifdef MULTIPROCESSOR
	percpu_putref(pic_pending_percpu);
#endif
	return ipl_mask;
}

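/*
 * From a pending word for the group at irq_base, select the subset of
 * interrupts whose sources are at exactly the requested IPL, so that
 * delivery proceeds strictly in priority order.
 */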
uint32_t
pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base,
	uint32_t pending, int ipl)
{
	uint32_t ipl_irq_mask = 0;
	uint32_t irq_mask;

	for (;;) {
		int irq = ffs(pending);
		if (irq-- == 0)
			return ipl_irq_mask;

		irq_mask = __BIT(irq);
#if 1
		KASSERTMSG(pic->pic_sources[irq_base + irq] != NULL,
		    "%s: irq_base %zu irq %d\n", __func__, irq_base, irq);
#else
		if (pic->pic_sources[irq_base + irq] == NULL) {
			aprint_error("stray interrupt? irq_base=%zu irq=%d\n",
			    irq_base, irq);
		} else
#endif
		if (pic->pic_sources[irq_base + irq]->is_ipl == ipl)
			ipl_irq_mask |= irq_mask;

		pending &= ~irq_mask;
	}
}
#endif /* __HAVE_PIC_PENDING_INTRS */

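/*
 * Invoke one interrupt handler.  A source established with a NULL
 * argument is passed the trap frame instead; if no frame is available,
 * delivery is deferred and the deferral counted.  Handlers that are
 * not marked MP-safe run under the big kernel lock.
 */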
void
pic_dispatch(struct intrsource *is, void *frame)
{
	int (*func)(void *) = is->is_func;
	void *arg = is->is_arg;

	if (__predict_false(arg == NULL)) {
		if (__predict_false(frame == NULL)) {
			pic_deferral_ev.ev_count++;
			return;
		}
		arg = frame;
	}

#ifdef MULTIPROCESSOR
	if (!is->is_mpsafe) {
		KERNEL_LOCK(1, NULL);
		const u_int ci_blcnt __diagused = curcpu()->ci_biglock_count;
		const u_int l_blcnt __diagused = curlwp->l_blcnt;
		(void)(*func)(arg);
		KASSERT(ci_blcnt == curcpu()->ci_biglock_count);
		KASSERT(l_blcnt == curlwp->l_blcnt);
		KERNEL_UNLOCK_ONE(NULL);
	} else
#endif
		(void)(*func)(arg);

	struct pic_percpu * const pcpu = percpu_getref(is->is_pic->pic_percpu);
	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	pcpu->pcpu_evs[is->is_irq].ev_count++;
	percpu_putref(is->is_pic->pic_percpu);
}

#if defined(__HAVE_PIC_PENDING_INTRS)
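/*
 * Deliver all pending interrupts of the given IPL on one PIC, running
 * each handler with IRQs re-enabled.  For PICs with more than 32
 * sources, "poi" records that a handler window existed in which new
 * interrupts could have become pending behind the scan point, forcing
 * a rescan from the start.
 */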
void
pic_deliver_irqs(struct pic_pending *pend, struct pic_softc *pic, int ipl,
	void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct intrsource *is;
	volatile uint32_t *ipending = pic->pic_pending_irqs;
	volatile uint32_t *iblocked = pic->pic_blocked_irqs;
	size_t irq_base;
#if PIC_MAXSOURCES > 32
	size_t irq_count;
	int poi = 0;		/* Possibility of interrupting */
#endif
	uint32_t pending_irqs;
	uint32_t blocked_irqs;
	int irq;
	bool progress __diagused = false;

	KASSERT(pic->pic_pending_ipls & ipl_mask);

	irq_base = 0;
#if PIC_MAXSOURCES > 32
	irq_count = 0;
#endif

	for (;;) {
		pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base,
		    *ipending, ipl);
		KASSERT((pending_irqs & *ipending) == pending_irqs);
		KASSERT((pending_irqs & ~(*ipending)) == 0);
		if (pending_irqs == 0) {
#if PIC_MAXSOURCES > 32
			irq_count += 32;
			if (__predict_true(irq_count >= pic->pic_maxsources)) {
				if (!poi)
					/* Interrupts at this level were handled. */
					break;
				irq_base = 0;
				irq_count = 0;
				poi = 0;
				ipending = pic->pic_pending_irqs;
				iblocked = pic->pic_blocked_irqs;
			} else {
				irq_base += 32;
				ipending++;
				iblocked++;
				KASSERT(irq_base <= pic->pic_maxsources);
			}
			continue;
#else
			break;
#endif
		}
		progress = true;
		blocked_irqs = 0;
		do {
			irq = ffs(pending_irqs) - 1;
			KASSERT(irq >= 0);

			atomic_and_32(ipending, ~__BIT(irq));
			is = pic->pic_sources[irq_base + irq];
			if (is != NULL) {
				cpsie(I32_bit);
				pic_dispatch(is, frame);
				cpsid(I32_bit);
#if PIC_MAXSOURCES > 32
				/*
				 * An interrupt may have arrived between
				 * cpsie() and cpsid(), so force a rescan.
				 */
				poi = 1;
#endif
				blocked_irqs |= __BIT(irq);
			} else {
				KASSERT(0);
			}
			pending_irqs = pic_find_pending_irqs_by_ipl(pic,
			    irq_base, *ipending, ipl);
		} while (pending_irqs);
		if (blocked_irqs) {
			atomic_or_32(iblocked, blocked_irqs);
			atomic_or_32(&pend->blocked_pics, __BIT(pic->pic_id));
		}
	}

	KASSERT(progress);
	/*
	 * Since interrupts are disabled, we don't have to be too careful
	 * about these.
	 */
	if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
		atomic_and_32(&pend->pending_pics, ~__BIT(pic->pic_id));
}

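/*
 * For every PIC that had interrupts delivered, unblock the sources
 * that pic_deliver_irqs() blocked, now that their handlers have run.
 */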
static void
pic_list_unblock_irqs(struct pic_pending *pend)
{
	uint32_t blocked_pics = pend->blocked_pics;

	pend->blocked_pics = 0;

	for (;;) {
		struct pic_softc *pic;
#if PIC_MAXSOURCES > 32
		volatile uint32_t *iblocked;
		uint32_t blocked;
		size_t irq_base;
#endif

		int pic_id = ffs(blocked_pics);
		if (pic_id-- == 0)
			return;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
#if PIC_MAXSOURCES > 32
		for (irq_base = 0, iblocked = pic->pic_blocked_irqs;
		     irq_base < pic->pic_maxsources;
		     irq_base += 32, iblocked++) {
			if ((blocked = *iblocked) != 0) {
				(*pic->pic_ops->pic_unblock_irqs)(pic,
				    irq_base, blocked);
				atomic_and_32(iblocked, ~blocked);
			}
		}
#else
		KASSERT(pic->pic_blocked_irqs[0] != 0);
		(*pic->pic_ops->pic_unblock_irqs)(pic,
		    0, pic->pic_blocked_irqs[0]);
		pic->pic_blocked_irqs[0] = 0;
#endif
		blocked_pics &= ~__BIT(pic_id);
	}
}

struct pic_softc *
pic_list_find_pic_by_pending_ipl(struct pic_pending *pend, uint32_t ipl_mask)
{
	uint32_t pending_pics = pend->pending_pics;
	struct pic_softc *pic;

	for (;;) {
		int pic_id = ffs(pending_pics);
		if (pic_id-- == 0)
			return NULL;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
		if (pic->pic_pending_ipls & ipl_mask)
			return pic;
		pending_pics &= ~__BIT(pic_id);
	}
}

void
pic_list_deliver_irqs(struct pic_pending *pend, register_t psw, int ipl,
	void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct pic_softc *pic;

	while ((pic = pic_list_find_pic_by_pending_ipl(pend, ipl_mask)) != NULL) {
		pic_deliver_irqs(pend, pic, ipl, frame);
		KASSERT((pic->pic_pending_ipls & ipl_mask) == 0);
	}
	atomic_and_32(&pend->pending_ipls, ~ipl_mask);
}
#endif /* __HAVE_PIC_PENDING_INTRS */

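/*
 * Deliver any interrupts that were marked pending while the IPL was
 * raised, working down from the highest pending IPL to (but not
 * including) newipl, and finally drop the current IPL to newipl.
 */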
void
pic_do_pending_ints(register_t psw, int newipl, void *frame)
{
	struct cpu_info * const ci = curcpu();
	if (__predict_false(newipl == IPL_HIGH)) {
		KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl %d", ci->ci_cpl);
		return;
	}
#if defined(__HAVE_PIC_PENDING_INTRS)
#ifdef MULTIPROCESSOR
	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
#else
	struct pic_pending *pend = &pic_pending;
#endif
	while ((pend->pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
		KASSERT(pend->pending_ipls < __BIT(NIPL));
		for (;;) {
			int ipl = 31 - __builtin_clz(pend->pending_ipls);
			KASSERT(ipl < NIPL);
			if (ipl <= newipl)
				break;

			pic_set_priority(ci, ipl);
			pic_list_deliver_irqs(pend, psw, ipl, frame);
			pic_list_unblock_irqs(pend);
		}
	}
#ifdef MULTIPROCESSOR
	percpu_putref(pic_pending_percpu);
#endif
#endif /* __HAVE_PIC_PENDING_INTRS */
#ifdef __HAVE_PREEMPTION
	if (newipl == IPL_NONE && (ci->ci_astpending & __BIT(1))) {
		pic_set_priority(ci, IPL_SCHED);
		kpreempt(0);
	}
#endif
	if (ci->ci_cpl != newipl)
		pic_set_priority(ci, newipl);
}

static void
pic_percpu_allocate(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct pic_softc * const pic = v1;

	pcpu->pcpu_evs = kmem_zalloc(pic->pic_maxsources * sizeof(pcpu->pcpu_evs[0]),
	    KM_SLEEP);
	KASSERT(pcpu->pcpu_evs != NULL);

#define	PCPU_NAMELEN	32
#ifdef DIAGNOSTIC
	const size_t namelen = strlen(pic->pic_name) + 4 + strlen(ci->ci_data.cpu_name);
#endif

	KASSERT(namelen < PCPU_NAMELEN);
	pcpu->pcpu_name = kmem_alloc(PCPU_NAMELEN, KM_SLEEP);
#ifdef MULTIPROCESSOR
	snprintf(pcpu->pcpu_name, PCPU_NAMELEN,
	    "%s (%s)", pic->pic_name, ci->ci_data.cpu_name);
#else
	strlcpy(pcpu->pcpu_name, pic->pic_name, PCPU_NAMELEN);
#endif
	pcpu->pcpu_magic = PICPERCPU_MAGIC;
#if 0
	printf("%s: %s %s: <%s>\n",
	    __func__, ci->ci_data.cpu_name, pic->pic_name,
	    pcpu->pcpu_name);
#endif
}

#if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR)
static void
pic_pending_zero(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_pending * const p = v0;
	memset(p, 0, sizeof(*p));
}
#endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */

static int
pic_init(void)
{

	mutex_init(&pic_lock, MUTEX_DEFAULT, IPL_HIGH);

	return 0;
}

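/*
 * Register a PIC in the first free pic_list slot, panicking if its
 * global IRQ range (when irqbase >= 0) overlaps a previously added
 * PIC.  Also reserves the PIC's slice of the global source array and
 * allocates its per-CPU event-counter storage.
 */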
void
pic_add(struct pic_softc *pic, int irqbase)
{
	int slot, maybe_slot = -1;
	size_t sourcebase;
	static ONCE_DECL(pic_once);

	RUN_ONCE(&pic_once, pic_init);

	KASSERT(strlen(pic->pic_name) > 0);

#if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR)
	if (__predict_false(pic_pending_percpu == NULL)) {
		pic_pending_percpu = percpu_alloc(sizeof(struct pic_pending));
		KASSERT(pic_pending_percpu != NULL);

		/*
		 * Now zero the per-cpu pending data.
		 */
		percpu_foreach(pic_pending_percpu, pic_pending_zero, NULL);
	}
#endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */

	mutex_enter(&pic_lock);
	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const xpic = pic_list[slot];
		if (xpic == NULL) {
			if (maybe_slot < 0)
				maybe_slot = slot;
			if (irqbase < 0)
				break;
			continue;
		}
		if (irqbase < 0 || xpic->pic_irqbase < 0)
			continue;
		if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources)
			continue;
		if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase)
			continue;
		panic("pic_add: pic %s (%zu sources @ irq %u) conflicts"
		    " with pic %s (%zu sources @ irq %u)",
		    pic->pic_name, pic->pic_maxsources, irqbase,
		    xpic->pic_name, xpic->pic_maxsources, xpic->pic_irqbase);
	}
	slot = maybe_slot;
#if 0
	printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n",
	    pic->pic_name, pic_sourcebase, pic->pic_maxsources);
#endif
	KASSERTMSG(pic->pic_maxsources <= PIC_MAXSOURCES, "%zu",
	    pic->pic_maxsources);
	KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES);
	sourcebase = pic_sourcebase;
	pic_sourcebase += pic->pic_maxsources;

	mutex_exit(&pic_lock);

	/*
	 * Allocate a pointer to each cpu's evcnts and then, for each cpu,
	 * allocate its evcnts and then attach an evcnt for each pin.
	 * We can't allocate the evcnt structures directly since
	 * percpu will move the contents of percpu memory around and
	 * corrupt the pointers in the evcnts themselves.  Remember, any
	 * problem can be solved with sufficient indirection.
	 */
	pic->pic_percpu = percpu_alloc(sizeof(struct pic_percpu));
	KASSERT(pic->pic_percpu != NULL);

	/*
	 * Now allocate the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_allocate, pic);

	pic->pic_sources = &pic_sources[sourcebase];
	pic->pic_irqbase = irqbase;
	pic->pic_id = slot;
#ifdef __HAVE_PIC_SET_PRIORITY
	KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL));
#endif
#ifdef MULTIPROCESSOR
	KASSERT((pic->pic_cpus != NULL) == (pic->pic_ops->pic_ipi_send != NULL));
#endif
	pic_list[slot] = pic;
}

int
pic_alloc_irq(struct pic_softc *pic)
{
	int irq;

	for (irq = 0; irq < pic->pic_maxsources; irq++) {
		if (pic->pic_sources[irq] == NULL)
			return irq;
	}

	return -1;
}

static void
pic_percpu_evcnt_attach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_attach_dynamic(&pcpu->pcpu_evs[is->is_irq], EVCNT_TYPE_INTR, NULL,
	    pcpu->pcpu_name, is->is_source);
}

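/*
 * Establish a handler for one PIC-relative IRQ: allocate and fill in
 * the intrsource, attach its per-CPU event counters, insert it into
 * the per-IPL source table (growing that IPL's region if no free slot
 * exists), then program and unblock the interrupt at the PIC.
 */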
void *
pic_establish_intr(struct pic_softc *pic, int irq, int ipl, int type,
	int (*func)(void *), void *arg)
{
	struct intrsource *is;
	int off, nipl;

	if (pic->pic_sources[irq]) {
		printf("pic_establish_intr: pic %s irq %d already present\n",
		    pic->pic_name, irq);
		return NULL;
	}

	is = kmem_zalloc(sizeof(*is), KM_SLEEP);
	if (is == NULL)
		return NULL;

	is->is_pic = pic;
	is->is_irq = irq;
	is->is_ipl = ipl;
	is->is_type = type & 0xff;
	is->is_func = func;
	is->is_arg = arg;
#ifdef MULTIPROCESSOR
	is->is_mpsafe = (type & IST_MPSAFE) || ipl != IPL_VM;
#endif

	if (pic->pic_ops->pic_source_name)
		(*pic->pic_ops->pic_source_name)(pic, irq, is->is_source,
		    sizeof(is->is_source));
	else
		snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);

	/*
	 * Now attach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_attach, is);

	pic->pic_sources[irq] = is;

	/*
	 * First try to use an existing slot which is empty.
	 */
	for (off = pic_ipl_offset[ipl]; off < pic_ipl_offset[ipl+1]; off++) {
		if (pic__iplsources[off] == NULL) {
			is->is_iplidx = off - pic_ipl_offset[ipl];
			pic__iplsources[off] = is;
			return is;
		}
	}

	/*
	 * Move up all the sources by one.
	 */
	if (ipl < NIPL) {
		off = pic_ipl_offset[ipl+1];
		memmove(&pic__iplsources[off+1], &pic__iplsources[off],
		    sizeof(pic__iplsources[0]) * (pic_ipl_offset[NIPL] - off));
	}

	/*
	 * Advance the offset of all IPLs higher than this.  Include an
	 * extra one as well.  Thus the number of sources per ipl is
	 * pic_ipl_offset[ipl+1] - pic_ipl_offset[ipl].
	 */
	for (nipl = ipl + 1; nipl <= NIPL; nipl++)
		pic_ipl_offset[nipl]++;

	/*
	 * Insert into the previously made position at the end of this IPL's
	 * sources.
	 */
	off = pic_ipl_offset[ipl + 1] - 1;
	is->is_iplidx = off - pic_ipl_offset[ipl];
	pic__iplsources[off] = is;

	(*pic->pic_ops->pic_establish_irq)(pic, is);

	(*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
	    __BIT(is->is_irq & 0x1f));

	/* We're done. */
	return is;
}

static void
pic_percpu_evcnt_deattach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_detach(&pcpu->pcpu_evs[is->is_irq]);
}

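/*
 * Tear down an established source: block the interrupt at the PIC,
 * remove the source from the lookup tables, detach its per-CPU event
 * counters, and free it.
 */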
void
pic_disestablish_source(struct intrsource *is)
{
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	KASSERT(is == pic->pic_sources[irq]);

	(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
	pic->pic_sources[irq] = NULL;
	pic__iplsources[pic_ipl_offset[is->is_ipl] + is->is_iplidx] = NULL;

	/*
	 * Now detach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_deattach, is);

	kmem_free(is, sizeof(*is));
}

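/*
 * Establish a handler by global IRQ number: find the PIC whose range
 * covers the IRQ and hand off to pic_establish_intr() with the
 * PIC-relative number.  A minimal sketch of typical driver usage
 * (the driver names here are hypothetical, not from this file):
 *
 *	void *ih = intr_establish(sc->sc_irq, IPL_VM,
 *	    IST_LEVEL | IST_MPSAFE, mydrv_intr, sc);
 *	if (ih == NULL)
 *		aprint_error_dev(self, "couldn't establish interrupt\n");
 */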
void *
intr_establish(int irq, int ipl, int type, int (*func)(void *), void *arg)
{
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			return pic_establish_intr(pic, irq - pic->pic_irqbase,
			    ipl, type, func, arg);
		}
	}

	return NULL;
}

void
intr_disestablish(void *ih)
{
	struct intrsource * const is = ih;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	pic_disestablish_source(is);
}