/*	$NetBSD: ar_intr.c,v 1.7 2021/01/04 17:42:29 thorpej Exp $	*/

/*
 * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
 * Copyright (c) 2006 Garrett D'Amore.
 * All rights reserved.
 *
 * This code was written by Garrett D'Amore for the Champaign-Urbana
 * Community Wireless Network Project.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgements:
 *      This product includes software developed by the Urbana-Champaign
 *      Independent Media Center.
 *      This product includes software developed by Garrett D'Amore.
 * 4. Urbana-Champaign Independent Media Center's name and Garrett
 *    D'Amore's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ar_intr.c,v 1.7 2021/01/04 17:42:29 thorpej Exp $");

#define	__INTR_PRIVATE

#include <sys/param.h>
#include <sys/intr.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>

#include <mips/cpuregs.h>
#include <mips/locore.h>
#include <mips/atheros/include/platform.h>

#define	REGVAL(x)	*((volatile uint32_t *)(MIPS_PHYS_TO_KSEG1((x))))

/*
 * Only MISC interrupts are easily masked at the interrupt controller.
 * The others have to be masked at the source.
 */
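/*
 * All of the MISC interrupts cascade through a single MIPS CPU interrupt
 * line (apsw_cpuirq_misc); genath_intr_init() below hooks
 * genath_misc_intr() onto that line to demultiplex them.
 */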

#define	NINTRS	7	/* MIPS INT2-INT4 (7 is clock interrupt) */
#define	NIRQS	32	/* bits in Miscellaneous Interrupt Status Register */

struct atheros_intrhand {
	LIST_ENTRY(atheros_intrhand) ih_q;
	int (*ih_func)(void *);
	void *ih_arg;
	int ih_irq;
};

struct atheros_intr {
	LIST_HEAD(, atheros_intrhand) intr_qh;
	struct evcnt intr_count;
};

static struct atheros_intr cpu_intrs[NINTRS];
static struct atheros_intr misc_intrs[NIRQS];

static uint32_t
misc_intstat_get(void)
{
	return REGVAL(platformsw->apsw_misc_intstat);
}

static void
misc_intstat_put(uint32_t v)
{
	REGVAL(platformsw->apsw_misc_intstat) = v;
}

static uint32_t
misc_intmask_get(void)
{
	return REGVAL(platformsw->apsw_misc_intmask);
}

static void
misc_intmask_put(uint32_t v)
{
	REGVAL(platformsw->apsw_misc_intmask) = v;
}


static void *
genath_cpu_intr_establish(int intr, int (*func)(void *), void *arg)
{
	struct atheros_intrhand *ih;

	ih = kmem_alloc(sizeof(*ih), KM_SLEEP);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_irq = intr;

	const int s = splhigh();

	LIST_INSERT_HEAD(&cpu_intrs[intr].intr_qh, ih, ih_q);

	/*
	 * The MIPS CPU interrupts are enabled at boot time, so they
	 * should pretty much always be ready to go.
	 */

	splx(s);
	return (ih);
}

static void
genath_cpu_intr_disestablish(void *arg)
{
	struct atheros_intrhand * const ih = arg;

	const int s = splhigh();

	LIST_REMOVE(ih, ih_q);

	splx(s);
	kmem_free(ih, sizeof(*ih));
}

static void *
genath_misc_intr_establish(int irq, int (*func)(void *), void *arg)
{
	struct atheros_intr * const intr = &misc_intrs[irq];
	struct atheros_intrhand *ih;
	bool first;
	int s;


	ih = kmem_alloc(sizeof(*ih), KM_SLEEP);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;

	s = splhigh();

	first = LIST_EMPTY(&intr->intr_qh);

	LIST_INSERT_HEAD(&intr->intr_qh, ih, ih_q);

	if (first) {
		const uint32_t mask = misc_intmask_get() | __BIT(irq);
		misc_intmask_put(mask);
		(void) misc_intmask_get();	/* flush wbuffer */
	}

	splx(s);

	return ih;
}

static void
genath_misc_intr_disestablish(void *arg)
{
	struct atheros_intrhand *ih = arg;
	struct atheros_intr * const intr = &misc_intrs[ih->ih_irq];

	const int s = splhigh();

	LIST_REMOVE(ih, ih_q);
	if (LIST_EMPTY(&intr->intr_qh)) {
		const uint32_t mask = misc_intmask_get() & ~__BIT(ih->ih_irq);
		misc_intmask_put(mask);
		(void) misc_intmask_get();	/* flush wbuffer */
	}

	splx(s);
	kmem_free(ih, sizeof(*ih));
}


static int
genath_misc_intr(void *arg)
{
	uint32_t isr;
	uint32_t mask;
	int rv = 0;
	struct atheros_intr *intr = arg;

	isr = misc_intstat_get();
	mask = misc_intmask_get();

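	/*
	 * Writing the status back with the enabled bits cleared acks
	 * the interrupts we are about to service; pending-but-masked
	 * bits are written back unchanged.
	 */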
	misc_intstat_put(isr & ~mask);

	isr &= mask;
	while (isr != 0) {
		struct atheros_intrhand *ih;
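		/*
		 * (isr & -isr) isolates the lowest set bit, so the
		 * expression below is ffs(isr) - 1; pending MISC
		 * interrupts are serviced lowest bit first.
		 */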
		int index = 31 - __builtin_clz(isr & -isr);	/* ffs */
		intr += index;

		intr->intr_count.ev_count++;
		LIST_FOREACH(ih, &intr->intr_qh, ih_q) {
			rv |= (*ih->ih_func)(ih->ih_arg);
		}
		isr >>= index + 1;
		intr++;
	}

	return rv;
}

static void
genath_iointr(int cpl, vaddr_t pc, uint32_t ipending)
{
	struct atheros_intr *intr = &cpu_intrs[NINTRS-1];

	/* move ipending to the most significant bits */
	ipending *= __BIT(31) / (MIPS_INT_MASK_0 << (NINTRS-1));
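	/*
	 * The multiplier above is a power of two, so this is just a
	 * left shift: it lines the bit for cpu_intrs[NINTRS-1] up with
	 * bit 31, letting __builtin_clz() below find the highest-
	 * numbered pending interrupt first.
	 */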
	while (ipending != 0) {
		struct atheros_intrhand *ih;
		int index = __builtin_clz(ipending);

		intr -= index;
		ipending <<= index;
		KASSERT(ipending & __BIT(31));
		KASSERT(intr >= cpu_intrs);

		intr->intr_count.ev_count++;
		LIST_FOREACH(ih, &intr->intr_qh, ih_q) {
			(*ih->ih_func)(ih->ih_arg);
		}
		ipending <<= 1;
		intr--;
	}
}

static void
genath_intr_init(void)
{
	const struct atheros_platformsw * const apsw = platformsw;

	KASSERT(apsw->apsw_ipl_sr_map != NULL);
	ipl_sr_map = *apsw->apsw_ipl_sr_map;

	for (size_t i = 0; i < apsw->apsw_cpu_nintrs; i++) {
		if (apsw->apsw_cpu_intrnames[i] != NULL) {
			LIST_INIT(&cpu_intrs[i].intr_qh);
			evcnt_attach_dynamic(&cpu_intrs[i].intr_count,
			    EVCNT_TYPE_INTR, NULL, "cpu",
			    apsw->apsw_cpu_intrnames[i]);
		}
	}

	for (size_t i = 0; i < apsw->apsw_misc_nintrs; i++) {
		if (apsw->apsw_misc_intrnames[i] != NULL) {
			LIST_INIT(&misc_intrs[i].intr_qh);
			evcnt_attach_dynamic(&misc_intrs[i].intr_count,
			    EVCNT_TYPE_INTR, NULL, "misc",
			    apsw->apsw_misc_intrnames[i]);
		}
	}

	/* make sure we start without any misc interrupts enabled */
	(void) misc_intstat_get();
	misc_intmask_put(0);
	misc_intstat_put(0);

	/* make sure we register the MISC interrupt handler */
	genath_cpu_intr_establish(apsw->apsw_cpuirq_misc,
	    genath_misc_intr, misc_intrs);
}


const struct atheros_intrsw atheros_intrsw = {
	.aisw_init = genath_intr_init,
	.aisw_cpu_establish = genath_cpu_intr_establish,
	.aisw_cpu_disestablish = genath_cpu_intr_disestablish,
	.aisw_misc_establish = genath_misc_intr_establish,
	.aisw_misc_disestablish = genath_misc_intr_disestablish,
	.aisw_iointr = genath_iointr,
};