/*	$NetBSD: octeon_intr.c,v 1.2 2015/05/19 05:51:16 matt Exp $	*/
/*
 * Copyright 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Platform-specific interrupt support for the Cavium Octeon.
 */

#include "opt_octeon.h"
#define __INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: octeon_intr.c,v 1.2 2015/05/19 05:51:16 matt Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <lib/libkern/libkern.h>

#include <mips/locore.h>

#include <mips/cavium/dev/octeon_ciureg.h>
#include <mips/cavium/octeonvar.h>

/*
 * This is a mask of bits to clear in the SR when we go to a
 * given hardware interrupt priority level.
 */
static const struct ipl_sr_map octeon_ipl_sr_map = {
    .sr_bits = {
        [IPL_NONE] = 0,
        [IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0,
        [IPL_SOFTNET] = MIPS_SOFT_INT_MASK,
        [IPL_VM] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0,
        [IPL_SCHED] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
            | MIPS_INT_MASK_5,
        [IPL_DDB] = MIPS_INT_MASK,
        [IPL_HIGH] = MIPS_INT_MASK,
    },
};

#define NIRQS	64

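/*
 * Names for the 64 CIU interrupt sources, indexed by their bit position
 * in the CIU_INT0_SUM0/CIU_INT1_SUM0 summary registers; they are used
 * below to label the per-source event counters.
 */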
const char * const octeon_intrnames[NIRQS] = {
        "workq 0",
        "workq 1",
        "workq 2",
        "workq 3",
        "workq 4",
        "workq 5",
        "workq 6",
        "workq 7",
        "workq 8",
        "workq 9",
        "workq 10",
        "workq 11",
        "workq 12",
        "workq 13",
        "workq 14",
        "workq 15",
        "gpio 0",
        "gpio 1",
        "gpio 2",
        "gpio 3",
        "gpio 4",
        "gpio 5",
        "gpio 6",
        "gpio 7",
        "gpio 8",
        "gpio 9",
        "gpio 10",
        "gpio 11",
        "gpio 12",
        "gpio 13",
        "gpio 14",
        "gpio 15",
        "mbox 0-15",
        "mbox 16-31",
        "uart 0",
        "uart 1",
        "pci inta",
        "pci intb",
        "pci intc",
        "pci intd",
        "pci msi 0-15",
        "pci msi 16-31",
        "pci msi 32-47",
        "pci msi 48-63",
        "wdog summary",
        "twsi",
        "rml",
        "trace",
        "gmx drop",
        "reserved",
        "ipd drop",
        "reserved",
        "timer 0",
        "timer 1",
        "timer 2",
        "timer 3",
        "usb",
        "pcm/tdm",
        "mpi/spi",
        "reserved",
        "reserved",
        "reserved",
        "reserved",
        "reserved",
};

struct octeon_intrhand {
        LIST_ENTRY(octeon_intrhand) ih_q;
        int (*ih_func)(void *);	/* handler function */
        void *ih_arg;		/* argument passed to ih_func */
        int ih_irq;		/* CIU interrupt source number (0..63) */
        int ih_ipl;		/* IPL the handler is established at */
        int ih_intr;		/* CIU output: 0 for INT0, 1 for INT1 */
};

struct octeon_intrhead {
        LIST_HEAD(, octeon_intrhand) intr_list;
        struct evcnt intr_count;
        int intr_refcnt;
};

struct octeon_intrhead octeon_ciu_intrtab[NIRQS];

struct octeon_cpuintr {
        LIST_HEAD(, octeon_intrhand) cintr_list;
        struct evcnt cintr_count;
};

#define NINTRS	5	/* MIPS INT0 - INT4 */

struct octeon_cpuintr octeon_cpuintrs[NINTRS];
const char * const octeon_cpuintrnames[NINTRS] = {
        "int 0 (IP2)",
        "int 1 (IP3)",
        "int 2",
        "int 3",
        "int 4",
};


/*
 * Software copies of the CIU_INT0_EN0 and CIU_INT1_EN0 interrupt enable
 * registers, so individual bits can be set and cleared without reading
 * the CSRs back.
 */
uint64_t int0_enable0;
uint64_t int1_enable0;


#if 0
/*
 * NOTE: This routine must be called with interrupts disabled in the SR.
 */
static void
octeon_intr_calculate_masks(void)
{
        struct octeon_intrhand *ih;
        int level, ipl, irq;

        /* First, figure out which IPLs each INT has. */
        for (level = 0; level < NINTRS; level++) {
                int levels = 0;

                for (irq = 0; irq < NIRQS; irq++)
                        LIST_FOREACH(ih, &octeon_intrtab[irq].intr_list, ih_q)
                                if (ih->ih_intr == level)
                                        levels |= 1 << ih->ih_ipl;
                octeon_cpuintrs[level].cintr_levels = levels;
        }

        /* Next, figure out which INTs are used by each IPL. */
        for (ipl = 0; ipl < _IPL_N; ipl++) {
                int irqs = 0;

                for (level = 0; level < NINTRS; level++)
                        if (1 << ipl & octeon_cpuintrs[level].cintr_levels)
                                irqs |= MIPS_INT_MASK_0 << level;
                ipl_sr_bits[ipl] = irqs;
        }

        /*
         * IPL_CLOCK should mask clock interrupt even if interrupt handler
         * is not registered.
         */
        ipl_sr_bits[IPL_CLOCK] |= MIPS_INT_MASK_5;

        /*
         * IPL_NONE is used for hardware interrupts that are never blocked,
         * and do not block anything else.
         */
        ipl_sr_bits[IPL_NONE] = 0;

        /*
         * Initialize the soft interrupt masks to block themselves.
         */
        ipl_sr_bits[IPL_SOFTCLOCK] |= SI_TO_SRBIT(SI_SOFTCLOCK);
        ipl_sr_bits[IPL_SOFTNET] |= SI_TO_SRBIT(SI_SOFTNET);
        ipl_sr_bits[IPL_SOFTSERIAL] |= SI_TO_SRBIT(SI_SOFTSERIAL);

        /*
         * Enforce a hierarchy that gives "slow" devices (or devices with
         * limited input buffer space/"real-time" requirements) a better
         * chance at not dropping data.
         */
        for (ipl = 1; ipl < _IPL_N; ipl++)
                ipl_sr_bits[ipl] |= ipl_sr_bits[ipl - 1];

        /*
         * splhigh() must block "everything".
         */
        ipl_sr_bits[IPL_HIGH] = MIPS_INT_MASK;

        /*
         * Now compute which INTs must be blocked when servicing any
         * given INT.
         */
        for (level = 0; level < NINTRS; level++) {
                int irqs = (MIPS_INT_MASK_0 << level);

                for (irq = 0; irq < NIRQS; irq++)
                        LIST_FOREACH(ih, &octeon_intrtab[irq].intr_list, ih_q)
                                if (ih->ih_intr == level)
                                        irqs |= ipl_sr_bits[ih->ih_ipl];
                octeon_cpuintrs[level].cintr_mask = irqs;
        }

        // for (ipl = 0; ipl < _IPL_N; ipl++)
        //        printf("debug: ipl_sr_bits[%.2d] = %.8x\n", ipl, ipl_sr_bits[ipl]);
}
#endif

void
octeon_intr_init(void)
{
        ipl_sr_map = octeon_ipl_sr_map;

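        /*
         * Start with every CIU interrupt source masked; the enable
         * registers written here match the zeroed int0_enable0 and
         * int1_enable0 shadows, and individual sources are enabled
         * later from octeon_intr_establish().
         */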
        octeon_write_csr(CIU_INT0_EN0, 0);
        octeon_write_csr(CIU_INT1_EN0, 0);
        octeon_write_csr(CIU_INT32_EN0, 0);
        octeon_write_csr(CIU_INT0_EN1, 0);
        octeon_write_csr(CIU_INT1_EN1, 0);
        octeon_write_csr(CIU_INT32_EN1, 0);

        for (size_t i = 0; i < NINTRS; i++) {
                LIST_INIT(&octeon_cpuintrs[i].cintr_list);
                evcnt_attach_dynamic(&octeon_cpuintrs[i].cintr_count,
                    EVCNT_TYPE_INTR, NULL, "mips", octeon_cpuintrnames[i]);
        }

        for (size_t i = 0; i < NIRQS; i++) {
                LIST_INIT(&octeon_ciu_intrtab[i].intr_list);
                octeon_ciu_intrtab[i].intr_refcnt = 0;
                evcnt_attach_dynamic(&octeon_ciu_intrtab[i].intr_count,
                    EVCNT_TYPE_INTR, NULL, "octeon", octeon_intrnames[i]);
        }
}

void
octeon_cal_timer(int corefreq)
{
        /* Compute the number of cycles per second. */
        curcpu()->ci_cpu_freq = corefreq;

        /* Compute the number of ticks for hz. */
        curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;

        /* Compute the delay divisor and reciprocal. */
        curcpu()->ci_divisor_delay =
            ((curcpu()->ci_cpu_freq + 500000) / 1000000);
#if 0
        MIPS_SET_CI_RECIPRICAL(curcpu());
#endif

        mips3_cp0_count_write(0);
        mips3_cp0_compare_write(0);
}

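/*
 * Establish handler "func"/"arg" for CIU interrupt source "irq"
 * (0..NIRQS-1) at priority "level".  "req" selects which CIU output the
 * source is routed to: 0 for INT0 (CPU IP2), 1 for INT1 (CPU IP3).
 * Returns an opaque cookie for octeon_intr_disestablish(), or NULL if
 * the handler could not be allocated.
 */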
void *
octeon_intr_establish(int irq, int req, int level,
    int (*func)(void *), void *arg)
{
        struct octeon_intrhand *ih;
        u_int64_t irq_mask;
        int s;

        if (irq >= NIRQS)
                panic("octeon_intr_establish: bogus IRQ %d", irq);
        if (req > 1)
                panic("octeon_intr_establish: bogus request %d", req);

        ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
        if (ih == NULL)
                return (NULL);

        ih->ih_func = func;
        ih->ih_arg = arg;
        ih->ih_irq = irq;
        ih->ih_ipl = level;
        ih->ih_intr = req;

        s = splhigh();

        /*
         * First, link it into the tables.
         * XXX do we want a separate list (really, should only be one item, not
         * a list anyway) per irq, not per CPU interrupt?
         */
        LIST_INSERT_HEAD(&octeon_ciu_intrtab[irq].intr_list, ih, ih_q);

        /*
         * Now enable it.
         */
        if (octeon_ciu_intrtab[irq].intr_refcnt++ == 0) {
                irq_mask = 1ULL << irq;

                switch (req) {
                case 0:
                        int0_enable0 |= irq_mask;
                        octeon_write_csr(CIU_INT0_EN0, int0_enable0);
                        break;

                case 1:
                        int1_enable0 |= irq_mask;
                        octeon_write_csr(CIU_INT1_EN0, int1_enable0);
                        break;
                }
        }
        splx(s);

        return (ih);
}

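/*
 * Remove a handler previously established with octeon_intr_establish().
 * When the last handler for a CIU source goes away, the source is masked
 * again in the corresponding enable register.
 */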
void
octeon_intr_disestablish(void *cookie)
{
        struct octeon_intrhand *ih = cookie;
        u_int64_t irq_mask;
        int irq, req, s;

        irq = ih->ih_irq;
        req = ih->ih_intr;

        s = splhigh();

        /*
         * First, remove it from the table.
         */
        LIST_REMOVE(ih, ih_q);

        /*
         * Now, disable it, if there is nothing remaining on the
         * list.
         */
        if (octeon_ciu_intrtab[irq].intr_refcnt-- == 1) {
                irq &= 63;	/* throw away high bit if set */
                req &= 1;	/* throw away high bit if set */
                irq_mask = ~(1ULL << irq);

                switch (req) {
                case 0:
                        int0_enable0 &= irq_mask;
                        octeon_write_csr(CIU_INT0_EN0, int0_enable0);
                        break;

                case 1:
                        int1_enable0 &= irq_mask;
                        octeon_write_csr(CIU_INT1_EN0, int1_enable0);
                        break;
                }
        }
        free(ih, M_DEVBUF);

        splx(s);
}

void
octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending)
{
        struct octeon_intrhand *ih;
        int level, irq;
        uint64_t hwpend = 0;

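        /*
         * Map the highest pending CPU interrupt bit to a CIU output
         * number.  MIPS_INT_MASK_0 is bit 10 of ipending, for which
         * __builtin_clz() returns 21, so "21 - clz" yields 0 for INT0
         * (IP2) and 1 for INT1 (IP3).
         */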
        level = __builtin_clz(ipending);
        switch (level = 21 - level) {
        case 0:		// MIPS_INT_MASK_0
                hwpend = octeon_read_csr(CIU_INT0_SUM0) & int0_enable0;
                break;

        case 1:		// MIPS_INT_MASK_1
                hwpend = octeon_read_csr(CIU_INT1_SUM0) & int1_enable0;
                break;

        default:
                panic("octeon_iointr: illegal interrupt");
        }
        if ((irq = ffs64(hwpend) - 1) < 0)
                return;
        octeon_ciu_intrtab[irq].intr_count.ev_count++;
        LIST_FOREACH(ih, &octeon_ciu_intrtab[irq].intr_list, ih_q) {
                (*ih->ih_func)(ih->ih_arg);
        }
}