/*	$NetBSD: octeon_intr.c,v 1.1 2015/04/29 08:32:00 hikaru Exp $	*/
2 /*
3 * Copyright 2001, 2002 Wasabi Systems, Inc.
4 * All rights reserved.
5 *
6 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed for the NetBSD Project by
19 * Wasabi Systems, Inc.
20 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
21 * or promote products derived from this software without specific prior
22 * written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
/*
 * Platform-specific interrupt support for the Cavium Octeon.
 */
40
41 #include "opt_octeon.h"
42 #define __INTR_PRIVATE
43
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: octeon_intr.c,v 1.1 2015/04/29 08:32:00 hikaru Exp $");
46
47 #include <sys/param.h>
48 #include <sys/cpu.h>
49 #include <sys/systm.h>
50 #include <sys/device.h>
51 #include <sys/intr.h>
52 #include <sys/kernel.h>
53 #include <sys/malloc.h>
54
55 #include <lib/libkern/libkern.h>
56
57 #include <mips/locore.h>
58
59 #include <mips/cavium/dev/octeon_ciureg.h>
60 #include <mips/cavium/octeonvar.h>
61
62 /*
63 * This is a mask of bits to clear in the SR when we go to a
64 * given hardware interrupt priority level.
65 */
static const struct ipl_sr_map octeon_ipl_sr_map = {
	.sr_bits = {
	[IPL_NONE] = 0,				/* blocks nothing */
	[IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0,	/* soft int 0 only */
	[IPL_SOFTNET] = MIPS_SOFT_INT_MASK,	/* both soft ints */
	/* soft ints plus hardware INT0 (CIU line 0) */
	[IPL_VM] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0,
	/* additionally blocks INT5 (CP0 count/compare clock) */
	[IPL_SCHED] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
	    | MIPS_INT_MASK_5,
	[IPL_DDB] = MIPS_INT_MASK,		/* blocks everything */
	[IPL_HIGH] = MIPS_INT_MASK,		/* blocks everything */
	},
};
78
/* Number of CIU interrupt sources (one per CIU_INTx_SUM0 bit). */
#define NIRQS 64

/*
 * Names of the 64 CIU interrupt sources, indexed by bit number.
 * NOTE(review): this table is not referenced elsewhere in this file;
 * presumably intended for evcnt/debug naming.
 */
const char *octeon_intrnames[NIRQS] = {
	/* 0-15: work queues */
	"workq 0",
	"workq 1",
	"workq 2",
	"workq 3",
	"workq 4",
	"workq 5",
	"workq 6",
	"workq 7",
	"workq 8",
	"workq 9",
	"workq 10",
	"workq 11",
	"workq 12",
	"workq 13",
	"workq 14",
	"workq 15",
	/* 16-31: GPIO pins */
	"gpio 0",
	"gpio 1",
	"gpio 2",
	"gpio 3",
	"gpio 4",
	"gpio 5",
	"gpio 6",
	"gpio 7",
	"gpio 8",
	"gpio 9",
	"gpio 10",
	"gpio 11",
	"gpio 12",
	"gpio 13",
	"gpio 14",
	"gpio 15",
	/* 32-43: mailboxes, UARTs, PCI INTx and MSI */
	"mbox 0-15",
	"mbox 16-31",
	"uart 0",
	"uart 1",
	"pci inta",
	"pci intb",
	"pci intc",
	"pci intd",
	"pci msi 0-15",
	"pci msi 16-31",
	"pci msi 32-47",
	"pci msi 48-63",
	/* 44-63: misc on-chip units */
	"wdog summary",
	"twsi",
	"rml",
	"trace",
	"gmx drop",
	"reserved",
	"ipd drop",
	"reserved",
	"timer 0",
	"timer 1",
	"timer 2",
	"timer 3",
	"usb",
	"pcm/tdm",
	"mpi/spi",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
};
147
/*
 * One established interrupt handler, created by octeon_intr_establish()
 * and freed by octeon_intr_disestablish().
 */
struct octeon_intrhand {
	LIST_ENTRY(octeon_intrhand) ih_q;	/* link on per-IRQ list */
	int (*ih_func)(void *);			/* handler function */
	void *ih_arg;				/* argument passed to ih_func */
	int ih_irq;				/* CIU source, 0..NIRQS-1 */
	int ih_ipl;				/* priority level (IPL_*) */
	int ih_intr;				/* CPU line: 0=INT0, 1=INT1 */
};
156
/*
 * Per-CIU-source handler list plus a count of established handlers;
 * the source is enabled in the CIU only while intr_refcnt > 0.
 */
struct octeon_intrhead {
	LIST_HEAD(, octeon_intrhand) intr_list;	/* handlers for this source */
	int intr_refcnt;			/* number of handlers */
};

/* One entry per CIU interrupt source. */
struct octeon_intrhead octeon_ciu_intrtab[NIRQS];
163
/*
 * Per-CPU-interrupt-line dispatch state and event counter.
 */
struct octeon_cpuintr {
	/*
	 * NOTE(review): cintr_list is initialized in octeon_intr_init()
	 * but nothing in this file ever inserts into it -- handlers hang
	 * off octeon_ciu_intrtab instead.  Possibly vestigial.
	 */
	LIST_HEAD(, octeon_intrhand) cintr_list;
	struct evcnt cintr_count;	/* dispatch statistics, evcnt(9) */
};

#define NINTRS 5 /* MIPS INT0 - INT4 */

struct octeon_cpuintr octeon_cpuintrs[NINTRS];

/* Names for the evcnt entries attached in octeon_intr_init(). */
const char *octeon_cpuintrnames[NINTRS] = {
	"int 0 (IP2)",
	"int 1 (IP3)",
	"int 2 ",
	"int 3 ",
	"int 4 ",
};
179
180
/*
 * Software shadow copies of the CIU_INT0_EN0/CIU_INT1_EN0 enable
 * registers, updated on every establish/disestablish and used to mask
 * the pending-summary reads in octeon_iointr().
 */
uint64_t int0_enable0;
uint64_t int1_enable0;
185
#if 0
/*
 * NOTE(review): dead code, disabled by #if 0.  It predates the static
 * octeon_ipl_sr_map table above and references identifiers that no
 * longer exist in this file (octeon_intrtab, cintr_levels, cintr_mask,
 * ipl_sr_bits), so it cannot compile if simply re-enabled.  Candidate
 * for removal.
 *
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
octeon_intr_calculate_masks(void)
{
	struct octeon_intrhand *ih;
	int level, ipl, irq;

	/* First, figure out which IPLs each INT has. */
	for (level = 0; level < NINTRS; level++) {
		int levels = 0;

		for (irq = 0; irq < NIRQS; irq++)
			LIST_FOREACH(ih, &octeon_intrtab[irq].intr_list, ih_q)
				if (ih->ih_intr == level)
					levels |= 1 << ih->ih_ipl;
		octeon_cpuintrs[level].cintr_levels = levels;
	}

	/* Next, figure out which INTs are used by each IPL. */
	for (ipl = 0; ipl < _IPL_N; ipl++) {
		int irqs = 0;

		for (level = 0; level < NINTRS; level++)
			if (1 << ipl & octeon_cpuintrs[level].cintr_levels)
				irqs |= MIPS_INT_MASK_0 << level;
		ipl_sr_bits[ipl] = irqs;
	}

	/*
	 * IPL_CLOCK should mask clock interrupt even if interrupt handler
	 * is not registered.
	 */
	ipl_sr_bits[IPL_CLOCK] |= MIPS_INT_MASK_5;

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	ipl_sr_bits[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	ipl_sr_bits[IPL_SOFTCLOCK] |= SI_TO_SRBIT(SI_SOFTCLOCK);
	ipl_sr_bits[IPL_SOFTNET] |= SI_TO_SRBIT(SI_SOFTNET);
	ipl_sr_bits[IPL_SOFTSERIAL] |= SI_TO_SRBIT(SI_SOFTSERIAL);

	/*
	 * Enforce a hierarchy that gives "slow" device (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	for (ipl = 1; ipl < _IPL_N; ipl++)
		ipl_sr_bits[ipl] |= ipl_sr_bits[ipl - 1];

	/*
	 * splhigh() must block "everything".
	 */
	ipl_sr_bits[IPL_HIGH] = MIPS_INT_MASK;

	/*
	 * Now compute which INTs must be blocked when servicing any
	 * given INT.
	 */
	for (level = 0; level < NINTRS; level++) {
		int irqs = (MIPS_INT_MASK_0 << level);

		for (irq = 0; irq < NIRQS; irq++)
			LIST_FOREACH(ih, &octeon_intrtab[irq].intr_list, ih_q)
				if (ih->ih_intr == level)
					irqs |= ipl_sr_bits[ih->ih_ipl];
		octeon_cpuintrs[level].cintr_mask = irqs;
	}

//	for (ipl = 0; ipl < _IPL_N; ipl++)
//		printf("debug: ipl_sr_bits[%.2d] = %.8x\n", ipl, ipl_sr_bits[ipl]);
}
#endif
267
268 void
269 octeon_intr_init(void)
270 {
271 ipl_sr_map = octeon_ipl_sr_map;
272
273 octeon_write_csr(CIU_INT0_EN0, 0);
274 octeon_write_csr(CIU_INT1_EN0, 0);
275 octeon_write_csr(CIU_INT32_EN0, 0);
276 octeon_write_csr(CIU_INT0_EN1, 0);
277 octeon_write_csr(CIU_INT1_EN1, 0);
278 octeon_write_csr(CIU_INT32_EN1, 0);
279
280 for (size_t i = 0; i < NINTRS; i++) {
281 LIST_INIT(&octeon_cpuintrs[i].cintr_list);
282 evcnt_attach_dynamic(&octeon_cpuintrs[i].cintr_count,
283 EVCNT_TYPE_INTR, NULL, "mips", octeon_cpuintrnames[i]);
284 }
285
286 for (size_t i = 0; i < NIRQS; i++) {
287 LIST_INIT(&octeon_ciu_intrtab[i].intr_list);
288 octeon_ciu_intrtab[i].intr_refcnt = 0;
289 }
290 }
291
292 void
293 octeon_cal_timer(int corefreq)
294 {
295 /* Compute the number of cycles per second. */
296 curcpu()->ci_cpu_freq = corefreq;
297
298 /* Compute the number of ticks for hz. */
299 curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;
300
301 /* Compute the delay divisor and reciprical. */
302 curcpu()->ci_divisor_delay =
303 ((curcpu()->ci_cpu_freq + 500000) / 1000000);
304 #if 0
305 MIPS_SET_CI_RECIPRICAL(curcpu());
306 #endif
307
308 mips3_cp0_count_write(0);
309 mips3_cp0_compare_write(0);
310 }
311
312 void *
313 octeon_intr_establish(int irq, int req, int level,
314 int (*func)(void *), void *arg)
315 {
316 struct octeon_intrhand *ih;
317 u_int64_t irq_mask;
318 int s;
319
320 if (irq >= NIRQS)
321 panic("octeon_intr_establish: bogus IRQ %d", irq);
322 if (req > 1)
323 panic("octeon_intr_establish: bogus request %d", req);
324
325 ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
326 if (ih == NULL)
327 return (NULL);
328
329 ih->ih_func = func;
330 ih->ih_arg = arg;
331 ih->ih_irq = irq;
332 ih->ih_ipl = level;
333 ih->ih_intr = req;
334
335 s = splhigh();
336
337 /*
338 * First, link it into the tables.
339 * XXX do we want a separate list (really, should only be one item, not
340 * a list anyway) per irq, not per CPU interrupt?
341 */
342 LIST_INSERT_HEAD(&octeon_ciu_intrtab[irq].intr_list, ih, ih_q);
343
344 /*
345 * Now enable it.
346 */
347 if (octeon_ciu_intrtab[irq].intr_refcnt++ == 0) {
348 irq_mask = 1ULL << irq;
349
350 switch (req) {
351 case 0:
352 int0_enable0 |= irq_mask;
353 octeon_write_csr(CIU_INT0_EN0, int0_enable0);
354 break;
355
356 case 1:
357 int1_enable0 |= irq_mask;
358 octeon_write_csr(CIU_INT1_EN0, int1_enable0);
359 break;
360 }
361 }
362 splx(s);
363
364 return (ih);
365 }
366
367 void
368 octeon_intr_disestablish(void *cookie)
369 {
370 struct octeon_intrhand *ih = cookie;
371 u_int64_t irq_mask;
372 int irq, req, s;
373
374 irq = ih->ih_irq;
375 req = ih->ih_intr;
376
377 s = splhigh();
378
379 /*
380 * First, remove it from the table.
381 */
382 LIST_REMOVE(ih, ih_q);
383
384 /*
385 * Now, disable it, if there is nothing remaining on the
386 * list.
387 */
388 if (octeon_ciu_intrtab[irq].intr_refcnt-- == 1) {
389 irq &= 63; /* throw away high bit if set */
390 req &= 1; /* throw away high bit if set */
391 irq_mask = ~(1ULL << irq);
392
393 switch (req) {
394 case 0:
395 int0_enable0 &= irq_mask;
396 octeon_write_csr(CIU_INT0_EN0, int0_enable0);
397 break;
398
399 case 1:
400 int1_enable0 &= irq_mask;
401 octeon_write_csr(CIU_INT1_EN0, int1_enable0);
402 break;
403 }
404 }
405 free(ih, M_DEVBUF);
406
407 splx(s);
408 }
409
/*
 * Hardware interrupt dispatch, entered from the MIPS interrupt glue
 * with the pending CP0 cause bits in `ipending'.  Determines which CPU
 * line asserted, reads the corresponding CIU summary register masked by
 * our software enable shadow, and runs the handlers for one pending
 * source.
 */
void
octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending)
{
	struct octeon_intrhand *ih;
	int level, irq;
	uint64_t hwpend = 0;

	/*
	 * clz yields 31 - (bit number of the highest pending cause bit);
	 * "21 - clz" then maps MIPS_INT_MASK_0 to level 0 and
	 * MIPS_INT_MASK_1 to level 1 (assumes MIPS_INT_MASK_0 is cause
	 * bit 10), so the highest-priority pending line wins.
	 */
	asm (".set mips64; clz %0,%1; .set mips0" : "=r"(level) : "r"(ipending));
	switch (level = 21 - level) {
	case 0: // MIPS_INT_MASK_0
		hwpend = octeon_read_csr(CIU_INT0_SUM0) & int0_enable0;
		break;

	case 1: // MIPS_INT_MASK_1
		hwpend = octeon_read_csr(CIU_INT1_SUM0) & int1_enable0;
		break;

	default:
		panic("octeon_iointr: illegal interrupt");
	}
	/*
	 * Dispatch only the lowest-numbered enabled-and-pending CIU
	 * source per call.  NOTE(review): presumably the CIU keeps other
	 * SUM0 bits asserted so the line re-fires and we get called
	 * again for them -- confirm against the CIU documentation.
	 */
	if ((irq = ffs64(hwpend) - 1) < 0)
		return;
	LIST_FOREACH(ih, &octeon_ciu_intrtab[irq].intr_list, ih_q) {
		(*ih->ih_func)(ih->ih_arg);
	}
}
436