/*	$NetBSD: intr.c,v 1.2 2014/12/23 15:08:25 macallan Exp $ */

/*-
 * Copyright (c) 2014 Michael Lorenz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.2 2014/12/23 15:08:25 macallan Exp $");

#define __INTR_PRIVATE

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/timetc.h>
#include <sys/bitops.h>

#include <mips/locore.h>
#include <machine/intr.h>

#include <mips/ingenic/ingenic_regs.h>

#include "opt_ingenic.h"

extern void ingenic_clockintr(uint32_t);
extern void ingenic_puts(const char *);

/*
 * This is a mask of bits to clear in the SR when we go to a
 * given hardware interrupt priority level.
 */
static const struct ipl_sr_map ingenic_ipl_sr_map = {
	.sr_bits = {
		[IPL_NONE] = 0,
		[IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0,
		[IPL_SOFTNET] = MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1,
		[IPL_VM] =
		    MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1 |
		    MIPS_INT_MASK_0 |
		    MIPS_INT_MASK_3 |
		    MIPS_INT_MASK_4 |
		    MIPS_INT_MASK_5,
		[IPL_SCHED] =
		    MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1 |
		    MIPS_INT_MASK_0 |
		    MIPS_INT_MASK_1 |
		    MIPS_INT_MASK_2 |
		    MIPS_INT_MASK_3 |
		    MIPS_INT_MASK_4 |
		    MIPS_INT_MASK_5,
		[IPL_DDB] = MIPS_INT_MASK,
		[IPL_HIGH] = MIPS_INT_MASK,
	},
};
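
/*
 * Note (editorial, derived from the table above and evbmips_iointr()
 * below): INT1 (the mailbox/IPI line) and INT2 (the OS timer) are the
 * only hardware lines left enabled at IPL_VM; they only get masked at
 * IPL_SCHED and above.
 */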

#define NINTR 64

/* some timer channels share interrupts, couldn't find any others */
struct intrhand {
	struct evcnt ih_count;
	int (*ih_func)(void *);
	void *ih_arg;
	int ih_ipl;
};

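/*
 * One slot per interrupt source: indices 0-31 correspond to the
 * ICPR0/ICMR0 bank, indices 32-63 to ICPR1/ICMR1 (see ingenic_irq()
 * below).
 */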
struct intrhand intrs[NINTR];

void ingenic_irq(int);

void
evbmips_intr_init(void)
{
	uint32_t reg;
	int i;
	char irqstr[8];

	ipl_sr_map = ingenic_ipl_sr_map;

	/* zero all handlers */
	for (i = 0; i < NINTR; i++) {
		intrs[i].ih_func = NULL;
		intrs[i].ih_arg = NULL;
		snprintf(irqstr, sizeof(irqstr), "irq %d", i);
		evcnt_attach_dynamic(&intrs[i].ih_count, EVCNT_TYPE_INTR,
		    NULL, "PIC", irqstr);
	}

	/* mask all peripheral IRQs */
	writereg(JZ_ICMR0, 0xffffffff);
	writereg(JZ_ICMR1, 0xffffffff);

	/* allow peripheral interrupts to core 0 only */
	reg = MFC0(12, 4);	/* reset entry and interrupts */
	reg &= 0xffff0000;
	reg |= REIM_IRQ0_M | REIM_MIRQ0_M | REIM_MIRQ1_M;
	MTC0(reg, 12, 4);
}

void
evbmips_iointr(int ipl, vaddr_t pc, uint32_t ipending)
{
	uint32_t id;
#ifdef INGENIC_DEBUG
	char buffer[256];

	snprintf(buffer, 256, "pending: %08x CR %08x\n", ipending,
	    MFC0(MIPS_COP_0_CAUSE, 0));
	ingenic_puts(buffer);
#endif
	/* see which core we're on */
	id = MFC0(15, 1) & 7;

	/*
	 * XXX
	 * the manual counts the softint bits as INT0 and INT1, our headers
	 * don't, so everything here looks off by two
	 */
	if (ipending & MIPS_INT_MASK_1) {
		/*
		 * this is a mailbox interrupt / IPI
		 * for now just print the message and clear it
		 */
		uint32_t reg;

		/* read pending IPIs */
		reg = MFC0(12, 3);
		if (id == 0) {
			if (reg & CS_MIRQ0_P) {

#ifdef INGENIC_DEBUG
				snprintf(buffer, 256,
				    "IPI for core 0, msg %08x\n",
				    MFC0(CP0_CORE_MBOX, 0));
				ingenic_puts(buffer);
#endif
				reg &= (~CS_MIRQ0_P);
				/* clear it */
				MTC0(reg, 12, 3);
			}
		} else if (id == 1) {
			if (reg & CS_MIRQ1_P) {
#ifdef INGENIC_DEBUG
				snprintf(buffer, 256,
				    "IPI for core 1, msg %08x\n",
				    MFC0(CP0_CORE_MBOX, 1));
				ingenic_puts(buffer);
#endif
				reg &= (~CS_MIRQ1_P);
				/* clear it */
				MTC0(reg, 12, 3);
			}
		}
	}
	if (ipending & MIPS_INT_MASK_2) {
		/* this is a timer interrupt */
		ingenic_clockintr(id);
#ifdef INGENIC_DEBUG
		ingenic_puts("INT2\n");
#endif
	}
	if (ipending & MIPS_INT_MASK_0) {
		/* peripheral interrupt */

		/*
		 * XXX
		 * OS timer interrupts are supposed to show up as INT2 as
		 * well, but I haven't seen them there, so for now we just
		 * weed them out right here.
		 * The idea is to allow peripheral interrupts on both cores
		 * but block INT0 on core1 so it would see only timer
		 * interrupts and IPIs. If that doesn't work we'll have to
		 * send an IPI to core1 for each timer tick.
		 */
		if (readreg(JZ_ICPR0) & 0x08000000) {
			ingenic_clockintr(id);
		}
		ingenic_irq(ipl);
		KASSERT(id == 0);
	}
}

void
ingenic_irq(int ipl)
{
	uint32_t irql, irqh, mask;
	int bit, idx;

	irql = readreg(JZ_ICPR0);
	bit = ffs32(irql);
	while (bit != 0) {
		idx = bit - 1;
		mask = 1 << idx;
		if (intrs[idx].ih_func != NULL) {
			if (intrs[idx].ih_ipl == IPL_VM)
				KERNEL_LOCK(1, NULL);
			intrs[idx].ih_func(intrs[idx].ih_arg);
			if (intrs[idx].ih_ipl == IPL_VM)
				KERNEL_UNLOCK_ONE(NULL);
			intrs[idx].ih_count.ev_count++;
		} else {
			/* spurious interrupt, mask it */
			writereg(JZ_ICMSR0, mask);
		}
		irql &= ~mask;
		bit = ffs32(irql);
	}

	irqh = readreg(JZ_ICPR1);
	bit = ffs32(irqh);
	while (bit != 0) {
		idx = bit - 1;
		mask = 1 << idx;
		idx += 32;
		if (intrs[idx].ih_func != NULL) {
			if (intrs[idx].ih_ipl == IPL_VM)
				KERNEL_LOCK(1, NULL);
			intrs[idx].ih_func(intrs[idx].ih_arg);
			if (intrs[idx].ih_ipl == IPL_VM)
				KERNEL_UNLOCK_ONE(NULL);
			intrs[idx].ih_count.ev_count++;
		} else {
			/* spurious interrupt, mask it */
			writereg(JZ_ICMSR1, mask);
		}
		irqh &= ~mask;
		bit = ffs32(irqh);
	}

}

void *
evbmips_intr_establish(int irq, int (*func)(void *), void *arg)
{
	int s;

	if ((irq < 0) || (irq >= NINTR)) {
		aprint_error("%s: invalid irq %d\n", __func__, irq);
		return NULL;
	}

	s = splhigh();	/* XXX probably needs a mutex */
	intrs[irq].ih_func = func;
	intrs[irq].ih_arg = arg;
	intrs[irq].ih_ipl = IPL_VM;

	/* now enable the IRQ */
	if (irq >= 32) {
		writereg(JZ_ICMCR1, 1 << (irq - 32));
	} else
		writereg(JZ_ICMCR0, 1 << irq);

	splx(s);

	return ((void *)(irq + 1));
}
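
/*
 * Editorial sketch of typical use from a driver attach routine; the
 * names "sc", "sc_irq", "sc_ih" and mydev_intr() are hypothetical and
 * not part of this file:
 *
 *	sc->sc_ih = evbmips_intr_establish(sc->sc_irq, mydev_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		aprint_error_dev(self, "couldn't establish interrupt\n");
 *
 * The cookie returned above is simply the IRQ number plus one, so NULL
 * can signal failure; hand it back to evbmips_intr_disestablish() to
 * tear the handler down again.
 */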

void
evbmips_intr_disestablish(void *cookie)
{
	int irq = ((int)cookie) - 1;
	int s;

	if ((irq < 0) || (irq >= NINTR)) {
		aprint_error("%s: invalid irq %d\n", __func__, irq);
		return;
	}

	s = splhigh();

	/* disable the IRQ */
	if (irq >= 32) {
		writereg(JZ_ICMSR1, 1 << (irq - 32));
	} else
		writereg(JZ_ICMSR0, 1 << irq);

	intrs[irq].ih_func = NULL;
	intrs[irq].ih_arg = NULL;
	intrs[irq].ih_ipl = 0;

	splx(s);
}