     1 1.40  skrll /*	$NetBSD: gic.c,v 1.40 2020/07/12 13:33:44 skrll Exp $	*/
2 1.1 matt /*-
3 1.1 matt * Copyright (c) 2012 The NetBSD Foundation, Inc.
4 1.1 matt * All rights reserved.
5 1.1 matt *
6 1.1 matt * This code is derived from software contributed to The NetBSD Foundation
7 1.1 matt * by Matt Thomas of 3am Software Foundry.
8 1.1 matt *
9 1.1 matt * Redistribution and use in source and binary forms, with or without
10 1.1 matt * modification, are permitted provided that the following conditions
11 1.1 matt * are met:
12 1.1 matt * 1. Redistributions of source code must retain the above copyright
13 1.1 matt * notice, this list of conditions and the following disclaimer.
14 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
15 1.1 matt * notice, this list of conditions and the following disclaimer in the
16 1.1 matt * documentation and/or other materials provided with the distribution.
17 1.1 matt *
18 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 1.1 matt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 1.1 matt * POSSIBILITY OF SUCH DAMAGE.
29 1.1 matt */
30 1.1 matt
31 1.7 matt #include "opt_ddb.h"
32 1.11 skrll #include "opt_multiprocessor.h"
33 1.7 matt
34 1.1 matt #define _INTR_PRIVATE
35 1.1 matt
36 1.1 matt #include <sys/cdefs.h>
37 1.40 skrll __KERNEL_RCSID(0, "$NetBSD: gic.c,v 1.40 2020/07/12 13:33:44 skrll Exp $");
38 1.1 matt
39 1.1 matt #include <sys/param.h>
40 1.1 matt #include <sys/bus.h>
41 1.31 skrll #include <sys/cpu.h>
42 1.1 matt #include <sys/device.h>
43 1.1 matt #include <sys/evcnt.h>
44 1.1 matt #include <sys/intr.h>
45 1.1 matt #include <sys/proc.h>
46 1.36 jmcneill #include <sys/atomic.h>
47 1.1 matt
48 1.1 matt #include <arm/armreg.h>
49 1.33 ryo #include <arm/atomic.h>
50 1.1 matt #include <arm/cpufunc.h>
51 1.33 ryo #include <arm/locore.h>
52 1.1 matt
53 1.1 matt #include <arm/cortex/gic_reg.h>
54 1.1 matt #include <arm/cortex/mpcore_var.h>
55 1.1 matt
56 1.21 jmcneill void armgic_irq_handler(void *);
57 1.21 jmcneill
58 1.30 jmcneill #define ARMGIC_SGI_IPIBASE 0
59 1.30 jmcneill
60 1.30 jmcneill /*
    61 1.30 jmcneill  * SGIs 8-15 are reserved for use by ARM Trusted Firmware.
62 1.30 jmcneill */
63 1.30 jmcneill __CTASSERT(ARMGIC_SGI_IPIBASE + NIPI <= 8);
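/*
 * With ARMGIC_SGI_IPIBASE at 0, each IPI number maps directly onto the SGI
 * with the same ID, so the assertion above keeps all NIPI software-generated
 * interrupts within SGIs 0-7, clear of the firmware-reserved range.
 */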
64 1.1 matt
65 1.1 matt static int armgic_match(device_t, cfdata_t, void *);
66 1.1 matt static void armgic_attach(device_t, device_t, void *);
67 1.1 matt
68 1.1 matt static void armgic_set_priority(struct pic_softc *, int);
69 1.1 matt static void armgic_unblock_irqs(struct pic_softc *, size_t, uint32_t);
70 1.1 matt static void armgic_block_irqs(struct pic_softc *, size_t, uint32_t);
71 1.1 matt static void armgic_establish_irq(struct pic_softc *, struct intrsource *);
72 1.1 matt #if 0
73 1.1 matt static void armgic_source_name(struct pic_softc *, int, char *, size_t);
74 1.1 matt #endif
75 1.1 matt
76 1.1 matt #ifdef MULTIPROCESSOR
77 1.1 matt static void armgic_cpu_init(struct pic_softc *, struct cpu_info *);
78 1.1 matt static void armgic_ipi_send(struct pic_softc *, const kcpuset_t *, u_long);
79 1.35 jmcneill static void armgic_get_affinity(struct pic_softc *, size_t, kcpuset_t *);
80 1.35 jmcneill static int armgic_set_affinity(struct pic_softc *, size_t, const kcpuset_t *);
81 1.1 matt #endif
82 1.1 matt
83 1.1 matt static const struct pic_ops armgic_picops = {
84 1.1 matt .pic_unblock_irqs = armgic_unblock_irqs,
85 1.1 matt .pic_block_irqs = armgic_block_irqs,
86 1.1 matt .pic_establish_irq = armgic_establish_irq,
87 1.1 matt #if 0
88 1.1 matt .pic_source_name = armgic_source_name,
89 1.1 matt #endif
90 1.1 matt .pic_set_priority = armgic_set_priority,
91 1.1 matt #ifdef MULTIPROCESSOR
92 1.1 matt .pic_cpu_init = armgic_cpu_init,
93 1.1 matt .pic_ipi_send = armgic_ipi_send,
94 1.35 jmcneill .pic_get_affinity = armgic_get_affinity,
95 1.35 jmcneill .pic_set_affinity = armgic_set_affinity,
96 1.1 matt #endif
97 1.1 matt };
98 1.1 matt
99 1.1 matt #define PICTOSOFTC(pic) ((struct armgic_softc *)(pic))
100 1.1 matt
101 1.1 matt static struct armgic_softc {
102 1.1 matt struct pic_softc sc_pic;
103 1.1 matt device_t sc_dev;
104 1.1 matt bus_space_tag_t sc_memt;
105 1.4 matt bus_space_handle_t sc_gicch;
106 1.4 matt bus_space_handle_t sc_gicdh;
107 1.1 matt size_t sc_gic_lines;
108 1.1 matt uint32_t sc_gic_type;
109 1.1 matt uint32_t sc_gic_valid_lines[1024/32];
110 1.1 matt uint32_t sc_enabled_local;
111 1.7 matt #ifdef MULTIPROCESSOR
112 1.35 jmcneill uint32_t sc_target[MAXCPUS];
113 1.7 matt uint32_t sc_mptargets;
114 1.7 matt #endif
115 1.24 jmcneill uint32_t sc_bptargets;
116 1.1 matt } armgic_softc = {
117 1.1 matt .sc_pic = {
118 1.1 matt .pic_ops = &armgic_picops,
119 1.1 matt .pic_name = "armgic",
120 1.1 matt },
121 1.1 matt };
122 1.1 matt
123 1.1 matt static struct intrsource armgic_dummy_source;
124 1.1 matt
125 1.1 matt __CTASSERT(NIPL == 8);
126 1.1 matt
127 1.1 matt /*
   128 1.6  matt  * GIC registers are always little-endian.  It is assumed the bus_space
129 1.6 matt * will do any endian conversion required.
130 1.1 matt */
131 1.1 matt static inline uint32_t
132 1.1 matt gicc_read(struct armgic_softc *sc, bus_size_t o)
133 1.1 matt {
134 1.6 matt return bus_space_read_4(sc->sc_memt, sc->sc_gicch, o);
135 1.1 matt }
136 1.1 matt
137 1.1 matt static inline void
138 1.1 matt gicc_write(struct armgic_softc *sc, bus_size_t o, uint32_t v)
139 1.1 matt {
140 1.4 matt bus_space_write_4(sc->sc_memt, sc->sc_gicch, o, v);
141 1.1 matt }
142 1.1 matt
143 1.1 matt static inline uint32_t
144 1.1 matt gicd_read(struct armgic_softc *sc, bus_size_t o)
145 1.1 matt {
146 1.6 matt return bus_space_read_4(sc->sc_memt, sc->sc_gicdh, o);
147 1.1 matt }
148 1.1 matt
149 1.1 matt static inline void
150 1.1 matt gicd_write(struct armgic_softc *sc, bus_size_t o, uint32_t v)
151 1.1 matt {
152 1.4 matt bus_space_write_4(sc->sc_memt, sc->sc_gicdh, o, v);
153 1.1 matt }
154 1.1 matt
155 1.24 jmcneill static uint32_t
156 1.24 jmcneill gicd_find_targets(struct armgic_softc *sc)
157 1.24 jmcneill {
158 1.24 jmcneill uint32_t targets = 0;
159 1.24 jmcneill
160 1.24 jmcneill /*
161 1.24 jmcneill * GICD_ITARGETSR0 through 7 are read-only, and each field returns
162 1.24 jmcneill * a value that corresponds only to the processor reading the
163 1.24 jmcneill * register. Use this to determine the current processor's
164 1.24 jmcneill * CPU interface number.
165 1.24 jmcneill */
166 1.24 jmcneill for (int i = 0; i < 8; i++) {
167 1.24 jmcneill targets = gicd_read(sc, GICD_ITARGETSRn(i));
168 1.24 jmcneill if (targets != 0)
169 1.24 jmcneill break;
170 1.24 jmcneill }
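	/*
	 * Every byte field that read back non-zero refers to this CPU's
	 * interface, so fold the 32-bit value down to a single byte below;
	 * the result is this processor's bit in the 8-bit GIC target map.
	 */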
171 1.24 jmcneill targets |= (targets >> 16);
172 1.24 jmcneill targets |= (targets >> 8);
173 1.24 jmcneill targets &= 0xff;
174 1.24 jmcneill
175 1.24 jmcneill return targets ? targets : 1;
176 1.24 jmcneill }
177 1.24 jmcneill
178 1.1 matt /*
179 1.1 matt * In the GIC prioritization scheme, lower numbers have higher priority.
180 1.9 matt * Only write priorities that could be non-secure.
181 1.1 matt */
182 1.1 matt static inline uint32_t
183 1.1 matt armgic_ipl_to_priority(int ipl)
184 1.1 matt {
185 1.9 matt return GICC_PMR_NONSECURE
186 1.9 matt | ((IPL_HIGH - ipl) * GICC_PMR_NS_PRIORITIES / NIPL);
187 1.1 matt }
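/*
 * A worked example of the mapping above, with NIPL == 8 (asserted earlier,
 * so IPL_HIGH == 7) and assuming the usual GICv2 values for the gic_reg.h
 * constants, GICC_PMR_NONSECURE == 0x80 and GICC_PMR_NS_PRIORITIES == 16
 * (those two values are an assumption, not taken from this file):
 *
 *	armgic_ipl_to_priority(IPL_HIGH) == 0x80 | (7 - 7) * 16 / 8 == 0x80
 *	armgic_ipl_to_priority(IPL_NONE) == 0x80 | (7 - 0) * 16 / 8 == 0x8e
 *
 * i.e. a higher IPL yields a numerically lower, more urgent, GIC priority.
 */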
188 1.1 matt
189 1.5 joerg #if 0
190 1.1 matt static inline int
191 1.1 matt armgic_priority_to_ipl(uint32_t priority)
192 1.1 matt {
193 1.9 matt return IPL_HIGH
194 1.9 matt - (priority & ~GICC_PMR_NONSECURE) * NIPL / GICC_PMR_NS_PRIORITIES;
195 1.1 matt }
196 1.5 joerg #endif
197 1.1 matt
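/*
 * The distributor's set-enable and clear-enable registers have write-one
 * semantics: a 1 bit written to GICD_ISENABLERn enables that interrupt and
 * a 1 bit written to GICD_ICENABLERn disables it, while 0 bits are ignored.
 * The two routines below can therefore write the raw mask without doing a
 * read-modify-write of the register.
 */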
198 1.1 matt static void
199 1.1 matt armgic_unblock_irqs(struct pic_softc *pic, size_t irq_base, uint32_t irq_mask)
200 1.1 matt {
201 1.1 matt struct armgic_softc * const sc = PICTOSOFTC(pic);
202 1.1 matt const size_t group = irq_base / 32;
203 1.1 matt
204 1.1 matt if (group == 0)
205 1.1 matt sc->sc_enabled_local |= irq_mask;
206 1.1 matt
207 1.1 matt gicd_write(sc, GICD_ISENABLERn(group), irq_mask);
208 1.1 matt }
209 1.1 matt
210 1.1 matt static void
211 1.1 matt armgic_block_irqs(struct pic_softc *pic, size_t irq_base, uint32_t irq_mask)
212 1.1 matt {
213 1.1 matt struct armgic_softc * const sc = PICTOSOFTC(pic);
214 1.1 matt const size_t group = irq_base / 32;
215 1.1 matt
216 1.1 matt if (group == 0)
217 1.1 matt sc->sc_enabled_local &= ~irq_mask;
218 1.1 matt
219 1.1 matt gicd_write(sc, GICD_ICENABLERn(group), irq_mask);
220 1.1 matt }
221 1.1 matt
222 1.1 matt static void
223 1.1 matt armgic_set_priority(struct pic_softc *pic, int ipl)
224 1.1 matt {
225 1.1 matt struct armgic_softc * const sc = PICTOSOFTC(pic);
226 1.1 matt
227 1.1 matt const uint32_t priority = armgic_ipl_to_priority(ipl);
228 1.1 matt gicc_write(sc, GICC_PMR, priority);
229 1.1 matt }
230 1.1 matt
231 1.35 jmcneill #ifdef MULTIPROCESSOR
232 1.35 jmcneill static void
233 1.35 jmcneill armgic_get_affinity(struct pic_softc *pic, size_t irq, kcpuset_t *affinity)
234 1.35 jmcneill {
235 1.35 jmcneill struct armgic_softc * const sc = PICTOSOFTC(pic);
236 1.35 jmcneill const size_t group = irq / 32;
237 1.35 jmcneill int n;
238 1.35 jmcneill
239 1.35 jmcneill kcpuset_zero(affinity);
240 1.35 jmcneill if (group == 0) {
241 1.35 jmcneill /* All CPUs are targets for group 0 (SGI/PPI) */
242 1.35 jmcneill for (n = 0; n < MAXCPUS; n++) {
243 1.35 jmcneill if (sc->sc_target[n] != 0)
244 1.35 jmcneill kcpuset_set(affinity, n);
245 1.35 jmcneill }
246 1.35 jmcneill } else {
247 1.35 jmcneill /* Find distributor targets (SPI) */
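		/*
		 * GICD_ITARGETSRn holds one 8-bit CPU-target field per
		 * interrupt, four fields to a 32-bit register, hence the
		 * byte shift computed below.
		 */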
248 1.35 jmcneill const u_int byte_shift = 8 * (irq & 3);
249 1.35 jmcneill const bus_size_t targets_reg = GICD_ITARGETSRn(irq / 4);
250 1.35 jmcneill const uint32_t targets = gicd_read(sc, targets_reg);
251 1.35 jmcneill const uint32_t targets_val = (targets >> byte_shift) & 0xff;
252 1.35 jmcneill
253 1.35 jmcneill for (n = 0; n < MAXCPUS; n++) {
254 1.35 jmcneill if (sc->sc_target[n] & targets_val)
255 1.35 jmcneill kcpuset_set(affinity, n);
256 1.35 jmcneill }
257 1.35 jmcneill }
258 1.35 jmcneill }
259 1.35 jmcneill
260 1.35 jmcneill static int
261 1.35 jmcneill armgic_set_affinity(struct pic_softc *pic, size_t irq,
262 1.35 jmcneill const kcpuset_t *affinity)
263 1.35 jmcneill {
264 1.35 jmcneill struct armgic_softc * const sc = PICTOSOFTC(pic);
265 1.35 jmcneill const size_t group = irq / 32;
266 1.35 jmcneill if (group == 0)
267 1.35 jmcneill return EINVAL;
268 1.35 jmcneill
269 1.35 jmcneill const u_int byte_shift = 8 * (irq & 3);
270 1.35 jmcneill const bus_size_t targets_reg = GICD_ITARGETSRn(irq / 4);
271 1.35 jmcneill uint32_t targets_val = 0;
272 1.35 jmcneill int n;
273 1.35 jmcneill
274 1.35 jmcneill for (n = 0; n < MAXCPUS; n++) {
275 1.35 jmcneill if (kcpuset_isset(affinity, n))
276 1.35 jmcneill targets_val |= sc->sc_target[n];
277 1.35 jmcneill }
278 1.35 jmcneill
279 1.35 jmcneill uint32_t targets = gicd_read(sc, targets_reg);
280 1.35 jmcneill targets &= ~(0xff << byte_shift);
281 1.35 jmcneill targets |= (targets_val << byte_shift);
282 1.35 jmcneill gicd_write(sc, targets_reg, targets);
283 1.35 jmcneill
284 1.35 jmcneill return 0;
285 1.35 jmcneill }
286 1.35 jmcneill #endif
287 1.35 jmcneill
288 1.1 matt #ifdef __HAVE_PIC_FAST_SOFTINTS
289 1.1 matt void
290 1.1 matt softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep_p)
291 1.1 matt {
292 1.1 matt lwp_t **lp = &l->l_cpu->ci_softlwps[level];
293 1.1 matt KASSERT(*lp == NULL || *lp == l);
294 1.1 matt *lp = l;
295 1.1 matt /*
296 1.1 matt * Really easy. Just tell it to trigger the local CPU.
297 1.1 matt */
298 1.1 matt *machdep_p = GICD_SGIR_TargetListFilter_Me
299 1.1 matt | __SHIFTIN(level, GICD_SGIR_SGIINTID);
300 1.1 matt }
301 1.1 matt
302 1.1 matt void
303 1.1 matt softint_trigger(uintptr_t machdep)
304 1.1 matt {
305 1.1 matt
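	/*
	 * machdep already holds a complete GICD_SGIR command word, built in
	 * softint_init_md() above from the "me only" target-list filter and
	 * the softint level as the SGI ID, so triggering is a single write.
	 */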
306 1.1 matt gicd_write(&armgic_softc, GICD_SGIR, machdep);
307 1.1 matt }
308 1.1 matt #endif
309 1.1 matt
310 1.1 matt void
311 1.29 skrll armgic_irq_handler(void *tf)
312 1.1 matt {
313 1.1 matt struct cpu_info * const ci = curcpu();
314 1.1 matt struct armgic_softc * const sc = &armgic_softc;
315 1.1 matt const int old_ipl = ci->ci_cpl;
316 1.1 matt #ifdef DIAGNOSTIC
317 1.1 matt const int old_mtx_count = ci->ci_mtx_count;
318 1.1 matt const int old_l_biglocks = ci->ci_curlwp->l_biglocks;
319 1.1 matt #endif
320 1.1 matt #ifdef DEBUG
321 1.1 matt size_t n = 0;
322 1.1 matt #endif
323 1.1 matt
324 1.1 matt ci->ci_data.cpu_nintr++;
325 1.1 matt
326 1.1 matt for (;;) {
327 1.1 matt uint32_t iar = gicc_read(sc, GICC_IAR);
328 1.1 matt uint32_t irq = __SHIFTOUT(iar, GICC_IAR_IRQ);
329 1.25 skrll
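		/*
		 * A spurious interrupt ID (1023, or its secure-mode
		 * counterpart) in the acknowledge register means nothing is
		 * pending at sufficient priority; re-read once and leave the
		 * loop if it is still spurious.
		 */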
330 1.28 skrll if (irq == GICC_IAR_IRQ_SPURIOUS ||
331 1.28 skrll irq == GICC_IAR_IRQ_SSPURIOUS) {
332 1.1 matt iar = gicc_read(sc, GICC_IAR);
333 1.1 matt irq = __SHIFTOUT(iar, GICC_IAR_IRQ);
334 1.1 matt if (irq == GICC_IAR_IRQ_SPURIOUS)
335 1.1 matt break;
336 1.28 skrll if (irq == GICC_IAR_IRQ_SSPURIOUS) {
337 1.28 skrll break;
338 1.28 skrll }
339 1.1 matt }
340 1.1 matt
341 1.32 jmcneill KASSERTMSG(old_ipl != IPL_HIGH, "old_ipl %d pmr %#x hppir %#x",
342 1.32 jmcneill old_ipl, gicc_read(sc, GICC_PMR), gicc_read(sc, GICC_HPPIR));
343 1.32 jmcneill
344 1.1 matt //const uint32_t cpuid = __SHIFTOUT(iar, GICC_IAR_CPUID_MASK);
345 1.1 matt struct intrsource * const is = sc->sc_pic.pic_sources[irq];
346 1.2 matt KASSERT(is != &armgic_dummy_source);
347 1.1 matt
348 1.1 matt /*
349 1.1 matt * GIC has asserted IPL for us so we can just update ci_cpl.
350 1.1 matt *
351 1.1 matt * But it's not that simple. We may have already bumped ci_cpl
352 1.1 matt * due to a high priority interrupt and now we are about to
353 1.1 matt * dispatch one lower than the previous. It's possible for
354 1.1 matt * that previous interrupt to have deferred some interrupts
   355 1.1  matt  * so we need to deal with those when lowering to the current
   356 1.1  matt  * interrupt's ipl.
   357 1.1  matt  *
   358 1.1  matt  * However, if we are just raising the ipl, we can simply update ci_cpl.
359 1.1 matt */
360 1.1 matt const int ipl = is->is_ipl;
361 1.1 matt if (__predict_false(ipl < ci->ci_cpl)) {
362 1.1 matt pic_do_pending_ints(I32_bit, ipl, tf);
363 1.1 matt KASSERT(ci->ci_cpl == ipl);
364 1.1 matt } else {
365 1.1 matt KASSERTMSG(ipl > ci->ci_cpl, "ipl %d cpl %d hw-ipl %#x",
366 1.1 matt ipl, ci->ci_cpl,
367 1.1 matt gicc_read(sc, GICC_PMR));
368 1.1 matt gicc_write(sc, GICC_PMR, armgic_ipl_to_priority(ipl));
369 1.1 matt ci->ci_cpl = ipl;
370 1.1 matt }
371 1.1 matt cpsie(I32_bit);
372 1.1 matt pic_dispatch(is, tf);
373 1.1 matt cpsid(I32_bit);
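		/*
		 * Writing the saved acknowledge value to GICC_EOIR signals
		 * end of interrupt and drops the CPU interface's running
		 * priority back to its value before this interrupt.
		 */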
374 1.1 matt gicc_write(sc, GICC_EOIR, iar);
375 1.1 matt #ifdef DEBUG
376 1.1 matt n++;
377 1.1 matt KDASSERTMSG(n < 5, "%s: processed too many (%zu)",
378 1.1 matt ci->ci_data.cpu_name, n);
379 1.1 matt #endif
380 1.1 matt }
381 1.1 matt
382 1.1 matt /*
383 1.1 matt * Now handle any pending ints.
384 1.1 matt */
385 1.1 matt pic_do_pending_ints(I32_bit, old_ipl, tf);
386 1.29 skrll KASSERTMSG(ci->ci_cpl == old_ipl, "ci_cpl %d old_ipl %d", ci->ci_cpl, old_ipl);
387 1.1 matt KASSERT(old_mtx_count == ci->ci_mtx_count);
388 1.1 matt KASSERT(old_l_biglocks == ci->ci_curlwp->l_biglocks);
389 1.1 matt }
390 1.1 matt
391 1.1 matt void
392 1.1 matt armgic_establish_irq(struct pic_softc *pic, struct intrsource *is)
393 1.1 matt {
394 1.1 matt struct armgic_softc * const sc = PICTOSOFTC(pic);
395 1.1 matt const size_t group = is->is_irq / 32;
396 1.1 matt const u_int irq = is->is_irq & 31;
397 1.1 matt const u_int byte_shift = 8 * (irq & 3);
398 1.1 matt const u_int twopair_shift = 2 * (irq & 15);
399 1.1 matt
400 1.1 matt KASSERTMSG(sc->sc_gic_valid_lines[group] & __BIT(irq),
401 1.1 matt "irq %u: not valid (group[%zu]=0x%08x [0x%08x])",
402 1.1 matt is->is_irq, group, sc->sc_gic_valid_lines[group],
403 1.1 matt (uint32_t)__BIT(irq));
404 1.16 skrll
405 1.1 matt KASSERTMSG(is->is_type == IST_LEVEL || is->is_type == IST_EDGE,
406 1.1 matt "irq %u: type %u unsupported", is->is_irq, is->is_type);
407 1.1 matt
408 1.1 matt const bus_size_t targets_reg = GICD_ITARGETSRn(is->is_irq / 4);
409 1.1 matt const bus_size_t cfg_reg = GICD_ICFGRn(is->is_irq / 16);
410 1.1 matt uint32_t targets = gicd_read(sc, targets_reg);
411 1.1 matt uint32_t cfg = gicd_read(sc, cfg_reg);
412 1.1 matt
413 1.1 matt if (group > 0) {
414 1.16 skrll /*
415 1.1 matt * There are 4 irqs per TARGETS register. For now bind
416 1.1 matt * to the primary cpu.
417 1.1 matt */
418 1.39 jmcneill targets &= ~(0xffU << byte_shift);
419 1.12 skrll #if 0
420 1.7 matt #ifdef MULTIPROCESSOR
421 1.7 matt if (is->is_mpsafe) {
422 1.12 skrll targets |= sc->sc_mptargets << byte_shift;
423 1.7 matt } else
424 1.7 matt #endif
425 1.12 skrll #endif
426 1.24 jmcneill targets |= sc->sc_bptargets << byte_shift;
427 1.1 matt gicd_write(sc, targets_reg, targets);
428 1.1 matt
429 1.16 skrll /*
430 1.1 matt * There are 16 irqs per CFG register. 10=EDGE 00=LEVEL
431 1.1 matt */
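		/*
		 * Each interrupt gets a two-bit field in GICD_ICFGRn; only
		 * the upper bit matters here (1 = edge-triggered, 0 = level
		 * sensitive), which is what the masks below manipulate.
		 */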
432 1.1 matt uint32_t new_cfg = cfg;
433 1.40 skrll uint32_t old_cfg = (cfg >> twopair_shift) & __BITS(1, 0);
434 1.40 skrll if (is->is_type == IST_LEVEL && (old_cfg & __BIT(1)) != 0) {
435 1.40 skrll new_cfg &= ~(__BITS(1, 0) << twopair_shift);
436 1.1 matt } else if (is->is_type == IST_EDGE && (old_cfg & 2) == 0) {
437 1.40 skrll new_cfg |= __BIT(1) << twopair_shift;
438 1.1 matt }
439 1.1 matt if (new_cfg != cfg) {
440 1.14 jmcneill gicd_write(sc, cfg_reg, new_cfg);
441 1.1 matt }
442 1.7 matt #ifdef MULTIPROCESSOR
443 1.7 matt } else {
444 1.7 matt /*
445 1.7 matt * All group 0 interrupts are per processor and MPSAFE by
446 1.7 matt * default.
447 1.7 matt */
448 1.7 matt is->is_mpsafe = true;
449 1.7 matt #endif
450 1.1 matt }
451 1.1 matt
452 1.16 skrll /*
453 1.1 matt * There are 4 irqs per PRIORITY register. Map the IPL
454 1.1 matt * to GIC priority.
455 1.1 matt */
456 1.1 matt const bus_size_t priority_reg = GICD_IPRIORITYRn(is->is_irq / 4);
457 1.1 matt uint32_t priority = gicd_read(sc, priority_reg);
458 1.39 jmcneill priority &= ~(0xffU << byte_shift);
459 1.1 matt priority |= armgic_ipl_to_priority(is->is_ipl) << byte_shift;
460 1.1 matt gicd_write(sc, priority_reg, priority);
461 1.1 matt }
462 1.1 matt
463 1.1 matt #ifdef MULTIPROCESSOR
464 1.1 matt static void
465 1.1 matt armgic_cpu_init_priorities(struct armgic_softc *sc)
466 1.1 matt {
467 1.22 skrll /* Set lowest priority, i.e. disable interrupts */
468 1.34 jakllsch for (size_t i = 0; i < sc->sc_pic.pic_maxsources; i += 4) {
469 1.22 skrll const bus_size_t priority_reg = GICD_IPRIORITYRn(i / 4);
470 1.22 skrll gicd_write(sc, priority_reg, ~0);
471 1.22 skrll }
472 1.22 skrll }
473 1.22 skrll
474 1.22 skrll static void
475 1.22 skrll armgic_cpu_update_priorities(struct armgic_softc *sc)
476 1.22 skrll {
477 1.1 matt uint32_t enabled = sc->sc_enabled_local;
478 1.34 jakllsch for (size_t i = 0; i < sc->sc_pic.pic_maxsources; i += 4, enabled >>= 4) {
479 1.1 matt const bus_size_t priority_reg = GICD_IPRIORITYRn(i / 4);
480 1.1 matt uint32_t priority = gicd_read(sc, priority_reg);
481 1.1 matt uint32_t byte_mask = 0xff;
482 1.1 matt size_t byte_shift = 0;
483 1.1 matt for (size_t j = 0; j < 4; j++, byte_mask <<= 8, byte_shift += 8) {
484 1.1 matt struct intrsource * const is = sc->sc_pic.pic_sources[i+j];
485 1.22 skrll priority |= byte_mask;
486 1.1 matt if (is == NULL || is == &armgic_dummy_source)
487 1.1 matt continue;
488 1.1 matt priority &= ~byte_mask;
489 1.1 matt priority |= armgic_ipl_to_priority(is->is_ipl) << byte_shift;
490 1.1 matt }
491 1.1 matt gicd_write(sc, priority_reg, priority);
492 1.1 matt }
493 1.1 matt }
494 1.1 matt
495 1.7 matt static void
496 1.7 matt armgic_cpu_init_targets(struct armgic_softc *sc)
497 1.7 matt {
498 1.7 matt /*
499 1.16 skrll * Update the mpsafe targets
500 1.7 matt */
501 1.13 jmcneill for (size_t irq = 32; irq < sc->sc_pic.pic_maxsources; irq++) {
502 1.7 matt struct intrsource * const is = sc->sc_pic.pic_sources[irq];
503 1.7 matt const bus_size_t targets_reg = GICD_ITARGETSRn(irq / 4);
504 1.7 matt if (is != NULL && is->is_mpsafe) {
505 1.12 skrll const u_int byte_shift = 8 * (irq & 3);
506 1.7 matt uint32_t targets = gicd_read(sc, targets_reg);
507 1.7 matt targets |= sc->sc_mptargets << byte_shift;
508 1.7 matt gicd_write(sc, targets_reg, targets);
509 1.7 matt }
510 1.7 matt }
511 1.7 matt }
512 1.7 matt
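/*
 * The first 32 interrupt IDs (SGIs and PPIs) are banked per CPU interface,
 * so the enables and priorities the boot processor programmed are not
 * visible to the other CPUs; each secondary therefore reprograms its own
 * copies in armgic_cpu_init() below when it is brought up.
 */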
513 1.1 matt void
514 1.1 matt armgic_cpu_init(struct pic_softc *pic, struct cpu_info *ci)
515 1.1 matt {
516 1.1 matt struct armgic_softc * const sc = PICTOSOFTC(pic);
517 1.35 jmcneill sc->sc_target[cpu_index(ci)] = gicd_find_targets(sc);
518 1.36 jmcneill atomic_or_32(&sc->sc_mptargets, sc->sc_target[cpu_index(ci)]);
519 1.7 matt KASSERTMSG(ci->ci_cpl == IPL_HIGH, "ipl %d not IPL_HIGH", ci->ci_cpl);
520 1.22 skrll armgic_cpu_init_priorities(sc);
521 1.7 matt if (!CPU_IS_PRIMARY(ci)) {
522 1.24 jmcneill if (popcount(sc->sc_mptargets) != 1) {
523 1.7 matt armgic_cpu_init_targets(sc);
524 1.7 matt }
525 1.7 matt if (sc->sc_enabled_local) {
526 1.22 skrll armgic_cpu_update_priorities(sc);
527 1.7 matt gicd_write(sc, GICD_ISENABLERn(0),
528 1.7 matt sc->sc_enabled_local);
529 1.7 matt }
530 1.1 matt }
531 1.1 matt gicc_write(sc, GICC_PMR, armgic_ipl_to_priority(ci->ci_cpl)); // set PMR
532 1.1 matt gicc_write(sc, GICC_CTRL, GICC_CTRL_V1_Enable); // enable interrupt
533 1.1 matt cpsie(I32_bit); // allow IRQ exceptions
534 1.1 matt }
535 1.1 matt
536 1.1 matt void
537 1.1 matt armgic_ipi_send(struct pic_softc *pic, const kcpuset_t *kcp, u_long ipi)
538 1.1 matt {
539 1.1 matt struct armgic_softc * const sc = PICTOSOFTC(pic);
540 1.1 matt
541 1.7 matt #if 0
542 1.1 matt if (ipi == IPI_NOP) {
543 1.1 matt __asm __volatile("sev");
544 1.1 matt return;
545 1.1 matt }
546 1.7 matt #endif
547 1.1 matt
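	/*
	 * GICD_SGIR takes the SGI ID in its low bits, an 8-bit CPU target
	 * list, and a target-list filter selecting "listed CPUs", "all but
	 * me" or "only me"; the filter below depends on whether a specific
	 * cpuset was supplied.
	 */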
548 1.7 matt uint32_t sgir = __SHIFTIN(ARMGIC_SGI_IPIBASE + ipi, GICD_SGIR_SGIINTID);
549 1.7 matt if (kcp != NULL) {
550 1.37 jmcneill uint32_t targets_val = 0;
551 1.37 jmcneill for (int n = 0; n < MAXCPUS; n++) {
552 1.37 jmcneill if (kcpuset_isset(kcp, n))
553 1.37 jmcneill targets_val |= sc->sc_target[n];
554 1.37 jmcneill }
555 1.37 jmcneill sgir |= __SHIFTIN(targets_val, GICD_SGIR_TargetList);
556 1.7 matt sgir |= GICD_SGIR_TargetListFilter_List;
557 1.7 matt } else {
558 1.7 matt if (ncpu == 1)
559 1.7 matt return;
560 1.7 matt sgir |= GICD_SGIR_TargetListFilter_NotMe;
561 1.7 matt }
562 1.1 matt
563 1.1 matt gicd_write(sc, GICD_SGIR, sgir);
564 1.1 matt }
565 1.1 matt #endif
566 1.1 matt
567 1.1 matt int
568 1.1 matt armgic_match(device_t parent, cfdata_t cf, void *aux)
569 1.1 matt {
570 1.1 matt struct mpcore_attach_args * const mpcaa = aux;
571 1.1 matt
572 1.1 matt if (strcmp(cf->cf_name, mpcaa->mpcaa_name) != 0)
573 1.1 matt return 0;
574 1.4 matt if (!CPU_ID_CORTEX_P(cputype) || CPU_ID_CORTEX_A8_P(cputype))
575 1.1 matt return 0;
576 1.1 matt
577 1.1 matt return 1;
578 1.1 matt }
579 1.1 matt
580 1.1 matt void
581 1.1 matt armgic_attach(device_t parent, device_t self, void *aux)
582 1.1 matt {
583 1.1 matt struct armgic_softc * const sc = &armgic_softc;
584 1.1 matt struct mpcore_attach_args * const mpcaa = aux;
585 1.1 matt
586 1.1 matt sc->sc_dev = self;
587 1.1 matt self->dv_private = sc;
588 1.1 matt
589 1.1 matt sc->sc_memt = mpcaa->mpcaa_memt; /* provided for us */
590 1.4 matt bus_space_subregion(sc->sc_memt, mpcaa->mpcaa_memh, mpcaa->mpcaa_off1,
591 1.4 matt 4096, &sc->sc_gicdh);
592 1.4 matt bus_space_subregion(sc->sc_memt, mpcaa->mpcaa_memh, mpcaa->mpcaa_off2,
593 1.4 matt 4096, &sc->sc_gicch);
594 1.1 matt
595 1.1 matt sc->sc_gic_type = gicd_read(sc, GICD_TYPER);
596 1.1 matt sc->sc_pic.pic_maxsources = GICD_TYPER_LINES(sc->sc_gic_type);
597 1.1 matt
598 1.1 matt gicc_write(sc, GICC_CTRL, 0); /* disable all interrupts */
599 1.1 matt gicd_write(sc, GICD_CTRL, 0); /* disable all interrupts */
600 1.1 matt
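	/*
	 * Writing all ones to the priority mask and reading it back shows
	 * how many priority bits are implemented (unimplemented low-order
	 * bits read back as zero), so the number of distinct priority
	 * levels is 1 << popcount32(pmr).
	 */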
601 1.1 matt gicc_write(sc, GICC_PMR, 0xff);
602 1.1 matt uint32_t pmr = gicc_read(sc, GICC_PMR);
603 1.1 matt u_int priorities = 1 << popcount32(pmr);
604 1.1 matt
605 1.26 skrll const uint32_t iidr = gicc_read(sc, GICC_IIDR);
606 1.26 skrll const int iidr_prod = __SHIFTOUT(iidr, GICC_IIDR_ProductID);
607 1.26 skrll const int iidr_arch = __SHIFTOUT(iidr, GICC_IIDR_ArchVersion);
608 1.26 skrll const int iidr_rev = __SHIFTOUT(iidr, GICC_IIDR_Revision);
609 1.26 skrll const int iidr_imp = __SHIFTOUT(iidr, GICC_IIDR_Implementer);
610 1.26 skrll
611 1.1 matt /*
612 1.24 jmcneill * Find the boot processor's CPU interface number.
613 1.24 jmcneill */
614 1.24 jmcneill sc->sc_bptargets = gicd_find_targets(sc);
615 1.24 jmcneill
616 1.24 jmcneill /*
617 1.1 matt * Let's find out how many real sources we have.
618 1.1 matt */
619 1.1 matt for (size_t i = 0, group = 0;
620 1.1 matt i < sc->sc_pic.pic_maxsources;
621 1.1 matt i += 32, group++) {
622 1.1 matt /*
   623 1.1  matt 		 * To figure out which sources are real, one enables all interrupts
   624 1.1  matt 		 * and then reads back the enable mask to see which ones really
   625 1.1  matt 		 * got enabled.
626 1.1 matt */
627 1.1 matt gicd_write(sc, GICD_ISENABLERn(group), 0xffffffff);
628 1.1 matt uint32_t valid = gicd_read(sc, GICD_ISENABLERn(group));
629 1.1 matt
630 1.1 matt /*
631 1.1 matt * Now disable (clear enable) them again.
632 1.1 matt */
633 1.1 matt gicd_write(sc, GICD_ICENABLERn(group), valid);
634 1.1 matt
635 1.1 matt /*
636 1.1 matt * Count how many are valid.
637 1.1 matt */
638 1.1 matt sc->sc_gic_lines += popcount32(valid);
639 1.1 matt sc->sc_gic_valid_lines[group] = valid;
640 1.1 matt }
641 1.1 matt
642 1.8 matt aprint_normal(": Generic Interrupt Controller, "
643 1.8 matt "%zu sources (%zu valid)\n",
644 1.8 matt sc->sc_pic.pic_maxsources, sc->sc_gic_lines);
645 1.26 skrll aprint_debug_dev(sc->sc_dev, "Architecture version %d"
646 1.26 skrll " (0x%x:%d rev %d)\n", iidr_arch, iidr_imp, iidr_prod,
647 1.26 skrll iidr_rev);
648 1.8 matt
649 1.18 matt #ifdef MULTIPROCESSOR
650 1.18 matt sc->sc_pic.pic_cpus = kcpuset_running;
651 1.18 matt #endif
652 1.1 matt pic_add(&sc->sc_pic, 0);
653 1.1 matt
654 1.1 matt /*
655 1.1 matt * Force the GICD to IPL_HIGH and then enable interrupts.
656 1.1 matt */
657 1.1 matt struct cpu_info * const ci = curcpu();
658 1.1 matt KASSERTMSG(ci->ci_cpl == IPL_HIGH, "ipl %d not IPL_HIGH", ci->ci_cpl);
659 1.1 matt armgic_set_priority(&sc->sc_pic, ci->ci_cpl); // set PMR
   660 1.1  matt 	gicd_write(sc, GICD_CTRL, GICD_CTRL_Enable);	// enable Distributor
661 1.1 matt gicc_write(sc, GICC_CTRL, GICC_CTRL_V1_Enable); // enable CPU interrupts
662 1.1 matt cpsie(I32_bit); // allow interrupt exceptions
663 1.1 matt
664 1.1 matt /*
665 1.1 matt * For each line that isn't valid, we set the intrsource for it to
666 1.1 matt * point at a dummy source so that pic_intr_establish will fail for it.
667 1.1 matt */
668 1.1 matt for (size_t i = 0, group = 0;
669 1.1 matt i < sc->sc_pic.pic_maxsources;
670 1.1 matt i += 32, group++) {
671 1.1 matt uint32_t invalid = ~sc->sc_gic_valid_lines[group];
672 1.1 matt for (size_t j = 0; invalid && j < 32; j++, invalid >>= 1) {
673 1.1 matt if (invalid & 1) {
674 1.1 matt sc->sc_pic.pic_sources[i + j] =
675 1.1 matt &armgic_dummy_source;
676 1.1 matt }
677 1.1 matt }
678 1.1 matt }
679 1.1 matt #ifdef __HAVE_PIC_FAST_SOFTINTS
680 1.38 jmcneill intr_establish_xname(SOFTINT_BIO, IPL_SOFTBIO, IST_MPSAFE | IST_EDGE,
681 1.38 jmcneill pic_handle_softint, (void *)SOFTINT_BIO, "softint bio");
682 1.38 jmcneill intr_establish_xname(SOFTINT_CLOCK, IPL_SOFTCLOCK, IST_MPSAFE | IST_EDGE,
683 1.38 jmcneill pic_handle_softint, (void *)SOFTINT_CLOCK, "softint clock");
684 1.38 jmcneill intr_establish_xname(SOFTINT_NET, IPL_SOFTNET, IST_MPSAFE | IST_EDGE,
685 1.38 jmcneill pic_handle_softint, (void *)SOFTINT_NET, "softint net");
686 1.38 jmcneill intr_establish_xname(SOFTINT_SERIAL, IPL_SOFTSERIAL, IST_MPSAFE | IST_EDGE,
687 1.38 jmcneill pic_handle_softint, (void *)SOFTINT_SERIAL, "softint serial");
688 1.1 matt #endif
689 1.1 matt #ifdef MULTIPROCESSOR
690 1.22 skrll armgic_cpu_init(&sc->sc_pic, curcpu());
691 1.22 skrll
692 1.38 jmcneill intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_AST, IPL_VM,
693 1.38 jmcneill IST_MPSAFE | IST_EDGE, pic_ipi_ast, (void *)-1, "IPI ast");
694 1.38 jmcneill intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_XCALL, IPL_HIGH,
695 1.38 jmcneill IST_MPSAFE | IST_EDGE, pic_ipi_xcall, (void *)-1, "IPI xcall");
696 1.38 jmcneill intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_GENERIC, IPL_HIGH,
697 1.38 jmcneill IST_MPSAFE | IST_EDGE, pic_ipi_generic, (void *)-1, "IPI generic");
698 1.38 jmcneill intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_NOP, IPL_VM,
699 1.38 jmcneill IST_MPSAFE | IST_EDGE, pic_ipi_nop, (void *)-1, "IPI nop");
700 1.38 jmcneill intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_SHOOTDOWN, IPL_SCHED,
701 1.38 jmcneill IST_MPSAFE | IST_EDGE, pic_ipi_shootdown, (void *)-1, "IPI shootdown");
702 1.7 matt #ifdef DDB
703 1.38 jmcneill intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_DDB, IPL_HIGH,
704 1.38 jmcneill IST_MPSAFE | IST_EDGE, pic_ipi_ddb, NULL, "IPI ddb");
705 1.1 matt #endif
706 1.1 matt #ifdef __HAVE_PREEMPTION
707 1.38 jmcneill intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_KPREEMPT, IPL_VM,
708 1.38 jmcneill IST_MPSAFE | IST_EDGE, pic_ipi_kpreempt, (void *)-1, "IPI kpreempt");
709 1.1 matt #endif
710 1.1 matt #endif
711 1.1 matt
712 1.1 matt const u_int ppis = popcount32(sc->sc_gic_valid_lines[0] >> 16);
713 1.1 matt const u_int sgis = popcount32(sc->sc_gic_valid_lines[0] & 0xffff);
714 1.27 skrll aprint_normal_dev(sc->sc_dev, "%u Priorities, %zu SPIs, %u PPIs, "
715 1.27 skrll "%u SGIs\n", priorities, sc->sc_gic_lines - ppis - sgis, ppis,
716 1.27 skrll sgis);
717 1.1 matt }
718 1.1 matt
719 1.1 matt CFATTACH_DECL_NEW(armgic, 0,
720 1.1 matt armgic_match, armgic_attach, NULL, NULL);