/* $NetBSD: gicv3.c,v 1.42 2021/02/21 15:00:05 jmcneill Exp $ */

/*-
 * Copyright (c) 2018 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_multiprocessor.h"

#define _INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gicv3.c,v 1.42 2021/02/21 15:00:05 jmcneill Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/vmem.h>
#include <sys/kmem.h>
#include <sys/atomic.h>

#include <machine/cpufunc.h>

#include <arm/locore.h>
#include <arm/armreg.h>

#include <arm/cortex/gicv3.h>
#include <arm/cortex/gic_reg.h>

#define PICTOSOFTC(pic) \
	((void *)((uintptr_t)(pic) - offsetof(struct gicv3_softc, sc_pic)))
#define LPITOSOFTC(lpi) \
	((void *)((uintptr_t)(lpi) - offsetof(struct gicv3_softc, sc_lpi)))

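/*
 * GIC priorities are inverted with respect to IPLs: numerically lower
 * GIC priority values are more urgent. With the non-secure shift of 4,
 * for example, IPL_NONE (0) maps to priority 0xf0 and higher IPLs map
 * to smaller (more urgent) values.
 */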
#define IPL_TO_PRIORITY(sc, ipl)	(((0xff - (ipl)) << (sc)->sc_priority_shift) & 0xff)
#define IPL_TO_PMR(sc, ipl)		(((0xff - (ipl)) << (sc)->sc_pmr_shift) & 0xff)

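/* GICD_TYPER.No1N is set when 1 of N SPI routing is NOT implemented */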
#define GIC_SUPPORTS_1OFN(sc)		(((sc)->sc_gicd_typer & GICD_TYPER_No1N) == 0)

#define GIC_PRIO_SHIFT_NS		4
#define GIC_PRIO_SHIFT_S		3

static struct gicv3_softc *gicv3_softc;

static inline uint32_t
gicd_read_4(struct gicv3_softc *sc, bus_size_t reg)
{
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh_d, reg);
}

static inline void
gicd_write_4(struct gicv3_softc *sc, bus_size_t reg, uint32_t val)
{
	bus_space_write_4(sc->sc_bst, sc->sc_bsh_d, reg, val);
}

#ifdef MULTIPROCESSOR
static inline uint64_t
gicd_read_8(struct gicv3_softc *sc, bus_size_t reg)
{
	return bus_space_read_8(sc->sc_bst, sc->sc_bsh_d, reg);
}
#endif

static inline void
gicd_write_8(struct gicv3_softc *sc, bus_size_t reg, uint64_t val)
{
	bus_space_write_8(sc->sc_bst, sc->sc_bsh_d, reg, val);
}

static inline uint32_t
gicr_read_4(struct gicv3_softc *sc, u_int index, bus_size_t reg)
{
	KASSERT(index < sc->sc_bsh_r_count);
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh_r[index], reg);
}

static inline void
gicr_write_4(struct gicv3_softc *sc, u_int index, bus_size_t reg, uint32_t val)
{
	KASSERT(index < sc->sc_bsh_r_count);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh_r[index], reg, val);
}

static inline uint64_t
gicr_read_8(struct gicv3_softc *sc, u_int index, bus_size_t reg)
{
	KASSERT(index < sc->sc_bsh_r_count);
	return bus_space_read_8(sc->sc_bst, sc->sc_bsh_r[index], reg);
}

static inline void
gicr_write_8(struct gicv3_softc *sc, u_int index, bus_size_t reg, uint64_t val)
{
	KASSERT(index < sc->sc_bsh_r_count);
	bus_space_write_8(sc->sc_bst, sc->sc_bsh_r[index], reg, val);
}

static void
gicv3_unblock_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	struct cpu_info * const ci = curcpu();
	const u_int group = irqbase / 32;

	if (group == 0) {
		atomic_or_32(&sc->sc_enabled_sgippi, mask);
		gicr_write_4(sc, ci->ci_gic_redist, GICR_ISENABLER0, mask);
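		/* Wait for the enable to take effect (GICR_CTLR.RWP) */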
		while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
			;
	} else {
		gicd_write_4(sc, GICD_ISENABLERn(group), mask);
		while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
			;
	}
}

static void
gicv3_block_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	struct cpu_info * const ci = curcpu();
	const u_int group = irqbase / 32;

	if (group == 0) {
		atomic_and_32(&sc->sc_enabled_sgippi, ~mask);
		gicr_write_4(sc, ci->ci_gic_redist, GICR_ICENABLER0, mask);
		while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
			;
	} else {
		gicd_write_4(sc, GICD_ICENABLERn(group), mask);
		while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
			;
	}
}

static void
gicv3_establish_irq(struct pic_softc *pic, struct intrsource *is)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	const u_int group = is->is_irq / 32;
	uint32_t ipriority, icfg;
	uint64_t irouter;
	u_int n;

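	/*
	 * IPRIORITYRn packs four 8-bit priority fields per 32-bit register;
	 * ICFGRn packs sixteen 2-bit configuration fields per register.
	 */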
	const u_int ipriority_val = IPL_TO_PRIORITY(sc, is->is_ipl);
	const u_int ipriority_shift = (is->is_irq & 0x3) * 8;
	const u_int icfg_shift = (is->is_irq & 0xf) * 2;

	if (group == 0) {
		/* SGIs and PPIs are always MP-safe */
		is->is_mpsafe = true;

		/* Update interrupt configuration and priority on all redistributors */
		for (n = 0; n < sc->sc_bsh_r_count; n++) {
			icfg = gicr_read_4(sc, n, GICR_ICFGRn(is->is_irq / 16));
			if (is->is_type == IST_LEVEL)
				icfg &= ~(0x2 << icfg_shift);
			if (is->is_type == IST_EDGE)
				icfg |= (0x2 << icfg_shift);
			gicr_write_4(sc, n, GICR_ICFGRn(is->is_irq / 16), icfg);

			ipriority = gicr_read_4(sc, n, GICR_IPRIORITYRn(is->is_irq / 4));
			ipriority &= ~(0xffU << ipriority_shift);
			ipriority |= (ipriority_val << ipriority_shift);
			gicr_write_4(sc, n, GICR_IPRIORITYRn(is->is_irq / 4), ipriority);
		}
	} else {
		/*
		 * If 1 of N SPI routing is supported, route MP-safe interrupts to all
		 * participating PEs. Otherwise, just route to the primary PE.
		 */
		if (is->is_mpsafe && GIC_SUPPORTS_1OFN(sc)) {
			irouter = GICD_IROUTER_Interrupt_Routing_mode;
		} else {
			irouter = sc->sc_irouter[0];
		}
		gicd_write_8(sc, GICD_IROUTER(is->is_irq), irouter);

		/* Update interrupt configuration */
		icfg = gicd_read_4(sc, GICD_ICFGRn(is->is_irq / 16));
		if (is->is_type == IST_LEVEL)
			icfg &= ~(0x2 << icfg_shift);
		if (is->is_type == IST_EDGE)
			icfg |= (0x2 << icfg_shift);
		gicd_write_4(sc, GICD_ICFGRn(is->is_irq / 16), icfg);

		/* Update interrupt priority */
		ipriority = gicd_read_4(sc, GICD_IPRIORITYRn(is->is_irq / 4));
		ipriority &= ~(0xffU << ipriority_shift);
		ipriority |= (ipriority_val << ipriority_shift);
		gicd_write_4(sc, GICD_IPRIORITYRn(is->is_irq / 4), ipriority);
	}
}

static void
gicv3_set_priority(struct pic_softc *pic, int ipl)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	struct cpu_info * const ci = curcpu();
	const uint8_t newpmr = IPL_TO_PMR(sc, ipl);

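	/*
	 * Only writes that lower the priority mask are performed eagerly
	 * here. ci_hwpl tracks the last value written to ICC_PMR_EL1;
	 * gicv3_irq_handler re-syncs the PMR when it finds a mismatch
	 * with the current IPL.
	 */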
	if (newpmr > ci->ci_hwpl) {
		/* Lowering priority mask */
		ci->ci_hwpl = newpmr;
		icc_pmr_write(newpmr);
	}
}

static void
gicv3_dist_enable(struct gicv3_softc *sc)
{
	uint32_t gicd_ctrl;
	u_int n;

	/* Disable the distributor */
	gicd_ctrl = gicd_read_4(sc, GICD_CTRL);
	gicd_ctrl &= ~(GICD_CTRL_EnableGrp1A | GICD_CTRL_ARE_NS);
	gicd_write_4(sc, GICD_CTRL, gicd_ctrl);

	/* Wait for register write to complete */
	while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
		;

	/* Clear all INTID enable bits */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 32)
		gicd_write_4(sc, GICD_ICENABLERn(n / 32), ~0);

	/* Set default priorities to lowest */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 4)
		gicd_write_4(sc, GICD_IPRIORITYRn(n / 4), ~0);

	/* Set all interrupts to G1NS */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 32) {
		gicd_write_4(sc, GICD_IGROUPRn(n / 32), ~0);
		gicd_write_4(sc, GICD_IGRPMODRn(n / 32), 0);
	}

	/* Set all interrupts level-sensitive by default */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 16)
		gicd_write_4(sc, GICD_ICFGRn(n / 16), 0);

	/* Wait for register writes to complete */
	while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
		;

	/* Enable Affinity routing and G1NS interrupts */
	gicd_ctrl = GICD_CTRL_EnableGrp1A | GICD_CTRL_ARE_NS;
	gicd_write_4(sc, GICD_CTRL, gicd_ctrl);
}

static void
gicv3_redist_enable(struct gicv3_softc *sc, struct cpu_info *ci)
{
	uint32_t icfg;
	u_int n, o;

	/* Clear INTID enable bits */
	gicr_write_4(sc, ci->ci_gic_redist, GICR_ICENABLER0, ~0);

	/* Wait for register write to complete */
	while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
		;

	/* Set default priorities */
	for (n = 0; n < 32; n += 4) {
		uint32_t priority = 0;
		size_t byte_shift = 0;
		for (o = 0; o < 4; o++, byte_shift += 8) {
			struct intrsource * const is = sc->sc_pic.pic_sources[n + o];
			if (is == NULL)
				priority |= (0xffU << byte_shift);
			else {
				const u_int ipriority_val = IPL_TO_PRIORITY(sc, is->is_ipl);
				priority |= ipriority_val << byte_shift;
			}
		}
		gicr_write_4(sc, ci->ci_gic_redist, GICR_IPRIORITYRn(n / 4), priority);
	}

	/* Set all interrupts to G1NS */
	gicr_write_4(sc, ci->ci_gic_redist, GICR_IGROUPR0, ~0);
	gicr_write_4(sc, ci->ci_gic_redist, GICR_IGRPMODR0, 0);

	/* Restore PPI configs */
	for (n = 0, icfg = 0; n < 16; n++) {
		struct intrsource * const is = sc->sc_pic.pic_sources[16 + n];
		if (is != NULL && is->is_type == IST_EDGE)
			icfg |= (0x2 << (n * 2));
	}
	gicr_write_4(sc, ci->ci_gic_redist, GICR_ICFGRn(1), icfg);

	/* Restore current enable bits */
	gicr_write_4(sc, ci->ci_gic_redist, GICR_ISENABLER0, sc->sc_enabled_sgippi);

	/* Wait for register write to complete */
	while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
		;
}

static uint64_t
gicv3_cpu_identity(void)
{
	u_int aff3, aff2, aff1, aff0;

	const register_t mpidr = cpu_mpidr_aff_read();
	aff0 = __SHIFTOUT(mpidr, MPIDR_AFF0);
	aff1 = __SHIFTOUT(mpidr, MPIDR_AFF1);
	aff2 = __SHIFTOUT(mpidr, MPIDR_AFF2);
	aff3 = __SHIFTOUT(mpidr, MPIDR_AFF3);

	return __SHIFTIN(aff0, GICR_TYPER_Affinity_Value_Aff0) |
	    __SHIFTIN(aff1, GICR_TYPER_Affinity_Value_Aff1) |
	    __SHIFTIN(aff2, GICR_TYPER_Affinity_Value_Aff2) |
	    __SHIFTIN(aff3, GICR_TYPER_Affinity_Value_Aff3);
}

static u_int
gicv3_find_redist(struct gicv3_softc *sc)
{
	uint64_t gicr_typer;
	u_int n;

	const uint64_t cpu_identity = gicv3_cpu_identity();

	for (n = 0; n < sc->sc_bsh_r_count; n++) {
		gicr_typer = gicr_read_8(sc, n, GICR_TYPER);
		if ((gicr_typer & GICR_TYPER_Affinity_Value) == cpu_identity)
			return n;
	}

	const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
	const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
	const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
	const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);

	panic("%s: could not find GICv3 redistributor for cpu %d.%d.%d.%d",
	    cpu_name(curcpu()), aff3, aff2, aff1, aff0);
}

static uint64_t
gicv3_sgir(struct gicv3_softc *sc)
{
	const uint64_t cpu_identity = gicv3_cpu_identity();

	const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
	const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
	const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
	const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);

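	/*
	 * ICC_SGIR_EL1.TargetList is a 16-bit mask of Aff0 values within
	 * the target Aff3.Aff2.Aff1 cluster, so this assumes Aff0 < 16.
	 */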
	return __SHIFTIN(__BIT(aff0), ICC_SGIR_EL1_TargetList) |
	    __SHIFTIN(aff1, ICC_SGIR_EL1_Aff1) |
	    __SHIFTIN(aff2, ICC_SGIR_EL1_Aff2) |
	    __SHIFTIN(aff3, ICC_SGIR_EL1_Aff3);
}

static void
gicv3_cpu_init(struct pic_softc *pic, struct cpu_info *ci)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	uint32_t icc_sre, icc_ctlr, gicr_waker;

	evcnt_attach_dynamic(&ci->ci_intr_preempt, EVCNT_TYPE_MISC, NULL,
	    ci->ci_cpuname, "intr preempt");

	ci->ci_gic_redist = gicv3_find_redist(sc);
	ci->ci_gic_sgir = gicv3_sgir(sc);

	/* Store route to CPU for SPIs */
	const uint64_t cpu_identity = gicv3_cpu_identity();
	const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
	const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
	const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
	const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);
	sc->sc_irouter[cpu_index(ci)] =
	    __SHIFTIN(aff0, GICD_IROUTER_Aff0) |
	    __SHIFTIN(aff1, GICD_IROUTER_Aff1) |
	    __SHIFTIN(aff2, GICD_IROUTER_Aff2) |
	    __SHIFTIN(aff3, GICD_IROUTER_Aff3);

	/* Enable System register access and disable IRQ/FIQ bypass */
	icc_sre = ICC_SRE_EL1_SRE | ICC_SRE_EL1_DFB | ICC_SRE_EL1_DIB;
	icc_sre_write(icc_sre);

	/* Mark the connected PE as being awake */
	gicr_waker = gicr_read_4(sc, ci->ci_gic_redist, GICR_WAKER);
	gicr_waker &= ~GICR_WAKER_ProcessorSleep;
	gicr_write_4(sc, ci->ci_gic_redist, GICR_WAKER, gicr_waker);
	while (gicr_read_4(sc, ci->ci_gic_redist, GICR_WAKER) & GICR_WAKER_ChildrenAsleep)
		;

	/* Set initial priority mask */
	ci->ci_hwpl = IPL_TO_PMR(sc, IPL_HIGH);
	icc_pmr_write(ci->ci_hwpl);

	/* Set the binary point field to the minimum value */
	icc_bpr1_write(0);

	/* Enable group 1 interrupt signaling */
	icc_igrpen1_write(ICC_IGRPEN_EL1_Enable);

	/* Set EOI mode */
	icc_ctlr = icc_ctlr_read();
	icc_ctlr &= ~ICC_CTLR_EL1_EOImode;
	icc_ctlr_write(icc_ctlr);

	/* Enable redistributor */
	gicv3_redist_enable(sc, ci);

	/* Allow IRQ exceptions */
	ENABLE_INTERRUPT();
}

#ifdef MULTIPROCESSOR
static void
gicv3_ipi_send(struct pic_softc *pic, const kcpuset_t *kcp, u_long ipi)
{
	struct cpu_info *ci;
	uint64_t sgir;

	sgir = __SHIFTIN(ipi, ICC_SGIR_EL1_INTID);
	if (kcp == NULL) {
		/* Interrupts routed to all PEs, excluding "self" */
		if (ncpu == 1)
			return;
		sgir |= ICC_SGIR_EL1_IRM;
	} else {
		/* Interrupt to exactly one PE */
		ci = cpu_lookup(kcpuset_ffs(kcp) - 1);
		if (ci == curcpu())
			return;
		sgir |= ci->ci_gic_sgir;
	}
	icc_sgi1r_write(sgir);
	isb();
}

static void
gicv3_get_affinity(struct pic_softc *pic, size_t irq, kcpuset_t *affinity)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	const size_t group = irq / 32;
	int n;

	kcpuset_zero(affinity);
	if (group == 0) {
		/* All CPUs are targets for group 0 (SGI/PPI) */
		for (n = 0; n < ncpu; n++) {
			if (sc->sc_irouter[n] != UINT64_MAX)
				kcpuset_set(affinity, n);
		}
	} else {
		/* Find distributor targets (SPI) */
		const uint64_t irouter = gicd_read_8(sc, GICD_IROUTER(irq));
		for (n = 0; n < ncpu; n++) {
			if (irouter == GICD_IROUTER_Interrupt_Routing_mode ||
			    irouter == sc->sc_irouter[n])
				kcpuset_set(affinity, n);
		}
	}
}

static int
gicv3_set_affinity(struct pic_softc *pic, size_t irq, const kcpuset_t *affinity)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	const size_t group = irq / 32;
	uint64_t irouter;

	if (group == 0)
		return EINVAL;

	const int set = kcpuset_countset(affinity);
	if (set == 1) {
		irouter = sc->sc_irouter[kcpuset_ffs(affinity) - 1];
	} else if (set == ncpu && GIC_SUPPORTS_1OFN(sc)) {
		irouter = GICD_IROUTER_Interrupt_Routing_mode;
	} else {
		return EINVAL;
	}

	gicd_write_8(sc, GICD_IROUTER(irq), irouter);

	return 0;
}
#endif

static const struct pic_ops gicv3_picops = {
	.pic_unblock_irqs = gicv3_unblock_irqs,
	.pic_block_irqs = gicv3_block_irqs,
	.pic_establish_irq = gicv3_establish_irq,
	.pic_set_priority = gicv3_set_priority,
#ifdef MULTIPROCESSOR
	.pic_cpu_init = gicv3_cpu_init,
	.pic_ipi_send = gicv3_ipi_send,
	.pic_get_affinity = gicv3_get_affinity,
	.pic_set_affinity = gicv3_set_affinity,
#endif
};

static void
gicv3_dcache_wb_range(vaddr_t va, vsize_t len)
{
	cpu_dcache_wb_range(va, len);
	dsb(sy);
}

static void
gicv3_lpi_unblock_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	int bit;

	while ((bit = ffs(mask)) != 0) {
		sc->sc_lpiconf.base[irqbase + bit - 1] |= GIC_LPICONF_Enable;
		if (sc->sc_lpiconf_flush)
			gicv3_dcache_wb_range((vaddr_t)&sc->sc_lpiconf.base[irqbase + bit - 1], 1);
		mask &= ~__BIT(bit - 1);
	}

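	/*
	 * When the configuration table is mapped shareable and cacheable,
	 * a store barrier is sufficient for the redistributor to observe
	 * the update; otherwise the cache lines were cleaned above.
	 */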
	if (!sc->sc_lpiconf_flush)
		dsb(ishst);
}

static void
gicv3_lpi_block_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	int bit;

	while ((bit = ffs(mask)) != 0) {
		sc->sc_lpiconf.base[irqbase + bit - 1] &= ~GIC_LPICONF_Enable;
		if (sc->sc_lpiconf_flush)
			gicv3_dcache_wb_range((vaddr_t)&sc->sc_lpiconf.base[irqbase + bit - 1], 1);
		mask &= ~__BIT(bit - 1);
	}

	if (!sc->sc_lpiconf_flush)
		dsb(ishst);
}

static void
gicv3_lpi_establish_irq(struct pic_softc *pic, struct intrsource *is)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);

	sc->sc_lpiconf.base[is->is_irq] = IPL_TO_PRIORITY(sc, is->is_ipl) | GIC_LPICONF_Res1;

	if (sc->sc_lpiconf_flush)
		gicv3_dcache_wb_range((vaddr_t)&sc->sc_lpiconf.base[is->is_irq], 1);
	else
		dsb(ishst);
}

static void
gicv3_lpi_cpu_init(struct pic_softc *pic, struct cpu_info *ci)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	struct gicv3_lpi_callback *cb;
	uint64_t propbase, pendbase;
	uint32_t ctlr;

	/* If physical LPIs are not supported on this redistributor, just return. */
	const uint64_t typer = gicr_read_8(sc, ci->ci_gic_redist, GICR_TYPER);
	if ((typer & GICR_TYPER_PLPIS) == 0)
		return;

	/* Interrupt target address for this CPU, used by ITS when GITS_TYPER.PTA == 0 */
	sc->sc_processor_id[cpu_index(ci)] = __SHIFTOUT(typer, GICR_TYPER_Processor_Number);

	/* Disable LPIs before making changes */
	ctlr = gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR);
	ctlr &= ~GICR_CTLR_Enable_LPIs;
	gicr_write_4(sc, ci->ci_gic_redist, GICR_CTLR, ctlr);
	dsb(sy);

	/* Setup the LPI configuration table */
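	/*
	 * With pic_maxsources == 8192 this programs IDbits to 13, i.e. a
	 * 14-bit INTID space: 8192 non-LPI INTIDs plus 8192 LPIs.
	 */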
	propbase = sc->sc_lpiconf.segs[0].ds_addr |
	    __SHIFTIN(ffs(pic->pic_maxsources) - 1, GICR_PROPBASER_IDbits) |
	    __SHIFTIN(GICR_Shareability_IS, GICR_PROPBASER_Shareability) |
	    __SHIFTIN(GICR_Cache_NORMAL_RA_WA_WB, GICR_PROPBASER_InnerCache);
	gicr_write_8(sc, ci->ci_gic_redist, GICR_PROPBASER, propbase);
	propbase = gicr_read_8(sc, ci->ci_gic_redist, GICR_PROPBASER);
	if (__SHIFTOUT(propbase, GICR_PROPBASER_Shareability) != GICR_Shareability_IS) {
		if (__SHIFTOUT(propbase, GICR_PROPBASER_Shareability) == GICR_Shareability_NS) {
			propbase &= ~GICR_PROPBASER_Shareability;
			propbase |= __SHIFTIN(GICR_Shareability_NS, GICR_PROPBASER_Shareability);
			propbase &= ~GICR_PROPBASER_InnerCache;
			propbase |= __SHIFTIN(GICR_Cache_NORMAL_NC, GICR_PROPBASER_InnerCache);
			gicr_write_8(sc, ci->ci_gic_redist, GICR_PROPBASER, propbase);
		}
		sc->sc_lpiconf_flush = true;
	}

	/* Setup the LPI pending table */
	pendbase = sc->sc_lpipend[cpu_index(ci)].segs[0].ds_addr |
	    __SHIFTIN(GICR_Shareability_IS, GICR_PENDBASER_Shareability) |
	    __SHIFTIN(GICR_Cache_NORMAL_RA_WA_WB, GICR_PENDBASER_InnerCache);
	gicr_write_8(sc, ci->ci_gic_redist, GICR_PENDBASER, pendbase);
	pendbase = gicr_read_8(sc, ci->ci_gic_redist, GICR_PENDBASER);
	if (__SHIFTOUT(pendbase, GICR_PENDBASER_Shareability) == GICR_Shareability_NS) {
		pendbase &= ~GICR_PENDBASER_Shareability;
		pendbase |= __SHIFTIN(GICR_Shareability_NS, GICR_PENDBASER_Shareability);
		pendbase &= ~GICR_PENDBASER_InnerCache;
		pendbase |= __SHIFTIN(GICR_Cache_NORMAL_NC, GICR_PENDBASER_InnerCache);
		gicr_write_8(sc, ci->ci_gic_redist, GICR_PENDBASER, pendbase);
	}

	/* Enable LPIs */
	ctlr = gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR);
	ctlr |= GICR_CTLR_Enable_LPIs;
	gicr_write_4(sc, ci->ci_gic_redist, GICR_CTLR, ctlr);
	dsb(sy);

	/* Setup ITS if present */
	LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list)
		cb->cpu_init(cb->priv, ci);
}

#ifdef MULTIPROCESSOR
static void
gicv3_lpi_get_affinity(struct pic_softc *pic, size_t irq, kcpuset_t *affinity)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	struct gicv3_lpi_callback *cb;

	kcpuset_zero(affinity);
	LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list)
		cb->get_affinity(cb->priv, irq, affinity);
}

static int
gicv3_lpi_set_affinity(struct pic_softc *pic, size_t irq, const kcpuset_t *affinity)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	struct gicv3_lpi_callback *cb;
	int error = EINVAL;

	LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list) {
		error = cb->set_affinity(cb->priv, irq, affinity);
		if (error != EPASSTHROUGH)
			return error;
	}

	return EINVAL;
}
#endif

static const struct pic_ops gicv3_lpiops = {
	.pic_unblock_irqs = gicv3_lpi_unblock_irqs,
	.pic_block_irqs = gicv3_lpi_block_irqs,
	.pic_establish_irq = gicv3_lpi_establish_irq,
#ifdef MULTIPROCESSOR
	.pic_cpu_init = gicv3_lpi_cpu_init,
	.pic_get_affinity = gicv3_lpi_get_affinity,
	.pic_set_affinity = gicv3_lpi_set_affinity,
#endif
};

void
gicv3_dma_alloc(struct gicv3_softc *sc, struct gicv3_dma *dma, bus_size_t len, bus_size_t align)
{
	int nsegs, error;

	dma->len = len;
	error = bus_dmamem_alloc(sc->sc_dmat, dma->len, align, 0, dma->segs, 1, &nsegs, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamem_alloc failed: %d", error);
	error = bus_dmamem_map(sc->sc_dmat, dma->segs, nsegs, len, (void **)&dma->base, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamem_map failed: %d", error);
	error = bus_dmamap_create(sc->sc_dmat, len, 1, len, 0, BUS_DMA_WAITOK, &dma->map);
	if (error)
		panic("bus_dmamap_create failed: %d", error);
	error = bus_dmamap_load(sc->sc_dmat, dma->map, dma->base, dma->len, NULL, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamap_load failed: %d", error);

	memset(dma->base, 0, dma->len);
	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, dma->len, BUS_DMASYNC_PREWRITE);
}

static void
gicv3_lpi_init(struct gicv3_softc *sc)
{
	/*
	 * Allocate LPI configuration table
	 */
	gicv3_dma_alloc(sc, &sc->sc_lpiconf, sc->sc_lpi.pic_maxsources, 0x1000);
	KASSERT((sc->sc_lpiconf.segs[0].ds_addr & ~GICR_PROPBASER_Physical_Address) == 0);

	/*
	 * Allocate LPI pending tables
	 */
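	/*
	 * The pending table carries one bit per INTID, including the
	 * first 8192 non-LPI INTIDs.
	 */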
	const bus_size_t lpipend_sz = (8192 + sc->sc_lpi.pic_maxsources) / NBBY;
	for (int cpuindex = 0; cpuindex < ncpu; cpuindex++) {
		gicv3_dma_alloc(sc, &sc->sc_lpipend[cpuindex], lpipend_sz, 0x10000);
		KASSERT((sc->sc_lpipend[cpuindex].segs[0].ds_addr & ~GICR_PENDBASER_Physical_Address) == 0);
	}
}

void
gicv3_irq_handler(void *frame)
{
	struct cpu_info * const ci = curcpu();
	struct gicv3_softc * const sc = gicv3_softc;
	struct pic_softc *pic;
	const int oldipl = ci->ci_cpl;
	const uint8_t pmr = IPL_TO_PMR(sc, oldipl);

	ci->ci_data.cpu_nintr++;

	if (ci->ci_hwpl != pmr) {
		ci->ci_hwpl = pmr;
		icc_pmr_write(pmr);
	}

	for (;;) {
		const uint32_t iar = icc_iar1_read();
		dsb(sy);
		const uint32_t irq = __SHIFTOUT(iar, ICC_IAR_INTID);
		if (irq == ICC_IAR_INTID_SPURIOUS)
			break;

		pic = irq >= GIC_LPI_BASE ? &sc->sc_lpi : &sc->sc_pic;
		if (irq - pic->pic_irqbase >= pic->pic_maxsources)
			continue;

		struct intrsource * const is = pic->pic_sources[irq - pic->pic_irqbase];
		KASSERT(is != NULL);

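		/*
		 * Edge-triggered interrupts are EOI'd before dispatch so
		 * that a new edge arriving while the handler runs is
		 * recorded as pending rather than lost.
		 */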
		const bool early_eoi = irq < GIC_LPI_BASE && is->is_type == IST_EDGE;

		const int ipl = is->is_ipl;
		if (__predict_false(ipl < ci->ci_cpl)) {
			pic_do_pending_ints(I32_bit, ipl, frame);
		} else if (ci->ci_cpl != ipl) {
			icc_pmr_write(IPL_TO_PMR(sc, ipl));
			ci->ci_hwpl = ci->ci_cpl = ipl;
		}

		if (early_eoi) {
			icc_eoi1r_write(iar);
			isb();
		}

		const int64_t nintr = ci->ci_data.cpu_nintr;

		ENABLE_INTERRUPT();
		pic_dispatch(is, frame);
		DISABLE_INTERRUPT();

		if (nintr != ci->ci_data.cpu_nintr)
			ci->ci_intr_preempt.ev_count++;

		if (!early_eoi) {
			icc_eoi1r_write(iar);
			isb();
		}
	}

	pic_do_pending_ints(I32_bit, oldipl, frame);
}

static bool
gicv3_cpuif_is_nonsecure(struct gicv3_softc *sc)
{
	/*
	 * Write 0 to bit7 and see if it sticks. This is only possible if
	 * we have a non-secure view of the PMR register.
	 */
	const uint32_t opmr = icc_pmr_read();
	icc_pmr_write(0);
	const uint32_t npmr = icc_pmr_read();
	icc_pmr_write(opmr);

	return (npmr & GICC_PMR_NONSECURE) == 0;
}

static bool
gicv3_dist_is_nonsecure(struct gicv3_softc *sc)
{
	const uint32_t gicd_ctrl = gicd_read_4(sc, GICD_CTRL);

	/*
	 * If security is enabled, we have a non-secure view of the IPRIORITYRn
	 * registers and LPI configuration priority fields.
	 */
	return (gicd_ctrl & GICD_CTRL_DS) == 0;
}

/*
 * Rockchip RK3399 provides a different view of int priority registers
 * depending on which firmware is in use. This is hard to detect in
 * a way that could possibly break other boards, so only do this
 * detection if we know we are on a RK3399 SoC.
 */
static void
gicv3_quirk_rockchip_rk3399(struct gicv3_softc *sc)
{
	/* Detect the number of supported PMR bits */
	icc_pmr_write(0xff);
	const uint8_t pmrbits = icc_pmr_read();

	/* Detect the number of supported IPRIORITYRn bits */
	const uint32_t oiprio = gicd_read_4(sc, GICD_IPRIORITYRn(8));
	gicd_write_4(sc, GICD_IPRIORITYRn(8), oiprio | 0xff);
	const uint8_t pribits = gicd_read_4(sc, GICD_IPRIORITYRn(8)) & 0xff;
	gicd_write_4(sc, GICD_IPRIORITYRn(8), oiprio);

	/*
	 * If we see fewer PMR bits than IPRIORITYRn bits here, it means
	 * we have a secure view of IPRIORITYRn (this is not supposed to
	 * happen!).
	 */
	if (pmrbits < pribits) {
		aprint_verbose_dev(sc->sc_dev,
		    "buggy RK3399 firmware detected; applying workaround\n");
		sc->sc_priority_shift = GIC_PRIO_SHIFT_S;
	}
}

int
gicv3_init(struct gicv3_softc *sc)
{
	int n;

	KASSERT(CPU_IS_PRIMARY(curcpu()));

	LIST_INIT(&sc->sc_lpi_callbacks);

	sc->sc_irouter = kmem_zalloc(sizeof(*sc->sc_irouter) * ncpu, KM_SLEEP);
	for (n = 0; n < ncpu; n++)
		sc->sc_irouter[n] = UINT64_MAX;

	sc->sc_gicd_typer = gicd_read_4(sc, GICD_TYPER);

	/*
	 * We don't always have a consistent view of priorities between the
	 * CPU interface (ICC_PMR_EL1) and the GICD/GICR registers. Detect
	 * if we are making secure or non-secure accesses to each, and adjust
	 * the values that we write to each accordingly.
	 */
	const bool dist_ns = gicv3_dist_is_nonsecure(sc);
	sc->sc_priority_shift = dist_ns ? GIC_PRIO_SHIFT_NS : GIC_PRIO_SHIFT_S;
	const bool cpuif_ns = gicv3_cpuif_is_nonsecure(sc);
	sc->sc_pmr_shift = cpuif_ns ? GIC_PRIO_SHIFT_NS : GIC_PRIO_SHIFT_S;

	if ((sc->sc_quirks & GICV3_QUIRK_RK3399) != 0)
		gicv3_quirk_rockchip_rk3399(sc);

	aprint_verbose_dev(sc->sc_dev,
	    "iidr 0x%08x, cpuif %ssecure, dist %ssecure, "
	    "priority shift %d, pmr shift %d, quirks %#x\n",
	    gicd_read_4(sc, GICD_IIDR),
	    cpuif_ns ? "non-" : "",
	    dist_ns ? "non-" : "",
	    sc->sc_priority_shift,
	    sc->sc_pmr_shift,
	    sc->sc_quirks);

	sc->sc_pic.pic_ops = &gicv3_picops;
	sc->sc_pic.pic_maxsources = GICD_TYPER_LINES(sc->sc_gicd_typer);
	snprintf(sc->sc_pic.pic_name, sizeof(sc->sc_pic.pic_name), "gicv3");
#ifdef MULTIPROCESSOR
	sc->sc_pic.pic_cpus = kcpuset_running;
#endif
	pic_add(&sc->sc_pic, 0);

	if ((sc->sc_gicd_typer & GICD_TYPER_LPIS) != 0) {
		sc->sc_lpipend = kmem_zalloc(sizeof(*sc->sc_lpipend) * ncpu, KM_SLEEP);
		sc->sc_processor_id = kmem_zalloc(sizeof(*sc->sc_processor_id) * ncpu, KM_SLEEP);

		sc->sc_lpi.pic_ops = &gicv3_lpiops;
		sc->sc_lpi.pic_maxsources = 8192;	/* Min. required by GICv3 spec */
		snprintf(sc->sc_lpi.pic_name, sizeof(sc->sc_lpi.pic_name), "gicv3-lpi");
		pic_add(&sc->sc_lpi, GIC_LPI_BASE);

		sc->sc_lpi_pool = vmem_create("gicv3-lpi", 0, sc->sc_lpi.pic_maxsources,
		    1, NULL, NULL, NULL, 0, VM_SLEEP, IPL_HIGH);
		if (sc->sc_lpi_pool == NULL)
			panic("failed to create gicv3 lpi pool\n");

		gicv3_lpi_init(sc);
	}

	KASSERT(gicv3_softc == NULL);
	gicv3_softc = sc;

	for (int i = 0; i < sc->sc_bsh_r_count; i++) {
		const uint64_t gicr_typer = gicr_read_8(sc, i, GICR_TYPER);
		const u_int aff0 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff0);
		const u_int aff1 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff1);
		const u_int aff2 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff2);
		const u_int aff3 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff3);

		aprint_debug_dev(sc->sc_dev, "redist %d: cpu %d.%d.%d.%d\n",
		    i, aff3, aff2, aff1, aff0);
	}

	gicv3_dist_enable(sc);

	gicv3_cpu_init(&sc->sc_pic, curcpu());
	if ((sc->sc_gicd_typer & GICD_TYPER_LPIS) != 0)
		gicv3_lpi_cpu_init(&sc->sc_lpi, curcpu());

#ifdef MULTIPROCESSOR
	intr_establish_xname(IPI_AST, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_ast, (void *)-1, "IPI ast");
	intr_establish_xname(IPI_XCALL, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_xcall, (void *)-1, "IPI xcall");
	intr_establish_xname(IPI_GENERIC, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_generic, (void *)-1, "IPI generic");
	intr_establish_xname(IPI_NOP, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_nop, (void *)-1, "IPI nop");
	intr_establish_xname(IPI_SHOOTDOWN, IPL_SCHED, IST_MPSAFE | IST_EDGE, pic_ipi_shootdown, (void *)-1, "IPI shootdown");
#ifdef DDB
	intr_establish_xname(IPI_DDB, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_ddb, NULL, "IPI ddb");
#endif
#ifdef __HAVE_PREEMPTION
	intr_establish_xname(IPI_KPREEMPT, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_kpreempt, (void *)-1, "IPI kpreempt");
#endif
#endif

	return 0;
}