/* $NetBSD: gicv3.c,v 1.35 2020/11/24 23:31:56 jmcneill Exp $ */

/*-
 * Copyright (c) 2018 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_multiprocessor.h"

#define _INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gicv3.c,v 1.35 2020/11/24 23:31:56 jmcneill Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/vmem.h>
#include <sys/atomic.h>

#include <machine/cpufunc.h>

#include <arm/locore.h>
#include <arm/armreg.h>

#include <arm/cortex/gicv3.h>
#include <arm/cortex/gic_reg.h>

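/*
 * Recover the enclosing gicv3_softc from an embedded pic_softc pointer,
 * container_of-style: sc_pic and sc_lpi are members of struct
 * gicv3_softc, so subtracting the member's offset from its address
 * yields the containing softc.
 */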
#define PICTOSOFTC(pic) \
	((void *)((uintptr_t)(pic) - offsetof(struct gicv3_softc, sc_pic)))
#define LPITOSOFTC(lpi) \
	((void *)((uintptr_t)(lpi) - offsetof(struct gicv3_softc, sc_lpi)))

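/*
 * Map a NetBSD IPL to a GIC priority: GIC priorities are "lower value
 * is more urgent", so the IPL is inverted and shifted into the bits
 * that are writable in our (secure or non-secure) view of the priority
 * registers. Illustrative arithmetic with the non-secure shift of 4:
 * IPL_NONE (0) -> ((0xff - 0) << 4) & 0xff = 0xf0, while an IPL of 6
 * -> ((0xff - 6) << 4) & 0xff = 0x90, a more urgent GIC priority.
 * IPL_TO_PMR computes the value written to ICC_PMR_EL1, which masks
 * all sources at or below that IPL.
 */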
#define IPL_TO_PRIORITY(sc, ipl)	(((0xff - (ipl)) << (sc)->sc_priority_shift) & 0xff)
#define IPL_TO_PMR(sc, ipl)		(((0xff - (ipl)) << (sc)->sc_pmr_shift) & 0xff)

#define GIC_PRIO_SHIFT_NS		4
#define GIC_PRIO_SHIFT_S		3

static struct gicv3_softc *gicv3_softc;

static inline uint32_t
gicd_read_4(struct gicv3_softc *sc, bus_size_t reg)
{
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh_d, reg);
}

static inline void
gicd_write_4(struct gicv3_softc *sc, bus_size_t reg, uint32_t val)
{
	bus_space_write_4(sc->sc_bst, sc->sc_bsh_d, reg, val);
}

static inline uint64_t
gicd_read_8(struct gicv3_softc *sc, bus_size_t reg)
{
	return bus_space_read_8(sc->sc_bst, sc->sc_bsh_d, reg);
}

static inline void
gicd_write_8(struct gicv3_softc *sc, bus_size_t reg, uint64_t val)
{
	bus_space_write_8(sc->sc_bst, sc->sc_bsh_d, reg, val);
}

static inline uint32_t
gicr_read_4(struct gicv3_softc *sc, u_int index, bus_size_t reg)
{
	KASSERT(index < sc->sc_bsh_r_count);
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh_r[index], reg);
}

static inline void
gicr_write_4(struct gicv3_softc *sc, u_int index, bus_size_t reg, uint32_t val)
{
	KASSERT(index < sc->sc_bsh_r_count);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh_r[index], reg, val);
}

static inline uint64_t
gicr_read_8(struct gicv3_softc *sc, u_int index, bus_size_t reg)
{
	KASSERT(index < sc->sc_bsh_r_count);
	return bus_space_read_8(sc->sc_bst, sc->sc_bsh_r[index], reg);
}

static inline void
gicr_write_8(struct gicv3_softc *sc, u_int index, bus_size_t reg, uint64_t val)
{
	KASSERT(index < sc->sc_bsh_r_count);
	bus_space_write_8(sc->sc_bst, sc->sc_bsh_r[index], reg, val);
}

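/*
 * Enable or disable a block of 32 interrupt sources. SGIs and PPIs
 * (irqbase / 32 == 0, i.e. INTIDs 0-31) live in the current CPU's
 * redistributor; SPIs live in the distributor. In both cases the RWP
 * (Register Write Pending) bit is polled until the effects of the
 * write are guaranteed to be visible.
 */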
static void
gicv3_unblock_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	struct cpu_info * const ci = curcpu();
	const u_int group = irqbase / 32;

	if (group == 0) {
		atomic_or_32(&sc->sc_enabled_sgippi, mask);
		gicr_write_4(sc, ci->ci_gic_redist, GICR_ISENABLER0, mask);
		while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
			;
	} else {
		gicd_write_4(sc, GICD_ISENABLERn(group), mask);
		while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
			;
	}
}

static void
gicv3_block_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	struct cpu_info * const ci = curcpu();
	const u_int group = irqbase / 32;

	if (group == 0) {
		atomic_and_32(&sc->sc_enabled_sgippi, ~mask);
		gicr_write_4(sc, ci->ci_gic_redist, GICR_ICENABLER0, mask);
		while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
			;
	} else {
		gicd_write_4(sc, GICD_ICENABLERn(group), mask);
		while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
			;
	}
}

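/*
 * Per-source configuration. SGI/PPI registers are banked per
 * redistributor, so all of them are programmed. For SPIs, GICD_IROUTER
 * selects the target: MP-safe sources use 1-of-N routing
 * (Interrupt_Routing_mode), the others are pinned to the primary PE.
 * ICFGR holds two bits per source; bit 1 set (0x2) selects edge,
 * clear selects level triggering.
 */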
static void
gicv3_establish_irq(struct pic_softc *pic, struct intrsource *is)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	const u_int group = is->is_irq / 32;
	uint32_t ipriority, icfg;
	uint64_t irouter;
	u_int n;

	const u_int ipriority_val = IPL_TO_PRIORITY(sc, is->is_ipl);
	const u_int ipriority_shift = (is->is_irq & 0x3) * 8;
	const u_int icfg_shift = (is->is_irq & 0xf) * 2;

	if (group == 0) {
		/* SGIs and PPIs are always MP-safe */
		is->is_mpsafe = true;

		/* Update interrupt configuration and priority on all redistributors */
		for (n = 0; n < sc->sc_bsh_r_count; n++) {
			icfg = gicr_read_4(sc, n, GICR_ICFGRn(is->is_irq / 16));
			if (is->is_type == IST_LEVEL)
				icfg &= ~(0x2 << icfg_shift);
			if (is->is_type == IST_EDGE)
				icfg |= (0x2 << icfg_shift);
			gicr_write_4(sc, n, GICR_ICFGRn(is->is_irq / 16), icfg);

			ipriority = gicr_read_4(sc, n, GICR_IPRIORITYRn(is->is_irq / 4));
			ipriority &= ~(0xffU << ipriority_shift);
			ipriority |= (ipriority_val << ipriority_shift);
			gicr_write_4(sc, n, GICR_IPRIORITYRn(is->is_irq / 4), ipriority);
		}
	} else {
		if (is->is_mpsafe) {
			/* Route MP-safe interrupts to all participating PEs */
			irouter = GICD_IROUTER_Interrupt_Routing_mode;
		} else {
			/* Route non-MP-safe interrupts to the primary PE only */
			irouter = sc->sc_irouter[0];
		}
		gicd_write_8(sc, GICD_IROUTER(is->is_irq), irouter);

		/* Update interrupt configuration */
		icfg = gicd_read_4(sc, GICD_ICFGRn(is->is_irq / 16));
		if (is->is_type == IST_LEVEL)
			icfg &= ~(0x2 << icfg_shift);
		if (is->is_type == IST_EDGE)
			icfg |= (0x2 << icfg_shift);
		gicd_write_4(sc, GICD_ICFGRn(is->is_irq / 16), icfg);

		/* Update interrupt priority */
		ipriority = gicd_read_4(sc, GICD_IPRIORITYRn(is->is_irq / 4));
		ipriority &= ~(0xffU << ipriority_shift);
		ipriority |= (ipriority_val << ipriority_shift);
		gicd_write_4(sc, GICD_IPRIORITYRn(is->is_irq / 4), ipriority);
	}
}

static void
gicv3_set_priority(struct pic_softc *pic, int ipl)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);

	icc_pmr_write(IPL_TO_PMR(sc, ipl));
}

static void
gicv3_dist_enable(struct gicv3_softc *sc)
{
	uint32_t gicd_ctrl;
	u_int n;

	/* Disable the distributor */
	gicd_ctrl = gicd_read_4(sc, GICD_CTRL);
	gicd_ctrl &= ~(GICD_CTRL_EnableGrp1A | GICD_CTRL_ARE_NS);
	gicd_write_4(sc, GICD_CTRL, gicd_ctrl);

	/* Wait for register write to complete */
	while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
		;

	/* Clear all INTID enable bits */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 32)
		gicd_write_4(sc, GICD_ICENABLERn(n / 32), ~0);

	/* Set default priorities to lowest */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 4)
		gicd_write_4(sc, GICD_IPRIORITYRn(n / 4), ~0);

	/* Set all interrupts to G1NS */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 32) {
		gicd_write_4(sc, GICD_IGROUPRn(n / 32), ~0);
		gicd_write_4(sc, GICD_IGRPMODRn(n / 32), 0);
	}

	/* Set all interrupts level-sensitive by default */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 16)
		gicd_write_4(sc, GICD_ICFGRn(n / 16), 0);

	/* Wait for register writes to complete */
	while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
		;

	/* Enable Affinity routing and G1NS interrupts */
	gicd_ctrl = GICD_CTRL_EnableGrp1A | GICD_CTRL_ARE_NS;
	gicd_write_4(sc, GICD_CTRL, gicd_ctrl);
}

static void
gicv3_redist_enable(struct gicv3_softc *sc, struct cpu_info *ci)
{
	uint32_t icfg;
	u_int n, o;

	/* Clear INTID enable bits */
	gicr_write_4(sc, ci->ci_gic_redist, GICR_ICENABLER0, ~0);

	/* Wait for register write to complete */
	while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
		;

	/* Set default priorities */
	for (n = 0; n < 32; n += 4) {
		uint32_t priority = 0;
		size_t byte_shift = 0;
		for (o = 0; o < 4; o++, byte_shift += 8) {
			struct intrsource * const is = sc->sc_pic.pic_sources[n + o];
			if (is == NULL)
				priority |= (0xffU << byte_shift);
			else {
				const u_int ipriority_val = IPL_TO_PRIORITY(sc, is->is_ipl);
				priority |= ipriority_val << byte_shift;
			}
		}
		gicr_write_4(sc, ci->ci_gic_redist, GICR_IPRIORITYRn(n / 4), priority);
	}

	/* Set all interrupts to G1NS */
	gicr_write_4(sc, ci->ci_gic_redist, GICR_IGROUPR0, ~0);
	gicr_write_4(sc, ci->ci_gic_redist, GICR_IGRPMODR0, 0);

	/* Restore PPI configs */
	for (n = 0, icfg = 0; n < 16; n++) {
		struct intrsource * const is = sc->sc_pic.pic_sources[16 + n];
		if (is != NULL && is->is_type == IST_EDGE)
			icfg |= (0x2 << (n * 2));
	}
	gicr_write_4(sc, ci->ci_gic_redist, GICR_ICFGRn(1), icfg);

	/* Restore current enable bits */
	gicr_write_4(sc, ci->ci_gic_redist, GICR_ISENABLER0, sc->sc_enabled_sgippi);

	/* Wait for register write to complete */
	while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
		;
}

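/*
 * Pack the current PE's MPIDR affinity fields in the layout of
 * GICR_TYPER.Affinity_Value, so the result can be compared directly
 * against each redistributor's GICR_TYPER to find the one that belongs
 * to this CPU.
 */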
static uint64_t
gicv3_cpu_identity(void)
{
	u_int aff3, aff2, aff1, aff0;

	const register_t mpidr = cpu_mpidr_aff_read();
	aff0 = __SHIFTOUT(mpidr, MPIDR_AFF0);
	aff1 = __SHIFTOUT(mpidr, MPIDR_AFF1);
	aff2 = __SHIFTOUT(mpidr, MPIDR_AFF2);
	aff3 = __SHIFTOUT(mpidr, MPIDR_AFF3);

	return __SHIFTIN(aff0, GICR_TYPER_Affinity_Value_Aff0) |
	       __SHIFTIN(aff1, GICR_TYPER_Affinity_Value_Aff1) |
	       __SHIFTIN(aff2, GICR_TYPER_Affinity_Value_Aff2) |
	       __SHIFTIN(aff3, GICR_TYPER_Affinity_Value_Aff3);
}

static u_int
gicv3_find_redist(struct gicv3_softc *sc)
{
	uint64_t gicr_typer;
	u_int n;

	const uint64_t cpu_identity = gicv3_cpu_identity();

	for (n = 0; n < sc->sc_bsh_r_count; n++) {
		gicr_typer = gicr_read_8(sc, n, GICR_TYPER);
		if ((gicr_typer & GICR_TYPER_Affinity_Value) == cpu_identity)
			return n;
	}

	const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
	const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
	const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
	const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);

	panic("%s: could not find GICv3 redistributor for cpu %d.%d.%d.%d",
	    cpu_name(curcpu()), aff3, aff2, aff1, aff0);
}

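/*
 * Precompute the target fields of ICC_SGI1R_EL1 for the current PE.
 * TargetList is a 16-bit bitmap with one bit per Aff0 value within the
 * Aff3.Aff2.Aff1 cluster, so __BIT(aff0) assumes Aff0 < 16 on every
 * PE; larger Aff0 values would need the architecture's SGI range
 * selector, which this driver does not use.
 */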
static uint64_t
gicv3_sgir(struct gicv3_softc *sc)
{
	const uint64_t cpu_identity = gicv3_cpu_identity();

	const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
	const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
	const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
	const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);

	return __SHIFTIN(__BIT(aff0), ICC_SGIR_EL1_TargetList) |
	       __SHIFTIN(aff1, ICC_SGIR_EL1_Aff1) |
	       __SHIFTIN(aff2, ICC_SGIR_EL1_Aff2) |
	       __SHIFTIN(aff3, ICC_SGIR_EL1_Aff3);
}

static void
gicv3_cpu_init(struct pic_softc *pic, struct cpu_info *ci)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	uint32_t icc_sre, icc_ctlr, gicr_waker;

	evcnt_attach_dynamic(&ci->ci_intr_preempt, EVCNT_TYPE_MISC, NULL,
	    ci->ci_cpuname, "intr preempt");

	ci->ci_gic_redist = gicv3_find_redist(sc);
	ci->ci_gic_sgir = gicv3_sgir(sc);

	/* Store route to CPU for SPIs */
	const uint64_t cpu_identity = gicv3_cpu_identity();
	const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
	const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
	const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
	const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);
	sc->sc_irouter[cpu_index(ci)] =
	    __SHIFTIN(aff0, GICD_IROUTER_Aff0) |
	    __SHIFTIN(aff1, GICD_IROUTER_Aff1) |
	    __SHIFTIN(aff2, GICD_IROUTER_Aff2) |
	    __SHIFTIN(aff3, GICD_IROUTER_Aff3);

	/* Enable System register access and disable IRQ/FIQ bypass */
	icc_sre = ICC_SRE_EL1_SRE | ICC_SRE_EL1_DFB | ICC_SRE_EL1_DIB;
	icc_sre_write(icc_sre);

	/* Mark the connected PE as being awake */
	gicr_waker = gicr_read_4(sc, ci->ci_gic_redist, GICR_WAKER);
	gicr_waker &= ~GICR_WAKER_ProcessorSleep;
	gicr_write_4(sc, ci->ci_gic_redist, GICR_WAKER, gicr_waker);
	while (gicr_read_4(sc, ci->ci_gic_redist, GICR_WAKER) & GICR_WAKER_ChildrenAsleep)
		;

	/* Set initial priority mask */
	gicv3_set_priority(pic, IPL_HIGH);

	/* Set the binary point field to the minimum value */
	icc_bpr1_write(0);

	/* Enable group 1 interrupt signaling */
	icc_igrpen1_write(ICC_IGRPEN_EL1_Enable);

	/* Set EOI mode */
	icc_ctlr = icc_ctlr_read();
	icc_ctlr &= ~ICC_CTLR_EL1_EOImode;
	icc_ctlr_write(icc_ctlr);

	/* Enable redistributor */
	gicv3_redist_enable(sc, ci);

	/* Allow IRQ exceptions */
	cpsie(I32_bit);
}

#ifdef MULTIPROCESSOR
static void
gicv3_ipi_send(struct pic_softc *pic, const kcpuset_t *kcp, u_long ipi)
{
	struct cpu_info *ci;
	uint64_t sgir;

	sgir = __SHIFTIN(ipi, ICC_SGIR_EL1_INTID);
	if (kcp == NULL) {
		/* Interrupts routed to all PEs, excluding "self" */
		if (ncpu == 1)
			return;
		sgir |= ICC_SGIR_EL1_IRM;
	} else {
		/* Interrupt to exactly one PE */
		ci = cpu_lookup(kcpuset_ffs(kcp) - 1);
		if (ci == curcpu())
			return;
		sgir |= ci->ci_gic_sgir;
	}
	icc_sgi1r_write(sgir);
	isb();
}

static void
gicv3_get_affinity(struct pic_softc *pic, size_t irq, kcpuset_t *affinity)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	const size_t group = irq / 32;
	int n;

	kcpuset_zero(affinity);
	if (group == 0) {
		/* All CPUs are targets for group 0 (SGI/PPI) */
		for (n = 0; n < ncpu; n++) {
			if (sc->sc_irouter[n] != UINT64_MAX)
				kcpuset_set(affinity, n);
		}
	} else {
		/* Find distributor targets (SPI) */
		const uint64_t irouter = gicd_read_8(sc, GICD_IROUTER(irq));
		for (n = 0; n < ncpu; n++) {
			if (irouter == GICD_IROUTER_Interrupt_Routing_mode ||
			    irouter == sc->sc_irouter[n])
				kcpuset_set(affinity, n);
		}
	}
}

static int
gicv3_set_affinity(struct pic_softc *pic, size_t irq, const kcpuset_t *affinity)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	const size_t group = irq / 32;
	uint64_t irouter;

	if (group == 0)
		return EINVAL;

	const int set = kcpuset_countset(affinity);
	if (set == ncpu)
		irouter = GICD_IROUTER_Interrupt_Routing_mode;
	else if (set == 1)
		irouter = sc->sc_irouter[kcpuset_ffs(affinity) - 1];
	else
		return EINVAL;

	gicd_write_8(sc, GICD_IROUTER(irq), irouter);

	return 0;
}
#endif

static const struct pic_ops gicv3_picops = {
	.pic_unblock_irqs = gicv3_unblock_irqs,
	.pic_block_irqs = gicv3_block_irqs,
	.pic_establish_irq = gicv3_establish_irq,
	.pic_set_priority = gicv3_set_priority,
#ifdef MULTIPROCESSOR
	.pic_cpu_init = gicv3_cpu_init,
	.pic_ipi_send = gicv3_ipi_send,
	.pic_get_affinity = gicv3_get_affinity,
	.pic_set_affinity = gicv3_set_affinity,
#endif
};

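/*
 * LPIs have no enable registers; each LPI has a one-byte entry in the
 * in-memory configuration table holding its priority and enable bit.
 * If the redistributor mapped that table non-shareable
 * (sc_lpiconf_flush, set in gicv3_lpi_cpu_init), updates must be
 * cleaned from the data cache; otherwise a store barrier is enough for
 * the redistributor to observe them.
 */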
static void
gicv3_lpi_unblock_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	int bit;

	while ((bit = ffs(mask)) != 0) {
		sc->sc_lpiconf.base[irqbase + bit - 1] |= GIC_LPICONF_Enable;
		if (sc->sc_lpiconf_flush)
			cpu_dcache_wb_range((vaddr_t)&sc->sc_lpiconf.base[irqbase + bit - 1], 1);
		mask &= ~__BIT(bit - 1);
	}

	if (!sc->sc_lpiconf_flush)
		dsb(ishst);
}

static void
gicv3_lpi_block_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	int bit;

	while ((bit = ffs(mask)) != 0) {
		sc->sc_lpiconf.base[irqbase + bit - 1] &= ~GIC_LPICONF_Enable;
		if (sc->sc_lpiconf_flush)
			cpu_dcache_wb_range((vaddr_t)&sc->sc_lpiconf.base[irqbase + bit - 1], 1);
		mask &= ~__BIT(bit - 1);
	}

	if (!sc->sc_lpiconf_flush)
		dsb(ishst);
}

static void
gicv3_lpi_establish_irq(struct pic_softc *pic, struct intrsource *is)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);

	sc->sc_lpiconf.base[is->is_irq] = IPL_TO_PRIORITY(sc, is->is_ipl) | GIC_LPICONF_Res1;

	if (sc->sc_lpiconf_flush)
		cpu_dcache_wb_range((vaddr_t)&sc->sc_lpiconf.base[is->is_irq], 1);
	else
		dsb(ishst);
}

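/*
 * Per-CPU LPI setup: point the redistributor at the shared
 * configuration table (GICR_PROPBASER) and at this CPU's pending table
 * (GICR_PENDBASER). Both are requested inner-shareable and cacheable;
 * the registers are read back because an implementation may downgrade
 * the shareability, in which case we settle for non-cacheable mappings
 * and (for the configuration table) remember to flush updates by hand
 * via sc_lpiconf_flush.
 */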
static void
gicv3_lpi_cpu_init(struct pic_softc *pic, struct cpu_info *ci)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	struct gicv3_lpi_callback *cb;
	uint64_t propbase, pendbase;
	uint32_t ctlr;

	/* If physical LPIs are not supported on this redistributor, just return. */
	const uint64_t typer = gicr_read_8(sc, ci->ci_gic_redist, GICR_TYPER);
	if ((typer & GICR_TYPER_PLPIS) == 0)
		return;

	/* Interrupt target address for this CPU, used by ITS when GITS_TYPER.PTA == 0 */
	sc->sc_processor_id[cpu_index(ci)] = __SHIFTOUT(typer, GICR_TYPER_Processor_Number);

	/* Disable LPIs before making changes */
	ctlr = gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR);
	ctlr &= ~GICR_CTLR_Enable_LPIs;
	gicr_write_4(sc, ci->ci_gic_redist, GICR_CTLR, ctlr);
	dsb(sy);

	/* Setup the LPI configuration table */
	propbase = sc->sc_lpiconf.segs[0].ds_addr |
	    __SHIFTIN(ffs(pic->pic_maxsources) - 1, GICR_PROPBASER_IDbits) |
	    __SHIFTIN(GICR_Shareability_IS, GICR_PROPBASER_Shareability) |
	    __SHIFTIN(GICR_Cache_NORMAL_RA_WA_WB, GICR_PROPBASER_InnerCache);
	gicr_write_8(sc, ci->ci_gic_redist, GICR_PROPBASER, propbase);
	propbase = gicr_read_8(sc, ci->ci_gic_redist, GICR_PROPBASER);
	if (__SHIFTOUT(propbase, GICR_PROPBASER_Shareability) != GICR_Shareability_IS) {
		if (__SHIFTOUT(propbase, GICR_PROPBASER_Shareability) == GICR_Shareability_NS) {
			propbase &= ~GICR_PROPBASER_Shareability;
			propbase |= __SHIFTIN(GICR_Shareability_NS, GICR_PROPBASER_Shareability);
			propbase &= ~GICR_PROPBASER_InnerCache;
			propbase |= __SHIFTIN(GICR_Cache_NORMAL_NC, GICR_PROPBASER_InnerCache);
			gicr_write_8(sc, ci->ci_gic_redist, GICR_PROPBASER, propbase);
		}
		sc->sc_lpiconf_flush = true;
	}

	/* Setup the LPI pending table */
	pendbase = sc->sc_lpipend[cpu_index(ci)].segs[0].ds_addr |
	    __SHIFTIN(GICR_Shareability_IS, GICR_PENDBASER_Shareability) |
	    __SHIFTIN(GICR_Cache_NORMAL_RA_WA_WB, GICR_PENDBASER_InnerCache);
	gicr_write_8(sc, ci->ci_gic_redist, GICR_PENDBASER, pendbase);
	pendbase = gicr_read_8(sc, ci->ci_gic_redist, GICR_PENDBASER);
	if (__SHIFTOUT(pendbase, GICR_PENDBASER_Shareability) == GICR_Shareability_NS) {
		pendbase &= ~GICR_PENDBASER_Shareability;
		pendbase |= __SHIFTIN(GICR_Shareability_NS, GICR_PENDBASER_Shareability);
		pendbase &= ~GICR_PENDBASER_InnerCache;
		pendbase |= __SHIFTIN(GICR_Cache_NORMAL_NC, GICR_PENDBASER_InnerCache);
		gicr_write_8(sc, ci->ci_gic_redist, GICR_PENDBASER, pendbase);
	}

	/* Enable LPIs */
	ctlr = gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR);
	ctlr |= GICR_CTLR_Enable_LPIs;
	gicr_write_4(sc, ci->ci_gic_redist, GICR_CTLR, ctlr);
	dsb(sy);

	/* Setup ITS if present */
	LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list)
		cb->cpu_init(cb->priv, ci);
}

#ifdef MULTIPROCESSOR
static void
gicv3_lpi_get_affinity(struct pic_softc *pic, size_t irq, kcpuset_t *affinity)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	struct gicv3_lpi_callback *cb;

	kcpuset_zero(affinity);
	LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list)
		cb->get_affinity(cb->priv, irq, affinity);
}

static int
gicv3_lpi_set_affinity(struct pic_softc *pic, size_t irq, const kcpuset_t *affinity)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	struct gicv3_lpi_callback *cb;
	int error = EINVAL;

	LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list) {
		error = cb->set_affinity(cb->priv, irq, affinity);
		if (error != EPASSTHROUGH)
			return error;
	}

	return EINVAL;
}
#endif

static const struct pic_ops gicv3_lpiops = {
	.pic_unblock_irqs = gicv3_lpi_unblock_irqs,
	.pic_block_irqs = gicv3_lpi_block_irqs,
	.pic_establish_irq = gicv3_lpi_establish_irq,
#ifdef MULTIPROCESSOR
	.pic_cpu_init = gicv3_lpi_cpu_init,
	.pic_get_affinity = gicv3_lpi_get_affinity,
	.pic_set_affinity = gicv3_lpi_set_affinity,
#endif
};

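/*
 * Allocate, map, and zero a physically contiguous (single segment)
 * chunk of DMA-safe memory for the GIC's in-memory tables. These
 * allocations happen once at attach time, so any failure is fatal.
 */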
void
gicv3_dma_alloc(struct gicv3_softc *sc, struct gicv3_dma *dma, bus_size_t len, bus_size_t align)
{
	int nsegs, error;

	dma->len = len;
	error = bus_dmamem_alloc(sc->sc_dmat, dma->len, align, 0, dma->segs, 1, &nsegs, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamem_alloc failed: %d", error);
	error = bus_dmamem_map(sc->sc_dmat, dma->segs, nsegs, len, (void **)&dma->base, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamem_map failed: %d", error);
	error = bus_dmamap_create(sc->sc_dmat, len, 1, len, 0, BUS_DMA_WAITOK, &dma->map);
	if (error)
		panic("bus_dmamap_create failed: %d", error);
	error = bus_dmamap_load(sc->sc_dmat, dma->map, dma->base, dma->len, NULL, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamap_load failed: %d", error);

	memset(dma->base, 0, dma->len);
	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, dma->len, BUS_DMASYNC_PREWRITE);
}

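/*
 * The configuration table needs one byte per LPI. The pending table
 * needs one bit per INTID counted from INTID 0, so the bits covering
 * the 8192 non-LPI INTIDs are allocated but unused; hence the
 * (8192 + pic_maxsources) / NBBY sizing. The alignments follow the
 * GICR_PROPBASER (4KB) and GICR_PENDBASER (64KB) requirements.
 */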
static void
gicv3_lpi_init(struct gicv3_softc *sc)
{
	/*
	 * Allocate LPI configuration table
	 */
	gicv3_dma_alloc(sc, &sc->sc_lpiconf, sc->sc_lpi.pic_maxsources, 0x1000);
	KASSERT((sc->sc_lpiconf.segs[0].ds_addr & ~GICR_PROPBASER_Physical_Address) == 0);

	/*
	 * Allocate LPI pending tables
	 */
	const bus_size_t lpipend_sz = (8192 + sc->sc_lpi.pic_maxsources) / NBBY;
	for (int cpuindex = 0; cpuindex < ncpu; cpuindex++) {
		gicv3_dma_alloc(sc, &sc->sc_lpipend[cpuindex], lpipend_sz, 0x10000);
		KASSERT((sc->sc_lpipend[cpuindex].segs[0].ds_addr & ~GICR_PENDBASER_Physical_Address) == 0);
	}
}

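/*
 * Main IRQ handler loop. Acknowledging via ICC_IAR1_EL1 returns the
 * INTID; the IPL is then raised (through the PMR) to the source's
 * level and CPU interrupts are re-enabled around the dispatch so more
 * urgent sources can preempt. With EOImode == 0 (see gicv3_cpu_init),
 * the EOI write both drops the running priority and deactivates the
 * interrupt; for edge-triggered non-LPI sources it is issued before
 * dispatch, presumably so that an edge arriving while the handler runs
 * is latched as pending rather than lost.
 */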
void
gicv3_irq_handler(void *frame)
{
	struct cpu_info * const ci = curcpu();
	struct gicv3_softc * const sc = gicv3_softc;
	struct pic_softc *pic;
	const int oldipl = ci->ci_cpl;

	ci->ci_data.cpu_nintr++;

	for (;;) {
		const uint32_t iar = icc_iar1_read();
		dsb(sy);
		const uint32_t irq = __SHIFTOUT(iar, ICC_IAR_INTID);
		if (irq == ICC_IAR_INTID_SPURIOUS)
			break;

		pic = irq >= GIC_LPI_BASE ? &sc->sc_lpi : &sc->sc_pic;
		if (irq - pic->pic_irqbase >= pic->pic_maxsources)
			continue;

		struct intrsource * const is = pic->pic_sources[irq - pic->pic_irqbase];
		KASSERT(is != NULL);

		const bool early_eoi = irq < GIC_LPI_BASE && is->is_type == IST_EDGE;

		const int ipl = is->is_ipl;
		if (__predict_false(ipl < ci->ci_cpl)) {
			pic_do_pending_ints(I32_bit, ipl, frame);
		} else if (ci->ci_cpl != ipl) {
			gicv3_set_priority(pic, ipl);
			ci->ci_cpl = ipl;
		}

		if (early_eoi) {
			icc_eoi1r_write(iar);
			isb();
		}

		const int64_t nintr = ci->ci_data.cpu_nintr;

		cpsie(I32_bit);
		pic_dispatch(is, frame);
		cpsid(I32_bit);

		if (nintr != ci->ci_data.cpu_nintr)
			ci->ci_intr_preempt.ev_count++;

		if (!early_eoi) {
			icc_eoi1r_write(iar);
			isb();
		}
	}

	pic_do_pending_ints(I32_bit, oldipl, frame);
}

static bool
gicv3_cpuif_is_nonsecure(struct gicv3_softc *sc)
{
	/*
	 * Write 0 to bit7 and see if it sticks. This is only possible if
	 * we have a non-secure view of the PMR register.
	 */
	const uint32_t opmr = icc_pmr_read();
	icc_pmr_write(0);
	const uint32_t npmr = icc_pmr_read();
	icc_pmr_write(opmr);

	return (npmr & GICC_PMR_NONSECURE) == 0;
}

static bool
gicv3_dist_is_nonsecure(struct gicv3_softc *sc)
{
	const uint32_t gicd_ctrl = gicd_read_4(sc, GICD_CTRL);

	/*
	 * If security is enabled, we have a non-secure view of the IPRIORITYRn
	 * registers and LPI configuration priority fields.
	 */
	return (gicd_ctrl & GICD_CTRL_DS) == 0;
}

/*
 * Rockchip RK3399 provides a different view of int priority registers
 * depending on which firmware is in use. This is hard to detect in
 * a way that could possibly break other boards, so only do this
 * detection if we know we are on a RK3399 SoC.
 */
static void
gicv3_quirk_rockchip_rk3399(struct gicv3_softc *sc)
{
	/* Detect the number of supported PMR bits */
	icc_pmr_write(0xff);
	const uint8_t pmrbits = icc_pmr_read();

	/* Detect the number of supported IPRIORITYRn bits */
	const uint32_t oiprio = gicd_read_4(sc, GICD_IPRIORITYRn(8));
	gicd_write_4(sc, GICD_IPRIORITYRn(8), oiprio | 0xff);
	const uint8_t pribits = gicd_read_4(sc, GICD_IPRIORITYRn(8)) & 0xff;
	gicd_write_4(sc, GICD_IPRIORITYRn(8), oiprio);

	/*
	 * If we see fewer PMR bits than IPRIORITYRn bits here, it means
	 * we have a secure view of IPRIORITYRn (this is not supposed to
	 * happen!).
	 */
	if (pmrbits < pribits) {
		aprint_verbose_dev(sc->sc_dev,
		    "buggy RK3399 firmware detected; applying workaround\n");
		sc->sc_priority_shift = GIC_PRIO_SHIFT_S;
	}
}

int
gicv3_init(struct gicv3_softc *sc)
{
	const uint32_t gicd_typer = gicd_read_4(sc, GICD_TYPER);
	int n;

	KASSERT(CPU_IS_PRIMARY(curcpu()));

	LIST_INIT(&sc->sc_lpi_callbacks);

	for (n = 0; n < MAXCPUS; n++)
		sc->sc_irouter[n] = UINT64_MAX;

	/*
	 * We don't always have a consistent view of priorities between the
	 * CPU interface (ICC_PMR_EL1) and the GICD/GICR registers. Detect
	 * if we are making secure or non-secure accesses to each, and adjust
	 * the values that we write to each accordingly.
	 */
	const bool dist_ns = gicv3_dist_is_nonsecure(sc);
	sc->sc_priority_shift = dist_ns ? GIC_PRIO_SHIFT_NS : GIC_PRIO_SHIFT_S;
	const bool cpuif_ns = gicv3_cpuif_is_nonsecure(sc);
	sc->sc_pmr_shift = cpuif_ns ? GIC_PRIO_SHIFT_NS : GIC_PRIO_SHIFT_S;

	if ((sc->sc_quirks & GICV3_QUIRK_RK3399) != 0)
		gicv3_quirk_rockchip_rk3399(sc);

	aprint_verbose_dev(sc->sc_dev,
	    "iidr 0x%08x, cpuif %ssecure, dist %ssecure, "
	    "priority shift %d, pmr shift %d, quirks %#x\n",
	    gicd_read_4(sc, GICD_IIDR),
	    cpuif_ns ? "non-" : "",
	    dist_ns ? "non-" : "",
	    sc->sc_priority_shift,
	    sc->sc_pmr_shift,
	    sc->sc_quirks);

	sc->sc_pic.pic_ops = &gicv3_picops;
	sc->sc_pic.pic_maxsources = GICD_TYPER_LINES(gicd_typer);
	snprintf(sc->sc_pic.pic_name, sizeof(sc->sc_pic.pic_name), "gicv3");
#ifdef MULTIPROCESSOR
	sc->sc_pic.pic_cpus = kcpuset_running;
#endif
	pic_add(&sc->sc_pic, 0);

	if ((gicd_typer & GICD_TYPER_LPIS) != 0) {
		sc->sc_lpi.pic_ops = &gicv3_lpiops;
		sc->sc_lpi.pic_maxsources = 8192;	/* Min. required by GICv3 spec */
		snprintf(sc->sc_lpi.pic_name, sizeof(sc->sc_lpi.pic_name), "gicv3-lpi");
		pic_add(&sc->sc_lpi, GIC_LPI_BASE);

		sc->sc_lpi_pool = vmem_create("gicv3-lpi", 0, sc->sc_lpi.pic_maxsources,
		    1, NULL, NULL, NULL, 0, VM_SLEEP, IPL_HIGH);
		if (sc->sc_lpi_pool == NULL)
			panic("failed to create gicv3 lpi pool\n");

		gicv3_lpi_init(sc);
	}

	KASSERT(gicv3_softc == NULL);
	gicv3_softc = sc;

	for (int i = 0; i < sc->sc_bsh_r_count; i++) {
		const uint64_t gicr_typer = gicr_read_8(sc, i, GICR_TYPER);
		const u_int aff0 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff0);
		const u_int aff1 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff1);
		const u_int aff2 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff2);
		const u_int aff3 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff3);

		aprint_debug_dev(sc->sc_dev, "redist %d: cpu %d.%d.%d.%d\n",
		    i, aff3, aff2, aff1, aff0);
	}

	gicv3_dist_enable(sc);

	gicv3_cpu_init(&sc->sc_pic, curcpu());
	if ((gicd_typer & GICD_TYPER_LPIS) != 0)
		gicv3_lpi_cpu_init(&sc->sc_lpi, curcpu());

#ifdef MULTIPROCESSOR
	intr_establish_xname(IPI_AST, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_ast, (void *)-1, "IPI ast");
	intr_establish_xname(IPI_XCALL, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_xcall, (void *)-1, "IPI xcall");
	intr_establish_xname(IPI_GENERIC, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_generic, (void *)-1, "IPI generic");
	intr_establish_xname(IPI_NOP, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_nop, (void *)-1, "IPI nop");
	intr_establish_xname(IPI_SHOOTDOWN, IPL_SCHED, IST_MPSAFE | IST_EDGE, pic_ipi_shootdown, (void *)-1, "IPI shootdown");
#ifdef DDB
	intr_establish_xname(IPI_DDB, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_ddb, NULL, "IPI ddb");
#endif
#ifdef __HAVE_PREEMPTION
	intr_establish_xname(IPI_KPREEMPT, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_kpreempt, (void *)-1, "IPI kpreempt");
#endif
#endif

	return 0;
}