/* $NetBSD: gicv3.c,v 1.22 2019/12/24 09:12:56 skrll Exp $ */
2
3 /*-
4 * Copyright (c) 2018 Jared McNeill <jmcneill (at) invisible.ca>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include "opt_multiprocessor.h"
30
31 #define _INTR_PRIVATE
32
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: gicv3.c,v 1.22 2019/12/24 09:12:56 skrll Exp $");
35
36 #include <sys/param.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/device.h>
40 #include <sys/intr.h>
41 #include <sys/systm.h>
42 #include <sys/cpu.h>
43
44 #include <machine/cpufunc.h>
45
46 #include <arm/locore.h>
47 #include <arm/armreg.h>
48
49 #include <arm/cortex/gicv3.h>
50 #include <arm/cortex/gic_reg.h>
51
/*
 * Recover the containing gicv3_softc from an embedded pic_softc pointer:
 * sc_pic is the SGI/PPI/SPI PIC, sc_lpi is the LPI PIC.
 */
#define PICTOSOFTC(pic) \
	((void *)((uintptr_t)(pic) - offsetof(struct gicv3_softc, sc_pic)))
#define LPITOSOFTC(lpi) \
	((void *)((uintptr_t)(lpi) - offsetof(struct gicv3_softc, sc_lpi)))

/*
 * Map a software IPL to a GIC priority byte.  Higher IPLs become
 * numerically lower (more urgent) GIC priorities; the shift accounts
 * for the number of priority bits the hardware implements (detected
 * at gicv3_init() time).  LPI priorities always use a 4-bit shift.
 */
#define IPL_TO_PRIORITY(sc, ipl) (((0xff - (ipl)) << (sc)->sc_priority_shift) & 0xff)
#define IPL_TO_PMR(sc, ipl) (((0xff - (ipl)) << (sc)->sc_pmr_shift) & 0xff)
#define IPL_TO_LPIPRIO(sc, ipl) (((0xff - (ipl)) << 4) & 0xff)
60
61 static struct gicv3_softc *gicv3_softc;
62
/* Read a 32-bit GIC distributor register. */
static inline uint32_t
gicd_read_4(struct gicv3_softc *sc, bus_size_t reg)
{
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh_d, reg);
}
68
/* Write a 32-bit GIC distributor register. */
static inline void
gicd_write_4(struct gicv3_softc *sc, bus_size_t reg, uint32_t val)
{
	bus_space_write_4(sc->sc_bst, sc->sc_bsh_d, reg, val);
}
74
/* Read a 64-bit GIC distributor register. */
static inline uint64_t
gicd_read_8(struct gicv3_softc *sc, bus_size_t reg)
{
	return bus_space_read_8(sc->sc_bst, sc->sc_bsh_d, reg);
}
80
/* Write a 64-bit GIC distributor register. */
static inline void
gicd_write_8(struct gicv3_softc *sc, bus_size_t reg, uint64_t val)
{
	bus_space_write_8(sc->sc_bst, sc->sc_bsh_d, reg, val);
}
86
/* Read a 32-bit register of the redistributor selected by "index". */
static inline uint32_t
gicr_read_4(struct gicv3_softc *sc, u_int index, bus_size_t reg)
{
	KASSERT(index < sc->sc_bsh_r_count);
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh_r[index], reg);
}
93
/* Write a 32-bit register of the redistributor selected by "index". */
static inline void
gicr_write_4(struct gicv3_softc *sc, u_int index, bus_size_t reg, uint32_t val)
{
	KASSERT(index < sc->sc_bsh_r_count);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh_r[index], reg, val);
}
100
/* Read a 64-bit register of the redistributor selected by "index". */
static inline uint64_t
gicr_read_8(struct gicv3_softc *sc, u_int index, bus_size_t reg)
{
	KASSERT(index < sc->sc_bsh_r_count);
	return bus_space_read_8(sc->sc_bst, sc->sc_bsh_r[index], reg);
}
107
/* Write a 64-bit register of the redistributor selected by "index". */
static inline void
gicr_write_8(struct gicv3_softc *sc, u_int index, bus_size_t reg, uint64_t val)
{
	KASSERT(index < sc->sc_bsh_r_count);
	bus_space_write_8(sc->sc_bst, sc->sc_bsh_r[index], reg, val);
}
114
/*
 * pic_unblock_irqs: enable the interrupts in "mask" within the
 * 32-interrupt group starting at "irqbase".  Group 0 (SGIs/PPIs) is
 * banked per CPU in the current CPU's redistributor; other groups
 * (SPIs) live in the distributor.
 */
static void
gicv3_unblock_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	struct cpu_info * const ci = curcpu();
	const u_int group = irqbase / 32;

	if (group == 0) {
		/* Record SGI/PPI enables so gicv3_redist_enable() can
		 * replay them on CPUs that come up later. */
		sc->sc_enabled_sgippi |= mask;
		gicr_write_4(sc, ci->ci_gic_redist, GICR_ISENABLER0, mask);
		/* Wait for the register write to complete (RWP clears) */
		while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
			;
	} else {
		gicd_write_4(sc, GICD_ISENABLERn(group), mask);
		/* Wait for the register write to complete (RWP clears) */
		while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
			;
	}
}
133
/*
 * pic_block_irqs: disable the interrupts in "mask" within the
 * 32-interrupt group starting at "irqbase".  Mirror image of
 * gicv3_unblock_irqs().
 */
static void
gicv3_block_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	struct cpu_info * const ci = curcpu();
	const u_int group = irqbase / 32;

	if (group == 0) {
		/* Also drop the bits from the saved SGI/PPI enable state */
		sc->sc_enabled_sgippi &= ~mask;
		gicr_write_4(sc, ci->ci_gic_redist, GICR_ICENABLER0, mask);
		/* Wait for the register write to complete (RWP clears) */
		while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
			;
	} else {
		gicd_write_4(sc, GICD_ICENABLERn(group), mask);
		/* Wait for the register write to complete (RWP clears) */
		while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
			;
	}
}
152
153 static void
154 gicv3_establish_irq(struct pic_softc *pic, struct intrsource *is)
155 {
156 struct gicv3_softc * const sc = PICTOSOFTC(pic);
157 const u_int group = is->is_irq / 32;
158 uint32_t ipriority, icfg;
159 uint64_t irouter;
160 u_int n;
161
162 const u_int ipriority_val = IPL_TO_PRIORITY(sc, is->is_ipl);
163 const u_int ipriority_shift = (is->is_irq & 0x3) * 8;
164 const u_int icfg_shift = (is->is_irq & 0xf) * 2;
165
166 if (group == 0) {
167 /* SGIs and PPIs are always MP-safe */
168 is->is_mpsafe = true;
169
170 /* Update interrupt configuration and priority on all redistributors */
171 for (n = 0; n < sc->sc_bsh_r_count; n++) {
172 icfg = gicr_read_4(sc, n, GICR_ICFGRn(is->is_irq / 16));
173 if (is->is_type == IST_LEVEL)
174 icfg &= ~(0x2 << icfg_shift);
175 if (is->is_type == IST_EDGE)
176 icfg |= (0x2 << icfg_shift);
177 gicr_write_4(sc, n, GICR_ICFGRn(is->is_irq / 16), icfg);
178
179 ipriority = gicr_read_4(sc, n, GICR_IPRIORITYRn(is->is_irq / 4));
180 ipriority &= ~(0xff << ipriority_shift);
181 ipriority |= (ipriority_val << ipriority_shift);
182 gicr_write_4(sc, n, GICR_IPRIORITYRn(is->is_irq / 4), ipriority);
183 }
184 } else {
185 if (is->is_mpsafe) {
186 /* Route MP-safe interrupts to all participating PEs */
187 irouter = GICD_IROUTER_Interrupt_Routing_mode;
188 } else {
189 /* Route non-MP-safe interrupts to the primary PE only */
190 irouter = sc->sc_irouter[0];
191 }
192 gicd_write_8(sc, GICD_IROUTER(is->is_irq), irouter);
193
194 /* Update interrupt configuration */
195 icfg = gicd_read_4(sc, GICD_ICFGRn(is->is_irq / 16));
196 if (is->is_type == IST_LEVEL)
197 icfg &= ~(0x2 << icfg_shift);
198 if (is->is_type == IST_EDGE)
199 icfg |= (0x2 << icfg_shift);
200 gicd_write_4(sc, GICD_ICFGRn(is->is_irq / 16), icfg);
201
202 /* Update interrupt priority */
203 ipriority = gicd_read_4(sc, GICD_IPRIORITYRn(is->is_irq / 4));
204 ipriority &= ~(0xff << ipriority_shift);
205 ipriority |= (ipriority_val << ipriority_shift);
206 gicd_write_4(sc, GICD_IPRIORITYRn(is->is_irq / 4), ipriority);
207 }
208 }
209
/*
 * pic_set_priority: program the CPU interface priority mask so that
 * interrupts at or below "ipl" are masked.
 */
static void
gicv3_set_priority(struct pic_softc *pic, int ipl)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);

	icc_pmr_write(IPL_TO_PMR(sc, ipl));
	arm_isb();	/* synchronize the system register write */
}
218
/*
 * Reset the distributor to a known state (everything disabled, lowest
 * priority, group 1 non-secure, level-triggered) and then enable it
 * with affinity routing.  Only SPIs (INTID >= 32) are touched here;
 * SGI/PPI state is per-redistributor.
 */
static void
gicv3_dist_enable(struct gicv3_softc *sc)
{
	uint32_t gicd_ctrl;
	u_int n;

	/* Disable the distributor */
	gicd_write_4(sc, GICD_CTRL, 0);

	/* Wait for register write to complete */
	while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
		;

	/* Clear all INTID enable bits */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 32)
		gicd_write_4(sc, GICD_ICENABLERn(n / 32), ~0);

	/* Set default priorities to lowest */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 4)
		gicd_write_4(sc, GICD_IPRIORITYRn(n / 4), ~0);

	/* Set all interrupts to G1NS */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 32) {
		gicd_write_4(sc, GICD_IGROUPRn(n / 32), ~0);
		gicd_write_4(sc, GICD_IGRPMODRn(n / 32), 0);
	}

	/* Set all interrupts level-sensitive by default */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 16)
		gicd_write_4(sc, GICD_ICFGRn(n / 16), 0);

	/* Wait for register writes to complete */
	while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
		;

	/* Enable Affinity routing and G1NS interrupts */
	gicd_ctrl = GICD_CTRL_EnableGrp1A | GICD_CTRL_ARE_NS;
	gicd_write_4(sc, GICD_CTRL, gicd_ctrl);
}
258
259 static void
260 gicv3_redist_enable(struct gicv3_softc *sc, struct cpu_info *ci)
261 {
262 uint32_t icfg;
263 u_int n, o;
264
265 /* Clear INTID enable bits */
266 gicr_write_4(sc, ci->ci_gic_redist, GICR_ICENABLER0, ~0);
267
268 /* Wait for register write to complete */
269 while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
270 ;
271
272 /* Set default priorities */
273 for (n = 0; n < 32; n += 4) {
274 uint32_t priority = 0;
275 size_t byte_shift = 0;
276 for (o = 0; o < 4; o++, byte_shift += 8) {
277 struct intrsource * const is = sc->sc_pic.pic_sources[n + o];
278 if (is == NULL)
279 priority |= 0xff << byte_shift;
280 else {
281 const u_int ipriority_val = IPL_TO_PRIORITY(sc, is->is_ipl);
282 priority |= ipriority_val << byte_shift;
283 }
284 }
285 gicr_write_4(sc, ci->ci_gic_redist, GICR_IPRIORITYRn(n / 4), priority);
286 }
287
288 /* Set all interrupts to G1NS */
289 gicr_write_4(sc, ci->ci_gic_redist, GICR_IGROUPR0, ~0);
290 gicr_write_4(sc, ci->ci_gic_redist, GICR_IGRPMODR0, 0);
291
292 /* Restore PPI configs */
293 for (n = 0, icfg = 0; n < 16; n++) {
294 struct intrsource * const is = sc->sc_pic.pic_sources[16 + n];
295 if (is != NULL && is->is_type == IST_EDGE)
296 icfg |= (0x2 << (n * 2));
297 }
298 gicr_write_4(sc, ci->ci_gic_redist, GICR_ICFGRn(1), icfg);
299
300 /* Restore current enable bits */
301 gicr_write_4(sc, ci->ci_gic_redist, GICR_ISENABLER0, sc->sc_enabled_sgippi);
302
303 /* Wait for register write to complete */
304 while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
305 ;
306 }
307
308 static uint64_t
309 gicv3_cpu_identity(void)
310 {
311 u_int aff3, aff2, aff1, aff0;
312
313 const register_t mpidr = cpu_mpidr_aff_read();
314 aff0 = __SHIFTOUT(mpidr, MPIDR_AFF0);
315 aff1 = __SHIFTOUT(mpidr, MPIDR_AFF1);
316 aff2 = __SHIFTOUT(mpidr, MPIDR_AFF2);
317 aff3 = __SHIFTOUT(mpidr, MPIDR_AFF3);
318
319 return __SHIFTIN(aff0, GICR_TYPER_Affinity_Value_Aff0) |
320 __SHIFTIN(aff1, GICR_TYPER_Affinity_Value_Aff1) |
321 __SHIFTIN(aff2, GICR_TYPER_Affinity_Value_Aff2) |
322 __SHIFTIN(aff3, GICR_TYPER_Affinity_Value_Aff3);
323 }
324
/*
 * Find the index of the redistributor attached to the current CPU by
 * matching GICR_TYPER.Affinity_Value against this CPU's MPIDR affinity.
 * Panics if none matches (every PE must have a redistributor).
 */
static u_int
gicv3_find_redist(struct gicv3_softc *sc)
{
	uint64_t gicr_typer;
	u_int n;

	const uint64_t cpu_identity = gicv3_cpu_identity();

	for (n = 0; n < sc->sc_bsh_r_count; n++) {
		gicr_typer = gicr_read_8(sc, n, GICR_TYPER);
		if ((gicr_typer & GICR_TYPER_Affinity_Value) == cpu_identity)
			return n;
	}

	/* Unpack the affinity fields only for the panic message */
	const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
	const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
	const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
	const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);

	panic("%s: could not find GICv3 redistributor for cpu %d.%d.%d.%d",
	    cpu_name(curcpu()), aff3, aff2, aff1, aff0);
}
347
/*
 * Precompute the ICC_SGI1R_EL1 value that targets the current CPU:
 * its Aff1..Aff3 fields plus a TargetList bit selected by Aff0.
 * Cached in ci_gic_sgir and used when sending IPIs.
 */
static uint64_t
gicv3_sgir(struct gicv3_softc *sc)
{
	const uint64_t cpu_identity = gicv3_cpu_identity();

	const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
	const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
	const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
	const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);

	return __SHIFTIN(__BIT(aff0), ICC_SGIR_EL1_TargetList) |
	    __SHIFTIN(aff1, ICC_SGIR_EL1_Aff1) |
	    __SHIFTIN(aff2, ICC_SGIR_EL1_Aff2) |
	    __SHIFTIN(aff3, ICC_SGIR_EL1_Aff3);
}
363
/*
 * pic_cpu_init: per-CPU GIC bring-up.  Locates this CPU's
 * redistributor, records its SPI route and SGI target, enables the
 * system-register CPU interface, wakes the redistributor, and enables
 * group 1 interrupt delivery.  Runs on the primary CPU from
 * gicv3_init() and on secondaries during MP startup.
 */
static void
gicv3_cpu_init(struct pic_softc *pic, struct cpu_info *ci)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	uint32_t icc_sre, icc_ctlr, gicr_waker;

	ci->ci_gic_redist = gicv3_find_redist(sc);
	ci->ci_gic_sgir = gicv3_sgir(sc);

	/* Store route to CPU for SPIs */
	const uint64_t cpu_identity = gicv3_cpu_identity();
	const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
	const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
	const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
	const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);
	sc->sc_irouter[cpu_index(ci)] =
	    __SHIFTIN(aff0, GICD_IROUTER_Aff0) |
	    __SHIFTIN(aff1, GICD_IROUTER_Aff1) |
	    __SHIFTIN(aff2, GICD_IROUTER_Aff2) |
	    __SHIFTIN(aff3, GICD_IROUTER_Aff3);

	/* Enable System register access and disable IRQ/FIQ bypass */
	icc_sre = ICC_SRE_EL1_SRE | ICC_SRE_EL1_DFB | ICC_SRE_EL1_DIB;
	icc_sre_write(icc_sre);

	/* Mark the connected PE as being awake */
	gicr_waker = gicr_read_4(sc, ci->ci_gic_redist, GICR_WAKER);
	gicr_waker &= ~GICR_WAKER_ProcessorSleep;
	gicr_write_4(sc, ci->ci_gic_redist, GICR_WAKER, gicr_waker);
	/* Wait until the redistributor reports the PE awake */
	while (gicr_read_4(sc, ci->ci_gic_redist, GICR_WAKER) & GICR_WAKER_ChildrenAsleep)
		;

	/* Set initial priority mask */
	gicv3_set_priority(pic, IPL_HIGH);

	/* Set the binary point field to the minimum value */
	icc_bpr1_write(0);

	/* Enable group 1 interrupt signaling */
	icc_igrpen1_write(ICC_IGRPEN_EL1_Enable);

	/* Set EOI mode (EOImode 0: priority drop and deactivate together) */
	icc_ctlr = icc_ctlr_read();
	icc_ctlr &= ~ICC_CTLR_EL1_EOImode;
	icc_ctlr_write(icc_ctlr);

	/* Enable redistributor */
	gicv3_redist_enable(sc, ci);

	/* Allow IRQ exceptions */
	cpsie(I32_bit);
}
416
417 #ifdef MULTIPROCESSOR
418 static void
419 gicv3_ipi_send(struct pic_softc *pic, const kcpuset_t *kcp, u_long ipi)
420 {
421 CPU_INFO_ITERATOR cii;
422 struct cpu_info *ci;
423 uint64_t intid, aff, targets;
424
425 intid = __SHIFTIN(ipi, ICC_SGIR_EL1_INTID);
426 if (kcp == NULL) {
427 /* Interrupts routed to all PEs, excluding "self" */
428 if (ncpu == 1)
429 return;
430 icc_sgi1r_write(intid | ICC_SGIR_EL1_IRM);
431 } else {
432 /* Interrupts routed to specific PEs */
433 aff = 0;
434 targets = 0;
435 for (CPU_INFO_FOREACH(cii, ci)) {
436 if (!kcpuset_isset(kcp, cpu_index(ci)))
437 continue;
438 if ((ci->ci_gic_sgir & ICC_SGIR_EL1_Aff) != aff) {
439 if (targets != 0) {
440 icc_sgi1r_write(intid | aff | targets);
441 arm_isb();
442 targets = 0;
443 }
444 aff = (ci->ci_gic_sgir & ICC_SGIR_EL1_Aff);
445 }
446 targets |= (ci->ci_gic_sgir & ICC_SGIR_EL1_TargetList);
447 }
448 if (targets != 0) {
449 icc_sgi1r_write(intid | aff | targets);
450 arm_isb();
451 }
452 }
453 }
454
455 static void
456 gicv3_get_affinity(struct pic_softc *pic, size_t irq, kcpuset_t *affinity)
457 {
458 struct gicv3_softc * const sc = PICTOSOFTC(pic);
459 const size_t group = irq / 32;
460 int n;
461
462 kcpuset_zero(affinity);
463 if (group == 0) {
464 /* All CPUs are targets for group 0 (SGI/PPI) */
465 for (n = 0; n < ncpu; n++) {
466 if (sc->sc_irouter[n] != UINT64_MAX)
467 kcpuset_set(affinity, n);
468 }
469 } else {
470 /* Find distributor targets (SPI) */
471 const uint64_t irouter = gicd_read_8(sc, GICD_IROUTER(irq));
472 for (n = 0; n < ncpu; n++) {
473 if (irouter == GICD_IROUTER_Interrupt_Routing_mode ||
474 irouter == sc->sc_irouter[n])
475 kcpuset_set(affinity, n);
476 }
477 }
478 }
479
480 static int
481 gicv3_set_affinity(struct pic_softc *pic, size_t irq, const kcpuset_t *affinity)
482 {
483 struct gicv3_softc * const sc = PICTOSOFTC(pic);
484 const size_t group = irq / 32;
485 uint64_t irouter;
486
487 if (group == 0)
488 return EINVAL;
489
490 const int set = kcpuset_countset(affinity);
491 if (set == ncpu)
492 irouter = GICD_IROUTER_Interrupt_Routing_mode;
493 else if (set == 1)
494 irouter = sc->sc_irouter[kcpuset_ffs(affinity) - 1];
495 else
496 return EINVAL;
497
498 gicd_write_8(sc, GICD_IROUTER(irq), irouter);
499
500 return 0;
501 }
502 #endif
503
/* Operations for the SGI/PPI/SPI PIC (sc_pic). */
static const struct pic_ops gicv3_picops = {
	.pic_unblock_irqs = gicv3_unblock_irqs,
	.pic_block_irqs = gicv3_block_irqs,
	.pic_establish_irq = gicv3_establish_irq,
	.pic_set_priority = gicv3_set_priority,
#ifdef MULTIPROCESSOR
	.pic_cpu_init = gicv3_cpu_init,
	.pic_ipi_send = gicv3_ipi_send,
	.pic_get_affinity = gicv3_get_affinity,
	.pic_set_affinity = gicv3_set_affinity,
#endif
};
516
/*
 * pic_unblock_irqs (LPI): set the Enable bit in each selected LPI's
 * configuration-table byte.  If the table is not inner-shareable
 * cacheable (sc_lpiconf_flush), the changed bytes must be cleaned to
 * memory for the redistributor to see them; otherwise a store barrier
 * suffices.
 */
static void
gicv3_lpi_unblock_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	int bit;

	while ((bit = ffs(mask)) != 0) {
		sc->sc_lpiconf.base[irqbase + bit - 1] |= GIC_LPICONF_Enable;
		if (sc->sc_lpiconf_flush)
			cpu_dcache_wb_range((vaddr_t)&sc->sc_lpiconf.base[irqbase + bit - 1], 1);
		mask &= ~__BIT(bit - 1);
	}

	if (!sc->sc_lpiconf_flush)
		__asm __volatile ("dsb ishst");
}
533
/*
 * pic_block_irqs (LPI): clear the Enable bit in each selected LPI's
 * configuration-table byte.  Mirror image of gicv3_lpi_unblock_irqs(),
 * including the cache-clean / store-barrier handling.
 */
static void
gicv3_lpi_block_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	int bit;

	while ((bit = ffs(mask)) != 0) {
		sc->sc_lpiconf.base[irqbase + bit - 1] &= ~GIC_LPICONF_Enable;
		if (sc->sc_lpiconf_flush)
			cpu_dcache_wb_range((vaddr_t)&sc->sc_lpiconf.base[irqbase + bit - 1], 1);
		mask &= ~__BIT(bit - 1);
	}

	if (!sc->sc_lpiconf_flush)
		__asm __volatile ("dsb ishst");
}
550
/*
 * pic_establish_irq (LPI): write the LPI's configuration-table byte
 * with its priority plus the mandatory Res1 bit.  The Enable bit is
 * not set here, so the LPI starts out blocked until unblocked.
 */
static void
gicv3_lpi_establish_irq(struct pic_softc *pic, struct intrsource *is)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);

	sc->sc_lpiconf.base[is->is_irq] = IPL_TO_LPIPRIO(sc, is->is_ipl) | GIC_LPICONF_Res1;

	if (sc->sc_lpiconf_flush)
		cpu_dcache_wb_range((vaddr_t)&sc->sc_lpiconf.base[is->is_irq], 1);
	else
		__asm __volatile ("dsb ishst");
}
563
/*
 * pic_cpu_init (LPI): per-CPU LPI bring-up.  Points this CPU's
 * redistributor at the shared LPI configuration table (PROPBASER) and
 * its private pending table (PENDBASER), then enables LPI delivery.
 * If the redistributor downgrades the requested inner-shareable
 * cacheable attributes, fall back to explicit cache maintenance
 * (sc_lpiconf_flush) and non-cacheable mappings.
 */
static void
gicv3_lpi_cpu_init(struct pic_softc *pic, struct cpu_info *ci)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	struct gicv3_lpi_callback *cb;
	uint64_t propbase, pendbase;
	uint32_t ctlr;

	/* If physical LPIs are not supported on this redistributor, just return. */
	const uint64_t typer = gicr_read_8(sc, ci->ci_gic_redist, GICR_TYPER);
	if ((typer & GICR_TYPER_PLPIS) == 0)
		return;

	/* Interrupt target address for this CPU, used by ITS when GITS_TYPER.PTA == 0 */
	sc->sc_processor_id[cpu_index(ci)] = __SHIFTOUT(typer, GICR_TYPER_Processor_Number);

	/* Disable LPIs before making changes */
	ctlr = gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR);
	ctlr &= ~GICR_CTLR_Enable_LPIs;
	gicr_write_4(sc, ci->ci_gic_redist, GICR_CTLR, ctlr);
	arm_dsb();

	/* Setup the LPI configuration table */
	propbase = sc->sc_lpiconf.segs[0].ds_addr |
	    __SHIFTIN(ffs(pic->pic_maxsources) - 1, GICR_PROPBASER_IDbits) |
	    __SHIFTIN(GICR_Shareability_IS, GICR_PROPBASER_Shareability) |
	    __SHIFTIN(GICR_Cache_NORMAL_RA_WA_WB, GICR_PROPBASER_InnerCache);
	gicr_write_8(sc, ci->ci_gic_redist, GICR_PROPBASER, propbase);
	/* Read back: the redistributor may not accept the requested attributes */
	propbase = gicr_read_8(sc, ci->ci_gic_redist, GICR_PROPBASER);
	if (__SHIFTOUT(propbase, GICR_PROPBASER_Shareability) != GICR_Shareability_IS) {
		if (__SHIFTOUT(propbase, GICR_PROPBASER_Shareability) == GICR_Shareability_NS) {
			/* Non-shareable: also drop to non-cacheable */
			propbase &= ~GICR_PROPBASER_Shareability;
			propbase |= __SHIFTIN(GICR_Shareability_NS, GICR_PROPBASER_Shareability);
			propbase &= ~GICR_PROPBASER_InnerCache;
			propbase |= __SHIFTIN(GICR_Cache_NORMAL_NC, GICR_PROPBASER_InnerCache);
			gicr_write_8(sc, ci->ci_gic_redist, GICR_PROPBASER, propbase);
		}
		/* Table updates now require explicit cache cleaning */
		sc->sc_lpiconf_flush = true;
	}

	/* Setup the LPI pending table */
	pendbase = sc->sc_lpipend[cpu_index(ci)].segs[0].ds_addr |
	    __SHIFTIN(GICR_Shareability_IS, GICR_PENDBASER_Shareability) |
	    __SHIFTIN(GICR_Cache_NORMAL_RA_WA_WB, GICR_PENDBASER_InnerCache);
	gicr_write_8(sc, ci->ci_gic_redist, GICR_PENDBASER, pendbase);
	pendbase = gicr_read_8(sc, ci->ci_gic_redist, GICR_PENDBASER);
	if (__SHIFTOUT(pendbase, GICR_PENDBASER_Shareability) == GICR_Shareability_NS) {
		pendbase &= ~GICR_PENDBASER_Shareability;
		pendbase |= __SHIFTIN(GICR_Shareability_NS, GICR_PENDBASER_Shareability);
		pendbase &= ~GICR_PENDBASER_InnerCache;
		pendbase |= __SHIFTIN(GICR_Cache_NORMAL_NC, GICR_PENDBASER_InnerCache);
		gicr_write_8(sc, ci->ci_gic_redist, GICR_PENDBASER, pendbase);
	}

	/* Enable LPIs */
	ctlr = gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR);
	ctlr |= GICR_CTLR_Enable_LPIs;
	gicr_write_4(sc, ci->ci_gic_redist, GICR_CTLR, ctlr);
	arm_dsb();

	/* Setup ITS if present */
	LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list)
		cb->cpu_init(cb->priv, ci);
}
628
629 #ifdef MULTIPROCESSOR
630 static void
631 gicv3_lpi_get_affinity(struct pic_softc *pic, size_t irq, kcpuset_t *affinity)
632 {
633 struct gicv3_softc * const sc = LPITOSOFTC(pic);
634 struct gicv3_lpi_callback *cb;
635
636 LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list)
637 cb->get_affinity(cb->priv, irq, affinity);
638 }
639
640 static int
641 gicv3_lpi_set_affinity(struct pic_softc *pic, size_t irq, const kcpuset_t *affinity)
642 {
643 struct gicv3_softc * const sc = LPITOSOFTC(pic);
644 struct gicv3_lpi_callback *cb;
645 int error = EINVAL;
646
647 LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list) {
648 error = cb->set_affinity(cb->priv, irq, affinity);
649 if (error)
650 return error;
651 }
652
653 return error;
654 }
655 #endif
656
/* Operations for the LPI PIC (sc_lpi).  No pic_set_priority: the CPU
 * interface priority mask is owned by gicv3_picops. */
static const struct pic_ops gicv3_lpiops = {
	.pic_unblock_irqs = gicv3_lpi_unblock_irqs,
	.pic_block_irqs = gicv3_lpi_block_irqs,
	.pic_establish_irq = gicv3_lpi_establish_irq,
#ifdef MULTIPROCESSOR
	.pic_cpu_init = gicv3_lpi_cpu_init,
	.pic_get_affinity = gicv3_lpi_get_affinity,
	.pic_set_affinity = gicv3_lpi_set_affinity,
#endif
};
667
/*
 * Allocate, map, and load a physically contiguous DMA buffer of "len"
 * bytes with the given alignment, for GIC tables (LPI configuration /
 * pending tables, ITS structures).  The buffer is zeroed and synced
 * for device reads.  Panics on failure: these attach-time allocations
 * are required for the controller to function.
 */
void
gicv3_dma_alloc(struct gicv3_softc *sc, struct gicv3_dma *dma, bus_size_t len, bus_size_t align)
{
	int nsegs, error;

	dma->len = len;
	error = bus_dmamem_alloc(sc->sc_dmat, dma->len, align, 0, dma->segs, 1, &nsegs, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamem_alloc failed: %d", error);
	error = bus_dmamem_map(sc->sc_dmat, dma->segs, nsegs, len, (void **)&dma->base, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamem_map failed: %d", error);
	error = bus_dmamap_create(sc->sc_dmat, len, 1, len, 0, BUS_DMA_WAITOK, &dma->map);
	if (error)
		panic("bus_dmamap_create failed: %d", error);
	error = bus_dmamap_load(sc->sc_dmat, dma->map, dma->base, dma->len, NULL, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamap_load failed: %d", error);

	memset(dma->base, 0, dma->len);
	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, dma->len, BUS_DMASYNC_PREWRITE);
}
690
/*
 * Allocate the shared LPI configuration table (one byte per LPI,
 * 4KB aligned) and a per-CPU LPI pending table (one bit per INTID
 * including the first 8192 non-LPI IDs, 64KB aligned).  The physical
 * addresses must fit the PROPBASER/PENDBASER address fields.
 */
static void
gicv3_lpi_init(struct gicv3_softc *sc)
{
	/*
	 * Allocate LPI configuration table
	 */
	gicv3_dma_alloc(sc, &sc->sc_lpiconf, sc->sc_lpi.pic_maxsources, 0x1000);
	KASSERT((sc->sc_lpiconf.segs[0].ds_addr & ~GICR_PROPBASER_Physical_Address) == 0);

	/*
	 * Allocate LPI pending tables
	 */
	const bus_size_t lpipend_sz = (8192 + sc->sc_lpi.pic_maxsources) / NBBY;
	for (int cpuindex = 0; cpuindex < ncpu; cpuindex++) {
		gicv3_dma_alloc(sc, &sc->sc_lpipend[cpuindex], lpipend_sz, 0x10000);
		KASSERT((sc->sc_lpipend[cpuindex].segs[0].ds_addr & ~GICR_PENDBASER_Physical_Address) == 0);
	}
}
709
/*
 * Main IRQ entry point.  Acknowledge and dispatch group 1 interrupts
 * until the CPU interface reports a spurious INTID, then run any
 * deferred interrupts and restore the entry IPL.
 */
void
gicv3_irq_handler(void *frame)
{
	struct cpu_info * const ci = curcpu();
	struct gicv3_softc * const sc = gicv3_softc;
	struct pic_softc *pic;
	const int oldipl = ci->ci_cpl;

	ci->ci_data.cpu_nintr++;

	for (;;) {
		const uint32_t iar = icc_iar1_read();
		arm_dsb();
		const uint32_t irq = __SHIFTOUT(iar, ICC_IAR_INTID);
		if (irq == ICC_IAR_INTID_SPURIOUS)
			break;

		/* LPIs belong to sc_lpi; SGIs/PPIs/SPIs to sc_pic */
		pic = irq >= GIC_LPI_BASE ? &sc->sc_lpi : &sc->sc_pic;
		if (irq - pic->pic_irqbase >= pic->pic_maxsources)
			continue;

		struct intrsource * const is = pic->pic_sources[irq - pic->pic_irqbase];
		KASSERT(is != NULL);

		/* Edge-triggered non-LPI interrupts are EOI'd before dispatch */
		const bool early_eoi = irq < GIC_LPI_BASE && is->is_type == IST_EDGE;

		const int ipl = is->is_ipl;
		if (__predict_false(ipl < ci->ci_cpl)) {
			/* Lower priority than the current IPL: defer it */
			pic_do_pending_ints(I32_bit, ipl, frame);
		} else {
			/* Raise IPL for the duration of the handler */
			gicv3_set_priority(pic, ipl);
			ci->ci_cpl = ipl;
		}

		if (early_eoi) {
			icc_eoi1r_write(iar);
			arm_isb();
		}

		/* Dispatch with IRQs enabled so higher IPLs can preempt */
		cpsie(I32_bit);
		pic_dispatch(is, frame);
		cpsid(I32_bit);

		if (!early_eoi) {
			icc_eoi1r_write(iar);
			arm_isb();
		}
	}

	pic_do_pending_ints(I32_bit, oldipl, frame);
}
761
/*
 * Probe how many priority bits the CPU interface implements: write the
 * test pattern 0xbf to ICC_PMR, read it back (unimplemented low-order
 * bits read as zero), and count from the lowest bit that stuck.  The
 * original mask is restored afterwards.
 */
static int
gicv3_detect_pmr_bits(struct gicv3_softc *sc)
{
	const uint32_t opmr = icc_pmr_read();
	icc_pmr_write(0xbf);
	const uint32_t npmr = icc_pmr_read();
	icc_pmr_write(opmr);

	return NBBY - (ffs(npmr) - 1);
}
772
/*
 * Probe how many priority bits the distributor implements: set the low
 * priority byte of an IPRIORITYR register (IRQ 32) to 0xff, read back
 * which bits stuck, and count from the lowest.  The original value is
 * restored afterwards.
 */
static int
gicv3_detect_ipriority_bits(struct gicv3_softc *sc)
{
	const uint32_t oipriorityr = gicd_read_4(sc, GICD_IPRIORITYRn(8));
	gicd_write_4(sc, GICD_IPRIORITYRn(8), oipriorityr | 0xff);
	const uint32_t nipriorityr = gicd_read_4(sc, GICD_IPRIORITYRn(8));
	gicd_write_4(sc, GICD_IPRIORITYRn(8), oipriorityr);

	return NBBY - (ffs(nipriorityr & 0xff) - 1);
}
783
/*
 * Attach-time initialization, run on the primary CPU.  Detects the
 * priority-field layout, registers the SGI/PPI/SPI PIC (and the LPI
 * PIC if the distributor supports LPIs), enables the distributor and
 * this CPU's interface, and establishes the softint and IPI sources.
 * Returns 0 on success.
 */
int
gicv3_init(struct gicv3_softc *sc)
{
	const uint32_t gicd_typer = gicd_read_4(sc, GICD_TYPER);
	const uint32_t gicd_ctrl = gicd_read_4(sc, GICD_CTRL);
	int n;

	KASSERT(CPU_IS_PRIMARY(curcpu()));

	LIST_INIT(&sc->sc_lpi_callbacks);

	/* UINT64_MAX marks a CPU whose SPI route is not yet known */
	for (n = 0; n < MAXCPUS; n++)
		sc->sc_irouter[n] = UINT64_MAX;

	/* Default shifts; may be adjusted below from the probed bit counts */
	sc->sc_priority_shift = 4;
	sc->sc_pmr_shift = 4;

	if ((gicd_ctrl & GICD_CTRL_DS) == 0) {
		/*
		 * Two security states: the non-secure view of priorities
		 * may differ between the distributor and the PMR.
		 */
		const int pmr_bits = gicv3_detect_pmr_bits(sc);
		const int ipriority_bits = gicv3_detect_ipriority_bits(sc);

		if (ipriority_bits != pmr_bits)
			--sc->sc_priority_shift;

		aprint_verbose_dev(sc->sc_dev, "%d pmr bits, %d ipriority bits\n",
		    pmr_bits, ipriority_bits);
	} else {
		aprint_verbose_dev(sc->sc_dev, "security disabled\n");
	}

	aprint_verbose_dev(sc->sc_dev, "priority shift %d, pmr shift %d\n",
	    sc->sc_priority_shift, sc->sc_pmr_shift);

	/* Register the SGI/PPI/SPI PIC */
	sc->sc_pic.pic_ops = &gicv3_picops;
	sc->sc_pic.pic_maxsources = GICD_TYPER_LINES(gicd_typer);
	snprintf(sc->sc_pic.pic_name, sizeof(sc->sc_pic.pic_name), "gicv3");
#ifdef MULTIPROCESSOR
	sc->sc_pic.pic_cpus = kcpuset_running;
#endif
	pic_add(&sc->sc_pic, 0);

	/* Register the LPI PIC if the distributor supports LPIs */
	if ((gicd_typer & GICD_TYPER_LPIS) != 0) {
		sc->sc_lpi.pic_ops = &gicv3_lpiops;
		sc->sc_lpi.pic_maxsources = 8192;	/* Min. required by GICv3 spec */
		snprintf(sc->sc_lpi.pic_name, sizeof(sc->sc_lpi.pic_name), "gicv3-lpi");
		pic_add(&sc->sc_lpi, GIC_LPI_BASE);

		gicv3_lpi_init(sc);
	}

	KASSERT(gicv3_softc == NULL);
	gicv3_softc = sc;

	/* Report the affinity of each discovered redistributor */
	for (int i = 0; i < sc->sc_bsh_r_count; i++) {
		const uint64_t gicr_typer = gicr_read_8(sc, i, GICR_TYPER);
		const u_int aff0 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff0);
		const u_int aff1 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff1);
		const u_int aff2 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff2);
		const u_int aff3 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff3);

		aprint_debug_dev(sc->sc_dev, "redist %d: cpu %d.%d.%d.%d\n",
		    i, aff3, aff2, aff1, aff0);
	}

	gicv3_dist_enable(sc);

	/* Bring up this (primary) CPU's interface and redistributor */
	gicv3_cpu_init(&sc->sc_pic, curcpu());
	if ((gicd_typer & GICD_TYPER_LPIS) != 0)
		gicv3_lpi_cpu_init(&sc->sc_lpi, curcpu());

#ifdef __HAVE_PIC_FAST_SOFTINTS
	/* Fast softints are implemented as self-IPIs */
	intr_establish_xname(SOFTINT_BIO, IPL_SOFTBIO, IST_MPSAFE | IST_EDGE, pic_handle_softint, (void *)SOFTINT_BIO, "softint bio");
	intr_establish_xname(SOFTINT_CLOCK, IPL_SOFTCLOCK, IST_MPSAFE | IST_EDGE, pic_handle_softint, (void *)SOFTINT_CLOCK, "softint clock");
	intr_establish_xname(SOFTINT_NET, IPL_SOFTNET, IST_MPSAFE | IST_EDGE, pic_handle_softint, (void *)SOFTINT_NET, "softint net");
	intr_establish_xname(SOFTINT_SERIAL, IPL_SOFTSERIAL, IST_MPSAFE | IST_EDGE, pic_handle_softint, (void *)SOFTINT_SERIAL, "softint serial");
#endif

#ifdef MULTIPROCESSOR
	/* Establish the inter-processor interrupt (SGI) sources */
	intr_establish_xname(IPI_AST, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_ast, (void *)-1, "IPI ast");
	intr_establish_xname(IPI_XCALL, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_xcall, (void *)-1, "IPI xcall");
	intr_establish_xname(IPI_GENERIC, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_generic, (void *)-1, "IPI generic");
	intr_establish_xname(IPI_NOP, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_nop, (void *)-1, "IPI nop");
	intr_establish_xname(IPI_SHOOTDOWN, IPL_SCHED, IST_MPSAFE | IST_EDGE, pic_ipi_shootdown, (void *)-1, "IPI shootdown");
#ifdef DDB
	intr_establish_xname(IPI_DDB, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_ddb, NULL, "IPI ddb");
#endif
#ifdef __HAVE_PREEMPTION
	intr_establish_xname(IPI_KPREEMPT, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_kpreempt, (void *)-1, "IPI kpreempt");
#endif
#endif

	return 0;
}
877