/* $NetBSD: gicv3_its.c,v 1.34 2021/10/31 17:24:11 skrll Exp $ */

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jared McNeill <jmcneill@invisible.ca>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gicv3_its.c,v 1.34 2021/10/31 17:24:11 skrll Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/bitops.h>

#include <uvm/uvm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <arm/pic/picvar.h>
#include <arm/cortex/gicv3_its.h>

/*
 * ITS translation table sizes
 */
#define	GITS_COMMANDS_SIZE	0x1000
#define	GITS_COMMANDS_ALIGN	0x10000
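/* Each ITS command is 32 bytes, so a 0x1000-byte queue holds 128 slots. */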

#define	GITS_ITT_ALIGN		0x100

/*
 * IIDR values used for errata
 */
#define	GITS_IIDR_PID_CAVIUM_THUNDERX	0xa1
#define	GITS_IIDR_IMP_CAVIUM		0x34c
#define	GITS_IIDR_CAVIUM_ERRATA_MASK	(GITS_IIDR_Implementor|GITS_IIDR_ProductID|GITS_IIDR_Variant)
#define	GITS_IIDR_CAVIUM_ERRATA_VALUE				\
	(__SHIFTIN(GITS_IIDR_IMP_CAVIUM, GITS_IIDR_Implementor) |	\
	 __SHIFTIN(GITS_IIDR_PID_CAVIUM_THUNDERX, GITS_IIDR_ProductID) | \
	 __SHIFTIN(0, GITS_IIDR_Variant))

static const char * gits_cache_type[] = {
	[GITS_Cache_DEVICE_nGnRnE] = "Device-nGnRnE",
	[GITS_Cache_NORMAL_NC] = "Non-cacheable",
	[GITS_Cache_NORMAL_RA_WT] = "Cacheable RA WT",
	[GITS_Cache_NORMAL_RA_WB] = "Cacheable RA WB",
	[GITS_Cache_NORMAL_WA_WT] = "Cacheable WA WT",
	[GITS_Cache_NORMAL_WA_WB] = "Cacheable WA WB",
	[GITS_Cache_NORMAL_RA_WA_WT] = "Cacheable RA WA WT",
	[GITS_Cache_NORMAL_RA_WA_WB] = "Cacheable RA WA WB",
};

static const char * gits_share_type[] = {
	[GITS_Shareability_NS] = "Non-shareable",
	[GITS_Shareability_IS] = "Inner shareable",
	[GITS_Shareability_OS] = "Outer shareable",
	[3] = "(Reserved)",
};

static inline uint32_t
gits_read_4(struct gicv3_its *its, bus_size_t reg)
{
	return bus_space_read_4(its->its_bst, its->its_bsh, reg);
}

static inline void
gits_write_4(struct gicv3_its *its, bus_size_t reg, uint32_t val)
{
	bus_space_write_4(its->its_bst, its->its_bsh, reg, val);
}

static inline uint64_t
gits_read_8(struct gicv3_its *its, bus_size_t reg)
{
	return bus_space_read_8(its->its_bst, its->its_bsh, reg);
}

static inline void
gits_write_8(struct gicv3_its *its, bus_size_t reg, uint64_t val)
{
	bus_space_write_8(its->its_bst, its->its_bsh, reg, val);
}

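/*
 * Enqueue one 32-byte command: write its four doublewords at the current
 * CWRITER offset, flush them to memory for the ITS to see, then advance
 * CWRITER, wrapping at the end of the circular queue. The ITS advances
 * CREADR as it consumes commands.
 */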
static inline void
gits_command(struct gicv3_its *its, const struct gicv3_its_command *cmd)
{
	uint64_t cwriter;
	u_int woff;

	cwriter = gits_read_8(its, GITS_CWRITER);
	woff = cwriter & GITS_CWRITER_Offset;

	uint64_t *dw = (uint64_t *)(its->its_cmd.base + woff);
	for (int i = 0; i < __arraycount(cmd->dw); i++)
		dw[i] = htole64(cmd->dw[i]);
	bus_dmamap_sync(its->its_dmat, its->its_cmd.map, woff, sizeof(cmd->dw), BUS_DMASYNC_PREWRITE);

	woff += sizeof(cmd->dw);
	if (woff == its->its_cmd.len)
		woff = 0;

	gits_write_8(its, GITS_CWRITER, woff);
}

static inline void
gits_command_mapc(struct gicv3_its *its, uint16_t icid, uint64_t rdbase, bool v)
{
	struct gicv3_its_command cmd;

	KASSERT((rdbase & 0xffff) == 0);

	/*
	 * Map a collection table entry (ICID) to the target redistributor (RDbase).
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_MAPC;
	cmd.dw[2] = icid;
	if (v) {
		cmd.dw[2] |= rdbase;
		cmd.dw[2] |= __BIT(63);
	}

	gits_command(its, &cmd);
}

static inline void
gits_command_mapd(struct gicv3_its *its, uint32_t deviceid, uint64_t itt_addr, u_int size, bool v)
{
	struct gicv3_its_command cmd;

	KASSERT((itt_addr & 0xff) == 0);

	/*
	 * Map a device table entry (DeviceID) to its associated ITT (ITT_addr).
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_MAPD | ((uint64_t)deviceid << 32);
	cmd.dw[1] = size;
	if (v) {
		cmd.dw[2] = itt_addr | __BIT(63);
	}

	gits_command(its, &cmd);
}

static inline void
gits_command_mapti(struct gicv3_its *its, uint32_t deviceid, uint32_t eventid, uint32_t pintid, uint16_t icid)
{
	struct gicv3_its_command cmd;

	/*
	 * Map the event defined by EventID and DeviceID to its associated ITE,
	 * defined by ICID and pINTID in the ITT associated with DeviceID.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_MAPTI | ((uint64_t)deviceid << 32);
	cmd.dw[1] = eventid | ((uint64_t)pintid << 32);
	cmd.dw[2] = icid;

	gits_command(its, &cmd);
}

static inline void
gits_command_movi(struct gicv3_its *its, uint32_t deviceid, uint32_t eventid, uint16_t icid)
{
	struct gicv3_its_command cmd;

	/*
	 * Update the ICID field in the ITT entry for the event defined by
	 * DeviceID and EventID.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_MOVI | ((uint64_t)deviceid << 32);
	cmd.dw[1] = eventid;
	cmd.dw[2] = icid;

	gits_command(its, &cmd);
}

static inline void
gits_command_inv(struct gicv3_its *its, uint32_t deviceid, uint32_t eventid)
{
	struct gicv3_its_command cmd;

	/*
	 * Ensure any caching in the redistributors associated with the specified
	 * EventID is consistent with the LPI configuration tables.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_INV | ((uint64_t)deviceid << 32);
	cmd.dw[1] = eventid;

	gits_command(its, &cmd);
}

static inline void
gits_command_invall(struct gicv3_its *its, uint16_t icid)
{
	struct gicv3_its_command cmd;

	/*
	 * Ensure any caching associated with this ICID is consistent with LPI
	 * configuration tables for all redistributors.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_INVALL;
	cmd.dw[2] = icid;

	gits_command(its, &cmd);
}

static inline void
gits_command_sync(struct gicv3_its *its, uint64_t rdbase)
{
	struct gicv3_its_command cmd;

	KASSERT((rdbase & 0xffff) == 0);

	/*
	 * Ensure all outstanding ITS operations associated with physical interrupts
	 * for the specified redistributor (RDbase) are globally observed before
	 * further ITS commands are executed.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_SYNC;
	cmd.dw[2] = rdbase;

	gits_command(its, &cmd);
}

static inline int
gits_wait(struct gicv3_its *its)
{
	u_int woff, roff;
	int retry;

	/*
	 * The ITS command queue is empty when CWRITER and CREADR specify the
	 * same base address offset value.
	 */
	for (retry = 1000; retry > 0; retry--) {
		woff = gits_read_8(its, GITS_CWRITER) & GITS_CWRITER_Offset;
		roff = gits_read_8(its, GITS_CREADR) & GITS_CREADR_Offset;
		if (woff == roff)
			break;
		delay(100);
	}
	if (retry == 0) {
		device_printf(its->its_gic->sc_dev, "ITS command queue timeout\n");
		return ETIMEDOUT;
	}

	return 0;
}

static int
gicv3_its_msi_alloc_lpi(struct gicv3_its *its,
    const struct pci_attach_args *pa)
{
	struct pci_attach_args *new_pa;
	vmem_addr_t n;

	KASSERT(its->its_gic->sc_lpi_pool != NULL);

	if (vmem_alloc(its->its_gic->sc_lpi_pool, 1, VM_INSTANTFIT|VM_SLEEP, &n) != 0)
		return -1;

	KASSERT(its->its_pa[n] == NULL);

	new_pa = kmem_alloc(sizeof(*new_pa), KM_SLEEP);
	memcpy(new_pa, pa, sizeof(*new_pa));
	its->its_pa[n] = new_pa;
	return n + its->its_pic->pic_irqbase;
}

static void
gicv3_its_msi_free_lpi(struct gicv3_its *its, int lpi)
{
	struct pci_attach_args *pa;

	KASSERT(its->its_gic->sc_lpi_pool != NULL);
	KASSERT(lpi >= its->its_pic->pic_irqbase);

	pa = its->its_pa[lpi - its->its_pic->pic_irqbase];
	its->its_pa[lpi - its->its_pic->pic_irqbase] = NULL;
	kmem_free(pa, sizeof(*pa));

	vmem_free(its->its_gic->sc_lpi_pool, lpi - its->its_pic->pic_irqbase, 1);
}

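/*
 * Derive the ITS DeviceID for a PCI device. The bus/device/function
 * triple is packed in PCIe requester ID form; pci_get_devid() gives the
 * MD code a chance to rewrite it (e.g. using a firmware-described
 * DeviceID mapping).
 */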
static uint32_t
gicv3_its_devid(pci_chipset_tag_t pc, pcitag_t tag)
{
	uint32_t devid;
	int b, d, f;

	pci_decompose_tag(pc, tag, &b, &d, &f);

	devid = (b << 8) | (d << 3) | f;

	return pci_get_devid(pc, devid);
}

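/*
 * Ensure a device table entry and ITT exist for this DeviceID. The ITT
 * must cover a power-of-two number of events (at least two) and be
 * aligned to GITS_ITT_ALIGN; the per-event entry size is discovered
 * from GITS_TYPER.
 */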
static int
gicv3_its_device_map(struct gicv3_its *its, uint32_t devid, u_int count)
{
	struct gicv3_its_device *dev;
	u_int vectors;

	vectors = MAX(2, count);
	while (!powerof2(vectors))
		vectors++;

	const uint64_t typer = gits_read_8(its, GITS_TYPER);
	const u_int itt_entry_size = __SHIFTOUT(typer, GITS_TYPER_ITT_entry_size) + 1;
	const u_int itt_size = roundup(vectors * itt_entry_size, GITS_ITT_ALIGN);

	LIST_FOREACH(dev, &its->its_devices, dev_list)
		if (dev->dev_id == devid) {
			return itt_size <= dev->dev_size ? 0 : EEXIST;
		}

	dev = kmem_alloc(sizeof(*dev), KM_SLEEP);
	dev->dev_id = devid;
	dev->dev_size = itt_size;
	gicv3_dma_alloc(its->its_gic, &dev->dev_itt, itt_size, GITS_ITT_ALIGN);
	LIST_INSERT_HEAD(&its->its_devices, dev, dev_list);

	/*
	 * Map the device to the ITT
	 */
	const u_int id_bits = __SHIFTOUT(typer, GITS_TYPER_ID_bits) + 1;
	gits_command_mapd(its, devid, dev->dev_itt.segs[0].ds_addr, id_bits - 1, true);
	gits_wait(its);

	return 0;
}

static void
gicv3_its_msi_enable(struct gicv3_its *its, int lpi, int count)
{
	const struct pci_attach_args *pa = its->its_pa[lpi - its->its_pic->pic_irqbase];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL))
		panic("gicv3_its_msi_enable: device is not MSI-capable");

	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	ctl &= ~PCI_MSI_CTL_MME_MASK;
	ctl |= __SHIFTIN(ilog2(count), PCI_MSI_CTL_MME_MASK);
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);

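	/*
	 * The MSI doorbell is the ITS translation register; the message
	 * data is the EventID, which this driver maps 1:1 to the LPI's
	 * offset from pic_irqbase.
	 */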
	const uint64_t addr = its->its_base + GITS_TRANSLATER;
	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	if (ctl & PCI_MSI_CTL_64BIT_ADDR) {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_LO,
		    addr & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_HI,
		    (addr >> 32) & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA64,
		    lpi - its->its_pic->pic_irqbase);
	} else {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR,
		    addr & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA,
		    lpi - its->its_pic->pic_irqbase);
	}
	ctl |= PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}

static void
gicv3_its_msi_disable(struct gicv3_its *its, int lpi)
{
	const struct pci_attach_args *pa = its->its_pa[lpi - its->its_pic->pic_irqbase];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL))
		panic("gicv3_its_msi_disable: device is not MSI-capable");

	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	ctl &= ~PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}

static void
gicv3_its_msix_enable(struct gicv3_its *its, int lpi, int msix_vec,
    bus_space_tag_t bst, bus_space_handle_t bsh)
{
	const struct pci_attach_args *pa = its->its_pa[lpi - its->its_pic->pic_irqbase];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	uint32_t val;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL))
		panic("gicv3_its_msix_enable: device is not MSI-X-capable");

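	/*
	 * Program the MSI-X table entry: the doorbell address is the ITS
	 * translation register, the data is the EventID, and the vector
	 * is unmasked by clearing its mask bit in the vector control word.
	 */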
	const uint64_t addr = its->its_base + GITS_TRANSLATER;
	const uint64_t entry_base = PCI_MSIX_TABLE_ENTRY_SIZE * msix_vec;
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_LO, (uint32_t)addr);
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_HI, (uint32_t)(addr >> 32));
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_DATA, lpi - its->its_pic->pic_irqbase);
	val = bus_space_read_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL);
	val &= ~PCI_MSIX_VECTCTL_MASK;
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL, val);

	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl |= PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
}

static void
gicv3_its_msix_disable(struct gicv3_its *its, int lpi)
{
	const struct pci_attach_args *pa = its->its_pa[lpi - its->its_pic->pic_irqbase];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL))
		panic("gicv3_its_msix_disable: device is not MSI-X-capable");

	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl &= ~PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
}

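/*
 * Allocate MSI vectors: reserve LPIs from the shared pool, program the
 * device's MSI capability, and issue MAPTI commands binding each
 * (DeviceID, EventID) pair to its LPI and to CPU 0's collection.
 */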
static pci_intr_handle_t *
gicv3_its_msi_alloc(struct arm_pci_msi *msi, int *count,
    const struct pci_attach_args *pa, bool exact)
{
	struct gicv3_its * const its = msi->msi_priv;
	struct cpu_info * const ci = cpu_lookup(0);
	pci_intr_handle_t *vectors;
	int n, off;

	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI, &off, NULL))
		return NULL;

	const uint64_t typer = gits_read_8(its, GITS_TYPER);
	const u_int id_bits = __SHIFTOUT(typer, GITS_TYPER_ID_bits) + 1;
	if (*count == 0 || *count > (1 << id_bits))
		return NULL;

	const uint32_t devid = gicv3_its_devid(pa->pa_pc, pa->pa_tag);

	if (gicv3_its_device_map(its, devid, *count) != 0)
		return NULL;

	vectors = kmem_alloc(sizeof(*vectors) * *count, KM_SLEEP);
	for (n = 0; n < *count; n++) {
		const int lpi = gicv3_its_msi_alloc_lpi(its, pa);
		vectors[n] = ARM_PCI_INTR_MSI |
		    __SHIFTIN(lpi, ARM_PCI_INTR_IRQ) |
		    __SHIFTIN(n, ARM_PCI_INTR_MSI_VEC) |
		    __SHIFTIN(msi->msi_id, ARM_PCI_INTR_FRAME);

		if (n == 0)
			gicv3_its_msi_enable(its, lpi, *count);

		/*
		 * Record devid and target PE
		 */
		its->its_devid[lpi - its->its_pic->pic_irqbase] = devid;
		its->its_targets[lpi - its->its_pic->pic_irqbase] = ci;

		/*
		 * Map event
		 */
		gits_command_mapti(its, devid, lpi - its->its_pic->pic_irqbase, lpi, cpu_index(ci));
		gits_command_sync(its, its->its_rdbase[cpu_index(ci)]);
	}
	gits_wait(its);

	return vectors;
}

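/*
 * Allocate MSI-X vectors. The MSI-X table lives in one of the device's
 * BARs; map it temporarily so each table entry can be programmed with
 * the ITS doorbell address and its EventID.
 */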
static pci_intr_handle_t *
gicv3_its_msix_alloc(struct arm_pci_msi *msi, u_int *table_indexes, int *count,
    const struct pci_attach_args *pa, bool exact)
{
	struct gicv3_its * const its = msi->msi_priv;
	struct cpu_info *ci = cpu_lookup(0);
	pci_intr_handle_t *vectors;
	bus_space_tag_t bst;
	bus_space_handle_t bsh;
	bus_size_t bsz;
	uint32_t table_offset, table_size;
	int n, off, bar, error;
	pcireg_t tbl;

	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &off, NULL))
		return NULL;

	const uint64_t typer = gits_read_8(its, GITS_TYPER);
	const u_int id_bits = __SHIFTOUT(typer, GITS_TYPER_ID_bits) + 1;
	if (*count == 0 || *count > (1 << id_bits))
		return NULL;

	tbl = pci_conf_read(pa->pa_pc, pa->pa_tag, off + PCI_MSIX_TBLOFFSET);
	bar = PCI_BAR0 + (4 * (tbl & PCI_MSIX_TBLBIR_MASK));
	table_offset = tbl & PCI_MSIX_TBLOFFSET_MASK;
	table_size = pci_msix_count(pa->pa_pc, pa->pa_tag) * PCI_MSIX_TABLE_ENTRY_SIZE;
	if (table_size == 0)
		return NULL;

	error = pci_mapreg_submap(pa, bar, pci_mapreg_type(pa->pa_pc, pa->pa_tag, bar),
	    BUS_SPACE_MAP_LINEAR, roundup(table_size, PAGE_SIZE), table_offset,
	    &bst, &bsh, NULL, &bsz);
	if (error)
		return NULL;

	const uint32_t devid = gicv3_its_devid(pa->pa_pc, pa->pa_tag);

	if (gicv3_its_device_map(its, devid, *count) != 0) {
		bus_space_unmap(bst, bsh, bsz);
		return NULL;
	}

	vectors = kmem_alloc(sizeof(*vectors) * *count, KM_SLEEP);
	for (n = 0; n < *count; n++) {
		const int lpi = gicv3_its_msi_alloc_lpi(its, pa);
		const int msix_vec = table_indexes ? table_indexes[n] : n;
		vectors[msix_vec] = ARM_PCI_INTR_MSIX |
		    __SHIFTIN(lpi, ARM_PCI_INTR_IRQ) |
		    __SHIFTIN(msix_vec, ARM_PCI_INTR_MSI_VEC) |
		    __SHIFTIN(msi->msi_id, ARM_PCI_INTR_FRAME);

		gicv3_its_msix_enable(its, lpi, msix_vec, bst, bsh);

		/*
		 * Record devid and target PE
		 */
		its->its_devid[lpi - its->its_pic->pic_irqbase] = devid;
		its->its_targets[lpi - its->its_pic->pic_irqbase] = ci;

		/*
		 * Map event
		 */
		gits_command_mapti(its, devid, lpi - its->its_pic->pic_irqbase, lpi, cpu_index(ci));
		gits_command_sync(its, its->its_rdbase[cpu_index(ci)]);
	}
	gits_wait(its);

	bus_space_unmap(bst, bsh, bsz);

	return vectors;
}

static void *
gicv3_its_msi_intr_establish(struct arm_pci_msi *msi,
    pci_intr_handle_t ih, int ipl, int (*func)(void *), void *arg, const char *xname)
{
	struct gicv3_its * const its = msi->msi_priv;
	void *intrh;

	const int lpi = __SHIFTOUT(ih, ARM_PCI_INTR_IRQ);
	const int mpsafe = (ih & ARM_PCI_INTR_MPSAFE) ? IST_MPSAFE : 0;

	intrh = pic_establish_intr(its->its_pic, lpi - its->its_pic->pic_irqbase, ipl,
	    IST_EDGE | mpsafe, func, arg, xname);
	if (intrh == NULL)
		return NULL;

	/* Invalidate LPI configuration tables */
	KASSERT(its->its_pa[lpi - its->its_pic->pic_irqbase] != NULL);
	const uint32_t devid = its->its_devid[lpi - its->its_pic->pic_irqbase];
	gits_command_inv(its, devid, lpi - its->its_pic->pic_irqbase);

	return intrh;
}

static void
gicv3_its_msi_intr_release(struct arm_pci_msi *msi, pci_intr_handle_t *pih,
    int count)
{
	struct gicv3_its * const its = msi->msi_priv;
	int n;

	for (n = 0; n < count; n++) {
		const int lpi = __SHIFTOUT(pih[n], ARM_PCI_INTR_IRQ);
		KASSERT(lpi >= its->its_pic->pic_irqbase);
		if (pih[n] & ARM_PCI_INTR_MSIX)
			gicv3_its_msix_disable(its, lpi);
		if (pih[n] & ARM_PCI_INTR_MSI)
			gicv3_its_msi_disable(its, lpi);
		gicv3_its_msi_free_lpi(its, lpi);
		its->its_targets[lpi - its->its_pic->pic_irqbase] = NULL;
		its->its_devid[lpi - its->its_pic->pic_irqbase] = 0;
		struct intrsource * const is =
		    its->its_pic->pic_sources[lpi - its->its_pic->pic_irqbase];
		if (is != NULL)
			pic_disestablish_source(is);
	}
}

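/*
 * Allocate the ITS command queue and program GITS_CBASER with its
 * physical address, size in 4KB pages, and memory attributes. Resetting
 * CWRITER to zero leaves the queue empty.
 */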
static void
gicv3_its_command_init(struct gicv3_softc *sc, struct gicv3_its *its)
{
	uint64_t cbaser;

	gicv3_dma_alloc(sc, &its->its_cmd, GITS_COMMANDS_SIZE, GITS_COMMANDS_ALIGN);

	cbaser = its->its_cmd.segs[0].ds_addr;
	cbaser |= __SHIFTIN(GITS_Cache_NORMAL_NC, GITS_CBASER_InnerCache);
	cbaser |= __SHIFTIN(GITS_Shareability_NS, GITS_CBASER_Shareability);
	cbaser |= __SHIFTIN((its->its_cmd.len / 4096) - 1, GITS_CBASER_Size);
	cbaser |= GITS_CBASER_Valid;

	gits_write_8(its, GITS_CBASER, cbaser);
	gits_write_8(its, GITS_CWRITER, 0);
}

static void
gicv3_its_table_params(struct gicv3_softc *sc, struct gicv3_its *its,
    u_int *devbits, u_int *innercache, u_int *share)
{

	const uint64_t typer = gits_read_8(its, GITS_TYPER);
	const uint32_t iidr = gits_read_4(its, GITS_IIDR);

	/* Default values */
	*devbits = __SHIFTOUT(typer, GITS_TYPER_Devbits) + 1;
	*innercache = GITS_Cache_NORMAL_WA_WB;
	*share = GITS_Shareability_IS;

	/* Cavium ThunderX errata */
	if ((iidr & GITS_IIDR_CAVIUM_ERRATA_MASK) == GITS_IIDR_CAVIUM_ERRATA_VALUE) {
		*devbits = 20;		/* 8MB */
		*innercache = GITS_Cache_DEVICE_nGnRnE;
		aprint_normal_dev(sc->sc_dev, "Cavium ThunderX errata detected\n");
	}
}

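/*
 * Walk the eight GITS_BASERn registers, allocating backing memory for
 * the Devices and Collections tables the ITS requests. If a register
 * reads back as non-shareable after being written, the ITS may not
 * support cacheable attributes, so downgrade to non-cacheable.
 */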
static void
gicv3_its_table_init(struct gicv3_softc *sc, struct gicv3_its *its)
{
	u_int table_size, page_size, table_align;
	u_int devbits, innercache, share;
	const char *table_type;
	uint64_t baser;
	int tab;

	gicv3_its_table_params(sc, its, &devbits, &innercache, &share);

	for (tab = 0; tab < 8; tab++) {
		baser = gits_read_8(its, GITS_BASERn(tab));

		const u_int entry_size = __SHIFTOUT(baser, GITS_BASER_Entry_Size) + 1;

		switch (__SHIFTOUT(baser, GITS_BASER_Page_Size)) {
		case GITS_Page_Size_4KB:
			page_size = 4096;
			table_align = 4096;
			break;
		case GITS_Page_Size_16KB:
			page_size = 16384;
			table_align = 4096;
			break;
		case GITS_Page_Size_64KB:
		default:
			page_size = 65536;
			table_align = 65536;
			break;
		}

		switch (__SHIFTOUT(baser, GITS_BASER_Type)) {
		case GITS_Type_Devices:
			/*
			 * Table size scales with the width of the DeviceID.
			 */
			table_size = roundup(entry_size * (1 << devbits), page_size);
			table_type = "Devices";
			break;
		case GITS_Type_InterruptCollections:
			/*
			 * Allocate space for one interrupt collection per CPU.
			 */
			table_size = roundup(entry_size * ncpu, page_size);
			table_type = "Collections";
			break;
		default:
			table_size = 0;
			break;
		}

		if (table_size == 0)
			continue;

		gicv3_dma_alloc(sc, &its->its_tab[tab], table_size, table_align);

		baser &= ~GITS_BASER_Size;
		baser |= __SHIFTIN(table_size / page_size - 1, GITS_BASER_Size);
		baser &= ~GITS_BASER_Physical_Address;
		baser |= its->its_tab[tab].segs[0].ds_addr;
		baser &= ~GITS_BASER_InnerCache;
		baser |= __SHIFTIN(innercache, GITS_BASER_InnerCache);
		baser &= ~GITS_BASER_Shareability;
		baser |= __SHIFTIN(share, GITS_BASER_Shareability);
		baser |= GITS_BASER_Valid;

		gits_write_8(its, GITS_BASERn(tab), baser);

		baser = gits_read_8(its, GITS_BASERn(tab));
		if (__SHIFTOUT(baser, GITS_BASER_Shareability) == GITS_Shareability_NS) {
			baser &= ~GITS_BASER_InnerCache;
			baser |= __SHIFTIN(GITS_Cache_NORMAL_NC, GITS_BASER_InnerCache);

			gits_write_8(its, GITS_BASERn(tab), baser);
		}

		baser = gits_read_8(its, GITS_BASERn(tab));
		aprint_normal_dev(sc->sc_dev, "ITS [#%d] %s table @ %#lx/%#x, %s, %s\n",
		    tab, table_type, its->its_tab[tab].segs[0].ds_addr, table_size,
		    gits_cache_type[__SHIFTOUT(baser, GITS_BASER_InnerCache)],
		    gits_share_type[__SHIFTOUT(baser, GITS_BASER_Shareability)]);
	}
}

static void
gicv3_its_enable(struct gicv3_softc *sc, struct gicv3_its *its)
{
	uint32_t ctlr;

	ctlr = gits_read_4(its, GITS_CTLR);
	ctlr |= GITS_CTLR_Enabled;
	gits_write_4(its, GITS_CTLR, ctlr);
}

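/*
 * Per-CPU ITS setup. GITS_TYPER.PTA determines the RDbase format: the
 * redistributor's physical address when set, otherwise its processor
 * number shifted into the RDbase field. Map this CPU's collection and
 * re-route any LPIs already targeting it.
 */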
static void
gicv3_its_cpu_init(void *priv, struct cpu_info *ci)
{
	struct gicv3_its * const its = priv;
	struct gicv3_softc * const sc = its->its_gic;
	uint64_t rdbase;
	size_t irq;

	const uint64_t typer = bus_space_read_8(sc->sc_bst, its->its_bsh, GITS_TYPER);
	if (typer & GITS_TYPER_PTA) {
		void *va = bus_space_vaddr(sc->sc_bst, sc->sc_bsh_r[ci->ci_gic_redist]);
		rdbase = vtophys((vaddr_t)va);
	} else {
		rdbase = (uint64_t)sc->sc_processor_id[cpu_index(ci)] << 16;
	}
	its->its_rdbase[cpu_index(ci)] = rdbase;

	/*
	 * Map collection ID of this CPU's index to this CPU's redistributor.
	 */
	mutex_enter(its->its_lock);
	gits_command_mapc(its, cpu_index(ci), rdbase, true);
	gits_command_invall(its, cpu_index(ci));
	gits_wait(its);

	/*
	 * Update routing for LPIs targeting this CPU.
	 */
	for (irq = 0; irq < its->its_pic->pic_maxsources; irq++) {
		if (its->its_targets[irq] != ci)
			continue;
		KASSERT(its->its_pa[irq] != NULL);

		const uint32_t devid = its->its_devid[irq];
		gits_command_movi(its, devid, irq, cpu_index(ci));
		gits_command_sync(its, its->its_rdbase[cpu_index(ci)]);
	}
	gits_wait(its);
	mutex_exit(its->its_lock);

	its->its_cpuonline[cpu_index(ci)] = true;
}

static void
gicv3_its_get_affinity(void *priv, size_t irq, kcpuset_t *affinity)
{
	struct gicv3_its * const its = priv;
	struct cpu_info *ci;

	ci = its->its_targets[irq];
	if (ci)
		kcpuset_set(affinity, cpu_index(ci));
}

static int
gicv3_its_set_affinity(void *priv, size_t irq, const kcpuset_t *affinity)
{
	struct gicv3_its * const its = priv;
	const struct pci_attach_args *pa;
	struct cpu_info *ci;

	const int set = kcpuset_countset(affinity);
	if (set != 1)
		return EINVAL;

	pa = its->its_pa[irq];
	if (pa == NULL)
		return EPASSTHROUGH;

	ci = cpu_lookup(kcpuset_ffs(affinity) - 1);
	its->its_targets[irq] = ci;

	if (its->its_cpuonline[cpu_index(ci)] == true) {
		const uint32_t devid = gicv3_its_devid(pa->pa_pc, pa->pa_tag);
		gits_command_movi(its, devid, irq, cpu_index(ci));
		gits_command_sync(its, its->its_rdbase[cpu_index(ci)]);
	}

	return 0;
}

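/*
 * Attach an ITS instance: allocate per-LPI bookkeeping, set up the
 * command queue and translation tables, enable the ITS, initialize the
 * boot CPU's collection, and register as an MSI/MSI-X provider.
 */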
int
gicv3_its_init(struct gicv3_softc *sc, bus_space_handle_t bsh,
    uint64_t its_base, uint32_t its_id)
{
	struct gicv3_its *its;
	struct arm_pci_msi *msi;

	const uint64_t typer = bus_space_read_8(sc->sc_bst, bsh, GITS_TYPER);
	if ((typer & GITS_TYPER_Physical) == 0)
		return ENXIO;

	its = kmem_zalloc(sizeof(*its), KM_SLEEP);
	its->its_id = its_id;
	its->its_bst = sc->sc_bst;
	its->its_bsh = bsh;
	its->its_dmat = sc->sc_dmat;
	its->its_base = its_base;
	its->its_pic = &sc->sc_lpi;
	snprintf(its->its_pic->pic_name, sizeof(its->its_pic->pic_name), "gicv3-its");
	KASSERT(its->its_pic->pic_maxsources > 0);
	its->its_pa = kmem_zalloc(sizeof(struct pci_attach_args *) * its->its_pic->pic_maxsources, KM_SLEEP);
	its->its_targets = kmem_zalloc(sizeof(struct cpu_info *) * its->its_pic->pic_maxsources, KM_SLEEP);
	its->its_devid = kmem_zalloc(sizeof(uint32_t) * its->its_pic->pic_maxsources, KM_SLEEP);
	its->its_gic = sc;
	its->its_rdbase = kmem_zalloc(sizeof(*its->its_rdbase) * ncpu, KM_SLEEP);
	its->its_cpuonline = kmem_zalloc(sizeof(*its->its_cpuonline) * ncpu, KM_SLEEP);
	its->its_cb.cpu_init = gicv3_its_cpu_init;
	its->its_cb.get_affinity = gicv3_its_get_affinity;
	its->its_cb.set_affinity = gicv3_its_set_affinity;
	its->its_cb.priv = its;
	LIST_INIT(&its->its_devices);
	LIST_INSERT_HEAD(&sc->sc_lpi_callbacks, &its->its_cb, list);
	its->its_lock = mutex_obj_alloc(MUTEX_SPIN, IPL_NONE);

	gicv3_its_command_init(sc, its);
	gicv3_its_table_init(sc, its);

	gicv3_its_enable(sc, its);

	gicv3_its_cpu_init(its, curcpu());

	msi = &its->its_msi;
	msi->msi_id = its_id;
	msi->msi_dev = sc->sc_dev;
	msi->msi_priv = its;
	msi->msi_alloc = gicv3_its_msi_alloc;
	msi->msix_alloc = gicv3_its_msix_alloc;
	msi->msi_intr_establish = gicv3_its_msi_intr_establish;
	msi->msi_intr_release = gicv3_its_msi_intr_release;

	return arm_pci_msi_add(msi);
}