/* $NetBSD: gicv3_its.c,v 1.5 2018/11/22 20:47:37 jakllsch Exp $ */

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jared McNeill <jmcneill@invisible.ca>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gicv3_its.c,v 1.5 2018/11/22 20:47:37 jakllsch Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/bus.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <arm/pic/picvar.h>
#include <arm/cortex/gicv3_its.h>

/*
 * ITS translation table sizes
 */
#define GITS_COMMANDS_SIZE	0x1000
#define GITS_COMMANDS_ALIGN	0x10000
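/*
 * Each ITS command is 32 bytes (four 64-bit doublewords), so the 4KB
 * command queue above holds 128 commands.  The 64KB alignment is more
 * conservative than GITS_CBASER strictly requires.
 */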

#define GITS_ITT_ALIGN		0x100

static inline uint32_t
gits_read_4(struct gicv3_its *its, bus_size_t reg)
{
	return bus_space_read_4(its->its_bst, its->its_bsh, reg);
}

static inline void
gits_write_4(struct gicv3_its *its, bus_size_t reg, uint32_t val)
{
	bus_space_write_4(its->its_bst, its->its_bsh, reg, val);
}

static inline uint64_t
gits_read_8(struct gicv3_its *its, bus_size_t reg)
{
	return bus_space_read_8(its->its_bst, its->its_bsh, reg);
}

static inline void
gits_write_8(struct gicv3_its *its, bus_size_t reg, uint64_t val)
{
	bus_space_write_8(its->its_bst, its->its_bsh, reg, val);
}

static inline void
gits_command(struct gicv3_its *its, const struct gicv3_its_command *cmd)
{
	uint64_t cwriter;
	u_int woff;

	cwriter = gits_read_8(its, GITS_CWRITER);
	woff = cwriter & GITS_CWRITER_Offset;

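	/*
	 * Copy the command into the current queue slot and flush it out
	 * before publishing the new write offset.  The queue is a ring,
	 * so wrap the offset back to zero at the end; advancing
	 * GITS_CWRITER is what hands the command to the ITS.
	 */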
	memcpy(its->its_cmd.base + woff, cmd->dw, sizeof(cmd->dw));
	bus_dmamap_sync(its->its_dmat, its->its_cmd.map, woff, sizeof(cmd->dw),
	    BUS_DMASYNC_PREWRITE);

	woff += sizeof(cmd->dw);
	if (woff == its->its_cmd.len)
		woff = 0;

	gits_write_8(its, GITS_CWRITER, woff);
}

static inline void
gits_command_mapc(struct gicv3_its *its, uint16_t icid, uint64_t rdbase, bool v)
{
	struct gicv3_its_command cmd;

	KASSERT((rdbase & 0xffff) == 0);

	/*
	 * Map a collection table entry (ICID) to the target redistributor
	 * (RDbase).
	 */
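	/*
	 * Command layout: DW0[7:0] holds the opcode and DW2[15:0] the ICID,
	 * with RDbase in the upper bits of DW2 and V in bit 63.  Leaving
	 * RDbase and V clear (v == false) unmaps the collection.
	 */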
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_MAPC;
	cmd.dw[2] = icid;
	if (v) {
		cmd.dw[2] |= rdbase;
		cmd.dw[2] |= __BIT(63);
	}

	gits_command(its, &cmd);
}

static inline void
gits_command_mapd(struct gicv3_its *its, uint32_t deviceid, uint64_t itt_addr,
    u_int size, bool v)
{
	struct gicv3_its_command cmd;

	KASSERT((itt_addr & 0xff) == 0);

	/*
	 * Map a device table entry (DeviceID) to its associated ITT
	 * (ITT_addr).
	 */
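	/*
	 * The size field is the number of EventID bits for this device,
	 * minus one; the ITT must be large enough for 2^(size + 1) events.
	 */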
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_MAPD | ((uint64_t)deviceid << 32);
	cmd.dw[1] = size;
	if (v) {
		cmd.dw[2] = itt_addr | __BIT(63);
	}

	gits_command(its, &cmd);
}

static inline void
gits_command_mapi(struct gicv3_its *its, uint32_t deviceid, uint32_t eventid,
    uint16_t icid)
{
	struct gicv3_its_command cmd;

	/*
	 * Map the event defined by EventID and DeviceID into an ITT entry
	 * with ICID and pINTID = EventID.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_MAPI | ((uint64_t)deviceid << 32);
	cmd.dw[1] = eventid;
	cmd.dw[2] = icid;

	gits_command(its, &cmd);
}

static inline void
gits_command_movi(struct gicv3_its *its, uint32_t deviceid, uint32_t eventid,
    uint16_t icid)
{
	struct gicv3_its_command cmd;

	/*
	 * Update the ICID field in the ITT entry for the event defined by
	 * DeviceID and EventID.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_MOVI | ((uint64_t)deviceid << 32);
	cmd.dw[1] = eventid;
	cmd.dw[2] = icid;

	gits_command(its, &cmd);
}

static inline void
gits_command_inv(struct gicv3_its *its, uint32_t deviceid, uint32_t eventid)
{
	struct gicv3_its_command cmd;

	/*
	 * Ensure any caching in the redistributors associated with the
	 * specified EventID is consistent with the LPI configuration tables.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_INV | ((uint64_t)deviceid << 32);
	cmd.dw[1] = eventid;

	gits_command(its, &cmd);
}

static inline void
gits_command_invall(struct gicv3_its *its, uint16_t icid)
{
	struct gicv3_its_command cmd;

	/*
	 * Ensure any caching associated with this ICID is consistent with
	 * LPI configuration tables for all redistributors.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_INVALL;
	cmd.dw[2] = icid;

	gits_command(its, &cmd);
}

static inline void
gits_command_sync(struct gicv3_its *its, uint64_t rdbase)
{
	struct gicv3_its_command cmd;

	KASSERT((rdbase & 0xffff) == 0);

	/*
	 * Ensure all outstanding ITS operations associated with physical
	 * interrupts for the specified redistributor (RDbase) are globally
	 * observed before further ITS commands are executed.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_SYNC;
	cmd.dw[2] = rdbase;

	gits_command(its, &cmd);
}

static inline int
gits_wait(struct gicv3_its *its)
{
	u_int woff, roff;
	int retry;

	/*
	 * The ITS command queue is empty when CWRITER and CREADR specify the
	 * same base address offset value.
	 */
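	/* Poll for up to 1000 * 100us = 100ms before declaring a timeout. */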
	for (retry = 1000; retry > 0; retry--) {
		woff = gits_read_8(its, GITS_CWRITER) & GITS_CWRITER_Offset;
		roff = gits_read_8(its, GITS_CREADR) & GITS_CREADR_Offset;
		if (woff == roff)
			break;
		delay(100);
	}
	if (retry == 0) {
		device_printf(its->its_gic->sc_dev,
		    "ITS command queue timeout\n");
		return ETIMEDOUT;
	}

	return 0;
}

static int
gicv3_its_msi_alloc_lpi(struct gicv3_its *its,
    const struct pci_attach_args *pa)
{
	int n;

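	/*
	 * LPIs are allocated first-fit; a slot's pci_attach_args pointer
	 * doubles as its in-use mark.
	 */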
	for (n = 0; n < its->its_pic->pic_maxsources; n++) {
		if (its->its_pa[n] == NULL) {
			its->its_pa[n] = pa;
			return n + its->its_pic->pic_irqbase;
		}
	}

	return -1;
}

static void
gicv3_its_msi_free_lpi(struct gicv3_its *its, int lpi)
{
	KASSERT(lpi >= its->its_pic->pic_irqbase);
	its->its_pa[lpi - its->its_pic->pic_irqbase] = NULL;
}

static uint32_t
gicv3_its_devid(pci_chipset_tag_t pc, pcitag_t tag)
{
	int b, d, f;

	pci_decompose_tag(pc, tag, &b, &d, &f);

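	/*
	 * The DeviceID presented to the ITS is the PCI requester ID:
	 * bus[15:8], device[7:3], function[2:0].
	 */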
	return (b << 8) | (d << 3) | f;
}

static struct gicv3_its_device *
gicv3_its_device_lookup(struct gicv3_its *its, uint32_t devid)
{
	struct gicv3_its_device *dev;

	LIST_FOREACH(dev, &its->its_devices, dev_list)
		if (dev->dev_id == devid)
			return dev;

	const uint64_t typer = gits_read_8(its, GITS_TYPER);
	const u_int id_bits = __SHIFTOUT(typer, GITS_TYPER_ID_bits) + 1;
	const u_int itt_entry_size =
	    __SHIFTOUT(typer, GITS_TYPER_ITT_entry_size) + 1;
	const u_int itt_size =
	    roundup2(itt_entry_size * (1 << id_bits), GITS_ITT_ALIGN);

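	/*
	 * No ITT for this device yet: allocate one big enough for the full
	 * EventID space the ITS supports, as reported by GITS_TYPER.
	 */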
	dev = kmem_alloc(sizeof(*dev), KM_SLEEP);
	dev->dev_id = devid;
	gicv3_dma_alloc(its->its_gic, &dev->dev_itt, itt_size, GITS_ITT_ALIGN);
	LIST_INSERT_HEAD(&its->its_devices, dev, dev_list);

	return dev;
}

static void
gicv3_its_msi_enable(struct gicv3_its *its, int lpi)
{
	const struct pci_attach_args *pa =
	    its->its_pa[lpi - its->its_pic->pic_irqbase];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL))
		panic("gicv3_its_msi_enable: device is not MSI-capable");

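	/*
	 * Program the GITS_TRANSLATER doorbell as the message address and
	 * the LPI number (the EventID) as the message data; the bus conveys
	 * the DeviceID (here the PCI requester ID) alongside the write.
	 */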
	const uint64_t addr = its->its_base + GITS_TRANSLATER;
	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	if (ctl & PCI_MSI_CTL_64BIT_ADDR) {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_LO,
		    addr & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_HI,
		    (addr >> 32) & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA64, lpi);
	} else {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR,
		    addr & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA, lpi);
	}
	ctl |= PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}

static void
gicv3_its_msi_disable(struct gicv3_its *its, int lpi)
{
	const struct pci_attach_args *pa =
	    its->its_pa[lpi - its->its_pic->pic_irqbase];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL))
		panic("gicv3_its_msi_disable: device is not MSI-capable");

	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	ctl &= ~PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}

static void
gicv3_its_msix_enable(struct gicv3_its *its, int lpi, int msix_vec,
    bus_space_tag_t bst, bus_space_handle_t bsh)
{
	const struct pci_attach_args *pa =
	    its->its_pa[lpi - its->its_pic->pic_irqbase];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL))
		panic("gicv3_its_msix_enable: device is not MSI-X-capable");

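	/*
	 * Fill in this vector's MSI-X table entry: the GITS_TRANSLATER
	 * doorbell address, the LPI number (EventID) as data, and a zero
	 * vector control word to unmask the entry.
	 */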
	const uint64_t addr = its->its_base + GITS_TRANSLATER;
	const uint64_t entry_base = PCI_MSIX_TABLE_ENTRY_SIZE * msix_vec;
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_LO,
	    (uint32_t)addr);
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_HI,
	    (uint32_t)(addr >> 32));
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_DATA,
	    lpi);
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL,
	    0);

	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl |= PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
}

static void
gicv3_its_msix_disable(struct gicv3_its *its, int lpi)
{
	const struct pci_attach_args *pa =
	    its->its_pa[lpi - its->its_pic->pic_irqbase];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL))
		panic("gicv3_its_msix_disable: device is not MSI-X-capable");

	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl &= ~PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
}

static pci_intr_handle_t *
gicv3_its_msi_alloc(struct arm_pci_msi *msi, int *count,
    const struct pci_attach_args *pa, bool exact)
{
	struct gicv3_its * const its = msi->msi_priv;
	struct cpu_info * const ci = cpu_lookup(0);
	struct gicv3_its_device *dev;
	pci_intr_handle_t *vectors;
	int n, off;

	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI, &off,
	    NULL))
		return NULL;

	const uint64_t typer = gits_read_8(its, GITS_TYPER);
	const u_int id_bits = __SHIFTOUT(typer, GITS_TYPER_ID_bits) + 1;
	if (*count == 0 || *count > (1 << id_bits))
		return NULL;

	const uint32_t devid = gicv3_its_devid(pa->pa_pc, pa->pa_tag);

	/*
	 * Map device
	 */
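	/*
	 * A single MAPD per DeviceID covers every vector allocated below;
	 * the size argument is the number of EventID bits minus one.
	 */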
	dev = gicv3_its_device_lookup(its, devid);
	gits_command_mapd(its, devid, dev->dev_itt.segs[0].ds_addr,
	    id_bits - 1, true);
	gits_wait(its);

	vectors = kmem_alloc(sizeof(*vectors) * *count, KM_SLEEP);
	for (n = 0; n < *count; n++) {
		const int lpi = gicv3_its_msi_alloc_lpi(its, pa);
		vectors[n] = ARM_PCI_INTR_MSI |
		    __SHIFTIN(lpi, ARM_PCI_INTR_IRQ) |
		    __SHIFTIN(n, ARM_PCI_INTR_MSI_VEC) |
		    __SHIFTIN(msi->msi_id, ARM_PCI_INTR_FRAME);

		gicv3_its_msi_enable(its, lpi);

		/*
		 * Record target PE
		 */
		its->its_targets[lpi - its->its_pic->pic_irqbase] = ci;

		/*
		 * Map event
		 */
		gits_command_mapi(its, devid, lpi, cpu_index(ci));
		gits_command_sync(its, its->its_rdbase[cpu_index(ci)]);
	}
	gits_wait(its);

	return vectors;
}

static pci_intr_handle_t *
gicv3_its_msix_alloc(struct arm_pci_msi *msi, u_int *table_indexes, int *count,
    const struct pci_attach_args *pa, bool exact)
{
	struct gicv3_its * const its = msi->msi_priv;
	struct cpu_info *ci = cpu_lookup(0);
	struct gicv3_its_device *dev;
	pci_intr_handle_t *vectors;
	bus_space_tag_t bst;
	bus_space_handle_t bsh;
	bus_size_t bsz;
	uint32_t table_offset, table_size;
	int n, off, bar, error;
	pcireg_t tbl;

	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &off,
	    NULL))
		return NULL;

	const uint64_t typer = gits_read_8(its, GITS_TYPER);
	const u_int id_bits = __SHIFTOUT(typer, GITS_TYPER_ID_bits) + 1;
	if (*count == 0 || *count > (1 << id_bits))
		return NULL;

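	/*
	 * Locate and map the MSI-X table: the low bits of the table offset
	 * register select the BAR, the remaining bits give the offset of
	 * the table within it.
	 */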
	tbl = pci_conf_read(pa->pa_pc, pa->pa_tag, off + PCI_MSIX_TBLOFFSET);
	bar = PCI_BAR0 + (4 * (tbl & PCI_MSIX_TBLBIR_MASK));
	table_offset = tbl & PCI_MSIX_TBLOFFSET_MASK;
	table_size = pci_msix_count(pa->pa_pc, pa->pa_tag) *
	    PCI_MSIX_TABLE_ENTRY_SIZE;
	if (table_size == 0)
		return NULL;

	error = pci_mapreg_submap(pa, bar,
	    pci_mapreg_type(pa->pa_pc, pa->pa_tag, bar), BUS_SPACE_MAP_LINEAR,
	    roundup(table_size, PAGE_SIZE), table_offset, &bst, &bsh, NULL,
	    &bsz);
	if (error)
		return NULL;

	const uint32_t devid = gicv3_its_devid(pa->pa_pc, pa->pa_tag);

	/*
	 * Map device
	 */
	dev = gicv3_its_device_lookup(its, devid);
	gits_command_mapd(its, devid, dev->dev_itt.segs[0].ds_addr,
	    id_bits - 1, true);
	gits_wait(its);

	vectors = kmem_alloc(sizeof(*vectors) * *count, KM_SLEEP);
	for (n = 0; n < *count; n++) {
		const int lpi = gicv3_its_msi_alloc_lpi(its, pa);
		const int msix_vec = table_indexes ? table_indexes[n] : n;
		vectors[msix_vec] = ARM_PCI_INTR_MSIX |
		    __SHIFTIN(lpi, ARM_PCI_INTR_IRQ) |
		    __SHIFTIN(msix_vec, ARM_PCI_INTR_MSI_VEC) |
		    __SHIFTIN(msi->msi_id, ARM_PCI_INTR_FRAME);

		gicv3_its_msix_enable(its, lpi, msix_vec, bst, bsh);

		/*
		 * Record target PE
		 */
		its->its_targets[lpi - its->its_pic->pic_irqbase] = ci;

		/*
		 * Map event
		 */
		gits_command_mapi(its, devid, lpi, cpu_index(ci));
		gits_command_sync(its, its->its_rdbase[cpu_index(ci)]);
	}
	gits_wait(its);

	bus_space_unmap(bst, bsh, bsz);

	return vectors;
}

static void *
gicv3_its_msi_intr_establish(struct arm_pci_msi *msi,
    pci_intr_handle_t ih, int ipl, int (*func)(void *), void *arg,
    const char *xname)
{
	struct gicv3_its * const its = msi->msi_priv;
	const struct pci_attach_args *pa;
	void *intrh;

	const int lpi = __SHIFTOUT(ih, ARM_PCI_INTR_IRQ);
	const int mpsafe = (ih & ARM_PCI_INTR_MPSAFE) ? IST_MPSAFE : 0;

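	/* LPIs are edge-triggered by definition, hence IST_EDGE. */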
	intrh = pic_establish_intr(its->its_pic,
	    lpi - its->its_pic->pic_irqbase, ipl, IST_EDGE | mpsafe, func,
	    arg, xname);
	if (intrh == NULL)
		return NULL;

	/* Invalidate LPI configuration tables */
	pa = its->its_pa[lpi - its->its_pic->pic_irqbase];
	KASSERT(pa != NULL);
	const uint32_t devid = gicv3_its_devid(pa->pa_pc, pa->pa_tag);
	gits_command_inv(its, devid, lpi);

	return intrh;
}

static void
gicv3_its_msi_intr_release(struct arm_pci_msi *msi, pci_intr_handle_t *pih,
    int count)
{
	struct gicv3_its * const its = msi->msi_priv;
	int n;

	for (n = 0; n < count; n++) {
		const int lpi = __SHIFTOUT(pih[n], ARM_PCI_INTR_IRQ);
		KASSERT(lpi >= its->its_pic->pic_irqbase);
		if (pih[n] & ARM_PCI_INTR_MSIX)
			gicv3_its_msix_disable(its, lpi);
		if (pih[n] & ARM_PCI_INTR_MSI)
			gicv3_its_msi_disable(its, lpi);
		gicv3_its_msi_free_lpi(its, lpi);
		its->its_targets[lpi - its->its_pic->pic_irqbase] = NULL;
		struct intrsource * const is =
		    its->its_pic->pic_sources[lpi - its->its_pic->pic_irqbase];
		if (is != NULL)
			pic_disestablish_source(is);
	}
}

static void
gicv3_its_command_init(struct gicv3_softc *sc, struct gicv3_its *its)
{
	uint64_t cbaser;

	gicv3_dma_alloc(sc, &its->its_cmd, GITS_COMMANDS_SIZE,
	    GITS_COMMANDS_ALIGN);

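	/*
	 * GITS_CBASER.Size is the number of 4KB pages backing the queue,
	 * minus one; zeroing GITS_CWRITER starts with an empty queue.
	 */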
	cbaser = its->its_cmd.segs[0].ds_addr;
	cbaser |= __SHIFTIN(GITS_Cache_NORMAL_NC, GITS_CBASER_InnerCache);
	cbaser |= __SHIFTIN(GITS_Shareability_NS, GITS_CBASER_Shareability);
	cbaser |= __SHIFTIN((its->its_cmd.len / 4096) - 1, GITS_CBASER_Size);
	cbaser |= GITS_CBASER_Valid;

	gits_write_8(its, GITS_CBASER, cbaser);
	gits_write_8(its, GITS_CWRITER, 0);
}

static void
gicv3_its_table_init(struct gicv3_softc *sc, struct gicv3_its *its)
{
	u_int table_size, page_size, table_align;
	uint64_t baser;
	int tab;

	const uint64_t typer = gits_read_8(its, GITS_TYPER);
	const u_int devbits = __SHIFTOUT(typer, GITS_TYPER_Devbits) + 1;

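	/*
	 * Walk the eight GITS_BASER registers.  Each reports its table type,
	 * entry size, and supported page size; we only allocate backing
	 * memory for the table types we recognize.
	 */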
	for (tab = 0; tab < 8; tab++) {
		baser = gits_read_8(its, GITS_BASERn(tab));

		const u_int entry_size =
		    __SHIFTOUT(baser, GITS_BASER_Entry_Size) + 1;

		switch (__SHIFTOUT(baser, GITS_BASER_Page_Size)) {
		case GITS_Page_Size_4KB:
			page_size = 4096;
			table_align = 4096;
			break;
		case GITS_Page_Size_16KB:
			page_size = 16384;
			table_align = 4096;
			break;
		case GITS_Page_Size_64KB:
		default:
			page_size = 65536;
			table_align = 65536;
			break;
		}

		switch (__SHIFTOUT(baser, GITS_BASER_Type)) {
		case GITS_Type_Devices:
			/*
			 * Table size scales with the width of the DeviceID.
			 */
			table_size = roundup(entry_size * (1 << devbits),
			    page_size);
			break;
		case GITS_Type_InterruptCollections:
			/*
			 * Allocate space for one interrupt collection per
			 * CPU.
			 */
			table_size = roundup(entry_size * MAXCPUS, page_size);
			break;
		default:
			table_size = 0;
			break;
		}

		if (table_size == 0)
			continue;

		aprint_normal_dev(sc->sc_dev, "ITS TT%u type %#x size %#x\n",
		    tab, (u_int)__SHIFTOUT(baser, GITS_BASER_Type),
		    table_size);
		gicv3_dma_alloc(sc, &its->its_tab[tab], table_size,
		    table_align);

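		/*
		 * GITS_BASER.Size is in units of the selected page size,
		 * minus one.  The Indirect bit is left as read, so this
		 * assumes a flat (single-level) table layout.
		 */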
		baser &= ~GITS_BASER_Size;
		baser |= __SHIFTIN(table_size / page_size - 1,
		    GITS_BASER_Size);
		baser &= ~GITS_BASER_Physical_Address;
		baser |= its->its_tab[tab].segs[0].ds_addr;
		baser &= ~GITS_BASER_InnerCache;
		baser |= __SHIFTIN(GITS_Cache_NORMAL_NC,
		    GITS_BASER_InnerCache);
		baser &= ~GITS_BASER_Shareability;
		baser |= __SHIFTIN(GITS_Shareability_NS,
		    GITS_BASER_Shareability);
		baser |= GITS_BASER_Valid;

		gits_write_8(its, GITS_BASERn(tab), baser);
	}
}

static void
gicv3_its_enable(struct gicv3_softc *sc, struct gicv3_its *its)
{
	uint32_t ctlr;

	ctlr = gits_read_4(its, GITS_CTLR);
	ctlr |= GITS_CTLR_Enabled;
	gits_write_4(its, GITS_CTLR, ctlr);
}

static void
gicv3_its_cpu_init(void *priv, struct cpu_info *ci)
{
	struct gicv3_its * const its = priv;
	struct gicv3_softc * const sc = its->its_gic;
	uint64_t rdbase;

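	/*
	 * GITS_TYPER.PTA selects how commands address a redistributor:
	 * when set, RDbase is a physical address; when clear, it is the
	 * processor number, shifted into the RDbase field of the command.
	 */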
	const uint64_t typer = bus_space_read_8(sc->sc_bst, its->its_bsh,
	    GITS_TYPER);
	if (typer & GITS_TYPER_PTA) {
		void *va = bus_space_vaddr(sc->sc_bst,
		    sc->sc_bsh_r[ci->ci_gic_redist]);
		rdbase = vtophys((vaddr_t)va);
	} else {
		rdbase = (uint64_t)sc->sc_processor_id[cpu_index(ci)] << 16;
	}
	its->its_rdbase[cpu_index(ci)] = rdbase;

	/*
	 * Map collection ID of this CPU's index to this CPU's redistributor.
	 */
	gits_command_mapc(its, cpu_index(ci), rdbase, true);
	gits_command_invall(its, cpu_index(ci));
	gits_wait(its);
}

static void
gicv3_its_get_affinity(void *priv, size_t irq, kcpuset_t *affinity)
{
	struct gicv3_its * const its = priv;
	struct cpu_info *ci;

	kcpuset_zero(affinity);
	ci = its->its_targets[irq];
	if (ci)
		kcpuset_set(affinity, cpu_index(ci));
}

static int
gicv3_its_set_affinity(void *priv, size_t irq, const kcpuset_t *affinity)
{
	struct gicv3_its * const its = priv;
	const struct pci_attach_args *pa;
	struct cpu_info *ci;

	const int set = kcpuset_countset(affinity);
	if (set != 1)
		return EINVAL;

	pa = its->its_pa[irq];
	if (pa == NULL)
		return EINVAL;

	ci = cpu_lookup(kcpuset_ffs(affinity) - 1);

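	/*
	 * MOVI retargets the ITT entry for this event at a new collection;
	 * collection IDs map 1:1 to kernel CPU indexes here.
	 */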
	const uint32_t devid = gicv3_its_devid(pa->pa_pc, pa->pa_tag);
	gits_command_movi(its, devid, irq + its->its_pic->pic_irqbase,
	    cpu_index(ci));
	gits_command_sync(its, its->its_rdbase[cpu_index(ci)]);

	its->its_targets[irq] = ci;

	return 0;
}

int
gicv3_its_init(struct gicv3_softc *sc, bus_space_handle_t bsh,
    uint64_t its_base, uint32_t its_id)
{
	struct gicv3_its *its;
	struct arm_pci_msi *msi;

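	/*
	 * GITS_TYPER.Physical indicates support for physical LPIs; an ITS
	 * without it is of no use to us.
	 */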
	const uint64_t typer = bus_space_read_8(sc->sc_bst, bsh, GITS_TYPER);
	if ((typer & GITS_TYPER_Physical) == 0)
		return ENXIO;

	its = kmem_alloc(sizeof(*its), KM_SLEEP);
	its->its_id = its_id;
	its->its_bst = sc->sc_bst;
	its->its_bsh = bsh;
	its->its_dmat = sc->sc_dmat;
	its->its_base = its_base;
	its->its_pic = &sc->sc_lpi;
	KASSERT(its->its_pic->pic_maxsources > 0);
	its->its_pa = kmem_zalloc(sizeof(struct pci_attach_args *) *
	    its->its_pic->pic_maxsources, KM_SLEEP);
	its->its_targets = kmem_zalloc(sizeof(struct cpu_info *) *
	    its->its_pic->pic_maxsources, KM_SLEEP);
	its->its_gic = sc;
	its->its_cb.cpu_init = gicv3_its_cpu_init;
	its->its_cb.get_affinity = gicv3_its_get_affinity;
	its->its_cb.set_affinity = gicv3_its_set_affinity;
	its->its_cb.priv = its;
	LIST_INIT(&its->its_devices);
	LIST_INSERT_HEAD(&sc->sc_lpi_callbacks, &its->its_cb, list);

	gicv3_its_command_init(sc, its);
	gicv3_its_table_init(sc, its);

	gicv3_its_enable(sc, its);

	gicv3_its_cpu_init(its, curcpu());

	msi = &its->its_msi;
	msi->msi_dev = sc->sc_dev;
	msi->msi_priv = its;
	msi->msi_alloc = gicv3_its_msi_alloc;
	msi->msix_alloc = gicv3_its_msix_alloc;
	msi->msi_intr_establish = gicv3_its_msi_intr_establish;
	msi->msi_intr_release = gicv3_its_msi_intr_release;

	return arm_pci_msi_add(msi);
}