/* $NetBSD: gicv3_its.c,v 1.7 2018/11/23 16:01:27 jmcneill Exp $ */

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jared McNeill <jmcneill (at) invisible.ca>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gicv3_its.c,v 1.7 2018/11/23 16:01:27 jmcneill Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/bitops.h>

#include <uvm/uvm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <arm/pic/picvar.h>
#include <arm/cortex/gicv3_its.h>

/*
 * ITS translation table sizes
 */
#define	GITS_COMMANDS_SIZE	0x1000
#define	GITS_COMMANDS_ALIGN	0x10000

#define	GITS_ITT_ALIGN		0x100

static inline uint32_t
gits_read_4(struct gicv3_its *its, bus_size_t reg)
{
	return bus_space_read_4(its->its_bst, its->its_bsh, reg);
}

static inline void
gits_write_4(struct gicv3_its *its, bus_size_t reg, uint32_t val)
{
	bus_space_write_4(its->its_bst, its->its_bsh, reg, val);
}

static inline uint64_t
gits_read_8(struct gicv3_its *its, bus_size_t reg)
{
	return bus_space_read_8(its->its_bst, its->its_bsh, reg);
}

static inline void
gits_write_8(struct gicv3_its *its, bus_size_t reg, uint64_t val)
{
	bus_space_write_8(its->its_bst, its->its_bsh, reg, val);
}

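/*
 * Enqueue a single ITS command. Each command occupies one 32-byte slot
 * (four 64-bit doublewords) in the command queue: copy it into the slot
 * at the current GITS_CWRITER offset, make it visible to the device,
 * then advance CWRITER past it, wrapping at the end of the queue. The
 * ITS consumes commands and advances GITS_CREADR asynchronously;
 * completion is observed in gits_wait() below.
 */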
static inline void
gits_command(struct gicv3_its *its, const struct gicv3_its_command *cmd)
{
	uint64_t cwriter;
	u_int woff;

	cwriter = gits_read_8(its, GITS_CWRITER);
	woff = cwriter & GITS_CWRITER_Offset;

	memcpy(its->its_cmd.base + woff, cmd->dw, sizeof(cmd->dw));
	bus_dmamap_sync(its->its_dmat, its->its_cmd.map, woff, sizeof(cmd->dw), BUS_DMASYNC_PREWRITE);

	woff += sizeof(cmd->dw);
	if (woff == its->its_cmd.len)
		woff = 0;

	gits_write_8(its, GITS_CWRITER, woff);
}

static inline void
gits_command_mapc(struct gicv3_its *its, uint16_t icid, uint64_t rdbase, bool v)
{
	struct gicv3_its_command cmd;

	KASSERT((rdbase & 0xffff) == 0);

	/*
	 * Map a collection table entry (ICID) to the target redistributor (RDbase).
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_MAPC;
	cmd.dw[2] = icid;
	if (v) {
		cmd.dw[2] |= rdbase;
		cmd.dw[2] |= __BIT(63);
	}

	gits_command(its, &cmd);
}

static inline void
gits_command_mapd(struct gicv3_its *its, uint32_t deviceid, uint64_t itt_addr, u_int size, bool v)
{
	struct gicv3_its_command cmd;

	KASSERT((itt_addr & 0xff) == 0);

	/*
	 * Map a device table entry (DeviceID) to its associated ITT (ITT_addr).
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_MAPD | ((uint64_t)deviceid << 32);
	cmd.dw[1] = size;
	if (v) {
		cmd.dw[2] = itt_addr | __BIT(63);
	}

	gits_command(its, &cmd);
}

static inline void
gits_command_mapi(struct gicv3_its *its, uint32_t deviceid, uint32_t eventid, uint16_t icid)
{
	struct gicv3_its_command cmd;

	/*
	 * Map the event defined by EventID and DeviceID into an ITT entry with
	 * ICID and pINTID = EventID.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_MAPI | ((uint64_t)deviceid << 32);
	cmd.dw[1] = eventid;
	cmd.dw[2] = icid;

	gits_command(its, &cmd);
}

static inline void
gits_command_movi(struct gicv3_its *its, uint32_t deviceid, uint32_t eventid, uint16_t icid)
{
	struct gicv3_its_command cmd;

	/*
	 * Update the ICID field in the ITT entry for the event defined by
	 * DeviceID and EventID.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_MOVI | ((uint64_t)deviceid << 32);
	cmd.dw[1] = eventid;
	cmd.dw[2] = icid;

	gits_command(its, &cmd);
}

static inline void
gits_command_inv(struct gicv3_its *its, uint32_t deviceid, uint32_t eventid)
{
	struct gicv3_its_command cmd;

	/*
	 * Ensure any caching in the redistributors associated with the specified
	 * EventID is consistent with the LPI configuration tables.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_INV | ((uint64_t)deviceid << 32);
	cmd.dw[1] = eventid;

	gits_command(its, &cmd);
}

static inline void
gits_command_invall(struct gicv3_its *its, uint16_t icid)
{
	struct gicv3_its_command cmd;

	/*
	 * Ensure any caching associated with this ICID is consistent with LPI
	 * configuration tables for all redistributors.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_INVALL;
	cmd.dw[2] = icid;

	gits_command(its, &cmd);
}

static inline void
gits_command_sync(struct gicv3_its *its, uint64_t rdbase)
{
	struct gicv3_its_command cmd;

	KASSERT((rdbase & 0xffff) == 0);

	/*
	 * Ensure all outstanding ITS operations associated with physical interrupts
	 * for the specified redistributor (RDbase) are globally observed before
	 * further ITS commands are executed.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_SYNC;
	cmd.dw[2] = rdbase;

	gits_command(its, &cmd);
}

static inline int
gits_wait(struct gicv3_its *its)
{
	u_int woff, roff;
	int retry;

	/*
	 * The ITS command queue is empty when CWRITER and CREADR specify the
	 * same base address offset value. Poll for up to ~100ms (1000
	 * iterations of 100us) before giving up.
	 */
	for (retry = 1000; retry > 0; retry--) {
		woff = gits_read_8(its, GITS_CWRITER) & GITS_CWRITER_Offset;
		roff = gits_read_8(its, GITS_CREADR) & GITS_CREADR_Offset;
		if (woff == roff)
			break;
		delay(100);
	}
	if (retry == 0) {
		device_printf(its->its_gic->sc_dev, "ITS command queue timeout\n");
		return ETIMEDOUT;
	}

	return 0;
}

static int
gicv3_its_msi_alloc_lpi(struct gicv3_its *its,
    const struct pci_attach_args *pa)
{
	int n;

	for (n = 0; n < its->its_pic->pic_maxsources; n++) {
		if (its->its_pa[n] == NULL) {
			its->its_pa[n] = pa;
			return n + its->its_pic->pic_irqbase;
		}
	}

	return -1;
}

static void
gicv3_its_msi_free_lpi(struct gicv3_its *its, int lpi)
{
	KASSERT(lpi >= its->its_pic->pic_irqbase);
	its->its_pa[lpi - its->its_pic->pic_irqbase] = NULL;
}

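/*
 * Derive the ITS DeviceID for a PCI device from its requester ID
 * (bus/device/function). For example, a (hypothetical) device at
 * bus 2, device 3, function 1 yields (2 << 8) | (3 << 3) | 1 = 0x219.
 * This assumes the platform presents PCI requester IDs to the ITS
 * unmodified, with no per-host-bridge DeviceID offset.
 */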
static uint32_t
gicv3_its_devid(pci_chipset_tag_t pc, pcitag_t tag)
{
	int b, d, f;

	pci_decompose_tag(pc, tag, &b, &d, &f);

	return (b << 8) | (d << 3) | f;
}

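/*
 * Allocate an interrupt translation table (ITT) for a device and bind it
 * with a MAPD command. The ITT needs one entry per event; each entry is
 * GITS_TYPER.ITT_entry_size bytes, and the table must be 256-byte
 * aligned (hence the KASSERT in gits_command_mapd() and the roundup to
 * GITS_ITT_ALIGN here).
 */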
static int
gicv3_its_device_map(struct gicv3_its *its, uint32_t devid, u_int count)
{
	struct gicv3_its_device *dev;

	LIST_FOREACH(dev, &its->its_devices, dev_list)
		if (dev->dev_id == devid)
			return EEXIST;

	const u_int vectors = MAX(2, count);
	if (!powerof2(vectors))
		return EINVAL;

	const uint64_t typer = gits_read_8(its, GITS_TYPER);
	const u_int id_bits = __SHIFTOUT(typer, GITS_TYPER_ID_bits) + 1;
	const u_int itt_entry_size = __SHIFTOUT(typer, GITS_TYPER_ITT_entry_size) + 1;
	const u_int itt_size = roundup(vectors * itt_entry_size, GITS_ITT_ALIGN);

	dev = kmem_alloc(sizeof(*dev), KM_SLEEP);
	dev->dev_id = devid;
	gicv3_dma_alloc(its->its_gic, &dev->dev_itt, itt_size, GITS_ITT_ALIGN);
	LIST_INSERT_HEAD(&its->its_devices, dev, dev_list);

	/*
	 * Map the device to the ITT
	 */
	gits_command_mapd(its, devid, dev->dev_itt.segs[0].ds_addr, id_bits - 1, true);
	gits_wait(its);

	return 0;
}

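/*
 * Program a device's MSI capability so that it raises the given LPI:
 * the message address is the physical address of the ITS GITS_TRANSLATER
 * register, and the message data is the LPI number, which the ITS
 * receives as the EventID for this DeviceID.
 */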
static void
gicv3_its_msi_enable(struct gicv3_its *its, int lpi)
{
	const struct pci_attach_args *pa = its->its_pa[lpi - its->its_pic->pic_irqbase];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL))
		panic("gicv3_its_msi_enable: device is not MSI-capable");

	const uint64_t addr = its->its_base + GITS_TRANSLATER;
	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	if (ctl & PCI_MSI_CTL_64BIT_ADDR) {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_LO,
		    addr & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_HI,
		    (addr >> 32) & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA64, lpi);
	} else {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR,
		    addr & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA, lpi);
	}
	ctl |= PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}

static void
gicv3_its_msi_disable(struct gicv3_its *its, int lpi)
{
	const struct pci_attach_args *pa = its->its_pa[lpi - its->its_pic->pic_irqbase];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL))
		panic("gicv3_its_msi_disable: device is not MSI-capable");

	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	ctl &= ~PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}

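/*
 * Program one MSI-X table entry through the caller-provided mapping of
 * the vector table: address is GITS_TRANSLATER, data is the LPI number,
 * and the vector control word is cleared to unmask the entry.
 */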
static void
gicv3_its_msix_enable(struct gicv3_its *its, int lpi, int msix_vec,
    bus_space_tag_t bst, bus_space_handle_t bsh)
{
	const struct pci_attach_args *pa = its->its_pa[lpi - its->its_pic->pic_irqbase];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL))
		panic("gicv3_its_msix_enable: device is not MSI-X-capable");

	const uint64_t addr = its->its_base + GITS_TRANSLATER;
	const uint64_t entry_base = PCI_MSIX_TABLE_ENTRY_SIZE * msix_vec;
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_LO, (uint32_t)addr);
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_HI, (uint32_t)(addr >> 32));
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_DATA, lpi);
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL, 0);

	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl |= PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
}

static void
gicv3_its_msix_disable(struct gicv3_its *its, int lpi)
{
	const struct pci_attach_args *pa = its->its_pa[lpi - its->its_pic->pic_irqbase];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL))
		panic("gicv3_its_msix_disable: device is not MSI-X-capable");

	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl &= ~PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
}

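/*
 * arm_pci_msi "msi_alloc" backend. Verify the device is MSI-capable and
 * that the request fits in the EventID space advertised by GITS_TYPER,
 * bind the device to an ITT with MAPD, then for each vector: reserve an
 * LPI, program the MSI capability, and MAPI the event to the collection
 * of cpu0 (interrupts are initially targeted at the boot CPU). Consumers
 * decode the returned handles with the ARM_PCI_INTR_* fields, e.g.
 * __SHIFTOUT(vectors[0], ARM_PCI_INTR_IRQ) recovers the LPI number.
 */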
static pci_intr_handle_t *
gicv3_its_msi_alloc(struct arm_pci_msi *msi, int *count,
    const struct pci_attach_args *pa, bool exact)
{
	struct gicv3_its * const its = msi->msi_priv;
	struct cpu_info * const ci = cpu_lookup(0);
	pci_intr_handle_t *vectors;
	int n, off;

	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI, &off, NULL))
		return NULL;

	const uint64_t typer = gits_read_8(its, GITS_TYPER);
	const u_int id_bits = __SHIFTOUT(typer, GITS_TYPER_ID_bits) + 1;
	if (*count == 0 || *count > (1 << id_bits))
		return NULL;

	const uint32_t devid = gicv3_its_devid(pa->pa_pc, pa->pa_tag);

	if (gicv3_its_device_map(its, devid, *count) != 0)
		return NULL;

	vectors = kmem_alloc(sizeof(*vectors) * *count, KM_SLEEP);
	for (n = 0; n < *count; n++) {
		const int lpi = gicv3_its_msi_alloc_lpi(its, pa);
		vectors[n] = ARM_PCI_INTR_MSI |
		    __SHIFTIN(lpi, ARM_PCI_INTR_IRQ) |
		    __SHIFTIN(n, ARM_PCI_INTR_MSI_VEC) |
		    __SHIFTIN(msi->msi_id, ARM_PCI_INTR_FRAME);

		gicv3_its_msi_enable(its, lpi);

		/*
		 * Record target PE
		 */
		its->its_targets[lpi - its->its_pic->pic_irqbase] = ci;

		/*
		 * Map event
		 */
		gits_command_mapi(its, devid, lpi, cpu_index(ci));
		gits_command_sync(its, its->its_rdbase[cpu_index(ci)]);
	}
	gits_wait(its);

	return vectors;
}

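/*
 * arm_pci_msi "msix_alloc" backend. Same flow as gicv3_its_msi_alloc(),
 * except the MSI-X vector table is located via the capability's table
 * offset/BIR word and temporarily mapped so each table entry can be
 * written directly; the mapping is dropped once the entries are set up.
 */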
static pci_intr_handle_t *
gicv3_its_msix_alloc(struct arm_pci_msi *msi, u_int *table_indexes, int *count,
    const struct pci_attach_args *pa, bool exact)
{
	struct gicv3_its * const its = msi->msi_priv;
	struct cpu_info *ci = cpu_lookup(0);
	pci_intr_handle_t *vectors;
	bus_space_tag_t bst;
	bus_space_handle_t bsh;
	bus_size_t bsz;
	uint32_t table_offset, table_size;
	int n, off, bar, error;
	pcireg_t tbl;

	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &off, NULL))
		return NULL;

	const uint64_t typer = gits_read_8(its, GITS_TYPER);
	const u_int id_bits = __SHIFTOUT(typer, GITS_TYPER_ID_bits) + 1;
	if (*count == 0 || *count > (1 << id_bits))
		return NULL;

	tbl = pci_conf_read(pa->pa_pc, pa->pa_tag, off + PCI_MSIX_TBLOFFSET);
	bar = PCI_BAR0 + (4 * (tbl & PCI_MSIX_PBABIR_MASK));
	table_offset = tbl & PCI_MSIX_TBLOFFSET_MASK;
	table_size = pci_msix_count(pa->pa_pc, pa->pa_tag) * PCI_MSIX_TABLE_ENTRY_SIZE;
	if (table_size == 0)
		return NULL;

	error = pci_mapreg_submap(pa, bar, pci_mapreg_type(pa->pa_pc, pa->pa_tag, bar),
	    BUS_SPACE_MAP_LINEAR, roundup(table_size, PAGE_SIZE), table_offset,
	    &bst, &bsh, NULL, &bsz);
	if (error)
		return NULL;

	const uint32_t devid = gicv3_its_devid(pa->pa_pc, pa->pa_tag);

	if (gicv3_its_device_map(its, devid, *count) != 0) {
		bus_space_unmap(bst, bsh, bsz);
		return NULL;
	}

	vectors = kmem_alloc(sizeof(*vectors) * *count, KM_SLEEP);
	for (n = 0; n < *count; n++) {
		const int lpi = gicv3_its_msi_alloc_lpi(its, pa);
		const int msix_vec = table_indexes ? table_indexes[n] : n;
		vectors[msix_vec] = ARM_PCI_INTR_MSIX |
		    __SHIFTIN(lpi, ARM_PCI_INTR_IRQ) |
		    __SHIFTIN(msix_vec, ARM_PCI_INTR_MSI_VEC) |
		    __SHIFTIN(msi->msi_id, ARM_PCI_INTR_FRAME);

		gicv3_its_msix_enable(its, lpi, msix_vec, bst, bsh);

		/*
		 * Record target PE
		 */
		its->its_targets[lpi - its->its_pic->pic_irqbase] = ci;

		/*
		 * Map event
		 */
		gits_command_mapi(its, devid, lpi, cpu_index(ci));
		gits_command_sync(its, its->its_rdbase[cpu_index(ci)]);
	}
	gits_wait(its);

	bus_space_unmap(bst, bsh, bsz);

	return vectors;
}

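/*
 * Establish the handler for a previously allocated vector, then issue an
 * INV command so any redistributor caching of this event's LPI
 * configuration (priority, enable bit) is refreshed.
 */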
static void *
gicv3_its_msi_intr_establish(struct arm_pci_msi *msi,
    pci_intr_handle_t ih, int ipl, int (*func)(void *), void *arg, const char *xname)
{
	struct gicv3_its * const its = msi->msi_priv;
	const struct pci_attach_args *pa;
	void *intrh;

	const int lpi = __SHIFTOUT(ih, ARM_PCI_INTR_IRQ);
	const int mpsafe = (ih & ARM_PCI_INTR_MPSAFE) ? IST_MPSAFE : 0;

	intrh = pic_establish_intr(its->its_pic, lpi - its->its_pic->pic_irqbase, ipl,
	    IST_EDGE | mpsafe, func, arg, xname);
	if (intrh == NULL)
		return NULL;

	/* Invalidate LPI configuration tables */
	pa = its->its_pa[lpi - its->its_pic->pic_irqbase];
	KASSERT(pa != NULL);
	const uint32_t devid = gicv3_its_devid(pa->pa_pc, pa->pa_tag);
	gits_command_inv(its, devid, lpi);

	return intrh;
}

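/*
 * Release vectors: disable MSI/MSI-X generation at the device, return
 * each LPI to the allocator, and tear down the established interrupt
 * source, if any.
 */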
static void
gicv3_its_msi_intr_release(struct arm_pci_msi *msi, pci_intr_handle_t *pih,
    int count)
{
	struct gicv3_its * const its = msi->msi_priv;
	int n;

	for (n = 0; n < count; n++) {
		const int lpi = __SHIFTOUT(pih[n], ARM_PCI_INTR_IRQ);
		KASSERT(lpi >= its->its_pic->pic_irqbase);
		if (pih[n] & ARM_PCI_INTR_MSIX)
			gicv3_its_msix_disable(its, lpi);
		if (pih[n] & ARM_PCI_INTR_MSI)
			gicv3_its_msi_disable(its, lpi);
		gicv3_its_msi_free_lpi(its, lpi);
		its->its_targets[lpi - its->its_pic->pic_irqbase] = NULL;
		struct intrsource * const is =
		    its->its_pic->pic_sources[lpi - its->its_pic->pic_irqbase];
		if (is != NULL)
			pic_disestablish_source(is);
	}
}

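/*
 * Allocate the ITS command queue and publish it through GITS_CBASER.
 * The queue is physically contiguous, its size is encoded as the number
 * of 4KB pages minus one, and CWRITER starts at offset 0 (an empty
 * queue).
 */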
static void
gicv3_its_command_init(struct gicv3_softc *sc, struct gicv3_its *its)
{
	uint64_t cbaser;

	gicv3_dma_alloc(sc, &its->its_cmd, GITS_COMMANDS_SIZE, GITS_COMMANDS_ALIGN);

	cbaser = its->its_cmd.segs[0].ds_addr;
	cbaser |= __SHIFTIN(GITS_Cache_NORMAL_NC, GITS_CBASER_InnerCache);
	cbaser |= __SHIFTIN(GITS_Shareability_NS, GITS_CBASER_Shareability);
	cbaser |= __SHIFTIN((its->its_cmd.len / 4096) - 1, GITS_CBASER_Size);
	cbaser |= GITS_CBASER_Valid;

	gits_write_8(its, GITS_CBASER, cbaser);
	gits_write_8(its, GITS_CWRITER, 0);
}

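/*
 * Probe the eight GITS_BASERn registers and allocate backing memory for
 * the table types this driver uses: the device table is sized by
 * GITS_TYPER.Devbits, and the collection table gets one entry per
 * possible CPU. Table types we do not handle are left untouched.
 */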
static void
gicv3_its_table_init(struct gicv3_softc *sc, struct gicv3_its *its)
{
	u_int table_size, page_size, table_align;
	uint64_t baser;
	int tab;

	const uint64_t typer = gits_read_8(its, GITS_TYPER);
	const u_int devbits = __SHIFTOUT(typer, GITS_TYPER_Devbits) + 1;

	for (tab = 0; tab < 8; tab++) {
		baser = gits_read_8(its, GITS_BASERn(tab));

		const u_int entry_size = __SHIFTOUT(baser, GITS_BASER_Entry_Size) + 1;

		switch (__SHIFTOUT(baser, GITS_BASER_Page_Size)) {
		case GITS_Page_Size_4KB:
			page_size = 4096;
			table_align = 4096;
			break;
		case GITS_Page_Size_16KB:
			page_size = 16384;
			table_align = 4096;
			break;
		case GITS_Page_Size_64KB:
		default:
			page_size = 65536;
			table_align = 65536;
			break;
		}

		switch (__SHIFTOUT(baser, GITS_BASER_Type)) {
		case GITS_Type_Devices:
			/*
			 * Table size scales with the width of the DeviceID.
			 */
			table_size = roundup(entry_size * (1 << devbits), page_size);
			break;
		case GITS_Type_InterruptCollections:
			/*
			 * Allocate space for one interrupt collection per CPU.
			 */
			table_size = roundup(entry_size * MAXCPUS, page_size);
			break;
		default:
			table_size = 0;
			break;
		}

		if (table_size == 0)
			continue;

		aprint_normal_dev(sc->sc_dev, "ITS TT%u type %#x size %#x\n",
		    tab, (u_int)__SHIFTOUT(baser, GITS_BASER_Type), table_size);
		gicv3_dma_alloc(sc, &its->its_tab[tab], table_size, table_align);

		baser &= ~GITS_BASER_Size;
		baser |= __SHIFTIN(table_size / page_size - 1, GITS_BASER_Size);
		baser &= ~GITS_BASER_Physical_Address;
		baser |= its->its_tab[tab].segs[0].ds_addr;
		baser &= ~GITS_BASER_InnerCache;
		baser |= __SHIFTIN(GITS_Cache_NORMAL_NC, GITS_BASER_InnerCache);
		baser &= ~GITS_BASER_Shareability;
		baser |= __SHIFTIN(GITS_Shareability_NS, GITS_BASER_Shareability);
		baser |= GITS_BASER_Valid;

		gits_write_8(its, GITS_BASERn(tab), baser);
	}
}

static void
gicv3_its_enable(struct gicv3_softc *sc, struct gicv3_its *its)
{
	uint32_t ctlr;

	ctlr = gits_read_4(its, GITS_CTLR);
	ctlr |= GITS_CTLR_Enabled;
	gits_write_4(its, GITS_CTLR, ctlr);
}

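/*
 * Per-CPU setup. Compute the redistributor target (RDbase) used in
 * MAPC/SYNC commands: if GITS_TYPER.PTA is set the ITS expects the
 * physical address of the redistributor, otherwise it expects the
 * processor number, shifted into place at bit 16. Then map a collection
 * per CPU (ICID == cpu index) and invalidate any stale cached LPI state.
 */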
static void
gicv3_its_cpu_init(void *priv, struct cpu_info *ci)
{
	struct gicv3_its * const its = priv;
	struct gicv3_softc * const sc = its->its_gic;
	uint64_t rdbase;

	const uint64_t typer = bus_space_read_8(sc->sc_bst, its->its_bsh, GITS_TYPER);
	if (typer & GITS_TYPER_PTA) {
		void *va = bus_space_vaddr(sc->sc_bst, sc->sc_bsh_r[ci->ci_gic_redist]);
		rdbase = vtophys((vaddr_t)va);
	} else {
		rdbase = (uint64_t)sc->sc_processor_id[cpu_index(ci)] << 16;
	}
	its->its_rdbase[cpu_index(ci)] = rdbase;

	/*
	 * Map collection ID of this CPU's index to this CPU's redistributor.
	 */
	gits_command_mapc(its, cpu_index(ci), rdbase, true);
	gits_command_invall(its, cpu_index(ci));
	gits_wait(its);
}

static void
gicv3_its_get_affinity(void *priv, size_t irq, kcpuset_t *affinity)
{
	struct gicv3_its * const its = priv;
	struct cpu_info *ci;

	kcpuset_zero(affinity);
	ci = its->its_targets[irq];
	if (ci)
		kcpuset_set(affinity, cpu_index(ci));
}

static int
gicv3_its_set_affinity(void *priv, size_t irq, const kcpuset_t *affinity)
{
	struct gicv3_its * const its = priv;
	const struct pci_attach_args *pa;
	struct cpu_info *ci;

	const int set = kcpuset_countset(affinity);
	if (set != 1)
		return EINVAL;

	pa = its->its_pa[irq];
	if (pa == NULL)
		return EINVAL;

	ci = cpu_lookup(kcpuset_ffs(affinity) - 1);

	/*
	 * Retarget the event. The EventID is the LPI number, matching the
	 * MAPI commands issued at allocation time.
	 */
	const uint32_t devid = gicv3_its_devid(pa->pa_pc, pa->pa_tag);
	gits_command_movi(its, devid, irq + its->its_pic->pic_irqbase, cpu_index(ci));
	gits_command_sync(its, its->its_rdbase[cpu_index(ci)]);

	its->its_targets[irq] = ci;

	return 0;
}

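/*
 * Attach one ITS instance: physical LPIs are required; set up the
 * command queue and translation tables, enable the ITS, initialize the
 * boot CPU's collection, and register the MSI/MSI-X backend with the
 * arm_pci_msi framework. Secondary CPUs are initialized later via the
 * cpu_init callback registered on sc_lpi_callbacks.
 */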
int
gicv3_its_init(struct gicv3_softc *sc, bus_space_handle_t bsh,
    uint64_t its_base, uint32_t its_id)
{
	struct gicv3_its *its;
	struct arm_pci_msi *msi;

	const uint64_t typer = bus_space_read_8(sc->sc_bst, bsh, GITS_TYPER);
	if ((typer & GITS_TYPER_Physical) == 0)
		return ENXIO;

	its = kmem_alloc(sizeof(*its), KM_SLEEP);
	its->its_id = its_id;
	its->its_bst = sc->sc_bst;
	its->its_bsh = bsh;
	its->its_dmat = sc->sc_dmat;
	its->its_base = its_base;
	its->its_pic = &sc->sc_lpi;
	KASSERT(its->its_pic->pic_maxsources > 0);
	its->its_pa = kmem_zalloc(sizeof(struct pci_attach_args *) * its->its_pic->pic_maxsources, KM_SLEEP);
	its->its_targets = kmem_zalloc(sizeof(struct cpu_info *) * its->its_pic->pic_maxsources, KM_SLEEP);
	its->its_gic = sc;
	its->its_cb.cpu_init = gicv3_its_cpu_init;
	its->its_cb.get_affinity = gicv3_its_get_affinity;
	its->its_cb.set_affinity = gicv3_its_set_affinity;
	its->its_cb.priv = its;
	LIST_INIT(&its->its_devices);
	LIST_INSERT_HEAD(&sc->sc_lpi_callbacks, &its->its_cb, list);

	gicv3_its_command_init(sc, its);
	gicv3_its_table_init(sc, its);

	gicv3_its_enable(sc, its);

	gicv3_its_cpu_init(its, curcpu());

	msi = &its->its_msi;
	msi->msi_dev = sc->sc_dev;
	msi->msi_priv = its;
	msi->msi_alloc = gicv3_its_msi_alloc;
	msi->msix_alloc = gicv3_its_msix_alloc;
	msi->msi_intr_establish = gicv3_its_msi_intr_establish;
	msi->msi_intr_release = gicv3_its_msi_intr_release;

	return arm_pci_msi_add(msi);
}