/* $NetBSD: gicv3_its.c,v 1.41 2025/01/28 21:20:45 jmcneill Exp $ */

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jared McNeill <jmcneill@invisible.ca>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
30 */ 31 32 #define _INTR_PRIVATE 33 34 #include <sys/cdefs.h> 35 __KERNEL_RCSID(0, "$NetBSD: gicv3_its.c,v 1.41 2025/01/28 21:20:45 jmcneill Exp $"); 36 37 #include <sys/param.h> 38 #include <sys/kmem.h> 39 #include <sys/bus.h> 40 #include <sys/cpu.h> 41 #include <sys/bitops.h> 42 43 #include <uvm/uvm.h> 44 45 #include <dev/pci/pcireg.h> 46 #include <dev/pci/pcivar.h> 47 48 #include <machine/cpufunc.h> 49 50 #include <arm/pic/picvar.h> 51 #include <arm/cortex/gicv3_its.h> 52 53 #ifdef ITS_DEBUG 54 #define DPRINTF(x) printf x 55 #else 56 #define DPRINTF(x) __nothing 57 #endif 58 59 /* 60 * ITS translation table sizes 61 */ 62 #define GITS_COMMANDS_SIZE 0x1000 63 #define GITS_COMMANDS_ALIGN 0x10000 64 65 #define GITS_ITT_ALIGN 0x100 66 67 #define GITS_INDIRECT_ENTRY_SIZE 8 68 69 /* 70 * IIDR values used for errata 71 */ 72 #define GITS_IIDR_PID_CAVIUM_THUNDERX 0xa1 73 #define GITS_IIDR_IMP_CAVIUM 0x34c 74 #define GITS_IIDR_CAVIUM_ERRATA_MASK (GITS_IIDR_Implementor|GITS_IIDR_ProductID|GITS_IIDR_Variant) 75 #define GITS_IIDR_CAVIUM_ERRATA_VALUE \ 76 (__SHIFTIN(GITS_IIDR_IMP_CAVIUM, GITS_IIDR_Implementor) | \ 77 __SHIFTIN(GITS_IIDR_PID_CAVIUM_THUNDERX, GITS_IIDR_ProductID) | \ 78 __SHIFTIN(0, GITS_IIDR_Variant)) 79 80 static const char * gits_cache_type[] = { 81 [GITS_Cache_DEVICE_nGnRnE] = "Device-nGnRnE", 82 [GITS_Cache_NORMAL_NC] = "Non-cacheable", 83 [GITS_Cache_NORMAL_RA_WT] = "Cacheable RA WT", 84 [GITS_Cache_NORMAL_RA_WB] = "Cacheable RA WB", 85 [GITS_Cache_NORMAL_WA_WT] = "Cacheable WA WT", 86 [GITS_Cache_NORMAL_WA_WB] = "Cacheable WA WB", 87 [GITS_Cache_NORMAL_RA_WA_WT] = "Cacheable RA WA WT", 88 [GITS_Cache_NORMAL_RA_WA_WB] = "Cacheable RA WA WB", 89 }; 90 91 static const char * gits_share_type[] = { 92 [GITS_Shareability_NS] = "Non-shareable", 93 [GITS_Shareability_IS] = "Inner shareable", 94 [GITS_Shareability_OS] = "Outer shareable", 95 [3] = "(Reserved)", 96 }; 97 98 static inline uint32_t 99 gits_read_4(struct gicv3_its *its, bus_size_t reg) 100 { 101 
return bus_space_read_4(its->its_bst, its->its_bsh, reg); 102 } 103 104 static inline void 105 gits_write_4(struct gicv3_its *its, bus_size_t reg, uint32_t val) 106 { 107 bus_space_write_4(its->its_bst, its->its_bsh, reg, val); 108 } 109 110 static inline uint64_t 111 gits_read_8(struct gicv3_its *its, bus_size_t reg) 112 { 113 return bus_space_read_8(its->its_bst, its->its_bsh, reg); 114 } 115 116 static inline void 117 gits_write_8(struct gicv3_its *its, bus_size_t reg, uint64_t val) 118 { 119 bus_space_write_8(its->its_bst, its->its_bsh, reg, val); 120 } 121 122 static int 123 gits_command(struct gicv3_its *its, const struct gicv3_its_command *cmd) 124 { 125 uint64_t cwriter, creadr; 126 u_int woff; 127 128 creadr = gits_read_8(its, GITS_CREADR); 129 if (ISSET(creadr, GITS_CREADR_Stalled)) { 130 DPRINTF(("ITS: stalled! GITS_CREADR = 0x%lx\n", creadr)); 131 return EIO; 132 } 133 134 cwriter = gits_read_8(its, GITS_CWRITER); 135 woff = cwriter & GITS_CWRITER_Offset; 136 137 uint64_t *dw = (uint64_t *)(its->its_cmd.base + woff); 138 for (int i = 0; i < __arraycount(cmd->dw); i++) { 139 dw[i] = htole64(cmd->dw[i]); 140 DPRINTF(("ITS: dw[%u] = 0x%016lx\n", i, cmd->dw[i])); 141 } 142 143 if (its->its_cmd_flush) { 144 cpu_dcache_wb_range((vaddr_t)dw, sizeof(cmd->dw)); 145 } 146 dsb(sy); 147 148 woff += sizeof(cmd->dw); 149 if (woff == its->its_cmd.len) 150 woff = 0; 151 152 gits_write_8(its, GITS_CWRITER, woff); 153 154 return 0; 155 } 156 157 static int 158 gits_command_mapc(struct gicv3_its *its, uint16_t icid, uint64_t rdbase, bool v) 159 { 160 struct gicv3_its_command cmd; 161 162 KASSERT((rdbase & 0xffff) == 0); 163 164 /* 165 * Map a collection table entry (ICID) to the target redistributor (RDbase). 
166 */ 167 memset(&cmd, 0, sizeof(cmd)); 168 cmd.dw[0] = GITS_CMD_MAPC; 169 cmd.dw[2] = icid; 170 if (v) { 171 cmd.dw[2] |= rdbase; 172 cmd.dw[2] |= __BIT(63); 173 } 174 175 DPRINTF(("ITS #%u: MAPC icid 0x%x rdbase 0x%lx valid %u\n", 176 its->its_id, icid, rdbase, v)); 177 178 return gits_command(its, &cmd); 179 } 180 181 static int 182 gits_command_mapd(struct gicv3_its *its, uint32_t deviceid, uint64_t itt_addr, u_int size, bool v) 183 { 184 struct gicv3_its_command cmd; 185 186 KASSERT((itt_addr & 0xff) == 0); 187 188 /* 189 * Map a device table entry (DeviceID) to its associated ITT (ITT_addr). 190 */ 191 memset(&cmd, 0, sizeof(cmd)); 192 cmd.dw[0] = GITS_CMD_MAPD | ((uint64_t)deviceid << 32); 193 if (v) { 194 cmd.dw[1] = uimax(1, size) - 1; 195 cmd.dw[2] = itt_addr | __BIT(63); 196 } 197 198 DPRINTF(("ITS #%u: MAPD deviceid 0x%x itt_addr 0x%lx size %u valid %u\n", 199 its->its_id, deviceid, itt_addr, size, v)); 200 201 return gits_command(its, &cmd); 202 } 203 204 static int 205 gits_command_mapti(struct gicv3_its *its, uint32_t deviceid, uint32_t eventid, uint32_t pintid, uint16_t icid) 206 { 207 struct gicv3_its_command cmd; 208 209 /* 210 * Map the event defined by EventID and DeviceID to its associated ITE, defined by ICID and pINTID 211 * in the ITT associated with DeviceID. 212 */ 213 memset(&cmd, 0, sizeof(cmd)); 214 cmd.dw[0] = GITS_CMD_MAPTI | ((uint64_t)deviceid << 32); 215 cmd.dw[1] = eventid | ((uint64_t)pintid << 32); 216 cmd.dw[2] = icid; 217 218 DPRINTF(("ITS #%u: MAPTI deviceid 0x%x eventid 0x%x pintid 0x%x icid 0x%x\n", 219 its->its_id, deviceid, eventid, pintid, icid)); 220 221 return gits_command(its, &cmd); 222 } 223 224 static int 225 gits_command_movi(struct gicv3_its *its, uint32_t deviceid, uint32_t eventid, uint16_t icid) 226 { 227 struct gicv3_its_command cmd; 228 229 /* 230 * Update the ICID field in the ITT entry for the event defined by DeviceID and 231 * EventID. 
232 */ 233 memset(&cmd, 0, sizeof(cmd)); 234 cmd.dw[0] = GITS_CMD_MOVI | ((uint64_t)deviceid << 32); 235 cmd.dw[1] = eventid; 236 cmd.dw[2] = icid; 237 238 DPRINTF(("ITS #%u: MOVI deviceid 0x%x eventid 0x%x icid 0x%x\n", 239 its->its_id, deviceid, eventid, icid)); 240 241 return gits_command(its, &cmd); 242 } 243 244 static int 245 gits_command_inv(struct gicv3_its *its, uint32_t deviceid, uint32_t eventid) 246 { 247 struct gicv3_its_command cmd; 248 249 /* 250 * Ensure any caching in the redistributors associated with the specified 251 * EventID is consistent with the LPI configuration tables. 252 */ 253 memset(&cmd, 0, sizeof(cmd)); 254 cmd.dw[0] = GITS_CMD_INV | ((uint64_t)deviceid << 32); 255 cmd.dw[1] = eventid; 256 257 DPRINTF(("ITS #%u: INV deviceid 0x%x eventid 0x%x\n", 258 its->its_id, deviceid, eventid)); 259 260 return gits_command(its, &cmd); 261 } 262 263 static int 264 gits_command_invall(struct gicv3_its *its, uint16_t icid) 265 { 266 struct gicv3_its_command cmd; 267 268 /* 269 * Ensure any caching associated with this ICID is consistent with LPI 270 * configuration tables for all redistributors. 271 */ 272 memset(&cmd, 0, sizeof(cmd)); 273 cmd.dw[0] = GITS_CMD_INVALL; 274 cmd.dw[2] = icid; 275 276 DPRINTF(("ITS #%u: INVALL icid 0x%x\n", its->its_id, icid)); 277 278 return gits_command(its, &cmd); 279 } 280 281 static int 282 gits_command_sync(struct gicv3_its *its, uint64_t rdbase) 283 { 284 struct gicv3_its_command cmd; 285 286 KASSERT((rdbase & 0xffff) == 0); 287 288 /* 289 * Ensure all outstanding ITS operations associated with physical interrupts 290 * for the specified redistributor (RDbase) are globally observed before 291 * further ITS commands are executed. 
292 */ 293 memset(&cmd, 0, sizeof(cmd)); 294 cmd.dw[0] = GITS_CMD_SYNC; 295 cmd.dw[2] = rdbase; 296 297 DPRINTF(("ITS #%u: SYNC rdbase 0x%lx\n", its->its_id, rdbase)); 298 299 return gits_command(its, &cmd); 300 } 301 302 #if 0 303 static int 304 gits_command_int(struct gicv3_its *its, uint32_t deviceid, uint32_t eventid) 305 { 306 struct gicv3_its_command cmd; 307 308 /* 309 * Translate the deviceid and eventid into an icid and pintid through 310 * the device table and ITT. Mark the pintid as pending 311 * on the redistributor. 312 * If the interrupt is not configured the command queue stalls. 313 */ 314 memset(&cmd, 0, sizeof(cmd)); 315 cmd.dw[0] = GITS_CMD_INT | ((uint64_t)deviceid << 32); 316 cmd.dw[1] = eventid; 317 318 DPRINTF(("ITS #%u: INT deviceid 0x%x eventid 0x%x\n", 319 its->its_id, deviceid, eventid)); 320 321 return gits_command(its, &cmd); 322 } 323 #endif 324 325 static int 326 gits_wait(struct gicv3_its *its) 327 { 328 u_int woff, roff; 329 int retry = 100000; 330 331 /* 332 * The ITS command queue is empty when CWRITER and CREADR specify the 333 * same base address offset value. 334 */ 335 for (retry = 1000; retry > 0; retry--) { 336 woff = gits_read_8(its, GITS_CWRITER) & GITS_CWRITER_Offset; 337 roff = gits_read_8(its, GITS_CREADR) & GITS_CREADR_Offset; 338 if (woff == roff) 339 break; 340 delay(100); 341 } 342 if (retry == 0) { 343 device_printf(its->its_gic->sc_dev, 344 "ITS command queue timeout! 
CREADR=0x%lx CWRITER=0x%lx\n", 345 gits_read_8(its, GITS_CREADR), gits_read_8(its, GITS_CWRITER)); 346 return ETIMEDOUT; 347 } 348 349 return 0; 350 } 351 352 static int 353 gicv3_its_msi_alloc_lpi(struct gicv3_its *its, 354 const struct pci_attach_args *pa) 355 { 356 struct pci_attach_args *new_pa; 357 vmem_addr_t n; 358 359 KASSERT(its->its_gic->sc_lpi_pool != NULL); 360 361 if (vmem_alloc(its->its_gic->sc_lpi_pool, 1, VM_INSTANTFIT|VM_SLEEP, &n) != 0) 362 return -1; 363 364 KASSERT(its->its_pa[n] == NULL); 365 366 new_pa = kmem_alloc(sizeof(*new_pa), KM_SLEEP); 367 memcpy(new_pa, pa, sizeof(*new_pa)); 368 its->its_pa[n] = new_pa; 369 return n + its->its_pic->pic_irqbase; 370 } 371 372 static void 373 gicv3_its_msi_free_lpi(struct gicv3_its *its, int lpi) 374 { 375 struct pci_attach_args *pa; 376 377 KASSERT(its->its_gic->sc_lpi_pool != NULL); 378 KASSERT(lpi >= its->its_pic->pic_irqbase); 379 380 pa = its->its_pa[lpi - its->its_pic->pic_irqbase]; 381 its->its_pa[lpi - its->its_pic->pic_irqbase] = NULL; 382 kmem_free(pa, sizeof(*pa)); 383 384 vmem_free(its->its_gic->sc_lpi_pool, lpi - its->its_pic->pic_irqbase, 1); 385 } 386 387 static uint32_t 388 gicv3_its_devid(pci_chipset_tag_t pc, pcitag_t tag) 389 { 390 uint32_t devid; 391 int b, d, f; 392 393 pci_decompose_tag(pc, tag, &b, &d, &f); 394 395 devid = (b << 8) | (d << 3) | f; 396 397 return pci_get_devid(pc, devid); 398 } 399 400 static int 401 gicv3_its_device_map(struct gicv3_its *its, uint32_t devid, u_int count) 402 { 403 struct gicv3_its_device *dev; 404 struct gicv3_its_table *itstab = &its->its_tab_device; 405 u_int vectors; 406 int error; 407 408 vectors = MAX(2, count); 409 while (!powerof2(vectors)) 410 vectors++; 411 412 const uint64_t typer = gits_read_8(its, GITS_TYPER); 413 const u_int itt_entry_size = __SHIFTOUT(typer, GITS_TYPER_ITT_entry_size) + 1; 414 const u_int itt_size = roundup(uimax(vectors, 2) * itt_entry_size, GITS_ITT_ALIGN); 415 416 LIST_FOREACH(dev, &its->its_devices, dev_list) 417 
if (dev->dev_id == devid) { 418 return itt_size <= dev->dev_size ? 0 : EEXIST; 419 } 420 421 if (itstab->tab_indirect) { 422 uint64_t *l1_tab = itstab->tab_l1; 423 const u_int index = devid / itstab->tab_l2_num_ids; 424 425 if ((l1_tab[index] & GITS_BASER_Valid) == 0) { 426 /* Need to allocate the L2 table. */ 427 struct gicv3_its_page_table *pt; 428 429 pt = kmem_alloc(sizeof(*pt), KM_SLEEP); 430 pt->pt_index = index; 431 gicv3_dma_alloc(its->its_gic, &pt->pt_dma, itstab->tab_l2_entry_size, 432 itstab->tab_page_size); 433 LIST_INSERT_HEAD(&itstab->tab_pt, pt, pt_list); 434 435 if (!itstab->tab_shareable) { 436 cpu_dcache_wb_range((vaddr_t)pt->pt_dma.base, 437 itstab->tab_l2_entry_size); 438 } 439 l1_tab[index] = pt->pt_dma.segs[0].ds_addr | GITS_BASER_Valid; 440 if (!itstab->tab_shareable) { 441 cpu_dcache_wb_range((vaddr_t)&l1_tab[index], 442 sizeof(l1_tab[index])); 443 } 444 dsb(sy); 445 446 DPRINTF(("ITS: Allocated L2 entry at index %u\n", index)); 447 } 448 } 449 450 dev = kmem_alloc(sizeof(*dev), KM_SLEEP); 451 dev->dev_id = devid; 452 dev->dev_size = itt_size; 453 gicv3_dma_alloc(its->its_gic, &dev->dev_itt, itt_size, GITS_ITT_ALIGN); 454 LIST_INSERT_HEAD(&its->its_devices, dev, dev_list); 455 456 if (its->its_cmd_flush) { 457 cpu_dcache_wb_range((vaddr_t)dev->dev_itt.base, itt_size); 458 } 459 dsb(sy); 460 461 /* 462 * Map the device to the ITT 463 */ 464 const u_int size = __SHIFTOUT(typer, GITS_TYPER_ID_bits) + 1; 465 mutex_enter(its->its_lock); 466 error = gits_command_mapd(its, devid, dev->dev_itt.segs[0].ds_addr, size, true); 467 if (error == 0) { 468 error = gits_wait(its); 469 } 470 mutex_exit(its->its_lock); 471 472 return error; 473 } 474 475 static void 476 gicv3_its_msi_enable(struct gicv3_its *its, int lpi, int count) 477 { 478 const struct pci_attach_args *pa = its->its_pa[lpi - its->its_pic->pic_irqbase]; 479 pci_chipset_tag_t pc = pa->pa_pc; 480 pcitag_t tag = pa->pa_tag; 481 pcireg_t ctl; 482 int off; 483 484 if (!pci_get_capability(pc, tag, 
PCI_CAP_MSI, &off, NULL)) 485 panic("gicv3_its_msi_enable: device is not MSI-capable"); 486 487 ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL); 488 ctl &= ~PCI_MSI_CTL_MME_MASK; 489 ctl |= __SHIFTIN(ilog2(count), PCI_MSI_CTL_MME_MASK); 490 pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl); 491 492 const uint64_t addr = its->its_base + GITS_TRANSLATER; 493 ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL); 494 if (ctl & PCI_MSI_CTL_64BIT_ADDR) { 495 pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_LO, 496 addr & 0xffffffff); 497 pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_HI, 498 (addr >> 32) & 0xffffffff); 499 pci_conf_write(pc, tag, off + PCI_MSI_MDATA64, 500 lpi - its->its_pic->pic_irqbase); 501 } else { 502 KASSERT((addr >> 32) == 0); 503 pci_conf_write(pc, tag, off + PCI_MSI_MADDR, 504 addr & 0xffffffff); 505 pci_conf_write(pc, tag, off + PCI_MSI_MDATA, 506 lpi - its->its_pic->pic_irqbase); 507 } 508 ctl |= PCI_MSI_CTL_MSI_ENABLE; 509 pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl); 510 } 511 512 static void 513 gicv3_its_msi_disable(struct gicv3_its *its, int lpi) 514 { 515 const struct pci_attach_args *pa = its->its_pa[lpi - its->its_pic->pic_irqbase]; 516 pci_chipset_tag_t pc = pa->pa_pc; 517 pcitag_t tag = pa->pa_tag; 518 pcireg_t ctl; 519 int off; 520 521 if (!pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL)) 522 panic("gicv3_its_msi_enable: device is not MSI-capable"); 523 524 ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL); 525 ctl &= ~PCI_MSI_CTL_MSI_ENABLE; 526 pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl); 527 } 528 529 static void 530 gicv3_its_msix_enable(struct gicv3_its *its, int lpi, int msix_vec, 531 bus_space_tag_t bst, bus_space_handle_t bsh) 532 { 533 const struct pci_attach_args *pa = its->its_pa[lpi - its->its_pic->pic_irqbase]; 534 pci_chipset_tag_t pc = pa->pa_pc; 535 pcitag_t tag = pa->pa_tag; 536 pcireg_t ctl; 537 uint32_t val; 538 int off; 539 540 if (!pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL)) 541 
panic("gicv3_its_msix_enable: device is not MSI-X-capable"); 542 543 const uint64_t addr = its->its_base + GITS_TRANSLATER; 544 const uint64_t entry_base = PCI_MSIX_TABLE_ENTRY_SIZE * msix_vec; 545 bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_LO, (uint32_t)addr); 546 bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_HI, (uint32_t)(addr >> 32)); 547 bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_DATA, lpi - its->its_pic->pic_irqbase); 548 val = bus_space_read_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL); 549 val &= ~PCI_MSIX_VECTCTL_MASK; 550 bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL, val); 551 552 ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL); 553 ctl |= PCI_MSIX_CTL_ENABLE; 554 pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl); 555 } 556 557 static void 558 gicv3_its_msix_disable(struct gicv3_its *its, int lpi) 559 { 560 const struct pci_attach_args *pa = its->its_pa[lpi - its->its_pic->pic_irqbase]; 561 pci_chipset_tag_t pc = pa->pa_pc; 562 pcitag_t tag = pa->pa_tag; 563 pcireg_t ctl; 564 int off; 565 566 if (!pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL)) 567 panic("gicv3_its_msix_disable: device is not MSI-X-capable"); 568 569 ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL); 570 ctl &= ~PCI_MSIX_CTL_ENABLE; 571 pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl); 572 } 573 574 static pci_intr_handle_t * 575 gicv3_its_msi_alloc(struct arm_pci_msi *msi, int *count, 576 const struct pci_attach_args *pa, bool exact) 577 { 578 struct gicv3_its * const its = msi->msi_priv; 579 struct cpu_info * const ci = cpu_lookup(0); 580 pci_intr_handle_t *vectors; 581 int n, off, error; 582 583 if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI, &off, NULL)) 584 return NULL; 585 586 const uint64_t typer = gits_read_8(its, GITS_TYPER); 587 const u_int id_bits = __SHIFTOUT(typer, GITS_TYPER_ID_bits) + 1; 588 if (*count == 0 || *count > (1 << id_bits)) 589 return NULL; 590 591 
const uint32_t devid = gicv3_its_devid(pa->pa_pc, pa->pa_tag); 592 593 if (gicv3_its_device_map(its, devid, *count) != 0) 594 return NULL; 595 596 vectors = kmem_alloc(sizeof(*vectors) * *count, KM_SLEEP); 597 mutex_enter(its->its_lock); 598 for (n = 0; n < *count; n++) { 599 const int lpi = gicv3_its_msi_alloc_lpi(its, pa); 600 KASSERT(lpi >= 0); 601 vectors[n] = ARM_PCI_INTR_MSI | 602 __SHIFTIN(lpi, ARM_PCI_INTR_IRQ) | 603 __SHIFTIN(n, ARM_PCI_INTR_MSI_VEC) | 604 __SHIFTIN(msi->msi_id, ARM_PCI_INTR_FRAME); 605 606 if (n == 0) 607 gicv3_its_msi_enable(its, lpi, *count); 608 609 /* 610 * Record devid and target PE 611 */ 612 its->its_devid[lpi - its->its_pic->pic_irqbase] = devid; 613 its->its_targets[lpi - its->its_pic->pic_irqbase] = ci; 614 615 /* 616 * Map event 617 */ 618 gits_command_mapti(its, devid, lpi - its->its_pic->pic_irqbase, lpi, cpu_index(ci)); 619 gits_command_sync(its, its->its_rdbase[cpu_index(ci)]); 620 } 621 error = gits_wait(its); 622 mutex_exit(its->its_lock); 623 624 if (error != 0) { 625 kmem_free(vectors, sizeof(*vectors) * *count); 626 vectors = NULL; 627 } 628 629 return vectors; 630 } 631 632 static pci_intr_handle_t * 633 gicv3_its_msix_alloc(struct arm_pci_msi *msi, u_int *table_indexes, int *count, 634 const struct pci_attach_args *pa, bool exact) 635 { 636 struct gicv3_its * const its = msi->msi_priv; 637 struct cpu_info *ci = cpu_lookup(0); 638 pci_intr_handle_t *vectors; 639 bus_space_tag_t bst; 640 bus_space_handle_t bsh; 641 bus_size_t bsz; 642 uint32_t table_offset, table_size; 643 int n, off, bar, error; 644 pcireg_t tbl; 645 646 if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &off, NULL)) 647 return NULL; 648 649 const uint64_t typer = gits_read_8(its, GITS_TYPER); 650 const u_int id_bits = __SHIFTOUT(typer, GITS_TYPER_ID_bits) + 1; 651 if (*count == 0 || *count > (1 << id_bits)) 652 return NULL; 653 654 tbl = pci_conf_read(pa->pa_pc, pa->pa_tag, off + PCI_MSIX_TBLOFFSET); 655 bar = PCI_BAR0 + (4 * (tbl & 
PCI_MSIX_TBLBIR_MASK)); 656 table_offset = tbl & PCI_MSIX_TBLOFFSET_MASK; 657 table_size = pci_msix_count(pa->pa_pc, pa->pa_tag) * PCI_MSIX_TABLE_ENTRY_SIZE; 658 if (table_size == 0) 659 return NULL; 660 661 error = pci_mapreg_submap(pa, bar, pci_mapreg_type(pa->pa_pc, pa->pa_tag, bar), 662 BUS_SPACE_MAP_LINEAR, roundup(table_size, PAGE_SIZE), table_offset, 663 &bst, &bsh, NULL, &bsz); 664 if (error) 665 return NULL; 666 667 const uint32_t devid = gicv3_its_devid(pa->pa_pc, pa->pa_tag); 668 669 if (gicv3_its_device_map(its, devid, *count) != 0) { 670 bus_space_unmap(bst, bsh, bsz); 671 return NULL; 672 } 673 674 vectors = kmem_alloc(sizeof(*vectors) * *count, KM_SLEEP); 675 mutex_enter(its->its_lock); 676 for (n = 0; n < *count; n++) { 677 const int lpi = gicv3_its_msi_alloc_lpi(its, pa); 678 KASSERT(lpi >= 0); 679 const int msix_vec = table_indexes ? table_indexes[n] : n; 680 vectors[msix_vec] = ARM_PCI_INTR_MSIX | 681 __SHIFTIN(lpi, ARM_PCI_INTR_IRQ) | 682 __SHIFTIN(msix_vec, ARM_PCI_INTR_MSI_VEC) | 683 __SHIFTIN(msi->msi_id, ARM_PCI_INTR_FRAME); 684 685 gicv3_its_msix_enable(its, lpi, msix_vec, bst, bsh); 686 687 /* 688 * Record devid and target PE 689 */ 690 its->its_devid[lpi - its->its_pic->pic_irqbase] = devid; 691 its->its_targets[lpi - its->its_pic->pic_irqbase] = ci; 692 693 /* 694 * Map event 695 */ 696 gits_command_mapti(its, devid, lpi - its->its_pic->pic_irqbase, lpi, cpu_index(ci)); 697 gits_command_sync(its, its->its_rdbase[cpu_index(ci)]); 698 } 699 gits_wait(its); 700 mutex_exit(its->its_lock); 701 702 bus_space_unmap(bst, bsh, bsz); 703 704 return vectors; 705 } 706 707 static void * 708 gicv3_its_msi_intr_establish(struct arm_pci_msi *msi, 709 pci_intr_handle_t ih, int ipl, int (*func)(void *), void *arg, const char *xname) 710 { 711 struct gicv3_its * const its = msi->msi_priv; 712 void *intrh; 713 714 const int lpi = __SHIFTOUT(ih, ARM_PCI_INTR_IRQ); 715 const int mpsafe = (ih & ARM_PCI_INTR_MPSAFE) ? 
IST_MPSAFE : 0; 716 717 intrh = pic_establish_intr(its->its_pic, lpi - its->its_pic->pic_irqbase, ipl, 718 IST_EDGE | mpsafe, func, arg, xname); 719 if (intrh == NULL) 720 return NULL; 721 722 /* Invalidate LPI configuration tables */ 723 KASSERT(its->its_pa[lpi - its->its_pic->pic_irqbase] != NULL); 724 const uint32_t devid = its->its_devid[lpi - its->its_pic->pic_irqbase]; 725 gits_command_inv(its, devid, lpi - its->its_pic->pic_irqbase); 726 727 return intrh; 728 } 729 730 static void 731 gicv3_its_msi_intr_release(struct arm_pci_msi *msi, pci_intr_handle_t *pih, 732 int count) 733 { 734 struct gicv3_its * const its = msi->msi_priv; 735 int n; 736 737 for (n = 0; n < count; n++) { 738 const int lpi = __SHIFTOUT(pih[n], ARM_PCI_INTR_IRQ); 739 KASSERT(lpi >= its->its_pic->pic_irqbase); 740 if (pih[n] & ARM_PCI_INTR_MSIX) 741 gicv3_its_msix_disable(its, lpi); 742 if (pih[n] & ARM_PCI_INTR_MSI) 743 gicv3_its_msi_disable(its, lpi); 744 gicv3_its_msi_free_lpi(its, lpi); 745 its->its_targets[lpi - its->its_pic->pic_irqbase] = NULL; 746 its->its_devid[lpi - its->its_pic->pic_irqbase] = 0; 747 struct intrsource * const is = 748 its->its_pic->pic_sources[lpi - its->its_pic->pic_irqbase]; 749 if (is != NULL) 750 pic_disestablish_source(is); 751 } 752 } 753 754 static void 755 gicv3_its_command_init(struct gicv3_softc *sc, struct gicv3_its *its) 756 { 757 uint64_t cbaser, tmp; 758 759 gicv3_dma_alloc(sc, &its->its_cmd, GITS_COMMANDS_SIZE, GITS_COMMANDS_ALIGN); 760 if (its->its_cmd_flush) { 761 cpu_dcache_wb_range((vaddr_t)its->its_cmd.base, GITS_COMMANDS_SIZE); 762 } 763 dsb(sy); 764 765 KASSERT((gits_read_4(its, GITS_CTLR) & GITS_CTLR_Enabled) == 0); 766 KASSERT((gits_read_4(its, GITS_CTLR) & GITS_CTLR_Quiescent) != 0); 767 768 cbaser = its->its_cmd.segs[0].ds_addr; 769 cbaser |= __SHIFTIN((its->its_cmd.len / 4096) - 1, GITS_CBASER_Size); 770 cbaser |= GITS_CBASER_Valid; 771 772 cbaser |= __SHIFTIN(GITS_Cache_NORMAL_WA_WB, GITS_CBASER_InnerCache); 773 cbaser |= 
__SHIFTIN(GITS_Shareability_IS, GITS_CBASER_Shareability); 774 gits_write_8(its, GITS_CBASER, cbaser); 775 776 tmp = gits_read_8(its, GITS_CBASER); 777 if (__SHIFTOUT(tmp, GITS_CBASER_Shareability) != GITS_Shareability_IS) { 778 if (__SHIFTOUT(tmp, GITS_CBASER_InnerCache) == GITS_Shareability_NS) { 779 cbaser &= ~GITS_CBASER_InnerCache; 780 cbaser |= __SHIFTIN(GITS_Cache_NORMAL_NC, GITS_CBASER_InnerCache); 781 cbaser &= ~GITS_CBASER_Shareability; 782 cbaser |= __SHIFTIN(GITS_Shareability_NS, GITS_CBASER_Shareability); 783 gits_write_8(its, GITS_CBASER, cbaser); 784 } 785 786 its->its_cmd_flush = true; 787 } 788 aprint_normal_dev(sc->sc_dev, "ITS command table @ %#lx/%#lx, %s, %s\n", 789 its->its_cmd.segs[0].ds_addr, its->its_cmd.len, 790 gits_cache_type[__SHIFTOUT(cbaser, GITS_BASER_InnerCache)], 791 gits_share_type[__SHIFTOUT(cbaser, GITS_BASER_Shareability)]); 792 793 gits_write_8(its, GITS_CWRITER, 0); 794 } 795 796 static void 797 gicv3_its_table_params(struct gicv3_softc *sc, struct gicv3_its *its, 798 u_int *devbits, u_int *innercache, u_int *share) 799 { 800 801 const uint64_t typer = gits_read_8(its, GITS_TYPER); 802 const uint32_t iidr = gits_read_4(its, GITS_IIDR); 803 804 /* Default values */ 805 *devbits = __SHIFTOUT(typer, GITS_TYPER_Devbits) + 1; 806 *innercache = GITS_Cache_NORMAL_WA_WB; 807 *share = GITS_Shareability_IS; 808 809 /* Cavium ThunderX errata */ 810 if ((iidr & GITS_IIDR_CAVIUM_ERRATA_MASK) == GITS_IIDR_CAVIUM_ERRATA_VALUE) { 811 *devbits = 20; /* 8Mb */ 812 *innercache = GITS_Cache_DEVICE_nGnRnE; 813 aprint_normal_dev(sc->sc_dev, "Cavium ThunderX errata detected\n"); 814 } 815 } 816 817 static bool 818 gicv3_its_table_probe_indirect(struct gicv3_its *its, int tab) 819 { 820 uint64_t baser; 821 822 baser = gits_read_8(its, GITS_BASERn(tab)); 823 baser |= GITS_BASER_Indirect; 824 gits_write_8(its, GITS_BASERn(tab), baser); 825 826 baser = gits_read_8(its, GITS_BASERn(tab)); 827 828 return (baser & GITS_BASER_Indirect) != 0; 829 } 830 831 
/*
 * Walk the eight GITS_BASERn registers, allocate backing memory for the
 * tables the ITS advertises (device table, collection table), and program
 * each register.  Shareability/cacheability writes are read back and
 * downgraded if the hardware does not accept them.
 */
static void
gicv3_its_table_init(struct gicv3_softc *sc, struct gicv3_its *its)
{
	u_int page_size, table_align;
	u_int devbits, innercache, share;
	const char *table_type;
	uint64_t baser;
	int tab;

	gicv3_its_table_params(sc, its, &devbits, &innercache, &share);

	DPRINTF(("ITS: devbits = %u\n", devbits));

	for (tab = 0; tab < 8; tab++) {
		struct gicv3_its_table *itstab;
		bool indirect = false;
		uint64_t l1_entry_size, l2_entry_size;
		uint64_t l1_num_ids, l2_num_ids;
		uint64_t table_size;

		baser = gits_read_8(its, GITS_BASERn(tab));

		/* Entry_Size field encodes (bytes per entry) - 1. */
		l1_entry_size = __SHIFTOUT(baser, GITS_BASER_Entry_Size) + 1;
		l2_entry_size = 0;
		l2_num_ids = 0;

		switch (__SHIFTOUT(baser, GITS_BASER_Page_Size)) {
		case GITS_Page_Size_64KB:
			page_size = 65536;
			break;
		case GITS_Page_Size_16KB:
			page_size = 16384;
			break;
		case GITS_Page_Size_4KB:
		default:
			page_size = 4096;
		}
		table_align = page_size;

		switch (__SHIFTOUT(baser, GITS_BASER_Type)) {
		case GITS_Type_Devices:
			/*
			 * Table size scales with the width of the DeviceID.
			 */
			l1_num_ids = 1ULL << devbits;
			DPRINTF(("ITS: l1_num_ids = %lu\n", l1_num_ids));
			indirect =
			    gicv3_its_table_probe_indirect(its, tab);
			if (indirect) {
				/*
				 * Two-level table: the L1 table holds
				 * 8-byte pointers to on-demand L2 pages,
				 * each L2 page covering one page worth of
				 * device table entries.
				 */
				DPRINTF(("ITS: indirect\n"));
				l2_entry_size = l1_entry_size;
				l2_num_ids = page_size / l2_entry_size;
				l1_num_ids = l1_num_ids / l2_num_ids;
				l1_entry_size = GITS_INDIRECT_ENTRY_SIZE;
			}
			table_size = roundup2(l1_entry_size * l1_num_ids, page_size);
			/* Clamp to the maximum page count GITS_BASER.Size can encode. */
			if (howmany(table_size, page_size) > GITS_BASER_Size + 1) {
				DPRINTF(("ITS: clamp table size 0x%lx -> ", table_size));
				table_size = (GITS_BASER_Size + 1) * page_size;
				DPRINTF(("0x%lx\n", table_size));
			}
			table_type = "Devices";

			DPRINTF(("ITS: table_size is 0x%lx\n", table_size));

			/* Remember the layout for later L2 page allocation. */
			itstab = &its->its_tab_device;
			itstab->tab_page_size = page_size;
			itstab->tab_l1_entry_size = l1_entry_size;
			itstab->tab_l1_num_ids = l1_num_ids;
			itstab->tab_l2_entry_size = l2_entry_size;
			itstab->tab_l2_num_ids = l2_num_ids;
			itstab->tab_indirect = indirect;
			LIST_INIT(&itstab->tab_pt);
			break;
		case GITS_Type_InterruptCollections:
			/*
			 * Allocate space for one interrupt collection per CPU.
			 */
			table_size = roundup(l1_entry_size * ncpu, page_size);
			table_type = "Collections";
			break;
		default:
			/* Unsupported table type; leave it unallocated. */
			table_size = 0;
			break;
		}

		if (table_size == 0)
			continue;

		gicv3_dma_alloc(sc, &its->its_tab[tab], table_size, table_align);
		if (its->its_cmd_flush) {
			cpu_dcache_wb_range((vaddr_t)its->its_tab[tab].base, table_size);
		}
		dsb(sy);

		/* Size field encodes (number of pages) - 1. */
		baser &= ~GITS_BASER_Size;
		baser |= __SHIFTIN(howmany(table_size, page_size) - 1, GITS_BASER_Size);
		baser &= ~GITS_BASER_Physical_Address;
		baser |= its->its_tab[tab].segs[0].ds_addr;
		baser &= ~GITS_BASER_InnerCache;
		baser |= __SHIFTIN(innercache, GITS_BASER_InnerCache);
		baser &= ~GITS_BASER_Shareability;
		baser |= __SHIFTIN(share, GITS_BASER_Shareability);
		baser |= GITS_BASER_Valid;
		if (indirect) {
			baser |= GITS_BASER_Indirect;
		} else {
			baser &= ~GITS_BASER_Indirect;
		}

		gits_write_8(its, GITS_BASERn(tab), baser);

		/*
		 * If the ITS downgraded the table to non-shareable, also
		 * make it non-cacheable on our side.
		 */
		baser = gits_read_8(its, GITS_BASERn(tab));
		if (__SHIFTOUT(baser, GITS_BASER_Shareability) == GITS_Shareability_NS) {
			baser &= ~GITS_BASER_InnerCache;
			baser |= __SHIFTIN(GITS_Cache_NORMAL_NC, GITS_BASER_InnerCache);

			gits_write_8(its, GITS_BASERn(tab), baser);
		}

		baser = gits_read_8(its, GITS_BASERn(tab));
		aprint_normal_dev(sc->sc_dev, "ITS [#%d] %s table @ %#lx/%#lx, %s, %s%s\n",
		    tab, table_type, its->its_tab[tab].segs[0].ds_addr, table_size,
		    gits_cache_type[__SHIFTOUT(baser, GITS_BASER_InnerCache)],
		    gits_share_type[__SHIFTOUT(baser, GITS_BASER_Shareability)],
		    indirect ? ", indirect" : "");

		if (__SHIFTOUT(baser, GITS_BASER_Type) == GITS_Type_Devices) {
			/* Device-table L1 virtual address, for on-demand L2 pages. */
			its->its_tab_device.tab_l1 = its->its_tab[tab].base;
			its->its_tab_device.tab_shareable =
			    __SHIFTOUT(baser, GITS_BASER_Shareability) != GITS_Shareability_NS;
		}

	}
}

/*
 * Set the Enabled bit in GITS_CTLR, allowing the ITS to process commands
 * and translate incoming MSI writes.
 */
static void
gicv3_its_enable(struct gicv3_softc *sc, struct gicv3_its *its)
{
	uint32_t ctlr;

	ctlr = gits_read_4(its, GITS_CTLR);
	ctlr |= GITS_CTLR_Enabled;
	gits_write_4(its, GITS_CTLR, ctlr);
}

/*
 * Per-CPU hookup (also called at boot for the primary CPU and on
 * CPU hotplug/resume): compute this CPU's RDbase, map the collection ID
 * (== cpu_index) to its redistributor, and re-route any LPIs that were
 * already targeted at this CPU.
 */
static void
gicv3_its_cpu_init(void *priv, struct cpu_info *ci)
{
	struct gicv3_its * const its = priv;
	struct gicv3_softc * const sc = its->its_gic;
	uint64_t rdbase;
	size_t irq;

	/*
	 * GITS_TYPER.PTA selects how commands address a redistributor:
	 * by physical address, or by processor number shifted into
	 * bits [31:16].
	 */
	const uint64_t typer = bus_space_read_8(sc->sc_bst, its->its_bsh, GITS_TYPER);
	if (typer & GITS_TYPER_PTA) {
		void *va = bus_space_vaddr(sc->sc_bst, sc->sc_bsh_r[ci->ci_gic_redist]);
		rdbase = vtophys((vaddr_t)va);
	} else {
		rdbase = (uint64_t)sc->sc_processor_id[cpu_index(ci)] << 16;
	}
	its->its_rdbase[cpu_index(ci)] = rdbase;

	/*
	 * Map collection ID of this CPU's index to this CPU's redistributor.
	 */
	mutex_enter(its->its_lock);
	gits_command_mapc(its, cpu_index(ci), rdbase, true);
	gits_command_invall(its, cpu_index(ci));
	gits_wait(its);

	/*
	 * Update routing for LPIs targeting this CPU
	 */
	for (irq = 0; irq < its->its_pic->pic_maxsources; irq++) {
		if (its->its_targets[irq] != ci)
			continue;
		KASSERT(its->its_pa[irq] != NULL);

		const uint32_t devid = its->its_devid[irq];
		gits_command_movi(its, devid, irq, cpu_index(ci));
		gits_command_sync(its, its->its_rdbase[cpu_index(ci)]);
	}
	gits_wait(its);
	mutex_exit(its->its_lock);

	its->its_cpuonline[cpu_index(ci)] = true;
}

/*
 * LPI callback: report which CPU the given LPI (pic-relative irq) is
 * currently routed to.
 */
static void
gicv3_its_get_affinity(void *priv, size_t irq, kcpuset_t *affinity)
{
	struct gicv3_its * const its = priv;
	struct cpu_info *ci;

	ci = its->its_targets[irq];
	if (ci)
		kcpuset_set(affinity, cpu_index(ci));
}

/*
 * LPI callback: re-route the given LPI to a single CPU.  Returns EINVAL
 * unless exactly one CPU is requested, EPASSTHROUGH if the LPI is not
 * owned by this ITS.
 */
static int
gicv3_its_set_affinity(void *priv, size_t irq, const kcpuset_t *affinity)
{
	struct gicv3_its * const its = priv;
	const struct pci_attach_args *pa;
	struct cpu_info *ci;

	const int set = kcpuset_countset(affinity);
	if (set != 1)
		return EINVAL;

	pa = its->its_pa[irq];
	if (pa == NULL)
		return EPASSTHROUGH;

	ci = cpu_lookup(kcpuset_ffs(affinity) - 1);
	its->its_targets[irq] = ci;

	/*
	 * If the target CPU is not online yet, the MOVI is deferred to
	 * gicv3_its_cpu_init() when that CPU comes up.
	 */
	if (its->its_cpuonline[cpu_index(ci)] == true) {
		const uint32_t devid = gicv3_its_devid(pa->pa_pc, pa->pa_tag);
		mutex_enter(its->its_lock);
		gits_command_movi(its, devid, irq, cpu_index(ci));
		gits_command_sync(its, its->its_rdbase[cpu_index(ci)]);
		mutex_exit(its->its_lock);
	}

	return 0;
}

/*
 * Attach one ITS frame: allocate per-LPI bookkeeping, set up the command
 * queue and translation tables, enable the ITS, initialize the boot CPU's
 * collection, and register with the arm_pci_msi framework.
 *
 * Returns 0 on success, ENXIO if the ITS cannot deliver physical LPIs,
 * or an error from arm_pci_msi_add().
 */
int
gicv3_its_init(struct gicv3_softc *sc, bus_space_handle_t bsh,
    uint64_t its_base, uint32_t its_id)
{
	struct gicv3_its *its;
	struct arm_pci_msi *msi;

	/* We only support ITSes that can translate physical interrupts. */
	const uint64_t typer = bus_space_read_8(sc->sc_bst, bsh, GITS_TYPER);
	if ((typer & GITS_TYPER_Physical) == 0)
		return ENXIO;

	its = kmem_zalloc(sizeof(*its), KM_SLEEP);
	its->its_id = its_id;
	its->its_bst = sc->sc_bst;
	its->its_bsh = bsh;
	its->its_dmat = sc->sc_dmat;
	its->its_base = its_base;
	its->its_pic = &sc->sc_lpi;
	snprintf(its->its_pic->pic_name, sizeof(its->its_pic->pic_name), "gicv3-its");
	KASSERT(its->its_pic->pic_maxsources > 0);
	/* Per-LPI state, indexed by (lpi - pic_irqbase). */
	its->its_pa = kmem_zalloc(sizeof(struct pci_attach_args *) * its->its_pic->pic_maxsources, KM_SLEEP);
	its->its_targets = kmem_zalloc(sizeof(struct cpu_info *) * its->its_pic->pic_maxsources, KM_SLEEP);
	its->its_devid = kmem_zalloc(sizeof(uint32_t) * its->its_pic->pic_maxsources, KM_SLEEP);
	its->its_gic = sc;
	its->its_rdbase = kmem_zalloc(sizeof(*its->its_rdbase) * ncpu, KM_SLEEP);
	its->its_cpuonline = kmem_zalloc(sizeof(*its->its_cpuonline) * ncpu, KM_SLEEP);
	its->its_cb.cpu_init = gicv3_its_cpu_init;
	its->its_cb.get_affinity = gicv3_its_get_affinity;
	its->its_cb.set_affinity = gicv3_its_set_affinity;
	its->its_cb.priv = its;
	LIST_INIT(&its->its_devices);
	LIST_INSERT_HEAD(&sc->sc_lpi_callbacks, &its->its_cb, list);
	its->its_lock = mutex_obj_alloc(MUTEX_SPIN, IPL_NONE);

	gicv3_its_command_init(sc, its);
	gicv3_its_table_init(sc, its);

	gicv3_its_enable(sc, its);

	gicv3_its_cpu_init(its, curcpu());

	msi = &its->its_msi;
	msi->msi_id = its_id;
	msi->msi_dev = sc->sc_dev;
	msi->msi_priv = its;
	msi->msi_alloc = gicv3_its_msi_alloc;
	msi->msix_alloc = gicv3_its_msix_alloc;
	msi->msi_intr_establish = gicv3_its_msi_intr_establish;
	msi->msi_intr_release = gicv3_its_msi_intr_release;

	return arm_pci_msi_add(msi);
}