/*	$NetBSD: iommu.c,v 1.102 2023/12/01 05:22:01 thorpej Exp $ */

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1995 Paul Kranenburg
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by Paul Kranenburg.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iommu.c,v 1.102 2023/12/01 05:22:01 thorpej Exp $");

#include "opt_sparc_arch.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/vmem.h>

#include <uvm/uvm.h>

#define _SPARC_BUS_DMA_PRIVATE
#include <sys/bus.h>
#include <machine/autoconf.h>
#include <machine/ctlreg.h>
#include <sparc/sparc/asm.h>
#include <sparc/sparc/vaddrs.h>
#include <sparc/sparc/cpuvar.h>
#include <sparc/sparc/iommureg.h>
#include <sparc/sparc/iommuvar.h>

struct iommu_softc {
	struct iommureg	*sc_reg;
	u_int		sc_pagesize;
	u_int		sc_range;
	bus_addr_t	sc_dvmabase;
	iopte_t		*sc_ptes;
	int		sc_cachecoherent;
	/*
	 * Note: operations on the extent map are being protected with
	 * splhigh(), since we cannot predict at which interrupt priority
	 * our clients will run.
	 */
	struct sparc_bus_dma_tag sc_dmatag;
	vmem_t		*sc_dvmamap;
};
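
/*
 * The IOMMU maps a DVMA window that ends at the top of the 32-bit
 * address space (see iommu_copy_prom_entries() below) onto physical
 * memory through the I/O page table pointed to by sc_ptes; sc_dvmamap
 * is the arena from which DVMA addresses are handed out to the DMA map
 * functions further down.
 */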

/* autoconfiguration driver */
int	iommu_print(void *, const char *);
void	iommu_attach(device_t, device_t, void *);
int	iommu_match(device_t, cfdata_t, void *);

#if defined(SUN4M)
static void iommu_copy_prom_entries(struct iommu_softc *);
#endif

CFATTACH_DECL_NEW(iommu, sizeof(struct iommu_softc),
    iommu_match, iommu_attach, NULL, NULL);

/* IOMMU DMA map functions */
int	iommu_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
	    bus_size_t, int, bus_dmamap_t *);
int	iommu_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);
int	iommu_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int);
int	iommu_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int);
int	iommu_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);
void	iommu_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	iommu_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	    bus_size_t, int);

int	iommu_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *,
	    int, size_t, void **, int);
void	iommu_dmamem_unmap(bus_dma_tag_t, void *, size_t);
paddr_t	iommu_dmamem_mmap(bus_dma_tag_t, bus_dma_segment_t *,
	    int, off_t, int, int);
int	iommu_dvma_alloc(struct iommu_softc *, bus_dmamap_t, vaddr_t,
	    bus_size_t, int, bus_addr_t *, bus_size_t *);

/*
 * Print the location of some iommu-attached device (called just
 * before attaching that device).  If `iommu' is not NULL, the
 * device was found but not configured; print the iommu as well.
 * Return UNCONF (config_find ignores this if the device was configured).
 */
int
iommu_print(void *args, const char *iommu)
{
	struct iommu_attach_args *ia = args;

	if (iommu)
		aprint_normal("%s at %s", ia->iom_name, iommu);
	return (UNCONF);
}

int
iommu_match(device_t parent, cfdata_t cf, void *aux)
{
	struct mainbus_attach_args *ma = aux;

	if (CPU_ISSUN4 || CPU_ISSUN4C)
		return (0);
	return (strcmp(cf->cf_name, ma->ma_name) == 0);
}

/*
 * Attach the iommu.
 */
void
iommu_attach(device_t parent, device_t self, void *aux)
{
#if defined(SUN4M)
	struct iommu_softc *sc = device_private(self);
	struct mainbus_attach_args *ma = aux;
	struct sparc_bus_dma_tag *dmat = &sc->sc_dmatag;
	bus_space_handle_t bh;
	int node;
	int js1_implicit_iommu;
	int i, s;
	u_int iopte_table_pa;
	struct pglist mlist;
	u_int size;
	struct vm_page *m;
	vaddr_t va;
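
	/*
	 * Set up the DMA tag our children will inherit.  Only the map
	 * create/load/unload/sync and dmamem map/mmap entry points need
	 * IOMMU-specific handling; the rest fall through to the generic
	 * _bus_* implementations.
	 */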
	dmat->_cookie = sc;
	dmat->_dmamap_create = iommu_dmamap_create;
	dmat->_dmamap_destroy = _bus_dmamap_destroy;
	dmat->_dmamap_load = iommu_dmamap_load;
	dmat->_dmamap_load_mbuf = iommu_dmamap_load_mbuf;
	dmat->_dmamap_load_uio = iommu_dmamap_load_uio;
	dmat->_dmamap_load_raw = iommu_dmamap_load_raw;
	dmat->_dmamap_unload = iommu_dmamap_unload;
	dmat->_dmamap_sync = iommu_dmamap_sync;

	dmat->_dmamem_alloc = _bus_dmamem_alloc;
	dmat->_dmamem_free = _bus_dmamem_free;
	dmat->_dmamem_map = iommu_dmamem_map;
	dmat->_dmamem_unmap = _bus_dmamem_unmap;
	dmat->_dmamem_mmap = iommu_dmamem_mmap;

	/*
	 * The JS1/OF device tree does not have an iommu node; the sbus
	 * node sits directly under the root.  mainbus_attach detects this
	 * and calls us with the sbus node instead, so that we can attach
	 * an implicit iommu and hang that sbus node under it.
	 */
	node = ma->ma_node;
	if (strcmp(prom_getpropstring(node, "name"), "sbus") == 0)
		js1_implicit_iommu = 1;
	else
		js1_implicit_iommu = 0;

	/*
	 * Map registers into our space.  The PROM may have done this
	 * already, but I feel better if we have our own copy.  Plus, the
	 * prom doesn't map the entire register set.
	 *
	 * XXX struct iommureg is bigger than ra->ra_len; what are the
	 * other fields for?
	 */
	if (bus_space_map(ma->ma_bustag, ma->ma_paddr,
			  sizeof(struct iommureg), 0, &bh) != 0) {
		printf("iommu_attach: cannot map registers\n");
		return;
	}
	sc->sc_reg = (struct iommureg *)bh;

	sc->sc_cachecoherent = js1_implicit_iommu ? 0
		: node_has_property(node, "cache-coherence?");
	if (CACHEINFO.c_enabled == 0) /* XXX - is this correct? */
		sc->sc_cachecoherent = 0;

	sc->sc_pagesize = js1_implicit_iommu ? PAGE_SIZE
		: prom_getpropint(node, "page-size", PAGE_SIZE);

	/*
	 * Allocate memory for I/O pagetables.
	 * This takes 64K of contiguous physical memory to map 64M of
	 * DVMA space (starting at IOMMU_DVMA_BASE).
	 * The table must be aligned on a (-IOMMU_DVMA_BASE/pagesize)
	 * boundary (i.e. 64K for 64M of DVMA space).
	 */

	size = ((0 - IOMMU_DVMA_BASE) / sc->sc_pagesize) * sizeof(iopte_t);
	if (uvm_pglistalloc(size, vm_first_phys, vm_first_phys+vm_num_phys,
			    size, 0, &mlist, 1, 0) != 0)
		panic("iommu_attach: no memory");

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
	if (va == 0)
		panic("iommu_attach: no memory");

	sc->sc_ptes = (iopte_t *)va;

	m = TAILQ_FIRST(&mlist);
	iopte_table_pa = VM_PAGE_TO_PHYS(m);

	/* Map the pages */
	for (; m != NULL; m = TAILQ_NEXT(m,pageq.queue)) {
		paddr_t pa = VM_PAGE_TO_PHYS(m);
		pmap_kenter_pa(va, pa | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
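
	/*
	 * XXX - presumably the page table is entered uncached (PMAP_NC)
	 * above so that IOPTE updates made later by iommu_enter() and
	 * iommu_remove() reach memory, where the IOMMU fetches them,
	 * without additional cache flushing.
	 */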

	/*
	 * Copy entries from current IOMMU table.
	 * XXX - Why do we need to do this?
	 */
	iommu_copy_prom_entries(sc);

	/*
	 * Now we can install our new pagetable into the IOMMU
	 */
	sc->sc_range = 0 - IOMMU_DVMA_BASE;
	sc->sc_dvmabase = IOMMU_DVMA_BASE;

	/* calculate log2(sc->sc_range/16MB) */
	i = ffs(sc->sc_range/(1 << 24)) - 1;
	if ((1 << i) != (sc->sc_range/(1 << 24)))
		panic("iommu: bad range: %d", i);

	s = splhigh();
	IOMMU_FLUSHALL(sc);

	/* Load range and physical address of PTEs */
	sc->sc_reg->io_cr = (sc->sc_reg->io_cr & ~IOMMU_CTL_RANGE) |
	    (i << IOMMU_CTL_RANGESHFT) | IOMMU_CTL_ME;
	sc->sc_reg->io_bar = (iopte_table_pa >> 4) & IOMMU_BAR_IBA;

	IOMMU_FLUSHALL(sc);
	splx(s);

	printf(": version 0x%x/0x%x, page-size %d, range %dMB\n",
	    (sc->sc_reg->io_cr & IOMMU_CTL_VER) >> 24,
	    (sc->sc_reg->io_cr & IOMMU_CTL_IMPL) >> 28,
	    sc->sc_pagesize,
	    sc->sc_range >> 20);

	sc->sc_dvmamap = vmem_create("iommudvma",
			IOMMU_DVMA_BASE,
			IOMMU_DVMA_END - IOMMU_DVMA_BASE,
			PAGE_SIZE,		/* quantum */
			NULL,			/* importfn */
			NULL,			/* releasefn */
			NULL,			/* source */
			0,			/* qcache_max */
			VM_SLEEP,
			IPL_VM);

	devhandle_t selfh = device_handle(self);

	/*
	 * If we are attaching the implicit iommu on JS1/OF we do not have
	 * an iommu node to traverse; instead, mainbus_attach passed us
	 * the sbus node in ma->ma_node.  Attach it as the only iommu child.
	 */
	if (js1_implicit_iommu) {
		struct iommu_attach_args ia;
		struct openprom_addr sbus_iommu_reg = { 0, 0x10001000, 0x28 };

		memset(&ia, 0, sizeof ia);

		/* Propagate BUS & DMA tags */
		ia.iom_bustag = ma->ma_bustag;
		ia.iom_dmatag = &sc->sc_dmatag;

		ia.iom_name = "sbus";
		ia.iom_node = node;
		ia.iom_reg = &sbus_iommu_reg;
		ia.iom_nreg = 1;

		config_found(self, (void *)&ia, iommu_print,
		    CFARGS(.devhandle = prom_node_to_devhandle(selfh, node)));
		return;
	}

	/*
	 * Loop through ROM children (expect Sbus among them).
	 */
	for (node = firstchild(node); node; node = nextsibling(node)) {
		struct iommu_attach_args ia;

		memset(&ia, 0, sizeof ia);
		ia.iom_name = prom_getpropstring(node, "name");

		/* Propagate BUS & DMA tags */
		ia.iom_bustag = ma->ma_bustag;
		ia.iom_dmatag = &sc->sc_dmatag;

		ia.iom_node = node;

		ia.iom_reg = NULL;
		prom_getprop(node, "reg", sizeof(struct openprom_addr),
			     &ia.iom_nreg, &ia.iom_reg);

		config_found(self, (void *)&ia, iommu_print,
		    CFARGS(.devhandle = prom_node_to_devhandle(selfh, node)));
		if (ia.iom_reg != NULL)
			free(ia.iom_reg, M_DEVBUF);
	}
#endif
}

#if defined(SUN4M)
static void
iommu_copy_prom_entries(struct iommu_softc *sc)
{
	u_int pbase, pa;
	u_int range;
	iopte_t *tpte_p;
	u_int pagesz = sc->sc_pagesize;
	int use_ac = (cpuinfo.cpu_impl == 4 && cpuinfo.mxcc);
	u_int mmupcr_save;

	/*
	 * We read in the original table using MMU bypass and copy all
	 * of its entries to the appropriate place in our new table,
	 * even if the sizes are different.
	 * This is pretty easy since we know DVMA ends at 0xffffffff.
	 */

	range = (1 << 24) <<
	    ((sc->sc_reg->io_cr & IOMMU_CTL_RANGE) >> IOMMU_CTL_RANGESHFT);

	pbase = (sc->sc_reg->io_bar & IOMMU_BAR_IBA) <<
	    (14 - IOMMU_BAR_IBASHFT);

	if (use_ac) {
		/*
		 * Set MMU AC bit so we'll still read from the cache
		 * in by-pass mode.
		 */
		mmupcr_save = lda(SRMMU_PCR, ASI_SRMMU);
		sta(SRMMU_PCR, ASI_SRMMU, mmupcr_save | VIKING_PCR_AC);
	} else
		mmupcr_save = 0; /* XXX - avoid GCC `uninitialized' warning */

	/* Flush entire IOMMU TLB before messing with the in-memory tables */
	IOMMU_FLUSHALL(sc);

	/*
	 * tpte_p = top of our PTE table
	 * pa     = top of current PTE table
	 * Then work downwards and copy entries until we hit the bottom
	 * of either table.
	 */
	for (tpte_p = &sc->sc_ptes[((0 - IOMMU_DVMA_BASE)/pagesz) - 1],
	     pa = (u_int)pbase + (range/pagesz - 1)*sizeof(iopte_t);
	     tpte_p >= &sc->sc_ptes[0] && pa >= (u_int)pbase;
	     tpte_p--, pa -= sizeof(iopte_t)) {

		*tpte_p = lda(pa, ASI_BYPASS);
	}

	if (use_ac) {
		/* restore mmu after bug-avoidance */
		sta(SRMMU_PCR, ASI_SRMMU, mmupcr_save);
	}
}
#endif
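
/*
 * iommu_enter: install a single page mapping from DVMA address `dva'
 * to physical address `pa'.  The IOPTE is built from the page frame
 * number shifted into place (IOPTE_PPNSHFT) plus the valid and
 * writable bits; the cacheable bit is set only when the IOMMU module
 * is cache-coherent.
 */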
static void
iommu_enter(struct iommu_softc *sc, bus_addr_t dva, paddr_t pa)
{
	int pte;

	/* This routine relies on the fact that sc->sc_pagesize == PAGE_SIZE */

#ifdef DIAGNOSTIC
	if (dva < sc->sc_dvmabase)
		panic("iommu_enter: dva 0x%lx not in DVMA space", (long)dva);
#endif

	pte = atop(pa) << IOPTE_PPNSHFT;
	pte &= IOPTE_PPN;
	pte |= IOPTE_V | IOPTE_W | (sc->sc_cachecoherent ? IOPTE_C : 0);
	sc->sc_ptes[atop(dva - sc->sc_dvmabase)] = pte;
	IOMMU_FLUSHPAGE(sc, dva);
}

/*
 * iommu_remove: removes mappings created by iommu_enter
 */
static void
iommu_remove(struct iommu_softc *sc, bus_addr_t dva, bus_size_t len)
{
	u_int pagesz = sc->sc_pagesize;
	bus_addr_t base = sc->sc_dvmabase;

#ifdef DEBUG
	if (dva < base)
		panic("iommu_remove: va 0x%lx not in DVMA space", (long)dva);
#endif

	while ((long)len > 0) {
#ifdef notyet
#ifdef DEBUG
		if ((sc->sc_ptes[atop(dva - base)] & IOPTE_V) == 0)
			panic("iommu_remove: clearing invalid pte at dva 0x%lx",
			      (long)dva);
#endif
#endif
		sc->sc_ptes[atop(dva - base)] = 0;
		IOMMU_FLUSHPAGE(sc, dva);
		len -= pagesz;
		dva += pagesz;
	}
}

#if 0	/* These registers aren't there??? */
void
iommu_error(void)
{
	struct iommu_softc *sc = X;
	struct iommureg *iop = sc->sc_reg;

	printf("iommu: afsr 0x%x, afar 0x%x\n", iop->io_afsr, iop->io_afar);
	printf("iommu: mfsr 0x%x, mfar 0x%x\n", iop->io_mfsr, iop->io_mfar);
}

int
iommu_alloc(u_int va, u_int len)
{
	struct iommu_softc *sc = X;
	int off, tva, iovaddr, pte;
	paddr_t pa;

	off = (int)va & PGOFSET;
	len = round_page(len + off);
	va -= off;

	if ((int)sc->sc_dvmacur + len > 0)
		sc->sc_dvmacur = sc->sc_dvmabase;

	iovaddr = tva = sc->sc_dvmacur;
	sc->sc_dvmacur += len;
	while (len) {
		(void) pmap_extract(pmap_kernel(), va, &pa);

#define IOMMU_PPNSHIFT	8
#define IOMMU_V		0x00000002
#define IOMMU_W		0x00000004

		pte = atop(pa) << IOMMU_PPNSHIFT;
		pte |= IOMMU_V | IOMMU_W;
		sta(sc->sc_ptes + atop(tva - sc->sc_dvmabase), ASI_BYPASS, pte);
		sc->sc_reg->io_flushpage = tva;
		len -= PAGE_SIZE;
		va += PAGE_SIZE;
		tva += PAGE_SIZE;
	}
	return iovaddr + off;
}
#endif


/*
 * IOMMU DMA map functions.
 */
int
iommu_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
		    bus_size_t maxsegsz, bus_size_t boundary, int flags,
		    bus_dmamap_t *dmamp)
{
	bus_dmamap_t map;
	int error;

	if ((error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
					boundary, flags, &map)) != 0)
		return (error);

	if ((flags & BUS_DMA_24BIT) != 0) {
		/* Limit this map to the range usable by `24-bit' devices */
		map->_dm_ex_start = D24_DVMA_BASE;
		map->_dm_ex_end = D24_DVMA_END - 1;
	} else {
		/* Enable allocations from the entire map */
		map->_dm_ex_start = VMEM_ADDR_MIN;
		map->_dm_ex_end = VMEM_ADDR_MAX;
	}

	*dmamp = map;
	return (0);
}
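
/*
 * Note: BUS_DMA_24BIT is requested by drivers for devices that can only
 * generate 24-bit DVMA addresses (e.g. the LANCE Ethernet chip); such
 * maps are confined to the D24_DVMA window set up above.
 */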

/*
 * Internal routine to allocate space in the IOMMU map.
 */
int
iommu_dvma_alloc(struct iommu_softc *sc, bus_dmamap_t map,
		 vaddr_t va, bus_size_t len, int flags,
		 bus_addr_t *dvap, bus_size_t *sgsizep)
{
	bus_size_t sgsize;
	u_long align, voff;
	vmem_addr_t dvaddr;
	int error;
	int pagesz = PAGE_SIZE;

	/*
	 * Remember page offset, then truncate the buffer address to
	 * a page boundary.
	 */
	voff = va & (pagesz - 1);
	va &= -pagesz;

	if (len > map->_dm_size)
		return (EINVAL);

	sgsize = (len + voff + pagesz - 1) & -pagesz;
	align = dvma_cachealign ? dvma_cachealign : map->_dm_align;

	const vm_flag_t vmflags = VM_BESTFIT |
	    ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);

	error = vmem_xalloc(sc->sc_dvmamap, sgsize,
			align,			/* alignment */
			va & (align-1),		/* phase */
			map->_dm_boundary,	/* nocross */
			map->_dm_ex_start,	/* minaddr */
			map->_dm_ex_end,	/* maxaddr */
			vmflags,
			&dvaddr);

	*dvap = (bus_addr_t)dvaddr;
	*sgsizep = sgsize;
	return (error);
}

/*
 * Prepare buffer for DMA transfer.
 */
int
iommu_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
		  void *buf, bus_size_t buflen,
		  struct proc *p, int flags)
{
	struct iommu_softc *sc = t->_cookie;
	bus_size_t sgsize;
	bus_addr_t dva;
	vaddr_t va = (vaddr_t)buf;
	int pagesz = PAGE_SIZE;
	pmap_t pmap;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	/* Allocate IOMMU resources */
	if ((error = iommu_dvma_alloc(sc, map, va, buflen, flags,
				      &dva, &sgsize)) != 0)
		return (error);

	if ((sc->sc_cachecoherent == 0) ||
	    (curcpu()->cacheinfo.ec_totalsize == 0))
		cache_flush(buf, buflen); /* XXX - move to bus_dma_sync? */

	/*
	 * We always use just one segment.
	 */
	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_sgsize = sgsize;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; sgsize != 0; ) {
		paddr_t pa;
		/*
		 * Get the physical address for this page.
		 */
		if (!pmap_extract(pmap, va, &pa)) {
			iommu_dmamap_unload(t, map);
			return (EFAULT);
		}

		iommu_enter(sc, dva, pa);

		dva += pagesz;
		va += pagesz;
		sgsize -= pagesz;
	}

	return (0);
}
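
/*
 * Note: iommu_dvma_alloc() passes `va & (align - 1)' as the vmem phase,
 * which keeps the DVMA address congruent to the CPU virtual address
 * modulo the (virtual) cache alignment, so the two views of the buffer
 * do not alias in the cache.  And since the IOMMU makes the buffer
 * appear contiguous in DVMA space, a load always yields a single segment.
 */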

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
iommu_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
		       struct mbuf *m, int flags)
{

	panic("_bus_dmamap_load_mbuf: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
iommu_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
		      struct uio *uio, int flags)
{

	panic("_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
iommu_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
		      bus_dma_segment_t *segs, int nsegs, bus_size_t size,
		      int flags)
{
	struct iommu_softc *sc = t->_cookie;
	struct vm_page *m;
	paddr_t pa;
	bus_addr_t dva;
	bus_size_t sgsize;
	struct pglist *mlist;
	int pagesz = PAGE_SIZE;
	int error;

	map->dm_nsegs = 0;

	/* Allocate IOMMU resources */
	if ((error = iommu_dvma_alloc(sc, map, segs[0]._ds_va, size,
				      flags, &dva, &sgsize)) != 0)
		return (error);

	/*
	 * Note DVMA address in case bus_dmamem_map() is called later.
	 * It can then ensure cache coherency by choosing a KVA that
	 * is aligned to `ds_addr'.
	 */
	segs[0].ds_addr = dva;
	segs[0].ds_len = size;

	map->dm_segs[0].ds_addr = dva;
	map->dm_segs[0].ds_len = size;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/* Map physical pages into IOMMU */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq.queue)) {
		if (sgsize == 0)
			panic("iommu_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(m);
		iommu_enter(sc, dva, pa);
		dva += pagesz;
		sgsize -= pagesz;
	}

	map->dm_nsegs = 1;
	map->dm_mapsize = size;

	return (0);
}

/*
 * Unload an IOMMU DMA map.
 */
void
iommu_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct iommu_softc *sc = t->_cookie;
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	bus_addr_t dva;
	bus_size_t len;
	int i;

	for (i = 0; i < nsegs; i++) {
		dva = segs[i].ds_addr & -PAGE_SIZE;
		len = segs[i]._ds_sgsize;

		iommu_remove(sc, dva, len);
		vmem_xfree(sc->sc_dvmamap, dva, len);
	}

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * DMA map synchronization.
 */
void
iommu_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
		  bus_addr_t offset, bus_size_t len, int ops)
{

	/*
	 * XXX Should flush CPU write buffers.
	 */
}
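
/*
 * The functions below handle DMA-safe memory obtained with
 * bus_dmamem_alloc().  If the segment was already loaded with
 * iommu_dmamap_load_raw(), iommu_dmamem_map() skews the KVA it picks
 * to match the recorded DVMA address, and the pages are mapped
 * uncached unless the IOMMU module is cache-coherent.
 */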

/*
 * Map DMA-safe memory.
 */
int
iommu_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
		 size_t size, void **kvap, int flags)
{
	struct iommu_softc *sc = t->_cookie;
	struct vm_page *m;
	vaddr_t va;
	bus_addr_t addr;
	struct pglist *mlist;
	int cbit;
	u_long align;
	int pagesz = PAGE_SIZE;

	if (nsegs != 1)
		panic("iommu_dmamem_map: nsegs = %d", nsegs);

	cbit = sc->sc_cachecoherent ? 0 : PMAP_NC;
	align = dvma_cachealign ? dvma_cachealign : pagesz;

	size = round_page(size);

	/*
	 * In case the segment has already been loaded by
	 * iommu_dmamap_load_raw(), find a region of kernel virtual
	 * addresses that can accommodate our alignment requirements.
	 */
	va = _bus_dma_valloc_skewed(size, 0, align,
				    segs[0].ds_addr & (align - 1));
	if (va == 0)
		return (ENOMEM);

	segs[0]._ds_va = va;
	*kvap = (void *)va;

	/*
	 * Map the pages allocated in _bus_dmamem_alloc() to the
	 * kernel virtual address space.
	 */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq.queue)) {

		if (size == 0)
			panic("iommu_dmamem_map: size botch");

		addr = VM_PAGE_TO_PHYS(m);
		pmap_kenter_pa(va, addr | cbit,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
#if 0
		if (flags & BUS_DMA_COHERENT)
			/* XXX */;
#endif
		va += pagesz;
		size -= pagesz;
	}
	pmap_update(pmap_kernel());

	return (0);
}

void
iommu_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PAGE_MASK)
		panic("iommu_dmamem_unmap");
#endif

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	uvm_unmap(kernel_map, (vaddr_t)kva, (vaddr_t)kva + size);
}


/*
 * mmap(2)'ing DMA-safe memory.
 */
paddr_t
iommu_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
		  off_t off, int prot, int flags)
{

	panic("_bus_dmamem_mmap: not implemented");
}
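
#if 0
/*
 * Example (not compiled): a rough sketch of how an sbus child driver
 * would use the DMA tag handed down in iommu_attach() above through the
 * machine-independent bus_dma(9) interface.  The function and variable
 * names here are hypothetical; only the bus_dmamap_* calls are real.
 */
static int
example_dma_write(bus_dma_tag_t dmat, void *buf, bus_size_t len)
{
	bus_dmamap_t map;
	int error;

	/* One segment suffices: the IOMMU makes the buffer DVMA-contiguous. */
	error = bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT, &map);
	if (error != 0)
		return (error);

	/* Allocates DVMA space and enters IOPTEs (iommu_dmamap_load). */
	error = bus_dmamap_load(dmat, map, buf, len, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		bus_dmamap_destroy(dmat, map);
		return (error);
	}

	bus_dmamap_sync(dmat, map, 0, len, BUS_DMA_PREWRITE);
	/* ... hand map->dm_segs[0].ds_addr to the device and wait ... */
	bus_dmamap_sync(dmat, map, 0, len, BUS_DMA_POSTWRITE);

	bus_dmamap_unload(dmat, map);
	bus_dmamap_destroy(dmat, map);
	return (0);
}
#endif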