/*	$NetBSD: iommu.c,v 1.22 1998/08/23 09:53:47 pk Exp $ */

/*
 * Copyright (c) 1996
 * 	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1995 	Paul Kranenburg
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by Paul Kranenburg.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <uvm/uvm.h>

#define _SPARC_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/autoconf.h>
#include <machine/ctlreg.h>
#include <sparc/sparc/asm.h>
#include <sparc/sparc/vaddrs.h>
#include <sparc/sparc/cpuvar.h>
#include <sparc/sparc/iommureg.h>
#include <sparc/sparc/iommuvar.h>

struct iommu_softc {
	struct device	sc_dev;		/* base device */
	struct iommureg	*sc_reg;
	u_int		sc_pagesize;
	u_int		sc_range;
	bus_addr_t	sc_dvmabase;
	iopte_t		*sc_ptes;
	int		sc_hasiocache;
};
struct	iommu_softc *iommu_sc;/*XXX*/
int	has_iocache;
u_long	dvma_cachealign;

struct extent *iommu_dvmamap;


/* autoconfiguration driver */
int	iommu_print __P((void *, const char *));
void	iommu_attach __P((struct device *, struct device *, void *));
int	iommu_match __P((struct device *, struct cfdata *, void *));

struct cfattach iommu_ca = {
	sizeof(struct iommu_softc), iommu_match, iommu_attach
};

/* IOMMU DMA map functions */
int	iommu_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));
int	iommu_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));
int	iommu_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));
int	iommu_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));
void	iommu_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void	iommu_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	    bus_size_t, int));

int	iommu_dmamem_alloc __P((bus_dma_tag_t tag, bus_size_t size,
	    bus_size_t alignment, bus_size_t boundary,
	    bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags));
void	iommu_dmamem_free __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
	    int nsegs));
int	iommu_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
	    int nsegs, size_t size, caddr_t *kvap, int flags));
int	iommu_dmamem_mmap __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
	    int nsegs, int off, int prot, int flags));


struct sparc_bus_dma_tag iommu_dma_tag = {
	NULL,
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	iommu_dmamap_load,
	iommu_dmamap_load_mbuf,
	iommu_dmamap_load_uio,
	iommu_dmamap_load_raw,
	iommu_dmamap_unload,
	iommu_dmamap_sync,

	iommu_dmamem_alloc,
	iommu_dmamem_free,
	iommu_dmamem_map,
	_bus_dmamem_unmap,
	iommu_dmamem_mmap
};
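/*
 * Illustrative usage sketch (not part of this driver): children of the
 * iommu consume this tag through the generic bus_dma(9) interface rather
 * than by calling the iommu_* functions directly.  Roughly, assuming a
 * hypothetical child softc `sc' that stashed the tag handed down at
 * attach time:
 *
 *	bus_dmamap_t map;
 *
 *	bus_dmamap_create(sc->sc_dmatag, MAXBSIZE, 1, MAXBSIZE, 0,
 *	    BUS_DMA_NOWAIT, &map);
 *	bus_dmamap_load(sc->sc_dmatag, map, buf, len, NULL, BUS_DMA_NOWAIT);
 *	... program the device with map->dm_segs[0].ds_addr (a DVMA address) ...
 *	bus_dmamap_sync(sc->sc_dmatag, map, 0, len, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->sc_dmatag, map);
 *	bus_dmamap_destroy(sc->sc_dmatag, map);
 *
 * The create/destroy/unmap entries above are the shared _bus_*
 * implementations; load, unload, sync and the dmamem entries dispatch to
 * the iommu_* implementations below.
 */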
/*
 * Print the location of some iommu-attached device (called just
 * before attaching that device).  If `iommu' is not NULL, the
 * device was found but not configured; print the iommu as well.
 * Return UNCONF (config_find ignores this if the device was configured).
 */
int
iommu_print(args, iommu)
	void *args;
	const char *iommu;
{
	struct iommu_attach_args *ia = args;

	if (iommu)
		printf("%s at %s", ia->iom_name, iommu);
	return (UNCONF);
}

int
iommu_match(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct mainbus_attach_args *ma = aux;

	if (CPU_ISSUN4OR4C)
		return (0);
	return (strcmp(cf->cf_driver->cd_name, ma->ma_name) == 0);
}

/*
 * Attach the iommu.
 */
void
iommu_attach(parent, self, aux)
	struct device *parent;
	struct device *self;
	void *aux;
{
#if defined(SUN4M)
	struct iommu_softc *sc = (struct iommu_softc *)self;
	struct mainbus_attach_args *ma = aux;
	int node;
	struct bootpath *bp;
	bus_space_handle_t bh;
	u_int pbase, pa;
	int i, mmupcrsave, s;
	iopte_t *tpte_p;
	extern u_int *kernel_iopte_table;
	extern u_int kernel_iopte_table_pa;

/*XXX-GCC!*/mmupcrsave=0;
	iommu_sc = sc;
	/*
	 * XXX there is only one iommu, for now -- do not know how to
	 * address children on others
	 */
	if (sc->sc_dev.dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}
	node = ma->ma_node;

#if 0
	if (ra->ra_vaddr)
		sc->sc_reg = (struct iommureg *)ca->ca_ra.ra_vaddr;
#else
	/*
	 * Map registers into our space. The PROM may have done this
	 * already, but I feel better if we have our own copy. Plus, the
	 * prom doesn't map the entire register set
	 *
	 * XXX struct iommureg is bigger than ra->ra_len; what are the
	 *     other fields for?
	 */
	if (bus_space_map2(
			ma->ma_bustag,
			ma->ma_iospace,
			ma->ma_paddr,
			sizeof(struct iommureg),
			0,
			0,
			&bh) != 0) {
		printf("iommu_attach: cannot map registers\n");
		return;
	}
	sc->sc_reg = (struct iommureg *)bh;
#endif

	sc->sc_hasiocache = node_has_property(node, "cache-coherence?");
	if (CACHEINFO.c_enabled == 0) /* XXX - is this correct? */
		sc->sc_hasiocache = 0;
	has_iocache = sc->sc_hasiocache; /* Set global flag */

	sc->sc_pagesize = getpropint(node, "page-size", NBPG);
	sc->sc_range = (1 << 24) <<
	    ((sc->sc_reg->io_cr & IOMMU_CTL_RANGE) >> IOMMU_CTL_RANGESHFT);
#if 0
	sc->sc_dvmabase = (0 - sc->sc_range);
#endif
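	/*
	 * Recover the physical base address of the IOPTE table installed
	 * by the PROM from the IBA field of the base address register;
	 * its entries are copied into our own table below.
	 */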
	pbase = (sc->sc_reg->io_bar & IOMMU_BAR_IBA) <<
			(14 - IOMMU_BAR_IBASHFT);

	/*
	 * Now we build our own copy of the IOMMU page tables. We need to
	 * do this since we're going to change the range to give us 64M of
	 * mappings, and thus we can move DVMA space down to 0xfd000000 to
	 * give us lots of space and to avoid bumping into the PROM, etc.
	 *
	 * XXX Note that this is rather messy.
	 */
	sc->sc_ptes = (iopte_t *) kernel_iopte_table;

	/*
	 * Now discache the page tables so that the IOMMU sees our
	 * changes.
	 */
	kvm_uncache((caddr_t)sc->sc_ptes,
	    (((0 - IOMMU_DVMA_BASE)/sc->sc_pagesize) * sizeof(iopte_t)) / NBPG);

	/*
	 * Ok. We've got to read in the original table using MMU bypass,
	 * and copy all of its entries to the appropriate place in our
	 * new table, even if the sizes are different.
	 * This is pretty easy since we know DVMA ends at 0xffffffff.
	 *
	 * XXX: PGOFSET, NBPG assume same page size as SRMMU
	 */
	if (cpuinfo.cpu_impl == 4 && cpuinfo.mxcc) {
		/* set MMU AC bit */
		sta(SRMMU_PCR, ASI_SRMMU,
		    ((mmupcrsave = lda(SRMMU_PCR, ASI_SRMMU)) | VIKING_PCR_AC));
	}

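	/*
	 * Both tables map DVMA space ending at 0xffffffff, so walk them
	 * backwards in lockstep from their last entries: tpte_p runs down
	 * our new table while pa runs down the old (possibly smaller)
	 * table, flushing the IOMMU for each page whose translation is
	 * rewritten.
	 */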
	for (tpte_p = &sc->sc_ptes[((0 - IOMMU_DVMA_BASE)/NBPG) - 1],
	     pa = (u_int)pbase - sizeof(iopte_t) +
		   ((u_int)sc->sc_range/NBPG)*sizeof(iopte_t);
	     tpte_p >= &sc->sc_ptes[0] && pa >= (u_int)pbase;
	     tpte_p--, pa -= sizeof(iopte_t)) {

		IOMMU_FLUSHPAGE(sc,
			     (tpte_p - &sc->sc_ptes[0])*NBPG + IOMMU_DVMA_BASE);
		*tpte_p = lda(pa, ASI_BYPASS);
	}
	if (cpuinfo.cpu_impl == 4 && cpuinfo.mxcc) {
		/* restore mmu after bug-avoidance */
		sta(SRMMU_PCR, ASI_SRMMU, mmupcrsave);
	}

	/*
	 * Now we can install our new pagetable into the IOMMU
	 */
	sc->sc_range = 0 - IOMMU_DVMA_BASE;
	sc->sc_dvmabase = IOMMU_DVMA_BASE;

	/* calculate log2(sc->sc_range/16MB) */
	i = ffs(sc->sc_range/(1 << 24)) - 1;
	if ((1 << i) != (sc->sc_range/(1 << 24)))
		panic("bad iommu range: %d\n",i);
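	/*
	 * The RANGE field encodes the DVMA size as (16MB << i), so e.g.
	 * a 64MB range yields i = ffs(4) - 1 = 2; the check above rejects
	 * any range that is not a power-of-two multiple of 16MB.
	 */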

	s = splhigh();
	IOMMU_FLUSHALL(sc);

	sc->sc_reg->io_cr = (sc->sc_reg->io_cr & ~IOMMU_CTL_RANGE) |
			  (i << IOMMU_CTL_RANGESHFT) | IOMMU_CTL_ME;
	sc->sc_reg->io_bar = (kernel_iopte_table_pa >> 4) & IOMMU_BAR_IBA;

	IOMMU_FLUSHALL(sc);
	splx(s);

	printf(": version 0x%x/0x%x, page-size %d, range %dMB\n",
		(sc->sc_reg->io_cr & IOMMU_CTL_VER) >> 24,
		(sc->sc_reg->io_cr & IOMMU_CTL_IMPL) >> 28,
		sc->sc_pagesize,
		sc->sc_range >> 20);

	/* Propagate bootpath */
	if (ma->ma_bp != NULL && strcmp(ma->ma_bp->name, "iommu") == 0)
		bp = ma->ma_bp + 1;
	else
		bp = NULL;

	iommu_dvmamap = extent_create("iommudvma",
					IOMMU_DVMA_BASE, IOMMU_DVMA_END,
					M_DEVBUF, 0, 0, EX_NOWAIT);
	if (iommu_dvmamap == NULL)
		panic("iommu: unable to allocate DVMA map");

	/*
	 * Loop through ROM children (expect Sbus among them).
	 */
	for (node = firstchild(node); node; node = nextsibling(node)) {
		struct iommu_attach_args ia;

		bzero(&ia, sizeof ia);
		ia.iom_name = getpropstring(node, "name");

		/* Propagate BUS & DMA tags */
		ia.iom_bustag = ma->ma_bustag;
		ia.iom_dmatag = &iommu_dma_tag;
		ia.iom_node = node;
		ia.iom_bp = bp;
		(void) config_found(&sc->sc_dev, (void *)&ia, iommu_print);
	}
#endif
}

void
iommu_enter(va, pa)
	bus_addr_t va;
	paddr_t pa;
{
	struct iommu_softc *sc = iommu_sc;
	int pte;

#ifdef DEBUG
	if (va < sc->sc_dvmabase)
		panic("iommu_enter: va 0x%lx not in DVMA space", (long)va);
#endif

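	/*
	 * Build the IOPTE by hand: the physical page number goes in the
	 * PPN field, and the valid, writable and (if DVMA is cache-coherent)
	 * cacheable bits are OR'ed in.
	 */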
	pte = atop(pa) << IOPTE_PPNSHFT;
	pte &= IOPTE_PPN;
	pte |= IOPTE_V | IOPTE_W | (has_iocache ? IOPTE_C : 0);
	sc->sc_ptes[atop(va - sc->sc_dvmabase)] = pte;
	IOMMU_FLUSHPAGE(sc, va);
}

/*
 * iommu_remove: removes mappings created by iommu_enter
 */
void
iommu_remove(va, len)
	bus_addr_t va;
	bus_size_t len;
{
	struct iommu_softc *sc = iommu_sc;
	u_int pagesz = sc->sc_pagesize;
	bus_addr_t base = sc->sc_dvmabase;

#ifdef DEBUG
	if (va < base)
		panic("iommu_remove: va 0x%lx not in DVMA space", (long)va);
#endif

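	/*
	 * Invalidate one page at a time, flushing the corresponding
	 * IOMMU TLB entry after each PTE is cleared.
	 */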
	while ((long)len > 0) {
#ifdef notyet
#ifdef DEBUG
		if ((sc->sc_ptes[atop(va - base)] & IOPTE_V) == 0)
			panic("iommu_remove: clearing invalid pte at va 0x%lx",
			      (long)va);
#endif
#endif
		sc->sc_ptes[atop(va - base)] = 0;
		IOMMU_FLUSHPAGE(sc, va);
		len -= pagesz;
		va += pagesz;
	}
}

#if 0	/* These registers aren't there??? */
void
iommu_error()
{
	struct iommu_softc *sc = X;
	struct iommureg *iop = sc->sc_reg;

	printf("iommu: afsr 0x%x, afar 0x%x\n", iop->io_afsr, iop->io_afar);
	printf("iommu: mfsr 0x%x, mfar 0x%x\n", iop->io_mfsr, iop->io_mfar);
}
int
iommu_alloc(va, len)
	u_int va, len;
{
	struct iommu_softc *sc = X;
	int off, tva, pa, iovaddr, pte;

	off = (int)va & PGOFSET;
	len = round_page(len + off);
	va -= off;

if ((int)sc->sc_dvmacur + len > 0)
	sc->sc_dvmacur = sc->sc_dvmabase;

	iovaddr = tva = sc->sc_dvmacur;
	sc->sc_dvmacur += len;
	while (len) {
		pa = pmap_extract(pmap_kernel(), va);

#define IOMMU_PPNSHIFT	8
#define IOMMU_V		0x00000002
#define IOMMU_W		0x00000004

		pte = atop(pa) << IOMMU_PPNSHIFT;
		pte |= IOMMU_V | IOMMU_W;
		sta(sc->sc_ptes + atop(tva - sc->sc_dvmabase), ASI_BYPASS, pte);
		sc->sc_reg->io_flushpage = tva;
		len -= NBPG;
		va += NBPG;
		tva += NBPG;
	}
	return iovaddr + off;
}
#endif


/*
 * IOMMU DMA map functions.
 */
int
iommu_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	bus_size_t sgsize;
	bus_addr_t dvmaddr, curaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	pmap_t pmap;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	sgsize = round_page(buflen + (vaddr & PGOFSET));

	/*
	 * XXX Need to implement "don't DMA across this boundary".
	 */
	if (map->_dm_boundary != 0)
		panic("bus_dmamap_load: boundaries not implemented");

	if (extent_alloc(iommu_dvmamap, sgsize, NBPG, EX_NOBOUNDARY,
            EX_NOWAIT, (u_long *)&dvmaddr) != 0)
		return (ENOMEM);

	cpuinfo.cache_flush(buf, buflen);

	/*
	 * We always use just one segment.
	 */
	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	map->dm_segs[0].ds_addr = dvmaddr + (vaddr & PGOFSET);
	map->dm_segs[0].ds_len = sgsize /*was:buflen*/;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

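	/*
	 * Walk the buffer one page at a time: look up the physical page
	 * backing each virtual address in the chosen pmap and enter it at
	 * consecutive DVMA pages, so the device sees a single contiguous
	 * DVMA segment regardless of physical scatter.
	 */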
	for (; buflen > 0; ) {
		/*
		 * Get the physical address for this page.
		 */
		curaddr = (bus_addr_t)pmap_extract(pmap, vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - (vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		iommu_enter(dvmaddr, curaddr & ~PGOFSET);

		dvmaddr += NBPG;
		vaddr += sgsize;
		buflen -= sgsize;
	}
	return (0);
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
iommu_dmamap_load_mbuf(t, map, m, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m;
	int flags;
{

	panic("_bus_dmamap_load_mbuf: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
iommu_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{

	panic("_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
iommu_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
iommu_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	bus_addr_t addr;
	bus_size_t len;

	if (map->dm_nsegs != 1)
		panic("_bus_dmamap_unload: nsegs = %d", map->dm_nsegs);

	addr = map->dm_segs[0].ds_addr & ~PGOFSET;
	len = map->dm_segs[0].ds_len;

	iommu_remove(addr, len);
	if (extent_free(iommu_dvmamap, addr, len, EX_NOWAIT) != 0)
		printf("warning: %ld bytes of DVMA space lost\n", (long)len);

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
iommu_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should flush CPU write buffers.
	 */
}

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
iommu_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	paddr_t pa;
	bus_addr_t dvmaddr;
	vm_page_t m;
	int error;
	struct pglist *mlist;

	size = round_page(size);
	error = _bus_dmamem_alloc_common(t, size, alignment, boundary,
					 segs, nsegs, rsegs, flags);
	if (error != 0)
		return (error);

	if (extent_alloc(iommu_dvmamap, size, NBPG, boundary,
			 (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
			 (u_long *)&dvmaddr) != 0)
		return (ENOMEM);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	segs[0].ds_addr = dvmaddr;
	segs[0].ds_len = size;
	*rsegs = 1;
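	/*
	 * Note that ds_addr is a DVMA address, not a physical one; the
	 * underlying pages remain on the segment's _ds_mlist and are
	 * entered into the IOMMU below.
	 */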

	mlist = segs[0]._ds_mlist;
	/* Map memory into DVMA space */
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
		pa = VM_PAGE_TO_PHYS(m);

		iommu_enter(dvmaddr, pa);
		dvmaddr += PAGE_SIZE;
	}

	return (0);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
iommu_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	bus_addr_t addr;
	bus_size_t len;

	if (nsegs != 1)
		panic("bus_dmamem_free: nsegs = %d", nsegs);

	addr = segs[0].ds_addr;
	len = segs[0].ds_len;

	iommu_remove(addr, len);
	if (extent_free(iommu_dvmamap, addr, len, EX_NOWAIT) != 0)
		printf("warning: %ld bytes of DVMA space lost\n", (long)len);
	/*
	 * Return the list of pages back to the VM system.
	 */
	_bus_dmamem_free_common(t, segs, nsegs);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
iommu_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	vm_page_t m;
	vaddr_t va, sva;
	bus_addr_t addr;
	struct pglist *mlist;
	int cbit;
	size_t oversize;
	u_long align;
	extern int has_iocache;
	extern u_long dvma_cachealign;

	if (nsegs != 1)
		panic("iommu_dmamem_map: nsegs = %d", nsegs);

	cbit = has_iocache ? 0 : PMAP_NC;
	align = dvma_cachealign ? dvma_cachealign : PAGE_SIZE;

	size = round_page(size);

	/*
	 * Find a region of kernel virtual addresses that can accommodate
	 * our alignment requirements.
	 */
	oversize = size + align - PAGE_SIZE;
	sva = uvm_km_valloc(kernel_map, oversize);
	if (sva == 0)
		return (ENOMEM);

	/* Compute start of aligned region */
	va = sva;
	va += ((segs[0].ds_addr & (align - 1)) + align - va) & (align - 1);
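	/*
	 * The virtual address is advanced so that it is congruent to the
	 * DVMA address modulo `align' (dvma_cachealign when set, one page
	 * otherwise); keeping the kernel and DVMA mappings at the same
	 * cache-alias offset avoids illegal aliases in virtually indexed
	 * caches.
	 */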

	/* Return excess virtual addresses */
	if (va != sva)
		(void)uvm_unmap(kernel_map, sva, va, 0);
	if (va + size != sva + oversize)
		(void)uvm_unmap(kernel_map, va + size, sva + oversize, 0);


	*kvap = (caddr_t)va;
	mlist = segs[0]._ds_mlist;

	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {

		if (size == 0)
			panic("iommu_dmamem_map: size botch");

		addr = VM_PAGE_TO_PHYS(m);
		pmap_enter(pmap_kernel(), va, addr | cbit,
			   VM_PROT_READ | VM_PROT_WRITE, TRUE);
#if 0
			if (flags & BUS_DMA_COHERENT)
				/* XXX */;
#endif
		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	return (0);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
int
iommu_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs, off, prot, flags;
{

	panic("_bus_dmamem_mmap: not implemented");
}
