Home | History | Annotate | Line # | Download | only in sparc
iommu.c revision 1.20
      1  1.20        pk /*	$NetBSD: iommu.c,v 1.20 1998/08/20 20:49:33 pk Exp $ */
      2   1.1        pk 
      3   1.1        pk /*
      4   1.1        pk  * Copyright (c) 1996
      5   1.3    abrown  * 	The President and Fellows of Harvard College. All rights reserved.
      6   1.1        pk  * Copyright (c) 1995 	Paul Kranenburg
      7   1.1        pk  *
      8   1.1        pk  * Redistribution and use in source and binary forms, with or without
      9   1.1        pk  * modification, are permitted provided that the following conditions
     10   1.1        pk  * are met:
     11   1.1        pk  * 1. Redistributions of source code must retain the above copyright
     12   1.1        pk  *    notice, this list of conditions and the following disclaimer.
     13   1.1        pk  * 2. Redistributions in binary form must reproduce the above copyright
     14   1.1        pk  *    notice, this list of conditions and the following disclaimer in the
     15   1.1        pk  *    documentation and/or other materials provided with the distribution.
     16   1.1        pk  * 3. All advertising materials mentioning features or use of this software
     17   1.1        pk  *    must display the following acknowledgement:
     18   1.1        pk  *	This product includes software developed by Aaron Brown and
     19   1.1        pk  *	Harvard University.
     20   1.1        pk  *	This product includes software developed by Paul Kranenburg.
     21   1.1        pk  * 4. Neither the name of the University nor the names of its contributors
     22   1.1        pk  *    may be used to endorse or promote products derived from this software
     23   1.1        pk  *    without specific prior written permission.
     24   1.1        pk  *
     25   1.1        pk  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     26   1.1        pk  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     27   1.1        pk  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     28   1.1        pk  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     29   1.1        pk  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     30   1.1        pk  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     31   1.1        pk  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     32   1.1        pk  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     33   1.1        pk  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     34   1.1        pk  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     35   1.1        pk  * SUCH DAMAGE.
     36   1.1        pk  *
     37   1.1        pk  */
     38   1.1        pk 
     39   1.1        pk #include <sys/param.h>
     40  1.18        pk #include <sys/extent.h>
     41  1.18        pk #include <sys/malloc.h>
     42  1.18        pk #include <sys/queue.h>
     43   1.1        pk #include <sys/systm.h>
     44   1.1        pk #include <sys/device.h>
     45   1.1        pk #include <vm/vm.h>
     46  1.18        pk #include <vm/vm_kern.h>
     47  1.18        pk #include <uvm/uvm.h>
     48   1.1        pk 
     49  1.18        pk #define _SPARC_BUS_DMA_PRIVATE
     50  1.18        pk #include <machine/bus.h>
     51   1.1        pk #include <machine/autoconf.h>
     52   1.1        pk #include <machine/ctlreg.h>
     53   1.1        pk #include <sparc/sparc/asm.h>
     54   1.1        pk #include <sparc/sparc/vaddrs.h>
     55   1.9        pk #include <sparc/sparc/cpuvar.h>
     56   1.1        pk #include <sparc/sparc/iommureg.h>
     57  1.16        pk #include <sparc/sparc/iommuvar.h>
     58   1.1        pk 
/*
 * Software state for the Sun-4M IOMMU: register mapping, geometry of
 * the DVMA range it translates, and our copy of its page table.
 */
struct iommu_softc {
	struct device	sc_dev;		/* base device */
	struct iommureg	*sc_reg;	/* control registers, mapped in iommu_attach() */
	u_int		sc_pagesize;	/* IOMMU page size (PROM "page-size" property) */
	u_int		sc_range;	/* size of the DVMA range, in bytes */
	u_int		sc_dvmabase;	/* first virtual address of the DVMA range */
	iopte_t		*sc_ptes;	/* IOMMU page table (kernel_iopte_table) */
	int		sc_hasiocache;	/* nonzero if "cache-coherence?" and cache enabled */
};
/* The (single) IOMMU instance; XXX no support for multiple IOMMUs. */
struct	iommu_softc *iommu_sc;/*XXX*/
/* Global copy of sc_hasiocache, consulted by iommu_enter(). */
int	has_iocache;
/* Cache-alignment hint for DVMA mappings; not assigned in this file. */
u_long	dvma_cachealign;

/* Allocator for the DVMA address range [DVMA4M_BASE, DVMA4M_END]. */
struct extent *iommu_dvmamap;

     74   1.1        pk 
/* autoconfiguration driver */
int	iommu_print __P((void *, const char *));
void	iommu_attach __P((struct device *, struct device *, void *));
int	iommu_match __P((struct device *, struct cfdata *, void *));

/* Glue for config(8): softc size plus match/attach entry points. */
struct cfattach iommu_ca = {
	sizeof(struct iommu_softc), iommu_match, iommu_attach
};
     83   1.1        pk 
/* IOMMU DMA map functions */
int	iommu_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));
int	iommu_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));
int	iommu_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));
int	iommu_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));
void	iommu_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void	iommu_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	    bus_size_t, int));

int	iommu_dmamem_alloc __P((bus_dma_tag_t tag, bus_size_t size,
	    bus_size_t alignment, bus_size_t boundary,
	    bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags));
void	iommu_dmamem_free __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
	    int nsegs));
int	iommu_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
	    int nsegs, size_t size, caddr_t *kvap, int flags));
int	iommu_dmamem_mmap __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
	    int nsegs, int off, int prot, int flags));


/*
 * DMA tag handed to devices attached below the IOMMU (see iommu_attach).
 * Map create/destroy and kernel-VA unmap use the machine-independent
 * _bus_* defaults; everything else goes through the iommu_* functions
 * declared above so that transfers run through IOMMU translation.
 */
struct sparc_bus_dma_tag iommu_dma_tag = {
	NULL,
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	iommu_dmamap_load,
	iommu_dmamap_load_mbuf,
	iommu_dmamap_load_uio,
	iommu_dmamap_load_raw,
	iommu_dmamap_unload,
	iommu_dmamap_sync,

	iommu_dmamem_alloc,
	iommu_dmamem_free,
	iommu_dmamem_map,
	_bus_dmamem_unmap,
	iommu_dmamem_mmap
};
    125   1.1        pk /*
    126   1.1        pk  * Print the location of some iommu-attached device (called just
    127   1.1        pk  * before attaching that device).  If `iommu' is not NULL, the
    128   1.1        pk  * device was found but not configured; print the iommu as well.
    129   1.1        pk  * Return UNCONF (config_find ignores this if the device was configured).
    130   1.1        pk  */
    131   1.1        pk int
    132   1.1        pk iommu_print(args, iommu)
    133   1.1        pk 	void *args;
    134   1.5       cgd 	const char *iommu;
    135   1.1        pk {
    136  1.16        pk 	struct iommu_attach_args *ia = args;
    137   1.1        pk 
    138   1.1        pk 	if (iommu)
    139  1.16        pk 		printf("%s at %s", ia->iom_name, iommu);
    140   1.1        pk 	return (UNCONF);
    141   1.1        pk }
    142   1.1        pk 
    143   1.1        pk int
    144   1.8        pk iommu_match(parent, cf, aux)
    145   1.1        pk 	struct device *parent;
    146   1.8        pk 	struct cfdata *cf;
    147   1.8        pk 	void *aux;
    148   1.1        pk {
    149  1.16        pk 	struct mainbus_attach_args *ma = aux;
    150   1.1        pk 
    151   1.1        pk 	if (CPU_ISSUN4OR4C)
    152   1.1        pk 		return (0);
    153  1.16        pk 	return (strcmp(cf->cf_driver->cd_name, ma->ma_name) == 0);
    154   1.1        pk }
    155   1.1        pk 
/*
 * Attach the iommu.
 *
 * Maps the IOMMU registers, takes over the PROM's page table by copying
 * its entries (via MMU bypass) into our own table, re-programs the IOMMU
 * for a larger DVMA range based at DVMA4M_BASE, creates the DVMA extent
 * allocator, and finally configures the children found in the PROM tree.
 */
void
iommu_attach(parent, self, aux)
	struct device *parent;
	struct device *self;
	void *aux;
{
#if defined(SUN4M)
	register struct iommu_softc *sc = (struct iommu_softc *)self;
	struct mainbus_attach_args *ma = aux;
	register int node;
	struct bootpath *bp;
	bus_space_handle_t bh;
	register u_int pbase, pa;
	register int i, mmupcrsave, s;
	register iopte_t *tpte_p;
	extern u_int *kernel_iopte_table;
	extern u_int kernel_iopte_table_pa;

/*XXX-GCC!*/mmupcrsave=0;
	iommu_sc = sc;
	/*
	 * XXX there is only one iommu, for now -- do not know how to
	 * address children on others
	 */
	if (sc->sc_dev.dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}
	node = ma->ma_node;

#if 0
	if (ra->ra_vaddr)
		sc->sc_reg = (struct iommureg *)ca->ca_ra.ra_vaddr;
#else
	/*
	 * Map registers into our space. The PROM may have done this
	 * already, but I feel better if we have our own copy. Plus, the
	 * prom doesn't map the entire register set
	 *
	 * XXX struct iommureg is bigger than ra->ra_len; what are the
	 *     other fields for?
	 */
	if (bus_space_map2(
			ma->ma_bustag,
			ma->ma_iospace,
			ma->ma_paddr,
			sizeof(struct iommureg),
			0,
			0,
			&bh) != 0) {
		printf("iommu_attach: cannot map registers\n");
		return;
	}
	sc->sc_reg = (struct iommureg *)bh;
#endif

	/* DVMA is cache-coherent only if the PROM says so AND the cache is on. */
	sc->sc_hasiocache = node_has_property(node, "cache-coherence?");
	if (CACHEINFO.c_enabled == 0) /* XXX - is this correct? */
		sc->sc_hasiocache = 0;
	has_iocache = sc->sc_hasiocache; /* Set global flag */

	/* XXX: comma operator, not a semicolon — harmless, but likely a typo. */
	sc->sc_pagesize = getpropint(node, "page-size", NBPG),
	/* Current range: 16MB << RANGE field, as programmed by the PROM. */
	sc->sc_range = (1 << 24) <<
	    ((sc->sc_reg->io_cr & IOMMU_CTL_RANGE) >> IOMMU_CTL_RANGESHFT);
#if 0
	sc->sc_dvmabase = (0 - sc->sc_range);
#endif
	/* Physical base of the PROM's page table, from the base address reg. */
	pbase = (sc->sc_reg->io_bar & IOMMU_BAR_IBA) <<
			(14 - IOMMU_BAR_IBASHFT);

	/*
	 * Now we build our own copy of the IOMMU page tables. We need to
	 * do this since we're going to change the range to give us 64M of
	 * mappings, and thus we can move DVMA space down to 0xfd000000 to
	 * give us lots of space and to avoid bumping into the PROM, etc.
	 *
	 * XXX Note that this is rather messy.
	 */
	sc->sc_ptes = (iopte_t *) kernel_iopte_table;

	/*
	 * Now discache the page tables so that the IOMMU sees our
	 * changes.
	 */
	kvm_uncache((caddr_t)sc->sc_ptes,
		(((0 - DVMA4M_BASE)/sc->sc_pagesize) * sizeof(iopte_t)) / NBPG);

	/*
	 * Ok. We've got to read in the original table using MMU bypass,
	 * and copy all of its entries to the appropriate place in our
	 * new table, even if the sizes are different.
	 * This is pretty easy since we know DVMA ends at 0xffffffff.
	 *
	 * XXX: PGOFSET, NBPG assume same page size as SRMMU
	 */
	if (cpuinfo.cpu_impl == 4 && cpuinfo.mxcc) {
		/* set MMU AC bit */
		/* NOTE(review): presumably needed for correct ASI_BYPASS
		 * loads on Viking/MXCC ("bug-avoidance" below) — confirm. */
		sta(SRMMU_PCR, ASI_SRMMU,
		    ((mmupcrsave = lda(SRMMU_PCR, ASI_SRMMU)) | VIKING_PCR_AC));
	}

	/*
	 * Walk both tables backwards from their (common) top: the old
	 * table's last entry lands at the end of the new one, since both
	 * ranges end at 0xffffffff.
	 */
	for (tpte_p = &sc->sc_ptes[((0 - DVMA4M_BASE)/NBPG) - 1],
	     pa = (u_int)pbase - sizeof(iopte_t) +
		   ((u_int)sc->sc_range/NBPG)*sizeof(iopte_t);
	     tpte_p >= &sc->sc_ptes[0] && pa >= (u_int)pbase;
	     tpte_p--, pa -= sizeof(iopte_t)) {

		IOMMU_FLUSHPAGE(sc,
			        (tpte_p - &sc->sc_ptes[0])*NBPG + DVMA4M_BASE);
		*tpte_p = lda(pa, ASI_BYPASS);
	}
	if (cpuinfo.cpu_impl == 4 && cpuinfo.mxcc) {
		/* restore mmu after bug-avoidance */
		sta(SRMMU_PCR, ASI_SRMMU, mmupcrsave);
	}

	/*
	 * Now we can install our new pagetable into the IOMMU
	 */
	sc->sc_range = 0 - DVMA4M_BASE;
	sc->sc_dvmabase = DVMA4M_BASE;

	/* calculate log2(sc->sc_range/16MB) */
	i = ffs(sc->sc_range/(1 << 24)) - 1;
	if ((1 << i) != (sc->sc_range/(1 << 24)))
		panic("bad iommu range: %d\n",i);

	/* Swap tables with interrupts blocked so no DVMA sneaks in between. */
	s = splhigh();
	IOMMU_FLUSHALL(sc);

	/* Program the new range and (re-)enable the IOMMU. */
	sc->sc_reg->io_cr = (sc->sc_reg->io_cr & ~IOMMU_CTL_RANGE) |
			  (i << IOMMU_CTL_RANGESHFT) | IOMMU_CTL_ME;
	sc->sc_reg->io_bar = (kernel_iopte_table_pa >> 4) & IOMMU_BAR_IBA;

	IOMMU_FLUSHALL(sc);
	splx(s);

	printf(": version 0x%x/0x%x, page-size %d, range %dMB\n",
		(sc->sc_reg->io_cr & IOMMU_CTL_VER) >> 24,
		(sc->sc_reg->io_cr & IOMMU_CTL_IMPL) >> 28,
		sc->sc_pagesize,
		sc->sc_range >> 20);

	/* Propagate bootpath */
	if (ma->ma_bp != NULL && strcmp(ma->ma_bp->name, "iommu") == 0)
		bp = ma->ma_bp + 1;
	else
		bp = NULL;

	/* Create the allocator that hands out DVMA address space. */
	iommu_dvmamap = extent_create("iommudvma", DVMA4M_BASE, DVMA4M_END,
					M_DEVBUF, 0, 0, EX_NOWAIT);

	/*
	 * Loop through ROM children (expect Sbus among them).
	 */
	for (node = firstchild(node); node; node = nextsibling(node)) {
		struct iommu_attach_args ia;

		bzero(&ia, sizeof ia);
		ia.iom_name = getpropstring(node, "name");

		/* Propagate BUS & DMA tags */
		ia.iom_bustag = ma->ma_bustag;
		ia.iom_dmatag = &iommu_dma_tag;
		ia.iom_node = node;
		ia.iom_bp = bp;
		(void) config_found(&sc->sc_dev, (void *)&ia, iommu_print);
	}
#endif
}
    329   1.1        pk 
    330   1.1        pk void
    331   1.1        pk iommu_enter(va, pa)
    332   1.1        pk 	u_int va, pa;
    333   1.1        pk {
    334   1.1        pk 	struct iommu_softc *sc = iommu_sc;
    335   1.1        pk 	int pte;
    336   1.1        pk 
    337   1.1        pk #ifdef DEBUG
    338   1.1        pk 	if (va < sc->sc_dvmabase)
    339   1.1        pk 		panic("iommu_enter: va 0x%x not in DVMA space",va);
    340   1.1        pk #endif
    341   1.1        pk 
    342   1.1        pk 	pte = atop(pa) << IOPTE_PPNSHFT;
    343   1.1        pk 	pte &= IOPTE_PPN;
    344   1.2    abrown 	pte |= IOPTE_V | IOPTE_W | (has_iocache ? IOPTE_C : 0);
    345   1.1        pk 	sc->sc_ptes[atop(va - sc->sc_dvmabase)] = pte;
    346   1.1        pk 	IOMMU_FLUSHPAGE(sc, va);
    347   1.1        pk }
    348   1.1        pk 
    349   1.1        pk /*
    350   1.1        pk  * iommu_clear: clears mappings created by iommu_enter
    351   1.1        pk  */
    352   1.1        pk void
    353   1.1        pk iommu_remove(va, len)
    354   1.1        pk 	register u_int va, len;
    355   1.1        pk {
    356   1.1        pk 	register struct iommu_softc *sc = iommu_sc;
    357   1.1        pk 
    358   1.1        pk #ifdef DEBUG
    359   1.1        pk 	if (va < sc->sc_dvmabase)
    360   1.1        pk 		panic("iommu_enter: va 0x%x not in DVMA space", va);
    361   1.1        pk #endif
    362   1.1        pk 
    363   1.1        pk 	while (len > 0) {
    364   1.1        pk #ifdef notyet
    365   1.1        pk #ifdef DEBUG
    366   1.1        pk 		if ((sc->sc_ptes[atop(va - sc->sc_dvmabase)] & IOPTE_V) == 0)
    367   1.1        pk 			panic("iommu_clear: clearing invalid pte at va 0x%x",
    368   1.1        pk 				va);
    369   1.1        pk #endif
    370   1.1        pk #endif
    371   1.1        pk 		sc->sc_ptes[atop(va - sc->sc_dvmabase)] = 0;
    372   1.1        pk 		IOMMU_FLUSHPAGE(sc, va);
    373   1.1        pk 		len -= sc->sc_pagesize;
    374   1.1        pk 		va += sc->sc_pagesize;
    375   1.1        pk 	}
    376   1.1        pk }
    377   1.1        pk 
/*
 * NOTE(review): the region below is compiled out.  It references
 * registers (io_afsr, io_mfsr) and a softc member (sc_dvmacur) that
 * do not appear elsewhere in this file; kept only for reference.
 */
#if 0	/* These registers aren't there??? */
void
iommu_error()
{
	struct iommu_softc *sc = X;
	struct iommureg *iop = sc->sc_reg;

	printf("iommu: afsr 0x%x, afar 0x%x\n", iop->io_afsr, iop->io_afar);
	printf("iommu: mfsr 0x%x, mfar 0x%x\n", iop->io_mfsr, iop->io_mfar);
}
int
iommu_alloc(va, len)
	u_int va, len;
{
	struct iommu_softc *sc = X;
	int off, tva, pa, iovaddr, pte;

	off = (int)va & PGOFSET;
	len = round_page(len + off);
	va -= off;

if ((int)sc->sc_dvmacur + len > 0)
	sc->sc_dvmacur = sc->sc_dvmabase;

	iovaddr = tva = sc->sc_dvmacur;
	sc->sc_dvmacur += len;
	while (len) {
		pa = pmap_extract(pmap_kernel(), va);

#define IOMMU_PPNSHIFT	8
#define IOMMU_V		0x00000002
#define IOMMU_W		0x00000004

		pte = atop(pa) << IOMMU_PPNSHIFT;
		pte |= IOMMU_V | IOMMU_W;
		sta(sc->sc_ptes + atop(tva - sc->sc_dvmabase), ASI_BYPASS, pte);
		sc->sc_reg->io_flushpage = tva;
		len -= NBPG;
		va += NBPG;
		tva += NBPG;
	}
	return iovaddr + off;
}
#endif
    422  1.18        pk 
    423  1.18        pk 
/*
 * IOMMU DMA map functions.
 */

/*
 * Load a DMA map with a linear buffer: allocate a page-rounded chunk of
 * DVMA space, flush the buffer from the CPU cache, and enter an IOMMU
 * mapping for every page the buffer touches.  Always produces exactly
 * one DMA segment.
 */
int
iommu_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	bus_size_t sgsize;
	bus_addr_t dvmaddr, curaddr;
	vm_offset_t vaddr = (vm_offset_t)buf;
	pmap_t pmap;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/* DVMA space needed, rounded up to whole IOMMU pages. */
	sgsize = round_page(buflen + (vaddr & PGOFSET));

	/*
	 * XXX Need to implement "don't dma across this boundry".
	 */
	if (map->_dm_boundary != 0)
		panic("bus_dmamap_load: boundaries not implemented");

	if (extent_alloc(iommu_dvmamap, sgsize, NBPG, EX_NOBOUNDARY,
            EX_NOWAIT, (u_long *)&dvmaddr) != 0)
		return (ENOMEM);

	/* Push the buffer out of the CPU cache before the device reads it. */
	cpuinfo.cache_flush(buf, buflen);

	/*
	 * We always use just one segment.
	 */
	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	map->dm_segs[0].ds_addr = dvmaddr + (vaddr & PGOFSET);
	/* Page-rounded length: iommu_dmamap_unload() frees this much. */
	map->dm_segs[0].ds_len = sgsize /*was:buflen*/;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; buflen > 0; ) {
		/*
		 * Get the physical address for this page.
		 */
		curaddr = (bus_addr_t)pmap_extract(pmap, vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - (vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/* Map the whole page; the in-page offset is in ds_addr. */
		iommu_enter(dvmaddr, curaddr & ~PGOFSET);

		dvmaddr += NBPG;
		vaddr += sgsize;
		buflen -= sgsize;
	}
	return (0);
}
    497  1.18        pk 
    498  1.18        pk /*
    499  1.18        pk  * Like _bus_dmamap_load(), but for mbufs.
    500  1.18        pk  */
    501  1.18        pk int
    502  1.18        pk iommu_dmamap_load_mbuf(t, map, m, flags)
    503  1.18        pk 	bus_dma_tag_t t;
    504  1.18        pk 	bus_dmamap_t map;
    505  1.18        pk 	struct mbuf *m;
    506  1.18        pk 	int flags;
    507  1.18        pk {
    508  1.18        pk 
    509  1.18        pk 	panic("_bus_dmamap_load: not implemented");
    510  1.18        pk }
    511  1.18        pk 
    512  1.18        pk /*
    513  1.18        pk  * Like _bus_dmamap_load(), but for uios.
    514  1.18        pk  */
    515  1.18        pk int
    516  1.18        pk iommu_dmamap_load_uio(t, map, uio, flags)
    517  1.18        pk 	bus_dma_tag_t t;
    518  1.18        pk 	bus_dmamap_t map;
    519  1.18        pk 	struct uio *uio;
    520  1.18        pk 	int flags;
    521  1.18        pk {
    522  1.18        pk 
    523  1.18        pk 	panic("_bus_dmamap_load_uio: not implemented");
    524  1.18        pk }
    525  1.18        pk 
    526  1.18        pk /*
    527  1.18        pk  * Like _bus_dmamap_load(), but for raw memory allocated with
    528  1.18        pk  * bus_dmamem_alloc().
    529  1.18        pk  */
    530  1.18        pk int
    531  1.18        pk iommu_dmamap_load_raw(t, map, segs, nsegs, size, flags)
    532  1.18        pk 	bus_dma_tag_t t;
    533  1.18        pk 	bus_dmamap_t map;
    534  1.18        pk 	bus_dma_segment_t *segs;
    535  1.18        pk 	int nsegs;
    536  1.18        pk 	bus_size_t size;
    537  1.18        pk 	int flags;
    538  1.18        pk {
    539  1.18        pk 
    540  1.18        pk 	panic("_bus_dmamap_load_raw: not implemented");
    541  1.18        pk }
    542  1.18        pk 
    543  1.18        pk /*
    544  1.18        pk  * Common function for unloading a DMA map.  May be called by
    545  1.18        pk  * bus-specific DMA map unload functions.
    546  1.18        pk  */
    547  1.18        pk void
    548  1.18        pk iommu_dmamap_unload(t, map)
    549  1.18        pk 	bus_dma_tag_t t;
    550  1.18        pk 	bus_dmamap_t map;
    551  1.18        pk {
    552  1.18        pk 	bus_addr_t addr;
    553  1.18        pk 	bus_size_t len;
    554  1.18        pk 
    555  1.18        pk 	if (map->dm_nsegs != 1)
    556  1.18        pk 		panic("_bus_dmamap_unload: nsegs = %d", map->dm_nsegs);
    557  1.18        pk 
    558  1.18        pk 	addr = map->dm_segs[0].ds_addr & ~PGOFSET;
    559  1.18        pk 	len = map->dm_segs[0].ds_len;
    560  1.18        pk 
    561  1.18        pk 	iommu_remove(addr, len);
    562  1.18        pk 	if (extent_free(iommu_dvmamap, addr, len, EX_NOWAIT) != 0)
    563  1.18        pk 		printf("warning: %ld of DVMA space lost\n", len);
    564  1.18        pk 
    565  1.18        pk 	/* Mark the mappings as invalid. */
    566  1.18        pk 	map->dm_mapsize = 0;
    567  1.18        pk 	map->dm_nsegs = 0;
    568  1.18        pk }
    569  1.18        pk 
/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 *
 * Currently a no-op: iommu_dmamap_load() flushes the CPU cache up
 * front, so nothing is done per-operation here.
 */
void
iommu_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should flush CPU write buffers.
	 */
}
    587  1.18        pk 
    588  1.18        pk /*
    589  1.18        pk  * Common function for DMA-safe memory allocation.  May be called
    590  1.18        pk  * by bus-specific DMA memory allocation functions.
    591  1.18        pk  */
    592  1.18        pk int
    593  1.18        pk iommu_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
    594  1.18        pk 	bus_dma_tag_t t;
    595  1.18        pk 	bus_size_t size, alignment, boundary;
    596  1.18        pk 	bus_dma_segment_t *segs;
    597  1.18        pk 	int nsegs;
    598  1.18        pk 	int *rsegs;
    599  1.18        pk 	int flags;
    600  1.18        pk {
    601  1.18        pk 	vm_offset_t curaddr;
    602  1.18        pk 	bus_addr_t dvmaddr;
    603  1.18        pk 	vm_page_t m;
    604  1.18        pk 	int error;
    605  1.18        pk 	struct pglist *mlist;
    606  1.18        pk 
    607  1.18        pk 	size = round_page(size);
    608  1.18        pk 	error = _bus_dmamem_alloc_common(t, size, alignment, boundary,
    609  1.18        pk 					 segs, nsegs, rsegs, flags);
    610  1.18        pk 	if (error != 0)
    611  1.18        pk 		return (error);
    612  1.18        pk 
    613  1.20        pk 	if (extent_alloc(iommu_dvmamap, size, NBPG, boundary,
    614  1.20        pk 			 (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
    615  1.20        pk 			 (u_long *)&dvmaddr) != 0)
    616  1.18        pk 		return (ENOMEM);
    617  1.18        pk 
    618  1.18        pk 	/*
    619  1.18        pk 	 * Compute the location, size, and number of segments actually
    620  1.18        pk 	 * returned by the VM code.
    621  1.18        pk 	 */
    622  1.18        pk 	segs[0].ds_addr = dvmaddr;
    623  1.18        pk 	segs[0].ds_len = size;
    624  1.18        pk 	*rsegs = 1;
    625  1.18        pk 
    626  1.18        pk 	mlist = segs[0]._ds_mlist;
    627  1.18        pk 	/* Map memory into DVMA space */
    628  1.18        pk 	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
    629  1.18        pk 		curaddr = VM_PAGE_TO_PHYS(m);
    630  1.18        pk 
    631  1.18        pk 		iommu_enter(dvmaddr, curaddr);
    632  1.18        pk 		dvmaddr += PAGE_SIZE;
    633  1.18        pk 	}
    634  1.18        pk 
    635  1.18        pk 	return (0);
    636  1.18        pk }
    637  1.18        pk 
    638  1.18        pk /*
    639  1.18        pk  * Common function for freeing DMA-safe memory.  May be called by
    640  1.18        pk  * bus-specific DMA memory free functions.
    641  1.18        pk  */
    642  1.18        pk void
    643  1.18        pk iommu_dmamem_free(t, segs, nsegs)
    644  1.18        pk 	bus_dma_tag_t t;
    645  1.18        pk 	bus_dma_segment_t *segs;
    646  1.18        pk 	int nsegs;
    647  1.18        pk {
    648  1.18        pk 	bus_addr_t addr;
    649  1.18        pk 	bus_size_t len;
    650  1.18        pk 
    651  1.18        pk 	if (nsegs != 1)
    652  1.18        pk 		panic("bus_dmamem_free: nsegs = %d", nsegs);
    653  1.18        pk 
    654  1.18        pk 	addr = segs[0].ds_addr;
    655  1.18        pk 	len = segs[0].ds_len;
    656  1.18        pk 
    657  1.18        pk 	iommu_remove(addr, len);
    658  1.18        pk 	if (extent_free(iommu_dvmamap, addr, len, EX_NOWAIT) != 0)
    659  1.18        pk 		printf("warning: %ld of DVMA space lost\n", len);
    660  1.18        pk 	/*
    661  1.18        pk 	 * Return the list of pages back to the VM system.
    662  1.18        pk 	 */
    663  1.18        pk 	_bus_dmamem_free_common(t, segs, nsegs);
    664  1.18        pk }
    665  1.18        pk 
    666  1.18        pk /*
    667  1.18        pk  * Common function for mapping DMA-safe memory.  May be called by
    668  1.18        pk  * bus-specific DMA memory map functions.
    669  1.18        pk  */
    670  1.18        pk int
    671  1.18        pk iommu_dmamem_map(t, segs, nsegs, size, kvap, flags)
    672  1.18        pk 	bus_dma_tag_t t;
    673  1.18        pk 	bus_dma_segment_t *segs;
    674  1.18        pk 	int nsegs;
    675  1.18        pk 	size_t size;
    676  1.18        pk 	caddr_t *kvap;
    677  1.18        pk 	int flags;
    678  1.18        pk {
    679  1.18        pk 	vm_page_t m;
    680  1.18        pk 	vm_offset_t va, sva;
    681  1.18        pk 	bus_addr_t addr;
    682  1.18        pk 	struct pglist *mlist;
    683  1.18        pk 	int cbit;
    684  1.18        pk 	size_t oversize;
    685  1.18        pk 	u_long align;
    686  1.18        pk 	extern int has_iocache;
    687  1.18        pk 	extern u_long dvma_cachealign;
    688  1.18        pk 
    689  1.18        pk 	if (nsegs != 1)
    690  1.18        pk 		panic("iommu_dmamem_map: nsegs = %d", nsegs);
    691  1.18        pk 
    692  1.18        pk 	cbit = has_iocache ? 0 : PMAP_NC;
    693  1.18        pk 	align = dvma_cachealign ? dvma_cachealign : PAGE_SIZE;
    694  1.18        pk 
    695  1.18        pk 	size = round_page(size);
    696  1.18        pk 
    697  1.18        pk 	/*
    698  1.18        pk 	 * Find a region of kernel virtual addresses that can accomodate
    699  1.18        pk 	 * our aligment requirements.
    700  1.18        pk 	 */
    701  1.18        pk 	oversize = size + align - PAGE_SIZE;
    702  1.18        pk 	sva = uvm_km_valloc(kernel_map, oversize);
    703  1.18        pk 	if (sva == 0)
    704  1.18        pk 		return (ENOMEM);
    705  1.18        pk 
    706  1.18        pk 	/* Compute start of aligned region */
    707  1.18        pk 	va = sva;
    708  1.18        pk 	va += ((segs[0].ds_addr & (align - 1)) + align - va) & (align - 1);
    709  1.18        pk 
    710  1.18        pk 	/* Return excess virtual addresses */
    711  1.18        pk 	if (va != sva)
    712  1.18        pk 		(void)uvm_unmap(kernel_map, sva, va, 0);
    713  1.18        pk 	if (va + size != sva + oversize)
    714  1.18        pk 		(void)uvm_unmap(kernel_map, va + size, sva + oversize, 0);
    715  1.18        pk 
    716  1.18        pk 
    717  1.18        pk 	*kvap = (caddr_t)va;
    718  1.18        pk 	mlist = segs[0]._ds_mlist;
    719  1.18        pk 
    720  1.18        pk 	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
    721  1.18        pk 
    722  1.18        pk 		if (size == 0)
    723  1.18        pk 			panic("iommu_dmamem_map: size botch");
    724  1.18        pk 
    725  1.18        pk 		addr = VM_PAGE_TO_PHYS(m);
    726  1.18        pk 		pmap_enter(pmap_kernel(), va, addr | cbit,
    727  1.18        pk 			   VM_PROT_READ | VM_PROT_WRITE, TRUE);
    728  1.18        pk #if 0
    729  1.18        pk 			if (flags & BUS_DMA_COHERENT)
    730  1.18        pk 				/* XXX */;
    731  1.18        pk #endif
    732  1.18        pk 		va += PAGE_SIZE;
    733  1.18        pk 		size -= PAGE_SIZE;
    734  1.18        pk 	}
    735  1.18        pk 
    736  1.18        pk 	return (0);
    737  1.18        pk }
    738  1.18        pk 
    739  1.18        pk /*
    740  1.18        pk  * Common functin for mmap(2)'ing DMA-safe memory.  May be called by
    741  1.18        pk  * bus-specific DMA mmap(2)'ing functions.
    742  1.18        pk  */
    743  1.18        pk int
    744  1.18        pk iommu_dmamem_mmap(t, segs, nsegs, off, prot, flags)
    745  1.18        pk 	bus_dma_tag_t t;
    746  1.18        pk 	bus_dma_segment_t *segs;
    747  1.18        pk 	int nsegs, off, prot, flags;
    748  1.18        pk {
    749  1.18        pk 
    750  1.18        pk 	panic("_bus_dmamem_mmap: not implemented");
    751  1.18        pk }
    752