Home | History | Annotate | Line # | Download | only in sparc
iommu.c revision 1.30
      1  1.30       mrg /*	$NetBSD: iommu.c,v 1.30 1999/03/24 05:51:11 mrg Exp $ */
      2   1.1        pk 
      3   1.1        pk /*
      4   1.1        pk  * Copyright (c) 1996
      5   1.3    abrown  * 	The President and Fellows of Harvard College. All rights reserved.
      6   1.1        pk  * Copyright (c) 1995 	Paul Kranenburg
      7   1.1        pk  *
      8   1.1        pk  * Redistribution and use in source and binary forms, with or without
      9   1.1        pk  * modification, are permitted provided that the following conditions
     10   1.1        pk  * are met:
     11   1.1        pk  * 1. Redistributions of source code must retain the above copyright
     12   1.1        pk  *    notice, this list of conditions and the following disclaimer.
     13   1.1        pk  * 2. Redistributions in binary form must reproduce the above copyright
     14   1.1        pk  *    notice, this list of conditions and the following disclaimer in the
     15   1.1        pk  *    documentation and/or other materials provided with the distribution.
     16   1.1        pk  * 3. All advertising materials mentioning features or use of this software
     17   1.1        pk  *    must display the following acknowledgement:
     18   1.1        pk  *	This product includes software developed by Aaron Brown and
     19   1.1        pk  *	Harvard University.
     20   1.1        pk  *	This product includes software developed by Paul Kranenburg.
     21   1.1        pk  * 4. Neither the name of the University nor the names of its contributors
     22   1.1        pk  *    may be used to endorse or promote products derived from this software
     23   1.1        pk  *    without specific prior written permission.
     24   1.1        pk  *
     25   1.1        pk  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     26   1.1        pk  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     27   1.1        pk  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     28   1.1        pk  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     29   1.1        pk  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     30   1.1        pk  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     31   1.1        pk  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     32   1.1        pk  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     33   1.1        pk  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     34   1.1        pk  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     35   1.1        pk  * SUCH DAMAGE.
     36   1.1        pk  *
     37   1.1        pk  */
     38   1.1        pk 
     39   1.1        pk #include <sys/param.h>
     40  1.18        pk #include <sys/extent.h>
     41  1.18        pk #include <sys/malloc.h>
     42  1.18        pk #include <sys/queue.h>
     43   1.1        pk #include <sys/systm.h>
     44   1.1        pk #include <sys/device.h>
     45   1.1        pk #include <vm/vm.h>
     46  1.18        pk #include <vm/vm_kern.h>
     47  1.25        pk 
     48  1.30       mrg #include <uvm/uvm_extern.h>
     49   1.1        pk 
     50  1.18        pk #define _SPARC_BUS_DMA_PRIVATE
     51  1.18        pk #include <machine/bus.h>
     52   1.1        pk #include <machine/autoconf.h>
     53   1.1        pk #include <machine/ctlreg.h>
     54   1.1        pk #include <sparc/sparc/asm.h>
     55   1.1        pk #include <sparc/sparc/vaddrs.h>
     56   1.9        pk #include <sparc/sparc/cpuvar.h>
     57   1.1        pk #include <sparc/sparc/iommureg.h>
     58  1.16        pk #include <sparc/sparc/iommuvar.h>
     59   1.1        pk 
struct iommu_softc {
	struct device	sc_dev;		/* base device */
	struct iommureg	*sc_reg;	/* mapped IOMMU control registers */
	u_int		sc_pagesize;	/* IOMMU page size (from PROM "page-size") */
	u_int		sc_range;	/* size of the DVMA range, in bytes */
	bus_addr_t	sc_dvmabase;	/* first DVMA address */
	iopte_t		*sc_ptes;	/* our copy of the IOMMU page table */
	int		sc_hasiocache;	/* "cache-coherence?" property was set */
};
struct	iommu_softc *iommu_sc;/*XXX*/
int	has_iocache;		/* global copy of sc_hasiocache */
u_long	dvma_cachealign;	/* alignment used when carving DVMA addresses */

struct extent *iommu_dvmamap;	/* allocator for DVMA address space */
     74  1.18        pk 
     75   1.1        pk 
     76   1.1        pk /* autoconfiguration driver */
     77   1.5       cgd int	iommu_print __P((void *, const char *));
     78   1.1        pk void	iommu_attach __P((struct device *, struct device *, void *));
     79   1.8        pk int	iommu_match __P((struct device *, struct cfdata *, void *));
     80   1.1        pk 
/* Autoconfiguration glue: match/attach entry points for the iommu driver */
struct cfattach iommu_ca = {
	sizeof(struct iommu_softc), iommu_match, iommu_attach
};
     84   1.1        pk 
     85  1.18        pk /* IOMMU DMA map functions */
     86  1.18        pk int	iommu_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
     87  1.18        pk 	    bus_size_t, struct proc *, int));
     88  1.18        pk int	iommu_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
     89  1.18        pk 	    struct mbuf *, int));
     90  1.18        pk int	iommu_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
     91  1.18        pk 	    struct uio *, int));
     92  1.18        pk int	iommu_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
     93  1.18        pk 	    bus_dma_segment_t *, int, bus_size_t, int));
     94  1.18        pk void	iommu_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
     95  1.18        pk void	iommu_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
     96  1.18        pk 	    bus_size_t, int));
     97  1.18        pk 
     98  1.18        pk int	iommu_dmamem_alloc __P((bus_dma_tag_t tag, bus_size_t size,
     99  1.18        pk 	    bus_size_t alignment, bus_size_t boundary,
    100  1.18        pk 	    bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags));
    101  1.18        pk void	iommu_dmamem_free __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
    102  1.18        pk 	    int nsegs));
    103  1.18        pk int	iommu_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
    104  1.18        pk 	    int nsegs, size_t size, caddr_t *kvap, int flags));
    105  1.18        pk int	iommu_dmamem_mmap __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
    106  1.18        pk 	    int nsegs, int off, int prot, int flags));
    107  1.18        pk 
    108  1.18        pk 
/*
 * DMA tag handed to devices that sit behind the IOMMU.  Map
 * create/destroy and memory unmap fall through to the generic
 * `_bus_' implementations; the remaining operations are overridden
 * here so loads/allocations go through the IOMMU page table and
 * the DVMA extent map.
 */
struct sparc_bus_dma_tag iommu_dma_tag = {
	NULL,
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	iommu_dmamap_load,
	iommu_dmamap_load_mbuf,
	iommu_dmamap_load_uio,
	iommu_dmamap_load_raw,
	iommu_dmamap_unload,
	iommu_dmamap_sync,

	iommu_dmamem_alloc,
	iommu_dmamem_free,
	iommu_dmamem_map,
	_bus_dmamem_unmap,
	iommu_dmamem_mmap
};
    126   1.1        pk /*
    127   1.1        pk  * Print the location of some iommu-attached device (called just
    128   1.1        pk  * before attaching that device).  If `iommu' is not NULL, the
    129   1.1        pk  * device was found but not configured; print the iommu as well.
    130   1.1        pk  * Return UNCONF (config_find ignores this if the device was configured).
    131   1.1        pk  */
    132   1.1        pk int
    133   1.1        pk iommu_print(args, iommu)
    134   1.1        pk 	void *args;
    135   1.5       cgd 	const char *iommu;
    136   1.1        pk {
    137  1.16        pk 	struct iommu_attach_args *ia = args;
    138   1.1        pk 
    139   1.1        pk 	if (iommu)
    140  1.16        pk 		printf("%s at %s", ia->iom_name, iommu);
    141   1.1        pk 	return (UNCONF);
    142   1.1        pk }
    143   1.1        pk 
    144   1.1        pk int
    145   1.8        pk iommu_match(parent, cf, aux)
    146   1.1        pk 	struct device *parent;
    147   1.8        pk 	struct cfdata *cf;
    148   1.8        pk 	void *aux;
    149   1.1        pk {
    150  1.16        pk 	struct mainbus_attach_args *ma = aux;
    151   1.1        pk 
    152   1.1        pk 	if (CPU_ISSUN4OR4C)
    153   1.1        pk 		return (0);
    154  1.16        pk 	return (strcmp(cf->cf_driver->cd_name, ma->ma_name) == 0);
    155   1.1        pk }
    156   1.1        pk 
/*
 * Attach the iommu.
 *
 * Takes over the PROM's IOMMU page table: maps the control registers,
 * copies the existing translations into our own (larger) table using
 * MMU-bypass reads, then points the hardware at the new table and
 * attaches all ROM children (normally the Sbus).
 */
void
iommu_attach(parent, self, aux)
	struct device *parent;
	struct device *self;
	void *aux;
{
#if defined(SUN4M)
	struct iommu_softc *sc = (struct iommu_softc *)self;
	struct mainbus_attach_args *ma = aux;
	int node;
	struct bootpath *bp;
	bus_space_handle_t bh;
	u_int pbase, pa;
	int i, mmupcrsave, s;
	iopte_t *tpte_p;
	extern u_int *kernel_iopte_table;
	extern u_int kernel_iopte_table_pa;

/*XXX-GCC!*/mmupcrsave=0;
	iommu_sc = sc;
	/*
	 * XXX there is only one iommu, for now -- do not know how to
	 * address children on others
	 */
	if (sc->sc_dev.dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}
	node = ma->ma_node;

#if 0
	if (ra->ra_vaddr)
		sc->sc_reg = (struct iommureg *)ca->ca_ra.ra_vaddr;
#else
	/*
	 * Map registers into our space. The PROM may have done this
	 * already, but I feel better if we have our own copy. Plus, the
	 * prom doesn't map the entire register set
	 *
	 * XXX struct iommureg is bigger than ra->ra_len; what are the
	 *     other fields for?
	 */
	if (bus_space_map2(
			ma->ma_bustag,
			ma->ma_iospace,
			ma->ma_paddr,
			sizeof(struct iommureg),
			0,
			0,
			&bh) != 0) {
		printf("iommu_attach: cannot map registers\n");
		return;
	}
	sc->sc_reg = (struct iommureg *)bh;
#endif

	/* IO cache coherence: honor the PROM property, but only if caches
	 * are actually enabled. */
	sc->sc_hasiocache = node_has_property(node, "cache-coherence?");
	if (CACHEINFO.c_enabled == 0) /* XXX - is this correct? */
		sc->sc_hasiocache = 0;
	has_iocache = sc->sc_hasiocache; /* Set global flag */

	/* NOTE(review): the trailing comma below joins the two assignments
	 * with the comma operator -- likely meant to be `;'; behavior is
	 * the same either way. */
	sc->sc_pagesize = getpropint(node, "page-size", NBPG),
	sc->sc_range = (1 << 24) <<
	    ((sc->sc_reg->io_cr & IOMMU_CTL_RANGE) >> IOMMU_CTL_RANGESHFT);
#if 0
	sc->sc_dvmabase = (0 - sc->sc_range);
#endif
	/* Physical address of the PROM's IOMMU page table */
	pbase = (sc->sc_reg->io_bar & IOMMU_BAR_IBA) <<
			(14 - IOMMU_BAR_IBASHFT);

	/*
	 * Now we build our own copy of the IOMMU page tables. We need to
	 * do this since we're going to change the range to give us 64M of
	 * mappings, and thus we can move DVMA space down to 0xfd000000 to
	 * give us lots of space and to avoid bumping into the PROM, etc.
	 *
	 * XXX Note that this is rather messy.
	 */
	sc->sc_ptes = (iopte_t *) kernel_iopte_table;

	/*
	 * Now discache the page tables so that the IOMMU sees our
	 * changes.
	 */
	kvm_uncache((caddr_t)sc->sc_ptes,
	    (((0 - IOMMU_DVMA_BASE)/sc->sc_pagesize) * sizeof(iopte_t)) / NBPG);

	/*
	 * Ok. We've got to read in the original table using MMU bypass,
	 * and copy all of its entries to the appropriate place in our
	 * new table, even if the sizes are different.
	 * This is pretty easy since we know DVMA ends at 0xffffffff.
	 *
	 * XXX: PGOFSET, NBPG assume same page size as SRMMU
	 */
	if (cpuinfo.cpu_impl == 4 && cpuinfo.mxcc) {
		/* set MMU AC bit */
		sta(SRMMU_PCR, ASI_SRMMU,
		    ((mmupcrsave = lda(SRMMU_PCR, ASI_SRMMU)) | VIKING_PCR_AC));
	}

	/* Walk both tables backwards from their last entries, copying the
	 * PROM's PTEs into the tail of our table; flush each target page
	 * from the IOMMU TLB before overwriting its PTE. */
	for (tpte_p = &sc->sc_ptes[((0 - IOMMU_DVMA_BASE)/NBPG) - 1],
	     pa = (u_int)pbase - sizeof(iopte_t) +
		   ((u_int)sc->sc_range/NBPG)*sizeof(iopte_t);
	     tpte_p >= &sc->sc_ptes[0] && pa >= (u_int)pbase;
	     tpte_p--, pa -= sizeof(iopte_t)) {

		IOMMU_FLUSHPAGE(sc,
			     (tpte_p - &sc->sc_ptes[0])*NBPG + IOMMU_DVMA_BASE);
		*tpte_p = lda(pa, ASI_BYPASS);
	}
	if (cpuinfo.cpu_impl == 4 && cpuinfo.mxcc) {
		/* restore mmu after bug-avoidance */
		sta(SRMMU_PCR, ASI_SRMMU, mmupcrsave);
	}

	/*
	 * Now we can install our new pagetable into the IOMMU
	 */
	sc->sc_range = 0 - IOMMU_DVMA_BASE;
	sc->sc_dvmabase = IOMMU_DVMA_BASE;

	/* calculate log2(sc->sc_range/16MB) */
	i = ffs(sc->sc_range/(1 << 24)) - 1;
	if ((1 << i) != (sc->sc_range/(1 << 24)))
		panic("bad iommu range: %d\n",i);

	/* Switch the hardware over to our table with interrupts blocked;
	 * flush the whole IOMMU TLB on both sides of the switch. */
	s = splhigh();
	IOMMU_FLUSHALL(sc);

	sc->sc_reg->io_cr = (sc->sc_reg->io_cr & ~IOMMU_CTL_RANGE) |
			  (i << IOMMU_CTL_RANGESHFT) | IOMMU_CTL_ME;
	sc->sc_reg->io_bar = (kernel_iopte_table_pa >> 4) & IOMMU_BAR_IBA;

	IOMMU_FLUSHALL(sc);
	splx(s);

	printf(": version 0x%x/0x%x, page-size %d, range %dMB\n",
		(sc->sc_reg->io_cr & IOMMU_CTL_VER) >> 24,
		(sc->sc_reg->io_cr & IOMMU_CTL_IMPL) >> 28,
		sc->sc_pagesize,
		sc->sc_range >> 20);

	/* Propagate bootpath */
	if (ma->ma_bp != NULL && strcmp(ma->ma_bp->name, "iommu") == 0)
		bp = ma->ma_bp + 1;
	else
		bp = NULL;

	/* Create the allocator for DVMA address space */
	iommu_dvmamap = extent_create("iommudvma",
					IOMMU_DVMA_BASE, IOMMU_DVMA_END,
					M_DEVBUF, 0, 0, EX_NOWAIT);
	if (iommu_dvmamap == NULL)
		panic("iommu: unable to allocate DVMA map");

	/*
	 * Loop through ROM children (expect Sbus among them).
	 */
	for (node = firstchild(node); node; node = nextsibling(node)) {
		struct iommu_attach_args ia;

		bzero(&ia, sizeof ia);
		ia.iom_name = getpropstring(node, "name");

		/* Propagate BUS & DMA tags */
		ia.iom_bustag = ma->ma_bustag;
		ia.iom_dmatag = &iommu_dma_tag;

		ia.iom_node = node;
		ia.iom_bp = bp;

		/* getprop() allocates iom_reg; free it after config_found() */
		ia.iom_reg = NULL;
		getprop(node, "reg", sizeof(struct sbus_reg),
			&ia.iom_nreg, (void **)&ia.iom_reg);

		(void) config_found(&sc->sc_dev, (void *)&ia, iommu_print);
		if (ia.iom_reg != NULL)
			free(ia.iom_reg, M_DEVBUF);
	}
#endif
}
    341   1.1        pk 
    342   1.1        pk void
    343   1.1        pk iommu_enter(va, pa)
    344  1.21        pk 	bus_addr_t va;
    345  1.21        pk 	paddr_t pa;
    346   1.1        pk {
    347   1.1        pk 	struct iommu_softc *sc = iommu_sc;
    348   1.1        pk 	int pte;
    349   1.1        pk 
    350   1.1        pk #ifdef DEBUG
    351   1.1        pk 	if (va < sc->sc_dvmabase)
    352  1.21        pk 		panic("iommu_enter: va 0x%lx not in DVMA space", (long)va);
    353   1.1        pk #endif
    354   1.1        pk 
    355   1.1        pk 	pte = atop(pa) << IOPTE_PPNSHFT;
    356   1.1        pk 	pte &= IOPTE_PPN;
    357   1.2    abrown 	pte |= IOPTE_V | IOPTE_W | (has_iocache ? IOPTE_C : 0);
    358   1.1        pk 	sc->sc_ptes[atop(va - sc->sc_dvmabase)] = pte;
    359   1.1        pk 	IOMMU_FLUSHPAGE(sc, va);
    360   1.1        pk }
    361   1.1        pk 
    362   1.1        pk /*
    363   1.1        pk  * iommu_clear: clears mappings created by iommu_enter
    364   1.1        pk  */
    365   1.1        pk void
    366   1.1        pk iommu_remove(va, len)
    367  1.21        pk 	bus_addr_t va;
    368  1.21        pk 	bus_size_t len;
    369   1.1        pk {
    370  1.21        pk 	struct iommu_softc *sc = iommu_sc;
    371  1.21        pk 	u_int pagesz = sc->sc_pagesize;
    372  1.21        pk 	bus_addr_t base = sc->sc_dvmabase;
    373   1.1        pk 
    374   1.1        pk #ifdef DEBUG
    375  1.21        pk 	if (va < base)
    376  1.21        pk 		panic("iommu_enter: va 0x%lx not in DVMA space", (long)va);
    377   1.1        pk #endif
    378   1.1        pk 
    379  1.21        pk 	while ((long)len > 0) {
    380   1.1        pk #ifdef notyet
    381   1.1        pk #ifdef DEBUG
    382  1.21        pk 		if ((sc->sc_ptes[atop(va - base)] & IOPTE_V) == 0)
    383  1.21        pk 			panic("iommu_clear: clearing invalid pte at va 0x%lx",
    384  1.21        pk 			      (long)va);
    385   1.1        pk #endif
    386   1.1        pk #endif
    387  1.21        pk 		sc->sc_ptes[atop(va - base)] = 0;
    388   1.1        pk 		IOMMU_FLUSHPAGE(sc, va);
    389  1.21        pk 		len -= pagesz;
    390  1.21        pk 		va += pagesz;
    391   1.1        pk 	}
    392   1.1        pk }
    393   1.1        pk 
#if 0	/* These registers aren't there??? */
/* NOTE(review): dead code -- `X' is undefined and `sc_dvmacur' is not a
 * member of struct iommu_softc; kept for reference only. */
void
iommu_error()
{
	struct iommu_softc *sc = X;
	struct iommureg *iop = sc->sc_reg;

	printf("iommu: afsr 0x%x, afar 0x%x\n", iop->io_afsr, iop->io_afar);
	printf("iommu: mfsr 0x%x, mfar 0x%x\n", iop->io_mfsr, iop->io_mfar);
}
int
iommu_alloc(va, len)
	u_int va, len;
{
	struct iommu_softc *sc = X;
	int off, tva, pa, iovaddr, pte;

	off = (int)va & PGOFSET;
	len = round_page(len + off);
	va -= off;

if ((int)sc->sc_dvmacur + len > 0)
	sc->sc_dvmacur = sc->sc_dvmabase;

	iovaddr = tva = sc->sc_dvmacur;
	sc->sc_dvmacur += len;
	while (len) {
		pa = pmap_extract(pmap_kernel(), va);

#define IOMMU_PPNSHIFT	8
#define IOMMU_V		0x00000002
#define IOMMU_W		0x00000004

		pte = atop(pa) << IOMMU_PPNSHIFT;
		pte |= IOMMU_V | IOMMU_W;
		sta(sc->sc_ptes + atop(tva - sc->sc_dvmabase), ASI_BYPASS, pte);
		sc->sc_reg->io_flushpage = tva;
		len -= NBPG;
		va += NBPG;
		tva += NBPG;
	}
	return iovaddr + off;
}
#endif
    438  1.18        pk 
    439  1.18        pk 
    440  1.18        pk /*
    441  1.18        pk  * IOMMU DMA map functions.
    442  1.18        pk  */
    443  1.18        pk int
    444  1.18        pk iommu_dmamap_load(t, map, buf, buflen, p, flags)
    445  1.18        pk 	bus_dma_tag_t t;
    446  1.18        pk 	bus_dmamap_t map;
    447  1.18        pk 	void *buf;
    448  1.18        pk 	bus_size_t buflen;
    449  1.18        pk 	struct proc *p;
    450  1.18        pk 	int flags;
    451  1.18        pk {
    452  1.26        pk 	bus_size_t sgsize;
    453  1.26        pk 	bus_addr_t dva;
    454  1.24        pk 	bus_addr_t boundary;
    455  1.24        pk 	vaddr_t va = (vaddr_t)buf;
    456  1.24        pk 	u_long align, voff;
    457  1.18        pk 	pmap_t pmap;
    458  1.18        pk 
    459  1.18        pk 	/*
    460  1.24        pk 	 * Remember page offset, then truncate the buffer address to
    461  1.24        pk 	 * a page boundary.
    462  1.24        pk 	 */
    463  1.24        pk 	voff = va & PGOFSET;
    464  1.24        pk 	va &= ~PGOFSET;
    465  1.24        pk 
    466  1.24        pk 	/*
    467  1.18        pk 	 * Make sure that on error condition we return "no valid mappings".
    468  1.18        pk 	 */
    469  1.18        pk 	map->dm_nsegs = 0;
    470  1.18        pk 
    471  1.18        pk 	if (buflen > map->_dm_size)
    472  1.18        pk 		return (EINVAL);
    473  1.18        pk 
    474  1.24        pk 	sgsize = (buflen + voff + PGOFSET) & ~PGOFSET;
    475  1.24        pk 	align = dvma_cachealign ? dvma_cachealign : NBPG;
    476  1.24        pk 	boundary = map->_dm_boundary;
    477  1.18        pk 
    478  1.26        pk 	if (extent_alloc1(iommu_dvmamap, sgsize, align, va & (align-1),
    479  1.26        pk 			  boundary, EX_NOWAIT, (u_long *)&dva) != 0)
    480  1.18        pk 		return (ENOMEM);
    481  1.18        pk 
    482  1.18        pk 	cpuinfo.cache_flush(buf, buflen);
    483  1.18        pk 
    484  1.18        pk 	/*
    485  1.18        pk 	 * We always use just one segment.
    486  1.18        pk 	 */
    487  1.18        pk 	map->dm_mapsize = buflen;
    488  1.18        pk 	map->dm_nsegs = 1;
    489  1.24        pk 	map->dm_segs[0].ds_addr = dva + voff;
    490  1.26        pk 	map->dm_segs[0].ds_len = buflen;
    491  1.18        pk 
    492  1.18        pk 	if (p != NULL)
    493  1.18        pk 		pmap = p->p_vmspace->vm_map.pmap;
    494  1.18        pk 	else
    495  1.18        pk 		pmap = pmap_kernel();
    496  1.18        pk 
    497  1.24        pk 	for (; sgsize != 0; ) {
    498  1.18        pk 		/*
    499  1.18        pk 		 * Get the physical address for this page.
    500  1.18        pk 		 */
    501  1.24        pk 		paddr_t pa = pmap_extract(pmap, va);
    502  1.18        pk 
    503  1.24        pk 		iommu_enter(dva, pa);
    504  1.24        pk 
    505  1.24        pk 		dva += NBPG;
    506  1.24        pk 		va += NBPG;
    507  1.24        pk 		sgsize -= NBPG;
    508  1.18        pk 	}
    509  1.24        pk 
    510  1.18        pk 	return (0);
    511  1.18        pk }
    512  1.18        pk 
    513  1.18        pk /*
    514  1.18        pk  * Like _bus_dmamap_load(), but for mbufs.
    515  1.18        pk  */
    516  1.18        pk int
    517  1.18        pk iommu_dmamap_load_mbuf(t, map, m, flags)
    518  1.18        pk 	bus_dma_tag_t t;
    519  1.18        pk 	bus_dmamap_t map;
    520  1.18        pk 	struct mbuf *m;
    521  1.18        pk 	int flags;
    522  1.18        pk {
    523  1.18        pk 
    524  1.18        pk 	panic("_bus_dmamap_load: not implemented");
    525  1.18        pk }
    526  1.18        pk 
/*
 * Like _bus_dmamap_load(), but for uios.
 * Not implemented for this bus; any caller reaching here is a bug.
 */
int
iommu_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{

	panic("_bus_dmamap_load_uio: not implemented");
}
    540  1.18        pk 
/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 * Not implemented for this bus; any caller reaching here is a bug.
 */
int
iommu_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_bus_dmamap_load_raw: not implemented");
}
    557  1.18        pk 
    558  1.18        pk /*
    559  1.18        pk  * Common function for unloading a DMA map.  May be called by
    560  1.18        pk  * bus-specific DMA map unload functions.
    561  1.18        pk  */
    562  1.18        pk void
    563  1.18        pk iommu_dmamap_unload(t, map)
    564  1.18        pk 	bus_dma_tag_t t;
    565  1.18        pk 	bus_dmamap_t map;
    566  1.18        pk {
    567  1.18        pk 	bus_addr_t addr;
    568  1.18        pk 	bus_size_t len;
    569  1.18        pk 
    570  1.18        pk 	if (map->dm_nsegs != 1)
    571  1.18        pk 		panic("_bus_dmamap_unload: nsegs = %d", map->dm_nsegs);
    572  1.18        pk 
    573  1.29  christos 	addr = map->dm_segs[0].ds_addr;
    574  1.18        pk 	len = map->dm_segs[0].ds_len;
    575  1.26        pk 	len = ((addr & PGOFSET) + len + PGOFSET) & ~PGOFSET;
    576  1.26        pk 	addr &= ~PGOFSET;
    577  1.18        pk 
    578  1.18        pk 	iommu_remove(addr, len);
    579  1.18        pk 	if (extent_free(iommu_dvmamap, addr, len, EX_NOWAIT) != 0)
    580  1.21        pk 		printf("warning: %ld of DVMA space lost\n", (long)len);
    581  1.18        pk 
    582  1.18        pk 	/* Mark the mappings as invalid. */
    583  1.18        pk 	map->dm_mapsize = 0;
    584  1.18        pk 	map->dm_nsegs = 0;
    585  1.18        pk }
    586  1.18        pk 
/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 *
 * Currently a no-op: the load path flushes the CPU cache up front.
 */
void
iommu_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should flush CPU write buffers.
	 */
}
    604  1.18        pk 
    605  1.18        pk /*
    606  1.18        pk  * Common function for DMA-safe memory allocation.  May be called
    607  1.18        pk  * by bus-specific DMA memory allocation functions.
    608  1.18        pk  */
    609  1.18        pk int
    610  1.18        pk iommu_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
    611  1.18        pk 	bus_dma_tag_t t;
    612  1.18        pk 	bus_size_t size, alignment, boundary;
    613  1.18        pk 	bus_dma_segment_t *segs;
    614  1.18        pk 	int nsegs;
    615  1.18        pk 	int *rsegs;
    616  1.18        pk 	int flags;
    617  1.18        pk {
    618  1.21        pk 	paddr_t pa;
    619  1.24        pk 	bus_addr_t dva;
    620  1.18        pk 	vm_page_t m;
    621  1.18        pk 	int error;
    622  1.18        pk 	struct pglist *mlist;
    623  1.18        pk 
    624  1.18        pk 	size = round_page(size);
    625  1.18        pk 	error = _bus_dmamem_alloc_common(t, size, alignment, boundary,
    626  1.18        pk 					 segs, nsegs, rsegs, flags);
    627  1.18        pk 	if (error != 0)
    628  1.18        pk 		return (error);
    629  1.18        pk 
    630  1.23        pk 	if (extent_alloc(iommu_dvmamap, size, alignment, boundary,
    631  1.20        pk 			 (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
    632  1.24        pk 			 (u_long *)&dva) != 0)
    633  1.18        pk 		return (ENOMEM);
    634  1.18        pk 
    635  1.18        pk 	/*
    636  1.18        pk 	 * Compute the location, size, and number of segments actually
    637  1.18        pk 	 * returned by the VM code.
    638  1.18        pk 	 */
    639  1.24        pk 	segs[0].ds_addr = dva;
    640  1.18        pk 	segs[0].ds_len = size;
    641  1.18        pk 	*rsegs = 1;
    642  1.18        pk 
    643  1.18        pk 	mlist = segs[0]._ds_mlist;
    644  1.18        pk 	/* Map memory into DVMA space */
    645  1.18        pk 	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
    646  1.21        pk 		pa = VM_PAGE_TO_PHYS(m);
    647  1.18        pk 
    648  1.24        pk 		iommu_enter(dva, pa);
    649  1.24        pk 		dva += PAGE_SIZE;
    650  1.18        pk 	}
    651  1.18        pk 
    652  1.18        pk 	return (0);
    653  1.18        pk }
    654  1.18        pk 
    655  1.18        pk /*
    656  1.18        pk  * Common function for freeing DMA-safe memory.  May be called by
    657  1.18        pk  * bus-specific DMA memory free functions.
    658  1.18        pk  */
    659  1.18        pk void
    660  1.18        pk iommu_dmamem_free(t, segs, nsegs)
    661  1.18        pk 	bus_dma_tag_t t;
    662  1.18        pk 	bus_dma_segment_t *segs;
    663  1.18        pk 	int nsegs;
    664  1.18        pk {
    665  1.18        pk 	bus_addr_t addr;
    666  1.18        pk 	bus_size_t len;
    667  1.18        pk 
    668  1.18        pk 	if (nsegs != 1)
    669  1.18        pk 		panic("bus_dmamem_free: nsegs = %d", nsegs);
    670  1.18        pk 
    671  1.18        pk 	addr = segs[0].ds_addr;
    672  1.18        pk 	len = segs[0].ds_len;
    673  1.18        pk 
    674  1.18        pk 	iommu_remove(addr, len);
    675  1.18        pk 	if (extent_free(iommu_dvmamap, addr, len, EX_NOWAIT) != 0)
    676  1.21        pk 		printf("warning: %ld of DVMA space lost\n", (long)len);
    677  1.18        pk 	/*
    678  1.18        pk 	 * Return the list of pages back to the VM system.
    679  1.18        pk 	 */
    680  1.18        pk 	_bus_dmamem_free_common(t, segs, nsegs);
    681  1.18        pk }
    682  1.18        pk 
    683  1.18        pk /*
    684  1.18        pk  * Common function for mapping DMA-safe memory.  May be called by
    685  1.18        pk  * bus-specific DMA memory map functions.
    686  1.18        pk  */
    687  1.18        pk int
    688  1.18        pk iommu_dmamem_map(t, segs, nsegs, size, kvap, flags)
    689  1.18        pk 	bus_dma_tag_t t;
    690  1.18        pk 	bus_dma_segment_t *segs;
    691  1.18        pk 	int nsegs;
    692  1.18        pk 	size_t size;
    693  1.18        pk 	caddr_t *kvap;
    694  1.18        pk 	int flags;
    695  1.18        pk {
    696  1.18        pk 	vm_page_t m;
    697  1.21        pk 	vaddr_t va, sva;
    698  1.18        pk 	bus_addr_t addr;
    699  1.18        pk 	struct pglist *mlist;
    700  1.18        pk 	int cbit;
    701  1.18        pk 	size_t oversize;
    702  1.18        pk 	u_long align;
    703  1.18        pk 
    704  1.18        pk 	if (nsegs != 1)
    705  1.18        pk 		panic("iommu_dmamem_map: nsegs = %d", nsegs);
    706  1.18        pk 
    707  1.18        pk 	cbit = has_iocache ? 0 : PMAP_NC;
    708  1.18        pk 	align = dvma_cachealign ? dvma_cachealign : PAGE_SIZE;
    709  1.18        pk 
    710  1.18        pk 	size = round_page(size);
    711  1.18        pk 
    712  1.18        pk 	/*
    713  1.18        pk 	 * Find a region of kernel virtual addresses that can accomodate
    714  1.18        pk 	 * our aligment requirements.
    715  1.18        pk 	 */
    716  1.18        pk 	oversize = size + align - PAGE_SIZE;
    717  1.18        pk 	sva = uvm_km_valloc(kernel_map, oversize);
    718  1.18        pk 	if (sva == 0)
    719  1.18        pk 		return (ENOMEM);
    720  1.18        pk 
    721  1.18        pk 	/* Compute start of aligned region */
    722  1.18        pk 	va = sva;
    723  1.18        pk 	va += ((segs[0].ds_addr & (align - 1)) + align - va) & (align - 1);
    724  1.18        pk 
    725  1.18        pk 	/* Return excess virtual addresses */
    726  1.18        pk 	if (va != sva)
    727  1.28     chuck 		(void)uvm_unmap(kernel_map, sva, va);
    728  1.18        pk 	if (va + size != sva + oversize)
    729  1.28     chuck 		(void)uvm_unmap(kernel_map, va + size, sva + oversize);
    730  1.18        pk 
    731  1.18        pk 
    732  1.18        pk 	*kvap = (caddr_t)va;
    733  1.18        pk 	mlist = segs[0]._ds_mlist;
    734  1.18        pk 
    735  1.18        pk 	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
    736  1.18        pk 
    737  1.18        pk 		if (size == 0)
    738  1.18        pk 			panic("iommu_dmamem_map: size botch");
    739  1.18        pk 
    740  1.18        pk 		addr = VM_PAGE_TO_PHYS(m);
    741  1.18        pk 		pmap_enter(pmap_kernel(), va, addr | cbit,
    742  1.18        pk 			   VM_PROT_READ | VM_PROT_WRITE, TRUE);
    743  1.18        pk #if 0
    744  1.18        pk 			if (flags & BUS_DMA_COHERENT)
    745  1.18        pk 				/* XXX */;
    746  1.18        pk #endif
    747  1.18        pk 		va += PAGE_SIZE;
    748  1.18        pk 		size -= PAGE_SIZE;
    749  1.18        pk 	}
    750  1.18        pk 
    751  1.18        pk 	return (0);
    752  1.18        pk }
    753  1.18        pk 
    754  1.18        pk /*
    755  1.18        pk  * Common functin for mmap(2)'ing DMA-safe memory.  May be called by
    756  1.18        pk  * bus-specific DMA mmap(2)'ing functions.
    757  1.18        pk  */
    758  1.18        pk int
    759  1.18        pk iommu_dmamem_mmap(t, segs, nsegs, off, prot, flags)
    760  1.18        pk 	bus_dma_tag_t t;
    761  1.18        pk 	bus_dma_segment_t *segs;
    762  1.18        pk 	int nsegs, off, prot, flags;
    763  1.18        pk {
    764  1.18        pk 
    765  1.18        pk 	panic("_bus_dmamem_mmap: not implemented");
    766  1.18        pk }
    767