/* Astro memory and I/O controller driver (NetBSD/hppa). */
      1 /*	$NetBSD: astro.c,v 1.6 2023/12/03 02:03:18 thorpej Exp $	*/
      2 
      3 /*	$OpenBSD: astro.c,v 1.8 2007/10/06 23:50:54 krw Exp $	*/
      4 
      5 /*
      6  * Copyright (c) 2007 Mark Kettenis
      7  *
      8  * Permission to use, copy, modify, and distribute this software for any
      9  * purpose with or without fee is hereby granted, provided that the above
     10  * copyright notice and this permission notice appear in all copies.
     11  *
     12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     19  */
     20 
     21 #include <sys/param.h>
     22 
     23 #include <sys/systm.h>
     24 #include <sys/device.h>
     25 #include <sys/vmem.h>
     26 #include <sys/kmem.h>
     27 #include <sys/reboot.h>
     28 #include <sys/tree.h>
     29 
     30 #include <uvm/uvm.h>
     31 
     32 #include <machine/iomod.h>
     33 #include <machine/autoconf.h>
     34 #include <machine/pdc.h>
     35 #include <machine/endian.h>
     36 
     37 #include <hppa/dev/cpudevs.h>
     38 #include <hppa/hppa/machdep.h>
     39 
/*
 * Astro chip register layout.  Offsets (given in the comments) are derived
 * from the reserved-field padding arithmetic below; do not reorder fields.
 * Registers are little-endian and accessed through le32toh()/le64toh().
 */
struct astro_regs {
	uint32_t	rid;		/* 0x0000: revision id */
	uint32_t	pad0000;
	uint32_t	ioc_ctrl;	/* 0x0008: IOC control (ASTRO_IOC_CTRL_*) */
	uint32_t	pad0008;
	uint8_t		resv1[0x0300 - 0x0010];
	/* 0x0300: LMMIO direct ranges 0-3 (base/mask/route triples) */
	uint64_t	lmmio_direct0_base;
	uint64_t	lmmio_direct0_mask;
	uint64_t	lmmio_direct0_route;
	uint64_t	lmmio_direct1_base;
	uint64_t	lmmio_direct1_mask;
	uint64_t	lmmio_direct1_route;
	uint64_t	lmmio_direct2_base;
	uint64_t	lmmio_direct2_mask;
	uint64_t	lmmio_direct2_route;
	uint64_t	lmmio_direct3_base;
	uint64_t	lmmio_direct3_mask;
	uint64_t	lmmio_direct3_route;
	/* 0x0360: LMMIO/GMMIO/IOS distributed ranges */
	uint64_t	lmmio_dist_base;
	uint64_t	lmmio_dist_mask;
	uint64_t	lmmio_dist_route;
	uint64_t	gmmio_dist_base;
	uint64_t	gmmio_dist_mask;
	uint64_t	gmmio_dist_route;
	uint64_t	ios_dist_base;
	uint64_t	ios_dist_mask;
	uint64_t	ios_dist_route;
	uint8_t		resv2[0x03c0 - 0x03a8];
	uint64_t	ios_direct_base;	/* 0x03c0 */
	uint64_t	ios_direct_mask;
	uint64_t	ios_direct_route;
	uint8_t		resv3[0x22000 - 0x03d8];
	uint64_t	func_id;	/* 0x22000 */
	uint64_t	func_class;
	uint8_t		resv4[0x22040 - 0x22010];
	uint64_t	rope_config;	/* 0x22040 */
	uint8_t		resv5[0x22050 - 0x22048];
	uint64_t	rope_debug;	/* 0x22050 */
	uint8_t		resv6[0x22200 - 0x22058];
	/* 0x22200: per-rope control registers */
	uint64_t	rope0_control;
	uint64_t	rope1_control;
	uint64_t	rope2_control;
	uint64_t	rope3_control;
	uint64_t	rope4_control;
	uint64_t	rope5_control;
	uint64_t	rope6_control;
	uint64_t	rope7_control;
	uint8_t		resv7[0x22300 - 0x22240];
	/* 0x22300: IOMMU (I/O TLB) registers */
	uint32_t	tlb_ibase;	/* iova space base/enable */
	uint32_t	pad22300;
	uint32_t	tlb_imask;	/* iova space mask */
	uint32_t	pad22308;
	uint32_t	tlb_pcom;	/* purge command */
	uint32_t	pad22310;
	uint32_t	tlb_tcnfg;	/* translation configuration */
	uint32_t	pad22318;
	uint64_t	tlb_pdir_base;	/* 0x22320: page directory PA */
};
     98 
/* ioc_ctrl register bits. */
#define ASTRO_IOC_CTRL_TE	0x0001	/* TOC Enable */
#define ASTRO_IOC_CTRL_CE	0x0002	/* Coalesce Enable */
#define ASTRO_IOC_CTRL_DE	0x0004	/* Dillon Enable */
#define ASTRO_IOC_CTRL_IE	0x0008	/* IOS Enable */
#define ASTRO_IOC_CTRL_OS	0x0010	/* Outbound Synchronous */
#define ASTRO_IOC_CTRL_IS	0x0020	/* Inbound Synchronous */
#define ASTRO_IOC_CTRL_RC	0x0040	/* Read Current Enable */
#define ASTRO_IOC_CTRL_L0	0x0080	/* 0-length Read Enable */
#define ASTRO_IOC_CTRL_RM	0x0100	/* Real Mode */
#define ASTRO_IOC_CTRL_NC	0x0200	/* Non-coherent Mode */
#define ASTRO_IOC_CTRL_ID	0x0400	/* Interrupt Disable */
#define ASTRO_IOC_CTRL_D4	0x0800	/* Disable 4-byte Coalescing */
#define ASTRO_IOC_CTRL_CC	0x1000	/* Increase Coalescing counter value */
#define ASTRO_IOC_CTRL_DD	0x2000	/* Disable distr. range coalescing */
#define ASTRO_IOC_CTRL_DC	0x4000	/* Disable the coalescing counter */

/* I/O TLB translation entry (pdir slot) fields. */
#define IOTTE_V		0x8000000000000000LL	/* Entry valid */
#define IOTTE_PAMASK	0x000000fffffff000LL	/* Physical page frame */
#define IOTTE_CI	0x00000000000000ffLL	/* Coherent index */
    118 
/* Per-instance driver state. */
struct astro_softc {
	device_t sc_dv;			/* generic device handle */

	bus_dma_tag_t sc_dmat;		/* parent DMA tag (from confargs) */
	struct astro_regs volatile *sc_regs;	/* mapped chip registers */
	uint64_t *sc_pdir;		/* IOMMU page directory (KVA) */

	char sc_dvmamapname[20];	/* name for the DVMA vmem arena */
	vmem_t *sc_dvmamap;		/* DVMA address space allocator */
	struct hppa_bus_dma_tag sc_dmatag;	/* DMA tag handed to children */
};
    130 
    131 /*
    132  * per-map DVMA page table
    133  */
struct iommu_page_entry {
	SPLAY_ENTRY(iommu_page_entry) ipe_node;	/* tree linkage, keyed on ipe_pa */
	paddr_t	ipe_pa;		/* physical address of the page */
	vaddr_t	ipe_va;		/* kernel virtual address of the page */
	bus_addr_t ipe_dva;	/* DVMA address assigned at load time */
};
    140 
struct iommu_page_map {
	SPLAY_HEAD(iommu_page_tree, iommu_page_entry) ipm_tree;	/* pa lookup */
	int ipm_maxpage;	/* Size of allocated page map */
	int ipm_pagecnt;	/* Number of entries in use */
	/* Trailing variable-length array (pre-C99 [1] idiom). */
	struct iommu_page_entry	ipm_map[1];
};
    147 
    148 /*
    149  * per-map IOMMU state
    150  */
struct iommu_map_state {
	struct astro_softc *ims_sc;	/* owning controller */
	bus_addr_t ims_dvmastart;	/* start of DVMA range in use */
	bus_size_t ims_dvmasize;	/* size of DVMA range in use */
	struct iommu_page_map ims_map;	/* map must be last (array at end) */
};
    157 
    158 int	astro_match(device_t, cfdata_t, void *);
    159 void	astro_attach(device_t, device_t, void *);
    160 static device_t astro_callback(device_t self, struct confargs *ca);
    161 
    162 CFATTACH_DECL_NEW(astro, sizeof(struct astro_softc),
    163     astro_match, astro_attach, NULL, NULL);
    164 
    165 extern struct cfdriver astro_cd;
    166 
    167 int	iommu_dvmamap_create(void *, bus_size_t, int, bus_size_t, bus_size_t,
    168 	    int, bus_dmamap_t *);
    169 void	iommu_dvmamap_destroy(void *, bus_dmamap_t);
    170 int	iommu_dvmamap_load(void *, bus_dmamap_t, void *, bus_size_t,
    171 	    struct proc *, int);
    172 int	iommu_dvmamap_load_mbuf(void *, bus_dmamap_t, struct mbuf *, int);
    173 int	iommu_dvmamap_load_uio(void *, bus_dmamap_t, struct uio *, int);
    174 int	iommu_dvmamap_load_raw(void *, bus_dmamap_t, bus_dma_segment_t *,
    175 	    int, bus_size_t, int);
    176 void	iommu_dvmamap_unload(void *, bus_dmamap_t);
    177 void	iommu_dvmamap_sync(void *, bus_dmamap_t, bus_addr_t, bus_size_t, int);
    178 int	iommu_dvmamem_alloc(void *, bus_size_t, bus_size_t, bus_size_t,
    179 	    bus_dma_segment_t *, int, int *, int);
    180 void	iommu_dvmamem_free(void *, bus_dma_segment_t *, int);
    181 int	iommu_dvmamem_map(void *, bus_dma_segment_t *, int, size_t,
    182 	    void **, int);
    183 void	iommu_dvmamem_unmap(void *, void *, size_t);
    184 paddr_t	iommu_dvmamem_mmap(void *, bus_dma_segment_t *, int, off_t, int, int);
    185 
    186 void	iommu_enter(struct astro_softc *, bus_addr_t, paddr_t, vaddr_t, int);
    187 void	iommu_remove(struct astro_softc *, bus_addr_t);
    188 
    189 struct iommu_map_state *iommu_iomap_create(int, int);
    190 void	iommu_iomap_destroy(struct iommu_map_state *);
    191 int	iommu_iomap_insert_page(struct iommu_map_state *, vaddr_t, paddr_t);
    192 bus_addr_t iommu_iomap_translate(struct iommu_map_state *, paddr_t);
    193 void	iommu_iomap_clear_pages(struct iommu_map_state *);
    194 
    195 static int iommu_iomap_load_map(struct astro_softc *, bus_dmamap_t, int);
    196 
/*
 * Template DMA tag for devices behind the Astro IOMMU; the _cookie slot
 * (first member) is filled in per-instance in astro_attach().
 */
const struct hppa_bus_dma_tag astro_dmat = {
	NULL,
	iommu_dvmamap_create, iommu_dvmamap_destroy,
	iommu_dvmamap_load, iommu_dvmamap_load_mbuf,
	iommu_dvmamap_load_uio, iommu_dvmamap_load_raw,
	iommu_dvmamap_unload, iommu_dvmamap_sync,

	iommu_dvmamem_alloc, iommu_dvmamem_free, iommu_dvmamem_map,
	iommu_dvmamem_unmap, iommu_dvmamem_mmap
};
    207 
    208 int
    209 astro_match(device_t parent, cfdata_t cf, void *aux)
    210 {
    211 	struct confargs *ca = aux;
    212 
    213 	/* Astro is a U-Turn variant. */
    214 	if (ca->ca_type.iodc_type != HPPA_TYPE_IOA ||
    215 	    ca->ca_type.iodc_sv_model != HPPA_IOA_UTURN)
    216 		return 0;
    217 
    218 	if (ca->ca_type.iodc_model == 0x58 &&
    219 	    ca->ca_type.iodc_revision >= 0x20)
    220 		return 1;
    221 
    222 	return 0;
    223 }
    224 
    225 void
    226 astro_attach(device_t parent, device_t self, void *aux)
    227 {
    228 	struct confargs *ca = aux, nca;
    229 	struct astro_softc *sc = device_private(self);
    230 	volatile struct astro_regs *r;
    231 	bus_space_handle_t ioh;
    232 	uint32_t rid, ioc_ctrl;
    233 	psize_t size;
    234 	vaddr_t va;
    235 	paddr_t pa;
    236 	void *p;
    237 	struct vm_page *m;
    238 	struct pglist mlist;
    239 	int iova_bits;
    240 	int pagezero_cookie;
    241 
    242 	sc->sc_dv = self;
    243 	sc->sc_dmat = ca->ca_dmatag;
    244 	if (bus_space_map(ca->ca_iot, ca->ca_hpa, sizeof(struct astro_regs),
    245 	    0, &ioh)) {
    246 		aprint_error(": can't map IO space\n");
    247 		return;
    248 	}
    249 	p = bus_space_vaddr(ca->ca_iot, ioh);
    250 	sc->sc_regs = r = p;
    251 	rid = le32toh(r->rid);
    252 	aprint_normal(": Astro rev %d.%d\n", (rid & 7) + 1, (rid >> 3) & 3);
    253 
    254 	ioc_ctrl = le32toh(r->ioc_ctrl);
    255 	ioc_ctrl &= ~ASTRO_IOC_CTRL_CE;
    256 	ioc_ctrl &= ~ASTRO_IOC_CTRL_RM;
    257 	ioc_ctrl &= ~ASTRO_IOC_CTRL_NC;
    258 	r->ioc_ctrl = htole32(ioc_ctrl);
    259 
    260 	/*
    261 	 * Setup the iommu.
    262 	 */
    263 
    264 	/* XXX This gives us 256MB of iova space. */
    265 	iova_bits = 28;
    266 
    267 	r->tlb_ibase = htole32(0);
    268 	r->tlb_imask = htole32(0xffffffff << iova_bits);
    269 
    270 	/* Page size is 4K. */
    271 	r->tlb_tcnfg = htole32(0);
    272 
    273 	/* Flush TLB. */
    274 	r->tlb_pcom = htole32(31);
    275 
    276 	/*
    277 	 * Allocate memory for I/O pagetables.  They need to be physically
    278 	 * contiguous.
    279 	 */
    280 
    281 	size = (1 << (iova_bits - PAGE_SHIFT)) * sizeof(uint64_t);
    282 	TAILQ_INIT(&mlist);
    283 	if (uvm_pglistalloc(size, 0, -1, PAGE_SIZE, 0, &mlist, 1, 0) != 0) {
    284 		aprint_error(": can't allocate PDIR\n");
    285 		return;
    286 	}
    287 
    288 	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
    289 
    290 	if (va == 0) {
    291 		aprint_error(": can't map PDIR\n");
    292 		return;
    293 	}
    294 	sc->sc_pdir = (uint64_t *)va;
    295 
    296 	m = TAILQ_FIRST(&mlist);
    297 	r->tlb_pdir_base = htole64(VM_PAGE_TO_PHYS(m));
    298 
    299 	/* Map the pages. */
    300 	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
    301 		pa = VM_PAGE_TO_PHYS(m);
    302 		pmap_enter(pmap_kernel(), va, pa,
    303 		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
    304 		va += PAGE_SIZE;
    305 	}
    306 	pmap_update(pmap_kernel());
    307 	memset(sc->sc_pdir, 0, size);
    308 
    309 	/*
    310 	 * The PDC might have set up some devices to do DMA.  It will do
    311 	 * this for the onboard USB controller if an USB keyboard is used
    312 	 * for console input.  In that case, bad things will happen if we
    313 	 * enable iova space.  So reset the PDC devices before we do that.
    314 	 * Don't do this if we're using a serial console though, since it
    315 	 * will stop working if we do.  This is fine since the serial port
    316 	 * doesn't do DMA.
    317 	 */
    318 	pagezero_cookie = hppa_pagezero_map();
    319 	if (PAGE0->mem_cons.pz_class != PCL_DUPLEX)
    320 		pdcproc_ioreset();
    321 	hppa_pagezero_unmap(pagezero_cookie);
    322 
    323 	/* Enable iova space. */
    324 	r->tlb_ibase = htole32(1);
    325 
    326 	/*
    327 	 * Now all the hardware's working we need to allocate a dvma map.
    328 	 */
    329 	snprintf(sc->sc_dvmamapname, sizeof(sc->sc_dvmamapname),
    330 	    "%s_dvma", device_xname(sc->sc_dv));
    331 	sc->sc_dvmamap = vmem_create(sc->sc_dvmamapname,
    332 				     0,			/* base */
    333 				     (1 << iova_bits),	/* size */
    334 				     PAGE_SIZE,		/* quantum */
    335 				     NULL,		/* allocfn */
    336 				     NULL,		/* freefn */
    337 				     NULL,		/* source */
    338 				     0,			/* qcache_max */
    339 				     VM_SLEEP,
    340 				     IPL_VM);
    341 	KASSERT(sc->sc_dvmamap != NULL);
    342 
    343 	sc->sc_dmatag = astro_dmat;
    344 	sc->sc_dmatag._cookie = sc;
    345 
    346 	nca = *ca;	/* clone from us */
    347 	nca.ca_dmatag = &sc->sc_dmatag;
    348 	nca.ca_hpabase = IOMOD_IO_IO_LOW(p);
    349 	nca.ca_nmodules = MAXMODBUS;
    350 	pdc_scanbus(self, &nca, astro_callback);
    351 }
    352 
    353 static device_t
    354 astro_callback(device_t self, struct confargs *ca)
    355 {
    356 
    357 	return config_found(self, ca, mbprint,
    358 	    CFARGS(.submatch = mbsubmatch));
    359 }
    360 
    361 int
    362 iommu_dvmamap_create(void *v, bus_size_t size, int nsegments,
    363     bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
    364 {
    365 	struct astro_softc *sc = v;
    366 	bus_dmamap_t map;
    367 	struct iommu_map_state *ims;
    368 	int error;
    369 
    370 	error = bus_dmamap_create(sc->sc_dmat, size, nsegments, maxsegsz,
    371 	    boundary, flags, &map);
    372 	if (error)
    373 		return (error);
    374 
    375 	ims = iommu_iomap_create(atop(round_page(size)), flags);
    376 	if (ims == NULL) {
    377 		bus_dmamap_destroy(sc->sc_dmat, map);
    378 		return (ENOMEM);
    379 	}
    380 
    381 	ims->ims_sc = sc;
    382 	map->_dm_cookie = ims;
    383 	*dmamap = map;
    384 
    385 	return (0);
    386 }
    387 
    388 void
    389 iommu_dvmamap_destroy(void *v, bus_dmamap_t map)
    390 {
    391 	struct astro_softc *sc = v;
    392 
    393 	/*
    394 	 * The specification (man page) requires a loaded
    395 	 * map to be unloaded before it is destroyed.
    396 	 */
    397 	if (map->dm_nsegs)
    398 		iommu_dvmamap_unload(sc, map);
    399 
    400 	if (map->_dm_cookie)
    401 		iommu_iomap_destroy(map->_dm_cookie);
    402 	map->_dm_cookie = NULL;
    403 
    404 	bus_dmamap_destroy(sc->sc_dmat, map);
    405 }
    406 
    407 static int
    408 iommu_iomap_load_map(struct astro_softc *sc, bus_dmamap_t map, int flags)
    409 {
    410 	struct iommu_map_state *ims = map->_dm_cookie;
    411 	struct iommu_page_map *ipm = &ims->ims_map;
    412 	struct iommu_page_entry *e;
    413 	int err, seg;
    414 	paddr_t pa, paend;
    415 	vaddr_t va;
    416 	bus_size_t sgsize;
    417 	bus_size_t align, boundary;
    418 	vmem_addr_t dvmaddr;
    419 	bus_addr_t dva;
    420 	int i;
    421 
    422 	/* XXX */
    423 	boundary = map->_dm_boundary;
    424 	align = 0;	/* align to quantum */
    425 
    426 	iommu_iomap_clear_pages(ims);
    427 
    428 	for (seg = 0; seg < map->dm_nsegs; seg++) {
    429 		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
    430 
    431 		paend = round_page(ds->ds_addr + ds->ds_len);
    432 		for (pa = trunc_page(ds->ds_addr), va = trunc_page(ds->_ds_va);
    433 		     pa < paend; pa += PAGE_SIZE, va += PAGE_SIZE) {
    434 			err = iommu_iomap_insert_page(ims, va, pa);
    435 			if (err) {
    436 				printf("iomap insert error: %d for "
    437 				    "va 0x%lx pa 0x%lx\n", err, va, pa);
    438 				bus_dmamap_unload(sc->sc_dmat, map);
    439 				iommu_iomap_clear_pages(ims);
    440 			}
    441 		}
    442 	}
    443 
    444 	const vm_flag_t vmflags = VM_BESTFIT |
    445 	    ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);
    446 
    447 	sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;
    448 	err = vmem_xalloc(sc->sc_dvmamap, sgsize,
    449 			  align,		/* align */
    450 			  0,			/* phase */
    451 			  boundary,		/* nocross */
    452 			  VMEM_ADDR_MIN,	/* minaddr */
    453 			  VMEM_ADDR_MAX,	/* maxaddr */
    454 			  vmflags,
    455 			  &dvmaddr);
    456 	if (err)
    457 		return (err);
    458 
    459 	ims->ims_dvmastart = dvmaddr;
    460 	ims->ims_dvmasize = sgsize;
    461 
    462 	dva = dvmaddr;
    463 	for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e) {
    464 		e->ipe_dva = dva;
    465 		iommu_enter(sc, e->ipe_dva, e->ipe_pa, e->ipe_va, flags);
    466 		dva += PAGE_SIZE;
    467 	}
    468 
    469 	for (seg = 0; seg < map->dm_nsegs; seg++) {
    470 		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
    471 		ds->ds_addr = iommu_iomap_translate(ims, ds->ds_addr);
    472 	}
    473 
    474 	return (0);
    475 }
    476 
    477 int
    478 iommu_dvmamap_load(void *v, bus_dmamap_t map, void *addr, bus_size_t size,
    479     struct proc *p, int flags)
    480 {
    481 	struct astro_softc *sc = v;
    482 	int err;
    483 
    484 	err = bus_dmamap_load(sc->sc_dmat, map, addr, size, p, flags);
    485 	if (err)
    486 		return (err);
    487 
    488 	return iommu_iomap_load_map(sc, map, flags);
    489 }
    490 
    491 int
    492 iommu_dvmamap_load_mbuf(void *v, bus_dmamap_t map, struct mbuf *m, int flags)
    493 {
    494 	struct astro_softc *sc = v;
    495 	int err;
    496 
    497 	err = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, flags);
    498 	if (err)
    499 		return (err);
    500 
    501 	return iommu_iomap_load_map(sc, map, flags);
    502 }
    503 
    504 int
    505 iommu_dvmamap_load_uio(void *v, bus_dmamap_t map, struct uio *uio, int flags)
    506 {
    507 	struct astro_softc *sc = v;
    508 
    509 	printf("load_uio\n");
    510 
    511 	return (bus_dmamap_load_uio(sc->sc_dmat, map, uio, flags));
    512 }
    513 
    514 int
    515 iommu_dvmamap_load_raw(void *v, bus_dmamap_t map, bus_dma_segment_t *segs,
    516     int nsegs, bus_size_t size, int flags)
    517 {
    518 	struct astro_softc *sc = v;
    519 
    520 	printf("load_raw\n");
    521 
    522 	return (bus_dmamap_load_raw(sc->sc_dmat, map, segs, nsegs, size, flags));
    523 }
    524 
    525 void
    526 iommu_dvmamap_unload(void *v, bus_dmamap_t map)
    527 {
    528 	struct astro_softc *sc = v;
    529 	struct iommu_map_state *ims = map->_dm_cookie;
    530 	struct iommu_page_map *ipm = &ims->ims_map;
    531 	struct iommu_page_entry *e;
    532 	int i;
    533 
    534 	/* Remove the IOMMU entries. */
    535 	for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e)
    536 		iommu_remove(sc, e->ipe_dva);
    537 
    538 	/* Clear the iomap. */
    539 	iommu_iomap_clear_pages(ims);
    540 
    541 	bus_dmamap_unload(sc->sc_dmat, map);
    542 
    543 	vmem_xfree(sc->sc_dvmamap, ims->ims_dvmastart, ims->ims_dvmasize);
    544 	ims->ims_dvmastart = 0;
    545 	ims->ims_dvmasize = 0;
    546 }
    547 
void
iommu_dvmamap_sync(void *v, bus_dmamap_t map, bus_addr_t off,
    bus_size_t len, int ops)
{
	/*
	 * Nothing to do; DMA is cache-coherent.  All arguments are
	 * intentionally ignored.
	 */
}
    554 
    555 int
    556 iommu_dvmamem_alloc(void *v, bus_size_t size, bus_size_t alignment,
    557     bus_size_t boundary, bus_dma_segment_t *segs,
    558     int nsegs, int *rsegs, int flags)
    559 {
    560 	struct astro_softc *sc = v;
    561 
    562 	return (bus_dmamem_alloc(sc->sc_dmat, size, alignment, boundary,
    563 	    segs, nsegs, rsegs, flags));
    564 }
    565 
    566 void
    567 iommu_dvmamem_free(void *v, bus_dma_segment_t *segs, int nsegs)
    568 {
    569 	struct astro_softc *sc = v;
    570 
    571 	bus_dmamem_free(sc->sc_dmat, segs, nsegs);
    572 }
    573 
    574 int
    575 iommu_dvmamem_map(void *v, bus_dma_segment_t *segs, int nsegs, size_t size,
    576     void **kvap, int flags)
    577 {
    578 	struct astro_softc *sc = v;
    579 
    580 	return (bus_dmamem_map(sc->sc_dmat, segs, nsegs, size, kvap, flags));
    581 }
    582 
    583 void
    584 iommu_dvmamem_unmap(void *v, void *kva, size_t size)
    585 {
    586 	struct astro_softc *sc = v;
    587 
    588 	bus_dmamem_unmap(sc->sc_dmat, kva, size);
    589 }
    590 
    591 paddr_t
    592 iommu_dvmamem_mmap(void *v, bus_dma_segment_t *segs, int nsegs, off_t off,
    593     int prot, int flags)
    594 {
    595 	struct astro_softc *sc = v;
    596 
    597 	return (bus_dmamem_mmap(sc->sc_dmat, segs, nsegs, off, prot, flags));
    598 }
    599 
    600 /*
    601  * Utility function used by splay tree to order page entries by pa.
    602  */
    603 static inline int
    604 iomap_compare(struct iommu_page_entry *a, struct iommu_page_entry *b)
    605 {
    606 	return ((a->ipe_pa > b->ipe_pa) ? 1 :
    607 		(a->ipe_pa < b->ipe_pa) ? -1 : 0);
    608 }
    609 
    610 SPLAY_PROTOTYPE(iommu_page_tree, iommu_page_entry, ipe_node, iomap_compare);
    611 
    612 SPLAY_GENERATE(iommu_page_tree, iommu_page_entry, ipe_node, iomap_compare);
    613 
    614 /*
    615  * Create a new iomap.
    616  */
    617 struct iommu_map_state *
    618 iommu_iomap_create(int n, int flags)
    619 {
    620 	struct iommu_map_state *ims;
    621 
    622 	/* Safety for heavily fragmented data, such as mbufs */
    623 	n += 4;
    624 	if (n < 16)
    625 		n = 16;
    626 
    627 	const size_t sz =
    628 	    sizeof(*ims) + (n - 1) * sizeof(ims->ims_map.ipm_map[0]);
    629 
    630 	ims = kmem_zalloc(sz, (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP);
    631 	if (ims == NULL)
    632 		return (NULL);
    633 
    634 	/* Initialize the map. */
    635 	ims->ims_map.ipm_maxpage = n;
    636 	SPLAY_INIT(&ims->ims_map.ipm_tree);
    637 
    638 	return (ims);
    639 }
    640 
    641 /*
    642  * Destroy an iomap.
    643  */
    644 void
    645 iommu_iomap_destroy(struct iommu_map_state *ims)
    646 {
    647 #ifdef DIAGNOSTIC
    648 	if (ims->ims_map.ipm_pagecnt > 0)
    649 		printf("iommu_iomap_destroy: %d page entries in use\n",
    650 		    ims->ims_map.ipm_pagecnt);
    651 #endif
    652 	const int n = ims->ims_map.ipm_maxpage;
    653 	const size_t sz =
    654 	    sizeof(*ims) + (n - 1) * sizeof(ims->ims_map.ipm_map[0]);
    655 
    656 	kmem_free(ims, sz);
    657 }
    658 
    659 /*
    660  * Insert a pa entry in the iomap.
    661  */
    662 int
    663 iommu_iomap_insert_page(struct iommu_map_state *ims, vaddr_t va, paddr_t pa)
    664 {
    665 	struct iommu_page_map *ipm = &ims->ims_map;
    666 	struct iommu_page_entry *e;
    667 
    668 	if (ipm->ipm_pagecnt >= ipm->ipm_maxpage) {
    669 		struct iommu_page_entry ipe;
    670 
    671 		ipe.ipe_pa = pa;
    672 		if (SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &ipe))
    673 			return (0);
    674 
    675 		return (ENOMEM);
    676 	}
    677 
    678 	e = &ipm->ipm_map[ipm->ipm_pagecnt];
    679 
    680 	e->ipe_pa = pa;
    681 	e->ipe_va = va;
    682 	e->ipe_dva = 0;
    683 
    684 	e = SPLAY_INSERT(iommu_page_tree, &ipm->ipm_tree, e);
    685 
    686 	/* Duplicates are okay, but only count them once. */
    687 	if (e)
    688 		return (0);
    689 
    690 	++ipm->ipm_pagecnt;
    691 
    692 	return (0);
    693 }
    694 
    695 /*
    696  * Translate a physical address (pa) into a DVMA address.
    697  */
    698 bus_addr_t
    699 iommu_iomap_translate(struct iommu_map_state *ims, paddr_t pa)
    700 {
    701 	struct iommu_page_map *ipm = &ims->ims_map;
    702 	struct iommu_page_entry *e;
    703 	struct iommu_page_entry pe;
    704 	paddr_t offset = pa & PAGE_MASK;
    705 
    706 	pe.ipe_pa = trunc_page(pa);
    707 
    708 	e = SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &pe);
    709 
    710 	if (e == NULL) {
    711 		panic("couldn't find pa %lx\n", pa);
    712 		return 0;
    713 	}
    714 
    715 	return (e->ipe_dva | offset);
    716 }
    717 
    718 /*
    719  * Clear the iomap table and tree.
    720  */
    721 void
    722 iommu_iomap_clear_pages(struct iommu_map_state *ims)
    723 {
    724 	ims->ims_map.ipm_pagecnt = 0;
    725 	SPLAY_INIT(&ims->ims_map.ipm_tree);
    726 }
    727 
    728 /*
    729  * Add an entry to the IOMMU table.
    730  */
void
iommu_enter(struct astro_softc *sc, bus_addr_t dva, paddr_t pa, vaddr_t va,
    int flags)
{
	/* One 64-bit pdir slot per DVMA page. */
	volatile uint64_t *tte_ptr = &sc->sc_pdir[dva >> PAGE_SHIFT];
	uint64_t tte;
	uint32_t ci;

#ifdef ASTRODEBUG
	printf("iommu_enter dva %lx, pa %lx, va %lx\n", dva, pa, va);
#endif

#ifdef DIAGNOSTIC
	tte = le64toh(*tte_ptr);

	/* Overwriting a valid entry means the DVMA range was double-mapped. */
	if (tte & IOTTE_V) {
		printf("Overwriting valid tte entry (dva %lx pa %lx "
		    "&tte %p tte %llx)\n", dva, pa, tte_ptr, tte);
		/* vmem_print(sc->sc_dvmamap);		XXX */
		panic("IOMMU overwrite");
	}
#endif

	/* Coherence index for the kernel virtual address (lci instruction). */
	ci = lci(HPPA_SID_KERNEL, va);

	/* Compose the entry: physical frame, coherence index, valid bit. */
	tte = (pa & IOTTE_PAMASK) | ((ci >> 12) & IOTTE_CI);
	tte |= IOTTE_V;

	*tte_ptr = htole64(tte);
	/* Flush the slot from the data cache so the IOMMU sees the update. */
	fdcache(HPPA_SID_KERNEL, (vaddr_t)tte_ptr, sizeof(*tte_ptr));
}
    762 
    763 /*
    764  * Remove an entry from the IOMMU table.
    765  */
void
iommu_remove(struct astro_softc *sc, bus_addr_t dva)
{
	volatile struct astro_regs *r = sc->sc_regs;
	uint64_t *tte_ptr = &sc->sc_pdir[dva >> PAGE_SHIFT];
	uint64_t tte;

#ifdef DIAGNOSTIC
	if (dva != trunc_page(dva)) {
		printf("iommu_remove: unaligned dva: %lx\n", dva);
		dva = trunc_page(dva);
	}
#endif

	tte = le64toh(*tte_ptr);

#ifdef DIAGNOSTIC
	/* Removing a never-valid entry indicates a bookkeeping bug. */
	if ((tte & IOTTE_V) == 0) {
		printf("Removing invalid tte entry (dva %lx &tte %p "
		    "tte %llx)\n", dva, tte_ptr, tte);
		/* vmem_print(sc->sc_dvmamap);		XXX */
		panic("IOMMU remove overwrite");
	}
#endif

	/* Clear only the valid bit; the rest is kept for debugging. */
	*tte_ptr = htole64(tte & ~IOTTE_V);

	/* Flush IOMMU. */
	/* NOTE(review): pcom value is dva | PAGE_SHIFT -- presumably the low
	 * bits encode log2 of the purge size; confirm against the Astro ERS. */
	r->tlb_pcom = htole32(dva | PAGE_SHIFT);
}
    796