      1 /*	$NetBSD: uturn.c,v 1.3.4.1 2021/03/22 02:00:56 thorpej Exp $	*/
      2 
      3 /*	$OpenBSD: uturn.c,v 1.6 2007/12/29 01:26:14 kettenis Exp $	*/
      4 
      5 /*-
      6  * Copyright (c) 2012 The NetBSD Foundation, Inc.
      7  * All rights reserved.
      8  *
      9  * This code is derived from software contributed to The NetBSD Foundation
     10  * by Nick Hudson.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  *
     21  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     23  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     24  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     31  * POSSIBILITY OF SUCH DAMAGE.
     32  */
     33 
     34 /*
     35  * Copyright (c) 2007 Mark Kettenis
     36  *
     37  * Permission to use, copy, modify, and distribute this software for any
     38  * purpose with or without fee is hereby granted, provided that the above
     39  * copyright notice and this permission notice appear in all copies.
     40  *
     41  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     42  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     43  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     44  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     45  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     46  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     47  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     48  */
     49 
     50 /*
     51  * Copyright (c) 2004 Michael Shalayeff
     52  * All rights reserved.
     53  *
     54  * Redistribution and use in source and binary forms, with or without
     55  * modification, are permitted provided that the following conditions
     56  * are met:
     57  * 1. Redistributions of source code must retain the above copyright
     58  *    notice, this list of conditions and the following disclaimer.
     59  * 2. Redistributions in binary form must reproduce the above copyright
     60  *    notice, this list of conditions and the following disclaimer in the
     61  *    documentation and/or other materials provided with the distribution.
     62  *
     63  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     64  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     65  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     66  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
     67  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     68  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     69  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     70  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
     71  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
     72  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
     73  * THE POSSIBILITY OF SUCH DAMAGE.
     74  */
     75 
     76 /*
     77  * References:
     78  * 1. Hardware Cache Coherent Input/Output. Hewlett-Packard Journal, February
     79  *    1996.
     80  * 2. PA-RISC 1.1 Architecture and Instruction Set Reference Manual,
     81  *    Hewlett-Packard, February 1994, Third Edition
     82  */
     83 
     84 #include <sys/param.h>
     85 #include <sys/systm.h>
     86 #include <sys/device.h>
     87 #include <sys/reboot.h>
     88 #include <sys/malloc.h>
     89 #include <sys/extent.h>
     90 #include <sys/mbuf.h>
     91 #include <sys/tree.h>
     92 
     93 #include <uvm/uvm.h>
     94 
     95 #include <sys/bus.h>
     96 #include <machine/iomod.h>
     97 #include <machine/autoconf.h>
     98 
     99 #include <hppa/dev/cpudevs.h>
    100 
    101 #define UTURNDEBUG
    102 #ifdef UTURNDEBUG
    103 
    104 #define	DPRINTF(s)	do {	\
    105 	if (uturndebug)		\
    106 		printf s;	\
    107 } while(0)
    108 
    109 int uturndebug = 0;
    110 #else
    111 #define	DPRINTF(s)	/* */
    112 #endif
    113 
    114 struct uturn_regs {
    115 	/* Runway Supervisory Set */
    116 	int32_t		unused1[12];
    117 	uint32_t	io_command;		/* Offset 12 */
    118 #define	UTURN_CMD_TLB_PURGE		33	/* Purge I/O TLB entry */
    119 #define	UTURN_CMD_TLB_DIRECT_WRITE	35	/* I/O TLB Writes */
    120 
    121 	uint32_t	io_status;		/* Offset 13 */
    122 	uint32_t	io_control;		/* Offset 14 */
    123 #define	UTURN_IOCTRL_TLB_REAL		0x00000000
    124 #define	UTURN_IOCTRL_TLB_ERROR		0x00010000
    125 #define	UTURN_IOCTRL_TLB_NORMAL		0x00020000
    126 
    127 #define	UTURN_IOCTRL_MODE_OFF		0x00000000
    128 #define	UTURN_IOCTRL_MODE_INCLUDE	0x00000080
    129 #define	UTURN_IOCTRL_MODE_PEEK		0x00000180
    130 
    131 #define	UTURN_VIRTUAL_MODE	\
    132 	(UTURN_IOCTRL_TLB_NORMAL | UTURN_IOCTRL_MODE_INCLUDE)
    133 
    134 #define	UTURN_REAL_MODE		\
    135 	UTURN_IOCTRL_MODE_INCLUDE
    136 
    137 	int32_t		unused2[1];
    138 
    139 	/* Runway Auxiliary Register Set */
    140 	uint32_t	io_err_resp;		/* Offset  0 */
    141 	uint32_t	io_err_info;		/* Offset  1 */
    142 	uint32_t	io_err_req;		/* Offset  2 */
    143 	uint32_t	io_err_resp_hi;		/* Offset  3 */
    144 	uint32_t	io_tlb_entry_m;		/* Offset  4 */
    145 	uint32_t	io_tlb_entry_l;		/* Offset  5 */
    146 	uint32_t	unused3[1];
    147 	uint32_t	io_pdir_base;		/* Offset  7 */
    148 	uint32_t	io_io_low_hv;		/* Offset  8 */
    149 	uint32_t	io_io_high_hv;		/* Offset  9 */
    150 	uint32_t	unused4[1];
    151 	uint32_t	io_chain_id_mask;	/* Offset 11 */
    152 	uint32_t	unused5[2];
    153 	uint32_t	io_io_low;		/* Offset 14 */
    154 	uint32_t	io_io_high;		/* Offset 15 */
    155 };
    156 
    157 
    158 /* Uturn supports 256 TLB entries */
    159 #define	UTURN_CHAINID_SHIFT	8
    160 #define	UTURN_CHAINID_MASK	0xff
    161 #define	UTURN_TLB_ENTRIES	(1 << UTURN_CHAINID_SHIFT)
    162 
    163 #define	UTURN_IOVP_SIZE		PAGE_SIZE
    164 #define	UTURN_IOVP_SHIFT	PAGE_SHIFT
    165 #define	UTURN_IOVP_MASK		PAGE_MASK
    166 
    167 #define	UTURN_IOVA(iovp, off)	((iovp) | (off))
#define	UTURN_IOVP(iova)	((iova) & ~UTURN_IOVP_MASK)
    169 #define	UTURN_IOVA_INDEX(iova)	((iova) >> UTURN_IOVP_SHIFT)
    170 
    171 struct uturn_softc {
    172 	device_t sc_dv;
    173 
    174 	bus_dma_tag_t sc_dmat;
    175 	struct uturn_regs volatile *sc_regs;
    176 	uint64_t *sc_pdir;
    177 	uint32_t sc_chainid_shift;
    178 
    179 	char sc_mapname[20];
    180 	struct extent *sc_map;
    181 
    182 	struct hppa_bus_dma_tag sc_dmatag;
    183 };
    184 
    185 /*
    186  * per-map IOVA page table
    187  */
    188 struct uturn_page_entry {
    189 	SPLAY_ENTRY(uturn_page_entry) upe_node;
    190 	paddr_t	upe_pa;
    191 	vaddr_t	upe_va;
    192 	bus_addr_t upe_iova;
    193 };
    194 
    195 struct uturn_page_map {
    196 	SPLAY_HEAD(uturn_page_tree, uturn_page_entry) upm_tree;
    197 	int upm_maxpage;	/* Size of allocated page map */
    198 	int upm_pagecnt;	/* Number of entries in use */
    199 	struct uturn_page_entry	upm_map[1];
    200 };
    201 
    202 /*
    203  * per-map UTURN state
    204  */
    205 struct uturn_map_state {
    206 	struct uturn_softc *ums_sc;
    207 	bus_addr_t ums_iovastart;
    208 	bus_size_t ums_iovasize;
    209 	struct uturn_page_map ums_map;	/* map must be last (array at end) */
    210 };
    211 
    212 int	uturnmatch(device_t, cfdata_t, void *);
    213 void	uturnattach(device_t, device_t, void *);
    214 static device_t uturn_callback(device_t, struct confargs *);
    215 
    216 CFATTACH_DECL_NEW(uturn, sizeof(struct uturn_softc),
    217     uturnmatch, uturnattach, NULL, NULL);
    218 
    219 extern struct cfdriver uturn_cd;
    220 
    221 int uturn_dmamap_create(void *, bus_size_t, int, bus_size_t, bus_size_t, int,
    222     bus_dmamap_t *);
    223 void uturn_dmamap_destroy(void *, bus_dmamap_t);
    224 int uturn_dmamap_load(void *, bus_dmamap_t, void *, bus_size_t, struct proc *,
    225     int);
    226 int uturn_dmamap_load_mbuf(void *, bus_dmamap_t, struct mbuf *, int);
    227 int uturn_dmamap_load_uio(void *, bus_dmamap_t, struct uio *, int);
    228 int uturn_dmamap_load_raw(void *, bus_dmamap_t, bus_dma_segment_t *, int,
    229     bus_size_t, int);
    230 void uturn_dmamap_unload(void *, bus_dmamap_t);
    231 void uturn_dmamap_sync(void *, bus_dmamap_t, bus_addr_t, bus_size_t, int);
    232 int uturn_dmamem_alloc(void *, bus_size_t, bus_size_t, bus_size_t,
    233     bus_dma_segment_t *, int, int *, int);
    234 void uturn_dmamem_free(void *, bus_dma_segment_t *, int);
    235 int uturn_dmamem_map(void *, bus_dma_segment_t *, int, size_t, void **, int);
    236 void uturn_dmamem_unmap(void *, void *, size_t);
    237 paddr_t uturn_dmamem_mmap(void *, bus_dma_segment_t *, int, off_t, int, int);
    238 
    239 static void uturn_iommu_enter(struct uturn_softc *, bus_addr_t, pa_space_t,
    240     vaddr_t, paddr_t);
    241 static void uturn_iommu_remove(struct uturn_softc *, bus_addr_t, bus_size_t);
    242 
    243 struct uturn_map_state *uturn_iomap_create(int);
    244 void	uturn_iomap_destroy(struct uturn_map_state *);
    245 int	uturn_iomap_insert_page(struct uturn_map_state *, vaddr_t, paddr_t);
    246 bus_addr_t uturn_iomap_translate(struct uturn_map_state *, paddr_t);
    247 void	uturn_iomap_clear_pages(struct uturn_map_state *);
    248 
    249 static int uturn_iomap_load_map(struct uturn_softc *, bus_dmamap_t, int);
    250 
    251 const struct hppa_bus_dma_tag uturn_dmat = {
    252 	NULL,
    253 	uturn_dmamap_create, uturn_dmamap_destroy,
    254 	uturn_dmamap_load, uturn_dmamap_load_mbuf,
    255 	uturn_dmamap_load_uio, uturn_dmamap_load_raw,
    256 	uturn_dmamap_unload, uturn_dmamap_sync,
    257 
    258 	uturn_dmamem_alloc, uturn_dmamem_free, uturn_dmamem_map,
    259 	uturn_dmamem_unmap, uturn_dmamem_mmap
    260 };
    261 
    262 int
    263 uturnmatch(device_t parent, cfdata_t cf, void *aux)
    264 {
    265 	struct confargs *ca = aux;
    266 
    267 	/* there will be only one */
    268 	if (ca->ca_type.iodc_type != HPPA_TYPE_IOA ||
    269 	    ca->ca_type.iodc_sv_model != HPPA_IOA_UTURN)
    270 		return 0;
    271 
    272 	if (ca->ca_type.iodc_model == 0x58 &&
    273 	    ca->ca_type.iodc_revision >= 0x20)
    274 		return 0;
    275 
    276 	return 1;
    277 }
    278 
    279 void
    280 uturnattach(device_t parent, device_t self, void *aux)
    281 {
    282 	struct confargs *ca = aux, nca;
    283 	struct uturn_softc *sc = device_private(self);
    284 	bus_space_handle_t ioh;
    285 	volatile struct uturn_regs *r;
    286 	struct pglist pglist;
    287 	int iova_bits;
    288 	vaddr_t va;
    289 	psize_t size;
    290 	int i;
    291 
    292 	if (bus_space_map(ca->ca_iot, ca->ca_hpa, IOMOD_HPASIZE, 0, &ioh)) {
    293 		aprint_error(": can't map IO space\n");
    294 		return;
    295 	}
    296 
    297 	sc->sc_dv = self;
    298 	sc->sc_dmat = ca->ca_dmatag;
    299 	sc->sc_regs = r = bus_space_vaddr(ca->ca_iot, ioh);
    300 
    301 	aprint_normal(": %x-%x", r->io_io_low << 16, r->io_io_high << 16);
    302 	aprint_normal(": %x-%x", r->io_io_low_hv << 16, r->io_io_high_hv << 16);
    303 
    304 	aprint_normal(": %s rev %d\n",
    305 	    ca->ca_type.iodc_revision < 0x10 ? "U2" : "UTurn",
    306 	    ca->ca_type.iodc_revision & 0xf);
    307 
    308 	/*
    309 	 * Setup the iommu.
    310 	 */
    311 
	/*
	 * XXX 28 bits gives us 256MB of IOVA space; this should really be
	 * calculated as a percentage of available RAM.
	 */
	iova_bits = 28;
    315 
	/*
	 * size is the size of the pdir in bytes: one 64-bit entry per
	 * IOVA page.
	 */
    320 	size = (1 << (iova_bits - UTURN_IOVP_SHIFT)) * sizeof(uint64_t);
    321 
    322 	/*
	 * Chainid is the uppermost bits of an IOVP, used to determine which
    324 	 * TLB entry an IOVP will use.
    325 	 */
    326 	sc->sc_chainid_shift = iova_bits - UTURN_CHAINID_SHIFT;
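	/*
	 * For example (illustrative arithmetic only): with iova_bits == 28
	 * and 4KB pages the pdir has 2^16 entries (512KB), and with
	 * UTURN_CHAINID_SHIFT == 8 each of the 256 TLB chains covers 1MB
	 * of IOVA space.
	 */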
    327 
    328 	/*
    329 	 * Allocate memory for I/O pagetables.  They need to be physically
    330 	 * contiguous.
    331 	 */
    332 
    333 	if (uvm_pglistalloc(size, 0, -1, PAGE_SIZE, 0, &pglist, 1, 0) != 0)
    334 		panic("%s: no memory", __func__);
    335 
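	/*
	 * Note that the physical address of these pages is usable as a
	 * virtual address because the hppa kernel runs equivalently
	 * mapped (kernel VA == PA).
	 */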
    336 	va = (vaddr_t)VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
	sc->sc_pdir = (uint64_t *)va;
    338 
    339 	memset(sc->sc_pdir, 0, size);
    340 
    341 	r->io_chain_id_mask = UTURN_CHAINID_MASK << sc->sc_chainid_shift;
    342 	r->io_pdir_base = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
    343 
    344 	r->io_tlb_entry_m = 0;
    345 	r->io_tlb_entry_l = 0;
    346 
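	/*
	 * Invalidate the whole I/O TLB by direct-writing the zeroed
	 * entry registers into every chain slot.
	 */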
    348 	for (i = 0; i < UTURN_TLB_ENTRIES; i++) {
    349 		r->io_command =
    350 		    UTURN_CMD_TLB_DIRECT_WRITE | (i << sc->sc_chainid_shift);
    351 	}
    352 	/*
    353 	 * Go to "Virtual Mode"
    354 	 */
    355 	r->io_control = UTURN_VIRTUAL_MODE;
    356 
    357 	snprintf(sc->sc_mapname, sizeof(sc->sc_mapname), "%s_map",
    358 	    device_xname(sc->sc_dv));
	sc->sc_map = extent_create(sc->sc_mapname, 0, (1 << iova_bits) - 1,
	    0, 0, EX_WAITOK);
    361 
    362 	sc->sc_dmatag = uturn_dmat;
    363 	sc->sc_dmatag._cookie = sc;
    364 
    365 	/*
    366 	 * U2/UTurn is actually a combination of an Upper Bus Converter (UBC)
    367 	 * and a Lower Bus Converter (LBC).  This driver attaches to the UBC;
    368 	 * the LBC isn't very interesting, so we skip it.  This is easy, since
    369 	 * it always is module 63, hence the MAXMODBUS - 1 below.
    370 	 */
    371 	nca = *ca;
    372 	nca.ca_hpabase = r->io_io_low << 16;
    373 	nca.ca_dmatag = &sc->sc_dmatag;
    374 	nca.ca_nmodules = MAXMODBUS - 1;
    375 	pdc_scanbus(self, &nca, uturn_callback);
    376 }
    377 
    378 static device_t
    379 uturn_callback(device_t self, struct confargs *ca)
    380 {
    381 
    382 	return config_found(self, ca, mbprint,
    383 	    CFARG_SUBMATCH, mbsubmatch,
    384 	    CFARG_IATTR, "gedoens",
    385 	    CFARG_EOL);
    386 }
    387 
    388 /*
    389  * PDIR entry format (HP bit number)
    390  *
    391  * +-------+----------------+----------------------------------------------+
    392  * |0     3|4             15|16                                          31|
    393  * | PPN   | Virtual Index  |         Physical Page Number (PPN)           |
    394  * | [0:3] |    [0:11]      |                 [4:19]                       |
    395  * +-------+----------------+----------------------------------------------+
    396  *
    397  * +-----------------------+-----------------------------------------------+
    398  * |0           19|20    24|   25   |       |       |      |  30   |   31  |
    399  * |     PPN      |  Rsvd  | PH     |Update | Rsvd  |Lock  | Safe  | Valid |
 * |    [20:39]   |        | Enable |Enable |       |Enable| DMA   |       |
    401  * +-----------------------+-----------------------------------------------+
    402  *
    403  */
    404 
    405 #define UTURN_PENTRY_PREFETCH	0x40
    406 #define UTURN_PENTRY_UPDATE	0x20
    407 #define UTURN_PENTRY_LOCK	0x04	/* eisa devices only */
    408 #define UTURN_PENTRY_SAFEDMA	0x02	/* use safe dma - for subcacheline */
    409 #define UTURN_PENTRY_VALID	0x01
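/*
 * Per the diagram above, these flags occupy the low-order byte of a pdir
 * entry (HP bits 24-31 of the second word).
 */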
    410 
    411 static void
    412 uturn_iommu_enter(struct uturn_softc *sc, bus_addr_t iova, pa_space_t sp,
    413     vaddr_t va, paddr_t pa)
    414 {
    415 	uint64_t pdir_entry;
    416 	uint64_t *pdirp;
    417 	uint32_t ci; /* coherent index */
    418 
    419 	pdirp = &sc->sc_pdir[UTURN_IOVA_INDEX(iova)];
    420 
    421 	DPRINTF(("%s: iova %lx pdir %p pdirp %p pa %lx", __func__, iova,
    422 	    sc->sc_pdir, pdirp, pa));
    423 
    424 	ci = lci(HPPA_SID_KERNEL, va);
    425 
    426 	/* setup hints, etc */
    427 	pdir_entry = (UTURN_PENTRY_LOCK | UTURN_PENTRY_SAFEDMA |
    428 	     UTURN_PENTRY_VALID);
    429 
    430 	/*
    431 	 * bottom 36 bits of pa map directly into entry to form PPN[4:39]
    432 	 * leaving last 12 bits for hints, etc.
    433 	 */
    434 	pdir_entry |= (pa & ~PAGE_MASK);
    435 
    436 	/* mask off top PPN bits */
    437 	pdir_entry &= 0x0000ffffffffffffUL;
    438 
    439 	/* insert the virtual index bits */
    440 	pdir_entry |= (((uint64_t)ci >> 12) << 48);
    441 
    442 	/* PPN[0:3] of the 40bit PPN go in entry[0:3] */
    443 	pdir_entry |= ((((uint64_t)pa & 0x000f000000000000UL) >> 48) << 60);
    444 
    445 	*pdirp = pdir_entry;
    446 
    447 	DPRINTF((": pdir_entry %llx\n", pdir_entry));
    448 
    449 	/*
    450 	 * We could use PDC_MODEL_CAPABILITIES here
    451 	 */
    452  	fdcache(HPPA_SID_KERNEL, (vaddr_t)pdirp, sizeof(uint64_t));
    453 }
    454 
    455 
    456 static void
    457 uturn_iommu_remove(struct uturn_softc *sc, bus_addr_t iova, bus_size_t size)
    458 {
    459 	uint32_t chain_size = 1 << sc->sc_chainid_shift;
    460 	bus_size_t len;
    461 
    462 	KASSERT((iova & PAGE_MASK) == 0);
    463 	KASSERT((size & PAGE_MASK) == 0);
    464 
    465 	DPRINTF(("%s: sc %p iova %lx size %lx\n", __func__, sc, iova, size));
    466 	len = size;
    467 	while (len != 0) {
    468 		uint64_t *pdirp = &sc->sc_pdir[UTURN_IOVA_INDEX(iova)];
    469 
    470 		/* XXX Just the valid bit??? */
    471 		*pdirp = 0;
    472 
    473 		/*
    474 		* We could use PDC_MODEL_CAPABILITIES here
    475 		*/
    476 	 	fdcache(HPPA_SID_KERNEL, (vaddr_t)pdirp, sizeof(uint64_t));
    477 
    478 		iova += PAGE_SIZE;
    479 		len -= PAGE_SIZE;
    480 	}
    481 
	/* Purge the I/O TLB chains covering the range just unmapped. */
	iova -= size;
	len = size + chain_size;

	while (len > chain_size) {
		sc->sc_regs->io_command = UTURN_CMD_TLB_PURGE | iova;
		iova += chain_size;
		len -= chain_size;
	}
    489 }
    490 
    491 int
    492 uturn_dmamap_create(void *v, bus_size_t size, int nsegments,
    493     bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
    494 {
    495 	struct uturn_softc *sc = v;
    496 	bus_dmamap_t map;
    497 	struct uturn_map_state *ums;
    498 	int error;
    499 
    500 	error = bus_dmamap_create(sc->sc_dmat, size, nsegments, maxsegsz,
    501 	    boundary, flags, &map);
    502 	if (error)
    503 		return (error);
    504 
    505 	ums = uturn_iomap_create(atop(round_page(size)));
    506 	if (ums == NULL) {
    507 		bus_dmamap_destroy(sc->sc_dmat, map);
    508 		return (ENOMEM);
    509 	}
    510 
    511 	ums->ums_sc = sc;
    512 	map->_dm_cookie = ums;
    513 	*dmamap = map;
    514 
    515 	return (0);
    516 }
    517 
    518 void
    519 uturn_dmamap_destroy(void *v, bus_dmamap_t map)
    520 {
    521 	struct uturn_softc *sc = v;
    522 
    523 	/*
    524 	 * The specification (man page) requires a loaded
    525 	 * map to be unloaded before it is destroyed.
    526 	 */
    527 	if (map->dm_nsegs)
    528 		uturn_dmamap_unload(sc, map);
    529 
    530 	if (map->_dm_cookie)
    531 		uturn_iomap_destroy(map->_dm_cookie);
    532 	map->_dm_cookie = NULL;
    533 
    534 	bus_dmamap_destroy(sc->sc_dmat, map);
    535 }
    536 
    537 static int
    538 uturn_iomap_load_map(struct uturn_softc *sc, bus_dmamap_t map, int flags)
    539 {
    540 	struct uturn_map_state *ums = map->_dm_cookie;
    541 	struct uturn_page_map *upm = &ums->ums_map;
    542 	struct uturn_page_entry *e;
    543 	int err, seg, s;
    544 	paddr_t pa, paend;
    545 	vaddr_t va;
    546 	bus_size_t sgsize;
    547 	bus_size_t align, boundary;
    548 	u_long iovaddr;
    549 	bus_addr_t iova;
    550 	int i;
    551 
    552 	/* XXX */
    553 	boundary = map->_dm_boundary;
    554 	align = PAGE_SIZE;
    555 
    556 	uturn_iomap_clear_pages(ums);
    557 
    558 	for (seg = 0; seg < map->dm_nsegs; seg++) {
    559 		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
    560 
    561 		paend = round_page(ds->ds_addr + ds->ds_len);
    562 		for (pa = trunc_page(ds->ds_addr), va = trunc_page(ds->_ds_va);
    563 		     pa < paend; pa += PAGE_SIZE, va += PAGE_SIZE) {
			err = uturn_iomap_insert_page(ums, va, pa);
			if (err) {
				printf("iomap insert error: %d for "
				    "va 0x%lx pa 0x%lx\n", err, va, pa);
				bus_dmamap_unload(sc->sc_dmat, map);
				uturn_iomap_clear_pages(ums);
				return (err);
			}
    571 		}
    572 	}
    573 
    574 	sgsize = ums->ums_map.upm_pagecnt * PAGE_SIZE;
    575 	/* XXXNH */
    576 	s = splhigh();
    577 	err = extent_alloc(sc->sc_map, sgsize, align, boundary,
    578 	    EX_NOWAIT | EX_BOUNDZERO, &iovaddr);
    579 	splx(s);
    580 	if (err)
    581 		return (err);
    582 
    583 	ums->ums_iovastart = iovaddr;
    584 	ums->ums_iovasize = sgsize;
    585 
    586 	iova = iovaddr;
    587 	for (i = 0, e = upm->upm_map; i < upm->upm_pagecnt; ++i, ++e) {
    588 		e->upe_iova = iova;
    589 		uturn_iommu_enter(sc, e->upe_iova, HPPA_SID_KERNEL, e->upe_va,
    590 		    e->upe_pa);
    591 		iova += PAGE_SIZE;
    592 	}
    593 
    594 	for (seg = 0; seg < map->dm_nsegs; seg++) {
    595 		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
    596 		ds->ds_addr = uturn_iomap_translate(ums, ds->ds_addr);
    597 	}
    598 
    599 	return (0);
    600 }
    601 
    602 int
    603 uturn_dmamap_load(void *v, bus_dmamap_t map, void *addr, bus_size_t size,
    604     struct proc *p, int flags)
    605 {
    606 	struct uturn_softc *sc = v;
    607 	int err;
    608 
    609 	err = bus_dmamap_load(sc->sc_dmat, map, addr, size, p, flags);
    610 	if (err)
    611 		return (err);
    612 
    613 	return uturn_iomap_load_map(sc, map, flags);
    614 }
    615 
    616 int
    617 uturn_dmamap_load_mbuf(void *v, bus_dmamap_t map, struct mbuf *m, int flags)
    618 {
    619 	struct uturn_softc *sc = v;
    620 	int err;
    621 
    622 	err = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, flags);
    623 	if (err)
    624 		return (err);
    625 
    626 	return uturn_iomap_load_map(sc, map, flags);
    627 }
    628 
    629 int
    630 uturn_dmamap_load_uio(void *v, bus_dmamap_t map, struct uio *uio, int flags)
    631 {
    632 	struct uturn_softc *sc = v;
    633 
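	/*
	 * XXX Unlike _load() and _load_mbuf(), this path (and _load_raw()
	 * below) does not set up I/O TLB translations via
	 * uturn_iomap_load_map(); the printf below flags any use of it.
	 */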
    634 	printf("load_uio\n");
    635 
    636 	return (bus_dmamap_load_uio(sc->sc_dmat, map, uio, flags));
    637 }
    638 
    639 int
    640 uturn_dmamap_load_raw(void *v, bus_dmamap_t map, bus_dma_segment_t *segs,
    641     int nsegs, bus_size_t size, int flags)
    642 {
    643 	struct uturn_softc *sc = v;
    644 
    645 	printf("load_raw\n");
    646 
    647 	return (bus_dmamap_load_raw(sc->sc_dmat, map, segs, nsegs, size, flags));
    648 }
    649 
    650 void
    651 uturn_dmamap_unload(void *v, bus_dmamap_t map)
    652 {
    653 	struct uturn_softc *sc = v;
    654 	struct uturn_map_state *ums = map->_dm_cookie;
    655 	struct uturn_page_map *upm = &ums->ums_map;
    656 	struct uturn_page_entry *e;
    657 	int err, i, s;
    658 
    659 	/* Remove the IOMMU entries. */
    660 	for (i = 0, e = upm->upm_map; i < upm->upm_pagecnt; ++i, ++e)
    661 		uturn_iommu_remove(sc, e->upe_iova, PAGE_SIZE);
    662 
    663 	/* Clear the iomap. */
    664 	uturn_iomap_clear_pages(ums);
    665 
    666 	bus_dmamap_unload(sc->sc_dmat, map);
    667 
	s = splhigh();
	err = extent_free(sc->sc_map, ums->ums_iovastart,
	    ums->ums_iovasize, EX_NOWAIT);
	splx(s);
	if (err)
		printf("warning: %ld of IOVA space lost\n", ums->ums_iovasize);
	ums->ums_iovastart = 0;
	ums->ums_iovasize = 0;
    676 }
    677 
    678 void
    679 uturn_dmamap_sync(void *v, bus_dmamap_t map, bus_addr_t off,
    680     bus_size_t len, int ops)
    681 {
    682 	/* Nothing to do; DMA is cache-coherent. */
    683 }
    684 
    685 int
    686 uturn_dmamem_alloc(void *v, bus_size_t size, bus_size_t alignment,
    687     bus_size_t boundary, bus_dma_segment_t *segs,
    688     int nsegs, int *rsegs, int flags)
    689 {
    690 	struct uturn_softc *sc = v;
    691 
    692 	return (bus_dmamem_alloc(sc->sc_dmat, size, alignment, boundary,
    693 	    segs, nsegs, rsegs, flags));
    694 }
    695 
    696 void
    697 uturn_dmamem_free(void *v, bus_dma_segment_t *segs, int nsegs)
    698 {
    699 	struct uturn_softc *sc = v;
    700 
    701 	bus_dmamem_free(sc->sc_dmat, segs, nsegs);
    702 }
    703 
    704 int
    705 uturn_dmamem_map(void *v, bus_dma_segment_t *segs, int nsegs, size_t size,
    706     void **kvap, int flags)
    707 {
    708 	struct uturn_softc *sc = v;
    709 
    710 	return (bus_dmamem_map(sc->sc_dmat, segs, nsegs, size, kvap, flags));
    711 }
    712 
    713 void
    714 uturn_dmamem_unmap(void *v, void *kva, size_t size)
    715 {
    716 	struct uturn_softc *sc = v;
    717 
    718 	bus_dmamem_unmap(sc->sc_dmat, kva, size);
    719 }
    720 
    721 paddr_t
    722 uturn_dmamem_mmap(void *v, bus_dma_segment_t *segs, int nsegs, off_t off,
    723     int prot, int flags)
    724 {
    725 	struct uturn_softc *sc = v;
    726 
    727 	return (bus_dmamem_mmap(sc->sc_dmat, segs, nsegs, off, prot, flags));
    728 }
    729 
    730 /*
    731  * Utility function used by splay tree to order page entries by pa.
    732  */
    733 static inline int
    734 upe_compare(struct uturn_page_entry *a, struct uturn_page_entry *b)
    735 {
    736 	return ((a->upe_pa > b->upe_pa) ? 1 :
    737 		(a->upe_pa < b->upe_pa) ? -1 : 0);
    738 }
    739 
    740 SPLAY_PROTOTYPE(uturn_page_tree, uturn_page_entry, upe_node, upe_compare);
    741 
    742 SPLAY_GENERATE(uturn_page_tree, uturn_page_entry, upe_node, upe_compare);
    743 
    744 /*
    745  * Create a new iomap.
    746  */
    747 struct uturn_map_state *
    748 uturn_iomap_create(int n)
    749 {
    750 	struct uturn_map_state *ums;
    751 
    752 	/* Safety for heavily fragmented data, such as mbufs */
    753 	n += 4;
    754 	if (n < 16)
    755 		n = 16;
    756 
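	/*
	 * upm_map[] is a pre-C99 "struct hack" flexible array: allocate
	 * room for n entries, one of which is already part of the struct.
	 */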
    757 	ums = malloc(sizeof(*ums) + (n - 1) * sizeof(ums->ums_map.upm_map[0]),
    758 	    M_DEVBUF, M_NOWAIT | M_ZERO);
    759 	if (ums == NULL)
    760 		return (NULL);
    761 
    762 	/* Initialize the map. */
    763 	ums->ums_map.upm_maxpage = n;
    764 	SPLAY_INIT(&ums->ums_map.upm_tree);
    765 
    766 	return (ums);
    767 }
    768 
    769 /*
    770  * Destroy an iomap.
    771  */
    772 void
    773 uturn_iomap_destroy(struct uturn_map_state *ums)
    774 {
    775 	KASSERT(ums->ums_map.upm_pagecnt == 0);
    776 
    777 	free(ums, M_DEVBUF);
    778 }
    779 
    780 /*
    781  * Insert a pa entry in the iomap.
    782  */
    783 int
    784 uturn_iomap_insert_page(struct uturn_map_state *ums, vaddr_t va, paddr_t pa)
    785 {
    786 	struct uturn_page_map *upm = &ums->ums_map;
    787 	struct uturn_page_entry *e;
    788 
    789 	if (upm->upm_pagecnt >= upm->upm_maxpage) {
    790 		struct uturn_page_entry upe;
    791 
    792 		upe.upe_pa = pa;
    793 		if (SPLAY_FIND(uturn_page_tree, &upm->upm_tree, &upe))
    794 			return (0);
    795 
    796 		return (ENOMEM);
    797 	}
    798 
    799 	e = &upm->upm_map[upm->upm_pagecnt];
    800 
    801 	e->upe_pa = pa;
    802 	e->upe_va = va;
    803 	e->upe_iova = 0;
    804 
    805 	e = SPLAY_INSERT(uturn_page_tree, &upm->upm_tree, e);
    806 
    807 	/* Duplicates are okay, but only count them once. */
    808 	if (e)
    809 		return (0);
    810 
    811 	++upm->upm_pagecnt;
    812 
    813 	return (0);
    814 }
    815 
    816 /*
    817  * Translate a physical address (pa) into a IOVA address.
    818  */
    819 bus_addr_t
    820 uturn_iomap_translate(struct uturn_map_state *ums, paddr_t pa)
    821 {
    822 	struct uturn_page_map *upm = &ums->ums_map;
    823 	struct uturn_page_entry *e;
    824 	struct uturn_page_entry pe;
    825 	paddr_t offset = pa & PAGE_MASK;
    826 
    827 	pe.upe_pa = trunc_page(pa);
    828 
    829 	e = SPLAY_FIND(uturn_page_tree, &upm->upm_tree, &pe);
    830 
	if (e == NULL)
		panic("%s: couldn't find pa %lx", __func__, pa);
    835 
    836 	return (e->upe_iova | offset);
    837 }
    838 
    839 /*
    840  * Clear the iomap table and tree.
    841  */
    842 void
    843 uturn_iomap_clear_pages(struct uturn_map_state *ums)
    844 {
    845 	ums->ums_map.upm_pagecnt = 0;
    846 	SPLAY_INIT(&ums->ums_map.upm_tree);
    847 }
    848