/*	$NetBSD: intio.c,v 1.16 2002/10/02 16:02:40 thorpej Exp $	*/

/*-
 * Copyright (c) 1998 NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NetBSD/x68k internal I/O virtual bus.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/extent.h>
#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/frame.h>

#include <arch/x68k/dev/intiovar.h>
#include <arch/x68k/dev/mfp.h>

/*
 * bus_space(9) interface
 */
static int intio_bus_space_map __P((bus_space_tag_t, bus_addr_t, bus_size_t,
    int, bus_space_handle_t *));
static void intio_bus_space_unmap __P((bus_space_tag_t, bus_space_handle_t,
    bus_size_t));
static int intio_bus_space_subregion __P((bus_space_tag_t,
    bus_space_handle_t, bus_size_t, bus_size_t, bus_space_handle_t *));

static struct x68k_bus_space intio_bus = {
#if 0
	X68K_INTIO_BUS,
#endif
	intio_bus_space_map, intio_bus_space_unmap, intio_bus_space_subregion,
	x68k_bus_space_alloc, x68k_bus_space_free,
#if 0
	x68k_bus_space_barrier,
#endif

	0
};

/*
 * bus_dma(9) interface
 */
#define	INTIO_DMA_BOUNCE_THRESHOLD	(16 * 1024 * 1024)
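/*
 * The internal I/O DMA hardware can only address 24 bits (the low 16 MB;
 * see the bounce-buffer discussion in _intio_bus_dmamap_create() below),
 * so transfers to memory above this threshold must be bounced through
 * low memory.
 */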
int	_intio_bus_dmamap_create __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));
void	_intio_bus_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
int	_intio_bus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));
int	_intio_bus_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));
int	_intio_bus_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));
int	_intio_bus_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));
void	_intio_bus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void	_intio_bus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int));

int	_intio_bus_dmamem_alloc __P((bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int));

int	_intio_dma_alloc_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int));
void	_intio_dma_free_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t));

struct x68k_bus_dma intio_bus_dma = {
	INTIO_DMA_BOUNCE_THRESHOLD,
	_intio_bus_dmamap_create,
	_intio_bus_dmamap_destroy,
	_intio_bus_dmamap_load,
	_intio_bus_dmamap_load_mbuf,
	_intio_bus_dmamap_load_uio,
	_intio_bus_dmamap_load_raw,
	_intio_bus_dmamap_unload,
	_intio_bus_dmamap_sync,
	_intio_bus_dmamem_alloc,
	x68k_bus_dmamem_free,
	x68k_bus_dmamem_map,
	x68k_bus_dmamem_unmap,
	x68k_bus_dmamem_mmap,
};
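/*
 * Note that the underscore-prefixed methods above are INTIO-specific
 * wrappers (adding bounce-buffer support), while the dmamem entries
 * fall through to the generic x68k implementations.
 */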

/*
 * autoconf stuff
 */
static int intio_match __P((struct device *, struct cfdata *, void *));
static void intio_attach __P((struct device *, struct device *, void *));
static int intio_search __P((struct device *, struct cfdata *cf, void *));
static int intio_print __P((void *, const char *));
static void intio_alloc_system_ports __P((struct intio_softc*));

CFATTACH_DECL(intio, sizeof(struct intio_softc),
    intio_match, intio_attach, NULL, NULL);

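/*
 * Interrupt dispatch table, one slot per m68k exception vector;
 * intio_intr() uses it to route an incoming vector to the handler
 * registered by intio_intr_establish().
 */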
static struct intio_interrupt_vector {
	intio_intr_handler_t	iiv_handler;
	void			*iiv_arg;
	int			iiv_intrcntoff;
} iiv[256] = {{0,},};

extern struct cfdriver intio_cd;

/* used in console initialization */
extern int x68k_realconfig;
int x68k_config_found __P((struct cfdata *, struct device *,
			   void *, cfprint_t));
static struct cfdata *cfdata_intiobus = NULL;

/* other static functions */
static int scan_intrnames __P((const char *));
#ifdef DEBUG
int intio_debug = 0;
#endif

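/*
 * Match the single intio bus instance; the parent bus passes the
 * device name as aux, compared against intio_cd.cd_name below.
 */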
static int
intio_match(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;		/* device name */
{
	if (strcmp(aux, intio_cd.cd_name) != 0)
		return (0);
	if (cf->cf_unit != 0)
		return (0);
	if (x68k_realconfig == 0)
		cfdata_intiobus = cf; /* XXX */

	return (1);
}


/* used in console initialization: configure only MFP */
static struct intio_attach_args initial_ia = {
	&intio_bus,
	0/*XXX*/,

	"mfp",			/* ia_name */
	MFP_ADDR,		/* ia_addr */
	0x30,			/* ia_size */
	MFP_INTR,		/* ia_intr */
	-1,			/* ia_dma */
	-1,			/* ia_dmaintr */
};

static void
intio_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;		/* NULL */
{
	struct intio_softc *sc = (struct intio_softc *)self;
	struct intio_attach_args ia;

	if (self == NULL) {
		/* console only init */
		x68k_config_found(cfdata_intiobus, NULL, &initial_ia, NULL);
		return;
	}

	printf(" mapped at %8p\n", intiobase);

	sc->sc_map = extent_create("intiomap",
				  PHYS_INTIODEV,
				  PHYS_INTIODEV + 0x400000,
				  M_DEVBUF, NULL, NULL, EX_NOWAIT);
	intio_alloc_system_ports(sc);

	sc->sc_bst = &intio_bus;
	sc->sc_bst->x68k_bus_device = self;
	sc->sc_dmat = &intio_bus_dma;
	sc->sc_dmac = 0;

	memset(iiv, 0, sizeof(iiv));

	ia.ia_bst = sc->sc_bst;
	ia.ia_dmat = sc->sc_dmat;

	config_search(intio_search, self, &ia);
}

static int
intio_search(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct intio_attach_args *ia = aux;
	struct intio_softc *sc = (struct intio_softc *)parent;

	ia->ia_bst = sc->sc_bst;
	ia->ia_dmat = sc->sc_dmat;
	ia->ia_name = cf->cf_name;
	ia->ia_addr = cf->cf_addr;
	ia->ia_intr = cf->cf_intr;
	ia->ia_dma = cf->cf_dma;
	ia->ia_dmaintr = cf->cf_dmaintr;

	if (config_match(parent, cf, ia) > 0)
		config_attach(parent, cf, ia, intio_print);

	return (0);
}

static int
intio_print(aux, name)
	void *aux;
	const char *name;
{
	struct intio_attach_args *ia = aux;

/*	if (ia->ia_addr > 0)	*/
		printf(" addr 0x%06x", ia->ia_addr);
	if (ia->ia_intr > 0)
		printf(" intr 0x%02x", ia->ia_intr);
	if (ia->ia_dma >= 0) {
		printf(" using DMA ch%d", ia->ia_dma);
		if (ia->ia_dmaintr > 0)
			printf(" intr 0x%02x and 0x%02x",
			    ia->ia_dmaintr, ia->ia_dmaintr + 1);
	}

	return (QUIET);
}

/*
 * intio memory map manager
 */

int
intio_map_allocate_region(parent, ia, flag)
	struct device *parent;
	struct intio_attach_args *ia;
	enum intio_map_flag flag; /* INTIO_MAP_TESTONLY or INTIO_MAP_ALLOCATE */
{
	struct intio_softc *sc = (struct intio_softc *)parent;
	struct extent *map = sc->sc_map;
	int r;

	r = extent_alloc_region(map, ia->ia_addr, ia->ia_size, 0);
#ifdef DEBUG
	if (intio_debug)
		extent_print(map);
#endif
	if (r == 0) {
		/* The region is free; release it again if only testing. */
		if (flag != INTIO_MAP_ALLOCATE)
			extent_free(map, ia->ia_addr, ia->ia_size, 0);
		return 0;
	}

	return -1;
}

int
intio_map_free_region(parent, ia)
	struct device *parent;
	struct intio_attach_args *ia;
{
	struct intio_softc *sc = (struct intio_softc *)parent;
	struct extent *map = sc->sc_map;

	extent_free(map, ia->ia_addr, ia->ia_size, 0);
#ifdef DEBUG
	if (intio_debug)
		extent_print(map);
#endif
	return 0;
}

void
intio_alloc_system_ports(sc)
	struct intio_softc *sc;
{
	extent_alloc_region(sc->sc_map, INTIO_SYSPORT, 16, 0);
	extent_alloc_region(sc->sc_map, INTIO_SICILIAN, 0x2000, 0);
}


/*
 * intio bus space stuff.
 */
static int
intio_bus_space_map(t, bpa, size, flags, bshp)
	bus_space_tag_t t;
	bus_addr_t bpa;
	bus_size_t size;
	int flags;
	bus_space_handle_t *bshp;
{
	/*
	 * Intio bus is mapped permanently.
	 */
	*bshp = (bus_space_handle_t)
	    ((u_int)bpa - PHYS_INTIODEV + intiobase);
	/*
	 * Some devices are mapped on odd or even addresses only.
	 */
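	/*
	 * A sketch of the handle encoding (an assumption based on the
	 * constants used below): bit 31 flags the handle as a shifted
	 * mapping so the access methods stride registers by two bytes,
	 * and bit 0 selects the odd byte lane.
	 */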
	if ((flags & BUS_SPACE_MAP_SHIFTED_MASK) == BUS_SPACE_MAP_SHIFTED_ODD)
		*bshp += 0x80000001;
	if ((flags & BUS_SPACE_MAP_SHIFTED_MASK) == BUS_SPACE_MAP_SHIFTED_EVEN)
		*bshp += 0x80000000;

	return (0);
}

static void
intio_bus_space_unmap(t, bsh, size)
	bus_space_tag_t t;
	bus_space_handle_t bsh;
	bus_size_t size;
{
	/* The intio bus is mapped permanently; nothing to unmap. */
	return;
}

static int
intio_bus_space_subregion(t, bsh, offset, size, nbshp)
	bus_space_tag_t t;
	bus_space_handle_t bsh;
	bus_size_t offset, size;
	bus_space_handle_t *nbshp;
{

	*nbshp = bsh + offset;
	return (0);
}


/*
 * interrupt handler
 */
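
/*
 * Typical driver usage (a sketch; the "xxx" names are hypothetical):
 *
 *	if (intio_intr_establish(vec, "xxx", xxx_intr, sc))
 *		panic("xxx: can't establish interrupt");
 *
 * The name is recorded in intrnames[] by scan_intrnames() so that the
 * per-vector counts kept in intrcnt[] can be attributed (vmstat -i).
 */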
int
intio_intr_establish(vector, name, handler, arg)
	int vector;
	const char *name;	/* XXX */
	intio_intr_handler_t handler;
	void *arg;
{
	if (vector < 16)
		panic("Invalid interrupt vector");
	if (iiv[vector].iiv_handler)
		return EBUSY;
	iiv[vector].iiv_handler = handler;
	iiv[vector].iiv_arg = arg;
	iiv[vector].iiv_intrcntoff = scan_intrnames(name);

	return 0;
}

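/*
 * Find the slot for "name" in the kernel's interrupt statistics
 * arrays, appending a new entry if it is not already present; the
 * returned index is this name's offset into intrcnt[].
 */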
static int
scan_intrnames(name)
	const char *name;
{
	extern char intrnames[];
	extern char eintrnames[];
	int r = 0;
	char *p = &intrnames[0];

	for (;;) {
		if (*p == 0) {	/* new intr */
			if (p + strlen(name) >= eintrnames)
				panic("Interrupt statistics buffer overrun.");
			strcpy(p, name);
			break;
		}
		if (strcmp(p, name) == 0)
			break;
		r++;
		while (*p++ != 0)
			;
	}

	return r;
}

int
intio_intr_disestablish(vector, arg)
	int vector;
	void *arg;
{
	if (iiv[vector].iiv_handler == 0 || iiv[vector].iiv_arg != arg)
		return EINVAL;
	iiv[vector].iiv_handler = 0;
	iiv[vector].iiv_arg = 0;

	return 0;
}

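/*
 * Common interrupt dispatcher.  The m68k exception frame stores the
 * vector offset (vector number times four), hence the division below.
 */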
int
intio_intr(frame)
	struct frame *frame;
{
	int vector = frame->f_vector / 4;
	extern int intrcnt[];

#if 0				/* this is not correct now */
	/* CAUTION: HERE WE ARE IN SPLHIGH() */
	/* LOWER TO APPROPRIATE IPL AT VERY FIRST IN THE HANDLER!! */
#endif
	if (iiv[vector].iiv_handler == 0) {
		printf("Stray interrupt: %d type %x\n",
		    vector, frame->f_format);
		return 0;
	}

	intrcnt[iiv[vector].iiv_intrcntoff]++;

	return (*(iiv[vector].iiv_handler))(iiv[vector].iiv_arg);
}

/*
 * Intio I/O controller interrupt
 */
static u_int8_t intio_ivec = 0;

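/*
 * Record the base vector for the on-board interrupt controller
 * ("Sicilian").  Only a four-aligned base is meaningful (hence the
 * 0xfc mask); the controller presumably delivers its sources on the
 * consecutive vectors within that block.
 */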
void
intio_set_ivec(vec)
	int vec;
{
	vec &= 0xfc;

	if (intio_ivec && intio_ivec != vec)
		panic("Wrong interrupt vector for Sicilian.");

	intio_ivec = vec;
	intio_set_sicilian_ivec(vec);
}


/*
 * intio bus dma stuff.  stolen from arch/i386/isa/isa_machdep.c
 */

/*
 * Create an INTIO DMA map.
 */
int
_intio_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct intio_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;
	extern paddr_t avail_end;

	/* Call common function to create the basic map. */
	error = x68k_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->x68k_dm_cookie = NULL;

	cookiesize = sizeof(struct intio_dma_cookie);

	/*
	 * INTIO only has 24 bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / NBPG) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * DMAC), we may have to bounce it as well.
	 */
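	/*
	 * For example (assuming NBPG is 4 KB for illustration): a 64 KB
	 * transfer can need (65536 / 4096) + 1 = 17 segments when the
	 * buffer is not page-aligned, so a map created with fewer
	 * segments than that must fall back on bouncing.
	 */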
	if (avail_end <= t->_bounce_thresh)
		/* Bouncing not necessary due to memory size. */
		map->x68k_dm_bounce_thresh = 0;
	cookieflags = 0;
	if (map->x68k_dm_bounce_thresh != 0 ||
	    ((map->x68k_dm_size / NBPG) + 1) > map->x68k_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) * map->x68k_dm_segcnt);
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct intio_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->x68k_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _intio_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->x68k_dm_cookie != NULL)
			free(map->x68k_dm_cookie, M_DMAMAP);
		x68k_bus_dmamap_destroy(t, map);
	}
	return (error);
}

/*
 * Destroy an INTIO DMA map.
 */
void
_intio_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_intio_dma_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	x68k_bus_dmamap_destroy(t, map);
}

/*
 * Load an INTIO DMA map with a linear buffer.
 */
int
_intio_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = x68k_bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0)
		return (error);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _intio_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = x68k_bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->x68k_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_intio_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _intio_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _intio_bus_dmamap_load(), but for mbufs.
 */
int
_intio_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;
	int error;

	/*
	 * Make sure on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_intio_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->x68k_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = x68k_bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0)
		return (error);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _intio_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = x68k_bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->x68k_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_intio_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _intio_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _intio_bus_dmamap_load(), but for uios.
 */
int
_intio_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{

	panic("_intio_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _intio_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_intio_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_intio_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an INTIO DMA map.
 */
void
_intio_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->x68k_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_intio_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	x68k_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an INTIO DMA map.
 */
void
_intio_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_intio_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_intio_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_intio_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just return; nothing to do.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
		return;

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("_intio_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("_intio_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("_intio_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("_intio_bus_dmamap_sync");
	}
}

/*
 * Allocate memory safe for INTIO DMA.
 */
int
_intio_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	paddr_t high;
	extern paddr_t avail_end;

	if (avail_end > INTIO_DMA_BOUNCE_THRESHOLD)
		high = trunc_page(INTIO_DMA_BOUNCE_THRESHOLD);
	else
		high = trunc_page(avail_end);

	return (x68k_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, high));
}

/**********************************************************************
 * INTIO DMA utility functions
 **********************************************************************/

int
_intio_dma_alloc_bouncebuf(t, map, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_size_t size;
	int flags;
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _intio_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    NBPG, map->x68k_dm_boundary, cookie->id_bouncesegs,
	    map->x68k_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = x68k_bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);
	if (error) {
		/* Free only the segments that were actually allocated. */
		x68k_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
	}

 out:
	if (error) {
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
	}

	return (error);
}

void
_intio_dma_free_bouncebuf(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;

	x68k_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	x68k_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}
    977