/*	$NetBSD: isa_machdep.c,v 1.1 2003/02/27 00:28:08 fvdl Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)isa.c	7.2 (Berkeley) 5/13/91
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isa_machdep.c,v 1.1 2003/02/27 00:28:08 fvdl Exp $");

#define ISA_DMA_STATS

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _X86_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <machine/pio.h>
#include <machine/cpufunc.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>

#include "ioapic.h"

#if NIOAPIC > 0
#include <machine/i82093var.h>
#include <machine/mpbiosvar.h>
#endif

#include "mca.h"
#if NMCA > 0
#include <machine/mca_machdep.h>		/* for MCA_system */
#endif

#include "eisa.h"		/* XXX */

/*
 * ISA can only DMA to 0-16M.
 */
#define	ISA_DMA_BOUNCE_THRESHOLD	(16 * 1024 * 1024)

extern	paddr_t avail_end;

#define	IDTVEC(name)	__CONCAT(X,name)
typedef void (vector) __P((void));
extern vector *IDTVEC(intr)[];

/*
 * Cookie used by ISA DMA.  A pointer to one of these is stashed in
 * the DMA map.
 */
struct x86_isa_dma_cookie {
	int	id_flags;		/* flags; see below */

	/*
	 * Information about the original buffer used during
	 * DMA map syncs.  Note that id_origbuflen is only used
	 * for ID_BUFTYPE_LINEAR.
	 */
	void	*id_origbuf;		/* pointer to orig buffer if
					   bouncing */
	bus_size_t id_origbuflen;	/* ...and size */
	int	id_buftype;		/* type of buffer */

	void	*id_bouncebuf;		/* pointer to the bounce buffer */
	bus_size_t id_bouncebuflen;	/* ...and size */
	int	id_nbouncesegs;		/* number of valid bounce segs */
	bus_dma_segment_t id_bouncesegs[0]; /* array of bounce buffer
					       physical memory segments */
};

/* id_flags */
#define	ID_MIGHT_NEED_BOUNCE	0x01	/* map could need bounce buffers */
#define	ID_HAS_BOUNCE		0x02	/* map currently has bounce buffers */
#define	ID_IS_BOUNCING		0x04	/* map is bouncing current xfer */

/* id_buftype */
#define	ID_BUFTYPE_INVALID	0
#define	ID_BUFTYPE_LINEAR	1
#define	ID_BUFTYPE_MBUF		2
#define	ID_BUFTYPE_UIO		3
#define	ID_BUFTYPE_RAW		4

int	_isa_bus_dmamap_create __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));
void	_isa_bus_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
int	_isa_bus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));
int	_isa_bus_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));
int	_isa_bus_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));
int	_isa_bus_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));
void	_isa_bus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void	_isa_bus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int));

int	_isa_bus_dmamem_alloc __P((bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int));

int	_isa_dma_alloc_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int));
void	_isa_dma_free_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t));

/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct x86_bus_dma_tag isa_bus_dma_tag = {
	ISA_DMA_BOUNCE_THRESHOLD,
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,
	_isa_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};
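
/*
 * Illustrative sketch only: a driver never references this tag by
 * name; it arrives as the ia_dmat member of the isa_attach_args and
 * is used through the MI bus_dma(9) interface.  Hypothetical softc
 * "sc" assumed:
 *
 *	if (bus_dmamap_create(ia->ia_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_dmamap) != 0)
 *		return;
 *
 * BUS_DMA_ALLOCNOW makes _isa_bus_dmamap_create() below reserve any
 * bounce pages up front, so a later load can't fail for lack of
 * low memory.
 */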

#define	LEGAL_IRQ(x)	((x) >= 0 && (x) < NUM_LEGACY_IRQS && (x) != 2)

int
isa_intr_alloc(isa_chipset_tag_t ic, int mask, int type, int *irq)
{
	int i, tmp, bestirq, count;
	struct intrhand **p, *q;
	struct intrsource *isp;
	struct cpu_info *ci;

	if (type == IST_NONE)
		panic("intr_alloc: bogus type");

	ci = &cpu_info_primary;

	bestirq = -1;
	count = -1;

	/* some interrupts should never be dynamically allocated */
	mask &= 0xdef8;

	/*
	 * XXX some interrupts will be used later (6 for fdc, 12 for pms).
	 * the right answer is to do "breadth-first" searching of devices.
	 */
	mask &= 0xefbf;

	simple_lock(&ci->ci_slock);

	for (i = 0; i < NUM_LEGACY_IRQS; i++) {
		if (LEGAL_IRQ(i) == 0 || (mask & (1<<i)) == 0)
			continue;
		isp = ci->ci_isources[i];
		if (isp == NULL) {
			/*
			 * if nothing's using the irq, just return it
			 */
			*irq = i;
			simple_unlock(&ci->ci_slock);
			return (0);
		}

		switch (isp->is_type) {
		case IST_EDGE:
		case IST_LEVEL:
			if (type != isp->is_type)
				continue;
			/*
			 * if the irq is shareable, count the number of other
			 * handlers, and if it's smaller than the last irq like
			 * this, remember it
			 *
			 * XXX We should probably also consider the
			 * interrupt level and stick IPL_TTY with other
			 * IPL_TTY, etc.
			 */
			for (p = &isp->is_handlers, tmp = 0; (q = *p) != NULL;
			     p = &q->ih_next, tmp++)
				;
			if ((bestirq == -1) || (count > tmp)) {
				bestirq = i;
				count = tmp;
			}
			break;

		case IST_PULSE:
			/* this just isn't shareable */
			continue;
		}
	}

	simple_unlock(&ci->ci_slock);

	if (bestirq == -1)
		return (1);

	*irq = bestirq;

	return (0);
}
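
/*
 * Illustrative sketch only (hypothetical caller): a probe routine
 * with no configured IRQ might request one from the set of lines its
 * hardware can drive, e.g. IRQ 3, 4, 5 or 9:
 *
 *	int irq;
 *
 *	if (isa_intr_alloc(ic, 0x0238, IST_EDGE, &irq) != 0)
 *		return (0);
 *
 * isa_intr_alloc() narrows the mask further (see above), returns the
 * first completely unused line, and otherwise picks the compatible
 * line with the fewest handlers already attached.
 */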

const struct evcnt *
isa_intr_evcnt(isa_chipset_tag_t ic, int irq)
{

	/* XXX for now, no evcnt parent reported */
	return NULL;
}

void *
isa_intr_establish(ic, irq, type, level, ih_fun, ih_arg)
	isa_chipset_tag_t ic;
	int irq;
	int type;
	int level;
	int (*ih_fun) __P((void *));
	void *ih_arg;
{
	struct pic *pic;
	int pin;
#if NIOAPIC > 0
	int mpih;
#endif

	pin = irq;
	pic = &i8259_pic;

#if NIOAPIC > 0
	if (mp_busses != NULL) {
		if (intr_find_mpmapping(mp_isa_bus, irq, &mpih) == 0 ||
		    intr_find_mpmapping(mp_eisa_bus, irq, &mpih) == 0) {
			if (!APIC_IRQ_ISLEGACY(mpih)) {
				pin = APIC_IRQ_PIN(mpih);
				pic = (struct pic *)
				    ioapic_find(APIC_IRQ_APIC(mpih));
				if (pic == NULL) {
					printf("isa_intr_establish: "
					       "unknown apic %d\n",
					    APIC_IRQ_APIC(mpih));
					return NULL;
				}
			}
		} else
			printf("isa_intr_establish: no MP mapping found\n");
	}
#endif
	return intr_establish(irq, pic, pin, type, level, ih_fun, ih_arg);
}
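
/*
 * Illustrative sketch only (hypothetical softc and handler names):
 *
 *	sc->sc_ih = isa_intr_establish(ia->ia_ic, ia->ia_irq[0].ir_irq,
 *	    IST_EDGE, IPL_NET, xxintr, sc);
 *	if (sc->sc_ih == NULL) {
 *		printf("%s: couldn't establish interrupt\n",
 *		    sc->sc_dev.dv_xname);
 *		return;
 *	}
 *
 * On an I/O APIC system the handler is attached to the redirected
 * pin found above rather than to the legacy i8259 input; the caller
 * never sees the difference.
 */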

/*
 * Deregister an interrupt handler.
 */
void
isa_intr_disestablish(ic, arg)
	isa_chipset_tag_t ic;
	void *arg;
{
	struct intrhand *ih = arg;

	if (!LEGAL_IRQ(ih->ih_pin))
		panic("intr_disestablish: bogus irq");

	intr_disestablish(ih);
}

void
isa_attach_hook(parent, self, iba)
	struct device *parent, *self;
	struct isabus_attach_args *iba;
{
	extern struct x86_isa_chipset i386_isa_chipset;
	extern int isa_has_been_seen;

	/*
	 * Notify others that might need to know that the ISA bus
	 * has now been attached.
	 */
	if (isa_has_been_seen)
		panic("isaattach: ISA bus already seen!");
	isa_has_been_seen = 1;

	/*
	 * Since we can only have one ISA bus, we just use a single
	 * statically allocated ISA chipset structure.  Pass it up
	 * now.
	 */
	iba->iba_ic = &i386_isa_chipset;
}

int
isa_mem_alloc(t, size, align, boundary, flags, addrp, bshp)
	bus_space_tag_t t;
	bus_size_t size, align;
	bus_addr_t boundary;
	int flags;
	bus_addr_t *addrp;
	bus_space_handle_t *bshp;
{

	/*
	 * Allocate physical address space in the ISA hole.
	 */
	return (bus_space_alloc(t, IOM_BEGIN, IOM_END - 1, size, align,
	    boundary, flags, addrp, bshp));
}

void
isa_mem_free(t, bsh, size)
	bus_space_tag_t t;
	bus_space_handle_t bsh;
	bus_size_t size;
{

	bus_space_free(t, bsh, size);
}

/**********************************************************************
 * bus.h DMA interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
		} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct x86_isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct x86_isa_dma_cookie);

	/*
	 * ISA only has 24 bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
	if (avail_end <= t->_bounce_thresh ||
	    (flags & ISABUS_DMA_32BIT) != 0) {
		/* Bouncing not necessary due to memory size. */
		map->_dm_bounce_thresh = 0;
	}
	cookieflags = 0;
	if (map->_dm_bounce_thresh != 0 ||
	    ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct x86_isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}
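
/*
 * Worked example of the segment-count test above (numbers
 * illustrative): with PAGE_SIZE 4096, a 64KB map can touch up to
 * (65536 / 4096) + 1 = 17 pages, and so may need 17 segments.  A
 * map created for the i8237 ISA DMA controller with nsegments == 1
 * can never be guaranteed a contiguous buffer, so it is always
 * marked ID_MIGHT_NEED_BOUNCE, even on a machine with less than
 * 16MB of RAM.
 */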

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct x86_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct x86_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	struct x86_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_isa_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct x86_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	struct x86_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just return; nothing to do.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
		return;

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("_isa_bus_dmamap_sync");
	}
}
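
/*
 * Illustrative round trip through the bounce machinery as a driver
 * would see it (hypothetical names, error handling omitted):
 *
 *	bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len, NULL,
 *	    BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMASYNC_PREREAD);
 *	... start the device transferring into the buffer ...
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
 *
 * If the load bounced, the POSTREAD sync above is where the data is
 * copied from the bounce pages back into "buf"; the driver itself
 * never notices.
 */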

/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	paddr_t high;

	if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
		high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD);
	else
		high = trunc_page(avail_end);

	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, high));
}
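
/*
 * Illustrative sketch only: memory obtained through the ISA tag,
 * e.g.
 *
 *	bus_dmamem_alloc(ia->ia_dmat, PAGE_SIZE, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
 *
 * is constrained to physical addresses below the 16MB threshold
 * (or below the top of RAM on smaller machines).  Mapped with
 * bus_dmamem_map() and loaded as an ordinary linear buffer, it
 * never bounces; note that _isa_bus_dmamap_load_raw() above is
 * not implemented.
 */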

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
_isa_dma_alloc_bouncebuf(t, map, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_size_t size;
	int flags;
{
	struct x86_isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}

void
_isa_dma_free_bouncebuf(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct x86_isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}
    957