      1 /*	$NetBSD: bus_dma.c,v 1.75 2017/01/05 09:08:44 msaitoh Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1996, 1997, 1998, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
      9  * Simulation Facility NASA Ames Research Center, and by Andrew Doran.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 #include <sys/cdefs.h>
     34 __KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.75 2017/01/05 09:08:44 msaitoh Exp $");
     35 
     36 /*
     37  * The following is included because _bus_dma_uiomove is derived from
     38  * uiomove() in kern_subr.c.
     39  */
     40 
     41 /*
     42  * Copyright (c) 1982, 1986, 1991, 1993
     43  *	The Regents of the University of California.  All rights reserved.
     44  * (c) UNIX System Laboratories, Inc.
     45  * All or some portions of this file are derived from material licensed
     46  * to the University of California by American Telephone and Telegraph
     47  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     48  * the permission of UNIX System Laboratories, Inc.
     49  *
     50  * Copyright (c) 1992, 1993
     51  *	The Regents of the University of California.  All rights reserved.
     52  *
     53  * This software was developed by the Computer Systems Engineering group
     54  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
     55  * contributed to Berkeley.
     56  *
     57  * All advertising materials mentioning features or use of this software
     58  * must display the following acknowledgement:
     59  *	This product includes software developed by the University of
     60  *	California, Lawrence Berkeley Laboratory.
     61  *
     62  * Redistribution and use in source and binary forms, with or without
     63  * modification, are permitted provided that the following conditions
     64  * are met:
     65  * 1. Redistributions of source code must retain the above copyright
     66  *    notice, this list of conditions and the following disclaimer.
     67  * 2. Redistributions in binary form must reproduce the above copyright
     68  *    notice, this list of conditions and the following disclaimer in the
     69  *    documentation and/or other materials provided with the distribution.
     70  * 3. Neither the name of the University nor the names of its contributors
     71  *    may be used to endorse or promote products derived from this software
     72  *    without specific prior written permission.
     73  *
     74  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     75  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     76  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     77  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     78  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     79  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     80  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     81  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     82  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     83  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     84  * SUCH DAMAGE.
     85  */
     86 
     87 #include "ioapic.h"
     88 #include "isa.h"
     89 #include "opt_mpbios.h"
     90 
     91 #include <sys/param.h>
     92 #include <sys/systm.h>
     93 #include <sys/kernel.h>
     94 #include <sys/kmem.h>
     95 #include <sys/malloc.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/proc.h>
     98 
     99 #include <sys/bus.h>
    100 #include <machine/bus_private.h>
    101 #if NIOAPIC > 0
    102 #include <machine/i82093var.h>
    103 #endif
    104 #ifdef MPBIOS
    105 #include <machine/mpbiosvar.h>
    106 #endif
    107 
    108 #if NISA > 0
    109 #include <dev/isa/isareg.h>
    110 #include <dev/isa/isavar.h>
    111 #endif
    112 
    113 #include <uvm/uvm.h>
    114 
    115 extern	paddr_t avail_end;
    116 
    117 #define	IDTVEC(name)	__CONCAT(X,name)
    118 typedef void (vector)(void);
    119 extern vector *IDTVEC(intr)[];
    120 
    121 #define	BUSDMA_BOUNCESTATS
    122 
    123 #ifdef BUSDMA_BOUNCESTATS
    124 #define	BUSDMA_EVCNT_DECL(name)						\
    125 static struct evcnt bus_dma_ev_##name =					\
    126     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "bus_dma", #name);		\
    127 EVCNT_ATTACH_STATIC(bus_dma_ev_##name)
    128 
    129 #define	STAT_INCR(name)							\
    130     bus_dma_ev_##name.ev_count++
    131 #define	STAT_DECR(name)							\
    132     bus_dma_ev_##name.ev_count--
    133 
    134 BUSDMA_EVCNT_DECL(nbouncebufs);
    135 BUSDMA_EVCNT_DECL(loads);
    136 BUSDMA_EVCNT_DECL(bounces);
    137 #else
    138 #define STAT_INCR(x)
    139 #define STAT_DECR(x)
    140 #endif
    141 
    142 static int	_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
    143 	    bus_size_t, int, bus_dmamap_t *);
    144 static void	_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
    145 static int	_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
    146 	    bus_size_t, struct proc *, int);
    147 static int	_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
    148 	    struct mbuf *, int);
    149 static int	_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
    150 	    struct uio *, int);
    151 static int	_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
    152 	    bus_dma_segment_t *, int, bus_size_t, int);
    153 static void	_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
    154 static void	_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
    155 	    bus_size_t, int);
    156 
    157 static int	_bus_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size,
    158 	    bus_size_t alignment, bus_size_t boundary,
    159 	    bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags);
    160 static void	_bus_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs,
    161 	    int nsegs);
    162 static int	_bus_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs,
    163 	    int nsegs, size_t size, void **kvap, int flags);
    164 static void	_bus_dmamem_unmap(bus_dma_tag_t tag, void *kva, size_t size);
    165 static paddr_t	_bus_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs,
    166 	    int nsegs, off_t off, int prot, int flags);
    167 
    168 static int	_bus_dmatag_subregion(bus_dma_tag_t tag, bus_addr_t min_addr,
    169 	    bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags);
    170 static void	_bus_dmatag_destroy(bus_dma_tag_t tag);
    171 
    172 static int _bus_dma_uiomove(void *, struct uio *, size_t, int);
    173 static int _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    174 	    bus_size_t size, int flags);
    175 static void _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map);
    176 static int _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map,
    177 	    void *buf, bus_size_t buflen, struct vmspace *vm, int flags);
    178 static int _bus_dmamap_load_busaddr(bus_dma_tag_t, bus_dmamap_t,
    179     bus_addr_t, bus_size_t);
    180 
    181 #ifndef _BUS_DMAMEM_ALLOC_RANGE
    182 static int	_bus_dmamem_alloc_range(bus_dma_tag_t tag, bus_size_t size,
    183 	    bus_size_t alignment, bus_size_t boundary,
    184 	    bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags,
    185 	    bus_addr_t low, bus_addr_t high);
    186 
    187 #define _BUS_DMAMEM_ALLOC_RANGE _bus_dmamem_alloc_range
    188 
    189 /*
    190  * Allocate physical memory from the given physical address range.
    191  * Called by DMA-safe memory allocation methods.
    192  */
    193 static int
    194 _bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    195     bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    196     int nsegs, int *rsegs, int flags, bus_addr_t low, bus_addr_t high)
    197 {
    198 	paddr_t curaddr, lastaddr;
    199 	struct vm_page *m;
    200 	struct pglist mlist;
    201 	int curseg, error;
    202 	bus_size_t uboundary;
    203 
    204 	/* Always round the size. */
    205 	size = round_page(size);
    206 
    207 	KASSERT(boundary >= PAGE_SIZE || boundary == 0);
    208 
    209 	/*
    210 	 * Allocate pages from the VM system.
     211 	 * We accept boundaries < size, splitting the allocation into
     212 	 * multiple segments if needed.  uvm_pglistalloc does not, so
     213 	 * compute an appropriate boundary: the next power of 2 >= size.
     214 	 */
    215 
    216 	if (boundary == 0)
    217 		uboundary = 0;
    218 	else {
    219 		uboundary = boundary;
    220 		while (uboundary < size)
    221 			uboundary = uboundary << 1;
    222 	}
    223 	error = uvm_pglistalloc(size, low, high, alignment, uboundary,
    224 	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
    225 	if (error)
    226 		return (error);
    227 
    228 	/*
    229 	 * Compute the location, size, and number of segments actually
    230 	 * returned by the VM code.
    231 	 */
    232 	m = TAILQ_FIRST(&mlist);
    233 	curseg = 0;
    234 	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
    235 	segs[curseg].ds_len = PAGE_SIZE;
    236 	m = m->pageq.queue.tqe_next;
    237 
    238 	for (; m != NULL; m = m->pageq.queue.tqe_next) {
    239 		curaddr = VM_PAGE_TO_PHYS(m);
    240 #ifdef DIAGNOSTIC
    241 		if (curaddr < low || curaddr >= high) {
     242 			printf("uvm_pglistalloc returned nonsensical"
     243 			    " address %#" PRIxPADDR "\n", curaddr);
    244 			panic("_bus_dmamem_alloc_range");
    245 		}
    246 #endif
    247 		if (curaddr == (lastaddr + PAGE_SIZE) &&
    248 		    (lastaddr & boundary) == (curaddr & boundary)) {
    249 			segs[curseg].ds_len += PAGE_SIZE;
    250 		} else {
    251 			curseg++;
    252 			if (curseg >= nsegs)
    253 				return EFBIG;
    254 			segs[curseg].ds_addr = curaddr;
    255 			segs[curseg].ds_len = PAGE_SIZE;
    256 		}
    257 		lastaddr = curaddr;
    258 	}
    259 
    260 	*rsegs = curseg + 1;
    261 
    262 	return (0);
    263 }
    264 #endif /* _BUS_DMAMEM_ALLOC_RANGE */
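
/*
 * A worked example of the boundary rounding above (illustrative
 * numbers only): a 16KB request with a 4KB boundary.
 *
 *	size      = 16384	(4 pages, already page-rounded)
 *	boundary  =  4096
 *	uboundary =  4096 -> 8192 -> 16384	(doubled until >= size)
 *
 * uvm_pglistalloc() is given the 16KB boundary it can satisfy; the
 * segment loop above then re-imposes the caller's 4KB boundary by
 * starting a new segment whenever consecutive pages straddle a 4KB
 * line, so this request comes back as four single-page segments.
 */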
    265 
    266 /*
    267  * Create a DMA map.
    268  */
    269 static int
    270 _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    271     bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
    272 {
    273 	struct x86_bus_dma_cookie *cookie;
    274 	bus_dmamap_t map;
    275 	int error, cookieflags;
    276 	void *cookiestore, *mapstore;
    277 	size_t cookiesize, mapsize;
    278 
    279 	/*
    280 	 * Allocate and initialize the DMA map.  The end of the map
    281 	 * is a variable-sized array of segments, so we allocate enough
    282 	 * room for them in one shot.
    283 	 *
    284 	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
    285 	 * of ALLOCNOW notifies others that we've reserved these resources,
    286 	 * and they are not to be freed.
    287 	 *
    288 	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
    289 	 * the (nsegments - 1).
    290 	 */
    291 	error = 0;
    292 	mapsize = sizeof(struct x86_bus_dmamap) +
    293 	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
    294 	if ((mapstore = malloc(mapsize, M_DMAMAP, M_ZERO |
    295 	    ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK))) == NULL)
    296 		return (ENOMEM);
    297 
    298 	map = (struct x86_bus_dmamap *)mapstore;
    299 	map->_dm_size = size;
    300 	map->_dm_segcnt = nsegments;
    301 	map->_dm_maxmaxsegsz = maxsegsz;
    302 	map->_dm_boundary = boundary;
    303 	map->_dm_bounce_thresh = t->_bounce_thresh;
    304 	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
    305 	map->dm_maxsegsz = maxsegsz;
    306 	map->dm_mapsize = 0;		/* no valid mappings */
    307 	map->dm_nsegs = 0;
    308 
    309 	if (t->_bounce_thresh == 0 || _BUS_AVAIL_END <= t->_bounce_thresh)
    310 		map->_dm_bounce_thresh = 0;
    311 	cookieflags = 0;
    312 
    313 	if (t->_may_bounce != NULL) {
    314 		error = t->_may_bounce(t, map, flags, &cookieflags);
    315 		if (error != 0)
    316 			goto out;
    317 	}
    318 
    319 	if (map->_dm_bounce_thresh != 0)
    320 		cookieflags |= X86_DMA_MIGHT_NEED_BOUNCE;
    321 
    322 	if ((cookieflags & X86_DMA_MIGHT_NEED_BOUNCE) == 0) {
    323 		*dmamp = map;
    324 		return 0;
    325 	}
    326 
    327 	cookiesize = sizeof(struct x86_bus_dma_cookie) +
    328 	    (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
    329 
    330 	/*
    331 	 * Allocate our cookie.
    332 	 */
    333 	if ((cookiestore = malloc(cookiesize, M_DMAMAP, M_ZERO |
    334 	    ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK))) == NULL) {
    335 		error = ENOMEM;
    336 		goto out;
    337 	}
    338 	cookie = (struct x86_bus_dma_cookie *)cookiestore;
    339 	cookie->id_flags = cookieflags;
    340 	map->_dm_cookie = cookie;
    341 
    342 	error = _bus_dma_alloc_bouncebuf(t, map, size, flags);
    343  out:
    344 	if (error)
    345 		_bus_dmamap_destroy(t, map);
    346 	else
    347 		*dmamp = map;
    348 
    349 	return (error);
    350 }
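
/*
 * A minimal sketch (not compiled) of how a driver typically creates a
 * map through the public bus_dmamap_create() wrapper further below.
 * The softc fields and the MYDEV_* limits are hypothetical; the calls
 * and flags are the standard bus_dma(9) interface.
 */
#if 0
static int
mydev_create_xfer_map(struct mydev_softc *sc)
{
	/*
	 * One map good for up to MYDEV_MAXXFER bytes in at most
	 * MYDEV_NSEGS segments, with no segment crossing a 64KB line.
	 * BUS_DMA_ALLOCNOW asks for any resources the map may need
	 * (e.g. a bounce buffer) to be reserved up front.
	 */
	return bus_dmamap_create(sc->sc_dmat, MYDEV_MAXXFER, MYDEV_NSEGS,
	    MYDEV_MAXXFER, 0x10000, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &sc->sc_xfer_map);
}
#endif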
    351 
    352 /*
    353  * Destroy a DMA map.
    354  */
    355 static void
    356 _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
    357 {
    358 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
    359 
    360 	/*
    361 	 * Free any bounce pages this map might hold.
    362 	 */
    363 	if (cookie != NULL) {
    364 		if (cookie->id_flags & X86_DMA_HAS_BOUNCE)
    365 			_bus_dma_free_bouncebuf(t, map);
    366 		free(cookie, M_DMAMAP);
    367 	}
    368 
    369 	free(map, M_DMAMAP);
    370 }
    371 
    372 /*
    373  * Load a DMA map with a linear buffer.
    374  */
    375 static int
    376 _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    377     bus_size_t buflen, struct proc *p, int flags)
    378 {
    379 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
    380 	int error;
    381 	struct vmspace *vm;
    382 
    383 	STAT_INCR(loads);
    384 
    385 	/*
    386 	 * Make sure that on error condition we return "no valid mappings."
    387 	 */
    388 	map->dm_mapsize = 0;
    389 	map->dm_nsegs = 0;
    390 	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
    391 
    392 	if (buflen > map->_dm_size)
    393 		return EINVAL;
    394 
    395 	if (p != NULL) {
    396 		vm = p->p_vmspace;
    397 	} else {
    398 		vm = vmspace_kernel();
    399 	}
    400 	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags);
    401 	if (error == 0) {
    402 		if (cookie != NULL)
    403 			cookie->id_flags &= ~X86_DMA_IS_BOUNCING;
    404 		map->dm_mapsize = buflen;
    405 		return 0;
    406 	}
    407 
    408 	if (cookie == NULL ||
    409 	    (cookie->id_flags & X86_DMA_MIGHT_NEED_BOUNCE) == 0)
    410 		return error;
    411 
    412 	/*
    413 	 * First attempt failed; bounce it.
    414 	 */
    415 
    416 	STAT_INCR(bounces);
    417 
    418 	/*
    419 	 * Allocate bounce pages, if necessary.
    420 	 */
    421 	if ((cookie->id_flags & X86_DMA_HAS_BOUNCE) == 0) {
    422 		error = _bus_dma_alloc_bouncebuf(t, map, buflen, flags);
    423 		if (error)
    424 			return (error);
    425 	}
    426 
    427 	/*
    428 	 * Cache a pointer to the caller's buffer and load the DMA map
    429 	 * with the bounce buffer.
    430 	 */
    431 	cookie->id_origbuf = buf;
    432 	cookie->id_origbuflen = buflen;
    433 	cookie->id_buftype = X86_DMA_BUFTYPE_LINEAR;
    434 	map->dm_nsegs = 0;
    435 	error = bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
    436 	    p, flags);
    437 	if (error)
    438 		return (error);
    439 
    440 	/* ...so _bus_dmamap_sync() knows we're bouncing */
    441 	cookie->id_flags |= X86_DMA_IS_BOUNCING;
    442 	return (0);
    443 }
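
/*
 * A sketch (not compiled) of the usual load/sync/unload sequence a
 * driver wraps around one memory-to-device transfer.  Whether the map
 * ends up bouncing is invisible to the caller; the PREWRITE/POSTWRITE
 * syncs do the right thing either way.  The mydev_* names are
 * hypothetical.
 */
#if 0
static int
mydev_start_write(struct mydev_softc *sc, void *buf, bus_size_t len)
{
	int error;

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_xfer_map, buf, len,
	    NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (error)
		return error;

	/* Make the data visible to the device (or copy to the bounce buffer). */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_xfer_map, 0, len,
	    BUS_DMASYNC_PREWRITE);

	/* ... program the device from sc->sc_xfer_map->dm_segs[] ... */
	return 0;
}

static void
mydev_write_done(struct mydev_softc *sc, bus_size_t len)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_xfer_map, 0, len,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_xfer_map);
}
#endif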
    444 
    445 static int
    446 _bus_dmamap_load_busaddr(bus_dma_tag_t t, bus_dmamap_t map,
    447     bus_addr_t addr, bus_size_t size)
    448 {
    449 	bus_dma_segment_t * const segs = map->dm_segs;
    450 	int nseg = map->dm_nsegs;
    451 	bus_addr_t bmask = ~(map->_dm_boundary - 1);
    452 	bus_addr_t lastaddr = 0xdead; /* XXX gcc */
    453 	bus_size_t sgsize;
    454 
    455 	if (nseg > 0)
    456 		lastaddr = segs[nseg-1].ds_addr + segs[nseg-1].ds_len;
    457 again:
    458 	sgsize = size;
    459 	/*
    460 	 * Make sure we don't cross any boundaries.
    461 	 */
    462 	if (map->_dm_boundary > 0) {
    463 		bus_addr_t baddr; /* next boundary address */
    464 
    465 		baddr = (addr + map->_dm_boundary) & bmask;
    466 		if (sgsize > (baddr - addr))
    467 			sgsize = (baddr - addr);
    468 	}
    469 
    470 	/*
    471 	 * Insert chunk into a segment, coalescing with
    472 	 * previous segment if possible.
    473 	 */
    474 	if (nseg > 0 && addr == lastaddr &&
    475 	    segs[nseg-1].ds_len + sgsize <= map->dm_maxsegsz &&
    476 	    (map->_dm_boundary == 0 ||
    477 	     (segs[nseg-1].ds_addr & bmask) == (addr & bmask))) {
    478 		/* coalesce */
    479 		segs[nseg-1].ds_len += sgsize;
    480 	} else if (nseg >= map->_dm_segcnt) {
    481 		return EFBIG;
    482 	} else {
    483 		/* new segment */
    484 		segs[nseg].ds_addr = addr;
    485 		segs[nseg].ds_len = sgsize;
    486 		nseg++;
    487 	}
    488 
    489 	lastaddr = addr + sgsize;
    490 	if (map->_dm_bounce_thresh != 0 && lastaddr > map->_dm_bounce_thresh)
    491 		return EINVAL;
    492 
    493 	addr += sgsize;
    494 	size -= sgsize;
    495 	if (size > 0)
    496 		goto again;
    497 
    498 	map->dm_nsegs = nseg;
    499 	return 0;
    500 }
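
/*
 * A worked example of the splitting above (illustrative numbers): a
 * 12KB chunk starting at bus address 0x1f000, on a map created with a
 * 64KB boundary, becomes two segments because it crosses the 0x20000
 * line:
 *
 *	{ ds_addr = 0x1f000, ds_len = 0x1000 }
 *	{ ds_addr = 0x20000, ds_len = 0x2000 }
 *
 * A following chunk that happens to start at 0x22000 would then be
 * coalesced into the second segment, provided dm_maxsegsz allows it.
 */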
    501 
    502 /*
    503  * Like _bus_dmamap_load(), but for mbufs.
    504  */
    505 static int
    506 _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    507     int flags)
    508 {
    509 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
    510 	int error;
    511 	struct mbuf *m;
    512 
    513 	/*
    514 	 * Make sure on error condition we return "no valid mappings."
    515 	 */
    516 	map->dm_mapsize = 0;
    517 	map->dm_nsegs = 0;
    518 	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
    519 
    520 #ifdef DIAGNOSTIC
    521 	if ((m0->m_flags & M_PKTHDR) == 0)
    522 		panic("_bus_dmamap_load_mbuf: no packet header");
    523 #endif
    524 
    525 	if (m0->m_pkthdr.len > map->_dm_size)
    526 		return (EINVAL);
    527 
    528 	error = 0;
    529 	for (m = m0; m != NULL && error == 0; m = m->m_next) {
    530 		int offset;
    531 		int remainbytes;
    532 		const struct vm_page * const *pgs;
    533 		paddr_t paddr;
    534 		int size;
    535 
    536 		if (m->m_len == 0)
    537 			continue;
    538 		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER|M_EXT_PAGES)) {
    539 		case M_EXT|M_EXT_CLUSTER:
    540 			/* XXX KDASSERT */
    541 			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
    542 			paddr = m->m_ext.ext_paddr +
    543 			    (m->m_data - m->m_ext.ext_buf);
    544 			size = m->m_len;
    545 			error = _bus_dmamap_load_busaddr(t, map,
    546 			    _BUS_PHYS_TO_BUS(paddr), size);
    547 			break;
    548 
    549 		case M_EXT|M_EXT_PAGES:
    550 			KASSERT(m->m_ext.ext_buf <= m->m_data);
    551 			KASSERT(m->m_data <=
    552 			    m->m_ext.ext_buf + m->m_ext.ext_size);
    553 
    554 			offset = (vaddr_t)m->m_data -
    555 			    trunc_page((vaddr_t)m->m_ext.ext_buf);
    556 			remainbytes = m->m_len;
    557 
    558 			/* skip uninteresting pages */
    559 			pgs = (const struct vm_page * const *)
    560 			    m->m_ext.ext_pgs + (offset >> PAGE_SHIFT);
    561 
    562 			offset &= PAGE_MASK; /* offset in the first page */
    563 
     564 			/* load each page */
    565 			while (remainbytes > 0) {
    566 				const struct vm_page *pg;
    567 				bus_addr_t busaddr;
    568 
    569 				size = MIN(remainbytes, PAGE_SIZE - offset);
    570 
    571 				pg = *pgs++;
    572 				KASSERT(pg);
    573 				busaddr = _BUS_VM_PAGE_TO_BUS(pg) + offset;
    574 
    575 				error = _bus_dmamap_load_busaddr(t, map,
    576 				    busaddr, size);
    577 				if (error)
    578 					break;
    579 				offset = 0;
    580 				remainbytes -= size;
    581 			}
    582 			break;
    583 
    584 		case 0:
    585 			paddr = m->m_paddr + M_BUFOFFSET(m) +
    586 			    (m->m_data - M_BUFADDR(m));
    587 			size = m->m_len;
    588 			error = _bus_dmamap_load_busaddr(t, map,
    589 			    _BUS_PHYS_TO_BUS(paddr), size);
    590 			break;
    591 
    592 		default:
    593 			error = _bus_dmamap_load_buffer(t, map, m->m_data,
    594 			    m->m_len, vmspace_kernel(), flags);
    595 		}
    596 	}
    597 	if (error == 0) {
    598 		map->dm_mapsize = m0->m_pkthdr.len;
    599 		return 0;
    600 	}
    601 
    602 	map->dm_nsegs = 0;
    603 
    604 	if (cookie == NULL ||
    605 	    (cookie->id_flags & X86_DMA_MIGHT_NEED_BOUNCE) == 0)
    606 		return error;
    607 
    608 	/*
    609 	 * First attempt failed; bounce it.
    610 	 */
    611 
    612 	STAT_INCR(bounces);
    613 
    614 	/*
    615 	 * Allocate bounce pages, if necessary.
    616 	 */
    617 	if ((cookie->id_flags & X86_DMA_HAS_BOUNCE) == 0) {
    618 		error = _bus_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
    619 		    flags);
    620 		if (error)
    621 			return (error);
    622 	}
    623 
    624 	/*
    625 	 * Cache a pointer to the caller's buffer and load the DMA map
    626 	 * with the bounce buffer.
    627 	 */
    628 	cookie->id_origbuf = m0;
    629 	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
    630 	cookie->id_buftype = X86_DMA_BUFTYPE_MBUF;
    631 	error = bus_dmamap_load(t, map, cookie->id_bouncebuf,
    632 	    m0->m_pkthdr.len, NULL, flags);
    633 	if (error)
    634 		return (error);
    635 
    636 	/* ...so _bus_dmamap_sync() knows we're bouncing */
    637 	cookie->id_flags |= X86_DMA_IS_BOUNCING;
    638 	return (0);
    639 }
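
/*
 * A sketch (not compiled) of the transmit-side use of the mbuf loader
 * above.  EFBIG means the chain needs more segments than the map
 * provides; real drivers usually copy the chain into a contiguous
 * buffer and retry, here the packet is simply rejected.  The mydev_*
 * names are hypothetical.
 */
#if 0
static int
mydev_encap(struct mydev_softc *sc, struct mbuf *m0)
{
	bus_dmamap_t dmap = sc->sc_tx_map;
	int error;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m0, BUS_DMA_NOWAIT);
	if (error)
		return error;		/* EFBIG, ENOMEM, ... */

	/* Flush (or bounce) every fragment before the device reads it. */
	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* ... hand dmap->dm_segs[0 .. dm_nsegs-1] to the TX ring ... */
	return 0;
}
#endif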
    640 
    641 /*
    642  * Like _bus_dmamap_load(), but for uios.
    643  */
    644 static int
    645 _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    646     int flags)
    647 {
    648 	int i, error;
    649 	bus_size_t minlen, resid;
    650 	struct vmspace *vm;
    651 	struct iovec *iov;
    652 	void *addr;
    653 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
    654 
    655 	/*
    656 	 * Make sure that on error condition we return "no valid mappings."
    657 	 */
    658 	map->dm_mapsize = 0;
    659 	map->dm_nsegs = 0;
    660 	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
    661 
    662 	resid = uio->uio_resid;
    663 	iov = uio->uio_iov;
    664 
    665 	vm = uio->uio_vmspace;
    666 
    667 	error = 0;
    668 	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
    669 		/*
    670 		 * Now at the first iovec to load.  Load each iovec
    671 		 * until we have exhausted the residual count.
    672 		 */
    673 		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
    674 		addr = (void *)iov[i].iov_base;
    675 
    676 		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
    677 		    vm, flags);
    678 
    679 		resid -= minlen;
    680 	}
    681 	if (error == 0) {
    682 		map->dm_mapsize = uio->uio_resid;
    683 		return 0;
    684 	}
    685 
    686 	map->dm_nsegs = 0;
    687 
    688 	if (cookie == NULL ||
    689 	    (cookie->id_flags & X86_DMA_MIGHT_NEED_BOUNCE) == 0)
    690 		return error;
    691 
    692 	STAT_INCR(bounces);
    693 
    694 	/*
    695 	 * Allocate bounce pages, if necessary.
    696 	 */
    697 	if ((cookie->id_flags & X86_DMA_HAS_BOUNCE) == 0) {
    698 		error = _bus_dma_alloc_bouncebuf(t, map, uio->uio_resid,
    699 		    flags);
    700 		if (error)
    701 			return (error);
    702 	}
    703 
    704 	/*
    705 	 * Cache a pointer to the caller's buffer and load the DMA map
    706 	 * with the bounce buffer.
    707 	 */
    708 	cookie->id_origbuf = uio;
    709 	cookie->id_origbuflen = uio->uio_resid;
    710 	cookie->id_buftype = X86_DMA_BUFTYPE_UIO;
    711 	error = bus_dmamap_load(t, map, cookie->id_bouncebuf,
    712 	    uio->uio_resid, NULL, flags);
    713 	if (error)
    714 		return (error);
    715 
    716 	/* ...so _bus_dmamap_sync() knows we're bouncing */
    717 	cookie->id_flags |= X86_DMA_IS_BOUNCING;
    718 	return (0);
    719 }
    720 
    721 /*
    722  * Like _bus_dmamap_load(), but for raw memory allocated with
    723  * bus_dmamem_alloc().
    724  */
    725 static int
    726 _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    727     bus_dma_segment_t *segs, int nsegs, bus_size_t size0, int flags)
    728 {
    729 	bus_size_t size;
    730 	int i, error = 0;
    731 
    732 	/*
    733 	 * Make sure that on error condition we return "no valid mappings."
    734 	 */
    735 	map->dm_mapsize = 0;
    736 	map->dm_nsegs = 0;
    737 	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
    738 
    739 	if (size0 > map->_dm_size)
    740 		return EINVAL;
    741 
    742 	for (i = 0, size = size0; i < nsegs && size > 0; i++) {
    743 		bus_dma_segment_t *ds = &segs[i];
    744 		bus_size_t sgsize;
    745 
    746 		sgsize = MIN(ds->ds_len, size);
    747 		if (sgsize == 0)
    748 			continue;
    749 		error = _bus_dmamap_load_busaddr(t, map, ds->ds_addr, sgsize);
    750 		if (error != 0)
    751 			break;
    752 		size -= sgsize;
    753 	}
    754 
    755 	if (error != 0) {
    756 		map->dm_mapsize = 0;
    757 		map->dm_nsegs = 0;
    758 		return error;
    759 	}
    760 
    761 	/* XXX TBD bounce */
    762 
    763 	map->dm_mapsize = size0;
    764 	return 0;
    765 }
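
/*
 * A sketch (not compiled) of loading raw segments: memory obtained
 * from bus_dmamem_alloc() can be handed to a map directly, without
 * ever being mapped into kernel virtual space, which suits large
 * buffers only the device touches.  The mydev_* names are
 * hypothetical; sc_fb_map is assumed to have been created earlier
 * with bus_dmamap_create().
 */
#if 0
static int
mydev_load_frame_buffer(struct mydev_softc *sc)
{
	int error, rseg;

	error = bus_dmamem_alloc(sc->sc_dmat, MYDEV_FBSIZE, PAGE_SIZE, 0,
	    sc->sc_fb_segs, MYDEV_FBNSEGS, &rseg, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamap_load_raw(sc->sc_dmat, sc->sc_fb_map,
	    sc->sc_fb_segs, rseg, MYDEV_FBSIZE, BUS_DMA_WAITOK);
	if (error)
		bus_dmamem_free(sc->sc_dmat, sc->sc_fb_segs, rseg);
	return error;
}
#endif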
    766 
    767 /*
    768  * Unload a DMA map.
    769  */
    770 static void
    771 _bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
    772 {
    773 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
    774 
    775 	/*
     776 	 * Bounce pages, if any, stay reserved for this map's exclusive
     777 	 * use; just clear the bouncing state here.
    778 	 */
    779 	if (cookie != NULL) {
    780 		cookie->id_flags &= ~X86_DMA_IS_BOUNCING;
    781 		cookie->id_buftype = X86_DMA_BUFTYPE_INVALID;
    782 	}
    783 	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
    784 	map->dm_mapsize = 0;
    785 	map->dm_nsegs = 0;
    786 }
    787 
    788 /*
    789  * Synchronize a DMA map.
    790  */
    791 static void
    792 _bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    793     bus_size_t len, int ops)
    794 {
    795 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
    796 
    797 	/*
    798 	 * Mixing PRE and POST operations is not allowed.
    799 	 */
    800 	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
    801 	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
    802 		panic("%s: mix PRE and POST", __func__);
    803 
    804 #ifdef DIAGNOSTIC
    805 	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
    806 		if (offset >= map->dm_mapsize)
    807 			panic("%s: bad offset 0x%jx >= 0x%jx", __func__,
    808 			(intmax_t)offset, (intmax_t)map->dm_mapsize);
    809 		if ((offset + len) > map->dm_mapsize)
    810 			panic("%s: bad length 0x%jx + 0x%jx > 0x%jx", __func__,
    811 			    (intmax_t)offset, (intmax_t)len,
    812 			    (intmax_t)map->dm_mapsize);
    813 	}
    814 #endif
    815 
    816 	/*
     817 	 * If we're not bouncing, there is nothing to copy; skip to the fence.
    818 	 */
    819 	if (len == 0 || cookie == NULL ||
    820 	    (cookie->id_flags & X86_DMA_IS_BOUNCING) == 0)
    821 		goto end;
    822 
    823 	switch (cookie->id_buftype) {
    824 	case X86_DMA_BUFTYPE_LINEAR:
    825 		/*
    826 		 * Nothing to do for pre-read.
    827 		 */
    828 
    829 		if (ops & BUS_DMASYNC_PREWRITE) {
    830 			/*
    831 			 * Copy the caller's buffer to the bounce buffer.
    832 			 */
    833 			memcpy((char *)cookie->id_bouncebuf + offset,
    834 			    (char *)cookie->id_origbuf + offset, len);
    835 		}
    836 
    837 		if (ops & BUS_DMASYNC_POSTREAD) {
    838 			/*
    839 			 * Copy the bounce buffer to the caller's buffer.
    840 			 */
    841 			memcpy((char *)cookie->id_origbuf + offset,
    842 			    (char *)cookie->id_bouncebuf + offset, len);
    843 		}
    844 
    845 		/*
    846 		 * Nothing to do for post-write.
    847 		 */
    848 		break;
    849 
    850 	case X86_DMA_BUFTYPE_MBUF:
    851 	    {
    852 		struct mbuf *m, *m0 = cookie->id_origbuf;
    853 		bus_size_t minlen, moff;
    854 
    855 		/*
    856 		 * Nothing to do for pre-read.
    857 		 */
    858 
    859 		if (ops & BUS_DMASYNC_PREWRITE) {
    860 			/*
    861 			 * Copy the caller's buffer to the bounce buffer.
    862 			 */
    863 			m_copydata(m0, offset, len,
    864 			    (char *)cookie->id_bouncebuf + offset);
    865 		}
    866 
    867 		if (ops & BUS_DMASYNC_POSTREAD) {
    868 			/*
    869 			 * Copy the bounce buffer to the caller's buffer.
    870 			 */
    871 			for (moff = offset, m = m0; m != NULL && len != 0;
    872 			     m = m->m_next) {
    873 				/* Find the beginning mbuf. */
    874 				if (moff >= m->m_len) {
    875 					moff -= m->m_len;
    876 					continue;
    877 				}
    878 
    879 				/*
    880 				 * Now at the first mbuf to sync; nail
    881 				 * each one until we have exhausted the
    882 				 * length.
    883 				 */
    884 				minlen = len < m->m_len - moff ?
    885 				    len : m->m_len - moff;
    886 
    887 				memcpy(mtod(m, char *) + moff,
    888 				    (char *)cookie->id_bouncebuf + offset,
    889 				    minlen);
    890 
    891 				moff = 0;
    892 				len -= minlen;
    893 				offset += minlen;
    894 			}
    895 		}
    896 
    897 		/*
    898 		 * Nothing to do for post-write.
    899 		 */
    900 		break;
    901 	    }
    902 	case X86_DMA_BUFTYPE_UIO:
    903 	    {
    904 		struct uio *uio;
    905 
    906 		uio = (struct uio *)cookie->id_origbuf;
    907 
    908 		/*
    909 		 * Nothing to do for pre-read.
    910 		 */
    911 
    912 		if (ops & BUS_DMASYNC_PREWRITE) {
    913 			/*
    914 			 * Copy the caller's buffer to the bounce buffer.
    915 			 */
    916 			_bus_dma_uiomove((char *)cookie->id_bouncebuf + offset,
    917 			    uio, len, UIO_WRITE);
    918 		}
    919 
    920 		if (ops & BUS_DMASYNC_POSTREAD) {
    921 			_bus_dma_uiomove((char *)cookie->id_bouncebuf + offset,
    922 			    uio, len, UIO_READ);
    923 		}
    924 
    925 		/*
    926 		 * Nothing to do for post-write.
    927 		 */
    928 		break;
    929 	    }
    930 
    931 	case X86_DMA_BUFTYPE_RAW:
    932 		panic("%s: X86_DMA_BUFTYPE_RAW", __func__);
    933 		break;
    934 
    935 	case X86_DMA_BUFTYPE_INVALID:
    936 		panic("%s: X86_DMA_BUFTYPE_INVALID", __func__);
    937 		break;
    938 
    939 	default:
    940 		panic("%s: unknown buffer type %d", __func__,
    941 		    cookie->id_buftype);
    942 		break;
    943 	}
    944 end:
    945 	if (ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTWRITE)) {
    946 		/*
     947 		 * From the memory's POV a load can be reordered ahead of a
     948 		 * store (a load can fetch data from the write buffers before
     949 		 * the data hits the cache or memory); an mfence prevents this.
    950 		 */
    951 		x86_mfence();
    952 	} else if (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_POSTREAD)) {
    953 		/*
     954 		 * All past reads should have completed by this point, and
     955 		 * future reads should not have started yet.
    956 		 */
    957 		x86_lfence();
    958 	}
    959 }
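
/*
 * A sketch (not compiled) of the sync pairing for a device-to-memory
 * buffer, e.g. a receive data buffer: PREREAD before the device may
 * write the memory, POSTREAD before the CPU looks at the data.  On a
 * bouncing map the POSTREAD is what copies the bounce buffer back to
 * the caller's buffer (see the LINEAR case above).  The mydev_* names
 * are hypothetical.
 */
#if 0
static void
mydev_post_rx_buffer(struct mydev_softc *sc, int idx)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_map[idx], 0,
	    sc->sc_rx_map[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);
	/* ... give dm_segs[0].ds_addr of this map to the device ... */
}

static void
mydev_rx_complete(struct mydev_softc *sc, int idx, bus_size_t rxlen)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_map[idx], 0, rxlen,
	    BUS_DMASYNC_POSTREAD);
	/* The CPU may now safely read the first rxlen bytes. */
}
#endif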
    960 
    961 /*
    962  * Allocate memory safe for DMA.
    963  */
    964 static int
    965 _bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    966     bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    967     int flags)
    968 {
    969 	bus_addr_t high;
    970 
    971 	if (t->_bounce_alloc_hi != 0 && _BUS_AVAIL_END > t->_bounce_alloc_hi)
    972 		high = trunc_page(t->_bounce_alloc_hi);
    973 	else
    974 		high = trunc_page(_BUS_AVAIL_END);
    975 
    976 	return (_BUS_DMAMEM_ALLOC_RANGE(t, size, alignment, boundary,
    977 	    segs, nsegs, rsegs, flags, t->_bounce_alloc_lo, high));
    978 }
    979 
    980 static int
    981 _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    982     bus_size_t size, int flags)
    983 {
    984 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
    985 	int error = 0;
    986 
    987 #ifdef DIAGNOSTIC
    988 	if (cookie == NULL)
    989 		panic("_bus_dma_alloc_bouncebuf: no cookie");
    990 #endif
    991 
    992 	cookie->id_bouncebuflen = round_page(size);
    993 	error = _bus_dmamem_alloc(t, cookie->id_bouncebuflen,
    994 	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
    995 	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
    996 	if (error) {
    997 		cookie->id_bouncebuflen = 0;
    998 		cookie->id_nbouncesegs = 0;
    999 		return error;
   1000 	}
   1001 
   1002 	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
   1003 	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
   1004 	    (void **)&cookie->id_bouncebuf, flags);
   1005 
   1006 	if (error) {
   1007 		_bus_dmamem_free(t, cookie->id_bouncesegs,
   1008 		    cookie->id_nbouncesegs);
   1009 		cookie->id_bouncebuflen = 0;
   1010 		cookie->id_nbouncesegs = 0;
   1011 	} else {
   1012 		cookie->id_flags |= X86_DMA_HAS_BOUNCE;
   1013 		STAT_INCR(nbouncebufs);
   1014 	}
   1015 
   1016 	return (error);
   1017 }
   1018 
   1019 static void
   1020 _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
   1021 {
   1022 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
   1023 
   1024 #ifdef DIAGNOSTIC
   1025 	if (cookie == NULL)
   1026 		panic("_bus_dma_free_bouncebuf: no cookie");
   1027 #endif
   1028 
   1029 	STAT_DECR(nbouncebufs);
   1030 
   1031 	_bus_dmamem_unmap(t, cookie->id_bouncebuf, cookie->id_bouncebuflen);
   1032 	_bus_dmamem_free(t, cookie->id_bouncesegs,
   1033 	    cookie->id_nbouncesegs);
   1034 	cookie->id_bouncebuflen = 0;
   1035 	cookie->id_nbouncesegs = 0;
   1036 	cookie->id_flags &= ~X86_DMA_HAS_BOUNCE;
   1037 }
   1038 
   1039 
   1040 /*
   1041  * This function does the same as uiomove, but takes an explicit
   1042  * direction, and does not update the uio structure.
   1043  */
   1044 static int
   1045 _bus_dma_uiomove(void *buf, struct uio *uio, size_t n, int direction)
   1046 {
   1047 	struct iovec *iov;
   1048 	int error;
   1049 	struct vmspace *vm;
   1050 	char *cp;
   1051 	size_t resid, cnt;
   1052 	int i;
   1053 
   1054 	iov = uio->uio_iov;
   1055 	vm = uio->uio_vmspace;
   1056 	cp = buf;
   1057 	resid = n;
   1058 
   1059 	for (i = 0; i < uio->uio_iovcnt && resid > 0; i++) {
   1060 		iov = &uio->uio_iov[i];
   1061 		if (iov->iov_len == 0)
   1062 			continue;
   1063 		cnt = MIN(resid, iov->iov_len);
   1064 
   1065 		if (!VMSPACE_IS_KERNEL_P(vm) &&
   1066 		    (curlwp->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
   1067 		    != 0) {
   1068 			preempt();
   1069 		}
   1070 		if (direction == UIO_READ) {
   1071 			error = copyout_vmspace(vm, cp, iov->iov_base, cnt);
   1072 		} else {
   1073 			error = copyin_vmspace(vm, iov->iov_base, cp, cnt);
   1074 		}
   1075 		if (error)
   1076 			return (error);
   1077 		cp += cnt;
   1078 		resid -= cnt;
   1079 	}
   1080 	return (0);
   1081 }
   1082 
   1083 /*
   1084  * Common function for freeing DMA-safe memory.  May be called by
   1085  * bus-specific DMA memory free functions.
   1086  */
   1087 static void
   1088 _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
   1089 {
   1090 	struct vm_page *m;
   1091 	bus_addr_t addr;
   1092 	struct pglist mlist;
   1093 	int curseg;
   1094 
   1095 	/*
   1096 	 * Build a list of pages to free back to the VM system.
   1097 	 */
   1098 	TAILQ_INIT(&mlist);
   1099 	for (curseg = 0; curseg < nsegs; curseg++) {
   1100 		for (addr = segs[curseg].ds_addr;
   1101 		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
   1102 		    addr += PAGE_SIZE) {
   1103 			m = _BUS_BUS_TO_VM_PAGE(addr);
   1104 			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
   1105 		}
   1106 	}
   1107 
   1108 	uvm_pglistfree(&mlist);
   1109 }
   1110 
   1111 /*
   1112  * Common function for mapping DMA-safe memory.  May be called by
   1113  * bus-specific DMA memory map functions.
   1114  * This supports BUS_DMA_NOCACHE.
   1115  */
   1116 static int
   1117 _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
   1118     size_t size, void **kvap, int flags)
   1119 {
   1120 	vaddr_t va;
   1121 	bus_addr_t addr;
   1122 	int curseg;
   1123 	const uvm_flag_t kmflags =
   1124 	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
   1125 	u_int pmapflags = PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE;
   1126 
   1127 	size = round_page(size);
   1128 	if (flags & BUS_DMA_NOCACHE)
   1129 		pmapflags |= PMAP_NOCACHE;
   1130 
   1131 	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
   1132 
   1133 	if (va == 0)
   1134 		return ENOMEM;
   1135 
   1136 	*kvap = (void *)va;
   1137 
   1138 	for (curseg = 0; curseg < nsegs; curseg++) {
   1139 		for (addr = segs[curseg].ds_addr;
   1140 		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
   1141 		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
   1142 			if (size == 0)
   1143 				panic("_bus_dmamem_map: size botch");
   1144 			_BUS_PMAP_ENTER(pmap_kernel(), va, addr,
   1145 			    VM_PROT_READ | VM_PROT_WRITE,
   1146 			    pmapflags);
   1147 		}
   1148 	}
   1149 	pmap_update(pmap_kernel());
   1150 
   1151 	return 0;
   1152 }
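
/*
 * A sketch (not compiled) of the usual alloc/map/create/load sequence
 * for a DMA-safe control structure such as a descriptor ring, built on
 * the functions above.  BUS_DMA_COHERENT is what portable drivers
 * pass; the map function above only acts on BUS_DMA_NOCACHE for cache
 * attributes.  The mydev_* names and RING_SIZE are hypothetical.
 */
#if 0
static int
mydev_alloc_ring(struct mydev_softc *sc)
{
	int error, rseg;

	error = bus_dmamem_alloc(sc->sc_dmat, RING_SIZE, PAGE_SIZE, 0,
	    &sc->sc_ring_seg, 1, &rseg, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_ring_seg, rseg,
	    RING_SIZE, (void **)&sc->sc_ring,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT);
	if (error)
		goto fail_free;
	error = bus_dmamap_create(sc->sc_dmat, RING_SIZE, 1, RING_SIZE, 0,
	    BUS_DMA_WAITOK, &sc->sc_ring_map);
	if (error)
		goto fail_unmap;
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_ring_map, sc->sc_ring,
	    RING_SIZE, NULL, BUS_DMA_WAITOK);
	if (error)
		goto fail_destroy;
	return 0;

 fail_destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_ring_map);
 fail_unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_ring, RING_SIZE);
 fail_free:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_ring_seg, rseg);
	return error;
}
#endif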
   1153 
   1154 /*
   1155  * Common function for unmapping DMA-safe memory.  May be called by
   1156  * bus-specific DMA memory unmapping functions.
   1157  */
   1158 
   1159 static void
   1160 _bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
   1161 {
   1162 	pt_entry_t *pte, opte;
   1163 	vaddr_t va, sva, eva;
   1164 
   1165 #ifdef DIAGNOSTIC
   1166 	if ((u_long)kva & PGOFSET)
   1167 		panic("_bus_dmamem_unmap");
   1168 #endif
   1169 
   1170 	size = round_page(size);
   1171 	sva = (vaddr_t)kva;
   1172 	eva = sva + size;
   1173 
   1174 	/*
    1175 	 * Mark the pages cacheable again.
    1176 	 */
   1177 	for (va = sva; va < eva; va += PAGE_SIZE) {
   1178 		pte = kvtopte(va);
   1179 		opte = *pte;
   1180 		if ((opte & PG_N) != 0)
   1181 			pmap_pte_clearbits(pte, PG_N);
   1182 	}
   1183 	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
   1184 	pmap_update(pmap_kernel());
   1185 	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
   1186 }
   1187 
   1188 /*
   1189  * Common function for mmap(2)'ing DMA-safe memory.  May be called by
   1190  * bus-specific DMA mmap(2)'ing functions.
   1191  */
   1192 static paddr_t
   1193 _bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
   1194     off_t off, int prot, int flags)
   1195 {
   1196 	int i;
   1197 
   1198 	for (i = 0; i < nsegs; i++) {
   1199 #ifdef DIAGNOSTIC
   1200 		if (off & PGOFSET)
   1201 			panic("_bus_dmamem_mmap: offset unaligned");
   1202 		if (segs[i].ds_addr & PGOFSET)
   1203 			panic("_bus_dmamem_mmap: segment unaligned");
   1204 		if (segs[i].ds_len & PGOFSET)
   1205 			panic("_bus_dmamem_mmap: segment size not multiple"
   1206 			    " of page size");
   1207 #endif
   1208 		if (off >= segs[i].ds_len) {
   1209 			off -= segs[i].ds_len;
   1210 			continue;
   1211 		}
   1212 
   1213 		return (x86_btop(_BUS_BUS_TO_PHYS(segs[i].ds_addr + off)));
   1214 	}
   1215 
   1216 	/* Page not found. */
   1217 	return (-1);
   1218 }
   1219 
   1220 /**********************************************************************
   1221  * DMA utility functions
   1222  **********************************************************************/
   1223 
   1224 /*
   1225  * Utility function to load a linear buffer.
   1226  */
   1227 static int
   1228 _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
   1229     bus_size_t buflen, struct vmspace *vm, int flags)
   1230 {
   1231 	bus_size_t sgsize;
   1232 	bus_addr_t curaddr;
   1233 	vaddr_t vaddr = (vaddr_t)buf;
   1234 	pmap_t pmap;
   1235 
   1236 	if (vm != NULL)
   1237 		pmap = vm_map_pmap(&vm->vm_map);
   1238 	else
   1239 		pmap = pmap_kernel();
   1240 
   1241 	while (buflen > 0) {
   1242 		int error;
   1243 
   1244 		/*
   1245 		 * Get the bus address for this segment.
   1246 		 */
   1247 		curaddr = _BUS_VIRT_TO_BUS(pmap, vaddr);
   1248 
   1249 		/*
   1250 		 * Compute the segment size, and adjust counts.
   1251 		 */
   1252 		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
   1253 		if (buflen < sgsize)
   1254 			sgsize = buflen;
   1255 
   1256 		/*
   1257 		 * If we're beyond the bounce threshold, notify
   1258 		 * the caller.
   1259 		 */
   1260 		if (map->_dm_bounce_thresh != 0 &&
   1261 		    curaddr + sgsize >= map->_dm_bounce_thresh)
   1262 			return (EINVAL);
   1263 
   1264 
   1265 		error = _bus_dmamap_load_busaddr(t, map, curaddr, sgsize);
   1266 		if (error)
   1267 			return error;
   1268 
   1269 		vaddr += sgsize;
   1270 		buflen -= sgsize;
   1271 	}
   1272 
   1273 	return (0);
   1274 }
   1275 
   1276 static int
   1277 _bus_dmatag_subregion(bus_dma_tag_t tag, bus_addr_t min_addr,
   1278 		      bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags)
   1279 {
   1280 
   1281 	if ((tag->_bounce_thresh != 0   && max_addr >= tag->_bounce_thresh) &&
   1282 	    (tag->_bounce_alloc_hi != 0 && max_addr >= tag->_bounce_alloc_hi) &&
   1283 	    (min_addr <= tag->_bounce_alloc_lo)) {
   1284 		*newtag = tag;
   1285 		/* if the tag must be freed, add a reference */
   1286 		if (tag->_tag_needs_free)
   1287 			(tag->_tag_needs_free)++;
   1288 		return 0;
   1289 	}
   1290 
   1291 	if ((*newtag = malloc(sizeof(struct x86_bus_dma_tag), M_DMAMAP,
   1292 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
   1293 		return ENOMEM;
   1294 
   1295 	**newtag = *tag;
   1296 	(*newtag)->_tag_needs_free = 1;
   1297 
   1298 	if (tag->_bounce_thresh == 0 || max_addr < tag->_bounce_thresh)
   1299 		(*newtag)->_bounce_thresh = max_addr;
   1300 	if (tag->_bounce_alloc_hi == 0 || max_addr < tag->_bounce_alloc_hi)
   1301 		(*newtag)->_bounce_alloc_hi = max_addr;
   1302 	if (min_addr > tag->_bounce_alloc_lo)
   1303 		(*newtag)->_bounce_alloc_lo = min_addr;
   1304 
   1305 	return 0;
   1306 }
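
/*
 * A sketch (not compiled) of how a driver for a device that can only
 * address the low 4GB derives a restricted tag via the public
 * bus_dmatag_subregion() wrapper below.  When the parent tag already
 * satisfies the constraint the function above just hands it back (with
 * an extra reference if it was malloc'ed), so releasing the tag with
 * bus_dmatag_destroy() is the correct pairing in either case.  The
 * sc_* names are hypothetical.
 */
#if 0
static int
mydev_clamp_dma(struct mydev_softc *sc, bus_dma_tag_t parent)
{
	int error;

	error = bus_dmatag_subregion(parent, 0, 0xffffffffUL,
	    &sc->sc_dmat, BUS_DMA_WAITOK);
	if (error)
		return error;
	/* ... use sc->sc_dmat for all maps and allocations ... */
	return 0;
}

static void
mydev_detach_dma(struct mydev_softc *sc)
{
	bus_dmatag_destroy(sc->sc_dmat);
}
#endif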
   1307 
   1308 static void
   1309 _bus_dmatag_destroy(bus_dma_tag_t tag)
   1310 {
   1311 
   1312 	switch (tag->_tag_needs_free) {
   1313 	case 0:
   1314 		break;				/* not allocated with malloc */
   1315 	case 1:
   1316 		free(tag, M_DMAMAP);		/* last reference to tag */
   1317 		break;
   1318 	default:
   1319 		(tag->_tag_needs_free)--;	/* one less reference */
   1320 	}
   1321 }
   1322 
   1323 
   1324 void
   1325 bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t p, bus_addr_t o, bus_size_t l,
   1326 		int ops)
   1327 {
   1328 	bus_dma_tag_t it;
   1329 
   1330 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_SYNC) == 0)
   1331 		;	/* skip override */
   1332 	else for (it = t; it != NULL; it = it->bdt_super) {
   1333 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_SYNC) == 0)
   1334 			continue;
   1335 		(*it->bdt_ov->ov_dmamap_sync)(it->bdt_ctx, t, p, o,
   1336 		    l, ops);
   1337 		return;
   1338 	}
   1339 
   1340 	if (ops & BUS_DMASYNC_POSTREAD)
   1341 		x86_lfence();
   1342 
   1343 	_bus_dmamap_sync(t, p, o, l, ops);
   1344 }
   1345 
   1346 int
   1347 bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
   1348 		  bus_size_t maxsegsz, bus_size_t boundary, int flags,
   1349 		  bus_dmamap_t *dmamp)
   1350 {
   1351 	bus_dma_tag_t it;
   1352 
   1353 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_CREATE) == 0)
   1354 		;	/* skip override */
   1355 	else for (it = t; it != NULL; it = it->bdt_super) {
   1356 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_CREATE) == 0)
   1357 			continue;
   1358 		return (*it->bdt_ov->ov_dmamap_create)(it->bdt_ctx, t, size,
   1359 		    nsegments, maxsegsz, boundary, flags, dmamp);
   1360 	}
   1361 
   1362 	return _bus_dmamap_create(t, size, nsegments, maxsegsz,
   1363 	    boundary, flags, dmamp);
   1364 }
   1365 
   1366 void
   1367 bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t dmam)
   1368 {
   1369 	bus_dma_tag_t it;
   1370 
   1371 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_DESTROY) == 0)
   1372 		;	/* skip override */
   1373 	else for (it = t; it != NULL; it = it->bdt_super) {
   1374 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_DESTROY) == 0)
   1375 			continue;
   1376 		(*it->bdt_ov->ov_dmamap_destroy)(it->bdt_ctx, t, dmam);
   1377 		return;
   1378 	}
   1379 
   1380 	_bus_dmamap_destroy(t, dmam);
   1381 }
   1382 
   1383 int
   1384 bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t dmam, void *buf,
   1385 		bus_size_t buflen, struct proc *p, int flags)
   1386 {
   1387 	bus_dma_tag_t it;
   1388 
   1389 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD) == 0)
   1390 		;	/* skip override */
   1391 	else for (it = t; it != NULL; it = it->bdt_super) {
   1392 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_LOAD) == 0)
   1393 			continue;
   1394 		return (*it->bdt_ov->ov_dmamap_load)(it->bdt_ctx, t, dmam,
   1395 		    buf, buflen, p, flags);
   1396 	}
   1397 
   1398 	return _bus_dmamap_load(t, dmam, buf, buflen, p, flags);
   1399 }
   1400 
   1401 int
   1402 bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t dmam,
   1403 		     struct mbuf *chain, int flags)
   1404 {
   1405 	bus_dma_tag_t it;
   1406 
   1407 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD_MBUF) == 0)
   1408 		;	/* skip override */
   1409 	else for (it = t; it != NULL; it = it->bdt_super) {
   1410 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_LOAD_MBUF) == 0)
   1411 			continue;
   1412 		return (*it->bdt_ov->ov_dmamap_load_mbuf)(it->bdt_ctx, t, dmam,
   1413 		    chain, flags);
   1414 	}
   1415 
   1416 	return _bus_dmamap_load_mbuf(t, dmam, chain, flags);
   1417 }
   1418 
   1419 int
   1420 bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t dmam,
   1421 		    struct uio *uio, int flags)
   1422 {
   1423 	bus_dma_tag_t it;
   1424 
   1425 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD_UIO) == 0)
   1426 		;	/* skip override */
   1427 	else for (it = t; it != NULL; it = it->bdt_super) {
   1428 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_LOAD_UIO) == 0)
   1429 			continue;
   1430 		return (*it->bdt_ov->ov_dmamap_load_uio)(it->bdt_ctx, t, dmam,
   1431 		    uio, flags);
   1432 	}
   1433 
   1434 	return _bus_dmamap_load_uio(t, dmam, uio, flags);
   1435 }
   1436 
   1437 int
   1438 bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t dmam,
   1439 		    bus_dma_segment_t *segs, int nsegs,
   1440 		    bus_size_t size, int flags)
   1441 {
   1442 	bus_dma_tag_t it;
   1443 
   1444 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD_RAW) == 0)
   1445 		;	/* skip override */
   1446 	else for (it = t; it != NULL; it = it->bdt_super) {
   1447 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_LOAD_RAW) == 0)
   1448 			continue;
   1449 		return (*it->bdt_ov->ov_dmamap_load_raw)(it->bdt_ctx, t, dmam,
   1450 		    segs, nsegs, size, flags);
   1451 	}
   1452 
   1453 	return _bus_dmamap_load_raw(t, dmam, segs, nsegs, size, flags);
   1454 }
   1455 
   1456 void
   1457 bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t dmam)
   1458 {
   1459 	bus_dma_tag_t it;
   1460 
   1461 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_UNLOAD) == 0)
   1462 		;	/* skip override */
   1463 	else for (it = t; it != NULL; it = it->bdt_super) {
   1464 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_UNLOAD) == 0)
   1465 			continue;
   1466 		(*it->bdt_ov->ov_dmamap_unload)(it->bdt_ctx, t, dmam);
   1467 		return;
   1468 	}
   1469 
   1470 	_bus_dmamap_unload(t, dmam);
   1471 }
   1472 
   1473 int
   1474 bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
   1475 		 bus_size_t boundary, bus_dma_segment_t *segs, int nsegs,
   1476 		 int *rsegs, int flags)
   1477 {
   1478 	bus_dma_tag_t it;
   1479 
   1480 	if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_ALLOC) == 0)
   1481 		;	/* skip override */
   1482 	else for (it = t; it != NULL; it = it->bdt_super) {
   1483 		if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_ALLOC) == 0)
   1484 			continue;
   1485 		return (*it->bdt_ov->ov_dmamem_alloc)(it->bdt_ctx, t, size,
   1486 		    alignment, boundary, segs, nsegs, rsegs, flags);
   1487 	}
   1488 
   1489 	return _bus_dmamem_alloc(t, size, alignment, boundary, segs,
   1490 	    nsegs, rsegs, flags);
   1491 }
   1492 
   1493 void
   1494 bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
   1495 {
   1496 	bus_dma_tag_t it;
   1497 
   1498 	if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_FREE) == 0)
   1499 		;	/* skip override */
   1500 	else for (it = t; it != NULL; it = it->bdt_super) {
   1501 		if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_FREE) == 0)
   1502 			continue;
   1503 		(*it->bdt_ov->ov_dmamem_free)(it->bdt_ctx, t, segs, nsegs);
   1504 		return;
   1505 	}
   1506 
   1507 	_bus_dmamem_free(t, segs, nsegs);
   1508 }
   1509 
   1510 int
   1511 bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
   1512 	       size_t size, void **kvap, int flags)
   1513 {
   1514 	bus_dma_tag_t it;
   1515 
   1516 	if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_MAP) == 0)
   1517 		;	/* skip override */
   1518 	else for (it = t; it != NULL; it = it->bdt_super) {
   1519 		if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_MAP) == 0)
   1520 			continue;
   1521 		return (*it->bdt_ov->ov_dmamem_map)(it->bdt_ctx, t,
   1522 		    segs, nsegs, size, kvap, flags);
   1523 	}
   1524 
   1525 	return _bus_dmamem_map(t, segs, nsegs, size, kvap, flags);
   1526 }
   1527 
   1528 void
   1529 bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
   1530 {
   1531 	bus_dma_tag_t it;
   1532 
   1533 	if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_UNMAP) == 0)
   1534 		;	/* skip override */
   1535 	else for (it = t; it != NULL; it = it->bdt_super) {
   1536 		if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_UNMAP) == 0)
   1537 			continue;
   1538 		(*it->bdt_ov->ov_dmamem_unmap)(it->bdt_ctx, t, kva, size);
   1539 		return;
   1540 	}
   1541 
   1542 	_bus_dmamem_unmap(t, kva, size);
   1543 }
   1544 
   1545 paddr_t
   1546 bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
   1547 		off_t off, int prot, int flags)
   1548 {
   1549 	bus_dma_tag_t it;
   1550 
   1551 	if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_MMAP) == 0)
   1552 		;	/* skip override */
   1553 	else for (it = t; it != NULL; it = it->bdt_super) {
   1554 		if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_MMAP) == 0)
   1555 			continue;
   1556 		return (*it->bdt_ov->ov_dmamem_mmap)(it->bdt_ctx, t, segs,
   1557 		    nsegs, off, prot, flags);
   1558 	}
   1559 
   1560 	return _bus_dmamem_mmap(t, segs, nsegs, off, prot, flags);
   1561 }
   1562 
   1563 int
   1564 bus_dmatag_subregion(bus_dma_tag_t t, bus_addr_t min_addr,
   1565 		     bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags)
   1566 {
   1567 	bus_dma_tag_t it;
   1568 
   1569 	if ((t->bdt_exists & BUS_DMATAG_OVERRIDE_SUBREGION) == 0)
   1570 		;	/* skip override */
   1571 	else for (it = t; it != NULL; it = it->bdt_super) {
   1572 		if ((it->bdt_present & BUS_DMATAG_OVERRIDE_SUBREGION) == 0)
   1573 			continue;
   1574 		return (*it->bdt_ov->ov_dmatag_subregion)(it->bdt_ctx, t,
   1575 		    min_addr, max_addr, newtag, flags);
   1576 	}
   1577 
   1578 	return _bus_dmatag_subregion(t, min_addr, max_addr, newtag, flags);
   1579 }
   1580 
   1581 void
   1582 bus_dmatag_destroy(bus_dma_tag_t t)
   1583 {
   1584 	bus_dma_tag_t it;
   1585 
   1586 	if ((t->bdt_exists & BUS_DMATAG_OVERRIDE_DESTROY) == 0)
   1587 		;	/* skip override */
   1588 	else for (it = t; it != NULL; it = it->bdt_super) {
   1589 		if ((it->bdt_present & BUS_DMATAG_OVERRIDE_DESTROY) == 0)
   1590 			continue;
   1591 		(*it->bdt_ov->ov_dmatag_destroy)(it->bdt_ctx, t);
   1592 		return;
   1593 	}
   1594 
   1595 	_bus_dmatag_destroy(t);
   1596 }
   1597 
   1598 static const void *
   1599 bit_to_function_pointer(const struct bus_dma_overrides *ov, uint64_t bit)
   1600 {
   1601 	switch (bit) {
   1602 	case BUS_DMAMAP_OVERRIDE_CREATE:
   1603 		return ov->ov_dmamap_create;
   1604 	case BUS_DMAMAP_OVERRIDE_DESTROY:
   1605 		return ov->ov_dmamap_destroy;
   1606 	case BUS_DMAMAP_OVERRIDE_LOAD:
   1607 		return ov->ov_dmamap_load;
   1608 	case BUS_DMAMAP_OVERRIDE_LOAD_MBUF:
   1609 		return ov->ov_dmamap_load_mbuf;
   1610 	case BUS_DMAMAP_OVERRIDE_LOAD_UIO:
   1611 		return ov->ov_dmamap_load_uio;
   1612 	case BUS_DMAMAP_OVERRIDE_LOAD_RAW:
   1613 		return ov->ov_dmamap_load_raw;
   1614 	case BUS_DMAMAP_OVERRIDE_UNLOAD:
   1615 		return ov->ov_dmamap_unload;
   1616 	case BUS_DMAMAP_OVERRIDE_SYNC:
   1617 		return ov->ov_dmamap_sync;
   1618 	case BUS_DMAMEM_OVERRIDE_ALLOC:
   1619 		return ov->ov_dmamem_alloc;
   1620 	case BUS_DMAMEM_OVERRIDE_FREE:
   1621 		return ov->ov_dmamem_free;
   1622 	case BUS_DMAMEM_OVERRIDE_MAP:
   1623 		return ov->ov_dmamem_map;
   1624 	case BUS_DMAMEM_OVERRIDE_UNMAP:
   1625 		return ov->ov_dmamem_unmap;
   1626 	case BUS_DMAMEM_OVERRIDE_MMAP:
   1627 		return ov->ov_dmamem_mmap;
   1628 	case BUS_DMATAG_OVERRIDE_SUBREGION:
   1629 		return ov->ov_dmatag_subregion;
   1630 	case BUS_DMATAG_OVERRIDE_DESTROY:
   1631 		return ov->ov_dmatag_destroy;
   1632 	default:
   1633 		return NULL;
   1634 	}
   1635 }
   1636 
   1637 void
   1638 bus_dma_tag_destroy(bus_dma_tag_t bdt)
   1639 {
   1640 	if (bdt->bdt_super != NULL)
   1641 		bus_dmatag_destroy(bdt->bdt_super);
   1642 	kmem_free(bdt, sizeof(struct x86_bus_dma_tag));
   1643 }
   1644 
   1645 int
   1646 bus_dma_tag_create(bus_dma_tag_t obdt, const uint64_t present,
   1647     const struct bus_dma_overrides *ov, void *ctx, bus_dma_tag_t *bdtp)
   1648 {
   1649 	uint64_t bit, bits, nbits;
   1650 	bus_dma_tag_t bdt;
   1651 	const void *fp;
   1652 
   1653 	if (ov == NULL || present == 0)
   1654 		return EINVAL;
   1655 
   1656 	bdt = kmem_alloc(sizeof(struct x86_bus_dma_tag), KM_SLEEP);
   1657 	*bdt = *obdt;
   1658 	/* don't let bus_dmatag_destroy free these */
   1659 	bdt->_tag_needs_free = 0;
   1660 
   1661 	bdt->bdt_super = obdt;
   1662 
   1663 	for (bits = present; bits != 0; bits = nbits) {
   1664 		nbits = bits & (bits - 1);
   1665 		bit = nbits ^ bits;
   1666 		if ((fp = bit_to_function_pointer(ov, bit)) == NULL) {
   1667 #ifdef DEBUG
   1668 			printf("%s: missing bit %" PRIx64 "\n", __func__, bit);
   1669 #endif
   1670 			goto einval;
   1671 		}
   1672 	}
   1673 
   1674 	bdt->bdt_ov = ov;
   1675 	bdt->bdt_exists = obdt->bdt_exists | present;
   1676 	bdt->bdt_present = present;
   1677 	bdt->bdt_ctx = ctx;
   1678 
   1679 	*bdtp = bdt;
   1680 	if (obdt->_tag_needs_free)
   1681 		obdt->_tag_needs_free++;
   1682 
   1683 	return 0;
   1684 einval:
   1685 	kmem_free(bdt, sizeof(struct x86_bus_dma_tag));
   1686 	return EINVAL;
   1687 }
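
/*
 * A sketch (not compiled) of installing a single override with
 * bus_dma_tag_create() above, e.g. to add extra work around map
 * synchronization for a translating bus.  The override's argument list
 * mirrors the ov_dmamap_sync call made from bus_dmamap_sync() above;
 * the my_* names are hypothetical.  Operations without an override
 * bit set fall through to the parent tag's implementation.
 */
#if 0
static void
my_dmamap_sync(void *ctx, bus_dma_tag_t t, bus_dmamap_t map,
    bus_addr_t offset, bus_size_t len, int ops)
{
	struct my_bus_softc *sc = ctx;

	/* ... bus-specific work, then the generic sync on the parent ... */
	bus_dmamap_sync(sc->sc_parent_dmat, map, offset, len, ops);
}

static const struct bus_dma_overrides my_dma_overrides = {
	.ov_dmamap_sync = my_dmamap_sync,
};

static int
my_bus_make_tag(struct my_bus_softc *sc, bus_dma_tag_t parent)
{
	sc->sc_parent_dmat = parent;
	return bus_dma_tag_create(parent, BUS_DMAMAP_OVERRIDE_SYNC,
	    &my_dma_overrides, sc, &sc->sc_dmat);
}
#endif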
   1688