      1 /*	$NetBSD: bus_dma.c,v 1.76 2017/06/01 02:45:08 chs Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1996, 1997, 1998, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
      9  * Simulation Facility NASA Ames Research Center, and by Andrew Doran.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 #include <sys/cdefs.h>
     34 __KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.76 2017/06/01 02:45:08 chs Exp $");
     35 
     36 /*
     37  * The following is included because _bus_dma_uiomove is derived from
     38  * uiomove() in kern_subr.c.
     39  */
     40 
     41 /*
     42  * Copyright (c) 1982, 1986, 1991, 1993
     43  *	The Regents of the University of California.  All rights reserved.
     44  * (c) UNIX System Laboratories, Inc.
     45  * All or some portions of this file are derived from material licensed
     46  * to the University of California by American Telephone and Telegraph
     47  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     48  * the permission of UNIX System Laboratories, Inc.
     49  *
     50  * Copyright (c) 1992, 1993
     51  *	The Regents of the University of California.  All rights reserved.
     52  *
     53  * This software was developed by the Computer Systems Engineering group
     54  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
     55  * contributed to Berkeley.
     56  *
     57  * All advertising materials mentioning features or use of this software
     58  * must display the following acknowledgement:
     59  *	This product includes software developed by the University of
     60  *	California, Lawrence Berkeley Laboratory.
     61  *
     62  * Redistribution and use in source and binary forms, with or without
     63  * modification, are permitted provided that the following conditions
     64  * are met:
     65  * 1. Redistributions of source code must retain the above copyright
     66  *    notice, this list of conditions and the following disclaimer.
     67  * 2. Redistributions in binary form must reproduce the above copyright
     68  *    notice, this list of conditions and the following disclaimer in the
     69  *    documentation and/or other materials provided with the distribution.
     70  * 3. Neither the name of the University nor the names of its contributors
     71  *    may be used to endorse or promote products derived from this software
     72  *    without specific prior written permission.
     73  *
     74  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     75  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     76  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     77  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     78  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     79  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     80  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     81  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     82  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     83  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     84  * SUCH DAMAGE.
     85  */
     86 
     87 #include "ioapic.h"
     88 #include "isa.h"
     89 #include "opt_mpbios.h"
     90 
     91 #include <sys/param.h>
     92 #include <sys/systm.h>
     93 #include <sys/kernel.h>
     94 #include <sys/kmem.h>
     95 #include <sys/malloc.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/proc.h>
     98 
     99 #include <sys/bus.h>
    100 #include <machine/bus_private.h>
    101 #if NIOAPIC > 0
    102 #include <machine/i82093var.h>
    103 #endif
    104 #ifdef MPBIOS
    105 #include <machine/mpbiosvar.h>
    106 #endif
    107 
    108 #if NISA > 0
    109 #include <dev/isa/isareg.h>
    110 #include <dev/isa/isavar.h>
    111 #endif
    112 
    113 #include <uvm/uvm.h>
    114 
    115 extern	paddr_t avail_end;
    116 
    117 #define	IDTVEC(name)	__CONCAT(X,name)
    118 typedef void (vector)(void);
    119 extern vector *IDTVEC(intr)[];
    120 
    121 #define	BUSDMA_BOUNCESTATS
    122 
    123 #ifdef BUSDMA_BOUNCESTATS
    124 #define	BUSDMA_EVCNT_DECL(name)						\
    125 static struct evcnt bus_dma_ev_##name =					\
    126     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "bus_dma", #name);		\
    127 EVCNT_ATTACH_STATIC(bus_dma_ev_##name)
    128 
    129 #define	STAT_INCR(name)							\
    130     bus_dma_ev_##name.ev_count++
    131 #define	STAT_DECR(name)							\
    132     bus_dma_ev_##name.ev_count--
    133 
    134 BUSDMA_EVCNT_DECL(nbouncebufs);
    135 BUSDMA_EVCNT_DECL(loads);
    136 BUSDMA_EVCNT_DECL(bounces);
    137 #else
    138 #define STAT_INCR(x)
    139 #define STAT_DECR(x)
    140 #endif
    141 
    142 static int	_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
    143 	    bus_size_t, int, bus_dmamap_t *);
    144 static void	_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
    145 static int	_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
    146 	    bus_size_t, struct proc *, int);
    147 static int	_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
    148 	    struct mbuf *, int);
    149 static int	_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
    150 	    struct uio *, int);
    151 static int	_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
    152 	    bus_dma_segment_t *, int, bus_size_t, int);
    153 static void	_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
    154 static void	_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
    155 	    bus_size_t, int);
    156 
    157 static int	_bus_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size,
    158 	    bus_size_t alignment, bus_size_t boundary,
    159 	    bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags);
    160 static void	_bus_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs,
    161 	    int nsegs);
    162 static int	_bus_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs,
    163 	    int nsegs, size_t size, void **kvap, int flags);
    164 static void	_bus_dmamem_unmap(bus_dma_tag_t tag, void *kva, size_t size);
    165 static paddr_t	_bus_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs,
    166 	    int nsegs, off_t off, int prot, int flags);
    167 
    168 static int	_bus_dmatag_subregion(bus_dma_tag_t tag, bus_addr_t min_addr,
    169 	    bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags);
    170 static void	_bus_dmatag_destroy(bus_dma_tag_t tag);
    171 
    172 static int _bus_dma_uiomove(void *, struct uio *, size_t, int);
    173 static int _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    174 	    bus_size_t size, int flags);
    175 static void _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map);
    176 static int _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map,
    177 	    void *buf, bus_size_t buflen, struct vmspace *vm, int flags);
    178 static int _bus_dmamap_load_busaddr(bus_dma_tag_t, bus_dmamap_t,
    179     bus_addr_t, bus_size_t);
    180 
    181 #ifndef _BUS_DMAMEM_ALLOC_RANGE
    182 static int	_bus_dmamem_alloc_range(bus_dma_tag_t tag, bus_size_t size,
    183 	    bus_size_t alignment, bus_size_t boundary,
    184 	    bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags,
    185 	    bus_addr_t low, bus_addr_t high);
    186 
    187 #define _BUS_DMAMEM_ALLOC_RANGE _bus_dmamem_alloc_range
    188 
    189 /*
    190  * Allocate physical memory from the given physical address range.
    191  * Called by DMA-safe memory allocation methods.
    192  */
    193 static int
    194 _bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    195     bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    196     int nsegs, int *rsegs, int flags, bus_addr_t low, bus_addr_t high)
    197 {
    198 	paddr_t curaddr, lastaddr;
    199 	struct vm_page *m;
    200 	struct pglist mlist;
    201 	int curseg, error;
    202 	bus_size_t uboundary;
    203 
    204 	/* Always round the size. */
    205 	size = round_page(size);
    206 
    207 	KASSERT(boundary >= PAGE_SIZE || boundary == 0);
    208 
     209 	/*
     210 	 * Allocate pages from the VM system.
     211 	 * We accept boundaries < size, splitting into multiple segments
     212 	 * if needed.  uvm_pglistalloc does not, so compute an appropriate
     213 	 * boundary: the next power of 2 >= size.
     214 	 */
    215 
    216 	if (boundary == 0)
    217 		uboundary = 0;
    218 	else {
    219 		uboundary = boundary;
    220 		while (uboundary < size)
    221 			uboundary = uboundary << 1;
    222 	}
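	/*
	 * Illustrative example (not from the original sources): with
	 * size = 3 * PAGE_SIZE (0x3000) and boundary = PAGE_SIZE, the
	 * loop above doubles uboundary to 0x2000 and then 0x4000, the
	 * first power of two >= size, so uvm_pglistalloc is never asked
	 * for a boundary smaller than the allocation itself; crossings
	 * of the caller's smaller boundary are split into separate
	 * segments below.
	 */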
    223 	error = uvm_pglistalloc(size, low, high, alignment, uboundary,
    224 	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
    225 	if (error)
    226 		return (error);
    227 
    228 	/*
    229 	 * Compute the location, size, and number of segments actually
    230 	 * returned by the VM code.
    231 	 */
    232 	m = TAILQ_FIRST(&mlist);
    233 	curseg = 0;
    234 	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
    235 	segs[curseg].ds_len = PAGE_SIZE;
    236 	m = m->pageq.queue.tqe_next;
    237 
    238 	for (; m != NULL; m = m->pageq.queue.tqe_next) {
    239 		curaddr = VM_PAGE_TO_PHYS(m);
    240 #ifdef DIAGNOSTIC
    241 		if (curaddr < low || curaddr >= high) {
     242 			printf("uvm_pglistalloc returned nonsensical"
     243 			    " address %#" PRIxPADDR "\n", curaddr);
    244 			panic("_bus_dmamem_alloc_range");
    245 		}
    246 #endif
    247 		if (curaddr == (lastaddr + PAGE_SIZE) &&
    248 		    (lastaddr & boundary) == (curaddr & boundary)) {
    249 			segs[curseg].ds_len += PAGE_SIZE;
    250 		} else {
    251 			curseg++;
    252 			KASSERT(curseg < nsegs);
    253 			segs[curseg].ds_addr = curaddr;
    254 			segs[curseg].ds_len = PAGE_SIZE;
    255 		}
    256 		lastaddr = curaddr;
    257 	}
    258 
    259 	*rsegs = curseg + 1;
    260 
    261 	return (0);
    262 }
    263 #endif /* _BUS_DMAMEM_ALLOC_RANGE */
    264 
    265 /*
    266  * Create a DMA map.
    267  */
    268 static int
    269 _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    270     bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
    271 {
    272 	struct x86_bus_dma_cookie *cookie;
    273 	bus_dmamap_t map;
    274 	int error, cookieflags;
    275 	void *cookiestore, *mapstore;
    276 	size_t cookiesize, mapsize;
    277 
    278 	/*
    279 	 * Allocate and initialize the DMA map.  The end of the map
    280 	 * is a variable-sized array of segments, so we allocate enough
    281 	 * room for them in one shot.
    282 	 *
    283 	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
    284 	 * of ALLOCNOW notifies others that we've reserved these resources,
    285 	 * and they are not to be freed.
    286 	 *
    287 	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
    288 	 * the (nsegments - 1).
    289 	 */
    290 	error = 0;
    291 	mapsize = sizeof(struct x86_bus_dmamap) +
    292 	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
    293 	if ((mapstore = malloc(mapsize, M_DMAMAP, M_ZERO |
    294 	    ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK))) == NULL)
    295 		return (ENOMEM);
    296 
    297 	map = (struct x86_bus_dmamap *)mapstore;
    298 	map->_dm_size = size;
    299 	map->_dm_segcnt = nsegments;
    300 	map->_dm_maxmaxsegsz = maxsegsz;
    301 	map->_dm_boundary = boundary;
    302 	map->_dm_bounce_thresh = t->_bounce_thresh;
    303 	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
    304 	map->dm_maxsegsz = maxsegsz;
    305 	map->dm_mapsize = 0;		/* no valid mappings */
    306 	map->dm_nsegs = 0;
    307 
    308 	if (t->_bounce_thresh == 0 || _BUS_AVAIL_END <= t->_bounce_thresh)
    309 		map->_dm_bounce_thresh = 0;
    310 	cookieflags = 0;
    311 
    312 	if (t->_may_bounce != NULL) {
    313 		error = t->_may_bounce(t, map, flags, &cookieflags);
    314 		if (error != 0)
    315 			goto out;
    316 	}
    317 
    318 	if (map->_dm_bounce_thresh != 0)
    319 		cookieflags |= X86_DMA_MIGHT_NEED_BOUNCE;
    320 
    321 	if ((cookieflags & X86_DMA_MIGHT_NEED_BOUNCE) == 0) {
    322 		*dmamp = map;
    323 		return 0;
    324 	}
    325 
    326 	cookiesize = sizeof(struct x86_bus_dma_cookie) +
    327 	    (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
    328 
    329 	/*
    330 	 * Allocate our cookie.
    331 	 */
    332 	if ((cookiestore = malloc(cookiesize, M_DMAMAP, M_ZERO |
    333 	    ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK))) == NULL) {
    334 		error = ENOMEM;
    335 		goto out;
    336 	}
    337 	cookie = (struct x86_bus_dma_cookie *)cookiestore;
    338 	cookie->id_flags = cookieflags;
    339 	map->_dm_cookie = cookie;
    340 
    341 	error = _bus_dma_alloc_bouncebuf(t, map, size, flags);
    342  out:
    343 	if (error)
    344 		_bus_dmamap_destroy(t, map);
    345 	else
    346 		*dmamp = map;
    347 
    348 	return (error);
    349 }
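
/*
 * Illustrative usage sketch (not part of the original file): a
 * hypothetical driver holding a DMA tag in "sc->sc_dmat" might create
 * and use a single-segment map roughly as follows (error handling
 * omitted):
 *
 *	bus_dmamap_t map;
 *
 *	bus_dmamap_create(sc->sc_dmat, MAXBSIZE, 1, MAXBSIZE, 0,
 *	    BUS_DMA_WAITOK, &map);
 *	bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL, BUS_DMA_WAITOK);
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... start the device transfer and wait for it to complete ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, map);
 *	bus_dmamap_destroy(sc->sc_dmat, map);
 */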
    350 
    351 /*
    352  * Destroy a DMA map.
    353  */
    354 static void
    355 _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
    356 {
    357 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
    358 
    359 	/*
    360 	 * Free any bounce pages this map might hold.
    361 	 */
    362 	if (cookie != NULL) {
    363 		if (cookie->id_flags & X86_DMA_HAS_BOUNCE)
    364 			_bus_dma_free_bouncebuf(t, map);
    365 		free(cookie, M_DMAMAP);
    366 	}
    367 
    368 	free(map, M_DMAMAP);
    369 }
    370 
    371 /*
    372  * Load a DMA map with a linear buffer.
    373  */
    374 static int
    375 _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    376     bus_size_t buflen, struct proc *p, int flags)
    377 {
    378 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
    379 	int error;
    380 	struct vmspace *vm;
    381 
    382 	STAT_INCR(loads);
    383 
    384 	/*
    385 	 * Make sure that on error condition we return "no valid mappings."
    386 	 */
    387 	map->dm_mapsize = 0;
    388 	map->dm_nsegs = 0;
    389 	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
    390 
    391 	if (buflen > map->_dm_size)
    392 		return EINVAL;
    393 
    394 	if (p != NULL) {
    395 		vm = p->p_vmspace;
    396 	} else {
    397 		vm = vmspace_kernel();
    398 	}
    399 	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags);
    400 	if (error == 0) {
    401 		if (cookie != NULL)
    402 			cookie->id_flags &= ~X86_DMA_IS_BOUNCING;
    403 		map->dm_mapsize = buflen;
    404 		return 0;
    405 	}
    406 
    407 	if (cookie == NULL ||
    408 	    (cookie->id_flags & X86_DMA_MIGHT_NEED_BOUNCE) == 0)
    409 		return error;
    410 
    411 	/*
    412 	 * First attempt failed; bounce it.
    413 	 */
    414 
    415 	STAT_INCR(bounces);
    416 
    417 	/*
    418 	 * Allocate bounce pages, if necessary.
    419 	 */
    420 	if ((cookie->id_flags & X86_DMA_HAS_BOUNCE) == 0) {
    421 		error = _bus_dma_alloc_bouncebuf(t, map, buflen, flags);
    422 		if (error)
    423 			return (error);
    424 	}
    425 
    426 	/*
    427 	 * Cache a pointer to the caller's buffer and load the DMA map
    428 	 * with the bounce buffer.
    429 	 */
    430 	cookie->id_origbuf = buf;
    431 	cookie->id_origbuflen = buflen;
    432 	cookie->id_buftype = X86_DMA_BUFTYPE_LINEAR;
    433 	map->dm_nsegs = 0;
    434 	error = bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
    435 	    p, flags);
    436 	if (error)
    437 		return (error);
    438 
    439 	/* ...so _bus_dmamap_sync() knows we're bouncing */
    440 	cookie->id_flags |= X86_DMA_IS_BOUNCING;
    441 	return (0);
    442 }
    443 
    444 static int
    445 _bus_dmamap_load_busaddr(bus_dma_tag_t t, bus_dmamap_t map,
    446     bus_addr_t addr, bus_size_t size)
    447 {
    448 	bus_dma_segment_t * const segs = map->dm_segs;
    449 	int nseg = map->dm_nsegs;
    450 	bus_addr_t bmask = ~(map->_dm_boundary - 1);
    451 	bus_addr_t lastaddr = 0xdead; /* XXX gcc */
    452 	bus_size_t sgsize;
    453 
    454 	if (nseg > 0)
    455 		lastaddr = segs[nseg-1].ds_addr + segs[nseg-1].ds_len;
    456 again:
    457 	sgsize = size;
    458 	/*
    459 	 * Make sure we don't cross any boundaries.
    460 	 */
    461 	if (map->_dm_boundary > 0) {
    462 		bus_addr_t baddr; /* next boundary address */
    463 
    464 		baddr = (addr + map->_dm_boundary) & bmask;
    465 		if (sgsize > (baddr - addr))
    466 			sgsize = (baddr - addr);
    467 	}
    468 
    469 	/*
    470 	 * Insert chunk into a segment, coalescing with
    471 	 * previous segment if possible.
    472 	 */
    473 	if (nseg > 0 && addr == lastaddr &&
    474 	    segs[nseg-1].ds_len + sgsize <= map->dm_maxsegsz &&
    475 	    (map->_dm_boundary == 0 ||
    476 	     (segs[nseg-1].ds_addr & bmask) == (addr & bmask))) {
    477 		/* coalesce */
    478 		segs[nseg-1].ds_len += sgsize;
    479 	} else if (nseg >= map->_dm_segcnt) {
    480 		return EFBIG;
    481 	} else {
    482 		/* new segment */
    483 		segs[nseg].ds_addr = addr;
    484 		segs[nseg].ds_len = sgsize;
    485 		nseg++;
    486 	}
    487 
    488 	lastaddr = addr + sgsize;
    489 	if (map->_dm_bounce_thresh != 0 && lastaddr > map->_dm_bounce_thresh)
    490 		return EINVAL;
    491 
    492 	addr += sgsize;
    493 	size -= sgsize;
    494 	if (size > 0)
    495 		goto again;
    496 
    497 	map->dm_nsegs = nseg;
    498 	return 0;
    499 }
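
/*
 * Illustrative example (not from the original sources): loading two
 * physically contiguous 4 KB pages at bus addresses 0x1000 and 0x2000,
 * one page per call as _bus_dmamap_load_buffer() does, yields a single
 * segment (ds_addr 0x1000, ds_len 0x2000) because the second call sees
 * addr == lastaddr and coalesces.  With _dm_boundary = 0x2000 the same
 * two pages end up in separate segments, since they lie on opposite
 * sides of a 0x2000 boundary.
 */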
    500 
    501 /*
    502  * Like _bus_dmamap_load(), but for mbufs.
    503  */
    504 static int
    505 _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    506     int flags)
    507 {
    508 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
    509 	int error;
    510 	struct mbuf *m;
    511 
    512 	/*
    513 	 * Make sure on error condition we return "no valid mappings."
    514 	 */
    515 	map->dm_mapsize = 0;
    516 	map->dm_nsegs = 0;
    517 	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
    518 
    519 #ifdef DIAGNOSTIC
    520 	if ((m0->m_flags & M_PKTHDR) == 0)
    521 		panic("_bus_dmamap_load_mbuf: no packet header");
    522 #endif
    523 
    524 	if (m0->m_pkthdr.len > map->_dm_size)
    525 		return (EINVAL);
    526 
    527 	error = 0;
    528 	for (m = m0; m != NULL && error == 0; m = m->m_next) {
    529 		int offset;
    530 		int remainbytes;
    531 		const struct vm_page * const *pgs;
    532 		paddr_t paddr;
    533 		int size;
    534 
    535 		if (m->m_len == 0)
    536 			continue;
    537 		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER|M_EXT_PAGES)) {
    538 		case M_EXT|M_EXT_CLUSTER:
    539 			/* XXX KDASSERT */
    540 			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
    541 			paddr = m->m_ext.ext_paddr +
    542 			    (m->m_data - m->m_ext.ext_buf);
    543 			size = m->m_len;
    544 			error = _bus_dmamap_load_busaddr(t, map,
    545 			    _BUS_PHYS_TO_BUS(paddr), size);
    546 			break;
    547 
    548 		case M_EXT|M_EXT_PAGES:
    549 			KASSERT(m->m_ext.ext_buf <= m->m_data);
    550 			KASSERT(m->m_data <=
    551 			    m->m_ext.ext_buf + m->m_ext.ext_size);
    552 
    553 			offset = (vaddr_t)m->m_data -
    554 			    trunc_page((vaddr_t)m->m_ext.ext_buf);
    555 			remainbytes = m->m_len;
    556 
    557 			/* skip uninteresting pages */
    558 			pgs = (const struct vm_page * const *)
    559 			    m->m_ext.ext_pgs + (offset >> PAGE_SHIFT);
    560 
    561 			offset &= PAGE_MASK; /* offset in the first page */
    562 
     563 			/* load each page */
    564 			while (remainbytes > 0) {
    565 				const struct vm_page *pg;
    566 				bus_addr_t busaddr;
    567 
    568 				size = MIN(remainbytes, PAGE_SIZE - offset);
    569 
    570 				pg = *pgs++;
    571 				KASSERT(pg);
    572 				busaddr = _BUS_VM_PAGE_TO_BUS(pg) + offset;
    573 
    574 				error = _bus_dmamap_load_busaddr(t, map,
    575 				    busaddr, size);
    576 				if (error)
    577 					break;
    578 				offset = 0;
    579 				remainbytes -= size;
    580 			}
    581 			break;
    582 
    583 		case 0:
    584 			paddr = m->m_paddr + M_BUFOFFSET(m) +
    585 			    (m->m_data - M_BUFADDR(m));
    586 			size = m->m_len;
    587 			error = _bus_dmamap_load_busaddr(t, map,
    588 			    _BUS_PHYS_TO_BUS(paddr), size);
    589 			break;
    590 
    591 		default:
    592 			error = _bus_dmamap_load_buffer(t, map, m->m_data,
    593 			    m->m_len, vmspace_kernel(), flags);
    594 		}
    595 	}
    596 	if (error == 0) {
    597 		map->dm_mapsize = m0->m_pkthdr.len;
    598 		return 0;
    599 	}
    600 
    601 	map->dm_nsegs = 0;
    602 
    603 	if (cookie == NULL ||
    604 	    (cookie->id_flags & X86_DMA_MIGHT_NEED_BOUNCE) == 0)
    605 		return error;
    606 
    607 	/*
    608 	 * First attempt failed; bounce it.
    609 	 */
    610 
    611 	STAT_INCR(bounces);
    612 
    613 	/*
    614 	 * Allocate bounce pages, if necessary.
    615 	 */
    616 	if ((cookie->id_flags & X86_DMA_HAS_BOUNCE) == 0) {
    617 		error = _bus_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
    618 		    flags);
    619 		if (error)
    620 			return (error);
    621 	}
    622 
    623 	/*
    624 	 * Cache a pointer to the caller's buffer and load the DMA map
    625 	 * with the bounce buffer.
    626 	 */
    627 	cookie->id_origbuf = m0;
    628 	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
    629 	cookie->id_buftype = X86_DMA_BUFTYPE_MBUF;
    630 	error = bus_dmamap_load(t, map, cookie->id_bouncebuf,
    631 	    m0->m_pkthdr.len, NULL, flags);
    632 	if (error)
    633 		return (error);
    634 
    635 	/* ...so _bus_dmamap_sync() knows we're bouncing */
    636 	cookie->id_flags |= X86_DMA_IS_BOUNCING;
    637 	return (0);
    638 }
    639 
    640 /*
    641  * Like _bus_dmamap_load(), but for uios.
    642  */
    643 static int
    644 _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    645     int flags)
    646 {
    647 	int i, error;
    648 	bus_size_t minlen, resid;
    649 	struct vmspace *vm;
    650 	struct iovec *iov;
    651 	void *addr;
    652 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
    653 
    654 	/*
    655 	 * Make sure that on error condition we return "no valid mappings."
    656 	 */
    657 	map->dm_mapsize = 0;
    658 	map->dm_nsegs = 0;
    659 	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
    660 
    661 	resid = uio->uio_resid;
    662 	iov = uio->uio_iov;
    663 
    664 	vm = uio->uio_vmspace;
    665 
    666 	error = 0;
    667 	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
    668 		/*
    669 		 * Now at the first iovec to load.  Load each iovec
    670 		 * until we have exhausted the residual count.
    671 		 */
    672 		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
    673 		addr = (void *)iov[i].iov_base;
    674 
    675 		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
    676 		    vm, flags);
    677 
    678 		resid -= minlen;
    679 	}
    680 	if (error == 0) {
    681 		map->dm_mapsize = uio->uio_resid;
    682 		return 0;
    683 	}
    684 
    685 	map->dm_nsegs = 0;
    686 
    687 	if (cookie == NULL ||
    688 	    (cookie->id_flags & X86_DMA_MIGHT_NEED_BOUNCE) == 0)
    689 		return error;
    690 
    691 	STAT_INCR(bounces);
    692 
    693 	/*
    694 	 * Allocate bounce pages, if necessary.
    695 	 */
    696 	if ((cookie->id_flags & X86_DMA_HAS_BOUNCE) == 0) {
    697 		error = _bus_dma_alloc_bouncebuf(t, map, uio->uio_resid,
    698 		    flags);
    699 		if (error)
    700 			return (error);
    701 	}
    702 
    703 	/*
    704 	 * Cache a pointer to the caller's buffer and load the DMA map
    705 	 * with the bounce buffer.
    706 	 */
    707 	cookie->id_origbuf = uio;
    708 	cookie->id_origbuflen = uio->uio_resid;
    709 	cookie->id_buftype = X86_DMA_BUFTYPE_UIO;
    710 	error = bus_dmamap_load(t, map, cookie->id_bouncebuf,
    711 	    uio->uio_resid, NULL, flags);
    712 	if (error)
    713 		return (error);
    714 
    715 	/* ...so _bus_dmamap_sync() knows we're bouncing */
    716 	cookie->id_flags |= X86_DMA_IS_BOUNCING;
    717 	return (0);
    718 }
    719 
    720 /*
    721  * Like _bus_dmamap_load(), but for raw memory allocated with
    722  * bus_dmamem_alloc().
    723  */
    724 static int
    725 _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    726     bus_dma_segment_t *segs, int nsegs, bus_size_t size0, int flags)
    727 {
    728 	bus_size_t size;
    729 	int i, error = 0;
    730 
    731 	/*
    732 	 * Make sure that on error condition we return "no valid mappings."
    733 	 */
    734 	map->dm_mapsize = 0;
    735 	map->dm_nsegs = 0;
    736 	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
    737 
    738 	if (size0 > map->_dm_size)
    739 		return EINVAL;
    740 
    741 	for (i = 0, size = size0; i < nsegs && size > 0; i++) {
    742 		bus_dma_segment_t *ds = &segs[i];
    743 		bus_size_t sgsize;
    744 
    745 		sgsize = MIN(ds->ds_len, size);
    746 		if (sgsize == 0)
    747 			continue;
    748 		error = _bus_dmamap_load_busaddr(t, map, ds->ds_addr, sgsize);
    749 		if (error != 0)
    750 			break;
    751 		size -= sgsize;
    752 	}
    753 
    754 	if (error != 0) {
    755 		map->dm_mapsize = 0;
    756 		map->dm_nsegs = 0;
    757 		return error;
    758 	}
    759 
    760 	/* XXX TBD bounce */
    761 
    762 	map->dm_mapsize = size0;
    763 	return 0;
    764 }
    765 
    766 /*
    767  * Unload a DMA map.
    768  */
    769 static void
    770 _bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
    771 {
    772 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
    773 
     774 	/*
     775 	 * Any bounce pages stay allocated for reuse; just note that we
     776 	 * are no longer bouncing and invalidate the cached buffer type.
     777 	 */
    778 	if (cookie != NULL) {
    779 		cookie->id_flags &= ~X86_DMA_IS_BOUNCING;
    780 		cookie->id_buftype = X86_DMA_BUFTYPE_INVALID;
    781 	}
    782 	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
    783 	map->dm_mapsize = 0;
    784 	map->dm_nsegs = 0;
    785 }
    786 
    787 /*
    788  * Synchronize a DMA map.
    789  */
    790 static void
    791 _bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    792     bus_size_t len, int ops)
    793 {
    794 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
    795 
    796 	/*
    797 	 * Mixing PRE and POST operations is not allowed.
    798 	 */
    799 	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
    800 	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
    801 		panic("%s: mix PRE and POST", __func__);
    802 
    803 #ifdef DIAGNOSTIC
    804 	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
    805 		if (offset >= map->dm_mapsize)
    806 			panic("%s: bad offset 0x%jx >= 0x%jx", __func__,
    807 			(intmax_t)offset, (intmax_t)map->dm_mapsize);
    808 		if ((offset + len) > map->dm_mapsize)
    809 			panic("%s: bad length 0x%jx + 0x%jx > 0x%jx", __func__,
    810 			    (intmax_t)offset, (intmax_t)len,
    811 			    (intmax_t)map->dm_mapsize);
    812 	}
    813 #endif
    814 
    815 	/*
    816 	 * If we're not bouncing, just return; nothing to do.
    817 	 */
    818 	if (len == 0 || cookie == NULL ||
    819 	    (cookie->id_flags & X86_DMA_IS_BOUNCING) == 0)
    820 		goto end;
    821 
    822 	switch (cookie->id_buftype) {
    823 	case X86_DMA_BUFTYPE_LINEAR:
    824 		/*
    825 		 * Nothing to do for pre-read.
    826 		 */
    827 
    828 		if (ops & BUS_DMASYNC_PREWRITE) {
    829 			/*
    830 			 * Copy the caller's buffer to the bounce buffer.
    831 			 */
    832 			memcpy((char *)cookie->id_bouncebuf + offset,
    833 			    (char *)cookie->id_origbuf + offset, len);
    834 		}
    835 
    836 		if (ops & BUS_DMASYNC_POSTREAD) {
    837 			/*
    838 			 * Copy the bounce buffer to the caller's buffer.
    839 			 */
    840 			memcpy((char *)cookie->id_origbuf + offset,
    841 			    (char *)cookie->id_bouncebuf + offset, len);
    842 		}
    843 
    844 		/*
    845 		 * Nothing to do for post-write.
    846 		 */
    847 		break;
    848 
    849 	case X86_DMA_BUFTYPE_MBUF:
    850 	    {
    851 		struct mbuf *m, *m0 = cookie->id_origbuf;
    852 		bus_size_t minlen, moff;
    853 
    854 		/*
    855 		 * Nothing to do for pre-read.
    856 		 */
    857 
    858 		if (ops & BUS_DMASYNC_PREWRITE) {
    859 			/*
    860 			 * Copy the caller's buffer to the bounce buffer.
    861 			 */
    862 			m_copydata(m0, offset, len,
    863 			    (char *)cookie->id_bouncebuf + offset);
    864 		}
    865 
    866 		if (ops & BUS_DMASYNC_POSTREAD) {
    867 			/*
    868 			 * Copy the bounce buffer to the caller's buffer.
    869 			 */
    870 			for (moff = offset, m = m0; m != NULL && len != 0;
    871 			     m = m->m_next) {
    872 				/* Find the beginning mbuf. */
    873 				if (moff >= m->m_len) {
    874 					moff -= m->m_len;
    875 					continue;
    876 				}
    877 
    878 				/*
    879 				 * Now at the first mbuf to sync; nail
    880 				 * each one until we have exhausted the
    881 				 * length.
    882 				 */
    883 				minlen = len < m->m_len - moff ?
    884 				    len : m->m_len - moff;
    885 
    886 				memcpy(mtod(m, char *) + moff,
    887 				    (char *)cookie->id_bouncebuf + offset,
    888 				    minlen);
    889 
    890 				moff = 0;
    891 				len -= minlen;
    892 				offset += minlen;
    893 			}
    894 		}
    895 
    896 		/*
    897 		 * Nothing to do for post-write.
    898 		 */
    899 		break;
    900 	    }
    901 	case X86_DMA_BUFTYPE_UIO:
    902 	    {
    903 		struct uio *uio;
    904 
    905 		uio = (struct uio *)cookie->id_origbuf;
    906 
    907 		/*
    908 		 * Nothing to do for pre-read.
    909 		 */
    910 
    911 		if (ops & BUS_DMASYNC_PREWRITE) {
    912 			/*
    913 			 * Copy the caller's buffer to the bounce buffer.
    914 			 */
    915 			_bus_dma_uiomove((char *)cookie->id_bouncebuf + offset,
    916 			    uio, len, UIO_WRITE);
    917 		}
    918 
    919 		if (ops & BUS_DMASYNC_POSTREAD) {
    920 			_bus_dma_uiomove((char *)cookie->id_bouncebuf + offset,
    921 			    uio, len, UIO_READ);
    922 		}
    923 
    924 		/*
    925 		 * Nothing to do for post-write.
    926 		 */
    927 		break;
    928 	    }
    929 
    930 	case X86_DMA_BUFTYPE_RAW:
    931 		panic("%s: X86_DMA_BUFTYPE_RAW", __func__);
    932 		break;
    933 
    934 	case X86_DMA_BUFTYPE_INVALID:
    935 		panic("%s: X86_DMA_BUFTYPE_INVALID", __func__);
    936 		break;
    937 
    938 	default:
    939 		panic("%s: unknown buffer type %d", __func__,
    940 		    cookie->id_buftype);
    941 		break;
    942 	}
    943 end:
    944 	if (ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTWRITE)) {
    945 		/*
     946 		 * From the memory's point of view a load can be reordered
     947 		 * ahead of a store (a load may fetch data from the write
     948 		 * buffers before it hits the cache or memory); mfence avoids this.
    949 		 */
    950 		x86_mfence();
    951 	} else if (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_POSTREAD)) {
    952 		/*
     953 		 * All past reads should have completed before this point,
    954 		 * and future reads should not have started yet.
    955 		 */
    956 		x86_lfence();
    957 	}
    958 }
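
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * driver receiving device data into a loaded map would bracket the DMA
 * with sync calls such as the following; when the map is bouncing, the
 * POSTREAD sync also performs the bounce-buffer copy above.
 *
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREREAD);
 *	... hand the buffer to the device, wait for the completion
 *	    interrupt ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTREAD);
 *	... the CPU may now examine the received data ...
 */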
    959 
    960 /*
    961  * Allocate memory safe for DMA.
    962  */
    963 static int
    964 _bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    965     bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    966     int flags)
    967 {
    968 	bus_addr_t high;
    969 
    970 	if (t->_bounce_alloc_hi != 0 && _BUS_AVAIL_END > t->_bounce_alloc_hi)
    971 		high = trunc_page(t->_bounce_alloc_hi);
    972 	else
    973 		high = trunc_page(_BUS_AVAIL_END);
    974 
    975 	return (_BUS_DMAMEM_ALLOC_RANGE(t, size, alignment, boundary,
    976 	    segs, nsegs, rsegs, flags, t->_bounce_alloc_lo, high));
    977 }
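
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * driver allocating a descriptor ring of "ringsize" bytes might chain
 * the dmamem and dmamap interfaces roughly as follows (error handling
 * omitted):
 *
 *	bus_dma_segment_t seg;
 *	bus_dmamap_t map;
 *	void *kva;
 *	int rseg;
 *
 *	bus_dmamem_alloc(sc->sc_dmat, ringsize, PAGE_SIZE, 0, &seg, 1,
 *	    &rseg, BUS_DMA_WAITOK);
 *	bus_dmamem_map(sc->sc_dmat, &seg, rseg, ringsize, &kva,
 *	    BUS_DMA_WAITOK | BUS_DMA_COHERENT);
 *	bus_dmamap_create(sc->sc_dmat, ringsize, 1, ringsize, 0,
 *	    BUS_DMA_WAITOK, &map);
 *	bus_dmamap_load_raw(sc->sc_dmat, map, &seg, rseg, ringsize,
 *	    BUS_DMA_WAITOK);
 */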
    978 
    979 static int
    980 _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    981     bus_size_t size, int flags)
    982 {
    983 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
    984 	int error = 0;
    985 
    986 #ifdef DIAGNOSTIC
    987 	if (cookie == NULL)
    988 		panic("_bus_dma_alloc_bouncebuf: no cookie");
    989 #endif
    990 
    991 	cookie->id_bouncebuflen = round_page(size);
    992 	error = _bus_dmamem_alloc(t, cookie->id_bouncebuflen,
    993 	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
    994 	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
    995 	if (error) {
    996 		cookie->id_bouncebuflen = 0;
    997 		cookie->id_nbouncesegs = 0;
    998 		return error;
    999 	}
   1000 
   1001 	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
   1002 	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
   1003 	    (void **)&cookie->id_bouncebuf, flags);
   1004 
   1005 	if (error) {
   1006 		_bus_dmamem_free(t, cookie->id_bouncesegs,
   1007 		    cookie->id_nbouncesegs);
   1008 		cookie->id_bouncebuflen = 0;
   1009 		cookie->id_nbouncesegs = 0;
   1010 	} else {
   1011 		cookie->id_flags |= X86_DMA_HAS_BOUNCE;
   1012 		STAT_INCR(nbouncebufs);
   1013 	}
   1014 
   1015 	return (error);
   1016 }
   1017 
   1018 static void
   1019 _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
   1020 {
   1021 	struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
   1022 
   1023 #ifdef DIAGNOSTIC
   1024 	if (cookie == NULL)
   1025 		panic("_bus_dma_free_bouncebuf: no cookie");
   1026 #endif
   1027 
   1028 	STAT_DECR(nbouncebufs);
   1029 
   1030 	_bus_dmamem_unmap(t, cookie->id_bouncebuf, cookie->id_bouncebuflen);
   1031 	_bus_dmamem_free(t, cookie->id_bouncesegs,
   1032 	    cookie->id_nbouncesegs);
   1033 	cookie->id_bouncebuflen = 0;
   1034 	cookie->id_nbouncesegs = 0;
   1035 	cookie->id_flags &= ~X86_DMA_HAS_BOUNCE;
   1036 }
   1037 
   1038 
   1039 /*
   1040  * This function does the same as uiomove, but takes an explicit
   1041  * direction, and does not update the uio structure.
   1042  */
   1043 static int
   1044 _bus_dma_uiomove(void *buf, struct uio *uio, size_t n, int direction)
   1045 {
   1046 	struct iovec *iov;
   1047 	int error;
   1048 	struct vmspace *vm;
   1049 	char *cp;
   1050 	size_t resid, cnt;
   1051 	int i;
   1052 
   1053 	iov = uio->uio_iov;
   1054 	vm = uio->uio_vmspace;
   1055 	cp = buf;
   1056 	resid = n;
   1057 
   1058 	for (i = 0; i < uio->uio_iovcnt && resid > 0; i++) {
   1059 		iov = &uio->uio_iov[i];
   1060 		if (iov->iov_len == 0)
   1061 			continue;
   1062 		cnt = MIN(resid, iov->iov_len);
   1063 
   1064 		if (!VMSPACE_IS_KERNEL_P(vm) &&
   1065 		    (curlwp->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
   1066 		    != 0) {
   1067 			preempt();
   1068 		}
   1069 		if (direction == UIO_READ) {
   1070 			error = copyout_vmspace(vm, cp, iov->iov_base, cnt);
   1071 		} else {
   1072 			error = copyin_vmspace(vm, iov->iov_base, cp, cnt);
   1073 		}
   1074 		if (error)
   1075 			return (error);
   1076 		cp += cnt;
   1077 		resid -= cnt;
   1078 	}
   1079 	return (0);
   1080 }
   1081 
   1082 /*
   1083  * Common function for freeing DMA-safe memory.  May be called by
   1084  * bus-specific DMA memory free functions.
   1085  */
   1086 static void
   1087 _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
   1088 {
   1089 	struct vm_page *m;
   1090 	bus_addr_t addr;
   1091 	struct pglist mlist;
   1092 	int curseg;
   1093 
   1094 	/*
   1095 	 * Build a list of pages to free back to the VM system.
   1096 	 */
   1097 	TAILQ_INIT(&mlist);
   1098 	for (curseg = 0; curseg < nsegs; curseg++) {
   1099 		for (addr = segs[curseg].ds_addr;
   1100 		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
   1101 		    addr += PAGE_SIZE) {
   1102 			m = _BUS_BUS_TO_VM_PAGE(addr);
   1103 			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
   1104 		}
   1105 	}
   1106 
   1107 	uvm_pglistfree(&mlist);
   1108 }
   1109 
   1110 /*
   1111  * Common function for mapping DMA-safe memory.  May be called by
   1112  * bus-specific DMA memory map functions.
   1113  * This supports BUS_DMA_NOCACHE.
   1114  */
   1115 static int
   1116 _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
   1117     size_t size, void **kvap, int flags)
   1118 {
   1119 	vaddr_t va;
   1120 	bus_addr_t addr;
   1121 	int curseg;
   1122 	const uvm_flag_t kmflags =
   1123 	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
   1124 	u_int pmapflags = PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE;
   1125 
   1126 	size = round_page(size);
   1127 	if (flags & BUS_DMA_NOCACHE)
   1128 		pmapflags |= PMAP_NOCACHE;
   1129 
   1130 	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
   1131 
   1132 	if (va == 0)
   1133 		return ENOMEM;
   1134 
   1135 	*kvap = (void *)va;
   1136 
   1137 	for (curseg = 0; curseg < nsegs; curseg++) {
   1138 		for (addr = segs[curseg].ds_addr;
   1139 		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
   1140 		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
   1141 			if (size == 0)
   1142 				panic("_bus_dmamem_map: size botch");
   1143 			_BUS_PMAP_ENTER(pmap_kernel(), va, addr,
   1144 			    VM_PROT_READ | VM_PROT_WRITE,
   1145 			    pmapflags);
   1146 		}
   1147 	}
   1148 	pmap_update(pmap_kernel());
   1149 
   1150 	return 0;
   1151 }
   1152 
   1153 /*
   1154  * Common function for unmapping DMA-safe memory.  May be called by
   1155  * bus-specific DMA memory unmapping functions.
   1156  */
   1157 
   1158 static void
   1159 _bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
   1160 {
   1161 	pt_entry_t *pte, opte;
   1162 	vaddr_t va, sva, eva;
   1163 
   1164 #ifdef DIAGNOSTIC
   1165 	if ((u_long)kva & PGOFSET)
   1166 		panic("_bus_dmamem_unmap");
   1167 #endif
   1168 
   1169 	size = round_page(size);
   1170 	sva = (vaddr_t)kva;
   1171 	eva = sva + size;
   1172 
    1173 	/*
    1174 	 * Mark the pages cacheable again.
    1175 	 */
   1176 	for (va = sva; va < eva; va += PAGE_SIZE) {
   1177 		pte = kvtopte(va);
   1178 		opte = *pte;
   1179 		if ((opte & PG_N) != 0)
   1180 			pmap_pte_clearbits(pte, PG_N);
   1181 	}
   1182 	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
   1183 	pmap_update(pmap_kernel());
   1184 	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
   1185 }
   1186 
   1187 /*
   1188  * Common function for mmap(2)'ing DMA-safe memory.  May be called by
   1189  * bus-specific DMA mmap(2)'ing functions.
   1190  */
   1191 static paddr_t
   1192 _bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
   1193     off_t off, int prot, int flags)
   1194 {
   1195 	int i;
   1196 
   1197 	for (i = 0; i < nsegs; i++) {
   1198 #ifdef DIAGNOSTIC
   1199 		if (off & PGOFSET)
   1200 			panic("_bus_dmamem_mmap: offset unaligned");
   1201 		if (segs[i].ds_addr & PGOFSET)
   1202 			panic("_bus_dmamem_mmap: segment unaligned");
   1203 		if (segs[i].ds_len & PGOFSET)
   1204 			panic("_bus_dmamem_mmap: segment size not multiple"
   1205 			    " of page size");
   1206 #endif
   1207 		if (off >= segs[i].ds_len) {
   1208 			off -= segs[i].ds_len;
   1209 			continue;
   1210 		}
   1211 
   1212 		return (x86_btop(_BUS_BUS_TO_PHYS(segs[i].ds_addr + off)));
   1213 	}
   1214 
   1215 	/* Page not found. */
   1216 	return (-1);
   1217 }
   1218 
   1219 /**********************************************************************
   1220  * DMA utility functions
   1221  **********************************************************************/
   1222 
   1223 /*
   1224  * Utility function to load a linear buffer.
   1225  */
   1226 static int
   1227 _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
   1228     bus_size_t buflen, struct vmspace *vm, int flags)
   1229 {
   1230 	bus_size_t sgsize;
   1231 	bus_addr_t curaddr;
   1232 	vaddr_t vaddr = (vaddr_t)buf;
   1233 	pmap_t pmap;
   1234 
   1235 	if (vm != NULL)
   1236 		pmap = vm_map_pmap(&vm->vm_map);
   1237 	else
   1238 		pmap = pmap_kernel();
   1239 
   1240 	while (buflen > 0) {
   1241 		int error;
   1242 
   1243 		/*
   1244 		 * Get the bus address for this segment.
   1245 		 */
   1246 		curaddr = _BUS_VIRT_TO_BUS(pmap, vaddr);
   1247 
   1248 		/*
   1249 		 * Compute the segment size, and adjust counts.
   1250 		 */
   1251 		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
   1252 		if (buflen < sgsize)
   1253 			sgsize = buflen;
   1254 
   1255 		/*
   1256 		 * If we're beyond the bounce threshold, notify
   1257 		 * the caller.
   1258 		 */
   1259 		if (map->_dm_bounce_thresh != 0 &&
   1260 		    curaddr + sgsize >= map->_dm_bounce_thresh)
   1261 			return (EINVAL);
   1262 
   1263 
   1264 		error = _bus_dmamap_load_busaddr(t, map, curaddr, sgsize);
   1265 		if (error)
   1266 			return error;
   1267 
   1268 		vaddr += sgsize;
   1269 		buflen -= sgsize;
   1270 	}
   1271 
   1272 	return (0);
   1273 }
   1274 
   1275 static int
   1276 _bus_dmatag_subregion(bus_dma_tag_t tag, bus_addr_t min_addr,
   1277 		      bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags)
   1278 {
   1279 
   1280 	if ((tag->_bounce_thresh != 0   && max_addr >= tag->_bounce_thresh) &&
   1281 	    (tag->_bounce_alloc_hi != 0 && max_addr >= tag->_bounce_alloc_hi) &&
   1282 	    (min_addr <= tag->_bounce_alloc_lo)) {
   1283 		*newtag = tag;
   1284 		/* if the tag must be freed, add a reference */
   1285 		if (tag->_tag_needs_free)
   1286 			(tag->_tag_needs_free)++;
   1287 		return 0;
   1288 	}
   1289 
   1290 	if ((*newtag = malloc(sizeof(struct x86_bus_dma_tag), M_DMAMAP,
   1291 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
   1292 		return ENOMEM;
   1293 
   1294 	**newtag = *tag;
   1295 	(*newtag)->_tag_needs_free = 1;
   1296 
   1297 	if (tag->_bounce_thresh == 0 || max_addr < tag->_bounce_thresh)
   1298 		(*newtag)->_bounce_thresh = max_addr;
   1299 	if (tag->_bounce_alloc_hi == 0 || max_addr < tag->_bounce_alloc_hi)
   1300 		(*newtag)->_bounce_alloc_hi = max_addr;
   1301 	if (min_addr > tag->_bounce_alloc_lo)
   1302 		(*newtag)->_bounce_alloc_lo = min_addr;
   1303 
   1304 	return 0;
   1305 }
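
/*
 * Illustrative example (not part of the original file): a hypothetical
 * driver for a device that can only address the low 4 GB could derive
 * a restricted tag from the one handed to it at attach time:
 *
 *	bus_dma_tag_t dmat32;
 *
 *	if (bus_dmatag_subregion(pa->pa_dmat64, 0, 0xffffffffULL,
 *	    &dmat32, BUS_DMA_WAITOK) != 0)
 *		dmat32 = pa->pa_dmat;
 *
 * and release it again with bus_dmatag_destroy(dmat32) when the driver
 * detaches.  "pa" stands in for a bus attach-args structure and is an
 * assumption of this sketch.
 */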
   1306 
   1307 static void
   1308 _bus_dmatag_destroy(bus_dma_tag_t tag)
   1309 {
   1310 
   1311 	switch (tag->_tag_needs_free) {
   1312 	case 0:
   1313 		break;				/* not allocated with malloc */
   1314 	case 1:
   1315 		free(tag, M_DMAMAP);		/* last reference to tag */
   1316 		break;
   1317 	default:
   1318 		(tag->_tag_needs_free)--;	/* one less reference */
   1319 	}
   1320 }
   1321 
   1322 
   1323 void
   1324 bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t p, bus_addr_t o, bus_size_t l,
   1325 		int ops)
   1326 {
   1327 	bus_dma_tag_t it;
   1328 
   1329 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_SYNC) == 0)
   1330 		;	/* skip override */
   1331 	else for (it = t; it != NULL; it = it->bdt_super) {
   1332 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_SYNC) == 0)
   1333 			continue;
   1334 		(*it->bdt_ov->ov_dmamap_sync)(it->bdt_ctx, t, p, o,
   1335 		    l, ops);
   1336 		return;
   1337 	}
   1338 
   1339 	if (ops & BUS_DMASYNC_POSTREAD)
   1340 		x86_lfence();
   1341 
   1342 	_bus_dmamap_sync(t, p, o, l, ops);
   1343 }
   1344 
   1345 int
   1346 bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
   1347 		  bus_size_t maxsegsz, bus_size_t boundary, int flags,
   1348 		  bus_dmamap_t *dmamp)
   1349 {
   1350 	bus_dma_tag_t it;
   1351 
   1352 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_CREATE) == 0)
   1353 		;	/* skip override */
   1354 	else for (it = t; it != NULL; it = it->bdt_super) {
   1355 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_CREATE) == 0)
   1356 			continue;
   1357 		return (*it->bdt_ov->ov_dmamap_create)(it->bdt_ctx, t, size,
   1358 		    nsegments, maxsegsz, boundary, flags, dmamp);
   1359 	}
   1360 
   1361 	return _bus_dmamap_create(t, size, nsegments, maxsegsz,
   1362 	    boundary, flags, dmamp);
   1363 }
   1364 
   1365 void
   1366 bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t dmam)
   1367 {
   1368 	bus_dma_tag_t it;
   1369 
   1370 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_DESTROY) == 0)
   1371 		;	/* skip override */
   1372 	else for (it = t; it != NULL; it = it->bdt_super) {
   1373 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_DESTROY) == 0)
   1374 			continue;
   1375 		(*it->bdt_ov->ov_dmamap_destroy)(it->bdt_ctx, t, dmam);
   1376 		return;
   1377 	}
   1378 
   1379 	_bus_dmamap_destroy(t, dmam);
   1380 }
   1381 
   1382 int
   1383 bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t dmam, void *buf,
   1384 		bus_size_t buflen, struct proc *p, int flags)
   1385 {
   1386 	bus_dma_tag_t it;
   1387 
   1388 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD) == 0)
   1389 		;	/* skip override */
   1390 	else for (it = t; it != NULL; it = it->bdt_super) {
   1391 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_LOAD) == 0)
   1392 			continue;
   1393 		return (*it->bdt_ov->ov_dmamap_load)(it->bdt_ctx, t, dmam,
   1394 		    buf, buflen, p, flags);
   1395 	}
   1396 
   1397 	return _bus_dmamap_load(t, dmam, buf, buflen, p, flags);
   1398 }
   1399 
   1400 int
   1401 bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t dmam,
   1402 		     struct mbuf *chain, int flags)
   1403 {
   1404 	bus_dma_tag_t it;
   1405 
   1406 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD_MBUF) == 0)
   1407 		;	/* skip override */
   1408 	else for (it = t; it != NULL; it = it->bdt_super) {
   1409 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_LOAD_MBUF) == 0)
   1410 			continue;
   1411 		return (*it->bdt_ov->ov_dmamap_load_mbuf)(it->bdt_ctx, t, dmam,
   1412 		    chain, flags);
   1413 	}
   1414 
   1415 	return _bus_dmamap_load_mbuf(t, dmam, chain, flags);
   1416 }
   1417 
   1418 int
   1419 bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t dmam,
   1420 		    struct uio *uio, int flags)
   1421 {
   1422 	bus_dma_tag_t it;
   1423 
   1424 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD_UIO) == 0)
   1425 		;	/* skip override */
   1426 	else for (it = t; it != NULL; it = it->bdt_super) {
   1427 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_LOAD_UIO) == 0)
   1428 			continue;
   1429 		return (*it->bdt_ov->ov_dmamap_load_uio)(it->bdt_ctx, t, dmam,
   1430 		    uio, flags);
   1431 	}
   1432 
   1433 	return _bus_dmamap_load_uio(t, dmam, uio, flags);
   1434 }
   1435 
   1436 int
   1437 bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t dmam,
   1438 		    bus_dma_segment_t *segs, int nsegs,
   1439 		    bus_size_t size, int flags)
   1440 {
   1441 	bus_dma_tag_t it;
   1442 
   1443 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD_RAW) == 0)
   1444 		;	/* skip override */
   1445 	else for (it = t; it != NULL; it = it->bdt_super) {
   1446 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_LOAD_RAW) == 0)
   1447 			continue;
   1448 		return (*it->bdt_ov->ov_dmamap_load_raw)(it->bdt_ctx, t, dmam,
   1449 		    segs, nsegs, size, flags);
   1450 	}
   1451 
   1452 	return _bus_dmamap_load_raw(t, dmam, segs, nsegs, size, flags);
   1453 }
   1454 
   1455 void
   1456 bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t dmam)
   1457 {
   1458 	bus_dma_tag_t it;
   1459 
   1460 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_UNLOAD) == 0)
   1461 		;	/* skip override */
   1462 	else for (it = t; it != NULL; it = it->bdt_super) {
   1463 		if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_UNLOAD) == 0)
   1464 			continue;
   1465 		(*it->bdt_ov->ov_dmamap_unload)(it->bdt_ctx, t, dmam);
   1466 		return;
   1467 	}
   1468 
   1469 	_bus_dmamap_unload(t, dmam);
   1470 }
   1471 
   1472 int
   1473 bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
   1474 		 bus_size_t boundary, bus_dma_segment_t *segs, int nsegs,
   1475 		 int *rsegs, int flags)
   1476 {
   1477 	bus_dma_tag_t it;
   1478 
   1479 	if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_ALLOC) == 0)
   1480 		;	/* skip override */
   1481 	else for (it = t; it != NULL; it = it->bdt_super) {
   1482 		if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_ALLOC) == 0)
   1483 			continue;
   1484 		return (*it->bdt_ov->ov_dmamem_alloc)(it->bdt_ctx, t, size,
   1485 		    alignment, boundary, segs, nsegs, rsegs, flags);
   1486 	}
   1487 
   1488 	return _bus_dmamem_alloc(t, size, alignment, boundary, segs,
   1489 	    nsegs, rsegs, flags);
   1490 }
   1491 
   1492 void
   1493 bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
   1494 {
   1495 	bus_dma_tag_t it;
   1496 
   1497 	if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_FREE) == 0)
   1498 		;	/* skip override */
   1499 	else for (it = t; it != NULL; it = it->bdt_super) {
   1500 		if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_FREE) == 0)
   1501 			continue;
   1502 		(*it->bdt_ov->ov_dmamem_free)(it->bdt_ctx, t, segs, nsegs);
   1503 		return;
   1504 	}
   1505 
   1506 	_bus_dmamem_free(t, segs, nsegs);
   1507 }
   1508 
   1509 int
   1510 bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
   1511 	       size_t size, void **kvap, int flags)
   1512 {
   1513 	bus_dma_tag_t it;
   1514 
   1515 	if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_MAP) == 0)
   1516 		;	/* skip override */
   1517 	else for (it = t; it != NULL; it = it->bdt_super) {
   1518 		if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_MAP) == 0)
   1519 			continue;
   1520 		return (*it->bdt_ov->ov_dmamem_map)(it->bdt_ctx, t,
   1521 		    segs, nsegs, size, kvap, flags);
   1522 	}
   1523 
   1524 	return _bus_dmamem_map(t, segs, nsegs, size, kvap, flags);
   1525 }
   1526 
   1527 void
   1528 bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
   1529 {
   1530 	bus_dma_tag_t it;
   1531 
   1532 	if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_UNMAP) == 0)
   1533 		;	/* skip override */
   1534 	else for (it = t; it != NULL; it = it->bdt_super) {
   1535 		if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_UNMAP) == 0)
   1536 			continue;
   1537 		(*it->bdt_ov->ov_dmamem_unmap)(it->bdt_ctx, t, kva, size);
   1538 		return;
   1539 	}
   1540 
   1541 	_bus_dmamem_unmap(t, kva, size);
   1542 }
   1543 
   1544 paddr_t
   1545 bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
   1546 		off_t off, int prot, int flags)
   1547 {
   1548 	bus_dma_tag_t it;
   1549 
   1550 	if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_MMAP) == 0)
   1551 		;	/* skip override */
   1552 	else for (it = t; it != NULL; it = it->bdt_super) {
   1553 		if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_MMAP) == 0)
   1554 			continue;
   1555 		return (*it->bdt_ov->ov_dmamem_mmap)(it->bdt_ctx, t, segs,
   1556 		    nsegs, off, prot, flags);
   1557 	}
   1558 
   1559 	return _bus_dmamem_mmap(t, segs, nsegs, off, prot, flags);
   1560 }
   1561 
   1562 int
   1563 bus_dmatag_subregion(bus_dma_tag_t t, bus_addr_t min_addr,
   1564 		     bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags)
   1565 {
   1566 	bus_dma_tag_t it;
   1567 
   1568 	if ((t->bdt_exists & BUS_DMATAG_OVERRIDE_SUBREGION) == 0)
   1569 		;	/* skip override */
   1570 	else for (it = t; it != NULL; it = it->bdt_super) {
   1571 		if ((it->bdt_present & BUS_DMATAG_OVERRIDE_SUBREGION) == 0)
   1572 			continue;
   1573 		return (*it->bdt_ov->ov_dmatag_subregion)(it->bdt_ctx, t,
   1574 		    min_addr, max_addr, newtag, flags);
   1575 	}
   1576 
   1577 	return _bus_dmatag_subregion(t, min_addr, max_addr, newtag, flags);
   1578 }
   1579 
   1580 void
   1581 bus_dmatag_destroy(bus_dma_tag_t t)
   1582 {
   1583 	bus_dma_tag_t it;
   1584 
   1585 	if ((t->bdt_exists & BUS_DMATAG_OVERRIDE_DESTROY) == 0)
   1586 		;	/* skip override */
   1587 	else for (it = t; it != NULL; it = it->bdt_super) {
   1588 		if ((it->bdt_present & BUS_DMATAG_OVERRIDE_DESTROY) == 0)
   1589 			continue;
   1590 		(*it->bdt_ov->ov_dmatag_destroy)(it->bdt_ctx, t);
   1591 		return;
   1592 	}
   1593 
   1594 	_bus_dmatag_destroy(t);
   1595 }
   1596 
   1597 static const void *
   1598 bit_to_function_pointer(const struct bus_dma_overrides *ov, uint64_t bit)
   1599 {
   1600 	switch (bit) {
   1601 	case BUS_DMAMAP_OVERRIDE_CREATE:
   1602 		return ov->ov_dmamap_create;
   1603 	case BUS_DMAMAP_OVERRIDE_DESTROY:
   1604 		return ov->ov_dmamap_destroy;
   1605 	case BUS_DMAMAP_OVERRIDE_LOAD:
   1606 		return ov->ov_dmamap_load;
   1607 	case BUS_DMAMAP_OVERRIDE_LOAD_MBUF:
   1608 		return ov->ov_dmamap_load_mbuf;
   1609 	case BUS_DMAMAP_OVERRIDE_LOAD_UIO:
   1610 		return ov->ov_dmamap_load_uio;
   1611 	case BUS_DMAMAP_OVERRIDE_LOAD_RAW:
   1612 		return ov->ov_dmamap_load_raw;
   1613 	case BUS_DMAMAP_OVERRIDE_UNLOAD:
   1614 		return ov->ov_dmamap_unload;
   1615 	case BUS_DMAMAP_OVERRIDE_SYNC:
   1616 		return ov->ov_dmamap_sync;
   1617 	case BUS_DMAMEM_OVERRIDE_ALLOC:
   1618 		return ov->ov_dmamem_alloc;
   1619 	case BUS_DMAMEM_OVERRIDE_FREE:
   1620 		return ov->ov_dmamem_free;
   1621 	case BUS_DMAMEM_OVERRIDE_MAP:
   1622 		return ov->ov_dmamem_map;
   1623 	case BUS_DMAMEM_OVERRIDE_UNMAP:
   1624 		return ov->ov_dmamem_unmap;
   1625 	case BUS_DMAMEM_OVERRIDE_MMAP:
   1626 		return ov->ov_dmamem_mmap;
   1627 	case BUS_DMATAG_OVERRIDE_SUBREGION:
   1628 		return ov->ov_dmatag_subregion;
   1629 	case BUS_DMATAG_OVERRIDE_DESTROY:
   1630 		return ov->ov_dmatag_destroy;
   1631 	default:
   1632 		return NULL;
   1633 	}
   1634 }
   1635 
   1636 void
   1637 bus_dma_tag_destroy(bus_dma_tag_t bdt)
   1638 {
   1639 	if (bdt->bdt_super != NULL)
   1640 		bus_dmatag_destroy(bdt->bdt_super);
   1641 	kmem_free(bdt, sizeof(struct x86_bus_dma_tag));
   1642 }
   1643 
   1644 int
   1645 bus_dma_tag_create(bus_dma_tag_t obdt, const uint64_t present,
   1646     const struct bus_dma_overrides *ov, void *ctx, bus_dma_tag_t *bdtp)
   1647 {
   1648 	uint64_t bit, bits, nbits;
   1649 	bus_dma_tag_t bdt;
   1650 	const void *fp;
   1651 
   1652 	if (ov == NULL || present == 0)
   1653 		return EINVAL;
   1654 
   1655 	bdt = kmem_alloc(sizeof(struct x86_bus_dma_tag), KM_SLEEP);
   1656 	*bdt = *obdt;
   1657 	/* don't let bus_dmatag_destroy free these */
   1658 	bdt->_tag_needs_free = 0;
   1659 
   1660 	bdt->bdt_super = obdt;
   1661 
   1662 	for (bits = present; bits != 0; bits = nbits) {
   1663 		nbits = bits & (bits - 1);
   1664 		bit = nbits ^ bits;
   1665 		if ((fp = bit_to_function_pointer(ov, bit)) == NULL) {
   1666 #ifdef DEBUG
   1667 			printf("%s: missing bit %" PRIx64 "\n", __func__, bit);
   1668 #endif
   1669 			goto einval;
   1670 		}
   1671 	}
   1672 
   1673 	bdt->bdt_ov = ov;
   1674 	bdt->bdt_exists = obdt->bdt_exists | present;
   1675 	bdt->bdt_present = present;
   1676 	bdt->bdt_ctx = ctx;
   1677 
   1678 	*bdtp = bdt;
   1679 	if (obdt->_tag_needs_free)
   1680 		obdt->_tag_needs_free++;
   1681 
   1682 	return 0;
   1683 einval:
   1684 	kmem_free(bdt, sizeof(struct x86_bus_dma_tag));
   1685 	return EINVAL;
   1686 }
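
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * layer that needs to interpose on map synchronization (for example an
 * IOMMU-like translation layer) could stack an override tag on top of
 * an existing one.  Only the operations named in "present" are
 * overridden; everything else keeps taking the normal path through the
 * functions above.
 *
 *	static void
 *	myov_dmamap_sync(void *ctx, bus_dma_tag_t t, bus_dmamap_t map,
 *	    bus_addr_t offset, bus_size_t len, int ops)
 *	{
 *		... flush or invalidate translation state here ...
 *	}
 *
 *	static const struct bus_dma_overrides myov = {
 *		.ov_dmamap_sync = myov_dmamap_sync,
 *	};
 *
 *	error = bus_dma_tag_create(parent_tag, BUS_DMAMAP_OVERRIDE_SYNC,
 *	    &myov, sc, &sc->sc_dmat);
 */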
   1687