/*	$NetBSD: bus_dma.c,v 1.80 2013/02/18 16:03:25 matt Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _ARM32_BUS_DMA_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.80 2013/02/18 16:03:25 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/device.h>

#include <uvm/uvm.h>

#include <sys/bus.h>
#include <machine/cpu.h>

#include <arm/cpufunc.h>

#ifdef BUSDMA_COUNTERS
static struct evcnt bus_dma_creates =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "creates");
static struct evcnt bus_dma_bounced_creates =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced creates");
static struct evcnt bus_dma_loads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "loads");
static struct evcnt bus_dma_bounced_loads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced loads");
static struct evcnt bus_dma_read_bounces =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "read bounces");
static struct evcnt bus_dma_write_bounces =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "write bounces");
static struct evcnt bus_dma_bounced_unloads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced unloads");
static struct evcnt bus_dma_unloads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "unloads");
static struct evcnt bus_dma_bounced_destroys =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced destroys");
static struct evcnt bus_dma_destroys =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "destroys");
static struct evcnt bus_dma_sync_prereadwrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync prereadwrite");
static struct evcnt bus_dma_sync_preread_begin =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread begin");
static struct evcnt bus_dma_sync_preread =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread");
static struct evcnt bus_dma_sync_preread_tail =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread tail");
static struct evcnt bus_dma_sync_prewrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync prewrite");
static struct evcnt bus_dma_sync_postread =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postread");
static struct evcnt bus_dma_sync_postreadwrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postreadwrite");
static struct evcnt bus_dma_sync_postwrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postwrite");

EVCNT_ATTACH_STATIC(bus_dma_creates);
EVCNT_ATTACH_STATIC(bus_dma_bounced_creates);
EVCNT_ATTACH_STATIC(bus_dma_loads);
EVCNT_ATTACH_STATIC(bus_dma_bounced_loads);
EVCNT_ATTACH_STATIC(bus_dma_read_bounces);
EVCNT_ATTACH_STATIC(bus_dma_write_bounces);
EVCNT_ATTACH_STATIC(bus_dma_unloads);
EVCNT_ATTACH_STATIC(bus_dma_bounced_unloads);
EVCNT_ATTACH_STATIC(bus_dma_destroys);
EVCNT_ATTACH_STATIC(bus_dma_bounced_destroys);
EVCNT_ATTACH_STATIC(bus_dma_sync_prereadwrite);
EVCNT_ATTACH_STATIC(bus_dma_sync_preread_begin);
EVCNT_ATTACH_STATIC(bus_dma_sync_preread);
EVCNT_ATTACH_STATIC(bus_dma_sync_preread_tail);
EVCNT_ATTACH_STATIC(bus_dma_sync_prewrite);
EVCNT_ATTACH_STATIC(bus_dma_sync_postread);
EVCNT_ATTACH_STATIC(bus_dma_sync_postreadwrite);
EVCNT_ATTACH_STATIC(bus_dma_sync_postwrite);

#define	STAT_INCR(x)	(bus_dma_ ## x.ev_count++)
#else
#define	STAT_INCR(x)	/*(bus_dma_ ## x.ev_count++)*/
#endif

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct vmspace *, int);
static struct arm32_dma_range *
	_bus_dma_paddr_inrange(struct arm32_dma_range *, int, paddr_t);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
static inline struct arm32_dma_range *
_bus_dma_paddr_inrange(struct arm32_dma_range *ranges, int nranges,
    paddr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}
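
/*
 * Illustrative sketch (not part of this file, kept under #if 0 so it
 * is never compiled): how a platform might describe a DMA window and
 * how _bus_dma_paddr_inrange() is used to find it.  The table and the
 * addresses below are hypothetical; real tables come from the
 * platform's bus attachment code.
 */
#if 0
static struct arm32_dma_range example_ranges[] = {
	{
		.dr_sysbase = 0x80000000,	/* CPU view of the RAM */
		.dr_busbase = 0x00000000,	/* device view of the same RAM */
		.dr_len     = 0x10000000,	/* a 256MB window */
		.dr_flags   = 0,
	},
};

static void
example_range_lookup(void)
{
	paddr_t pa = 0x80123000;	/* physical address inside the window */
	struct arm32_dma_range *dr =
	    _bus_dma_paddr_inrange(example_ranges, 1, pa);

	if (dr != NULL) {
		/* Translated exactly as _bus_dmamap_load_paddr() does: */
		bus_addr_t busaddr = (pa - dr->dr_sysbase) + dr->dr_busbase;
		/* busaddr == 0x00123000 */
	}
}
#endif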

/*
 * Check to see if the specified busaddr is in an allowed DMA range.
 */
static inline paddr_t
_bus_dma_busaddr_to_paddr(bus_dma_tag_t t, bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	u_int i;

	if (t->_nranges == 0)
		return curaddr;

	for (i = 0, dr = t->_ranges; i < t->_nranges; i++, dr++) {
		if (dr->dr_busbase <= curaddr
		    && round_page(curaddr) <= dr->dr_busbase + dr->dr_len)
			return curaddr - dr->dr_busbase + dr->dr_sysbase;
	}
	panic("%s: curaddr %#lx not in range", __func__, curaddr);
}

/*
 * Common function to load the specified physical address into the
 * DMA map, coalescing segments and boundary checking as necessary.
 */
static int
_bus_dmamap_load_paddr(bus_dma_tag_t t, bus_dmamap_t map,
    bus_addr_t paddr, bus_size_t size, bool coherent)
{
	bus_dma_segment_t * const segs = map->dm_segs;
	int nseg = map->dm_nsegs;
	bus_addr_t lastaddr;
	bus_addr_t bmask = ~(map->_dm_boundary - 1);
	bus_addr_t curaddr;
	bus_size_t sgsize;
	uint32_t _ds_flags = coherent ? _BUS_DMAMAP_COHERENT : 0;

	if (nseg > 0)
		lastaddr = segs[nseg-1].ds_addr + segs[nseg-1].ds_len;
	else
		lastaddr = 0xdead;

 again:
	sgsize = size;

	/* Make sure we're in an allowed DMA range. */
	if (t->_ranges != NULL) {
		/* XXX cache last result? */
		const struct arm32_dma_range * const dr =
		    _bus_dma_paddr_inrange(t->_ranges, t->_nranges, paddr);
		if (dr == NULL)
			return (EINVAL);

		/*
		 * If this region is coherent, mark the segment as coherent.
		 */
		_ds_flags |= dr->dr_flags & _BUS_DMAMAP_COHERENT;

		/*
		 * In a valid DMA range.  Translate the physical
		 * memory address to an address in the DMA window.
		 */
		curaddr = (paddr - dr->dr_sysbase) + dr->dr_busbase;
#if 0
		printf("%p: %#lx: range %#lx/%#lx/%#lx/%#x: %#x <-- %#lx\n",
		    t, paddr, dr->dr_sysbase, dr->dr_busbase,
		    dr->dr_len, dr->dr_flags, _ds_flags, curaddr);
#endif
	} else
		curaddr = paddr;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	if (map->_dm_boundary > 0) {
		bus_addr_t baddr;	/* next boundary address */

		baddr = (curaddr + map->_dm_boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	/*
	 * Insert chunk into a segment, coalescing with the
	 * previous segment if possible.
	 */
	if (nseg > 0 && curaddr == lastaddr &&
	    segs[nseg-1].ds_len + sgsize <= map->dm_maxsegsz &&
	    ((segs[nseg-1]._ds_flags ^ _ds_flags) & _BUS_DMAMAP_COHERENT) == 0 &&
	    (map->_dm_boundary == 0 ||
	     (segs[nseg-1].ds_addr & bmask) == (curaddr & bmask))) {
		/* coalesce */
		segs[nseg-1].ds_len += sgsize;
	} else if (nseg >= map->_dm_segcnt) {
		return (EFBIG);
	} else {
		/* new segment */
		segs[nseg].ds_addr = curaddr;
		segs[nseg].ds_len = sgsize;
		segs[nseg]._ds_flags = _ds_flags;
		nseg++;
	}

	lastaddr = curaddr + sgsize;

	paddr += sgsize;
	size -= sgsize;
	if (size > 0)
		goto again;

	/*
	 * The map is coherent only while every loaded chunk is coherent,
	 * so clear _BUS_DMAMAP_COHERENT if this chunk is not, while
	 * preserving the other map flags.
	 */
	map->_dm_flags &= ~_BUS_DMAMAP_COHERENT |
	    (_ds_flags & _BUS_DMAMAP_COHERENT);
	map->dm_nsegs = nseg;
	return (0);
}
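
/*
 * Worked example (illustrative only, never compiled) of the boundary
 * arithmetic in _bus_dmamap_load_paddr() above.  With a 64KB
 * _dm_boundary, a chunk starting at 0x1f000 may only run to the next
 * 64KB boundary at 0x20000, so a 0x4000-byte request is clamped to a
 * 0x1000-byte segment and the loop restarts at 0x20000.  The
 * constants are hypothetical.
 */
#if 0
static void
example_boundary_clamp(void)
{
	bus_addr_t boundary = 0x10000;		/* 64KB */
	bus_addr_t bmask = ~(boundary - 1);	/* 0xffff0000 */
	bus_addr_t curaddr = 0x1f000;
	bus_size_t sgsize = 0x4000;

	bus_addr_t baddr = (curaddr + boundary) & bmask;	/* 0x20000 */
	if (sgsize > (baddr - curaddr))
		sgsize = (baddr - curaddr);	/* clamped to 0x1000 */
}
#endif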

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
static int _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
	    bus_size_t size, int flags);
static void _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map);
static int _bus_dma_uiomove(void *buf, struct uio *uio, size_t n,
	    int direction);

static int
_bus_dma_load_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
	size_t buflen, int buftype, int flags)
{
	struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
	struct vmspace * const vm = vmspace_kernel();
	int error;

	KASSERT(cookie != NULL);
	KASSERT(cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & _BUS_DMA_HAS_BOUNCE) == 0) {
		error = _bus_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	error = _bus_dmamap_load_buffer(t, map, cookie->id_bouncebuf,
	    buflen, vm, flags);
	if (error)
		return (error);

	STAT_INCR(bounced_loads);
	map->dm_mapsize = buflen;
	map->_dm_vmspace = vm;
	map->_dm_buftype = buftype;

	/* ...so _bus_dmamap_sync() knows we're bouncing */
	map->_dm_flags |= _BUS_DMAMAP_IS_BOUNCING;
	cookie->id_flags |= _BUS_DMA_IS_BOUNCING;
	return 0;
}
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct arm32_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

#ifdef DEBUG_DMA
	printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n",
	    t, size, nsegments, maxsegsz, boundary, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct arm32_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	const int mallocflags =
	    M_ZERO | ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	if ((mapstore = malloc(mapsize, M_DMAMAP, mallocflags)) == NULL)
		return (ENOMEM);

	map = (struct arm32_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->_dm_origbuf = NULL;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	map->_dm_vmspace = vmspace_kernel();
	map->_dm_cookie = NULL;
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	struct arm32_bus_dma_cookie *cookie;
	int cookieflags;
	void *cookiestore;
	size_t cookiesize;
	int error;

	cookieflags = 0;

	if (t->_may_bounce != NULL) {
		error = (*t->_may_bounce)(t, map, flags, &cookieflags);
		if (error != 0)
			goto out;
	}

	if (t->_ranges != NULL)
		cookieflags |= _BUS_DMA_MIGHT_NEED_BOUNCE;

	if ((cookieflags & _BUS_DMA_MIGHT_NEED_BOUNCE) == 0) {
		STAT_INCR(creates);
		return 0;
	}

	cookiesize = sizeof(struct arm32_bus_dma_cookie) +
	    (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP, mallocflags)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	cookie = (struct arm32_bus_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;
	STAT_INCR(bounced_creates);

	error = _bus_dma_alloc_bouncebuf(t, map, size, flags);
 out:
	if (error)
		_bus_dmamap_destroy(t, map);
#else
	STAT_INCR(creates);
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */

#ifdef DEBUG_DMA
	printf("dmamap_create:map=%p\n", map);
#endif	/* DEBUG_DMA */
	return (0);
}
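
/*
 * Illustrative driver-side sketch (hypothetical, never compiled) of
 * the map-creation entry point implemented above.  A real driver
 * calls the bus_dmamap_create() wrapper of its tag rather than the
 * _bus_* function directly; the sizes here are made up.
 */
#if 0
static int
example_create_map(bus_dma_tag_t tag, bus_dmamap_t *mapp)
{
	/* Up to 64KB transfers in at most 16 segments, no boundary. */
	return bus_dmamap_create(tag, 65536, 16, 65536, 0,
	    BUS_DMA_WAITOK, mapp);
}
#endif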

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_destroy: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie != NULL) {
		if (cookie->id_flags & _BUS_DMA_IS_BOUNCING)
			STAT_INCR(bounced_unloads);
		map->dm_nsegs = 0;
		if (cookie->id_flags & _BUS_DMA_HAS_BOUNCE)
			_bus_dma_free_bouncebuf(t, map);
		STAT_INCR(bounced_destroys);
		free(cookie, M_DMAMAP);
	} else
#endif
	STAT_INCR(destroys);

	if (map->dm_nsegs > 0)
		STAT_INCR(unloads);

	free(map, M_DMAMAP);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct vmspace *vm;
	int error;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
	    t, map, buf, buflen, p, flags);
#endif	/* DEBUG_DMA */

	if (map->dm_nsegs > 0) {
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
		struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
				map->_dm_flags &= ~_BUS_DMAMAP_IS_BOUNCING;
			}
		} else
#endif
		STAT_INCR(unloads);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
	    "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
	    map->dm_maxsegsz, map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->_dm_vmspace = vm;
		map->_dm_origbuf = buf;
		map->_dm_buftype = _BUS_DMA_BUFTYPE_LINEAR;
		return 0;
	}
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, buf, buflen,
		    _BUS_DMA_BUFTYPE_LINEAR, flags);
	}
#endif
	return (error);
}
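
/*
 * Illustrative sketch (hypothetical names, never compiled) of the
 * usual load/sync pairing for a transfer where the device reads
 * memory; error handling is abbreviated.
 */
#if 0
static int
example_load_and_start(bus_dma_tag_t tag, bus_dmamap_t map,
    void *buf, bus_size_t len)
{
	int error = bus_dmamap_load(tag, map, buf, len, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (error)
		return error;

	/* Flush the CPU's view to memory before the device looks. */
	bus_dmamap_sync(tag, map, 0, len, BUS_DMASYNC_PREWRITE);

	/* ... program map->dm_segs[] into the device and start it ... */
	return 0;
}
#endif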

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	int error;
	struct mbuf *m;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
	    t, map, m0, flags);
#endif	/* DEBUG_DMA */

	if (map->dm_nsegs > 0) {
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
		struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
				map->_dm_flags &= ~_BUS_DMAMAP_IS_BOUNCING;
			}
		} else
#endif
		STAT_INCR(unloads);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
	    "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
	    map->dm_maxsegsz, map->_dm_maxmaxsegsz);

	KASSERT(m0->m_flags & M_PKTHDR);

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/* _bus_dmamap_load_paddr() clears this if we're not... */
	map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		int offset;
		int remainbytes;
		const struct vm_page * const *pgs;
		paddr_t paddr;
		int size;

		if (m->m_len == 0)
			continue;
		/*
		 * Don't allow reads in read-only mbufs.
		 */
		if (M_ROMAP(m) && (flags & BUS_DMA_READ)) {
			error = EFAULT;
			break;
		}
		switch (m->m_flags & (M_EXT|M_CLUSTER|M_EXT_PAGES)) {
		case M_EXT|M_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			paddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
			size = m->m_len;
			error = _bus_dmamap_load_paddr(t, map, paddr, size,
			    false);
			break;

		case M_EXT|M_EXT_PAGES:
			KASSERT(m->m_ext.ext_buf <= m->m_data);
			KASSERT(m->m_data <=
			    m->m_ext.ext_buf + m->m_ext.ext_size);

			offset = (vaddr_t)m->m_data -
			    trunc_page((vaddr_t)m->m_ext.ext_buf);
			remainbytes = m->m_len;

			/* skip uninteresting pages */
			pgs = (const struct vm_page * const *)
			    m->m_ext.ext_pgs + (offset >> PAGE_SHIFT);

			offset &= PAGE_MASK;	/* offset in the first page */

			/* load each page */
			while (remainbytes > 0) {
				const struct vm_page *pg;

				size = MIN(remainbytes, PAGE_SIZE - offset);

				pg = *pgs++;
				KASSERT(pg);
				paddr = VM_PAGE_TO_PHYS(pg) + offset;

				error = _bus_dmamap_load_paddr(t, map,
				    paddr, size, false);
				if (error)
					break;
				offset = 0;
				remainbytes -= size;
			}
			break;

		case 0:
			paddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			size = m->m_len;
			error = _bus_dmamap_load_paddr(t, map, paddr, size,
			    false);
			break;

		default:
			error = _bus_dmamap_load_buffer(t, map, m->m_data,
			    m->m_len, vmspace_kernel(), flags);
		}
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->_dm_origbuf = m0;
		map->_dm_buftype = _BUS_DMA_BUFTYPE_MBUF;
		map->_dm_vmspace = vmspace_kernel();	/* always kernel */
		return 0;
	}
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, m0, m0->m_pkthdr.len,
		    _BUS_DMA_BUFTYPE_MBUF, flags);
	}
#endif
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	int i, error;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
	    "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
	    map->dm_maxsegsz, map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags);

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->_dm_origbuf = uio;
		map->_dm_buftype = _BUS_DMA_BUFTYPE_UIO;
		map->_dm_vmspace = uio->uio_vmspace;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_origbuf = NULL;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	map->_dm_vmspace = NULL;
}

static void
_bus_dmamap_sync_segment(vaddr_t va, paddr_t pa, vsize_t len, int ops, bool readonly_p)
{
	KASSERT((va & PAGE_MASK) == (pa & PAGE_MASK));
#if 0
	printf("sync_segment: va=%#lx pa=%#lx len=%#lx ops=%#x ro=%d\n",
	    va, pa, len, ops, readonly_p);
#endif

	switch (ops) {
	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		if (!readonly_p) {
			STAT_INCR(sync_prereadwrite);
			cpu_dcache_wbinv_range(va, len);
			cpu_sdcache_wbinv_range(va, pa, len);
			break;
		}
		/* FALLTHROUGH */

	case BUS_DMASYNC_PREREAD: {
		const size_t line_size = arm_dcache_align;
		const size_t line_mask = arm_dcache_align_mask;
		vsize_t misalignment = va & line_mask;
		if (misalignment) {
			va -= misalignment;
			pa -= misalignment;
			len += misalignment;
			STAT_INCR(sync_preread_begin);
			cpu_dcache_wbinv_range(va, line_size);
			cpu_sdcache_wbinv_range(va, pa, line_size);
			if (len <= line_size)
				break;
			va += line_size;
			pa += line_size;
			len -= line_size;
		}
		misalignment = len & line_mask;
		len -= misalignment;
		if (len > 0) {
			STAT_INCR(sync_preread);
			cpu_dcache_inv_range(va, len);
			cpu_sdcache_inv_range(va, pa, len);
		}
		if (misalignment) {
			va += len;
			pa += len;
			STAT_INCR(sync_preread_tail);
			cpu_dcache_wbinv_range(va, line_size);
			cpu_sdcache_wbinv_range(va, pa, line_size);
		}
		break;
	}

	case BUS_DMASYNC_PREWRITE:
		STAT_INCR(sync_prewrite);
		cpu_dcache_wb_range(va, len);
		cpu_sdcache_wb_range(va, pa, len);
		break;

#ifdef CPU_CORTEX
	/*
	 * Cortex CPUs can do speculative loads so we need to clean the cache
	 * after a DMA read to deal with any speculatively loaded cache lines.
	 * Since these can't be dirty, we can just invalidate them and don't
	 * have to worry about having to write back their contents.
	 */
	case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
		STAT_INCR(sync_postreadwrite);
		cpu_dcache_inv_range(va, len);
		cpu_sdcache_inv_range(va, pa, len);
		break;
	case BUS_DMASYNC_POSTREAD:
		STAT_INCR(sync_postread);
		cpu_dcache_inv_range(va, len);
		cpu_sdcache_inv_range(va, pa, len);
		break;
#endif
	}
}
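
/*
 * Worked example (illustrative, never compiled) of the PREREAD
 * alignment handling above, assuming hypothetical 32-byte cache
 * lines.  For va = 0x1008, len = 0x30, the partially covered head
 * line [0x1000,0x1020) and tail line [0x1020,0x1040) are written
 * back and invalidated (they may hold dirty data belonging to
 * adjacent buffers), leaving nothing for the cheaper
 * invalidate-only middle pass.  For va = 0x1000, len = 0x100, the
 * whole range takes the invalidate-only path.
 */
#if 0
static void
example_preread_alignment(void)
{
	const vsize_t line_mask = 31;		/* 32-byte lines */
	vaddr_t va = 0x1008;
	vsize_t len = 0x30;
	vsize_t head = va & line_mask;		/* 8: misaligned head */
	vsize_t tail = (va + len) & line_mask;	/* 24: misaligned tail */
	/* head/tail lines get wbinv; only an aligned middle gets inv. */
}
#endif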

static inline void
_bus_dmamap_sync_linear(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_dma_segment_t *ds = map->dm_segs;
	vaddr_t va = (vaddr_t) map->_dm_origbuf;
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	if (map->_dm_flags & _BUS_DMAMAP_IS_BOUNCING) {
		struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
		va = (vaddr_t) cookie->id_bouncebuf;
	}
#endif

	while (len > 0) {
		while (offset >= ds->ds_len) {
			offset -= ds->ds_len;
			va += ds->ds_len;
			ds++;
		}

		paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + offset);
		size_t seglen = min(len, ds->ds_len - offset);

		if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0)
			_bus_dmamap_sync_segment(va + offset, pa, seglen, ops,
			    false);

		offset += seglen;
		len -= seglen;
	}
}

static inline void
_bus_dmamap_sync_mbuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t offset,
    bus_size_t len, int ops)
{
	bus_dma_segment_t *ds = map->dm_segs;
	struct mbuf *m = map->_dm_origbuf;
	bus_size_t voff = offset;
	bus_size_t ds_off = offset;

	while (len > 0) {
		/* Find the current dma segment */
		while (ds_off >= ds->ds_len) {
			ds_off -= ds->ds_len;
			ds++;
		}
		/* Find the current mbuf. */
		while (voff >= m->m_len) {
			voff -= m->m_len;
			m = m->m_next;
		}

		/*
		 * Now at the first mbuf to sync; nail each one until
		 * we have exhausted the length.
		 */
		vsize_t seglen = min(len, min(m->m_len - voff, ds->ds_len - ds_off));
		vaddr_t va = mtod(m, vaddr_t) + voff;
		paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + ds_off);

		/*
		 * We can save a lot of work here if we know the mapping
		 * is read-only at the MMU:
		 *
		 * If a mapping is read-only, no dirty cache blocks will
		 * exist for it.  If a writable mapping was made read-only,
		 * we know any dirty cache lines for the range will have
		 * been cleaned for us already.  Therefore, if the upper
		 * layer can tell us we have a read-only mapping, we can
		 * skip all cache cleaning.
		 *
		 * NOTE: This only works if we know the pmap cleans pages
		 * before making a read-write -> read-only transition.  If
		 * this ever becomes non-true (e.g. Physically Indexed
		 * cache), this will have to be revisited.
		 */

		if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0)
			_bus_dmamap_sync_segment(va, pa, seglen, ops,
			    M_ROMAP(m));
		voff += seglen;
		ds_off += seglen;
		len -= seglen;
	}
}

static inline void
_bus_dmamap_sync_uio(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_dma_segment_t *ds = map->dm_segs;
	struct uio *uio = map->_dm_origbuf;
	struct iovec *iov = uio->uio_iov;
	bus_size_t voff = offset;
	bus_size_t ds_off = offset;

	while (len > 0) {
		/* Find the current dma segment */
		while (ds_off >= ds->ds_len) {
			ds_off -= ds->ds_len;
			ds++;
		}

		/* Find the current iovec. */
		while (voff >= iov->iov_len) {
			voff -= iov->iov_len;
			iov++;
		}

		/*
		 * Now at the first iovec to sync; nail each one until
		 * we have exhausted the length.
		 */
		vsize_t seglen = min(len, min(iov->iov_len - voff, ds->ds_len - ds_off));
		vaddr_t va = (vaddr_t) iov->iov_base + voff;
		paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + ds_off);

		if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0)
			_bus_dmamap_sync_segment(va, pa, seglen, ops, false);

		voff += seglen;
		ds_off += seglen;
		len -= seglen;
	}
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 *
 * This version works for the Virtually Indexed Virtually Tagged
 * cache found on 32-bit ARM processors.
 *
 * XXX Should have separate versions for write-through vs.
 * XXX write-back caches.  We currently assume write-back
 * XXX here, which is not as efficient as it could be for
 * XXX the write-through case.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
#ifdef DEBUG_DMA
	printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
	    t, map, offset, len, ops);
#endif	/* DEBUG_DMA */

	/*
	 * Mixing of PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

	KASSERTMSG(offset < map->dm_mapsize,
	    "offset %lu mapsize %lu",
	    offset, map->dm_mapsize);
	KASSERTMSG(len > 0 && offset + len <= map->dm_mapsize,
	    "len %lu offset %lu mapsize %lu",
	    len, offset, map->dm_mapsize);

	/*
	 * For a virtually-indexed write-back cache, we need
	 * to do the following things:
	 *
	 *	PREREAD -- Invalidate the D-cache.  We do this
	 *	here in case a write-back is required by the back-end.
	 *
	 *	PREWRITE -- Write-back the D-cache.  Note that if
	 *	we are doing a PREREAD|PREWRITE, we can collapse
	 *	the whole thing into a single Wb-Inv.
	 *
	 *	POSTREAD -- Re-invalidate the D-cache in case speculative
	 *	memory accesses caused cachelines to become valid with now
	 *	invalid data.
	 *
	 *	POSTWRITE -- Nothing.
	 */
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	const bool bouncing = (map->_dm_flags & _BUS_DMAMAP_IS_BOUNCING);
#else
	const bool bouncing = false;
#endif

	const int pre_ops = ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
#ifdef CPU_CORTEX
	const int post_ops = ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
#else
	const int post_ops = 0;
#endif
	if (!bouncing && pre_ops == 0 && post_ops == BUS_DMASYNC_POSTWRITE) {
		STAT_INCR(sync_postwrite);
		return;
	}
	KASSERTMSG(bouncing || pre_ops != 0 || (post_ops & BUS_DMASYNC_POSTREAD),
	    "pre_ops %#x post_ops %#x", pre_ops, post_ops);
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	if (bouncing && (ops & BUS_DMASYNC_PREWRITE)) {
		struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
		STAT_INCR(write_bounces);
		char * const dataptr = (char *)cookie->id_bouncebuf + offset;
		/*
		 * Copy the caller's buffer to the bounce buffer.
		 */
		switch (map->_dm_buftype) {
		case _BUS_DMA_BUFTYPE_LINEAR:
			memcpy(dataptr, cookie->id_origlinearbuf + offset, len);
			break;
		case _BUS_DMA_BUFTYPE_MBUF:
			m_copydata(cookie->id_origmbuf, offset, len, dataptr);
			break;
		case _BUS_DMA_BUFTYPE_UIO:
			_bus_dma_uiomove(dataptr, cookie->id_origuio, len, UIO_WRITE);
			break;
#ifdef DIAGNOSTIC
		case _BUS_DMA_BUFTYPE_RAW:
			panic("_bus_dmamap_sync(pre): _BUS_DMA_BUFTYPE_RAW");
			break;

		case _BUS_DMA_BUFTYPE_INVALID:
			panic("_bus_dmamap_sync(pre): _BUS_DMA_BUFTYPE_INVALID");
			break;

		default:
			panic("_bus_dmamap_sync(pre): map %p: unknown buffer type %d\n",
			    map, map->_dm_buftype);
			break;
#endif /* DIAGNOSTIC */
		}
	}
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */

	/* Skip cache frobbing if mapping was COHERENT. */
	if (!bouncing && (map->_dm_flags & _BUS_DMAMAP_COHERENT)) {
		/* Drain the write buffer. */
		if (pre_ops & BUS_DMASYNC_PREWRITE)
			cpu_drain_writebuf();
		return;
	}

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	if (bouncing && ((map->_dm_flags & _BUS_DMAMAP_COHERENT) || pre_ops == 0)) {
		goto bounce_it;
	}
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */

#ifndef ARM_MMU_EXTENDED
	/*
	 * If the mapping belongs to a non-kernel vmspace, and the
	 * vmspace has not been active since the last time a full
	 * cache flush was performed, we don't need to do anything.
	 */
	if (__predict_false(!VMSPACE_IS_KERNEL_P(map->_dm_vmspace) &&
	    vm_map_pmap(&map->_dm_vmspace->vm_map)->pm_cstate.cs_cache_d == 0))
		return;
#endif

	int buftype = map->_dm_buftype;
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	if (bouncing) {
		buftype = _BUS_DMA_BUFTYPE_LINEAR;
	}
#endif

	switch (buftype) {
	case _BUS_DMA_BUFTYPE_LINEAR:
		_bus_dmamap_sync_linear(t, map, offset, len, ops);
		break;

	case _BUS_DMA_BUFTYPE_MBUF:
		_bus_dmamap_sync_mbuf(t, map, offset, len, ops);
		break;

	case _BUS_DMA_BUFTYPE_UIO:
		_bus_dmamap_sync_uio(t, map, offset, len, ops);
		break;

	case _BUS_DMA_BUFTYPE_RAW:
		panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_RAW");
		break;

	case _BUS_DMA_BUFTYPE_INVALID:
		panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_INVALID");
		break;

	default:
		panic("_bus_dmamap_sync: map %p: unknown buffer type %d\n",
		    map, map->_dm_buftype);
	}

	/* Drain the write buffer. */
	cpu_drain_writebuf();

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
  bounce_it:
	if (!bouncing || (ops & BUS_DMASYNC_POSTREAD) == 0)
		return;

	struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
	char * const dataptr = (char *)cookie->id_bouncebuf + offset;
	STAT_INCR(read_bounces);
	/*
	 * Copy the bounce buffer to the caller's buffer.
	 */
	switch (map->_dm_buftype) {
	case _BUS_DMA_BUFTYPE_LINEAR:
		memcpy(cookie->id_origlinearbuf + offset, dataptr, len);
		break;

	case _BUS_DMA_BUFTYPE_MBUF:
		m_copyback(cookie->id_origmbuf, offset, len, dataptr);
		break;

	case _BUS_DMA_BUFTYPE_UIO:
		_bus_dma_uiomove(dataptr, cookie->id_origuio, len, UIO_READ);
		break;
#ifdef DIAGNOSTIC
	case _BUS_DMA_BUFTYPE_RAW:
		panic("_bus_dmamap_sync(post): _BUS_DMA_BUFTYPE_RAW");
		break;

	case _BUS_DMA_BUFTYPE_INVALID:
		panic("_bus_dmamap_sync(post): _BUS_DMA_BUFTYPE_INVALID");
		break;

	default:
		panic("_bus_dmamap_sync(post): map %p: unknown buffer type %d\n",
		    map, map->_dm_buftype);
		break;
#endif
	}
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
}
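
/*
 * Illustrative sketch (hypothetical names, never compiled) of how a
 * driver pairs the sync operations implemented above around a
 * device-to-memory transfer; the map is assumed already loaded.
 */
#if 0
static void
example_sync_pairing(bus_dma_tag_t tag, bus_dmamap_t map, bus_size_t len)
{
	/* Before starting the DMA read from the device: */
	bus_dmamap_sync(tag, map, 0, len, BUS_DMASYNC_PREREAD);

	/* ... device DMAs into the buffer, interrupt fires ... */

	/*
	 * Before the CPU touches the data; this also copies back any
	 * bounce buffer and re-invalidates on Cortex CPUs.
	 */
	bus_dmamap_sync(tag, map, 0, len, BUS_DMASYNC_POSTREAD);
}
#endif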

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */

extern paddr_t physical_start;
extern paddr_t physical_end;

int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	struct arm32_dma_range *dr;
	int error, i;

#ifdef DEBUG_DMA
	printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx "
	    "segs=%p nsegs=%x rsegs=%p flags=%x\n", t, size, alignment,
	    boundary, segs, nsegs, rsegs, flags);
#endif

	if ((dr = t->_ranges) != NULL) {
		error = ENOMEM;
		for (i = 0; i < t->_nranges; i++, dr++) {
			if (dr->dr_len == 0
			    || (dr->dr_flags & _BUS_DMAMAP_NOALLOC))
				continue;
			error = _bus_dmamem_alloc_range(t, size, alignment,
			    boundary, segs, nsegs, rsegs, flags,
			    trunc_page(dr->dr_sysbase),
			    trunc_page(dr->dr_sysbase + dr->dr_len));
			if (error == 0)
				break;
		}
	} else {
		error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
		    segs, nsegs, rsegs, flags, trunc_page(physical_start),
		    trunc_page(physical_end));
	}

#ifdef DEBUG_DMA
	printf("dmamem_alloc: =%d\n", error);
#endif

	return (error);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}
	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	paddr_t pa;
	int curseg;
	pt_entry_t *ptep;
	const uvm_flag_t kmflags = UVM_KMF_VAONLY
	    | ((flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0);
	vsize_t align = 0;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

#ifdef PMAP_MAP_POOLPAGE
	/*
	 * If all of memory is mapped, and we are mapping a single physically
	 * contiguous area, then this area is already mapped.  Let's see if we
	 * can avoid having a separate mapping for it.
	 */
	if (nsegs == 1) {
		/*
		 * If this is a non-COHERENT mapping, then the existing kernel
		 * mapping is already compatible with it.
		 */
		bool direct_mapable = (flags & BUS_DMA_COHERENT) == 0;
		pa = segs[0].ds_addr;

		/*
		 * This is a COHERENT mapping which, unless this address is in
		 * a COHERENT dma range, will not be compatible.
		 */
		if (t->_ranges != NULL) {
			const struct arm32_dma_range * const dr =
			    _bus_dma_paddr_inrange(t->_ranges, t->_nranges, pa);
			if (dr != NULL
			    && (dr->dr_flags & _BUS_DMAMAP_COHERENT)) {
				direct_mapable = true;
			}
		}

		if (direct_mapable) {
			*kvap = (void *)PMAP_MAP_POOLPAGE(pa);
#ifdef DEBUG_DMA
			printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
			return 0;
		}
	}
#endif

	size = round_page(size);
	if (__predict_true(size > L2_L_SIZE)) {
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
		if (size >= L1_SS_SIZE)
			align = L1_SS_SIZE;
		else
#endif
		if (size >= L1_S_SIZE)
			align = L1_S_SIZE;
		else
			align = L2_S_SIZE;
	}

	va = uvm_km_alloc(kernel_map, size, align, kmflags);
	if (__predict_false(va == 0 && align > 0)) {
		align = 0;
		va = uvm_km_alloc(kernel_map, size, 0, kmflags);
	}

	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (pa = segs[curseg].ds_addr;
		    pa < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    pa += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			bool uncached = (flags & BUS_DMA_COHERENT);
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", pa, va);
#endif	/* DEBUG_DMA */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");

			const struct arm32_dma_range * const dr =
			    _bus_dma_paddr_inrange(t->_ranges, t->_nranges, pa);
			/*
			 * If this dma region is coherent then there is
			 * no need for an uncached mapping.
			 */
			if (dr != NULL
			    && (dr->dr_flags & _BUS_DMAMAP_COHERENT)) {
				uncached = false;
			}

			pmap_kenter_pa(va, pa,
			    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);

			/*
			 * If the memory must remain coherent with the
			 * cache then we must make the memory uncacheable
			 * in order to maintain virtual cache coherency.
			 * We must also guarantee the cache does not already
			 * contain the virtual addresses we are making
			 * uncacheable.
			 */
			if (uncached) {
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
				cpu_sdcache_wbinv_range(va, pa, PAGE_SIZE);
				cpu_drain_writebuf();
				ptep = vtopte(va);
				*ptep &= ~L2_S_CACHE_MASK;
				PTE_SYNC(ptep);
				tlb_flush();
			}
#ifdef DEBUG_DMA
			ptep = vtopte(va);
			printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif	/* DEBUG_DMA */
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return (0);
}
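
/*
 * Illustrative sketch (hypothetical names, never compiled) of the
 * usual allocation chain for a coherent descriptor area using the
 * functions above; error unwinding is abbreviated.
 */
#if 0
static int
example_alloc_descriptors(bus_dma_tag_t tag, size_t size, void **kvap,
    bus_dma_segment_t *seg)
{
	int rseg, error;

	error = bus_dmamem_alloc(tag, size, PAGE_SIZE, 0, seg, 1, &rseg,
	    BUS_DMA_WAITOK);
	if (error)
		return error;

	/* BUS_DMA_COHERENT requests the uncached mapping made above. */
	error = bus_dmamem_map(tag, seg, rseg, size, kvap,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT);
	if (error)
		bus_dmamem_free(tag, seg, rseg);
	return error;
}
#endif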

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%zx\n", t, kva, size);
#endif	/* DEBUG_DMA */
	KASSERTMSG(((uintptr_t)kva & PAGE_MASK) == 0,
	    "kva %p (%#"PRIxPTR")", kva, (uintptr_t)kva & PAGE_MASK);

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	paddr_t map_flags;
	int i;

	for (i = 0; i < nsegs; i++) {
		KASSERTMSG((off & PAGE_MASK) == 0,
		    "off %#qx (%#x)", off, (int)off & PAGE_MASK);
		KASSERTMSG((segs[i].ds_addr & PAGE_MASK) == 0,
		    "ds_addr %#lx (%#x)", segs[i].ds_addr,
		    (int)segs[i].ds_addr & PAGE_MASK);
		KASSERTMSG((segs[i].ds_len & PAGE_MASK) == 0,
		    "ds_len %#lx (%#x)", segs[i].ds_len,
		    (int)segs[i].ds_len & PAGE_MASK);
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		map_flags = 0;
		if (flags & BUS_DMA_PREFETCHABLE)
			map_flags |= ARM32_MMAP_WRITECOMBINE;

		return (arm_btop((u_long)segs[i].ds_addr + off) | map_flags);
	}

	/* Page not found. */
	return (-1);
}

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  Segment state is kept
 * in the map itself, so this may be called repeatedly (e.g. once per
 * mbuf or iovec in a chain) to append further segments to the same
 * map.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct vmspace *vm, int flags)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	int error;
	pmap_t pmap;

#ifdef DEBUG_DMA
	printf("_bus_dmamap_load_buffer(buf=%p, len=%lx, flags=%d)\n",
	    buf, buflen, flags);
#endif	/* DEBUG_DMA */

	pmap = vm_map_pmap(&vm->vm_map);

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Doesn't support checking for coherent mappings
		 * XXX in user address space.
		 */
		bool coherent;
		if (__predict_true(pmap == pmap_kernel())) {
			pd_entry_t *pde;
			pt_entry_t *ptep;
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				paddr_t s_frame = L1_S_FRAME;
				paddr_t s_offset = L1_S_OFFSET;
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
				if (__predict_false(pmap_pde_supersection(pde))) {
					s_frame = L1_SS_FRAME;
					s_offset = L1_SS_OFFSET;
				}
#endif
				curaddr = (*pde & s_frame) | (vaddr & s_offset);
				coherent = (*pde & L1_S_CACHE_MASK) == 0;
			} else {
				pt_entry_t pte = *ptep;
				KDASSERTMSG((pte & L2_TYPE_MASK) != L2_TYPE_INV,
				    "va=%#"PRIxVADDR" pde=%#x ptep=%p pte=%#x",
				    vaddr, *pde, ptep, pte);
				if (__predict_false((pte & L2_TYPE_MASK)
						    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					coherent = (pte & L2_L_CACHE_MASK) == 0;
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					coherent = (pte & L2_S_CACHE_MASK) == 0;
				}
			}
		} else {
			(void) pmap_extract(pmap, vaddr, &curaddr);
			coherent = false;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		error = _bus_dmamap_load_paddr(t, map, curaddr, sgsize,
		    coherent);
		if (error)
			return (error);

		vaddr += sgsize;
		buflen -= sgsize;
	}

	return (0);
}
   1517 
   1518 /*
   1519  * Allocate physical memory from the given physical address range.
   1520  * Called by DMA-safe memory allocation methods.
   1521  */
   1522 int
   1523 _bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
   1524     bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
   1525     int flags, paddr_t low, paddr_t high)
   1526 {
   1527 	paddr_t curaddr, lastaddr;
   1528 	struct vm_page *m;
   1529 	struct pglist mlist;
   1530 	int curseg, error;
   1531 
   1532 	KASSERTMSG(boundary == 0 || (boundary & (boundary-1)) == 0,
   1533 	    "invalid boundary %#lx", boundary);
   1534 
   1535 #ifdef DEBUG_DMA
   1536 	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
   1537 	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
   1538 #endif	/* DEBUG_DMA */
   1539 
   1540 	/* Always round the size. */
   1541 	size = round_page(size);
   1542 
   1543 	/*
   1544 	 * We accept boundaries < size, splitting in multiple segments
   1545 	 * if needed. uvm_pglistalloc does not, so compute an appropriate
   1546 	 * boundary: next power of 2 >= size
   1547 	 */
   1548 	bus_size_t uboundary = boundary;
   1549 	if (uboundary <= PAGE_SIZE) {
   1550 		uboundary = 0;
   1551 	} else {
   1552 		while (uboundary < size) {
   1553 			uboundary <<= 1;
   1554 		}
   1555 	}
   1556 
   1557 	/*
   1558 	 * Allocate pages from the VM system.
   1559 	 */
   1560 	error = uvm_pglistalloc(size, low, high, alignment, uboundary,
   1561 	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
   1562 	if (error)
   1563 		return (error);
   1564 
   1565 	/*
   1566 	 * Compute the location, size, and number of segments actually
   1567 	 * returned by the VM code.
   1568 	 */
   1569 	m = TAILQ_FIRST(&mlist);
   1570 	curseg = 0;
   1571 	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
   1572 	segs[curseg].ds_len = PAGE_SIZE;
   1573 #ifdef DEBUG_DMA
   1574 		printf("alloc: page %lx\n", lastaddr);
   1575 #endif	/* DEBUG_DMA */
   1576 	m = TAILQ_NEXT(m, pageq.queue);
   1577 
   1578 	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
   1579 		curaddr = VM_PAGE_TO_PHYS(m);
   1580 		KASSERTMSG(low <= curaddr && curaddr < high,
   1581 		    "uvm_pglistalloc returned non-sensicaladdress %#lx "
   1582 		    "(low=%#lx, high=%#lx\n", curaddr, low, high);
   1583 #ifdef DEBUG_DMA
   1584 		printf("alloc: page %lx\n", curaddr);
   1585 #endif	/* DEBUG_DMA */
   1586 		if (curaddr == lastaddr + PAGE_SIZE
   1587 		    && (lastaddr & boundary) == (curaddr & boundary))
   1588 			segs[curseg].ds_len += PAGE_SIZE;
   1589 		else {
   1590 			curseg++;
   1591 			if (curseg >= nsegs) {
   1592 				uvm_pglistfree(&mlist);
   1593 				return EFBIG;
   1594 			}
   1595 			segs[curseg].ds_addr = curaddr;
   1596 			segs[curseg].ds_len = PAGE_SIZE;
   1597 		}
   1598 		lastaddr = curaddr;
   1599 	}
   1600 
   1601 	*rsegs = curseg + 1;
   1602 
   1603 	return (0);
   1604 }
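
/*
 * Sketch of a direct caller (the 256MB limit, the function name and the
 * BUSDMA_EXAMPLES guard are hypothetical): platform code that has to keep
 * an allocation below a device's addressing limit can call
 * _bus_dmamem_alloc_range() instead of _bus_dmamem_alloc().
 */
#ifdef BUSDMA_EXAMPLES
static int
example_alloc_low(bus_dma_tag_t t, bus_size_t size, bus_dma_segment_t *seg,
    int *rseg)
{

	/* One segment, page-aligned, no boundary, physaddr below 256MB. */
	return _bus_dmamem_alloc_range(t, size, PAGE_SIZE, 0,
	    seg, 1, rseg, BUS_DMA_NOWAIT, 0, 0x10000000);
}
#endif /* BUSDMA_EXAMPLES */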
   1605 
   1606 /*
   1607  * Check if a memory region intersects with a DMA range, and return the
   1608  * page-rounded intersection if it does.
   1609  */
   1610 int
   1611 arm32_dma_range_intersect(struct arm32_dma_range *ranges, int nranges,
   1612     paddr_t pa, psize_t size, paddr_t *pap, psize_t *sizep)
   1613 {
   1614 	struct arm32_dma_range *dr;
   1615 	int i;
   1616 
   1617 	if (ranges == NULL)
   1618 		return (0);
   1619 
   1620 	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
   1621 		if (dr->dr_sysbase <= pa &&
   1622 		    pa < (dr->dr_sysbase + dr->dr_len)) {
   1623 			/*
   1624 			 * Beginning of region intersects with this range.
   1625 			 */
   1626 			*pap = trunc_page(pa);
   1627 			*sizep = round_page(min(pa + size,
   1628 			    dr->dr_sysbase + dr->dr_len) - pa);
   1629 			return (1);
   1630 		}
   1631 		if (pa < dr->dr_sysbase && dr->dr_sysbase < (pa + size)) {
   1632 			/*
   1633 			 * End of region intersects with this range.
   1634 			 */
   1635 			*pap = trunc_page(dr->dr_sysbase);
   1636 			*sizep = round_page(min((pa + size) - dr->dr_sysbase,
   1637 			    dr->dr_len));
   1638 			return (1);
   1639 		}
   1640 	}
   1641 
   1642 	/* No intersection found. */
   1643 	return (0);
   1644 }
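
/*
 * Worked example (hypothetical range and addresses): with a single DMA
 * window at sysbase 0x20000000, length 0x10000000, a 3-page buffer at
 * pa 0x2fffe000 hits the first case above, returning *pap = 0x2fffe000
 * and *sizep = 0x2000 -- only the part of the region inside the window,
 * rounded to page boundaries.
 */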
   1645 
   1646 #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
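/*
 * Allocate and map a bounce buffer for the given map.  On success the
 * cookie's bounce fields are filled in and _BUS_DMA_HAS_BOUNCE is set;
 * on failure they are left zeroed.
 */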
   1647 static int
   1648 _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
   1649     bus_size_t size, int flags)
   1650 {
   1651 	struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
   1652 	int error = 0;
   1653 
   1654 	KASSERT(cookie != NULL);
   1655 
   1656 	cookie->id_bouncebuflen = round_page(size);
   1657 	error = _bus_dmamem_alloc(t, cookie->id_bouncebuflen,
   1658 	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
   1659 	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
   1660 	if (error == 0) {
   1661 		error = _bus_dmamem_map(t, cookie->id_bouncesegs,
   1662 		    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
   1663 		    (void **)&cookie->id_bouncebuf, flags);
   1664 		if (error) {
   1665 			_bus_dmamem_free(t, cookie->id_bouncesegs,
   1666 			    cookie->id_nbouncesegs);
   1667 			cookie->id_bouncebuflen = 0;
   1668 			cookie->id_nbouncesegs = 0;
   1669 		} else {
   1670 			cookie->id_flags |= _BUS_DMA_HAS_BOUNCE;
   1671 		}
   1672 	} else {
   1673 		cookie->id_bouncebuflen = 0;
   1674 		cookie->id_nbouncesegs = 0;
   1675 	}
   1676 
   1677 	return (error);
   1678 }
   1679 
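/*
 * Unmap and free a previously allocated bounce buffer and clear the
 * bounce state in the map's cookie.
 */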
   1680 static void
   1681 _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
   1682 {
   1683 	struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
   1684 
   1685 	KASSERT(cookie != NULL);
   1686 
   1687 	_bus_dmamem_unmap(t, cookie->id_bouncebuf, cookie->id_bouncebuflen);
   1688 	_bus_dmamem_free(t, cookie->id_bouncesegs, cookie->id_nbouncesegs);
   1689 	cookie->id_bouncebuflen = 0;
   1690 	cookie->id_nbouncesegs = 0;
   1691 	cookie->id_flags &= ~_BUS_DMA_HAS_BOUNCE;
   1692 }
   1693 
   1694 /*
   1695  * This function does the same as uiomove, but takes an explicit
   1696  * direction, and does not update the uio structure.
   1697  */
   1698 static int
   1699 _bus_dma_uiomove(void *buf, struct uio *uio, size_t n, int direction)
   1700 {
   1701 	struct iovec *iov;
   1702 	int error;
   1703 	struct vmspace *vm;
   1704 	char *cp;
   1705 	size_t resid, cnt;
   1706 	int i;
   1707 
   1708 	iov = uio->uio_iov;
   1709 	vm = uio->uio_vmspace;
   1710 	cp = buf;
   1711 	resid = n;
   1712 
   1713 	for (i = 0; i < uio->uio_iovcnt && resid > 0; i++) {
   1714 		iov = &uio->uio_iov[i];
   1715 		if (iov->iov_len == 0)
   1716 			continue;
   1717 		cnt = MIN(resid, iov->iov_len);
   1718 
   1719 		if (!VMSPACE_IS_KERNEL_P(vm) &&
   1720 		    (curlwp->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
   1721 		    != 0) {
   1722 			preempt();
   1723 		}
   1724 		if (direction == UIO_READ) {
   1725 			error = copyout_vmspace(vm, cp, iov->iov_base, cnt);
   1726 		} else {
   1727 			error = copyin_vmspace(vm, iov->iov_base, cp, cnt);
   1728 		}
   1729 		if (error)
   1730 			return (error);
   1731 		cp += cnt;
   1732 		resid -= cnt;
   1733 	}
   1734 	return (0);
   1735 }
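
/*
 * Sketch of how the sync path would use the helper above after a
 * bounced read completes (offset/len and the cast are condensed,
 * hypothetical pseudo-code; the real caller tracks the original buffer
 * and its type in the cookie):
 *
 *	struct uio *uio = cookie->id_origbuf;
 *	_bus_dma_uiomove((char *)cookie->id_bouncebuf + offset,
 *	    uio, len, UIO_READ);
 *
 * The uio's own offsets are deliberately left untouched, which is why
 * the helper does not update the uio structure.
 */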
   1736 #endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
   1737 
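/*
 * Create a tag restricted to DMA addresses in [min_addr, max_addr].
 * If one of the parent tag's ranges already covers the whole window,
 * the parent tag itself is reused and its reference count bumped;
 * otherwise a new tag is allocated carrying copies of the intersecting
 * ranges, clipped to the window.
 */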
   1738 int
   1739 _bus_dmatag_subregion(bus_dma_tag_t tag, bus_addr_t min_addr,
   1740     bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags)
   1741 {
   1742 
   1743 #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
   1744 	struct arm32_dma_range *dr;
   1745 	bool subset = false;
   1746 	size_t nranges = 0;
   1747 	size_t i;
   1748 	for (i = 0, dr = tag->_ranges; i < tag->_nranges; i++, dr++) {
   1749 		if (dr->dr_sysbase <= min_addr
   1750 		    && max_addr <= dr->dr_sysbase + dr->dr_len - 1) {
   1751 			subset = true;
   1752 		}
   1753 		if (min_addr <= dr->dr_sysbase + dr->dr_len
   1754 		    && max_addr >= dr->dr_sysbase) {
   1755 			nranges++;
   1756 		}
   1757 	}
   1758 	if (subset) {
   1759 		*newtag = tag;
   1760 		/* if the tag must be freed, add a reference */
   1761 		if (tag->_tag_needs_free)
   1762 			(tag->_tag_needs_free)++;
   1763 		return 0;
   1764 	}
   1765 	if (nranges == 0) {
   1766 		nranges = 1;
   1767 	}
   1768 
   1769 	size_t mallocsize = sizeof(*tag) + nranges * sizeof(*dr);
   1770 	if ((*newtag = malloc(mallocsize, M_DMAMAP,
   1771 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
   1772 		return ENOMEM;
   1773 
   1774 	dr = (void *)(*newtag + 1);
   1775 	**newtag = *tag;
   1776 	(*newtag)->_tag_needs_free = 1;
   1777 	(*newtag)->_ranges = dr;
   1778 	(*newtag)->_nranges = nranges;
   1779 
   1780 	if (tag->_ranges == NULL) {
   1781 		dr->dr_sysbase = min_addr;
   1782 		dr->dr_busbase = min_addr;
   1783 		dr->dr_len = max_addr + 1 - min_addr;
   1784 	} else {
   1785 		for (i = 0; i < tag->_nranges; i++) {
   1786 			if (min_addr > tag->_ranges[i].dr_sysbase + tag->_ranges[i].dr_len
   1787 			    || max_addr < tag->_ranges[i].dr_sysbase)
   1788 				continue;
   1789 			dr[0] = tag->_ranges[i];
   1790 			if (dr->dr_sysbase < min_addr) {
   1791 				psize_t diff = min_addr - dr->dr_sysbase;
   1792 				dr->dr_busbase += diff;
   1793 				dr->dr_len -= diff;
   1794 				dr->dr_sysbase += diff;
   1795 			}
   1796 			if (max_addr != 0xffffffff
   1797 			    && max_addr + 1 < dr->dr_sysbase + dr->dr_len) {
   1798 				dr->dr_len = max_addr + 1 - dr->dr_sysbase;
   1799 			}
   1800 			dr++;
   1801 		}
   1802 	}
   1803 
   1804 	return 0;
   1805 #else
   1806 	return EOPNOTSUPP;
   1807 #endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
   1808 }
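
/*
 * Sketch of a typical consumer (the function names and the 1GB limit
 * are hypothetical, as is the BUSDMA_EXAMPLES guard): a driver whose
 * device can only address the low 1GB derives a clipped tag at attach
 * time and releases it again at detach time.
 */
#ifdef BUSDMA_EXAMPLES
static int
example_attach_dma(bus_dma_tag_t parent, bus_dma_tag_t *tagp)
{

	/* max_addr is inclusive, so a 1GB reach means 0x3fffffff. */
	return _bus_dmatag_subregion(parent, 0, 0x3fffffff, tagp,
	    BUS_DMA_WAITOK);
}

static void
example_detach_dma(bus_dma_tag_t tag)
{

	/* Frees the tag, or merely drops a reference if it was shared. */
	_bus_dmatag_destroy(tag);
}
#endif /* BUSDMA_EXAMPLES */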
   1809 
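/*
 * Release a reference on a tag obtained from _bus_dmatag_subregion(),
 * freeing it once the last reference is gone.
 */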
   1810 void
   1811 _bus_dmatag_destroy(bus_dma_tag_t tag)
   1812 {
   1813 #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
   1814 	switch (tag->_tag_needs_free) {
   1815 	case 0:
   1816 		break;				/* not allocated with malloc */
   1817 	case 1:
   1818 		free(tag, M_DMAMAP);		/* last reference to tag */
   1819 		break;
   1820 	default:
   1821 		(tag->_tag_needs_free)--;	/* one less reference */
   1822 	}
   1823 #endif
   1824 }
   1825