/*	$NetBSD: bus_dma.c,v 1.5 2020/11/21 16:21:24 thorpej Exp $	*/

/*
 * Copyright (c) 2005 NONAKA Kimihiro <nonaka@netbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.5 2020/11/21 16:21:24 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#define	_EVBSH3_BUS_DMA_PRIVATE
#include <sys/bus.h>

#include <uvm/uvm.h>

#include <sh3/cache.h>

#include <machine/autoconf.h>

#if defined(DEBUG) && defined(BUSDMA_DEBUG)
int busdma_debug = 0;
#define	DPRINTF(a)	do { if (busdma_debug) printf a; } while (/*CONSTCOND*/ 0)
#else
#define	DPRINTF(a)	do { /* nothing */ } while (/*CONSTCOND*/ 0)
#endif

struct _bus_dma_tag evbsh3_bus_dma = {
	._cookie = NULL,

	._dmamap_create = _bus_dmamap_create,
	._dmamap_destroy = _bus_dmamap_destroy,
	._dmamap_load = _bus_dmamap_load,
	._dmamap_load_mbuf = _bus_dmamap_load_mbuf,
	._dmamap_load_uio = _bus_dmamap_load_uio,
	._dmamap_load_raw = _bus_dmamap_load_raw,
	._dmamap_unload = _bus_dmamap_unload,
	._dmamap_sync = _bus_dmamap_sync,

	._dmamem_alloc = _bus_dmamem_alloc,
	._dmamem_free = _bus_dmamem_free,
	._dmamem_map = _bus_dmamem_map,
	._dmamem_unmap = _bus_dmamem_unmap,
	._dmamem_mmap = _bus_dmamem_mmap,
};
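
/*
 * A minimal driver-side sketch of how this tag is consumed, via the
 * standard bus_dma(9) entry points that resolve to the functions
 * below.  The buffer "buf" and length "len" are hypothetical, and
 * error handling is elided:
 *
 *	bus_dma_tag_t t = &evbsh3_bus_dma;
 *	bus_dmamap_t map;
 *
 *	bus_dmamap_create(t, len, 1, len, 0, BUS_DMA_NOWAIT, &map);
 *	bus_dmamap_load(t, map, buf, len, NULL, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... start the device's DMA read of the buffer ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(t, map);
 *	bus_dmamap_destroy(t, map);
 */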
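/*
 * Size of the dmamap backing store for a given segment count.
 * struct _bus_dmamap already contains one bus_dma_segment_t, so only
 * (nsegments - 1) extra segments are appended; e.g. for nsegments = 4
 * this is the struct plus room for three more segments.
 */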
static size_t
_bus_dmamap_mapsize(int const nsegments)
{
	KASSERT(nsegments > 0);
	return sizeof(struct _bus_dmamap)
		+ (sizeof(bus_dma_segment_t) * (nsegments - 1));
}

/*
 * Create a DMA map.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	bus_dmamap_t map;
	void *mapstore;

	DPRINTF(("%s: t = %p, size = %ld, nsegments = %d, maxsegsz = %ld,"
		 " boundary = %ld, flags = %x\n",
		 __func__, t, size, nsegments, maxsegsz, boundary, flags));

	/*
	 * Allocate and initialize the DMA map.  The end of the map is
	 * a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.  bus_dmamap_t includes one
	 * bus_dma_segment_t already, hence the (nsegments - 1).
	 *
	 * Note that we don't preserve the WAITOK and NOWAIT flags.
	 * ALLOCNOW is preserved so that others know these resources
	 * have already been reserved and must not be freed.
	 */
	mapstore = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
			  (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP);
	if (mapstore == NULL)
		return ENOMEM;

	DPRINTF(("%s: dmamp = %p\n", __func__, mapstore));

	map = (bus_dmamap_t)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK | BUS_DMA_NOWAIT);

	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return 0;
}

/*
 * Destroy a DMA map.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	DPRINTF(("%s: t = %p, map = %p\n", __func__, t, map));

	kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
}

static inline int
_bus_dmamap_load_paddr(bus_dma_tag_t t, bus_dmamap_t map,
    paddr_t paddr, vaddr_t vaddr, int size, int *segp, paddr_t *lastaddrp,
    int first)
{
	bus_dma_segment_t * const segs = map->dm_segs;
	bus_addr_t bmask = ~(map->_dm_boundary - 1);
	bus_addr_t lastaddr;
	int nseg;
	int sgsize;

	nseg = *segp;
	lastaddr = *lastaddrp;

	DPRINTF(("%s: t = %p, map = %p, paddr = 0x%08lx,"
		 " vaddr = 0x%08lx, size = %d\n",
		 __func__, t, map, paddr, vaddr, size));
	DPRINTF(("%s: nseg = %d, bmask = 0x%08lx, lastaddr = 0x%08lx\n",
		 __func__, nseg, bmask, lastaddr));

	do {
		sgsize = size;

		/*
		 * Make sure we don't cross any boundaries.
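		 * For example, with _dm_boundary = 0x1000 and
		 * paddr = 0x4000f00, baddr below computes to 0x4001000,
		 * so sgsize is clipped to 0x100 and the remainder is
		 * carried into the next loop iteration.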
		 */
		if (map->_dm_boundary > 0) {
			bus_addr_t baddr; /* next boundary address */

			baddr = (paddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - paddr))
				sgsize = (baddr - paddr);
		}

		DPRINTF(("%s: sgsize = %d\n", __func__, sgsize));

		/*
		 * Insert the chunk, coalescing it with the previous
		 * segment if possible.
		 */
		if (first) {
			DPRINTF(("%s: first\n", __func__));
			first = 0;

			segs[nseg].ds_addr = SH3_PHYS_TO_P2SEG(paddr);
			segs[nseg].ds_len = sgsize;
			segs[nseg]._ds_vaddr = vaddr;
		}
		else if ((paddr == lastaddr)
			 && (segs[nseg].ds_len + sgsize <= map->_dm_maxsegsz)
			 && (map->_dm_boundary == 0 ||
			     (segs[nseg].ds_addr & bmask) == (paddr & bmask)))
		{
			DPRINTF(("%s: coalesce\n", __func__));

			segs[nseg].ds_len += sgsize;
		}
		else {
			DPRINTF(("%s: new\n", __func__));

			++nseg;
			if (nseg >= map->_dm_segcnt)
				break;

			segs[nseg].ds_addr = SH3_PHYS_TO_P2SEG(paddr);
			segs[nseg].ds_len = sgsize;
			segs[nseg]._ds_vaddr = vaddr;
		}

		paddr += sgsize;
		vaddr += sgsize;
		size -= sgsize;
		lastaddr = paddr;

		DPRINTF(("%s: lastaddr = 0x%08lx, paddr = 0x%08lx,"
			 " vaddr = 0x%08lx, size = %d\n",
			 __func__, lastaddr, paddr, vaddr, size));
	} while (size > 0);

	DPRINTF(("%s: nseg = %d\n", __func__, nseg));

	*segp = nseg;
	*lastaddrp = lastaddr;

	if (size != 0) {
		/*
		 * The remainder didn't fit: the map ran out of
		 * segments.
		 */
		return (EFBIG);		/* XXX better return value here? */
	}

	return (0);
}

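/*
 * Load a virtually addressed buffer page by page, translating each
 * page through the owning process's pmap (or the kernel pmap when p
 * is NULL) and handing the physical ranges to
 * _bus_dmamap_load_paddr().
 */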
static inline int
_bus_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	bus_size_t len;
	paddr_t lastaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	pmap_t pmap;
	int first;
	int error;

	DPRINTF(("%s: t = %p, map = %p, buf = %p, buflen = %ld,"
		 " p = %p, flags = %x\n",
		 __func__, t, map, buf, buflen, p, flags));

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	first = 1;
	lastaddr = 0;

	len = buflen;
	while (len > 0) {
		bool mapped;

		mapped = pmap_extract(pmap, vaddr, &curaddr);
		if (!mapped)
			return EFAULT;

		sgsize = PAGE_SIZE - (vaddr & PGOFSET);
		if (len < sgsize)
			sgsize = len;

		error = _bus_dmamap_load_paddr(t, map, curaddr, vaddr, sgsize,
					       segp, &lastaddr, first);
		if (error)
			return error;

		vaddr += sgsize;
		len -= sgsize;
		first = 0;
	}

	return 0;
}

/*
 * Load a DMA map with a linear buffer.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	bus_addr_t addr = (bus_addr_t)buf;
	paddr_t lastaddr;
	int seg;
	int first;
	int error;

	DPRINTF(("%s: t = %p, map = %p, buf = %p, buflen = %ld,"
		 " p = %p, flags = %x\n",
		 __func__, t, map, buf, buflen, p, flags));

	/* make sure that on error we return "no valid mappings" */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	error = 0;
	seg = 0;

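	/*
	 * Buffers that already live in the direct-mapped P1/P2 window
	 * can be translated arithmetically, without consulting the
	 * pmap.
	 */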
	if (SH3_P1SEG_BASE <= addr && addr + buflen <= SH3_P2SEG_END) {
		bus_addr_t curaddr;
		bus_size_t sgsize;
		bus_size_t len = buflen;

		DPRINTF(("%s: P[12]SEG (0x%08lx)\n", __func__, addr));

		first = 1;
		lastaddr = 0;

		while (len > 0) {
			curaddr = SH3_P1SEG_TO_PHYS(addr);

			sgsize = PAGE_SIZE - ((u_long)addr & PGOFSET);
			if (len < sgsize)
				sgsize = len;

			error = _bus_dmamap_load_paddr(t, map,
						       curaddr, addr, sgsize,
						       &seg, &lastaddr, first);
			if (error)
				break;

			addr += sgsize;
			len -= sgsize;
			first = 0;
		}
	}
	else {
		error = _bus_bus_dmamap_load_buffer(t, map, buf, buflen,
						    p, flags, &seg);
	}

	if (error)
		return (error);

	map->dm_nsegs = seg + 1;
	map->dm_mapsize = buflen;
	return 0;
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	struct mbuf *m;
	paddr_t lastaddr;
	int seg;
	int first;
	int error;

	DPRINTF(("%s: t = %p, map = %p, m0 = %p, flags = %x\n",
		 __func__, t, map, m0, flags));

	/* make sure that on error we return "no valid mappings" */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	seg = 0;
	first = 1;
	lastaddr = 0;

	for (m = m0; m != NULL; m = m->m_next) {
		paddr_t paddr;
		vaddr_t vaddr;
		int size;

		if (m->m_len == 0)
			continue;

		vaddr = (vaddr_t)m->m_data;
		size = m->m_len;

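		/*
		 * Mbuf storage in the direct-mapped window (e.g. pool
		 * or cluster pages) can likewise be translated without
		 * a pmap lookup; PMAP_UNMAP_POOLPAGE() does the
		 * va-to-pa arithmetic.
		 */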
		if (SH3_P1SEG_BASE <= vaddr && vaddr < SH3_P3SEG_BASE) {
			paddr = (paddr_t)(PMAP_UNMAP_POOLPAGE(vaddr));
			error = _bus_dmamap_load_paddr(t, map,
						       paddr, vaddr, size,
						       &seg, &lastaddr, first);
			if (error)
				return error;
			first = 0;
		}
		else {
			/* XXX: stolen from load_buffer, need to refactor */
			while (size > 0) {
				bus_size_t sgsize;
				bool mapped;

				mapped = pmap_extract(pmap_kernel(), vaddr,
						      &paddr);
				if (!mapped)
					return EFAULT;

				sgsize = PAGE_SIZE - (vaddr & PGOFSET);
				if (size < sgsize)
					sgsize = size;

				error = _bus_dmamap_load_paddr(t, map,
						paddr, vaddr, sgsize,
						&seg, &lastaddr, first);
				if (error)
					return error;

				vaddr += sgsize;
				size -= sgsize;
				first = 0;
			}
		}
	}

	map->dm_nsegs = seg + 1;
	map->dm_mapsize = m0->m_pkthdr.len;
	return 0;
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{

	panic("_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload a DMA map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	DPRINTF(("%s: t = %p, map = %p\n", __func__, t, map));

	map->dm_nsegs = 0;
	map->dm_mapsize = 0;
}

/*
 * Synchronize a DMA map.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;
	bus_addr_t addr, naddr;
	int i;

	DPRINTF(("%s: t = %p, map = %p, offset = %ld, len = %ld, ops = %x\n",
		 __func__, t, map, offset, len, ops));

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset");
	if ((offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	if (!sh_cache_enable_dcache) {
		/* Nothing to do */
		DPRINTF(("%s: D-cache disabled\n", __func__));
		return;
	}

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		addr = map->dm_segs[i]._ds_vaddr;
		naddr = addr + offset;

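		/*
		 * Addresses in P2 are uncached, so those ranges need
		 * no cache maintenance at all.
		 */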
		if ((naddr >= SH3_P2SEG_BASE)
		 && (naddr + minlen <= SH3_P2SEG_END)) {
			DPRINTF(("%s: P2SEG (0x%08lx)\n", __func__, naddr));
			offset = 0;
			len -= minlen;
			continue;
		}

		DPRINTF(("%s: flushing segment %d "
			 "(0x%lx+%lx, 0x%lx+0x%lx) (remain = %ld)\n",
			 __func__, i,
			 addr, offset, addr, offset + minlen - 1, len));

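		/*
		 * PREREAD invalidates so the CPU will see the DMA'd
		 * data; when the range is not cache-line aligned on a
		 * write-back CPU, write back first so dirty data
		 * sharing a line with the buffer is not lost.
		 * PREWRITE pushes dirty lines out ahead of the
		 * device's read.  The POST operations require no work
		 * here.
		 */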
		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			if (SH_HAS_WRITEBACK_CACHE)
				sh_dcache_wbinv_range(naddr, minlen);
			else
				sh_dcache_inv_range(naddr, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
			if (SH_HAS_WRITEBACK_CACHE &&
			    ((naddr | minlen) & (sh_cache_line_size - 1)) != 0)
				sh_dcache_wbinv_range(naddr, minlen);
			else
				sh_dcache_inv_range(naddr, minlen);
			break;

		case BUS_DMASYNC_PREWRITE:
			if (SH_HAS_WRITEBACK_CACHE)
				sh_dcache_wb_range(naddr, minlen);
			break;
		}
		offset = 0;
		len -= minlen;
	}
}

/*
 * Allocate memory safe for DMA.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	extern paddr_t avail_start, avail_end;	/* from pmap.c */
	struct pglist mlist;
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	int curseg, error;

	DPRINTF(("%s: t = %p, size = %ld, alignment = %ld, boundary = %ld,"
		 " segs = %p, nsegs = %d, rsegs = %p, flags = %x\n",
		 __func__, t, size, alignment, boundary,
		 segs, nsegs, rsegs, flags));
	DPRINTF(("%s: avail_start = 0x%08lx, avail_end = 0x%08lx\n",
		 __func__, avail_start, avail_end));

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate the pages from the VM system.
	 */
	error = uvm_pglistalloc(size, avail_start, avail_end - PAGE_SIZE,
	    alignment, boundary, &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;

	DPRINTF(("%s: m = %p, lastaddr = 0x%08lx\n", __func__, m, lastaddr));

	while ((m = TAILQ_NEXT(m, pageq.queue)) != NULL) {
		curaddr = VM_PAGE_TO_PHYS(m);
		DPRINTF(("%s: m = %p, curaddr = 0x%08lx, lastaddr = 0x%08lx\n",
			 __func__, m, curaddr, lastaddr));

		if (curaddr == (lastaddr + PAGE_SIZE)) {
			segs[curseg].ds_len += PAGE_SIZE;
		} else {
			DPRINTF(("%s: new segment\n", __func__));
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	DPRINTF(("%s: curseg = %d, *rsegs = %d\n", __func__, curseg, *rsegs));

	return (0);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	DPRINTF(("%s: t = %p, segs = %p, nsegs = %d\n",
		 __func__, t, segs, nsegs));

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		DPRINTF(("%s: segs[%d]: ds_addr = 0x%08lx, ds_len = %ld\n",
			 __func__, curseg,
			 segs[curseg].ds_addr, segs[curseg].ds_len));

		for (addr = segs[curseg].ds_addr;
		     addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		     addr += PAGE_SIZE)
		{
			m = PHYS_TO_VM_PAGE(addr);
			DPRINTF(("%s: m = %p\n", __func__, m));
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}

	uvm_pglistfree(&mlist);
}

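/*
 * Map DMA-safe memory into kernel virtual address space (or return a
 * direct-mapped address for the single-segment case).  A minimal
 * sketch of the usual alloc/map pairing, where "size" is a
 * hypothetical transfer size and error handling is elided:
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	void *kva;
 *
 *	bus_dmamem_alloc(&evbsh3_bus_dma, size, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
 *	bus_dmamem_map(&evbsh3_bus_dma, &seg, rseg, size, &kva,
 *	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
 */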
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va, topva;
	bus_addr_t addr;
	int curseg;

	DPRINTF(("%s: t = %p, segs = %p, nsegs = %d, size = %d,"
		 " kvap = %p, flags = %x\n",
		 __func__, t, segs, nsegs, size, kvap, flags));

	/*
	 * If we're mapping only a single segment, use a direct-mapped
	 * va, to avoid thrashing the TLB.
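	 * BUS_DMA_COHERENT callers get the uncached P2 alias of the
	 * segment; everyone else gets the cached P1 alias.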
	 */
	if (nsegs == 1) {
		if (flags & BUS_DMA_COHERENT)
			*kvap = (void *)SH3_PHYS_TO_P2SEG(segs[0].ds_addr);
		else
			*kvap = (void *)SH3_PHYS_TO_P1SEG(segs[0].ds_addr);

		DPRINTF(("%s: addr = 0x%08lx, kva = %p\n",
			 __func__, segs[0].ds_addr, *kvap));
		return 0;
	}

	/* Always round the size. */
	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY
			  | ((flags & BUS_DMA_NOWAIT) ? UVM_KMF_NOWAIT : 0));
	if (va == 0)
		return (ENOMEM);
	topva = va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		DPRINTF(("%s: segs[%d]: ds_addr = 0x%08lx, ds_len = %ld\n",
			 __func__, curseg,
			 segs[curseg].ds_addr, segs[curseg].ds_len));

		for (addr = segs[curseg].ds_addr;
		     addr < segs[curseg].ds_addr + segs[curseg].ds_len;
		     addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE)
		{
			if (__predict_false(size == 0))
				panic("_bus_dmamem_map: size botch");

			pmap_kenter_pa(va, addr,
				       VM_PROT_READ | VM_PROT_WRITE, 0);
		}
	}

	pmap_update(pmap_kernel());
	*kvap = (void *)topva;
	return (0);
}

void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{
	vaddr_t vaddr = (vaddr_t)kva;

	DPRINTF(("%s: t = %p, kva = %p, size = %d\n",
		 __func__, t, kva, size));

#ifdef DIAGNOSTIC
	if (vaddr & PAGE_MASK)
		panic("_bus_dmamem_unmap");
#endif

	/* nothing to do if we mapped it via P1SEG or P2SEG */
	if (SH3_P1SEG_BASE <= vaddr && vaddr <= SH3_P2SEG_END)
		return;

	size = round_page(size);
	pmap_kremove(vaddr, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, vaddr, size, UVM_KMF_VAONLY);
}

paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{

	/* Not implemented. */
	return (paddr_t)(-1);
}