/*	$NetBSD: gapspci_dma.c,v 1.13 2006/08/07 17:36:53 tsutsui Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Bus DMA implementation for the SEGA GAPS PCI bridge.
 *
 * NOTE: We only implement a small subset of what the bus_dma(9)
 * API specifies.  Right now, the GAPS PCI bridge is only used for
 * the Dreamcast Broadband Adapter, so we only provide what the
 * pci(4) and rtk(4) drivers need.
 */
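
/*
 * In this implementation every DMA map is backed by a region of the
 * bridge's 32k SRAM buffer: loading a map reserves a region from the
 * extent map created in gaps_dma_init(), and gaps_dmamap_sync()
 * bounces data between the caller's buffer and the SRAM with
 * bus_space_{read,write}_region_1().
 */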

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: gapspci_dma.c,v 1.13 2006/08/07 17:36:53 tsutsui Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/extent.h>
#include <sys/malloc.h>

#include <machine/cpu.h>
#include <machine/bus.h>

#include <dev/pci/pcivar.h>

#include <dreamcast/dev/g2/gapspcivar.h>

#include <uvm/uvm_extern.h>

int	gaps_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
	    bus_size_t, int, bus_dmamap_t *);
void	gaps_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	gaps_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
	    struct proc *, int);
int	gaps_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t, struct mbuf *, int);
int	gaps_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t, struct uio *, int);
int	gaps_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *,
	    int, bus_size_t, int);
void	gaps_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	gaps_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	    bus_size_t, int);

int	gaps_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size,
	    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
	    int nsegs, int *rsegs, int flags);
void	gaps_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs);
int	gaps_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
	    size_t size, caddr_t *kvap, int flags);
void	gaps_dmamem_unmap(bus_dma_tag_t tag, caddr_t kva, size_t size);
paddr_t	gaps_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
	    off_t off, int prot, int flags);

void
gaps_dma_init(struct gaps_softc *sc)
{
	bus_dma_tag_t t = &sc->sc_dmat;

	memset(t, 0, sizeof(*t));

	t->_cookie = sc;
	t->_dmamap_create = gaps_dmamap_create;
	t->_dmamap_destroy = gaps_dmamap_destroy;
	t->_dmamap_load = gaps_dmamap_load;
	t->_dmamap_load_mbuf = gaps_dmamap_load_mbuf;
	t->_dmamap_load_uio = gaps_dmamap_load_uio;
	t->_dmamap_load_raw = gaps_dmamap_load_raw;
	t->_dmamap_unload = gaps_dmamap_unload;
	t->_dmamap_sync = gaps_dmamap_sync;

	t->_dmamem_alloc = gaps_dmamem_alloc;
	t->_dmamem_free = gaps_dmamem_free;
	t->_dmamem_map = gaps_dmamem_map;
	t->_dmamem_unmap = gaps_dmamem_unmap;
	t->_dmamem_mmap = gaps_dmamem_mmap;

	/*
	 * The GAPS PCI bridge has 32k of DMA memory.  We manage it
	 * with an extent map.
	 */
	sc->sc_dma_ex = extent_create("gaps dma",
	    sc->sc_dmabase, sc->sc_dmabase + (sc->sc_dmasize - 1),
	    M_DEVBUF, NULL, 0, EX_WAITOK | EX_NOCOALESCE);

	if (bus_space_map(sc->sc_memt, sc->sc_dmabase, sc->sc_dmasize,
	    0, &sc->sc_dma_memh) != 0)
		panic("gaps_dma_init: can't map SRAM buffer");
}

/*
 * A GAPS DMA map -- has the standard DMA map, plus some extra
 * housekeeping data.
 */
struct gaps_dmamap {
	struct dreamcast_bus_dmamap gd_dmamap;
	void *gd_origbuf;
	int gd_buftype;
};

#define	GAPS_DMA_BUFTYPE_INVALID	0
#define	GAPS_DMA_BUFTYPE_LINEAR		1
#define	GAPS_DMA_BUFTYPE_MBUF		2

int
gaps_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap;
	bus_dmamap_t map;

	/*
	 * Allocate and initialize the DMA map.  The end of the map is
	 * a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.  Since the DMA map always includes
	 * one segment, and we only support one segment, this is really
	 * easy.
	 *
	 * Note that we don't preserve the WAITOK or NOWAIT flags.
	 * Preserving ALLOCNOW notifies others that we've reserved
	 * these resources and they are not to be freed.
	 */

	gmap = malloc(sizeof(*gmap), M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	if (gmap == NULL)
		return ENOMEM;

	memset(gmap, 0, sizeof(*gmap));

	gmap->gd_buftype = GAPS_DMA_BUFTYPE_INVALID;

	map = &gmap->gd_dmamap;

	map->_dm_size = size;
	map->_dm_segcnt = 1;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;

	if (flags & BUS_DMA_ALLOCNOW) {
		u_long res;
		int error;

		error = extent_alloc(sc->sc_dma_ex, size, 1024 /* XXX */,
		    map->_dm_boundary,
		    (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK, &res);
		if (error) {
			free(gmap, M_DMAMAP);
			return error;
		}

		map->dm_segs[0].ds_addr = res;
		map->dm_segs[0].ds_len = size;

		map->dm_mapsize = size;
		map->dm_nsegs = 1;
	} else {
		map->dm_mapsize = 0;		/* no valid mappings */
		map->dm_nsegs = 0;
	}

	*dmamap = map;

	return 0;
}

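/*
 * Destroy a DMA map.  Maps created with BUS_DMA_ALLOCNOW hold a
 * permanent SRAM reservation, which is returned to the extent map
 * before the map itself is freed.
 */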
void
gaps_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct gaps_softc *sc = t->_cookie;

	if (map->_dm_flags & BUS_DMA_ALLOCNOW) {
		(void) extent_free(sc->sc_dma_ex,
		    map->dm_segs[0].ds_addr,
		    map->dm_mapsize, EX_NOWAIT);
	}
	free(map, M_DMAMAP);
}

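/*
 * Load a linear buffer.  This only reserves a suitably sized region
 * of bridge SRAM and records the caller's buffer; the data itself is
 * copied to and from the SRAM in gaps_dmamap_sync().
 */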
int
gaps_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *addr,
    bus_size_t size, struct proc *p, int flags)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;
	u_long res;
	int error;

	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
		/*
		 * Make sure that on error condition we return
		 * "no valid mappings".
		 */
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
		KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	}

	/* XXX Don't support DMA to process space right now. */
	if (p != NULL)
		return EINVAL;

	if (size > map->_dm_size)
		return EINVAL;

	error = extent_alloc(sc->sc_dma_ex, size, 1024 /* XXX */,
	    map->_dm_boundary,
	    (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK, &res);
	if (error)
		return error;

	map->dm_segs[0].ds_addr = res;
	map->dm_segs[0].ds_len = size;

	gmap->gd_origbuf = addr;
	gmap->gd_buftype = GAPS_DMA_BUFTYPE_LINEAR;

	map->dm_mapsize = size;
	map->dm_nsegs = 1;

	return 0;
}

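/*
 * Load an mbuf chain.  A single contiguous SRAM region is reserved
 * for the whole packet; gaps_dmamap_sync() flattens the chain into it
 * on PREWRITE and copies it back into the chain on POSTREAD.
 */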
int
gaps_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;
	u_long res;
	int error;

	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
		/*
		 * Make sure that on error condition we return
		 * "no valid mappings".
		 */
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
		KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	}

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("gaps_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	error = extent_alloc(sc->sc_dma_ex, m0->m_pkthdr.len, 1024 /* XXX */,
	    map->_dm_boundary,
	    (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK, &res);
	if (error)
		return error;

	map->dm_segs[0].ds_addr = res;
	map->dm_segs[0].ds_len = m0->m_pkthdr.len;

	gmap->gd_origbuf = m0;
	gmap->gd_buftype = GAPS_DMA_BUFTYPE_MBUF;

	map->dm_mapsize = m0->m_pkthdr.len;
	map->dm_nsegs = 1;

	return 0;
}

int
gaps_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{

	printf("gaps_dmamap_load_uio: not implemented\n");
	return EINVAL;
}

int
gaps_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	printf("gaps_dmamap_load_raw: not implemented\n");
	return EINVAL;
}

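/*
 * Unload a DMA map: release the SRAM region reserved at load time.
 * ALLOCNOW maps keep their reservation until gaps_dmamap_destroy().
 */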
void
gaps_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;

	if (gmap->gd_buftype == GAPS_DMA_BUFTYPE_INVALID) {
		printf("gaps_dmamap_unload: DMA map not loaded!\n");
		return;
	}

	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
		(void) extent_free(sc->sc_dma_ex,
		    map->dm_segs[0].ds_addr,
		    map->dm_mapsize, EX_NOWAIT);

		map->dm_maxsegsz = map->_dm_maxmaxsegsz;
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
	}

	gmap->gd_buftype = GAPS_DMA_BUFTYPE_INVALID;
}

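/*
 * Synchronize a DMA map.  Since all DMA is bounced through the bridge
 * SRAM, PREWRITE copies the caller's data into the SRAM and POSTREAD
 * copies it back out; PREREAD and POSTWRITE need no work here.
 */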
void
gaps_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;
	bus_addr_t dmaoff = map->dm_segs[0].ds_addr - sc->sc_dmabase;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("gaps_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize) {
			printf("offset 0x%lx mapsize 0x%lx\n",
			    offset, map->dm_mapsize);
			panic("gaps_dmamap_sync: bad offset");
		}
		if (len == 0 || (offset + len) > map->dm_mapsize) {
			printf("len 0x%lx offset 0x%lx mapsize 0x%lx\n",
			    len, offset, map->dm_mapsize);
			panic("gaps_dmamap_sync: bad length");
		}
	}
#endif

	switch (gmap->gd_buftype) {
	case GAPS_DMA_BUFTYPE_INVALID:
		printf("gaps_dmamap_sync: DMA map is not loaded!\n");
		return;

	case GAPS_DMA_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the SRAM buffer.
			 */
			bus_space_write_region_1(sc->sc_memt,
			    sc->sc_dma_memh,
			    dmaoff + offset,
			    (uint8_t *)gmap->gd_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the SRAM buffer to the caller's buffer.
			 */
			bus_space_read_region_1(sc->sc_memt,
			    sc->sc_dma_memh,
			    dmaoff + offset,
			    (uint8_t *)gmap->gd_origbuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case GAPS_DMA_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = gmap->gd_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer into the SRAM buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				bus_space_write_region_1(sc->sc_memt,
				    sc->sc_dma_memh, dmaoff + offset,
				    mtod(m, uint8_t *) + moff, minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the SRAM buffer into the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				bus_space_read_region_1(sc->sc_memt,
				    sc->sc_dma_memh, dmaoff + offset,
				    mtod(m, uint8_t *) + moff, minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	default:
		printf("unknown buffer type %d\n", gmap->gd_buftype);
		panic("gaps_dmamap_sync");
	}
}

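/*
 * Allocate memory safe for DMA.  Pages are taken from the VM system
 * and physically contiguous runs are coalesced into segments.
 */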
int
gaps_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	extern paddr_t avail_start, avail_end;	/* from pmap.c */

	struct pglist mlist;
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate the pages from the VM system.
	 */
	error = uvm_pglistalloc(size, avail_start, avail_end - PAGE_SIZE,
	    alignment, boundary, &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return error;

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return 0;
}

void
gaps_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct pglist mlist;
	struct vm_page *m;
	bus_addr_t addr;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		     addr < segs[curseg].ds_addr + segs[curseg].ds_len;
		     addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

int
gaps_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * If we're only mapping 1 segment, use P2SEG, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		*kvap = (caddr_t)SH3_PHYS_TO_P2SEG(segs[0].ds_addr);
		return 0;
	}

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return ENOMEM;

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		     addr < segs[curseg].ds_addr + segs[curseg].ds_len;
		     addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("gaps_dmamem_map: size botch");
			pmap_kenter_pa(va, addr,
			    VM_PROT_READ | VM_PROT_WRITE);
		}
	}
	pmap_update(pmap_kernel());

	return 0;
}

void
gaps_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long) kva & PAGE_MASK)
		panic("gaps_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with P2SEG.
	 */
	if (kva >= (caddr_t)SH3_P2SEG_BASE &&
	    kva <= (caddr_t)SH3_P2SEG_END)
		return;

	size = round_page(size);
	pmap_kremove((vaddr_t) kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t) kva, size, UVM_KMF_VAONLY);
}

paddr_t
gaps_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{

	/* Not implemented. */
	return -1;
}
    642