      1 /*	$NetBSD: gapspci_dma.c,v 1.9 2005/02/19 15:37:35 tsutsui Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2001 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by the NetBSD
     21  *	Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * Bus DMA implementation for the SEGA GAPS PCI bridge.
     41  *
      42  * NOTE: We only implement a small subset of what the bus_dma(9)
      43  * API specifies.  Right now, the GAPS PCI bridge is only used for
      44  * the Dreamcast Broadband Adapter, so we only provide what the
      45  * pci(4) and rtk(4) drivers need.
     46  */
     47 
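         /*
          * Illustrative call sequence only (a sketch, not lifted from any
          * driver): a consumer such as rtk(4) uses this tag roughly as
          * follows for a single device-write (transmit) buffer "buf" of
          * "len" bytes ("map", "buf" and "len" are hypothetical names):
          *
          *	bus_dmamap_create(t, len, 1, len, 0, BUS_DMA_NOWAIT, &map);
          *	bus_dmamap_load(t, map, buf, len, NULL, BUS_DMA_NOWAIT);
          *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);
          *	... start the DMA using map->dm_segs[0].ds_addr ...
          *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);
          *	bus_dmamap_unload(t, map);
          *	bus_dmamap_destroy(t, map);
          */
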
     48 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
     49 __KERNEL_RCSID(0, "$NetBSD: gapspci_dma.c,v 1.9 2005/02/19 15:37:35 tsutsui Exp $");
     50 
     51 #include <sys/param.h>
     52 #include <sys/systm.h>
     53 #include <sys/device.h>
     54 #include <sys/mbuf.h>
     55 #include <sys/extent.h>
     56 #include <sys/malloc.h>
     57 
     58 #include <machine/cpu.h>
     59 #include <machine/bus.h>
     60 
     61 #include <dev/pci/pcivar.h>
     62 
     63 #include <dreamcast/dev/g2/gapspcivar.h>
     64 
     65 #include <uvm/uvm_extern.h>
     66 
     67 int	gaps_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
     68 	    bus_size_t, int, bus_dmamap_t *);
     69 void	gaps_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
     70 int	gaps_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
     71 	    struct proc *, int);
     72 int	gaps_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t, struct mbuf *, int);
     73 int	gaps_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t, struct uio *, int);
     74 int	gaps_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *,
     75 	    int, bus_size_t, int);
     76 void	gaps_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
     77 void	gaps_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
     78 	    bus_size_t, int);
     79 
     80 int	gaps_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size,
     81 	    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
     82 	    int nsegs, int *rsegs, int flags);
     83 void	gaps_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs);
     84 int	gaps_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
     85 	    size_t size, caddr_t *kvap, int flags);
     86 void	gaps_dmamem_unmap(bus_dma_tag_t tag, caddr_t kva, size_t size);
     87 paddr_t	gaps_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
     88 	    off_t off, int prot, int flags);
     89 
     90 void
     91 gaps_dma_init(struct gaps_softc *sc)
     92 {
     93 	bus_dma_tag_t t = &sc->sc_dmat;
     94 
     95 	memset(t, 0, sizeof(*t));
     96 
     97 	t->_cookie = sc;
     98 	t->_dmamap_create = gaps_dmamap_create;
     99 	t->_dmamap_destroy = gaps_dmamap_destroy;
    100 	t->_dmamap_load = gaps_dmamap_load;
    101 	t->_dmamap_load_mbuf = gaps_dmamap_load_mbuf;
    102 	t->_dmamap_load_uio = gaps_dmamap_load_uio;
    103 	t->_dmamap_load_raw = gaps_dmamap_load_raw;
    104 	t->_dmamap_unload = gaps_dmamap_unload;
    105 	t->_dmamap_sync = gaps_dmamap_sync;
    106 
    107 	t->_dmamem_alloc = gaps_dmamem_alloc;
    108 	t->_dmamem_free = gaps_dmamem_free;
    109 	t->_dmamem_map = gaps_dmamem_map;
    110 	t->_dmamem_unmap = gaps_dmamem_unmap;
    111 	t->_dmamem_mmap = gaps_dmamem_mmap;
    112 
    113 	/*
    114 	 * The GAPS PCI bridge has 32k of DMA memory.  We manage it
    115 	 * with an extent map.
    116 	 */
    117 	sc->sc_dma_ex = extent_create("gaps dma",
    118 	    sc->sc_dmabase, sc->sc_dmabase + (sc->sc_dmasize - 1),
     119 	    M_DEVBUF, NULL, 0, EX_WAITOK | EX_NOCOALESCE);
    120 
    121 	if (bus_space_map(sc->sc_memt, sc->sc_dmabase, sc->sc_dmasize,
    122 	    0, &sc->sc_dma_memh) != 0)
    123 		panic("gaps_dma_init: can't map SRAM buffer");
    124 }
    125 
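         /*
          * Every address the dmamap load routines below hand back in
          * dm_segs[0].ds_addr comes from this extent map, i.e. it is a bus
          * address inside the 32k SRAM window mapped above; the data itself
          * is bounced through that window by gaps_dmamap_sync().
          */
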
    126 /*
    127  * A GAPS DMA map -- has the standard DMA map, plus some extra
    128  * housekeeping data.
    129  */
    130 struct gaps_dmamap {
    131 	struct dreamcast_bus_dmamap gd_dmamap;
    132 	void *gd_origbuf;
    133 	int gd_buftype;
    134 };
    135 
    136 #define	GAPS_DMA_BUFTYPE_INVALID	0
    137 #define	GAPS_DMA_BUFTYPE_LINEAR		1
    138 #define	GAPS_DMA_BUFTYPE_MBUF		2
    139 
    140 int
    141 gaps_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    142     bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
    143 {
    144 	struct gaps_softc *sc = t->_cookie;
    145 	struct gaps_dmamap *gmap;
    146 	bus_dmamap_t map;
    147 
    148 	/*
     149 	 * Allocate and initialize the DMA map.  The end of the map is
    150 	 * a variable-sized array of segments, so we allocate enough
    151 	 * room for them in one shot.  Since the DMA map always includes
    152 	 * one segment, and we only support one segment, this is really
    153 	 * easy.
    154 	 *
    155 	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
    156 	 * of ALLOCNOW notifies others that we've reserved these resources
    157 	 * and they are not to be freed.
    158 	 */
    159 
    160 	gmap = malloc(sizeof(*gmap), M_DMAMAP,
    161 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
    162 	if (gmap == NULL)
    163 		return ENOMEM;
    164 
    165 	memset(gmap, 0, sizeof(*gmap));
    166 
    167 	gmap->gd_buftype = GAPS_DMA_BUFTYPE_INVALID;
    168 
    169 	map = &gmap->gd_dmamap;
    170 
    171 	map->_dm_size = size;
    172 	map->_dm_segcnt = 1;
    173 	map->_dm_maxsegsz = maxsegsz;
    174 	map->_dm_boundary = boundary;
    175 	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
    176 
    177 	if (flags & BUS_DMA_ALLOCNOW) {
    178 		u_long res;
    179 		int error;
    180 
    181 		error = extent_alloc(sc->sc_dma_ex, size, 1024 /* XXX */,
    182 		    map->_dm_boundary,
    183 		    (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK, &res);
    184 		if (error) {
     185 			free(gmap, M_DMAMAP);
    186 			return error;
    187 		}
    188 
    189 		map->dm_segs[0].ds_addr = res;
    190 		map->dm_segs[0].ds_len = size;
    191 
    192 		map->dm_mapsize = size;
    193 		map->dm_nsegs = 1;
    194 	} else {
    195 		map->dm_mapsize = 0;		/* no valid mappings */
    196 		map->dm_nsegs = 0;
    197 	}
    198 
    199 	*dmamap = map;
    200 
    201 	return 0;
    202 }
    203 
    204 void
    205 gaps_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
    206 {
    207 	struct gaps_softc *sc = t->_cookie;
    208 
    209 	if (map->_dm_flags & BUS_DMA_ALLOCNOW) {
    210 		(void) extent_free(sc->sc_dma_ex,
    211 		    map->dm_segs[0].ds_addr,
    212 		    map->dm_mapsize, EX_NOWAIT);
    213 	}
    214 	free(map, M_DMAMAP);
    215 }
    216 
    217 int
    218 gaps_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *addr,
    219     bus_size_t size, struct proc *p, int flags)
    220 {
    221 	struct gaps_softc *sc = t->_cookie;
    222 	struct gaps_dmamap *gmap = (void *) map;
    223 	u_long res;
    224 	int error;
    225 
    226 	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
    227 		/*
    228 		 * Make sure that on error condition we return
    229 		 * "no valid mappings".
    230 		 */
    231 		map->dm_mapsize = 0;
    232 		map->dm_nsegs = 0;
    233 	}
    234 
    235 	/* XXX Don't support DMA to process space right now. */
    236 	if (p != NULL)
    237 		return EINVAL;
    238 
    239 	if (size > map->_dm_size)
    240 		return EINVAL;
    241 
    242 	error = extent_alloc(sc->sc_dma_ex, size, 1024 /* XXX */,
    243 	    map->_dm_boundary,
    244 	    (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK, &res);
    245 	if (error)
    246 		return error;
    247 
    248 	map->dm_segs[0].ds_addr = res;
    249 	map->dm_segs[0].ds_len = size;
    250 
    251 	gmap->gd_origbuf = addr;
    252 	gmap->gd_buftype = GAPS_DMA_BUFTYPE_LINEAR;
    253 
    254 	map->dm_mapsize = size;
    255 	map->dm_nsegs = 1;
    256 
    257 	return 0;
    258 }
    259 
    260 int
    261 gaps_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    262     int flags)
    263 {
    264 	struct gaps_softc *sc = t->_cookie;
    265 	struct gaps_dmamap *gmap = (void *) map;
    266 	u_long res;
    267 	int error;
    268 
    269 	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
    270 		/*
    271 		 * Make sure that on error condition we return
    272 		 * "no valid mappings".
    273 		 */
    274 		map->dm_mapsize = 0;
    275 		map->dm_nsegs = 0;
    276 	}
    277 
    278 #ifdef DIAGNOSTIC
    279 	if ((m0->m_flags & M_PKTHDR) == 0)
    280 		panic("gaps_dmamap_load_mbuf: no packet header");
    281 #endif
    282 
    283 	if (m0->m_pkthdr.len > map->_dm_size)
    284 		return EINVAL;
    285 
    286 	error = extent_alloc(sc->sc_dma_ex, m0->m_pkthdr.len, 1024 /* XXX */,
    287 	    map->_dm_boundary,
    288 	    (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK, &res);
    289 	if (error)
    290 		return error;
    291 
    292 	map->dm_segs[0].ds_addr = res;
    293 	map->dm_segs[0].ds_len = m0->m_pkthdr.len;
    294 
    295 	gmap->gd_origbuf = m0;
    296 	gmap->gd_buftype = GAPS_DMA_BUFTYPE_MBUF;
    297 
    298 	map->dm_mapsize = m0->m_pkthdr.len;
    299 	map->dm_nsegs = 1;
    300 
    301 	return 0;
    302 }
    303 
    304 int
    305 gaps_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    306     int flags)
    307 {
    308 
    309 	printf("gaps_dmamap_load_uio: not implemented\n");
    310 	return EINVAL;
    311 }
    312 
    313 int
    314 gaps_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    315     bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
    316 {
    317 
    318 	printf("gaps_dmamap_load_raw: not implemented\n");
    319 	return EINVAL;
    320 }
    321 
    322 void
    323 gaps_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
    324 {
    325 	struct gaps_softc *sc = t->_cookie;
    326 	struct gaps_dmamap *gmap = (void *) map;
    327 
    328 	if (gmap->gd_buftype == GAPS_DMA_BUFTYPE_INVALID) {
    329 		printf("gaps_dmamap_unload: DMA map not loaded!\n");
    330 		return;
    331 	}
    332 
    333 	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
    334 		(void) extent_free(sc->sc_dma_ex,
    335 		    map->dm_segs[0].ds_addr,
    336 		    map->dm_mapsize, EX_NOWAIT);
    337 
    338 		map->dm_mapsize = 0;
    339 		map->dm_nsegs = 0;
    340 	}
    341 
    342 	gmap->gd_buftype = GAPS_DMA_BUFTYPE_INVALID;
    343 }
    344 
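         /*
          * The sync routine does the actual bounce copies: PREWRITE copies
          * the caller's buffer into the SRAM window before the device reads
          * it, and POSTREAD copies the SRAM window back out after the device
          * has written it; PREREAD and POSTWRITE need no work.  dmaoff below
          * is the offset of this map's allocation within the window.
          */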
    345 void
    346 gaps_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    347     bus_size_t len, int ops)
    348 {
    349 	struct gaps_softc *sc = t->_cookie;
    350 	struct gaps_dmamap *gmap = (void *) map;
    351 	bus_addr_t dmaoff = map->dm_segs[0].ds_addr - sc->sc_dmabase;
    352 
    353 	/*
    354 	 * Mixing PRE and POST operations is not allowed.
    355 	 */
    356 	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
    357 	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
    358 		panic("gaps_dmamap_sync: mix PRE and POST");
    359 
    360 #ifdef DIAGNOSTIC
    361 	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
    362 		if (offset >= map->dm_mapsize) {
    363 			printf("offset 0x%lx mapsize 0x%lx\n",
    364 			    offset, map->dm_mapsize);
    365 			panic("gaps_dmamap_sync: bad offset");
    366 		}
    367 		if (len == 0 || (offset + len) > map->dm_mapsize) {
    368 			printf("len 0x%lx offset 0x%lx mapsize 0x%lx\n",
    369 			    len, offset, map->dm_mapsize);
    370 			panic("gaps_dmamap_sync: bad length");
    371 		}
    372 	}
    373 #endif
    374 
    375 	switch (gmap->gd_buftype) {
    376 	case GAPS_DMA_BUFTYPE_INVALID:
    377 		printf("gaps_dmamap_sync: DMA map is not loaded!\n");
    378 		return;
    379 
    380 	case GAPS_DMA_BUFTYPE_LINEAR:
    381 		/*
    382 		 * Nothing to do for pre-read.
    383 		 */
    384 
    385 		if (ops & BUS_DMASYNC_PREWRITE) {
    386 			/*
    387 			 * Copy the caller's buffer to the SRAM buffer.
    388 			 */
    389 			bus_space_write_region_1(sc->sc_memt,
    390 			    sc->sc_dma_memh,
    391 			    dmaoff + offset,
    392 			    (uint8_t *)gmap->gd_origbuf + offset, len);
    393 		}
    394 
    395 		if (ops & BUS_DMASYNC_POSTREAD) {
    396 			/*
    397 			 * Copy the SRAM buffer to the caller's buffer.
    398 			 */
    399 			bus_space_read_region_1(sc->sc_memt,
    400 			    sc->sc_dma_memh,
    401 			    dmaoff + offset,
    402 			    (uint8_t *)gmap->gd_origbuf + offset, len);
    403 		}
    404 
    405 		/*
    406 		 * Nothing to do for post-write.
    407 		 */
    408 		break;
    409 
    410 	case GAPS_DMA_BUFTYPE_MBUF:
    411 	    {
    412 		struct mbuf *m, *m0 = gmap->gd_origbuf;
    413 		bus_size_t minlen, moff;
    414 
    415 		/*
    416 		 * Nothing to do for pre-read.
    417 		 */
    418 
    419 		if (ops & BUS_DMASYNC_PREWRITE) {
    420 			/*
    421 			 * Copy the caller's buffer into the SRAM buffer.
    422 			 */
    423 			for (moff = offset, m = m0; m != NULL && len != 0;
    424 			     m = m->m_next) {
    425 				/* Find the beginning mbuf. */
    426 				if (moff >= m->m_len) {
    427 					moff -= m->m_len;
    428 					continue;
    429 				}
    430 
    431 				/*
    432 				 * Now at the first mbuf to sync; nail
    433 				 * each one until we have exhausted the
    434 				 * length.
    435 				 */
    436 				minlen = len < m->m_len - moff ?
    437 				    len : m->m_len - moff;
    438 
    439 				bus_space_write_region_1(sc->sc_memt,
    440 				    sc->sc_dma_memh, dmaoff + offset,
    441 				    mtod(m, uint8_t *) + moff, minlen);
    442 
    443 				moff = 0;
    444 				len -= minlen;
    445 				offset += minlen;
    446 			}
    447 		}
    448 
    449 		if (ops & BUS_DMASYNC_POSTREAD) {
    450 			/*
    451 			 * Copy the SRAM buffer into the caller's buffer.
    452 			 */
    453 			for (moff = offset, m = m0; m != NULL && len != 0;
    454 			     m = m->m_next) {
    455 				/* Find the beginning mbuf. */
    456 				if (moff >= m->m_len) {
    457 					moff -= m->m_len;
    458 					continue;
    459 				}
    460 
    461 				/*
    462 				 * Now at the first mbuf to sync; nail
    463 				 * each one until we have exhausted the
    464 				 * length.
    465 				 */
    466 				minlen = len < m->m_len - moff ?
    467 				    len : m->m_len - moff;
    468 
    469 				bus_space_read_region_1(sc->sc_memt,
    470 				    sc->sc_dma_memh, dmaoff + offset,
    471 				    mtod(m, uint8_t *) + moff, minlen);
    472 
    473 				moff = 0;
    474 				len -= minlen;
    475 				offset += minlen;
    476 			}
    477 		}
    478 
    479 		/*
    480 		 * Nothing to do for post-write.
    481 		 */
    482 		break;
    483 	    }
    484 
    485 	default:
    486 		printf("unknown buffer type %d\n", gmap->gd_buftype);
    487 		panic("gaps_dmamap_sync");
    488 	}
    489 }
    490 
    491 int
    492 gaps_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    493     bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    494     int flags)
    495 {
    496 	extern paddr_t avail_start, avail_end;	/* from pmap.c */
    497 
    498 	struct pglist mlist;
    499 	paddr_t curaddr, lastaddr;
    500 	struct vm_page *m;
    501 	int curseg, error;
    502 
    503 	/* Always round the size. */
    504 	size = round_page(size);
    505 
    506 	/*
    507 	 * Allocate the pages from the VM system.
    508 	 */
    509 	error = uvm_pglistalloc(size, avail_start, avail_end - PAGE_SIZE,
    510 	    alignment, boundary, &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
    511 	if (error)
    512 		return error;
    513 
    514 	/*
    515 	 * Compute the location, size, and number of segments actually
    516 	 * returned by the VM code.
    517 	 */
    518 	m = mlist.tqh_first;
    519 	curseg = 0;
    520 	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
    521 	segs[curseg].ds_len = PAGE_SIZE;
    522 	m = TAILQ_NEXT(m, pageq);
    523 
    524 	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
    525 		curaddr = VM_PAGE_TO_PHYS(m);
    526 		if (curaddr == (lastaddr + PAGE_SIZE))
    527 			segs[curseg].ds_len += PAGE_SIZE;
    528 		else {
    529 			curseg++;
    530 			segs[curseg].ds_addr = curaddr;
    531 			segs[curseg].ds_len = PAGE_SIZE;
    532 		}
    533 		lastaddr = curaddr;
    534 	}
    535 
    536 	*rsegs = curseg + 1;
    537 
    538 	return 0;
    539 }
    540 
    541 void
    542 gaps_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
    543 {
    544 	struct pglist mlist;
    545 	struct vm_page *m;
    546 	bus_addr_t addr;
    547 	int curseg;
    548 
    549 	/*
    550 	 * Build a list of pages to free back to the VM system.
    551 	 */
    552 	TAILQ_INIT(&mlist);
    553 	for (curseg = 0; curseg < nsegs; curseg++) {
    554 		for (addr = segs[curseg].ds_addr;
    555 		     addr < segs[curseg].ds_addr + segs[curseg].ds_len;
    556 		     addr += PAGE_SIZE) {
    557 			m = PHYS_TO_VM_PAGE(addr);
    558 			TAILQ_INSERT_TAIL(&mlist, m, pageq);
    559 		}
    560 	}
    561 
    562 	uvm_pglistfree(&mlist);
    563 }
    564 
    565 int
    566 gaps_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    567     size_t size, caddr_t *kvap, int flags)
    568 {
    569 	vaddr_t va;
    570 	bus_addr_t addr;
    571 	int curseg;
    572 
    573 	/*
    574 	 * If we're only mapping 1 segment, use P2SEG, to avoid
    575 	 * TLB thrashing.
    576 	 */
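         	/*
         	 * (P2SEG is the SuperH unmapped, non-cacheable segment, so
         	 * the address returned here needs neither TLB entries nor
         	 * cache synchronization.)
         	 */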
    577 	if (nsegs == 1) {
    578 		*kvap = (caddr_t)SH3_PHYS_TO_P2SEG(segs[0].ds_addr);
    579 		return 0;
    580 	}
    581 
    582 	size = round_page(size);
    583 
    584 	va = uvm_km_valloc(kernel_map, size);
    585 
    586 	if (va == 0)
    587 		return ENOMEM;
    588 
    589 	*kvap = (caddr_t)va;
    590 
    591 	for (curseg = 0; curseg < nsegs; curseg++) {
    592 		for (addr = segs[curseg].ds_addr;
    593 		     addr < segs[curseg].ds_addr + segs[curseg].ds_len;
    594 		     addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
    595 			if (size == 0)
    596 				panic("gaps_dmamem_map: size botch");
    597 			pmap_kenter_pa(va, addr,
    598 			    VM_PROT_READ | VM_PROT_WRITE);
    599 		}
    600 	}
    601 	pmap_update(pmap_kernel());
    602 
    603 	return 0;
    604 }
    605 
    606 void
    607 gaps_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
    608 {
    609 
    610 #ifdef DIAGNOSTIC
    611 	if ((u_long) kva & PAGE_MASK)
    612 		panic("gaps_dmamem_unmap");
    613 #endif
    614 
    615 	/*
    616 	 * Nothing to do if we mapped it with P2SEG.
    617 	 */
    618 	if (kva >= (caddr_t)SH3_P2SEG_BASE &&
    619 	    kva <= (caddr_t)SH3_P2SEG_END)
    620 		return;
    621 
    622 	size = round_page(size);
    623 	pmap_kremove((vaddr_t) kva, size);
    624 	pmap_update(pmap_kernel());
    625 	uvm_km_free(kernel_map, (vaddr_t) kva, size);
    626 }
    627 
    628 paddr_t
    629 gaps_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    630     off_t off, int prot, int flags)
    631 {
    632 
    633 	/* Not implemented. */
    634 	return -1;
    635 }
    636