/* $NetBSD: bus_dma.c,v 1.61.2.1 2006/02/05 13:53:39 yamt Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.61.2.1 2006/02/05 13:53:39 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#define _ALPHA_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/intr.h>

int	_bus_dmamap_load_buffer_direct(bus_dma_tag_t,
	    bus_dmamap_t, void *, bus_size_t, struct vmspace *, int,
	    paddr_t *, int *, int);

extern paddr_t avail_start, avail_end;	/* from pmap.c */

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct alpha_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct alpha_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (struct alpha_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	if (t->_boundary != 0 && t->_boundary < boundary)
		map->_dm_boundary = t->_boundary;
	else
		map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;
	map->_dm_window = NULL;

	*dmamp = map;
	return (0);
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	free(map, M_DMAMAP);
}

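/*
 * Illustrative sketch (not from the original source): the create/destroy
 * pair above is normally reached through the MI bus_dmamap_create() and
 * bus_dmamap_destroy() interface, e.g. from a driver's attach and detach
 * routines.  The tag and softc fields below are hypothetical.
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &sc->sc_dmamap) != 0) {
 *		printf("%s: unable to create DMA map\n", sc->sc_dev.dv_xname);
 *		return;
 *	}
 *	...
 *	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
 */
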
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer_direct(bus_dma_tag_t t, bus_dmamap_t map,
    void *buf, bus_size_t buflen, struct vmspace *vm, int flags,
    paddr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm->vm_map.pmap, vaddr, &curaddr);
		else
			curaddr = vtophys(vaddr);

		/*
		 * If we're beyond the current DMA window, indicate
		 * that and try to fall back into SGMAPs.
		 */
		if (t->_wsize != 0 && curaddr >= t->_wsize)
			return (EINVAL);

		curaddr |= t->_wbase;

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;
		if (map->dm_maxsegsz < sgsize)
			sgsize = map->dm_maxsegsz;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if ((map->_dm_flags & DMAMAP_NO_COALESCE) == 0 &&
			    curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		/*
		 * If there is a chained window, we will automatically
		 * fall back to it.
		 */
		return (EFBIG);		/* XXX better return value here? */
	}

	return (0);
}

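/*
 * Illustrative note (not from the original source): a worked example of
 * the boundary clipping above, with a hypothetical 64KB boundary.  Given
 * map->_dm_boundary = 0x10000 and curaddr = 0x1fc00:
 *
 *	bmask  = ~(0x10000 - 1)               = 0xffffffffffff0000
 *	baddr  = (0x1fc00 + 0x10000) & bmask  = 0x20000
 *	sgsize = min(sgsize, baddr - curaddr) = at most 0x400
 *
 * so the current segment is cut off at the 64KB boundary and the next
 * chunk starts a new segment on the far side of it.
 */
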
/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.  Called by bus-specific DMA map load functions with the
 * OR value appropriate for indicating "direct-mapped" for that
 * chipset.
 */
int
_bus_dmamap_load_direct(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr;
	int seg, error;
	struct vmspace *vm;

	/* A NULL proc means the buffer is in kernel space. */
	if (p != NULL)
		vm = p->p_vmspace;
	else
		vm = vmspace_kernel();

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _bus_dmamap_load_buffer_direct(t, map, buf, buflen,
	    vm, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load(t->_next_window, map, buf, buflen,
		    p, flags);
	}
	return (error);
}

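/*
 * Illustrative sketch (not from the original source): a typical caller
 * reaches the loader above through bus_dmamap_load() and brackets the
 * transfer with bus_dmamap_sync().  A NULL proc pointer means the buffer
 * lives in kernel space; the names below are hypothetical.
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len,
 *	    NULL, BUS_DMA_NOWAIT);
 *	if (error == 0) {
 *		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *		    BUS_DMASYNC_PREWRITE);
 *		... start the memory-to-device transfer, wait for it ...
 *		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *		    BUS_DMASYNC_POSTWRITE);
 *		bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
 *	}
 */
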
/*
 * Like _bus_dmamap_load_direct(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf_direct(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf_direct: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		/* XXX Could be better about coalescing. */
		/* XXX Doesn't check boundaries. */
		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
		case M_EXT|M_EXT_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
 have_addr:
			if (first == 0 &&
			    ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				break;
			}

			/*
			 * If we're beyond the current DMA window, indicate
			 * that and try to fall back into SGMAPs.
			 */
			if (t->_wsize != 0 && lastaddr >= t->_wsize) {
				error = EINVAL;
				break;
			}
			lastaddr |= t->_wbase;

			map->dm_segs[seg].ds_addr = lastaddr;
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			break;

		case 0:
			lastaddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			goto have_addr;

		default:
			error = _bus_dmamap_load_buffer_direct(t, map,
			    m->m_data, m->m_len, vmspace_kernel(), flags,
			    &lastaddr, &seg, first);
		}
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_mbuf(t->_next_window, map, m0, flags);
	}
	return (error);
}

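/*
 * Illustrative sketch (not from the original source): a network driver
 * typically reaches the mbuf loader above via bus_dmamap_load_mbuf() in
 * its transmit path.  "sc", "txmap" and the descriptor-filling step are
 * hypothetical.
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m0, BUS_DMA_NOWAIT);
 *	if (error == 0) {
 *		bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
 *		    BUS_DMASYNC_PREWRITE);
 *		for (i = 0; i < txmap->dm_nsegs; i++)
 *			... point descriptor i at txmap->dm_segs[i] ...
 *	}
 *
 * EFBIG from the load usually means the chain has more segments than the
 * map allows; drivers commonly copy the chain into a fresh cluster and
 * retry in that case.
 */
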
/*
 * Like _bus_dmamap_load_direct(), but for uios.
 */
int
_bus_dmamap_load_uio_direct(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct vmspace *vm;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	vm = uio->uio_vmspace;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer_direct(t, map,
		    addr, minlen, vm, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_uio(t->_next_window, map, uio, flags);
	}
	return (error);
}

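/*
 * Illustrative sketch (not from the original source): a character device
 * doing DMA straight to or from the caller's buffers can hand its
 * read/write uio to the loader above via bus_dmamap_load_uio().  The
 * softc fields are hypothetical; uio_vmspace identifies whose address
 * space the iovecs live in.
 *
 *	error = bus_dmamap_load_uio(sc->sc_dmat, sc->sc_dmamap, uio,
 *	    BUS_DMA_WAITOK);
 *	if (error == 0) {
 *		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
 *		    sc->sc_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
 *		... run the device-to-memory transfer ...
 *	}
 */
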
/*
 * Like _bus_dmamap_load_direct(), but for raw memory.
 */
int
_bus_dmamap_load_raw_direct(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw_direct: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_window = NULL;
	map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{

	/*
	 * Flush the store buffer.
	 */
	alpha_mb();
}

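/*
 * Illustrative sketch (not from the original source): the Alpha
 * implementation above reduces to a memory barrier (host DMA is cache
 * coherent here), but callers are still expected to bracket transfers
 * with the usual sync operations so the same code works on non-coherent
 * platforms.  A device-to-memory (read) transfer would look roughly like
 * this; the names are hypothetical.
 *
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREREAD);
 *	... start the transfer, wait for the completion interrupt ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTREAD);
 *	... only now read the data through the CPU ...
 */
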
/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)));
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < avail_start || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}

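/*
 * Illustrative sketch (not from the original source): the usual way a
 * driver combines the dmamem and dmamap functions in this file to set up
 * a DMA-safe control structure such as a descriptor ring.  The sizes and
 * softc fields are hypothetical; error handling is omitted.
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, ringsize, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
 *	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, ringsize,
 *	    (caddr_t *)&sc->sc_ring, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
 *	error = bus_dmamap_create(sc->sc_dmat, ringsize, 1, ringsize, 0,
 *	    BUS_DMA_NOWAIT, &sc->sc_ringmap);
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_ringmap, sc->sc_ring,
 *	    ringsize, NULL, BUS_DMA_NOWAIT);
 *
 * sc->sc_ringmap->dm_segs[0].ds_addr is then the bus address to program
 * into the device; teardown is unload, destroy, unmap, free in reverse.
 */
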
/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * If we're only mapping 1 segment, use K0SEG, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		*kvap = (caddr_t)ALPHA_PHYS_TO_K0SEG(segs[0].ds_addr);
		return (0);
	}

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with K0SEG.
	 */
	if (kva >= (caddr_t)ALPHA_K0SEG_BASE &&
	    kva <= (caddr_t)ALPHA_K0SEG_END)
		return;

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (alpha_btop((caddr_t)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}
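
/*
 * Illustrative sketch (not from the original source): a driver exporting
 * its DMA-safe buffer to userland would call the helper above from its
 * mmap entry point, normally via the MI bus_dmamem_mmap() interface.
 * The driver name and softc layout are hypothetical.
 *
 *	paddr_t
 *	xx_mmap(dev_t dev, off_t off, int prot)
 *	{
 *		struct xx_softc *sc = xx_cd.cd_devs[minor(dev)];
 *
 *		if (off < 0 || off >= sc->sc_buflen)
 *			return (-1);
 *		return (bus_dmamem_mmap(sc->sc_dmat, sc->sc_segs,
 *		    sc->sc_nsegs, off, prot, BUS_DMA_NOWAIT));
 *	}
 */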