/* $NetBSD: bus_dma.c,v 1.62 2006/03/01 12:38:10 yamt Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.62 2006/03/01 12:38:10 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#define _ALPHA_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/intr.h>

int	_bus_dmamap_load_buffer_direct(bus_dma_tag_t,
	    bus_dmamap_t, void *, bus_size_t, struct vmspace *, int,
	    paddr_t *, int *, int);

extern paddr_t avail_start, avail_end;	/* from pmap.c */

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct alpha_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct alpha_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (struct alpha_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	if (t->_boundary != 0 && t->_boundary < boundary)
		map->_dm_boundary = t->_boundary;
	else
		map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;
	map->_dm_window = NULL;

	*dmamp = map;
	return (0);
}
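
/*
 * Illustrative sketch only (not part of this file): a bus-specific driver
 * typically creates a map roughly as below.  The softc fields "sc_dmat"
 * and "sc_dmamap" and the single-segment MAXPHYS sizing are hypothetical.
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &sc->sc_dmamap) != 0) {
 *		printf("%s: unable to create DMA map\n",
 *		    sc->sc_dev.dv_xname);
 *		return;
 *	}
 */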

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	free(map, M_DMAMAP);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer_direct(bus_dma_tag_t t, bus_dmamap_t map,
    void *buf, size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm->vm_map.pmap, vaddr, &curaddr);
		else
			curaddr = vtophys(vaddr);

		/*
		 * If we're beyond the current DMA window, indicate
		 * that and try to fall back into SGMAPs.
		 */
		if (t->_wsize != 0 && curaddr >= t->_wsize)
			return (EINVAL);

		curaddr |= t->_wbase;

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;
		if (map->dm_maxsegsz < sgsize)
			sgsize = map->dm_maxsegsz;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if ((map->_dm_flags & DMAMAP_NO_COALESCE) == 0 &&
			    curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		/*
		 * If there is a chained window, we will automatically
		 * fall back to it.
		 */
		return (EFBIG);		/* XXX better return value here? */
	}

	return (0);
}
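
/*
 * Worked example of the boundary clamp in the loop above (illustrative
 * values only): with _dm_boundary = 0x10000 (64KB), bmask = ~0xffff.
 * For curaddr = 0x1fe00 and a tentative sgsize of 0x2000 (one 8KB page),
 * baddr = (0x1fe00 + 0x10000) & ~0xffff = 0x20000, so sgsize is clipped
 * to baddr - curaddr = 0x200 and the segment ends exactly at the 64KB
 * boundary; the remainder of the page starts a new chunk on the next
 * loop iteration.
 */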

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.  Called by bus-specific DMA map load functions with the
 * OR value appropriate for indicating "direct-mapped" for that
 * chipset.
 */
int
_bus_dmamap_load_direct(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}
	seg = 0;
	error = _bus_dmamap_load_buffer_direct(t, map, buf, buflen,
	    vm, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load(t->_next_window, map, buf, buflen,
		    p, flags);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load_direct(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf_direct(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf_direct: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		/* XXX Could be better about coalescing. */
		/* XXX Doesn't check boundaries. */
		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
		case M_EXT|M_EXT_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
 have_addr:
			if (first == 0 &&
			    ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				break;
			}

			/*
			 * If we're beyond the current DMA window, indicate
			 * that and try to fall back into SGMAPs.
			 */
			if (t->_wsize != 0 && lastaddr >= t->_wsize) {
				error = EINVAL;
				break;
			}
			lastaddr |= t->_wbase;

			map->dm_segs[seg].ds_addr = lastaddr;
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			break;

		case 0:
			lastaddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			goto have_addr;

		default:
			error = _bus_dmamap_load_buffer_direct(t, map,
			    m->m_data, m->m_len, vmspace_kernel(), flags,
			    &lastaddr, &seg, first);
		}
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_mbuf(t->_next_window, map, m0, flags);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load_direct(), but for uios.
 */
int
_bus_dmamap_load_uio_direct(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct vmspace *vm;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	vm = uio->uio_vmspace;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer_direct(t, map,
		    addr, minlen, vm, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_uio(t->_next_window, map, uio, flags);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load_direct(), but for raw memory.
 */
int
_bus_dmamap_load_raw_direct(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw_direct: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_window = NULL;
	map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{

	/*
	 * Flush the store buffer.
	 */
	alpha_mb();
}
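
/*
 * Illustrative load/sync/unload lifecycle (a sketch only; "sc", "buf"
 * and "len" are hypothetical, and a real driver checks the load error
 * before proceeding):
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len,
 *	    NULL, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 *	... start the transfer and wait for the completion interrupt ...
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
 */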

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)));
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < avail_start || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * If we're only mapping 1 segment, use K0SEG, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		*kvap = (caddr_t)ALPHA_PHYS_TO_K0SEG(segs[0].ds_addr);
		return (0);
	}

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
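
/*
 * Illustrative allocation sketch (hypothetical driver fields and sizes;
 * a real caller also handles the error paths with bus_dmamem_unmap()
 * and bus_dmamem_free()):
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
 *		    &kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
 *
 * With rseg == 1 the single-segment K0SEG shortcut above applies and no
 * kernel virtual address space is consumed.
 */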

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with K0SEG.
	 */
	if (kva >= (caddr_t)ALPHA_K0SEG_BASE &&
	    kva <= (caddr_t)ALPHA_K0SEG_END)
		return;

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (alpha_btop((caddr_t)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}