/* $NetBSD: bus_dma.c,v 1.26 1998/08/17 20:15:55 thorpej Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_uvm.h"
#include "opt_pmap_new.h"

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.26 1998/08/17 20:15:55 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif

#define _ALPHA_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/intr.h>

int	_bus_dmamap_load_buffer_direct_common __P((bus_dma_tag_t,
	    bus_dmamap_t, void *, bus_size_t, struct proc *, int,
	    paddr_t *, int *, int));

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct alpha_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct alpha_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	bzero(mapstore, mapsize);
	map = (struct alpha_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	if (t->_boundary != 0 && t->_boundary < boundary)
		map->_dm_boundary = t->_boundary;
	else
		map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}
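
/*
 * Example (hypothetical; "sc" and its members are illustrative names,
 * not part of this file): a driver would typically create a map for
 * one contiguous transfer of up to MAXPHYS bytes:
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &sc->sc_dmamap) != 0)
 *		return (ENOMEM);
 */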

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	free(map, M_DMAMAP);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer_direct_common(t, map, buf, buflen, p, flags,
    lastaddrp, segp, first)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
	paddr_t *lastaddrp;
	int *segp;
	int first;
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (p != NULL)
			curaddr = pmap_extract(p->p_vmspace->vm_map.pmap,
			    vaddr);
		else
			curaddr = vtophys(vaddr);

		/*
		 * If we're beyond the current DMA window, indicate
		 * that and try to fall back into SGMAPs.
		 */
		if (t->_wsize != 0 && curaddr >= t->_wsize)
			return (EINVAL);

		curaddr |= t->_wbase;

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}
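
		/*
		 * Worked example of the clipping above (illustrative
		 * numbers): with _dm_boundary = 0x10000, bmask =
		 * ~0xffff.  For curaddr = 0x1ff00, baddr = (0x1ff00 +
		 * 0x10000) & ~0xffff = 0x20000, so sgsize is clipped
		 * to 0x100 and the segment ends exactly at the
		 * boundary.
		 */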

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		/*
		 * If there is a chained window, we will automatically
		 * fall back to it.
		 */
		return (EFBIG);		/* XXX better return value here? */
	}

	return (0);
}
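
/*
 * Usage sketch (hypothetical): callers thread lastaddr and seg
 * through successive invocations so segments can coalesce across
 * buffers; "b_addr" and "b_len" below are illustrative:
 *
 *	seg = 0; first = 1;
 *	for (each buffer) {
 *		error = _bus_dmamap_load_buffer_direct_common(t, map,
 *		    b_addr, b_len, p, flags, &lastaddr, &seg, first);
 *		first = 0;
 *	}
 */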

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.  Called by bus-specific DMA map load functions with the
 * OR value appropriate for indicating "direct-mapped" for that
 * chipset.
 */
int
_bus_dmamap_load_direct(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _bus_dmamap_load_buffer_direct_common(t, map, buf, buflen,
	    p, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load(t->_next_window, map, buf, buflen,
		    p, flags);
	}
	return (error);
}
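
/*
 * Example (hypothetical driver code; "sc" names are illustrative):
 * load a kernel buffer and make it visible to the device before
 * starting a write (memory -> device) transfer:
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len,
 *	    NULL, BUS_DMA_NOWAIT);
 *	if (error == 0) {
 *		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *		    BUS_DMASYNC_PREWRITE);
 *		... program the device from dm_segs[0 .. dm_nsegs-1] ...
 *	}
 */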

/*
 * Like _bus_dmamap_load_direct(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf_direct(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf_direct: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		error = _bus_dmamap_load_buffer_direct_common(t, map,
		    m->m_data, m->m_len, NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_mbuf(t->_next_window, map, m0, flags);
	}
	return (error);
}
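
/*
 * Example (hypothetical): a network interface's start routine might
 * map an outgoing packet chain this way ("sc" is illustrative):
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_txmap, m0,
 *	    BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap, 0,
 *		    m0->m_pkthdr.len, BUS_DMASYNC_PREWRITE);
 */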

/*
 * Like _bus_dmamap_load_direct(), but for uios.
 */
int
_bus_dmamap_load_uio_direct(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	off_t offset;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	offset = uio->uio_offset;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio_direct: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/* Find the beginning iovec. */
		if (offset >= iov[i].iov_len) {
			offset -= iov[i].iov_len;
			continue;
		}

		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len - offset ?
		    resid : iov[i].iov_len - offset;

		addr = (caddr_t)iov[i].iov_base + offset;

		error = _bus_dmamap_load_buffer_direct_common(t, map,
		    addr, minlen, p, flags, &lastaddr, &seg, first);
		first = 0;

		offset = 0;
		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_uio(t->_next_window, map, uio, flags);
	}
	return (error);
}
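
/*
 * Example (hypothetical): a character device's read/write entry
 * could map the caller's uio directly for DMA ("sc" is
 * illustrative):
 *
 *	error = bus_dmamap_load_uio(sc->sc_dmat, sc->sc_map, uio,
 *	    BUS_DMA_NOWAIT);
 */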

/*
 * Like _bus_dmamap_load_direct(), but for raw memory.
 */
int
_bus_dmamap_load_raw_direct(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_bus_dmamap_load_raw_direct: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * Flush the store buffer.
	 */
	alpha_mb();
}
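
/*
 * Example (hypothetical): a read (device -> memory) transfer would
 * be bracketed with sync calls, even though on the Alpha both
 * reduce to a memory barrier:
 *
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
 *	... start the DMA, wait for the completion interrupt ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
 */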

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	extern paddr_t avail_start, avail_end;
	paddr_t curaddr, lastaddr, high;
	vm_page_t m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	high = avail_end - PAGE_SIZE;

	/*
	 * Allocate pages from the VM system.
	 */
	TAILQ_INIT(&mlist);
#if defined(UVM)
	error = uvm_pglistalloc(size, avail_start, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
#else
	error = vm_page_alloc_memory(size, avail_start, high,
	    alignment, boundary, &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
#endif
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < avail_start || curaddr >= high) {
			printf("page allocator returned nonsensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
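
/*
 * Example (hypothetical): allocating and mapping a DMA-safe
 * descriptor area ("sc" names are illustrative):
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	caddr_t kva;
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, size, NBPG, 0, &seg, 1,
 *	    &rseg, BUS_DMA_NOWAIT) == 0)
 *		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
 *		    &kva, BUS_DMA_NOWAIT);
 */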

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	vm_page_t m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

#if defined(UVM)
	uvm_pglistfree(&mlist);
#else
	vm_page_free_memory(&mlist);
#endif
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;

	/*
	 * If we're only mapping 1 segment, use K0SEG, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		*kvap = (caddr_t)ALPHA_PHYS_TO_K0SEG(segs[0].ds_addr);
		return (0);
	}

	size = round_page(size);

#if defined(UVM)
	va = uvm_km_valloc(kernel_map, size);
#else
	va = kmem_alloc_pageable(kernel_map, size);
#endif

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
#if defined(PMAP_NEW)
			pmap_kenter_pa(va, addr, VM_PROT_READ | VM_PROT_WRITE);
#else
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE, TRUE);
#endif
		}
	}

	return (0);
}
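
/*
 * Teardown sketch (hypothetical): unmap before freeing, mirroring
 * the allocation order of the example above:
 *
 *	bus_dmamem_unmap(sc->sc_dmat, kva, size);
 *	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 */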

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with K0SEG.
	 */
	if (kva >= (caddr_t)ALPHA_K0SEG_BASE &&
	    kva <= (caddr_t)ALPHA_K0SEG_END)
		return;

	size = round_page(size);
#if defined(UVM)
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
#else
	kmem_free(kernel_map, (vaddr_t)kva, size);
#endif
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
int
_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs, off, prot, flags;
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (alpha_btop((caddr_t)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}
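
/*
 * Example (hypothetical): a driver's mmap entry point would forward
 * its offset here and return the page frame number, or -1 when the
 * offset is past the end of the allocated segments:
 *
 *	return (bus_dmamem_mmap(sc->sc_dmat, sc->sc_segs,
 *	    sc->sc_nsegs, off, prot, 0));
 */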