/*	$NetBSD: bus_dma.c,v 1.40 2023/12/17 14:54:49 andvar Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * bus_dma routines for vax. File copied from arm32/bus_dma.c.
 * NetBSD: bus_dma.c,v 1.11 1998/09/21 22:53:35 thorpej Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.40 2023/12/17 14:54:49 andvar Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/device.h>

#include <uvm/uvm.h>

#define _VAX_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <machine/ka43.h>
#include <machine/sid.h>

extern	paddr_t avail_start, avail_end;
extern  vaddr_t virtual_avail;

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct vmspace *, int, vaddr_t *, int *, bool);
int	_bus_dma_inrange(bus_dma_segment_t *, int, bus_addr_t);
int	_bus_dmamem_alloc_range(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t*, int, int *, int, vaddr_t, vaddr_t);

static size_t
_bus_dmamap_mapsize(int const nsegments)
{
	KASSERT(nsegments > 0);
	return sizeof(struct vax_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
	bus_size_t maxsegsz, bus_size_t boundary, int flags,
	bus_dmamap_t *dmamp)
{
	struct vax_bus_dmamap *map;
	void *mapstore;

#ifdef DEBUG_DMA
	printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n",
	    t, size, nsegments, maxsegsz, boundary, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	if ((mapstore = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return (ENOMEM);

	map = (struct vax_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
#ifdef DEBUG_DMA
	printf("dmamap_create:map=%p\n", map);
#endif	/* DEBUG_DMA */
	return (0);
}
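
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): how a hypothetical driver might create a DMA map for a 64 KB
 * transfer split across at most 16 segments of at most 8 KB each, with no
 * boundary restriction.  The softc layout and names (sc, sc_dmat, sc_dmamap)
 * are invented for illustration only.
 */
#if 0
	error = bus_dmamap_create(sc->sc_dmat, 65536, 16, 8192, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap);
	if (error)
		return error;
#endif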

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_destroy: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if (map->dm_nsegs > 0)
		printf("bus_dmamap_destroy() called for map with valid mappings\n");
#endif	/* DIAGNOSTIC */
	kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
	bus_size_t buflen, struct proc *p, int flags)
{
	vaddr_t lastaddr = 0;
	int seg, error;
	struct vmspace *vm;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
	    t, map, buf, buflen, p, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on an error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
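
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): loading a kernel buffer for a device write and bracketing the
 * transfer with bus_dmamap_sync().  On the VAX the sync is a no-op (see
 * _bus_dmamap_sync below), but portable drivers are expected to call it
 * anyway.  The names sc, sc_dmat, sc_dmamap, buf and len are hypothetical.
 */
#if 0
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len,
	    NULL, BUS_DMA_NOWAIT);
	if (error == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
		    BUS_DMASYNC_PREWRITE);
		/* ... start the transfer and wait for completion ... */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	}
#endif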

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
	int flags)
{
	vaddr_t lastaddr = 0;
	int seg, error;
	bool first;
	struct mbuf *m;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
	    t, map, m0, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on an error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif	/* DIAGNOSTIC */

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = true;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next, first = false) {
		if (m->m_len == 0)
			continue;
#if 0
		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
#if 0
		case M_EXT|M_EXT_CLUSTER:
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr
			    + (m->m_data - m->m_ext.ext_buf);
#endif
#if 1
    have_addr:
#endif
			if (!first && ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				continue;
			}
			map->dm_segs[seg].ds_addr = lastaddr;
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			continue;
#if 1
		case 0:
			KASSERT(m->m_paddr != M_PADDR_INVALID);
			lastaddr = m->m_paddr + M_BUFOFFSET(m)
			    + (m->m_data - M_BUFADDR(m));
			goto have_addr;
#endif
		default:
			break;
		}
#endif
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    vmspace_kernel(), flags, &lastaddr, &seg, first);
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
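
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): loading an outgoing packet for a hypothetical network interface
 * and walking the resulting segment list.  The softc fields (sc_dmat,
 * sc_txmap) and the descriptor array "desc" are invented for illustration.
 */
#if 0
	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_txmap, m0,
	    BUS_DMA_NOWAIT);
	if (error == 0) {
		for (i = 0; i < sc->sc_txmap->dm_nsegs; i++) {
			/* program one hardware descriptor per DMA segment */
			desc[i].addr = sc->sc_txmap->dm_segs[i].ds_addr;
			desc[i].len  = sc->sc_txmap->dm_segs[i].ds_len;
		}
	}
#endif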

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
	int flags)
{
	vaddr_t lastaddr = 0;
	int seg, i, error;
	bool first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on an error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = true;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = false;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
	int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
	bus_size_t len, int ops)
{
#ifdef DEBUG_DMA
	printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
	    t, map, offset, len, ops);
#endif	/* DEBUG_DMA */
	/*
	 * A VAX has only a snooping cache, so this routine is a no-op.
	 */
	return;
}

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */

int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
	bus_size_t boundary, bus_dma_segment_t *segs,
	int nsegs, int *rsegs, int flags)
{
	int error;

	error =  (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, round_page(avail_start),
	    trunc_page(avail_end)));
	return(error);
}
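
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): the usual sequence for a DMA-safe descriptor ring -- allocate
 * physical memory, map it into kernel virtual space, then load it into a
 * DMA map so the device-visible address can be read from dm_segs[].  The
 * names, the single-segment assumption, and "ringsize" are hypothetical;
 * the map itself would have been created with bus_dmamap_create() earlier.
 */
#if 0
	error = bus_dmamem_alloc(sc->sc_dmat, ringsize, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error == 0)
		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, ringsize,
		    (void **)&sc->sc_ring, BUS_DMA_NOWAIT);
	if (error == 0)
		error = bus_dmamap_load(sc->sc_dmat, sc->sc_ringmap,
		    sc->sc_ring, ringsize, NULL, BUS_DMA_NOWAIT);
	/* device-visible base address: sc->sc_ringmap->dm_segs[0].ds_addr */
#endif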

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}
	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * Special case (but common):
	 * If there is only one physical segment, return the already-mapped
	 * virtual address, since all physical memory is permanently mapped
	 * at the beginning of kernel virtual space.
	 */
	if (nsegs == 1) {
		*kvap = (void *)(segs[0].ds_addr | KERNBASE);
		/*
		 * KA43 (3100/m76) must have its DMA-safe memory accessed
		 * through DIAGMEM. Remap it here.
		 */
		if (vax_boardtype == VAX_BTYP_43) {
			pmap_map((vaddr_t)*kvap, segs[0].ds_addr|KA43_DIAGMEM,
			    (segs[0].ds_addr|KA43_DIAGMEM) + size,
			    VM_PROT_READ|VM_PROT_WRITE);
		}
		return 0;
	}
	size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			if (vax_boardtype == VAX_BTYP_43)
				addr |= KA43_DIAGMEM;
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		}
	}
	pmap_update(pmap_kernel());
	return (0);
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%lx\n", t, kva, size);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif	/* DIAGNOSTIC */

	/* Avoid freeing if not mapped */
	if (kva < (void *)virtual_avail)
		return;

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif	/* DIAGNOSTIC */
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (btop((u_long)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry, and the ending segment on exit.
 * first indicates whether this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
	bus_size_t buflen, struct vmspace *vm, int flags, vaddr_t *lastaddrp,
	int *segp, bool first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

#ifdef DEBUG_DMA
	printf("_bus_dmamap_load_buffer(buf=%p, len=%lx, flags=%d, 1st=%d)\n",
	    buf, buflen, flags, first);
#endif	/* DEBUG_DMA */

	pmap = vm_map_pmap(&vm->vm_map);

	lastaddr = *lastaddrp;
	bmask  = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		(void) pmap_extract(pmap, (vaddr_t)vaddr, &curaddr);

#if 0
		/*
		 * Make sure we're in an allowed DMA range.
		 */
		if (t->_ranges != NULL &&
		    _bus_dma_inrange(t->_ranges, t->_nranges, curaddr) == 0)
			return (EINVAL);
#endif

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = false;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
int
_bus_dma_inrange(bus_dma_segment_t *ranges, int nranges, bus_addr_t curaddr)
{
	bus_dma_segment_t *ds;
	int i;

	for (i = 0, ds = ranges; i < nranges; i++, ds++) {
		if (curaddr >= ds->ds_addr &&
		    round_page(curaddr) <= (ds->ds_addr + ds->ds_len))
			return (1);
	}

	return (0);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
	bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
	int flags, vaddr_t low, vaddr_t high)
{
	vaddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

#ifdef DEBUG_DMA
	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif	/* DEBUG_DMA */

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", lastaddr);
#endif	/* DEBUG_DMA */
	m = m->pageq.queue.tqe_next;

	for (; m != NULL; m = m->pageq.queue.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif	/* DIAGNOSTIC */
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", curaddr);
#endif	/* DEBUG_DMA */
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}

/*
 * "generic" DMA struct, nothing special.
 */
struct vax_bus_dma_tag vax_bus_dma_tag = {
	._dmamap_create		= _bus_dmamap_create,
	._dmamap_destroy	= _bus_dmamap_destroy,
	._dmamap_load		= _bus_dmamap_load,
	._dmamap_load_mbuf	= _bus_dmamap_load_mbuf,
	._dmamap_load_uio	= _bus_dmamap_load_uio,
	._dmamap_load_raw	= _bus_dmamap_load_raw,
	._dmamap_unload		= _bus_dmamap_unload,
	._dmamap_sync		= _bus_dmamap_sync,
	._dmamem_alloc		= _bus_dmamem_alloc,
	._dmamem_free		= _bus_dmamem_free,
	._dmamem_map		= _bus_dmamem_map,
	._dmamem_unmap		= _bus_dmamem_unmap,
	._dmamem_mmap		= _bus_dmamem_mmap,
};
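
/*
 * Illustrative note (editor's addition, not part of the original source):
 * drivers normally do not call the _bus_* functions above directly.  A bus
 * front end hands its children a pointer to vax_bus_dma_tag (or a modified
 * copy of it), and the bus_dma(9) interface in <machine/bus.h> dispatches
 * through the tag's function pointers, roughly along these lines (sketch
 * only; the exact macro definitions live in the machine-dependent header):
 */
#if 0
#define	bus_dmamap_create(t, s, n, m, b, f, p)				\
	(*(t)->_dmamap_create)((t), (s), (n), (m), (b), (f), (p))
#endif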
    745