/*	$NetBSD: bus.c,v 1.38 2022/07/26 20:08:56 andvar Exp $	*/

/*
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus.c,v 1.38 2022/07/26 20:08:56 andvar Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _NEWSMIPS_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/cpu.h>

#include <dev/bus_dma/bus_dmamem_common.h>

#include <uvm/uvm_extern.h>

#include <mips/cache.h>

static int	_bus_dmamap_load_buffer(bus_dmamap_t, void *, bus_size_t,
				struct vmspace *, int, vaddr_t *, int *, int);

struct newsmips_bus_dma_tag newsmips_default_bus_dma_tag = {
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	_bus_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	_bus_dmamap_unload,
	NULL,
	_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};

void
newsmips_bus_dma_init(void)
{

#ifdef MIPS1
	if (CPUISMIPS3 == 0)
		newsmips_default_bus_dma_tag._dmamap_sync =
		    _bus_dmamap_sync_r3k;
#endif
#ifdef MIPS3
	if (CPUISMIPS3)
		newsmips_default_bus_dma_tag._dmamap_sync =
		    _bus_dmamap_sync_r4k;
#endif
}
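
#if 0
/*
 * Usage sketch (hypothetical caller; the function name is made up for
 * illustration): once newsmips_bus_dma_init() has installed the sync
 * routine for the CPU in use, consumers reach it through the MI
 * bus_dmamap_sync() interface on the tag rather than calling
 * _bus_dmamap_sync_r3k/_r4k directly.
 */
static void
example_dma_done(bus_dma_tag_t t, bus_dmamap_t map)
{

	/* Dispatches to the sync hook selected above for this CPU. */
	bus_dmamap_sync(t, map, 0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(t, map);
}
#endif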

int
bus_space_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	int cacheable = flags & BUS_SPACE_MAP_CACHEABLE;

	if (cacheable)
		*bshp = MIPS_PHYS_TO_KSEG0(bpa);
	else
		*bshp = MIPS_PHYS_TO_KSEG1(bpa);

	return 0;
}
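
#if 0
/*
 * Usage sketch (the base address and size below are made up for
 * illustration; real drivers take them from their attach arguments):
 * device registers are normally mapped without BUS_SPACE_MAP_CACHEABLE,
 * which yields an uncached KSEG1 handle; passing the flag returns a
 * cached KSEG0 handle instead.
 */
static int
example_map_regs(bus_space_tag_t iot, bus_space_handle_t *iohp)
{

	return bus_space_map(iot, 0x1e980000, 0x100, 0, iohp);
}
#endif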

int
bus_space_alloc(bus_space_tag_t t, bus_addr_t rstart, bus_addr_t rend,
    bus_size_t size, bus_size_t alignment, bus_size_t boundary, int flags,
    bus_addr_t *bpap, bus_space_handle_t *bshp)
{

	panic("bus_space_alloc: not implemented");
}

void
bus_space_free(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
{

	panic("bus_space_free: not implemented");
}

void
bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
{

	return;
}

int
bus_space_subregion(bus_space_tag_t t, bus_space_handle_t bsh,
    bus_size_t offset, bus_size_t size, bus_space_handle_t *nbshp)
{

	*nbshp = bsh + offset;
	return 0;
}
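
#if 0
/*
 * Usage sketch: handles on this port are plain KSEG virtual addresses,
 * so a subregion is simply the parent handle plus an offset.  The
 * offset and size here are illustrative only.
 */
static int
example_subwindow(bus_space_tag_t iot, bus_space_handle_t ioh,
    bus_space_handle_t *subp)
{

	return bus_space_subregion(iot, ioh, 0x40, 0x10, subp);
}
#endif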

static size_t
_bus_dmamap_mapsize(int const nsegments)
{
	KASSERT(nsegments > 0);
	return sizeof(struct newsmips_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct newsmips_bus_dmamap *map;
	void *mapstore;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	if ((mapstore = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return ENOMEM;

	map = (struct newsmips_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->_dm_vmspace = NULL;
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return 0;
}
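
#if 0
/*
 * Usage sketch (illustrative parameters): a driver that transfers at
 * most MAXPHYS bytes in a single physically contiguous segment, with
 * no boundary constraint, would create its map like this.
 */
static int
example_create_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
{

	return bus_dmamap_create(dmat, MAXPHYS, 1, MAXPHYS, 0,
	    BUS_DMA_NOWAIT, mapp);
}
#endif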

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
}

extern	paddr_t kvtophys(vaddr_t);		/* XXX */

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dmamap_t map, void *buf, bus_size_t buflen,
    struct vmspace *vm, int flags, vaddr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	paddr_t pa;
	size_t seg;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm_map_pmap(&vm->vm_map),
			    vaddr, &pa);
		else
			pa = kvtophys(vaddr);
		curaddr = pa;

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_vaddr = vaddr;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_vaddr = vaddr;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return EFBIG;		/* XXX Better return value here? */

	return 0;
}
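
/*
 * Worked example of the coalescing above (illustrative, assuming a
 * 4KB PAGE_SIZE): loading an 8KB kernel buffer whose two pages are
 * physically adjacent takes two loop iterations; the second chunk
 * satisfies curaddr == lastaddr, so it is folded into segment 0 and
 * the map ends up with a single 8KB segment.  If the pages were not
 * adjacent, the else-branch would start segment 1 instead, and a map
 * created with _dm_segcnt == 1 would fail with EFBIG.
 */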

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	vaddr_t lastaddr;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return EINVAL;

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(map, buf, buflen,
	    vm, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vm;

		/*
		 * For linear buffers, we support marking the mapping
		 * as COHERENT.
		 *
		 * XXX Check TLB entries for cache-inhibit bits?
		 */
		if (buf >= (void *)MIPS_KSEG1_START &&
		    buf < (void *)MIPS_KSEG2_START)
			map->_dm_flags |= NEWSMIPS_DMAMAP_COHERENT;
	}
	return error;
}
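
#if 0
/*
 * Usage sketch (hypothetical write path; the function name is made up):
 * the typical lifecycle around a load, as seen through the MI bus_dma
 * interface.
 */
static int
example_dma_write(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t len)
{
	int error;

	/* NULL proc means a kernel-space buffer. */
	error = bus_dmamap_load(dmat, map, buf, len, NULL, BUS_DMA_NOWAIT);
	if (error != 0)
		return error;

	/* Write back caches (or drain the write buffer on an R3000). */
	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_PREWRITE);

	/* ... program the device with map->dm_segs[0].ds_addr ... */

	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dmat, map);
	return 0;
}
#endif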

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	vaddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(map, m->m_data, m->m_len,
		    vmspace_kernel(), flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vmspace_kernel();	/* always kernel */
	}
	return error;
}
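
#if 0
/*
 * Usage sketch: a hypothetical NIC transmit routine loads a packet
 * chain directly.  Mbuf chains always live in kernel space, which is
 * why the code above records vmspace_kernel() in _dm_vmspace.
 */
static int
example_load_pkt(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0)
{

	return bus_dmamap_load_mbuf(dmat, map, m0, BUS_DMA_NOWAIT);
}
#endif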

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	vaddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = uio->uio_vmspace;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for raw memory.
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_flags &= ~NEWSMIPS_DMAMAP_COHERENT;
	map->_dm_vmspace = NULL;
}

#ifdef MIPS1
/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 *
 * This is the R3000 version.
 */
void
_bus_dmamap_sync_r3k(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;
	bus_addr_t addr;
	int i;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync_r3k: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync_r3k: bad offset %lu (map size is %lu)",
		      offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync_r3k: bad length");
#endif

	/*
	 * The R3000 cache is write-through.  Therefore, we only need
	 * to drain the write buffer on PREWRITE.  The cache is not
	 * coherent, however, so we need to invalidate the data cache
	 * on PREREAD (should we do it POSTREAD instead?).
	 *
	 * POSTWRITE (and POSTREAD, currently) are no-ops.
	 */

	if (ops & BUS_DMASYNC_PREWRITE) {
		/*
		 * Flush the write buffer.
		 */
		wbflush();
	}

	/*
	 * If we're not doing PREREAD, nothing more to do.
	 */
	if ((ops & BUS_DMASYNC_PREREAD) == 0)
		return;

	/*
	 * No cache invalidation is necessary if the DMA map covers
	 * COHERENT DMA-safe memory (which is mapped un-cached).
	 */
	if (map->_dm_flags & NEWSMIPS_DMAMAP_COHERENT)
		return;

	/*
	 * If we are going to hit something as large or larger
	 * than the entire data cache, just nail the whole thing.
	 *
	 * NOTE: Even though this is `wbinv_all', since the cache is
	 * write-through, it just invalidates it.
	 */
	if (len >= mips_cache_info.mci_pdcache_size) {
		mips_dcache_wbinv_all();
		return;
	}

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		addr = map->dm_segs[i].ds_addr;

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync_r3k: flushing segment %d "
		    "(0x%lx..0x%lx) ...", i, addr + offset,
		    addr + offset + minlen - 1);
#endif
		mips_dcache_inv_range(
		    MIPS_PHYS_TO_KSEG0(addr + offset), minlen);
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		offset = 0;
		len -= minlen;
	}
}
#endif /* MIPS1 */

#ifdef MIPS3
/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 *
 * This is the R4000 version.
 */
void
_bus_dmamap_sync_r4k(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;
	bus_addr_t addr;
	int i, useindex;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync_r4k: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync_r4k: bad offset %lu (map size is %lu)",
		      offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync_r4k: bad length");
#endif

	/*
	 * The R4000 cache is virtually-indexed, write-back.  This means
	 * we need to do the following things:
	 *
	 *	PREREAD -- Invalidate D-cache.  Note we might have
	 *	to also write-back here if we have to use an Index
	 *	op, or if the buffer start/end is not cache-line aligned.
	 *
	 *	PREWRITE -- Write-back the D-cache.  If we have to use
	 *	an Index op, we also have to invalidate.  Note that if
	 *	we are doing PREREAD|PREWRITE, we can collapse everything
	 *	into a single op.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	/*
	 * Flush the write buffer.
	 * XXX Is this always necessary?
	 */
	wbflush();

	ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (ops == 0)
		return;

	/*
	 * If the mapping is of COHERENT DMA-safe memory, no cache
	 * flush is necessary.
	 */
	if (map->_dm_flags & NEWSMIPS_DMAMAP_COHERENT)
		return;

	/*
	 * If the mapping belongs to the kernel, or if it belongs
	 * to the currently-running process (XXX actually, vmspace),
	 * then we can use Hit ops.  Otherwise, Index ops.
	 *
	 * This should be true the vast majority of the time.
	 */
	if (__predict_true(VMSPACE_IS_KERNEL_P(map->_dm_vmspace) ||
	    map->_dm_vmspace == curproc->p_vmspace))
		useindex = 0;
	else
		useindex = 1;

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		addr = map->dm_segs[i]._ds_vaddr;

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync: flushing segment %d "
		    "(0x%lx..0x%lx) ...", i, addr + offset,
		    addr + offset + minlen - 1);
#endif

		/*
		 * If we are forced to use Index ops, it's always a
		 * Write-back,Invalidate, so just do one test.
		 */
		if (__predict_false(useindex)) {
			mips_dcache_wbinv_range_index(addr + offset, minlen);
#ifdef BUS_DMA_DEBUG
			printf("\n");
#endif
			offset = 0;
			len -= minlen;
			continue;
		}

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			mips_dcache_wbinv_range(addr + offset, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
#if 1
			mips_dcache_wbinv_range(addr + offset, minlen);
#else
			mips_dcache_inv_range(addr + offset, minlen);
#endif
			break;

		case BUS_DMASYNC_PREWRITE:
			mips_dcache_wb_range(addr + offset, minlen);
			break;
		}
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		offset = 0;
		len -= minlen;
	}
}
#endif /* MIPS3 */

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	return (_bus_dmamem_alloc_range_common(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags,
	    pmap_limits.avail_start /*low*/,
	    pmap_limits.avail_end - 1 /*high*/));
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	_bus_dmamem_free_common(t, segs, nsegs);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{

	/*
	 * If we're only mapping 1 segment, and the address is lower than
	 * 256MB, use KSEG0 or KSEG1, to avoid TLB thrashing.
	 */
	if (nsegs == 1 && segs[0].ds_addr + segs[0].ds_len <= 0x10000000) {
		if (flags & BUS_DMA_COHERENT)
			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
		else
			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
		return 0;
	}

	/* XXX BUS_DMA_COHERENT */
	return (_bus_dmamem_map_common(t, segs, nsegs, size, kvap, flags, 0));
}
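
#if 0
/*
 * Usage sketch (hypothetical descriptor ring): allocating DMA-safe
 * memory and mapping it with BUS_DMA_COHERENT.  For a single low
 * segment the code above returns an uncached KSEG1 pointer, so the
 * mapping consumes no TLB entries and needs no explicit cache syncs.
 */
static void *
example_alloc_ring(bus_dma_tag_t dmat, bus_dma_segment_t *seg)
{
	void *kva;
	int rseg;

	if (bus_dmamem_alloc(dmat, PAGE_SIZE, PAGE_SIZE, 0, seg, 1,
	    &rseg, BUS_DMA_NOWAIT) != 0)
		return NULL;
	if (bus_dmamem_map(dmat, seg, rseg, PAGE_SIZE, &kva,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0) {
		bus_dmamem_free(dmat, seg, rseg);
		return NULL;
	}
	return kva;
}
#endif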

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	/*
	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
	 * not in KSEG2).
	 */
	if (kva >= (void *)MIPS_KSEG0_START &&
	    kva < (void *)MIPS_KSEG2_START)
		return;

	_bus_dmamem_unmap_common(t, kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	bus_addr_t rv;

	rv = _bus_dmamem_mmap_common(t, segs, nsegs, off, prot, flags);
	if (rv == (bus_addr_t)-1)
		return (-1);

	return (mips_btop((char *)rv));
}
    788