/*	$NetBSD: bus_dma.c,v 1.41 2022/07/26 20:08:55 andvar Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.41 2022/07/26 20:08:55 andvar Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>
#include <mips/cache.h>
#include <mips/locore.h>

#include <machine/bus.h>
#include <machine/bus_dma_hpcmips.h>

#include <dev/bus_dma/bus_dmamem_common.h>

static int _hpcmips_bd_map_load_buffer(bus_dmamap_t, void *, bus_size_t,
    struct vmspace *, int, vaddr_t *, int *, int);

paddr_t	kvtophys(vaddr_t);	/* XXX */

/*
 * The default DMA tag for all buses on hpcmips.
 */
struct bus_dma_tag_hpcmips hpcmips_default_bus_dma_tag = {
	{
		NULL,
		{
			_hpcmips_bd_map_create,
			_hpcmips_bd_map_destroy,
			_hpcmips_bd_map_load,
			_hpcmips_bd_map_load_mbuf,
			_hpcmips_bd_map_load_uio,
			_hpcmips_bd_map_load_raw,
			_hpcmips_bd_map_unload,
			_hpcmips_bd_map_sync,
			_hpcmips_bd_mem_alloc,
			_hpcmips_bd_mem_free,
			_hpcmips_bd_mem_map,
			_hpcmips_bd_mem_unmap,
			_hpcmips_bd_mem_mmap,
		},
	},
	NULL,
};
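
/*
 * For orientation, a sketch of the consumer-side life cycle that the
 * methods above implement (the tag handle t, buffer, and length are
 * illustrative, not taken from this file; error handling omitted):
 *
 *	bus_dmamap_t map;
 *
 *	error = bus_dmamap_create(t, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_WAITOK, &map);
 *	error = bus_dmamap_load(t, map, buf, len, NULL, BUS_DMA_WAITOK);
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... start the device's DMA transfer ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(t, map);
 *	bus_dmamap_destroy(t, map);
 */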

static size_t
_bus_dmamap_mapsize(int const nsegments)
{
	KASSERT(nsegments > 0);
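
	/*
	 * Layout note: struct bus_dmamap_hpcmips evidently embeds one
	 * struct bus_dma_segment_hpcmips (hence the nsegments - 1 extra
	 * entries below); the public bus_dma_segment_t array that
	 * bdm.dm_segs will point at follows immediately after.
	 */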
	return sizeof(struct bus_dmamap_hpcmips) +
	    sizeof(struct bus_dma_segment_hpcmips) * (nsegments - 1) +
	    sizeof(bus_dma_segment_t) * nsegments;
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_hpcmips_bd_map_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct bus_dmamap_hpcmips *map;
	void *mapstore;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * has two variable-sized arrays of segments, so we allocate
	 * enough room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 */
	if ((mapstore = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return (ENOMEM);

	map = (struct bus_dmamap_hpcmips *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->bdm.dm_maxsegsz = maxsegsz;
	map->bdm.dm_mapsize = 0;	/* no valid mappings */
	map->bdm.dm_nsegs = 0;
	map->bdm.dm_segs = (bus_dma_segment_t *)((char *)mapstore +
	    sizeof(struct bus_dmamap_hpcmips) +
	    sizeof(struct bus_dma_segment_hpcmips) * (nsegments - 1));

	*dmamp = &map->bdm;
	return (0);
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_hpcmips_bd_map_destroy(bus_dma_tag_t t, bus_dmamap_t bdm)
{
	struct bus_dmamap_hpcmips *map =
	    container_of(bdm, struct bus_dmamap_hpcmips, bdm);

	kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_hpcmips_bd_map_load_buffer(bus_dmamap_t mapx, void *buf, bus_size_t buflen,
    struct vmspace *vm, int flags, vaddr_t *lastaddrp, int *segp, int first)
{
	struct bus_dmamap_hpcmips *map = (struct bus_dmamap_hpcmips *)mapx;
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	paddr_t pa;
	int seg;

	lastaddr = *lastaddrp;
	bmask  = ~(map->_dm_boundary - 1);
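	/*
	 * bus_dma(9) requires any boundary to be a power of two, so
	 * ~(boundary - 1) masks off the offset within a boundary-sized
	 * window.  When no boundary was requested, _dm_boundary is 0
	 * and the checks below never consult the mask.
	 */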

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm_map_pmap(&vm->vm_map),
			    vaddr, &pa);
		else
			pa = kvtophys(vaddr);
		curaddr = pa;
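		/*
		 * Note: pmap_extract()'s return value is deliberately
		 * discarded; bus_dma expects loaded buffers to stay
		 * resident, so the lookup is presumed not to fail here.
		 */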

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->bdm.dm_segs[seg].ds_addr = curaddr;
			map->bdm.dm_segs[seg].ds_len = sgsize;
			map->_dm_segs[seg]._ds_vaddr = vaddr;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->bdm.dm_segs[seg].ds_len + sgsize) <=
			    map->bdm.dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
				(map->bdm.dm_segs[seg].ds_addr & bmask) ==
				(curaddr & bmask)))
				map->bdm.dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->bdm.dm_segs[seg].ds_addr = curaddr;
				map->bdm.dm_segs[seg].ds_len = sgsize;
				map->_dm_segs[seg]._ds_vaddr = vaddr;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */

	return (0);
}

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.
 */
int
_hpcmips_bd_map_load(bus_dma_tag_t t, bus_dmamap_t mapx, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct bus_dmamap_hpcmips *map = (struct bus_dmamap_hpcmips *)mapx;
	vaddr_t lastaddr;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->bdm.dm_mapsize = 0;
	map->bdm.dm_nsegs = 0;
	KASSERT(map->bdm.dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _hpcmips_bd_map_load_buffer(mapx, buf, buflen,
	    vm, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->bdm.dm_mapsize = buflen;
		map->bdm.dm_nsegs = seg + 1;

		/*
		 * For linear buffers, we support marking the mapping
		 * as COHERENT.
		 *
		 * XXX Check TLB entries for cache-inhibit bits?
		 */
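		/*
		 * KSEG1 is the unmapped, uncached window on MIPS, so a
		 * buffer accessed through it is coherent with DMA by
		 * construction; cached KSEG0 addresses do not qualify.
		 */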
		if (buf >= (void *)MIPS_KSEG1_START &&
		    buf < (void *)MIPS_KSEG2_START)
			map->_dm_flags |= HPCMIPS_DMAMAP_COHERENT;
	}
	return (error);
}

/*
 * Like _hpcmips_bd_map_load(), but for mbufs.
 */
int
_hpcmips_bd_map_load_mbuf(bus_dma_tag_t t, bus_dmamap_t mapx, struct mbuf *m0,
    int flags)
{
	struct bus_dmamap_hpcmips *map = (struct bus_dmamap_hpcmips *)mapx;
	vaddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->bdm.dm_mapsize = 0;
	map->bdm.dm_nsegs = 0;
	KASSERT(map->bdm.dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_hpcmips_bd_map_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _hpcmips_bd_map_load_buffer(mapx, m->m_data, m->m_len,
		    vmspace_kernel(), flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->bdm.dm_mapsize = m0->m_pkthdr.len;
		map->bdm.dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _hpcmips_bd_map_load(), but for uios.
 */
int
_hpcmips_bd_map_load_uio(bus_dma_tag_t t, bus_dmamap_t mapx, struct uio *uio,
    int flags)
{
	struct bus_dmamap_hpcmips *map = (struct bus_dmamap_hpcmips *)mapx;
	vaddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->bdm.dm_mapsize = 0;
	map->bdm.dm_nsegs = 0;
	KASSERT(map->bdm.dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _hpcmips_bd_map_load_buffer(mapx, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->bdm.dm_mapsize = uio->uio_resid;
		map->bdm.dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _hpcmips_bd_map_load(), but for raw memory.
 */
int
_hpcmips_bd_map_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_hpcmips_bd_map_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_hpcmips_bd_map_unload(bus_dma_tag_t t, bus_dmamap_t mapx)
{
	struct bus_dmamap_hpcmips *map = (struct bus_dmamap_hpcmips *)mapx;

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
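	/*
	 * dm_maxsegsz may have been lowered by the map's owner while
	 * the map was loaded; restore the limit fixed at creation time.
	 */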
	map->bdm.dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->bdm.dm_mapsize = 0;
	map->bdm.dm_nsegs = 0;
	map->_dm_flags &= ~HPCMIPS_DMAMAP_COHERENT;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_hpcmips_bd_map_sync(bus_dma_tag_t t, bus_dmamap_t mapx, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct bus_dmamap_hpcmips *map = (struct bus_dmamap_hpcmips *)mapx;
	bus_size_t minlen;
	bus_addr_t addr;
	int i;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_hpcmips_bd_map_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->bdm.dm_mapsize)
		panic("_hpcmips_bd_map_sync: bad offset %lu (map size is %lu)",
		    offset, map->bdm.dm_mapsize);
	if (len == 0 || (offset + len) > map->bdm.dm_mapsize)
		panic("_hpcmips_bd_map_sync: bad length");
#endif

	/*
	 * Flush the write buffer, so that any pending CPU stores have
	 * reached memory before the device accesses it.
	 */
	wbflush();

	/*
	 * If the mapping is of COHERENT DMA-safe memory, no cache
	 * flush is necessary.
	 */
	if (map->_dm_flags & HPCMIPS_DMAMAP_COHERENT)
		return;

	/*
	 * No cache flushes are necessary if we're only doing
	 * POSTREAD or POSTWRITE (i.e. not doing PREREAD or PREWRITE).
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) == 0)
		return;

	/*
	 * Flush data cache for PREREAD.  This has the side-effect
	 * of invalidating the cache.  Done at PREREAD since it
	 * causes the cache line(s) to be written back to memory.
	 *
	 * Flush data cache for PREWRITE, so that the contents of
	 * the data buffer in memory reflect reality.
	 *
	 * Given the test above, we know we're doing one of these
	 * two operations, so no additional tests are necessary.
	 */

	/*
	 * The R2000 and R3000 have a physically indexed
	 * cache.  Loop through the DMA segments, looking
	 * for the appropriate offset, and flush the D-cache
	 * at that physical address.
	 *
	 * The R4000 has a virtually indexed primary data cache.  We
	 * do the same loop, instead using the virtual address stashed
	 * away in the segments when the map was loaded.
	 */
	for (i = 0; i < map->bdm.dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->bdm.dm_segs[i].ds_len) {
			offset -= map->bdm.dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->bdm.dm_segs[i].ds_len - offset ?
		    len : map->bdm.dm_segs[i].ds_len - offset;

		if (CPUISMIPS3)
			addr = map->_dm_segs[i]._ds_vaddr;
		else
			addr = map->bdm.dm_segs[i].ds_addr;

#ifdef BUS_DMA_DEBUG
		printf("_hpcmips_bd_map_sync: flushing segment %d "
		    "(0x%lx..0x%lx) ...", i, addr + offset,
		    addr + offset + minlen - 1);
#endif
		if (CPUISMIPS3)
			mips_dcache_wbinv_range(addr + offset, minlen);
		else {
			/*
			 * We can't have a TLB miss; use KSEG0.
			 */
			mips_dcache_wbinv_range(
				MIPS_PHYS_TO_KSEG0(map->bdm.dm_segs[i].ds_addr
				    + offset),
				minlen);
		}
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		offset = 0;
		len -= minlen;
	}
}

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_hpcmips_bd_mem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	paddr_t high = pmap_limits.avail_end - PAGE_SIZE;

	return (_hpcmips_bd_mem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, pmap_limits.avail_start, high));
}
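
/*
 * A sketch of how the dmamem interface implemented below is typically
 * driven (names and sizes illustrative; error handling omitted).  With
 * a single segment and BUS_DMA_COHERENT, _hpcmips_bd_mem_map() hands
 * back an uncached KSEG1 address:
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	void *kva;
 *
 *	error = bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1, &rseg,
 *	    BUS_DMA_WAITOK);
 *	error = bus_dmamem_map(t, &seg, rseg, size, &kva,
 *	    BUS_DMA_WAITOK | BUS_DMA_COHERENT);
 *	... give seg.ds_addr to the device, use kva from the CPU ...
 *	bus_dmamem_unmap(t, kva, size);
 *	bus_dmamem_free(t, &seg, rseg);
 */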

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_hpcmips_bd_mem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary,
    bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
#ifdef DIAGNOSTIC
	if (high > pmap_limits.avail_end - PAGE_SIZE)
		high = pmap_limits.avail_end - PAGE_SIZE;
	if (low < pmap_limits.avail_start)
		low = pmap_limits.avail_start;
#endif

	return (_bus_dmamem_alloc_range_common(t, size, alignment, boundary,
					       segs, nsegs, rsegs, flags,
					       low, high));
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_hpcmips_bd_mem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	_bus_dmamem_free_common(t, segs, nsegs);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_hpcmips_bd_mem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{

	/*
	 * If we're only mapping 1 segment, use KSEG0 or KSEG1, to avoid
	 * TLB thrashing: KSEG1 yields an uncached view for
	 * BUS_DMA_COHERENT mappings, KSEG0 a cached one.
	 */
	if (nsegs == 1) {
		if (flags & BUS_DMA_COHERENT)
			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
		else
			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
		return (0);
	}

	/* XXX BUS_DMA_COHERENT */
	return (_bus_dmamem_map_common(t, segs, nsegs, size, kvap, flags, 0));
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_hpcmips_bd_mem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	/*
	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
	 * not in KSEG2).
	 */
	if (kva >= (void *)MIPS_KSEG0_START &&
	    kva < (void *)MIPS_KSEG2_START)
		return;

	_bus_dmamem_unmap_common(t, kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_hpcmips_bd_mem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	bus_addr_t rv;

	rv = _bus_dmamem_mmap_common(t, segs, nsegs, off, prot, flags);
	if (rv == (bus_addr_t)-1)
		return (-1);

	return (mips_btop((char *)rv));
}
    625