/* $NetBSD: bus_dma.c,v 1.9 2022/07/26 20:08:55 andvar Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.9 2022/07/26 20:08:55 andvar Exp $");

#include "opt_cputype.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

#define _EMIPS_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <dev/bus_dma/bus_dmamem_common.h>

#include <mips/cache.h>

static int _bus_dmamap_load_buffer(bus_dmamap_t,
    void *, bus_size_t, struct vmspace *, int, vaddr_t *,
    int *, int);

paddr_t kvtophys(vaddr_t);        /* XXX */

/*
 * The default DMA tag for all busses on the eMIPS.
 */
struct emips_bus_dma_tag emips_default_bus_dma_tag = {
        _bus_dmamap_create,
        _bus_dmamap_destroy,
        _bus_dmamap_load,
        _bus_dmamap_load_mbuf,
        _bus_dmamap_load_uio,
        _bus_dmamap_load_raw,
        _bus_dmamap_unload,
        NULL,
        _bus_dmamem_alloc,
        _bus_dmamem_free,
        _bus_dmamem_map,
        _bus_dmamem_unmap,
        _bus_dmamem_mmap,
};

void
emips_bus_dma_init(void)
{
#ifdef MIPS1
        if (CPUISMIPS3 == 0)
                emips_default_bus_dma_tag._dmamap_sync = _bus_dmamap_sync_r3k;
#endif
#ifdef MIPS3
        if (CPUISMIPS3)
                emips_default_bus_dma_tag._dmamap_sync = _bus_dmamap_sync_r4k;
#endif
}

static size_t
_bus_dmamap_mapsize(int const nsegments)
{
        KASSERT(nsegments > 0);
        return sizeof(struct emips_bus_dmamap) +
            (sizeof(bus_dma_segment_t) * (nsegments - 1));
}

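/*
 * Illustrative sketch (guarded out of the build): the map structure
 * already embeds one bus_dma_segment_t, so _bus_dmamap_mapsize() only
 * adds room for (nsegments - 1) further segments.  The segment count
 * below is hypothetical.
 */
#if 0
static void
example_mapsize(void)
{
        const int nsegments = 8;        /* hypothetical */

        KASSERT(_bus_dmamap_mapsize(1) == sizeof(struct emips_bus_dmamap));
        KASSERT(_bus_dmamap_mapsize(nsegments) ==
            sizeof(struct emips_bus_dmamap) +
            sizeof(bus_dma_segment_t) * (nsegments - 1));
}
#endif
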
/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
        struct emips_bus_dmamap *map;
        void *mapstore;

        /*
         * Allocate and initialize the DMA map.  The end of the map
         * is a variable-sized array of segments, so we allocate enough
         * room for them in one shot.
         *
         * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
         * of ALLOCNOW notifies others that we've reserved these resources,
         * and they are not to be freed.
         *
         * The bus_dmamap_t includes one bus_dma_segment_t, hence
         * the (nsegments - 1).
         */
        if ((mapstore = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
            (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
                return (ENOMEM);

        map = (struct emips_bus_dmamap *)mapstore;
        map->_dm_size = size;
        map->_dm_segcnt = nsegments;
        map->_dm_maxmaxsegsz = maxsegsz;
        map->_dm_boundary = boundary;
        map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
        map->_dm_vmspace = NULL;
        map->dm_maxsegsz = maxsegsz;
        map->dm_mapsize = 0;            /* no valid mappings */
        map->dm_nsegs = 0;

        *dmamp = map;
        return 0;
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

        kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
}

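/*
 * Sketch of how a driver would typically reach the map-creation path
 * above, through the standard bus_dma(9) wrappers from <machine/bus.h>.
 * The sizes and segment count are hypothetical; guarded out of the
 * build.  The map is later released with bus_dmamap_destroy().
 */
#if 0
static int
example_create_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
{

        /*
         * One map of up to 64KB in at most 8 segments, each segment at
         * most 8KB, no boundary restriction.  BUS_DMA_WAITOK lets
         * kmem_zalloc() sleep in _bus_dmamap_create() above.
         */
        return bus_dmamap_create(dmat, 65536, 8, 8192, 0,
            BUS_DMA_WAITOK, mapp);
}
#endif
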
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dmamap_t map, void *buf, bus_size_t buflen,
    struct vmspace *vm, int flags, vaddr_t *lastaddrp, int *segp, int first)
{
        bus_size_t sgsize;
        paddr_t pa;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vaddr_t vaddr = (vaddr_t)buf;
        int seg;

        lastaddr = *lastaddrp;
        bmask = ~(map->_dm_boundary - 1);

        for (seg = *segp; buflen > 0 ; ) {
                /*
                 * Get the physical address for this segment.
                 */
                if (!VMSPACE_IS_KERNEL_P(vm))
                        (void) pmap_extract(vm_map_pmap(&vm->vm_map),
                            vaddr, &pa);
                else
                        pa = kvtophys(vaddr);
                curaddr = pa;

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - (vaddr & PGOFSET);
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (map->_dm_boundary > 0) {
                        baddr = (curaddr + map->_dm_boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                /*
                 * Insert chunk into a segment, coalescing with
                 * the previous segment if possible.
                 */
                if (first) {
                        map->dm_segs[seg].ds_addr = curaddr;
                        map->dm_segs[seg].ds_len = sgsize;
                        map->dm_segs[seg]._ds_vaddr = vaddr;
                        first = 0;
                } else {
                        if (curaddr == lastaddr &&
                            (map->dm_segs[seg].ds_len + sgsize) <=
                             map->dm_maxsegsz &&
                            (map->_dm_boundary == 0 ||
                             (map->dm_segs[seg].ds_addr & bmask) ==
                             (curaddr & bmask)))
                                map->dm_segs[seg].ds_len += sgsize;
                        else {
                                if (++seg >= map->_dm_segcnt)
                                        break;
                                map->dm_segs[seg].ds_addr = curaddr;
                                map->dm_segs[seg].ds_len = sgsize;
                                map->dm_segs[seg]._ds_vaddr = vaddr;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        if (buflen != 0)
                return EFBIG;           /* XXX better return value here? */

        return 0;
}

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
        vaddr_t lastaddr;
        int seg, error;
        struct vmspace *vm;

        /*
         * Make sure that on error condition we return "no valid mappings".
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
        KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

        if (buflen > map->_dm_size)
                return EINVAL;

        if (p != NULL) {
                vm = p->p_vmspace;
        } else {
                vm = vmspace_kernel();
        }

        seg = 0;
        error = _bus_dmamap_load_buffer(map, buf, buflen,
            vm, flags, &lastaddr, &seg, 1);
        if (error == 0) {
                map->dm_mapsize = buflen;
                map->dm_nsegs = seg + 1;
                map->_dm_vmspace = vm;

                /*
                 * For linear buffers, we support marking the mapping
                 * as COHERENT.
                 *
                 * XXX Check TLB entries for cache-inhibit bits?
                 */
                if (buf >= (void *)MIPS_KSEG1_START &&
                    buf < (void *)MIPS_KSEG2_START)
                        map->_dm_flags |= EMIPS_DMAMAP_COHERENT;
        }
        return error;
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
        vaddr_t lastaddr;
        int seg, error, first;
        struct mbuf *m;

        /*
         * Make sure that on error condition we return "no valid mappings."
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
        KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
        if ((m0->m_flags & M_PKTHDR) == 0)
                panic("_bus_dmamap_load_mbuf: no packet header");
#endif

        if (m0->m_pkthdr.len > map->_dm_size)
                return (EINVAL);

        first = 1;
        seg = 0;
        error = 0;
        for (m = m0; m != NULL && error == 0; m = m->m_next) {
                if (m->m_len == 0)
                        continue;
                error = _bus_dmamap_load_buffer(map, m->m_data, m->m_len,
                    vmspace_kernel(), flags, &lastaddr, &seg, first);
                first = 0;
        }
        if (error == 0) {
                map->dm_mapsize = m0->m_pkthdr.len;
                map->dm_nsegs = seg + 1;
                map->_dm_vmspace = vmspace_kernel();    /* always kernel */
        }
        return error;
}

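/*
 * Sketch of a network driver transmit path using the mbuf loader
 * above.  A failed load leaves dm_nsegs == 0; on success the physical
 * segments are walked via dm_segs[]/dm_nsegs.  The hardware hook
 * example_hw_queue_seg() is hypothetical; guarded out of the build.
 */
#if 0
static int
example_tx_load(bus_dma_tag_t dmat, bus_dmamap_t dmam, struct mbuf *m0)
{
        int error, i;

        error = bus_dmamap_load_mbuf(dmat, dmam, m0, BUS_DMA_NOWAIT);
        if (error != 0)
                return error;   /* e.g. EFBIG: chain has too many segments */

        /* Flush the packet to memory before the device reads it. */
        bus_dmamap_sync(dmat, dmam, 0, dmam->dm_mapsize,
            BUS_DMASYNC_PREWRITE);

        /* Hand each physical segment to the (hypothetical) hardware. */
        for (i = 0; i < dmam->dm_nsegs; i++)
                example_hw_queue_seg(dmam->dm_segs[i].ds_addr,
                    dmam->dm_segs[i].ds_len);

        return 0;
}
#endif
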
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
        vaddr_t lastaddr;
        int seg, i, error, first;
        bus_size_t minlen, resid;
        struct iovec *iov;
        void *addr;

        /*
         * Make sure that on error condition we return "no valid mappings."
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
        KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

        resid = uio->uio_resid;
        iov = uio->uio_iov;

        first = 1;
        seg = 0;
        error = 0;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
                addr = iov[i].iov_base;

                error = _bus_dmamap_load_buffer(map, addr, minlen,
                    uio->uio_vmspace, flags, &lastaddr, &seg, first);
                first = 0;

                resid -= minlen;
        }
        if (error == 0) {
                map->dm_mapsize = uio->uio_resid;
                map->dm_nsegs = seg + 1;
                map->_dm_vmspace = uio->uio_vmspace;
        }
        return error;
}

/*
 * Like _bus_dmamap_load(), but for raw memory.
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{

        panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

        /*
         * No resources to free; just mark the mappings as
         * invalid.
         */
        map->dm_maxsegsz = map->_dm_maxmaxsegsz;
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
        map->_dm_flags &= ~EMIPS_DMAMAP_COHERENT;
        map->_dm_vmspace = NULL;
}

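/*
 * Sketch of a character-device read path that maps the caller's
 * buffers directly with the uio loader above, then unloads the map
 * once the transfer is done.  The transfer hook
 * example_hw_transfer_and_wait() is hypothetical; guarded out of the
 * build.
 */
#if 0
static int
example_dev_read(bus_dma_tag_t dmat, bus_dmamap_t dmam, struct uio *uio)
{
        int error;

        error = bus_dmamap_load_uio(dmat, dmam, uio, BUS_DMA_WAITOK);
        if (error != 0)
                return error;

        /* Invalidate stale cache lines before the device writes memory. */
        bus_dmamap_sync(dmat, dmam, 0, dmam->dm_mapsize,
            BUS_DMASYNC_PREREAD);

        error = example_hw_transfer_and_wait(dmam);     /* hypothetical */

        /* The CPU may look at the data only after the POST sync. */
        bus_dmamap_sync(dmat, dmam, 0, dmam->dm_mapsize,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(dmat, dmam);
        return error;
}
#endif
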
#ifdef MIPS1
/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 *
 * This is the R3000 version.
 */
void
_bus_dmamap_sync_r3k(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
        bus_size_t minlen;
        bus_addr_t addr;
        int i;

        /*
         * Mixing PRE and POST operations is not allowed.
         */
        if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
            (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
                panic("_bus_dmamap_sync_r3k: mix PRE and POST");

#ifdef DIAGNOSTIC
        if (offset >= map->dm_mapsize)
                panic("_bus_dmamap_sync_r3k: bad offset %lu (map size is %lu)",
                    offset, map->dm_mapsize);
        if (len == 0 || (offset + len) > map->dm_mapsize)
                panic("_bus_dmamap_sync_r3k: bad length");
#endif

        /*
         * The R3000 cache is write-through.  Therefore, we only need
         * to drain the write buffer on PREWRITE.  The cache is not
         * coherent, however, so we need to invalidate the data cache
         * on PREREAD (should we do it POSTREAD instead?).
         *
         * POSTWRITE (and POSTREAD, currently) are noops.
         */

        if (ops & BUS_DMASYNC_PREWRITE) {
                /*
                 * Flush the write buffer.
                 */
                wbflush();
        }

        /*
         * If we're not doing PREREAD, nothing more to do.
         */
        if ((ops & BUS_DMASYNC_PREREAD) == 0)
                return;

        /*
         * No cache invalidation is necessary if the DMA map covers
         * COHERENT DMA-safe memory (which is mapped un-cached).
         */
        if (map->_dm_flags & EMIPS_DMAMAP_COHERENT)
                return;

        /*
         * If we are going to hit something as large or larger
         * than the entire data cache, just nail the whole thing.
         *
         * NOTE: Even though this is `wbinv_all', since the cache is
         * write-through, it just invalidates it.
         */
        if (len >= mips_cache_info.mci_pdcache_size) {
                mips_dcache_wbinv_all();
                return;
        }

        for (i = 0; i < map->dm_nsegs && len != 0; i++) {
                /* Find the beginning segment. */
                if (offset >= map->dm_segs[i].ds_len) {
                        offset -= map->dm_segs[i].ds_len;
                        continue;
                }

                /*
                 * Now at the first segment to sync; nail
                 * each segment until we have exhausted the
                 * length.
                 */
                minlen = len < map->dm_segs[i].ds_len - offset ?
                    len : map->dm_segs[i].ds_len - offset;

                addr = map->dm_segs[i].ds_addr;

#ifdef BUS_DMA_DEBUG
                printf("bus_dmamap_sync_r3k: flushing segment %d "
                    "(0x%lx..0x%lx) ...", i, addr + offset,
                    addr + offset + minlen - 1);
#endif
                mips_dcache_inv_range(
                    MIPS_PHYS_TO_KSEG0(addr + offset), minlen);
#ifdef BUS_DMA_DEBUG
                printf("\n");
#endif
                offset = 0;
                len -= minlen;
        }
}
#endif /* MIPS1 */

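/*
 * Sketch of a partial sync: offset/len are relative to the start of
 * the mapping, and the sync functions in this file walk dm_segs[] to
 * find the affected range, so a driver can sync only the descriptor
 * being handed over instead of the whole ring.  The descriptor size
 * and ring map are hypothetical; guarded out of the build.
 */
#if 0
static void
example_sync_one_desc(bus_dma_tag_t dmat, bus_dmamap_t ringmap, int idx,
    bus_size_t descsz)
{

        /* Write back just this descriptor before the device reads it. */
        bus_dmamap_sync(dmat, ringmap, idx * descsz, descsz,
            BUS_DMASYNC_PREWRITE);
}
#endif
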
#ifdef MIPS3
/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 *
 * This is the R4000 version.
 */
void
_bus_dmamap_sync_r4k(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
        bus_size_t minlen;
        bus_addr_t addr;
        int i, useindex;

        /*
         * Mixing PRE and POST operations is not allowed.
         */
        if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
            (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
                panic("_bus_dmamap_sync_r4k: mix PRE and POST");

#ifdef DIAGNOSTIC
        if (offset >= map->dm_mapsize)
                panic("_bus_dmamap_sync_r4k: bad offset %lu (map size is %lu)",
                    offset, map->dm_mapsize);
        if (len == 0 || (offset + len) > map->dm_mapsize)
                panic("_bus_dmamap_sync_r4k: bad length");
#endif

        /*
         * The R4000 cache is virtually-indexed, write-back.  This means
         * we need to do the following things:
         *
         *      PREREAD -- Invalidate D-cache.  Note we might have
         *      to also write-back here if we have to use an Index
         *      op, or if the buffer start/end is not cache-line aligned.
         *
         *      PREWRITE -- Write-back the D-cache.  If we have to use
         *      an Index op, we also have to invalidate.  Note that if
         *      we are doing PREREAD|PREWRITE, we can collapse everything
         *      into a single op.
         *
         *      POSTREAD -- Nothing.
         *
         *      POSTWRITE -- Nothing.
         */

        /*
         * Flush the write buffer.
         * XXX Is this always necessary?
         */
        wbflush();

        ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
        if (ops == 0)
                return;

        /*
         * If the mapping is of COHERENT DMA-safe memory, no cache
         * flush is necessary.
         */
        if (map->_dm_flags & EMIPS_DMAMAP_COHERENT)
                return;

        /*
         * If the mapping belongs to the kernel, or if it belongs
         * to the currently-running process (XXX actually, vmspace),
         * then we can use Hit ops.  Otherwise, Index ops.
         *
         * This should be true the vast majority of the time.
         */
        if (__predict_true(VMSPACE_IS_KERNEL_P(map->_dm_vmspace) ||
            map->_dm_vmspace == curproc->p_vmspace))
                useindex = 0;
        else
                useindex = 1;

        for (i = 0; i < map->dm_nsegs && len != 0; i++) {
                /* Find the beginning segment. */
                if (offset >= map->dm_segs[i].ds_len) {
                        offset -= map->dm_segs[i].ds_len;
                        continue;
                }

                /*
                 * Now at the first segment to sync; nail
                 * each segment until we have exhausted the
                 * length.
                 */
                minlen = len < map->dm_segs[i].ds_len - offset ?
                    len : map->dm_segs[i].ds_len - offset;

                addr = map->dm_segs[i]._ds_vaddr;

#ifdef BUS_DMA_DEBUG
                printf("bus_dmamap_sync: flushing segment %d "
                    "(0x%lx..0x%lx) ...", i, addr + offset,
                    addr + offset + minlen - 1);
#endif

                /*
                 * If we are forced to use Index ops, it's always a
                 * Write-back,Invalidate, so just do one test.
                 */
                if (__predict_false(useindex)) {
                        mips_dcache_wbinv_range_index(addr + offset, minlen);
#ifdef BUS_DMA_DEBUG
                        printf("\n");
#endif
                        offset = 0;
                        len -= minlen;
                        continue;
                }

                switch (ops) {
                case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
                        mips_dcache_wbinv_range(addr + offset, minlen);
                        break;

                case BUS_DMASYNC_PREREAD:
#if 1
                        mips_dcache_wbinv_range(addr + offset, minlen);
#else
                        mips_dcache_inv_range(addr + offset, minlen);
#endif
                        break;

                case BUS_DMASYNC_PREWRITE:
                        mips_dcache_wb_range(addr + offset, minlen);
                        break;
                }
#ifdef BUS_DMA_DEBUG
                printf("\n");
#endif
                offset = 0;
                len -= minlen;
        }
}
#endif /* MIPS3 */

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
        return _bus_dmamem_alloc_range_common(t, size, alignment, boundary,
            segs, nsegs, rsegs, flags,
            pmap_limits.avail_start /*low*/,
            pmap_limits.avail_end - 1 /*high*/);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

        _bus_dmamem_free_common(t, segs, nsegs);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{

        /*
         * If we're only mapping 1 segment, use KSEG0 or KSEG1, to avoid
         * TLB thrashing.
         */
        if (nsegs == 1) {
                if (flags & BUS_DMA_COHERENT)
                        *kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
                else
                        *kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
                return 0;
        }

        return (_bus_dmamem_map_common(t, segs, nsegs, size, kvap, flags, 0));
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

        /*
         * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
         * not in KSEG2).
         */
        if (kva >= (void *)MIPS_KSEG0_START &&
            kva < (void *)MIPS_KSEG2_START)
                return;

        _bus_dmamem_unmap_common(t, kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off,
    int prot, int flags)
{
        bus_addr_t rv;

        rv = _bus_dmamem_mmap_common(t, segs, nsegs, off, prot, flags);
        if (rv == (bus_addr_t)-1)
                return (-1);

        return (mips_btop((char *)rv));
}
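
/*
 * Sketch of allocating DMA-safe memory for a descriptor ring and
 * mapping it uncached.  With a single segment, _bus_dmamem_map()
 * above hands back a KSEG1 address when BUS_DMA_COHERENT is given,
 * so the subsequent load marks the map EMIPS_DMAMAP_COHERENT and the
 * sync routines become no-ops for it.  The ring size and out-parameters
 * are hypothetical; guarded out of the build.
 */
#if 0
static int
example_alloc_ring(bus_dma_tag_t dmat, bus_size_t ringsize,
    void **kvap, bus_dmamap_t *mapp)
{
        bus_dma_segment_t seg;
        int rseg, error;

        error = bus_dmamem_alloc(dmat, ringsize, PAGE_SIZE, 0,
            &seg, 1, &rseg, BUS_DMA_WAITOK);
        if (error != 0)
                return error;

        /* One segment + BUS_DMA_COHERENT => uncached KSEG1 mapping. */
        error = bus_dmamem_map(dmat, &seg, rseg, ringsize, kvap,
            BUS_DMA_WAITOK | BUS_DMA_COHERENT);
        if (error != 0)
                goto fail_free;

        error = bus_dmamap_create(dmat, ringsize, 1, ringsize, 0,
            BUS_DMA_WAITOK, mapp);
        if (error != 0)
                goto fail_unmap;

        /* Loading a KSEG1 address marks the map EMIPS_DMAMAP_COHERENT. */
        error = bus_dmamap_load(dmat, *mapp, *kvap, ringsize, NULL,
            BUS_DMA_WAITOK);
        if (error != 0)
                goto fail_destroy;

        return 0;

 fail_destroy:
        bus_dmamap_destroy(dmat, *mapp);
 fail_unmap:
        bus_dmamem_unmap(dmat, *kvap, ringsize);
 fail_free:
        bus_dmamem_free(dmat, &seg, rseg);
        return error;
}
#endif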