/* $NetBSD: bus_dma.c,v 1.32 2025/04/26 07:33:13 tsutsui Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
31 */ 32 33 #include <sys/cdefs.h> 34 __KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.32 2025/04/26 07:33:13 tsutsui Exp $"); 35 36 #include <sys/param.h> 37 #include <sys/systm.h> 38 #include <sys/mbuf.h> 39 #include <sys/device.h> 40 #include <sys/proc.h> 41 #include <sys/kmem.h> 42 43 #include <uvm/uvm_extern.h> 44 45 #define _MIPSCO_BUS_DMA_PRIVATE 46 #include <machine/bus.h> 47 48 #include <dev/bus_dma/bus_dmamem_common.h> 49 50 #include <mips/cache.h> 51 52 paddr_t kvtophys(vaddr_t); /* XXX */ 53 54 static int _bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, 55 void *, bus_size_t, struct vmspace *, int, paddr_t *, 56 int *, int); 57 58 void 59 _bus_dma_tag_init(bus_dma_tag_t t) 60 { 61 t->_dmamap_create = _bus_dmamap_create; 62 t->_dmamap_destroy = _bus_dmamap_destroy; 63 t->_dmamap_load = _bus_dmamap_load; 64 t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf; 65 t->_dmamap_load_uio = _bus_dmamap_load_uio; 66 t->_dmamap_load_raw = _bus_dmamap_load_raw; 67 t->_dmamap_unload = _bus_dmamap_unload; 68 t->_dmamap_sync = _bus_dmamap_sync; 69 t->_dmamem_alloc = _bus_dmamem_alloc; 70 t->_dmamem_free = _bus_dmamem_free; 71 t->_dmamem_map = _bus_dmamem_map; 72 t->_dmamem_unmap = _bus_dmamem_unmap; 73 t->_dmamem_mmap = _bus_dmamem_mmap; 74 } 75 76 static size_t 77 _bus_dmamap_mapsize(int const nsegments) 78 { 79 KASSERT(nsegments > 0); 80 return sizeof(struct mipsco_bus_dmamap) + 81 (sizeof(bus_dma_segment_t) * (nsegments - 1)); 82 } 83 84 /* 85 * Common function for DMA map creation. May be called by bus-specific 86 * DMA map creation functions. 87 */ 88 int 89 _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments, bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp) 90 { 91 struct mipsco_bus_dmamap *map; 92 void *mapstore; 93 94 /* 95 * Allocate and initialize the DMA map. The end of the map 96 * is a variable-sized array of segments, so we allocate enough 97 * room for them in one shot. 
98 * 99 * Note we don't preserve the WAITOK or NOWAIT flags. Preservation 100 * of ALLOCNOW notifies others that we've reserved these resources, 101 * and they are not to be freed. 102 * 103 * The bus_dmamap_t includes one bus_dma_segment_t, hence 104 * the (nsegments - 1). 105 */ 106 if ((mapstore = kmem_zalloc(_bus_dmamap_mapsize(nsegments), 107 (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL) 108 return (ENOMEM); 109 110 map = (struct mipsco_bus_dmamap *)mapstore; 111 map->_dm_size = size; 112 map->_dm_segcnt = nsegments; 113 map->_dm_maxmaxsegsz = maxsegsz; 114 map->_dm_boundary = boundary; 115 map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT); 116 map->dm_maxsegsz = maxsegsz; 117 map->dm_mapsize = 0; /* no valid mappings */ 118 map->dm_nsegs = 0; 119 120 *dmamp = map; 121 return (0); 122 } 123 124 /* 125 * Common function for DMA map destruction. May be called by bus-specific 126 * DMA map destruction functions. 127 */ 128 void 129 _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map) 130 { 131 132 kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt)); 133 } 134 135 /* 136 * Utility function to load a linear buffer. lastaddrp holds state 137 * between invocations (for multiple-buffer loads). segp contains 138 * the starting segment on entrance, and the ending segment on exit. 139 * first indicates if this is the first invocation of this function. 140 */ 141 static int 142 _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp, int *segp, int first) 143 { 144 bus_size_t sgsize; 145 bus_addr_t baddr, bmask; 146 paddr_t curaddr, lastaddr; 147 vaddr_t vaddr = (vaddr_t)buf; 148 int seg; 149 150 lastaddr = *lastaddrp; 151 bmask = ~(map->_dm_boundary - 1); 152 153 for (seg = *segp; buflen > 0 ; ) { 154 /* 155 * Get the physical address for this segment. 
156 */ 157 if (!VMSPACE_IS_KERNEL_P(vm)) 158 (void) pmap_extract(vm_map_pmap(&vm->vm_map), 159 vaddr, &curaddr); 160 else 161 curaddr = kvtophys(vaddr); 162 163 /* 164 * Compute the segment size, and adjust counts. 165 */ 166 sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET); 167 if (buflen < sgsize) 168 sgsize = buflen; 169 170 /* 171 * Make sure we don't cross any boundaries. 172 */ 173 if (map->_dm_boundary > 0) { 174 baddr = (curaddr + map->_dm_boundary) & bmask; 175 if (sgsize > (baddr - curaddr)) 176 sgsize = (baddr - curaddr); 177 } 178 179 /* 180 * Insert chunk into a segment, coalescing with 181 * the previous segment if possible. 182 */ 183 if (first) { 184 map->dm_segs[seg].ds_addr = curaddr; 185 map->dm_segs[seg].ds_len = sgsize; 186 first = 0; 187 } else { 188 if (curaddr == lastaddr && 189 (map->dm_segs[seg].ds_len + sgsize) <= 190 map->dm_maxsegsz && 191 (map->_dm_boundary == 0 || 192 (map->dm_segs[seg].ds_addr & bmask) == 193 (curaddr & bmask))) 194 map->dm_segs[seg].ds_len += sgsize; 195 else { 196 if (++seg >= map->_dm_segcnt) 197 break; 198 map->dm_segs[seg].ds_addr = curaddr; 199 map->dm_segs[seg].ds_len = sgsize; 200 } 201 } 202 203 lastaddr = curaddr + sgsize; 204 vaddr += sgsize; 205 buflen -= sgsize; 206 } 207 208 *segp = seg; 209 *lastaddrp = lastaddr; 210 211 /* 212 * Did we fit? 213 */ 214 if (buflen != 0) 215 return (EFBIG); /* XXX better return value here? */ 216 217 return (0); 218 } 219 220 /* 221 * Common function for loading a direct-mapped DMA map with a linear 222 * buffer. 223 */ 224 int 225 _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen, struct proc *p, int flags) 226 { 227 paddr_t lastaddr; 228 int seg, error; 229 struct vmspace *vm; 230 231 /* 232 * Make sure that on error condition we return "no valid mappings". 
233 */ 234 map->dm_mapsize = 0; 235 map->dm_nsegs = 0; 236 KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz); 237 238 if (buflen > map->_dm_size) 239 return (EINVAL); 240 241 if (p != NULL) { 242 vm = p->p_vmspace; 243 } else { 244 vm = vmspace_kernel(); 245 } 246 247 seg = 0; 248 error = _bus_dmamap_load_buffer(t, map, buf, buflen, 249 vm, flags, &lastaddr, &seg, 1); 250 if (error == 0) { 251 map->dm_mapsize = buflen; 252 map->dm_nsegs = seg + 1; 253 254 /* 255 * For linear buffers, we support marking the mapping 256 * as COHERENT. 257 * 258 * XXX Check TLB entries for cache-inhibit bits? 259 */ 260 if (buf >= (void *)MIPS_KSEG1_START && 261 buf < (void *)MIPS_KSEG2_START) 262 map->_dm_flags |= MIPSCO_DMAMAP_COHERENT; 263 } 264 return (error); 265 } 266 267 /* 268 * Like _bus_dmamap_load(), but for mbufs. 269 */ 270 int 271 _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0, int flags) 272 { 273 paddr_t lastaddr; 274 int seg, error, first; 275 struct mbuf *m; 276 277 /* 278 * Make sure that on error condition we return "no valid mappings." 279 */ 280 map->dm_mapsize = 0; 281 map->dm_nsegs = 0; 282 KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz); 283 284 #ifdef DIAGNOSTIC 285 if ((m0->m_flags & M_PKTHDR) == 0) 286 panic("_bus_dmamap_load_mbuf: no packet header"); 287 #endif 288 289 if (m0->m_pkthdr.len > map->_dm_size) 290 return (EINVAL); 291 292 first = 1; 293 seg = 0; 294 error = 0; 295 for (m = m0; m != NULL && error == 0; m = m->m_next) { 296 if (m->m_len == 0) 297 continue; 298 error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len, 299 vmspace_kernel(), flags, &lastaddr, &seg, first); 300 first = 0; 301 } 302 if (error == 0) { 303 map->dm_mapsize = m0->m_pkthdr.len; 304 map->dm_nsegs = seg + 1; 305 } 306 return (error); 307 } 308 309 /* 310 * Like _bus_dmamap_load(), but for uios. 
311 */ 312 int 313 _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio, int flags) 314 { 315 paddr_t lastaddr; 316 int seg, i, error, first; 317 bus_size_t minlen, resid; 318 struct iovec *iov; 319 void *addr; 320 321 /* 322 * Make sure that on error condition we return "no valid mappings." 323 */ 324 map->dm_mapsize = 0; 325 map->dm_nsegs = 0; 326 KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz); 327 328 resid = uio->uio_resid; 329 iov = uio->uio_iov; 330 331 first = 1; 332 seg = 0; 333 error = 0; 334 for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) { 335 /* 336 * Now at the first iovec to load. Load each iovec 337 * until we have exhausted the residual count. 338 */ 339 minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len; 340 addr = (void *)iov[i].iov_base; 341 342 error = _bus_dmamap_load_buffer(t, map, addr, minlen, 343 uio->uio_vmspace, flags, &lastaddr, &seg, first); 344 first = 0; 345 346 resid -= minlen; 347 } 348 if (error == 0) { 349 map->dm_mapsize = uio->uio_resid; 350 map->dm_nsegs = seg + 1; 351 } 352 return (error); 353 } 354 355 /* 356 * Like _bus_dmamap_load(), but for raw memory. 357 */ 358 int 359 _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags) 360 { 361 panic("_bus_dmamap_load_raw: not implemented"); 362 } 363 364 /* 365 * Common function for unloading a DMA map. May be called by 366 * chipset-specific DMA map unload functions. 367 */ 368 void 369 _bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map) 370 { 371 372 /* 373 * No resources to free; just mark the mappings as 374 * invalid. 375 */ 376 map->dm_maxsegsz = map->_dm_maxmaxsegsz; 377 map->dm_mapsize = 0; 378 map->dm_nsegs = 0; 379 map->_dm_flags &= ~MIPSCO_DMAMAP_COHERENT; 380 } 381 382 /* 383 * Common function for MIPS1 DMA map synchronization. May be called 384 * by chipset-specific DMA map synchronization functions. 385 * 386 * This is the R3000 version. 
387 */ 388 void 389 _bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset, bus_size_t len, int ops) 390 { 391 bus_size_t minlen; 392 bus_addr_t addr; 393 int i; 394 395 #ifdef DIAGNOSTIC 396 /* 397 * Mixing PRE and POST operations is not allowed. 398 */ 399 if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 && 400 (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0) 401 panic("_bus_dmamap_sync: mix PRE and POST"); 402 403 if (offset >= map->dm_mapsize) 404 panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)", 405 offset, map->dm_mapsize); 406 if (len == 0 || (offset + len) > map->dm_mapsize) 407 panic("_bus_dmamap_sync: bad length"); 408 #endif 409 410 /* 411 * The R3000 cache is write-through. Therefore, we only need 412 * to drain the write buffer on PREWRITE. The cache is not 413 * coherent, however, so we need to invalidate the data cache 414 * on POSTREAD. 415 * 416 * PREREAD and POSTWRITE are noops. 417 */ 418 419 if (ops & BUS_DMASYNC_PREWRITE) { 420 /* 421 * Flush the write buffer. 422 */ 423 wbflush(); 424 } 425 426 /* 427 * If we're not doing a POSTREAD, nothing more to do. 428 */ 429 if ((ops & BUS_DMASYNC_POSTREAD) == 0) 430 return; 431 432 /* 433 * If the mapping is of COHERENT DMA-safe memory, no cache 434 * flush is necessary. 435 */ 436 if (map->_dm_flags & MIPSCO_DMAMAP_COHERENT) 437 return; 438 439 /* 440 * If we are going to hit something as large or larger 441 * than the entire data cache, just nail the whole thing. 442 * 443 * NOTE: Even though this is `wbinv_all', since the cache is 444 * write-through, it just invalidates it. 445 */ 446 if (len >= mips_cache_info.mci_pdcache_size) { 447 mips_dcache_wbinv_all(); 448 return; 449 } 450 451 for (i = 0; i < map->dm_nsegs && len != 0; i++) { 452 /* Find the beginning segment. 
*/ 453 if (offset >= map->dm_segs[i].ds_len) { 454 offset -= map->dm_segs[i].ds_len; 455 continue; 456 } 457 458 /* 459 * Now at the first segment to sync; nail 460 * each segment until we have exhausted the 461 * length. 462 */ 463 minlen = len < map->dm_segs[i].ds_len - offset ? 464 len : map->dm_segs[i].ds_len - offset; 465 466 addr = map->dm_segs[i].ds_addr; 467 468 #ifdef BUS_DMA_DEBUG 469 printf("bus_dmamap_sync: flushing segment %d " 470 "(0x%lx..0x%lx) ...", i, addr + offset, 471 addr + offset + minlen - 1); 472 #endif 473 mips_dcache_inv_range( 474 MIPS_PHYS_TO_KSEG0(addr + offset), minlen); 475 #ifdef BUS_DMA_DEBUG 476 printf("\n"); 477 #endif 478 offset = 0; 479 len -= minlen; 480 } 481 } 482 483 /* 484 * Common function for DMA-safe memory allocation. May be called 485 * by bus-specific DMA memory allocation functions. 486 */ 487 int 488 _bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags) 489 { 490 491 return _bus_dmamem_alloc_range(t, size, alignment, boundary, 492 segs, nsegs, rsegs, flags, pmap_limits.avail_start, 493 trunc_page(pmap_limits.avail_end)); 494 } 495 496 /* 497 * Allocate physical memory from the given physical address range. 498 * Called by DMA-safe memory allocation methods. 499 */ 500 int 501 _bus_dmamem_alloc_range( 502 bus_dma_tag_t t, 503 bus_size_t size, 504 bus_size_t alignment, 505 bus_size_t boundary, 506 bus_dma_segment_t *segs, 507 int nsegs, 508 int *rsegs, 509 int flags, 510 paddr_t low, 511 paddr_t high) 512 { 513 514 return (_bus_dmamem_alloc_range_common(t, size, alignment, boundary, 515 segs, nsegs, rsegs, flags, 516 low, high)); 517 } 518 519 /* 520 * Common function for freeing DMA-safe memory. May be called by 521 * bus-specific DMA memory free functions. 
522 */ 523 void 524 _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs) 525 { 526 527 _bus_dmamem_free_common(t, segs, nsegs); 528 } 529 530 /* 531 * Common function for mapping DMA-safe memory. May be called by 532 * bus-specific DMA memory map functions. 533 */ 534 int 535 _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size, void **kvap, int flags) 536 { 537 538 /* 539 * If we're only mapping 1 segment, use KSEG0 or KSEG1, to avoid 540 * TLB thrashing. 541 */ 542 if (nsegs == 1) { 543 if (flags & BUS_DMA_COHERENT) 544 *kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr); 545 else 546 *kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr); 547 return (0); 548 } 549 550 /* XXX BUS_DMA_COHERENT */ 551 return (_bus_dmamem_map_common(t, segs, nsegs, size, kvap, flags, 0)); 552 } 553 554 /* 555 * Common function for unmapping DMA-safe memory. May be called by 556 * bus-specific DMA memory unmapping functions. 557 */ 558 void 559 _bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size) 560 { 561 562 /* 563 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e. 564 * not in KSEG2). 565 */ 566 if (kva >= (void *)MIPS_KSEG0_START && 567 kva < (void *)MIPS_KSEG2_START) 568 return; 569 570 _bus_dmamem_unmap_common(t, kva, size); 571 } 572 573 /* 574 * Common function for mmap(2)'ing DMA-safe memory. May be called by 575 * bus-specific DMA mmap(2)'ing functions. 576 */ 577 paddr_t 578 _bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off, int prot, int flags) 579 { 580 bus_addr_t rv; 581 582 rv = _bus_dmamem_mmap_common(t, segs, nsegs, off, prot, flags); 583 if (rv == (bus_addr_t)-1) 584 return (-1); 585 586 return (mips_btop((char *)rv)); 587 } 588