/* $NetBSD: bus_dma.c,v 1.74 2022/07/26 20:08:54 andvar Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.74 2022/07/26 20:08:54 andvar Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#define _ALPHA_BUS_DMA_PRIVATE
#include <sys/bus.h>
#include <machine/intr.h>

#include <dev/bus_dma/bus_dmamem_common.h>

static int _bus_dmamap_load_buffer_direct(bus_dma_tag_t,
    bus_dmamap_t, void *, bus_size_t, struct vmspace *, int,
    paddr_t *, int *, int);

extern paddr_t avail_start, avail_end;	/* from pmap.c */

#define	DMA_COUNT_DECL(cnt)	_DMA_COUNT_DECL(dma_direct, cnt)
#define	DMA_COUNT(cnt)		_DMA_COUNT(dma_direct, cnt)

static size_t
_bus_dmamap_mapsize(int const nsegments)
{
        KASSERT(nsegments > 0);
        return sizeof(struct alpha_bus_dmamap) +
            (sizeof(bus_dma_segment_t) * (nsegments - 1));
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
        struct alpha_bus_dmamap *map;
        void *mapstore;

        /*
         * Allocate and initialize the DMA map.  The end of the map
         * is a variable-sized array of segments, so we allocate enough
         * room for them in one shot.
         *
         * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
         * of ALLOCNOW notifies others that we've reserved these resources,
         * and they are not to be freed.
         *
         * The bus_dmamap_t includes one bus_dma_segment_t, hence
         * the (nsegments - 1).
         */
        if ((mapstore = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
            (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
                return (ENOMEM);

        map = (struct alpha_bus_dmamap *)mapstore;
        map->_dm_size = size;
        map->_dm_segcnt = nsegments;
        map->_dm_maxmaxsegsz = maxsegsz;
        if (t->_boundary != 0 && t->_boundary < boundary)
                map->_dm_boundary = t->_boundary;
        else
                map->_dm_boundary = boundary;
        map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
        map->dm_maxsegsz = maxsegsz;
        map->dm_mapsize = 0;		/* no valid mappings */
        map->dm_nsegs = 0;
        map->_dm_window = NULL;

        *dmamp = map;
        return (0);
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

        kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
}
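
/*
 * Example (illustrative only; the buffer size and segment count below are
 * arbitrary values, not requirements of this code): a driver uses the maps
 * managed by the functions above through the machine-independent bus_dma(9)
 * entry points, roughly in this order:
 *
 *	bus_dmamap_t map;
 *
 *	bus_dmamap_create(tag, MAXPHYS, 8, MAXPHYS, 0, BUS_DMA_WAITOK, &map);
 *	bus_dmamap_load(tag, map, buf, len, NULL, BUS_DMA_WAITOK);
 *	bus_dmamap_sync(tag, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... program the device with map->dm_segs[0 .. map->dm_nsegs - 1] ...
 *	bus_dmamap_sync(tag, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(tag, map);
 *	bus_dmamap_destroy(tag, map);
 */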

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer_direct(bus_dma_tag_t t, bus_dmamap_t map,
    void *buf, size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vaddr_t vaddr = (vaddr_t)buf;
        int seg;
        bool address_is_valid __diagused;

        lastaddr = *lastaddrp;
        bmask = ~(map->_dm_boundary - 1);

        for (seg = *segp; buflen > 0 ; ) {
                /*
                 * Get the physical address for this segment.
                 */
                address_is_valid =
                    pmap_extract(vm->vm_map.pmap, vaddr, &curaddr);
                KASSERT(address_is_valid);

                /*
                 * If we're beyond the current DMA window, indicate
                 * that and try to fall back into SGMAPs.
                 */
                if (t->_wsize != 0 && curaddr >= t->_wsize)
                        return (EINVAL);

                curaddr |= t->_wbase;

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
                if (buflen < sgsize)
                        sgsize = buflen;
                if (map->dm_maxsegsz < sgsize)
                        sgsize = map->dm_maxsegsz;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (map->_dm_boundary > 0) {
                        baddr = (curaddr + map->_dm_boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                /*
                 * Insert chunk into a segment, coalescing with
                 * the previous segment if possible.
                 */
                if (first) {
                        map->dm_segs[seg].ds_addr = curaddr;
                        map->dm_segs[seg].ds_len = sgsize;
                        first = 0;
                } else {
                        if ((map->_dm_flags & DMAMAP_NO_COALESCE) == 0 &&
                            curaddr == lastaddr &&
                            (map->dm_segs[seg].ds_len + sgsize) <=
                             map->dm_maxsegsz &&
                            (map->_dm_boundary == 0 ||
                             (map->dm_segs[seg].ds_addr & bmask) ==
                             (curaddr & bmask)))
                                map->dm_segs[seg].ds_len += sgsize;
                        else {
                                if (++seg >= map->_dm_segcnt)
                                        break;
                                map->dm_segs[seg].ds_addr = curaddr;
                                map->dm_segs[seg].ds_len = sgsize;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        if (buflen != 0) {
                /*
                 * If there is a chained window, we will automatically
                 * fall back to it.
                 */
                return (EFBIG);		/* XXX better return value here? */
        }

        return (0);
}

DMA_COUNT_DECL(load);
DMA_COUNT_DECL(load_next_window);

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.  Called by bus-specific DMA map load functions with the
 * OR value appropriate for indicating "direct-mapped" for that
 * chipset.
 */
int
_bus_dmamap_load_direct(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
        paddr_t lastaddr;
        int seg, error;
        struct vmspace *vm;

        /*
         * Make sure that on error condition we return "no valid mappings".
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
        KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
        KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

        if (buflen > map->_dm_size)
                return (EINVAL);

        if (p != NULL) {
                vm = p->p_vmspace;
        } else {
                vm = vmspace_kernel();
        }
        seg = 0;
        error = _bus_dmamap_load_buffer_direct(t, map, buf, buflen,
            vm, flags, &lastaddr, &seg, 1);
        if (error == 0) {
                DMA_COUNT(load);
                map->dm_mapsize = buflen;
                map->dm_nsegs = seg + 1;
                map->_dm_window = t;
        } else if (t->_next_window != NULL) {
                /*
                 * Give the next window a chance.
                 */
                DMA_COUNT(load_next_window);
                error = bus_dmamap_load(t->_next_window, map, buf, buflen,
                    p, flags);
        }
        return (error);
}
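
/*
 * Worked example of the direct-window translation performed by
 * _bus_dmamap_load_buffer_direct() (the window base and size below are
 * made-up values, not those of any particular chipset): with
 * t->_wbase == 0x40000000 and t->_wsize == 0x40000000 (a 1 GB
 * direct-mapped window), a page at physical address 0x001f2000 is handed
 * to the device at bus address 0x401f2000 (curaddr | t->_wbase).  A page
 * at or above physical 0x40000000 does not fit in the window, the loader
 * fails, and the caller above retries the load on the chained window
 * (t->_next_window), typically an SGMAP-based one.
 */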

DMA_COUNT_DECL(load_mbuf);
DMA_COUNT_DECL(load_mbuf_next_window);

/*
 * Like _bus_dmamap_load_direct(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf_direct(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
        paddr_t lastaddr;
        int seg, error, first;
        struct mbuf *m;

        /*
         * Make sure that on error condition we return "no valid mappings."
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
        KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
        KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

#ifdef DIAGNOSTIC
        if ((m0->m_flags & M_PKTHDR) == 0)
                panic("_bus_dmamap_load_mbuf_direct: no packet header");
#endif

        if (m0->m_pkthdr.len > map->_dm_size)
                return (EINVAL);

        first = 1;
        seg = 0;
        error = 0;
        for (m = m0; m != NULL && error == 0; m = m->m_next) {
                if (m->m_len == 0)
                        continue;
                /* XXX Could be better about coalescing. */
                /* XXX Doesn't check boundaries. */
                switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
                case M_EXT|M_EXT_CLUSTER:
                        /* XXX KDASSERT */
                        KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
                        lastaddr = m->m_ext.ext_paddr +
                            (m->m_data - m->m_ext.ext_buf);
 have_addr:
                        if (first == 0 &&
                            ++seg >= map->_dm_segcnt) {
                                error = EFBIG;
                                break;
                        }

                        /*
                         * If we're beyond the current DMA window, indicate
                         * that and try to fall back into SGMAPs.
                         */
                        if (t->_wsize != 0 && lastaddr >= t->_wsize) {
                                error = EINVAL;
                                break;
                        }
                        lastaddr |= t->_wbase;

                        map->dm_segs[seg].ds_addr = lastaddr;
                        map->dm_segs[seg].ds_len = m->m_len;
                        lastaddr += m->m_len;
                        break;

                case 0:
                        lastaddr = m->m_paddr + M_BUFOFFSET(m) +
                            (m->m_data - M_BUFADDR(m));
                        goto have_addr;

                default:
                        error = _bus_dmamap_load_buffer_direct(t, map,
                            m->m_data, m->m_len, vmspace_kernel(), flags,
                            &lastaddr, &seg, first);
                }
                first = 0;
        }
        if (error == 0) {
                DMA_COUNT(load_mbuf);
                map->dm_mapsize = m0->m_pkthdr.len;
                map->dm_nsegs = seg + 1;
                map->_dm_window = t;
        } else if (t->_next_window != NULL) {
                /*
                 * Give the next window a chance.
                 */
                DMA_COUNT(load_mbuf_next_window);
                error = bus_dmamap_load_mbuf(t->_next_window, map, m0, flags);
        }
        return (error);
}
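
/*
 * A note on the mbuf loader above: for mbuf clusters (M_EXT|M_EXT_CLUSTER)
 * and for data stored within the mbuf itself, the physical address is
 * taken from the cached m_ext.ext_paddr / m_paddr values rather than
 * calling pmap_extract() per page, and each such mbuf (whose storage is
 * physically contiguous) becomes a single DMA segment.  All other external
 * storage is handed to the generic buffer loader.
 */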

DMA_COUNT_DECL(load_uio);
DMA_COUNT_DECL(load_uio_next_window);

/*
 * Like _bus_dmamap_load_direct(), but for uios.
 */
int
_bus_dmamap_load_uio_direct(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{
        paddr_t lastaddr;
        int seg, i, error, first;
        bus_size_t minlen, resid;
        struct vmspace *vm;
        struct iovec *iov;
        void *addr;

        /*
         * Make sure that on error condition we return "no valid mappings."
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
        KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
        KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

        resid = uio->uio_resid;
        iov = uio->uio_iov;

        vm = uio->uio_vmspace;

        first = 1;
        seg = 0;
        error = 0;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
                addr = (void *)iov[i].iov_base;

                error = _bus_dmamap_load_buffer_direct(t, map,
                    addr, minlen, vm, flags, &lastaddr, &seg, first);
                first = 0;

                resid -= minlen;
        }
        if (error == 0) {
                DMA_COUNT(load_uio);
                map->dm_mapsize = uio->uio_resid;
                map->dm_nsegs = seg + 1;
                map->_dm_window = t;
        } else if (t->_next_window != NULL) {
                /*
                 * Give the next window a chance.
                 */
                DMA_COUNT(load_uio_next_window);
                error = bus_dmamap_load_uio(t->_next_window, map, uio, flags);
        }
        return (error);
}

/*
 * Like _bus_dmamap_load_direct(), but for raw memory.
 */
int
_bus_dmamap_load_raw_direct(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

        panic("_bus_dmamap_load_raw_direct: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload_common(bus_dma_tag_t t, bus_dmamap_t map)
{

        /*
         * No resources to free; just mark the mappings as
         * invalid.
         */
        map->dm_maxsegsz = map->_dm_maxmaxsegsz;
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
        map->_dm_window = NULL;
        map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
}

DMA_COUNT_DECL(unload);

void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
        KASSERT(map->_dm_window == t);
        DMA_COUNT(unload);
        _bus_dmamap_unload_common(t, map);
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{

        alpha_mb();
}
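
/*
 * Note that the sync operation above is just a memory barrier: DMA on
 * these systems is assumed to be cache-coherent with the CPU, so
 * alpha_mb() is only needed to order the CPU's accesses to the buffer
 * relative to starting (or observing the completion of) the device's DMA.
 */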

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

        return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
            segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)));
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{

        return (_bus_dmamem_alloc_range_common(t, size, alignment, boundary,
            segs, nsegs, rsegs, flags,
            low, high));
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

        _bus_dmamem_free_common(t, segs, nsegs);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{

        /*
         * If we're only mapping 1 segment, use K0SEG, to avoid
         * TLB thrashing.
         */
        if (nsegs == 1) {
                *kvap = (void *)ALPHA_PHYS_TO_K0SEG(segs[0].ds_addr);
                return (0);
        }

        return (_bus_dmamem_map_common(t, segs, nsegs, size, kvap, flags, 0));
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

        /*
         * Nothing to do if we mapped it with K0SEG.
         */
        if (kva >= (void *)ALPHA_K0SEG_BASE &&
            kva <= (void *)ALPHA_K0SEG_END)
                return;

        _bus_dmamem_unmap_common(t, kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
        bus_addr_t rv;

        rv = _bus_dmamem_mmap_common(t, segs, nsegs, off, prot, flags);
        if (rv == (bus_addr_t)-1)
                return (-1);

        return (alpha_btop((char *)rv));
}