/*	$NetBSD: bus_dma.c,v 1.40 2023/12/17 14:54:49 andvar Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * bus_dma routines for vax. File copied from arm32/bus_dma.c.
 * NetBSD: bus_dma.c,v 1.11 1998/09/21 22:53:35 thorpej Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.40 2023/12/17 14:54:49 andvar Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/device.h>

#include <uvm/uvm.h>

#define _VAX_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <machine/ka43.h>
#include <machine/sid.h>

extern paddr_t avail_start, avail_end;
extern vaddr_t virtual_avail;

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct vmspace *, int, vaddr_t *, int *, bool);
int	_bus_dma_inrange(bus_dma_segment_t *, int, bus_addr_t);
int	_bus_dmamem_alloc_range(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int, vaddr_t, vaddr_t);

static size_t
_bus_dmamap_mapsize(int const nsegments)
{
	KASSERT(nsegments > 0);
	return sizeof(struct vax_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags,
    bus_dmamap_t *dmamp)
{
	struct vax_bus_dmamap *map;
	void *mapstore;

#ifdef DEBUG_DMA
	printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n",
	    t, size, nsegments, maxsegsz, boundary, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	if ((mapstore = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return (ENOMEM);

	map = (struct vax_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
#ifdef DEBUG_DMA
	printf("dmamap_create:map=%p\n", map);
#endif	/* DEBUG_DMA */
	return (0);
}
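
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * typically creates its map once at attach time through the MI
 * bus_dmamap_create() wrapper, which dispatches to the function above
 * via the tag.  The softc "sc" and its "sc_dmat" tag member are
 * hypothetical names.
 *
 *	bus_dmamap_t map;
 *	int error;
 *
 *	error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 16, MAXPHYS,
 *	    0, BUS_DMA_NOWAIT, &map);
 *	if (error != 0)
 *		return error;	(ENOMEM if the map store can't be allocated)
 */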

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_destroy: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if (map->dm_nsegs > 0)
		printf("bus_dmamap_destroy() called for map with valid mappings\n");
#endif	/* DIAGNOSTIC */
	kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	vaddr_t lastaddr = 0;
	int seg, error;
	struct vmspace *vm;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
	    t, map, buf, buflen, p, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
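
/*
 * Usage sketch (illustrative only): loading a wired kernel buffer and
 * walking the resulting segments, e.g. to program a hypothetical
 * device's scatter/gather registers.  "sc", "buf" and "len" are
 * assumed names, and the error handling is elided.
 *
 *	if (bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL,
 *	    BUS_DMA_NOWAIT) != 0)
 *		...handle EINVAL/EFBIG...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	for (i = 0; i < map->dm_nsegs; i++)
 *		...program map->dm_segs[i].ds_addr / ds_len...
 */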

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	vaddr_t lastaddr = 0;
	int seg, error;
	bool first;
	struct mbuf *m;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
	    t, map, m0, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif	/* DIAGNOSTIC */

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = true;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next, first = false) {
		if (m->m_len == 0)
			continue;
#if 0
		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
#if 0
		case M_EXT|M_EXT_CLUSTER:
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr
			    + (m->m_data - m->m_ext.ext_buf);
#endif
#if 1
 have_addr:
#endif
			if (!first && ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				continue;
			}
			map->dm_segs[seg].ds_addr = lastaddr;
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			continue;
#if 1
		case 0:
			KASSERT(m->m_paddr != M_PADDR_INVALID);
			lastaddr = m->m_paddr + M_BUFOFFSET(m)
			    + (m->m_data - M_BUFADDR(m));
			goto have_addr;
#endif
		default:
			break;
		}
#endif
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    vmspace_kernel(), flags, &lastaddr, &seg, first);
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
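
/*
 * Usage sketch (illustrative only): a network driver's transmit path
 * loads a whole mbuf chain in one call, and must be prepared for EFBIG
 * when the chain needs more than _dm_segcnt segments.  The fallback
 * shown (compacting the chain into a contiguous buffer and retrying)
 * is one common strategy; it is not something this file provides.
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		...copy m0 into a single cluster, retry the load...
 *	}
 */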

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	vaddr_t lastaddr = 0;
	int seg, i, error;
	bool first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = true;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = false;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
#ifdef DEBUG_DMA
	printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
	    t, map, offset, len, ops);
#endif	/* DEBUG_DMA */
	/*
	 * A VAX only has a snooping cache, so this routine is a no-op.
	 */
	return;
}
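
/*
 * Even though the sync hook is a no-op on this port, MI drivers must
 * still bracket every transfer with bus_dmamap_sync() calls so the
 * same code works on ports with incoherent caches or bounce buffers.
 * Sketch of the canonical receive-side pattern (illustrative only):
 *
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREREAD);
 *	...start the device transfer, wait for completion...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTREAD);
 *	...now the CPU may read the received data...
 */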

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */

int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	int error;

	error = (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, round_page(avail_start),
	    trunc_page(avail_end)));
	return (error);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}
	uvm_pglistfree(&mlist);
}
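
/*
 * Usage sketch (illustrative only): allocating and mapping a DMA-safe
 * descriptor ring.  RINGSIZE is a hypothetical constant.  Note that
 * _bus_dmamap_load_raw() above panics on this port, so the segments
 * would be handed to the device via a map loaded over the returned
 * kernel-virtual address instead.
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	void *kva;
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, RINGSIZE, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT) != 0)
 *		...fail...
 *	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, RINGSIZE, &kva,
 *	    BUS_DMA_NOWAIT) != 0)
 *		...bus_dmamem_free() and fail...
 */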

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * Special case (but common):
	 * If there is only one physical segment then the already-mapped
	 * virtual address is returned, since all physical memory is already
	 * mapped at the beginning of kernel virtual memory.
	 */
	if (nsegs == 1) {
		*kvap = (void *)(segs[0].ds_addr | KERNBASE);
		/*
		 * KA43 (3100/m76) must have its DMA-safe memory accessed
		 * through DIAGMEM.  Remap it here.
		 */
		if (vax_boardtype == VAX_BTYP_43) {
			pmap_map((vaddr_t)*kvap, segs[0].ds_addr|KA43_DIAGMEM,
			    (segs[0].ds_addr|KA43_DIAGMEM) + size,
			    VM_PROT_READ|VM_PROT_WRITE);
		}
		return 0;
	}
	size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			if (vax_boardtype == VAX_BTYP_43)
				addr |= KA43_DIAGMEM;
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		}
	}
	pmap_update(pmap_kernel());
	return (0);
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%lx\n", t, kva, size);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif	/* DIAGNOSTIC */

	/* Avoid freeing if not mapped. */
	if (kva < (void *)virtual_avail)
		return;

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif	/* DIAGNOSTIC */
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (btop((u_long)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}
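
/*
 * Worked example for the segment walk above (illustrative only): with
 * two 8 KB segments and off = 0x3000, the loop subtracts 0x2000 for
 * segment 0 and returns btop(segs[1].ds_addr + 0x1000), the frame
 * backing the second page of segment 1.  A device's d_mmap entry
 * would typically just forward here; "sc_segs"/"sc_nsegs" are
 * hypothetical softc members:
 *
 *	return (bus_dmamem_mmap(sc->sc_dmat, sc->sc_segs, sc->sc_nsegs,
 *	    off, prot, 0));
 */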

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct vmspace *vm, int flags, vaddr_t *lastaddrp,
    int *segp, bool first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

#ifdef DEBUG_DMA
	printf("_bus_dmamap_load_buffer(buf=%p, len=%lx, flags=%d, 1st=%d)\n",
	    buf, buflen, flags, first);
#endif	/* DEBUG_DMA */

	pmap = vm_map_pmap(&vm->vm_map);

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		(void) pmap_extract(pmap, (vaddr_t)vaddr, &curaddr);

#if 0
		/*
		 * Make sure we're in an allowed DMA range.
		 */
		if (t->_ranges != NULL &&
		    _bus_dma_inrange(t->_ranges, t->_nranges, curaddr) == 0)
			return (EINVAL);
#endif

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = false;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}
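
/*
 * Worked example for the boundary clipping above: with
 * _dm_boundary = 0x10000 (so bmask = ~0xffff) and curaddr = 0x2fc00,
 * baddr = (0x2fc00 + 0x10000) & ~0xffff = 0x30000, and sgsize is
 * clipped to 0x30000 - 0x2fc00 = 0x400, so the next chunk starts
 * exactly on the 64 KB boundary in a new segment.
 */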

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
int
_bus_dma_inrange(bus_dma_segment_t *ranges, int nranges, bus_addr_t curaddr)
{
	bus_dma_segment_t *ds;
	int i;

	for (i = 0, ds = ranges; i < nranges; i++, ds++) {
		if (curaddr >= ds->ds_addr &&
		    round_page(curaddr) <= (ds->ds_addr + ds->ds_len))
			return (1);
	}

	return (0);
}
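
/*
 * Example of the check above: with a single allowed range
 * { ds_addr = 0, ds_len = 0x1000000 } (the first 16 MB), an address of
 * 0xffff00 passes because round_page(0xffff00) = 0x1000000 is still
 * <= ds_addr + ds_len, while 0x1000100 fails because
 * round_page(0x1000100) = 0x1001000 exceeds the range.
 */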

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, vaddr_t low, vaddr_t high)
{
	vaddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

#ifdef DEBUG_DMA
	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif	/* DEBUG_DMA */

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
	printf("alloc: page %lx\n", lastaddr);
#endif	/* DEBUG_DMA */
	m = m->pageq.queue.tqe_next;

	for (; m != NULL; m = m->pageq.queue.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned nonsensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif	/* DIAGNOSTIC */
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", curaddr);
#endif	/* DEBUG_DMA */
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}

/*
 * "generic" DMA struct, nothing special.
 */
struct vax_bus_dma_tag vax_bus_dma_tag = {
	._dmamap_create = _bus_dmamap_create,
	._dmamap_destroy = _bus_dmamap_destroy,
	._dmamap_load = _bus_dmamap_load,
	._dmamap_load_mbuf = _bus_dmamap_load_mbuf,
	._dmamap_load_uio = _bus_dmamap_load_uio,
	._dmamap_load_raw = _bus_dmamap_load_raw,
	._dmamap_unload = _bus_dmamap_unload,
	._dmamap_sync = _bus_dmamap_sync,
	._dmamem_alloc = _bus_dmamem_alloc,
	._dmamem_free = _bus_dmamem_free,
	._dmamem_map = _bus_dmamem_map,
	._dmamem_unmap = _bus_dmamem_unmap,
	._dmamem_mmap = _bus_dmamem_mmap,
};
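
/*
 * A bus front end that needs different behaviour can supply its own
 * tag, overriding individual methods and falling back on the common
 * functions above for the rest.  Hypothetical sketch (no "mybus" tag
 * or mybus_dmamap_load() exists in this file):
 *
 *	struct vax_bus_dma_tag mybus_dma_tag;
 *
 *	(at attach time:)
 *	mybus_dma_tag = vax_bus_dma_tag;
 *	mybus_dma_tag._dmamap_load = mybus_dmamap_load;
 *
 * where mybus_dmamap_load() might, for instance, remap segments
 * through a bus-specific DMA window before delegating to
 * _bus_dmamap_load().
 */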