/*	$NetBSD: isadma_bounce.c,v 1.15 2022/01/22 15:10:31 skrll Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isadma_bounce.c,v 1.15 2022/01/22 15:10:31 skrll Exp $");

#define _MIPS_BUS_DMA_PRIVATE

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <mips/cache.h>
#include <mips/locore.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>

int isadma_bounce_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
            bus_size_t, int);
void isadma_bounce_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);

/*
 * Create an ISA DMA map.
 */
int
isadma_bounce_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
        struct mips_bus_dma_cookie *cookie;
        bus_dmamap_t map;
        int error, cookieflags;
        void *cookiestore;
        size_t cookiesize;

        /* Call common function to create the basic map. */
        error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
            flags, dmamp);
        if (error)
                return (error);

        map = *dmamp;
        map->_dm_cookie = NULL;

        cookiesize = sizeof(*cookie);

        /*
         * ISA only has 24-bits of address space.  This means
         * we can't DMA to pages over 16M.  In order to DMA to
         * arbitrary buffers, we use "bounce buffers" - pages
         * in memory below the 16M boundary.  On DMA reads,
         * DMA happens to the bounce buffers, and is copied into
         * the caller's buffer.  On writes, data is copied into
         * the bounce buffer, and the DMA happens from those
         * pages.  To software using the DMA mapping interface,
         * this looks simply like a data cache.
         *
         * If we have more than 16M of RAM in the system, we may
         * need bounce buffers.  We check and remember that here.
         *
         * ...or, there is an opposite case.  The most segments
         * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
         * the caller can't handle that many segments (e.g. the
         * ISA DMA controller), we may have to bounce it as well.
         */
        cookieflags = 0;
        if (_BUS_AVAIL_END > (t->_wbase + t->_bounce_alloc_hi - t->_bounce_alloc_lo - 1)
            || ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
                cookieflags |= _BUS_DMA_MIGHT_NEED_BOUNCE;
                cookiesize += (sizeof(bus_dma_segment_t) *
                    (map->_dm_segcnt - 1));
        }

        /*
         * Allocate our cookie.
         */
        if ((cookiestore = malloc(cookiesize, M_DMAMAP,
            (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
                error = ENOMEM;
                goto out;
        }
        memset(cookiestore, 0, cookiesize);
        cookie = (struct mips_bus_dma_cookie *)cookiestore;
        cookie->id_flags = cookieflags;
        map->_dm_cookie = cookie;

        if (cookieflags & _BUS_DMA_MIGHT_NEED_BOUNCE) {
                /*
                 * Allocate the bounce pages now if the caller
                 * wishes us to do so.
                 */
                if ((flags & BUS_DMA_ALLOCNOW) == 0)
                        goto out;

                error = isadma_bounce_alloc_bouncebuf(t, map, size, flags);
        }

 out:
        if (error) {
                if (map->_dm_cookie != NULL)
                        free(map->_dm_cookie, M_DMAMAP);
                _bus_dmamap_destroy(t, map);
        }
        return (error);
}
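
/*
 * A worked example of the bounce decision above (illustrative only; the
 * numbers assume a 4KB PAGE_SIZE and the usual 16MB ISA bounce threshold,
 * neither of which is guaranteed by this file): on a machine with 64MB of
 * RAM, _BUS_AVAIL_END lies well past the 24-bit ISA limit, so every map
 * created here is marked _BUS_DMA_MIGHT_NEED_BOUNCE.  Independently, a
 * 64KB transfer can touch at most (65536 / 4096) + 1 = 17 pages, so a map
 * created with fewer than 17 segments may have to be bounced even when
 * all of RAM is DMA-reachable.
 */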

/*
 * Destroy an ISA DMA map.
 */
void
isadma_bounce_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
        struct mips_bus_dma_cookie *cookie = map->_dm_cookie;

        /*
         * Free any bounce pages this map might hold.
         */
        if (cookie->id_flags & _BUS_DMA_HAS_BOUNCE)
                isadma_bounce_free_bouncebuf(t, map);

        free(cookie, M_DMAMAP);
        _bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
isadma_bounce_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
        struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
        int error;

        /*
         * Make sure that on error condition we return "no valid mappings."
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;

        /*
         * Try to load the map the normal way.  If this errors out,
         * and we can bounce, we will.
         */
        error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
        if (error == 0 || (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE) == 0)
                return (error);

        /*
         * First attempt failed; bounce it.
         */

        /*
         * Allocate bounce pages, if necessary.
         */
        if ((cookie->id_flags & _BUS_DMA_HAS_BOUNCE) == 0) {
                error = isadma_bounce_alloc_bouncebuf(t, map, buflen, flags);
                if (error)
                        return (error);
        }

        /*
         * Cache a pointer to the caller's buffer and load the DMA map
         * with the bounce buffer.
         */
        cookie->id_origbuf = buf;
        cookie->id_origbuflen = buflen;
        cookie->id_buftype = _BUS_DMA_BUFTYPE_LINEAR;
        error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
            p, flags);
        if (error) {
                /*
                 * Free the bounce pages, unless our resources
                 * are reserved for our exclusive use.
                 */
                if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
                        isadma_bounce_free_bouncebuf(t, map);
                return (error);
        }

        /* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
        cookie->id_flags |= _BUS_DMA_IS_BOUNCING;
        return (0);
}

/*
 * Like isadma_bounce_dmamap_load(), but for mbufs.
 */
int
isadma_bounce_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
        struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
        int error;

        /*
         * Make sure on error condition we return "no valid mappings."
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
        if ((m0->m_flags & M_PKTHDR) == 0)
                panic("isadma_bounce_dmamap_load_mbuf: no packet header");
#endif

        if (m0->m_pkthdr.len > map->_dm_size)
                return (EINVAL);

        /*
         * Try to load the map the normal way.  If this errors out,
         * and we can bounce, we will.
         */
        error = _bus_dmamap_load_mbuf(t, map, m0, flags);
        if (error == 0 || (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE) == 0)
                return (error);

        /*
         * First attempt failed; bounce it.
         */

        /*
         * Allocate bounce pages, if necessary.
         */
        if ((cookie->id_flags & _BUS_DMA_HAS_BOUNCE) == 0) {
                error = isadma_bounce_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
                    flags);
                if (error)
                        return (error);
        }

        /*
         * Cache a pointer to the caller's buffer and load the DMA map
         * with the bounce buffer.
         */
        cookie->id_origbuf = m0;
        cookie->id_origbuflen = m0->m_pkthdr.len;       /* not really used */
        cookie->id_buftype = _BUS_DMA_BUFTYPE_MBUF;
        error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
            m0->m_pkthdr.len, NULL, flags);
        if (error) {
                /*
                 * Free the bounce pages, unless our resources
                 * are reserved for our exclusive use.
                 */
                if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
                        isadma_bounce_free_bouncebuf(t, map);
                return (error);
        }

        /* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
        cookie->id_flags |= _BUS_DMA_IS_BOUNCING;
        return (0);
}

/*
 * Like isadma_bounce_dmamap_load(), but for uios.
 */
int
isadma_bounce_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{

        panic("isadma_bounce_dmamap_load_uio: not implemented");
}

/*
 * Like isadma_bounce_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
isadma_bounce_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

        panic("isadma_bounce_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
isadma_bounce_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
        struct mips_bus_dma_cookie *cookie = map->_dm_cookie;

        /*
         * If we have bounce pages, free them, unless they're
         * reserved for our exclusive use.
         */
        if ((cookie->id_flags & _BUS_DMA_HAS_BOUNCE) &&
            (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
                isadma_bounce_free_bouncebuf(t, map);

        cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
        cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;

        /*
         * Do the generic bits of the unload.
         */
        _bus_dmamap_unload(t, map);
}
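
/*
 * To make the load/bounce interplay above concrete (an illustrative note,
 * not an additional code path): a driver loading, say, a 1500-byte mbuf
 * chain goes through _bus_dmamap_load_mbuf() first; if every cluster
 * already lies in DMA-reachable memory the load succeeds and no bouncing
 * occurs.  Only when that load fails on a map marked
 * _BUS_DMA_MIGHT_NEED_BOUNCE is the map re-loaded with the low-memory
 * bounce pages, the original buffer remembered in the cookie, and the
 * actual data movement deferred to isadma_bounce_dmamap_sync() below.
 */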

/*
 * Synchronize an ISA DMA map.
 */
void
isadma_bounce_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
        struct mips_bus_dma_cookie *cookie = map->_dm_cookie;

        /*
         * Mixing PRE and POST operations is not allowed.
         */
        if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
            (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
                panic("isadma_bounce_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
        if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
                if (offset >= map->dm_mapsize)
                        panic("isadma_bounce_dmamap_sync: bad offset");
                if (len == 0 || (offset + len) > map->dm_mapsize)
                        panic("isadma_bounce_dmamap_sync: bad length");
        }
#endif

        /*
         * If we're not bouncing, just do the normal sync operation
         * and return.
         */
        if ((cookie->id_flags & _BUS_DMA_IS_BOUNCING) == 0) {
                _bus_dmamap_sync(t, map, offset, len, ops);
                return;
        }

        /*
         * Flush data cache for PREREAD.  This has the side-effect
         * of invalidating the cache.  Done at PREREAD since it
         * causes the cache line(s) to be written back to memory.
         *
         * Copy the original buffer to the bounce buffer and flush
         * the data cache for PREWRITE, so that the contents
         * of the data buffer in memory reflect reality.
         *
         * Copy the bounce buffer to the original buffer in POSTREAD.
         */

        switch (cookie->id_buftype) {
        case _BUS_DMA_BUFTYPE_LINEAR:
                /*
                 * Nothing to do for pre-read.
                 */

                if (ops & BUS_DMASYNC_PREWRITE) {
                        /*
                         * Copy the caller's buffer to the bounce buffer.
                         */
                        memcpy((char *)cookie->id_bouncebuf + offset,
                            (char *)cookie->id_origbuf + offset, len);
                        wbflush();
                }

                if (ops & BUS_DMASYNC_POSTREAD) {
                        /*
                         * Copy the bounce buffer to the caller's buffer.
                         */
                        memcpy((char *)cookie->id_origbuf + offset,
                            (char *)cookie->id_bouncebuf + offset, len);
                }

                /*
                 * Nothing to do for post-write.
                 */
                break;

        case _BUS_DMA_BUFTYPE_MBUF:
            {
                struct mbuf *m, *m0 = cookie->id_origbuf;
                bus_size_t minlen, moff;

                /*
                 * Nothing to do for pre-read.
                 */

                if (ops & BUS_DMASYNC_PREWRITE) {
                        /*
                         * Copy the caller's buffer to the bounce buffer.
                         */
                        m_copydata(m0, offset, len,
                            (char *)cookie->id_bouncebuf + offset);
                }

                if (ops & BUS_DMASYNC_POSTREAD) {
                        /*
                         * Copy the bounce buffer to the caller's buffer.
                         */
                        for (moff = offset, m = m0; m != NULL && len != 0;
                             m = m->m_next) {
                                /* Find the beginning mbuf. */
                                if (moff >= m->m_len) {
                                        moff -= m->m_len;
                                        continue;
                                }

                                /*
                                 * Now at the first mbuf to sync; nail
                                 * each one until we have exhausted the
                                 * length.
                                 */
                                minlen = len < m->m_len - moff ?
                                    len : m->m_len - moff;

                                memcpy(mtod(m, char *) + moff,
                                    (char *)cookie->id_bouncebuf + offset,
                                    minlen);

                                moff = 0;
                                len -= minlen;
                                offset += minlen;
                        }
                }

                /*
                 * Nothing to do for post-write.
                 */
                break;
            }

        case _BUS_DMA_BUFTYPE_UIO:
                panic("isadma_bounce_dmamap_sync: _BUS_DMA_BUFTYPE_UIO");
                break;

        case _BUS_DMA_BUFTYPE_RAW:
                panic("isadma_bounce_dmamap_sync: _BUS_DMA_BUFTYPE_RAW");
                break;

        case _BUS_DMA_BUFTYPE_INVALID:
                panic("isadma_bounce_dmamap_sync: _BUS_DMA_BUFTYPE_INVALID");
                break;

        default:
                printf("unknown buffer type %d\n", cookie->id_buftype);
                panic("isadma_bounce_dmamap_sync");
        }

        /* Drain the write buffer. */
        wbflush();

        /* XXXJRT */
        if (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE))
                mips_dcache_wbinv_range((vaddr_t)cookie->id_bouncebuf + offset,
                    len);
}
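
/*
 * A minimal sketch of how a driver drives the hooks in this file through
 * the machine-independent bus_dma(9) interface, for a memory-to-device
 * (DMA write) transfer that may or may not end up bouncing.  This is
 * illustrative only: "dmat", "buf", "len" and the function name are
 * placeholders, not part of this file, and error handling is omitted.
 */
#if 0   /* example only -- never compiled */
static void
example_isa_dma_write(bus_dma_tag_t dmat, void *buf, bus_size_t len)
{
        bus_dmamap_t map;

        /* Create the map; it may be marked "might need bounce". */
        bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_WAITOK, &map);

        /* Load it; a failed normal load falls back to the bounce pages. */
        bus_dmamap_load(dmat, map, buf, len, NULL, BUS_DMA_WAITOK);

        /* PREWRITE copies buf into the bounce pages when bouncing. */
        bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_PREWRITE);

        /* ... program the ISA DMA controller and run the transfer ... */

        /* Transfer complete: POSTWRITE, then tear the mapping down. */
        bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(dmat, map);
        bus_dmamap_destroy(dmat, map);
}
#endif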

/*
 * Allocate memory safe for ISA DMA.
 */
int
isadma_bounce_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
        paddr_t high;

        if (_BUS_AVAIL_END > ISA_DMA_BOUNCE_THRESHOLD - 1)
                high = ISA_DMA_BOUNCE_THRESHOLD - 1;
        else
                high = _BUS_AVAIL_END;

        return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
            segs, nsegs, rsegs, flags, 0, high));
}

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
isadma_bounce_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    bus_size_t size, int flags)
{
        struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
        int error = 0;

        cookie->id_bouncebuflen = round_page(size);
        error = isadma_bounce_dmamem_alloc(t, cookie->id_bouncebuflen,
            PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
            map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
        if (error)
                goto out;
        error = _bus_dmamem_map(t, cookie->id_bouncesegs,
            cookie->id_nbouncesegs, cookie->id_bouncebuflen,
            (void **)&cookie->id_bouncebuf, flags);

 out:
        if (error) {
                _bus_dmamem_free(t, cookie->id_bouncesegs,
                    cookie->id_nbouncesegs);
                cookie->id_bouncebuflen = 0;
                cookie->id_nbouncesegs = 0;
        } else
                cookie->id_flags |= _BUS_DMA_HAS_BOUNCE;

        return (error);
}

void
isadma_bounce_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
        struct mips_bus_dma_cookie *cookie = map->_dm_cookie;

        _bus_dmamem_unmap(t, cookie->id_bouncebuf,
            cookie->id_bouncebuflen);
        _bus_dmamem_free(t, cookie->id_bouncesegs,
            cookie->id_nbouncesegs);
        cookie->id_bouncebuflen = 0;
        cookie->id_nbouncesegs = 0;
        cookie->id_flags &= ~_BUS_DMA_HAS_BOUNCE;
}