/*	$NetBSD: isadma_bounce.c,v 1.3 2003/05/05 12:55:43 fvdl Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <mips/cache.h>
#define _MIPS_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/locore.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>

extern paddr_t avail_end;

/*
 * Cookie used by bouncing ISA DMA.  A pointer to one of these is stashed
 * in the DMA map.
 */
struct isadma_bounce_cookie {
	int	id_flags;		/* flags; see below */

	/*
	 * Information about the original buffer used during
	 * DMA map syncs.  Note that origbuflen is only used
	 * for ID_BUFTYPE_LINEAR.
	 */
	void	*id_origbuf;		/* pointer to orig buffer if
					   bouncing */
	bus_size_t id_origbuflen;	/* ...and size */
	int	id_buftype;		/* type of buffer */

	void	*id_bouncebuf;		/* pointer to the bounce buffer */
	bus_size_t id_bouncebuflen;	/* ...and size */
	int	id_nbouncesegs;		/* number of valid bounce segs */
	bus_dma_segment_t id_bouncesegs[1]; /* array of bounce buffer
					       physical memory segments */
};

/* id_flags */
#define	ID_MIGHT_NEED_BOUNCE	0x01	/* map could need bounce buffers */
#define	ID_HAS_BOUNCE		0x02	/* map currently has bounce buffers */
#define	ID_IS_BOUNCING		0x04	/* map is bouncing current xfer */
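
/*
 * Flag lifecycle, as implemented below: ID_MIGHT_NEED_BOUNCE is decided
 * once at map creation time and never changes; ID_HAS_BOUNCE tracks
 * whether bounce pages are currently allocated (set in
 * isadma_bounce_alloc_bouncebuf(), cleared in
 * isadma_bounce_free_bouncebuf()); ID_IS_BOUNCING is per-transfer,
 * set by the load routines when they fall back to the bounce buffer
 * and cleared at unload.
 */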

/* id_buftype */
#define	ID_BUFTYPE_INVALID	0
#define	ID_BUFTYPE_LINEAR	1
#define	ID_BUFTYPE_MBUF		2
#define	ID_BUFTYPE_UIO		3
#define	ID_BUFTYPE_RAW		4
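
/*
 * Note that only ID_BUFTYPE_LINEAR and ID_BUFTYPE_MBUF are ever set in
 * this file; the uio and raw load routines below are unimplemented and
 * panic, so the UIO and RAW types exist only for completeness.
 */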

int	isadma_bounce_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	isadma_bounce_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);

/*
 * Create an ISA DMA map.
 */
int
isadma_bounce_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct isadma_bounce_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(*cookie);

	/*
	 * ISA only has 24 bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There is also the opposite case: the most segments a
	 * transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
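	/*
	 * For example (illustrative numbers only): a 64KB transfer
	 * with a 4KB PAGE_SIZE can touch (65536 / 4096) + 1 = 17
	 * pages when the buffer is not page-aligned, so a map whose
	 * _dm_segcnt is 16 must be bounced through a physically
	 * contiguous buffer even on a machine with less than 16M
	 * of RAM.
	 */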
	cookieflags = 0;
	if (avail_end > (t->_wbase + t->_wsize) ||
	    ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) *
		    (map->_dm_segcnt - 1));
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct isadma_bounce_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = isadma_bounce_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}

/*
 * Destroy an ISA DMA map.
 */
void
isadma_bounce_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		isadma_bounce_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}
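
#if 0
/*
 * A minimal sketch (not compiled) of how a driver would drive a
 * bouncing map for a device-to-memory transfer.  The function name,
 * buffer, and parameter choices here are hypothetical; real drivers
 * reach these routines through the bus_dma(9) function pointers
 * installed in the tag, and the bouncing is transparent to them.
 */
static int
example_read(bus_dma_tag_t t, void *buf, bus_size_t len)
{
	bus_dmamap_t map;
	int error;

	error = isadma_bounce_dmamap_create(t, len, 1, len, 0,
	    BUS_DMA_NOWAIT, &map);
	if (error)
		return (error);

	error = isadma_bounce_dmamap_load(t, map, buf, len, NULL,
	    BUS_DMA_NOWAIT);
	if (error) {
		isadma_bounce_dmamap_destroy(t, map);
		return (error);
	}

	/* Device -> memory: sync before starting and after completion. */
	isadma_bounce_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
	/* ... program the device with map->dm_segs[] and wait ... */
	isadma_bounce_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);

	isadma_bounce_dmamap_unload(t, map);
	isadma_bounce_dmamap_destroy(t, map);
	return (0);
}
#endif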

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
isadma_bounce_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like isadma_bounce_dmamap_load(), but for mbufs.
 */
int
isadma_bounce_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("isadma_bounce_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like isadma_bounce_dmamap_load(), but for uios.
 */
int
isadma_bounce_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{

	panic("isadma_bounce_dmamap_load_uio: not implemented");
}

/*
 * Like isadma_bounce_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
isadma_bounce_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("isadma_bounce_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
isadma_bounce_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		isadma_bounce_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
isadma_bounce_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("isadma_bounce_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just do the normal sync operation
	 * and return.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0) {
		_bus_dmamap_sync(t, map, offset, len, ops);
		return;
	}

	/*
	 * Flush data cache for PREREAD.  This has the side-effect
	 * of invalidating the cache.  Done at PREREAD since it
	 * causes the cache line(s) to be written back to memory.
	 *
	 * Copy the original buffer to the bounce buffer and flush
	 * the data cache for PREWRITE, so that the contents
	 * of the data buffer in memory reflect reality.
	 *
	 * Copy the bounce buffer to the original buffer in POSTREAD.
	 */
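
	/*
	 * Concretely: for a bounced device-to-memory transfer, the
	 * PREREAD sync below copies nothing and only deals with the
	 * cache, while the POSTREAD sync copies the freshly DMA'd
	 * data from the bounce buffer back into the caller's buffer.
	 * For a memory-to-device transfer, PREWRITE copies the
	 * caller's data into the bounce buffer and POSTWRITE is a
	 * no-op.
	 */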

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
			wbflush();
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("isadma_bounce_dmamap_sync");
	}

	/* Drain the write buffer. */
	wbflush();

	/* XXXJRT */
	if (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE))
		mips_dcache_wbinv_range((vaddr_t)cookie->id_bouncebuf + offset,
		    len);
}

/*
 * Allocate memory safe for ISA DMA.
 */
int
isadma_bounce_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	paddr_t high;

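	/*
	 * Constrain the allocation to the region a 24-bit ISA DMA
	 * address can reach; ISA_DMA_BOUNCE_THRESHOLD marks the 16M
	 * boundary described in the comment at the top of this file.
	 */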
	if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
		high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD);
	else
		high = trunc_page(avail_end);

	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, high));
}

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
isadma_bounce_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    bus_size_t size, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = isadma_bounce_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else
		cookie->id_flags |= ID_HAS_BOUNCE;

	return (error);
}

void
isadma_bounce_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}