/*	$NetBSD: isa_machdep.c,v 1.4 2003/03/04 01:07:36 fvdl Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)isa.c	7.2 (Berkeley) 5/13/91
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isa_machdep.c,v 1.4 2003/03/04 01:07:36 fvdl Exp $");

#define ISA_DMA_STATS

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _X86_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <machine/pio.h>
#include <machine/cpufunc.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>

#include "ioapic.h"

#if NIOAPIC > 0
#include <machine/i82093var.h>
#include <machine/mpbiosvar.h>
#endif
/*
 * ISA can only DMA to 0-16M (it has only 24 address lines, and
 * 2^24 bytes is exactly 16M).
 */
#define	ISA_DMA_BOUNCE_THRESHOLD	(16 * 1024 * 1024)

extern	paddr_t avail_end;

#define	IDTVEC(name)	__CONCAT(X,name)
typedef void (vector) __P((void));
extern vector *IDTVEC(intr)[];

/*
 * Cookie used by ISA DMA.  A pointer to one of these is stashed in
 * the DMA map.
 */
struct x86_isa_dma_cookie {
	int	id_flags;		/* flags; see below */

	/*
	 * Information about the original buffer used during
	 * DMA map syncs.  Note that id_origbuflen is only used
	 * for ID_BUFTYPE_LINEAR.
	 */
	void	*id_origbuf;		/* pointer to orig buffer if
					   bouncing */
	bus_size_t id_origbuflen;	/* ...and size */
	int	id_buftype;		/* type of buffer */

	void	*id_bouncebuf;		/* pointer to the bounce buffer */
	bus_size_t id_bouncebuflen;	/* ...and size */
	int	id_nbouncesegs;		/* number of valid bounce segs */
	bus_dma_segment_t id_bouncesegs[0]; /* array of bounce buffer
					       physical memory segments */
};
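
/*
 * Note (illustrative, not from the original source): id_bouncesegs[]
 * is a trailing variable-length array, so the cookie and its segment
 * list are allocated in one chunk.  _isa_bus_dmamap_create() below
 * sizes it roughly as:
 *
 *	cookiesize = sizeof(struct x86_isa_dma_cookie);
 *	if (the map might need to bounce)
 *		cookiesize += sizeof(bus_dma_segment_t) * map->_dm_segcnt;
 *	cookie = malloc(cookiesize, M_DMAMAP, ...);
 */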

/* id_flags */
#define	ID_MIGHT_NEED_BOUNCE	0x01	/* map could need bounce buffers */
#define	ID_HAS_BOUNCE		0x02	/* map currently has bounce buffers */
#define	ID_IS_BOUNCING		0x04	/* map is bouncing current xfer */

/* id_buftype */
#define	ID_BUFTYPE_INVALID	0
#define	ID_BUFTYPE_LINEAR	1
#define	ID_BUFTYPE_MBUF		2
#define	ID_BUFTYPE_UIO		3
#define	ID_BUFTYPE_RAW		4

int	_isa_bus_dmamap_create __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));
void	_isa_bus_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
int	_isa_bus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));
int	_isa_bus_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));
int	_isa_bus_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));
int	_isa_bus_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));
void	_isa_bus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void	_isa_bus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int));

int	_isa_bus_dmamem_alloc __P((bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int));

int	_isa_dma_alloc_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int));
void	_isa_dma_free_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t));

/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct x86_bus_dma_tag isa_bus_dma_tag = {
	ISA_DMA_BOUNCE_THRESHOLD,
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,
	_isa_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};
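
/*
 * Illustrative sketch (hypothetical driver code, not part of this
 * file): ISA drivers reach these wrappers through the ordinary
 * bus_dma(9) calls, using the DMA tag handed to them at attach time
 * (assumed here to be stashed in sc->sc_dmat):
 *
 *	bus_dmamap_t dmam;
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &dmam) != 0)
 *		return;
 *	...
 *	bus_dmamap_destroy(sc->sc_dmat, dmam);
 *
 * The bouncing below is transparent: the driver never learns whether
 * its buffer sat above the 16M line.
 */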

#define	LEGAL_IRQ(x)	((x) >= 0 && (x) < NUM_LEGACY_IRQS && (x) != 2)

int
isa_intr_alloc(isa_chipset_tag_t ic, int mask, int type, int *irq)
{
	int i, tmp, bestirq, count;
	struct intrhand **p, *q;
	struct intrsource *isp;
	struct cpu_info *ci;

	if (type == IST_NONE)
		panic("intr_alloc: bogus type");

	ci = &cpu_info_primary;

	bestirq = -1;
	count = -1;

	/* some interrupts should never be dynamically allocated */
	mask &= 0xdef8;

	/*
	 * XXX some interrupts will be used later (6 for fdc, 12 for pms).
	 * the right answer is to do "breadth-first" searching of devices.
	 */
	mask &= 0xefbf;

	simple_lock(&ci->ci_slock);

	for (i = 0; i < NUM_LEGACY_IRQS; i++) {
		if (LEGAL_IRQ(i) == 0 || (mask & (1<<i)) == 0)
			continue;
		isp = ci->ci_isources[i];
		if (isp == NULL) {
			/*
			 * if nothing's using the irq, just return it
			 */
			*irq = i;
			simple_unlock(&ci->ci_slock);
			return (0);
		}

		switch (isp->is_type) {
		case IST_EDGE:
		case IST_LEVEL:
			if (type != isp->is_type)
				continue;
248 /*
249 * if the irq is shareable, count the number of other
250 * handlers, and if it's smaller than the last irq like
251 * this, remember it
252 *
253 * XXX We should probably also consider the
254 * interrupt level and stick IPL_TTY with other
255 * IPL_TTY, etc.
256 */
257 for (p = &isp->is_handlers, tmp = 0; (q = *p) != NULL;
258 p = &q->ih_next, tmp++)
259 ;
260 if ((bestirq == -1) || (count > tmp)) {
261 bestirq = i;
262 count = tmp;
263 }
			break;

		case IST_PULSE:
			/* this just isn't shareable */
			continue;
		}
	}

	simple_unlock(&ci->ci_slock);

	if (bestirq == -1)
		return (1);

	*irq = bestirq;

	return (0);
}
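
/*
 * Illustrative sketch (hypothetical driver code): a probe routine
 * with no fixed IRQ might offer a mask of acceptable lines and then
 * establish its handler on whichever one isa_intr_alloc() picked:
 *
 *	int irq;
 *
 *	if (isa_intr_alloc(ic, (1 << 3) | (1 << 5) | (1 << 9),
 *	    IST_EDGE, &irq) == 0)
 *		sc->sc_ih = isa_intr_establish(ic, irq, IST_EDGE,
 *		    IPL_NET, xx_intr, sc);
 */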

const struct evcnt *
isa_intr_evcnt(isa_chipset_tag_t ic, int irq)
{

	/* XXX for now, no evcnt parent reported */
	return NULL;
}

void *
isa_intr_establish(ic, irq, type, level, ih_fun, ih_arg)
	isa_chipset_tag_t ic;
	int irq;
	int type;
	int level;
	int (*ih_fun) __P((void *));
	void *ih_arg;
{
	struct pic *pic;
	int pin;
#if NIOAPIC > 0
	int mpih;
#endif

	pin = irq;
	pic = &i8259_pic;

#if NIOAPIC > 0
	if (mp_busses != NULL) {
		if (intr_find_mpmapping(mp_isa_bus, irq, &mpih) == 0 ||
		    intr_find_mpmapping(mp_eisa_bus, irq, &mpih) == 0) {
			if (!APIC_IRQ_ISLEGACY(mpih)) {
				pin = APIC_IRQ_PIN(mpih);
				pic = (struct pic *)
				    ioapic_find(APIC_IRQ_APIC(mpih));
				if (pic == NULL) {
					printf("isa_intr_establish: "
					    "unknown apic %d\n",
					    APIC_IRQ_APIC(mpih));
					return NULL;
				}
			}
		} else
			printf("isa_intr_establish: no MP mapping found\n");
	}
#endif
	return intr_establish(irq, pic, pin, type, level, ih_fun, ih_arg);
}

/*
 * Deregister an interrupt handler.
 */
void
isa_intr_disestablish(ic, arg)
	isa_chipset_tag_t ic;
	void *arg;
{
	struct intrhand *ih = arg;

	if (!LEGAL_IRQ(ih->ih_pin))
		panic("intr_disestablish: bogus irq");

	intr_disestablish(ih);
}

void
isa_attach_hook(parent, self, iba)
	struct device *parent, *self;
	struct isabus_attach_args *iba;
{
	extern struct x86_isa_chipset x86_isa_chipset;
	extern int isa_has_been_seen;

	/*
	 * Notify others that might need to know that the ISA bus
	 * has now been attached.
	 */
	if (isa_has_been_seen)
		panic("isaattach: ISA bus already seen!");
	isa_has_been_seen = 1;

	/*
	 * Since we can only have one ISA bus, we just use a single
	 * statically allocated ISA chipset structure.  Pass it up
	 * now.
	 */
	iba->iba_ic = &x86_isa_chipset;
}

int
isa_mem_alloc(t, size, align, boundary, flags, addrp, bshp)
	bus_space_tag_t t;
	bus_size_t size, align;
	bus_addr_t boundary;
	int flags;
	bus_addr_t *addrp;
	bus_space_handle_t *bshp;
{

	/*
	 * Allocate physical address space in the ISA hole.
	 */
	return (bus_space_alloc(t, IOM_BEGIN, IOM_END - 1, size, align,
	    boundary, flags, addrp, bshp));
}

void
isa_mem_free(t, bsh, size)
	bus_space_tag_t t;
	bus_space_handle_t bsh;
	bus_size_t size;
{

	bus_space_free(t, bsh, size);
}

/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
	} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct x86_isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct x86_isa_dma_cookie);

	/*
	 * ISA only has 24 bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
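	/*
	 * Worked example of the segment-count case (illustrative,
	 * assuming 4K PAGE_SIZE): a 64K transfer can touch at most
	 * (65536 / 4096) + 1 = 17 pages, so a map created with
	 * _dm_segcnt < 17 may have to bounce into a physically
	 * contiguous buffer even on a machine with less than 16M
	 * of RAM.
	 */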
	if (avail_end <= t->_bounce_thresh ||
	    (flags & ISABUS_DMA_32BIT) != 0) {
		/* Bouncing not necessary due to memory size. */
		map->_dm_bounce_thresh = 0;
	}
	cookieflags = 0;
	if (map->_dm_bounce_thresh != 0 ||
	    ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct x86_isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}
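
/*
 * Illustrative map lifecycle (not from the original source): each
 * generic bus_dma(9) call on a map created through isa_bus_dma_tag
 * dispatches to the corresponding wrapper in this file, so the
 * cookie set up above follows the map through
 *
 *	bus_dmamap_create()  -> _isa_bus_dmamap_create()
 *	bus_dmamap_load()    -> _isa_bus_dmamap_load()
 *	bus_dmamap_sync()    -> _isa_bus_dmamap_sync()
 *	bus_dmamap_unload()  -> _isa_bus_dmamap_unload()
 *	bus_dmamap_destroy() -> _isa_bus_dmamap_destroy()
 */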

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct x86_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct x86_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	struct x86_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_isa_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct x86_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	struct x86_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just return; nothing to do.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
		return;

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("_isa_bus_dmamap_sync");
	}
}
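
/*
 * Illustrative sync pairing (hypothetical driver code): for a
 * device-read (data flowing from the device into memory),
 *
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
 *	... start the DMA, wait for completion ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
 *
 * and for a device-write, BUS_DMASYNC_PREWRITE before starting and
 * BUS_DMASYNC_POSTWRITE afterwards.  The POSTREAD case is where a
 * bounced transfer is copied back into the caller's buffer.
 */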

/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	paddr_t high;

	if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
		high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD);
	else
		high = trunc_page(avail_end);

	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, high));
}

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
_isa_dma_alloc_bouncebuf(t, map, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_size_t size;
	int flags;
{
	struct x86_isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}

void
_isa_dma_free_bouncebuf(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct x86_isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}