/*	$NetBSD: intio.c,v 1.8 2000/06/29 07:07:53 mrg Exp $	*/

/*-
 * Copyright (c) 1998 NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NetBSD/x68k internal I/O virtual bus.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/extent.h>
#include <uvm/uvm_extern.h>	/* XXX needed? */

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/frame.h>

#include <arch/x68k/dev/intiovar.h>
#include <arch/x68k/dev/mfp.h>


/*
 * bus_space(9) interface
 */
static int intio_bus_space_map __P((bus_space_tag_t, bus_addr_t,
	bus_size_t, int, bus_space_handle_t *));
static void intio_bus_space_unmap __P((bus_space_tag_t,
	bus_space_handle_t, bus_size_t));
static int intio_bus_space_subregion __P((bus_space_tag_t,
	bus_space_handle_t, bus_size_t, bus_size_t, bus_space_handle_t *));

static struct x68k_bus_space intio_bus = {
#if 0
	X68K_INTIO_BUS,
#endif
	intio_bus_space_map, intio_bus_space_unmap, intio_bus_space_subregion,
	x68k_bus_space_alloc, x68k_bus_space_free,
#if 0
	x68k_bus_space_barrier,
#endif

	0
};

/*
 * bus_dma(9) interface
 */
#define	INTIO_DMA_BOUNCE_THRESHOLD	(16 * 1024 * 1024)
int	_intio_bus_dmamap_create __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));
void	_intio_bus_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
int	_intio_bus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));
int	_intio_bus_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));
int	_intio_bus_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));
int	_intio_bus_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));
void	_intio_bus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void	_intio_bus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int));

int	_intio_bus_dmamem_alloc __P((bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int));

int	_intio_dma_alloc_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int));
void	_intio_dma_free_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t));

struct x68k_bus_dma intio_bus_dma = {
	INTIO_DMA_BOUNCE_THRESHOLD,
	_intio_bus_dmamap_create,
	_intio_bus_dmamap_destroy,
	_intio_bus_dmamap_load,
	_intio_bus_dmamap_load_mbuf,
	_intio_bus_dmamap_load_uio,
	_intio_bus_dmamap_load_raw,
	_intio_bus_dmamap_unload,
	_intio_bus_dmamap_sync,
	_intio_bus_dmamem_alloc,
	x68k_bus_dmamem_free,
	x68k_bus_dmamem_map,
	x68k_bus_dmamem_unmap,
	x68k_bus_dmamem_mmap,
};

/*
 * autoconf stuff
 */
static int intio_match __P((struct device *, struct cfdata *, void *));
static void intio_attach __P((struct device *, struct device *, void *));
static int intio_search __P((struct device *, struct cfdata *cf, void *));
static int intio_print __P((void *, const char *));
static void intio_alloc_system_ports __P((struct intio_softc *));

struct cfattach intio_ca = {
	sizeof(struct intio_softc), intio_match, intio_attach
};

static struct intio_interrupt_vector {
	intio_intr_handler_t	iiv_handler;
	void			*iiv_arg;
	int			iiv_intrcntoff;
} iiv[256] = {{0,},};

extern struct cfdriver intio_cd;

/* used in console initialization */
extern int x68k_realconfig;
int x68k_config_found __P((struct cfdata *, struct device *,
	void *, cfprint_t));
static struct cfdata *cfdata_intiobus = NULL;

/* other static functions */
static int scan_intrnames __P((const char *));
#ifdef DEBUG
int intio_debug = 0;
#endif

static int
intio_match(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;		/* NULL */
{
	if (strcmp(aux, intio_cd.cd_name) != 0)
		return (0);
	if (cf->cf_unit != 0)
		return (0);
	if (x68k_realconfig == 0)
		cfdata_intiobus = cf;	/* XXX */

	return (1);
}


/* used in console initialization: configure only MFP */
static struct intio_attach_args initial_ia = {
	&intio_bus,
	0/*XXX*/,

	"mfp",			/* ia_name */
	MFP_ADDR,		/* ia_addr */
	0x30,			/* ia_size */
	MFP_INTR,		/* ia_intr */
	-1,			/* ia_dma */
	-1,			/* ia_dmaintr */
};

static void
intio_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;		/* NULL */
{
	struct intio_softc *sc = (struct intio_softc *)self;
	struct intio_attach_args ia;

	if (self == NULL) {
		/* console only init */
		x68k_config_found(cfdata_intiobus, NULL, &initial_ia, NULL);
		return;
	}

	printf (" mapped at %8p\n", intiobase);

	sc->sc_map = extent_create("intiomap",
				  PHYS_INTIODEV,
				  PHYS_INTIODEV + 0x400000,
				  M_DEVBUF, NULL, NULL, EX_NOWAIT);
	intio_alloc_system_ports (sc);

	sc->sc_bst = &intio_bus;
	sc->sc_bst->x68k_bus_device = self;
	sc->sc_dmat = &intio_bus_dma;
	sc->sc_dmac = 0;

	bzero(iiv, sizeof (struct intio_interrupt_vector) * 256);

	ia.ia_bst = sc->sc_bst;
	ia.ia_dmat = sc->sc_dmat;

	config_search (intio_search, self, &ia);
}

static int
intio_search(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct intio_attach_args *ia = aux;
	struct intio_softc *sc = (struct intio_softc *)parent;

	ia->ia_bst = sc->sc_bst;
	ia->ia_dmat = sc->sc_dmat;
	ia->ia_name = cf->cf_driver->cd_name;
	ia->ia_addr = cf->cf_addr;
	ia->ia_intr = cf->cf_intr;
	ia->ia_dma = cf->cf_dma;
	ia->ia_dmaintr = cf->cf_dmaintr;

	if ((*cf->cf_attach->ca_match)(parent, cf, ia) > 0)
		config_attach(parent, cf, ia, intio_print);

	return (0);
}
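
/*
 * Children are configured indirectly: their locators come from the
 * kernel config file and are copied into the attach args above before
 * the child's match function runs.  A config line of roughly this
 * shape supplies addr/intr (illustrative; see the x68k configuration
 * files for the actual values):
 *
 *	mfp0 at intio0 addr 0xe88000 intr 64
 */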

static int
intio_print(aux, name)
	void *aux;
	const char *name;
{
	struct intio_attach_args *ia = aux;

/*	if (ia->ia_addr > 0)	*/
		printf (" addr 0x%06x", ia->ia_addr);
	if (ia->ia_intr > 0)
		printf (" intr 0x%02x", ia->ia_intr);
	if (ia->ia_dma >= 0) {
		printf (" using DMA ch%d", ia->ia_dma);
		if (ia->ia_dmaintr > 0)
			printf (" intr 0x%02x and 0x%02x",
				ia->ia_dmaintr, ia->ia_dmaintr+1);
	}

	return (QUIET);
}

/*
 * intio memory map manager
 */

int
intio_map_allocate_region(parent, ia, flag)
	struct device *parent;
	struct intio_attach_args *ia;
	enum intio_map_flag flag;	/* INTIO_MAP_TESTONLY or INTIO_MAP_ALLOCATE */
{
	struct intio_softc *sc = (struct intio_softc *)parent;
	struct extent *map = sc->sc_map;
	int r;

	r = extent_alloc_region (map, ia->ia_addr, ia->ia_size, 0);
#ifdef DEBUG
	if (intio_debug)
		extent_print (map);
#endif
	if (r == 0) {
		if (flag != INTIO_MAP_ALLOCATE)
			extent_free (map, ia->ia_addr, ia->ia_size, 0);
		return 0;
	}

	return -1;
}

int
intio_map_free_region(parent, ia)
	struct device *parent;
	struct intio_attach_args *ia;
{
	struct intio_softc *sc = (struct intio_softc *)parent;
	struct extent *map = sc->sc_map;

	extent_free (map, ia->ia_addr, ia->ia_size, 0);
#ifdef DEBUG
	if (intio_debug)
		extent_print (map);
#endif
	return 0;
}

void
intio_alloc_system_ports(sc)
	struct intio_softc *sc;
{
	extent_alloc_region (sc->sc_map, INTIO_SYSPORT, 16, 0);
	extent_alloc_region (sc->sc_map, INTIO_SICILIAN, 0x2000, 0);
}
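
/*
 * A child driver's match function would normally reserve its address
 * range through this manager.  A minimal sketch (the ia_size value is
 * hypothetical; the calls themselves are this file's own interface):
 *
 *	ia->ia_size = 0x20;
 *	if (intio_map_allocate_region(parent, ia, INTIO_MAP_TESTONLY))
 *		return (0);		<- region already occupied
 *	...
 *	intio_map_allocate_region(parent, ia, INTIO_MAP_ALLOCATE);
 *	...
 *	intio_map_free_region(parent, ia);	<- on detach or failure
 */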


/*
 * intio bus space stuff.
 */
static int
intio_bus_space_map(t, bpa, size, flags, bshp)
	bus_space_tag_t t;
	bus_addr_t bpa;
	bus_size_t size;
	int flags;
	bus_space_handle_t *bshp;
{
	/*
	 * Intio bus is mapped permanently.
	 */
	*bshp = (bus_space_handle_t)
		((u_int) bpa - PHYS_INTIODEV + intiobase);
	/*
	 * Some devices are mapped on odd addresses only.
	 */
	if (flags & BUS_SPACE_MAP_SHIFTED)
		*bshp += 0x80000001;

	return (0);
}
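
/*
 * Mapping is therefore pure address arithmetic; no MMU work is done.
 * A sketch of how a child might obtain a handle (ioh is hypothetical):
 *
 *	bus_space_handle_t ioh;
 *
 *	if (bus_space_map(ia->ia_bst, ia->ia_addr, ia->ia_size,
 *	    BUS_SPACE_MAP_SHIFTED, &ioh))
 *		...
 *
 * The shifted-flag offset of 0x80000001 skews the handle to the odd
 * byte lane and presumably sets the high bit as a marker for the
 * access methods.
 */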

static void
intio_bus_space_unmap(t, bsh, size)
	bus_space_tag_t t;
	bus_space_handle_t bsh;
	bus_size_t size;
{
	return;
}

static int
intio_bus_space_subregion(t, bsh, offset, size, nbshp)
	bus_space_tag_t t;
	bus_space_handle_t bsh;
	bus_size_t offset, size;
	bus_space_handle_t *nbshp;
{

	*nbshp = bsh + offset;
	return (0);
}


/*
 * interrupt handler
 */
int
intio_intr_establish (vector, name, handler, arg)
	int vector;
	const char *name;	/* XXX */
	intio_intr_handler_t handler;
	void *arg;
{
	if (vector < 16)
		panic ("Invalid interrupt vector");
	if (iiv[vector].iiv_handler)
		return EBUSY;
	iiv[vector].iiv_handler = handler;
	iiv[vector].iiv_arg = arg;
	iiv[vector].iiv_intrcntoff = scan_intrnames(name);

	return 0;
}
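
/*
 * Typical use from a child driver's attach function (foo_intr and the
 * error message are hypothetical):
 *
 *	if (intio_intr_establish(ia->ia_intr, "foo", foo_intr, sc) != 0)
 *		printf("%s: can't establish interrupt\n",
 *		    sc->sc_dev.dv_xname);
 *
 * Vectors below 16 are reserved for the CPU and are rejected above.
 */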

static int
scan_intrnames (name)
	const char *name;
{
	extern char intrnames[];
	extern char eintrnames[];
	int r = 0;
	char *p = &intrnames[0];

	for (;;) {
		if (*p == 0) {	/* new intr */
			if (p + strlen(name) >= eintrnames)
				panic ("Interrupt statistics buffer overrun.");
			strcpy (p, name);
			break;
		}
		if (strcmp(p, name) == 0)
			break;
		r++;
		while (*p++ != 0);
	}

	return r;
}
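
/*
 * intrnames is a packed table of NUL-terminated names indexed in
 * parallel with intrcnt[].  After registering, say, "mfp" and then
 * "foo" (names here are illustrative), the buffer would contain:
 *
 *	"mfp\0foo\0\0..."  ->  scan_intrnames("mfp") == 0,
 *	                       scan_intrnames("foo") == 1
 *
 * The returned slot is stored as iiv_intrcntoff and bumped in
 * intio_intr() on every interrupt, which is what vmstat -i reports.
 */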

int
intio_intr_disestablish (vector, arg)
	int vector;
	void *arg;
{
	if (iiv[vector].iiv_handler == 0 || iiv[vector].iiv_arg != arg)
		return EINVAL;
	iiv[vector].iiv_handler = 0;
	iiv[vector].iiv_arg = 0;

	return 0;
}

int
intio_intr (frame)
	struct frame *frame;
{
	int vector = frame->f_vector / 4;
	extern int intrcnt[];

#if 0				/* this is not correct now */
	/* CAUTION: HERE WE ARE IN SPLHIGH() */
	/* LOWER TO APPROPRIATE IPL AT VERY FIRST IN THE HANDLER!! */
#endif
	if (iiv[vector].iiv_handler == 0) {
		printf ("Stray interrupt: %d type %x\n", vector, frame->f_format);
		return 0;
	}

	intrcnt[iiv[vector].iiv_intrcntoff]++;

	return (*(iiv[vector].iiv_handler)) (iiv[vector].iiv_arg);
}

/*
 * Intio I/O controller interrupt
 */
static u_int8_t intio_ivec = 0;

void
intio_set_ivec (vec)
	int vec;
{
	vec &= 0xfc;

	if (intio_ivec && intio_ivec != (vec & 0xfc))
		panic ("Wrong interrupt vector for Sicilian.");

	intio_ivec = vec;
	intio_set_sicilian_ivec(vec);
}


/*
 * intio bus dma stuff.  stolen from arch/i386/isa/isa_machdep.c
 */

/*
 * Create an INTIO DMA map.
 */
int
_intio_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct intio_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;
	extern paddr_t avail_end;

	/* Call common function to create the basic map. */
	error = x68k_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->x68k_dm_cookie = NULL;

	cookiesize = sizeof(struct intio_dma_cookie);

	/*
	 * INTIO only has 24 bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / NBPG) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * DMAC), we may have to bounce it as well.
	 */
	if (avail_end <= t->_bounce_thresh)
		/* Bouncing not necessary due to memory size. */
		map->x68k_dm_bounce_thresh = 0;
	cookieflags = 0;
	if (map->x68k_dm_bounce_thresh != 0 ||
	    ((map->x68k_dm_size / NBPG) + 1) > map->x68k_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) * map->x68k_dm_segcnt);
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct intio_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->x68k_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _intio_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->x68k_dm_cookie != NULL)
			free(map->x68k_dm_cookie, M_DMAMAP);
		x68k_bus_dmamap_destroy(t, map);
	}
	return (error);
}
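
/*
 * A sketch of map creation from a driver that DMAs single page-sized
 * buffers (the sizes and the sc_dmamap field are hypothetical):
 *
 *	if (bus_dmamap_create(ia->ia_dmat, NBPG, 1, NBPG, 0,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_dmamap))
 *		...
 *
 * With BUS_DMA_ALLOCNOW, any bounce pages below the 16M threshold are
 * reserved here rather than at load time, so a later load cannot fail
 * for want of bounce memory.
 */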

/*
 * Destroy an INTIO DMA map.
 */
void
_intio_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_intio_dma_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	x68k_bus_dmamap_destroy(t, map);
}

/*
 * Load an INTIO DMA map with a linear buffer.
 */
int
_intio_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = x68k_bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _intio_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = x68k_bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->x68k_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_intio_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _intio_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _intio_bus_dmamap_load(), but for mbufs.
 */
int
_intio_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;
	int error;

	/*
	 * Make sure on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_intio_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->x68k_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = x68k_bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _intio_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = x68k_bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->x68k_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_intio_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _intio_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _intio_bus_dmamap_load(), but for uios.
 */
int
_intio_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	panic("_intio_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _intio_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_intio_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_intio_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an INTIO DMA map.
 */
void
_intio_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->x68k_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_intio_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	x68k_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an INTIO DMA map.
 */
void
_intio_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_intio_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_intio_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_intio_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just return; nothing to do.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
		return;

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("_intio_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("_intio_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("_intio_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("_intio_bus_dmamap_sync");
	}
}
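
/*
 * The canonical calling sequence for a bounced device read would be
 * (a sketch; sc_dmamap and the buffer are hypothetical):
 *
 *	bus_dmamap_load(ia->ia_dmat, sc->sc_dmamap, buf, len, NULL,
 *	    BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(ia->ia_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMASYNC_PREREAD);
 *	... start the DMAC transfer and take its completion interrupt ...
 *	bus_dmamap_sync(ia->ia_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(ia->ia_dmat, sc->sc_dmamap);
 *
 * The POSTREAD sync is what copies the bounce pages back into the
 * caller's buffer.
 */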

/*
 * Allocate memory safe for INTIO DMA.
 */
int
_intio_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	paddr_t high;
	extern paddr_t avail_end;

	if (avail_end > INTIO_DMA_BOUNCE_THRESHOLD)
		high = trunc_page(INTIO_DMA_BOUNCE_THRESHOLD);
	else
		high = trunc_page(avail_end);

	return (x68k_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, high));
}

/**********************************************************************
 * INTIO DMA utility functions
 **********************************************************************/

int
_intio_dma_alloc_bouncebuf(t, map, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_size_t size;
	int flags;
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _intio_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    NBPG, map->x68k_dm_boundary, cookie->id_bouncesegs,
	    map->x68k_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = x68k_bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		x68k_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
	}

	return (error);
}

void
_intio_dma_free_bouncebuf(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;

	x68k_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	x68k_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}