/*	$NetBSD: dma.c,v 1.29.2.1 2004/08/03 10:34:23 skrll Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)dma.c	8.1 (Berkeley) 6/10/93
 */

/*
 * DMA driver
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dma.c,v 1.29.2.1 2004/08/03 10:34:23 skrll Exp $");

#include <machine/hp300spu.h>	/* XXX param.h includes cpu.h */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <m68k/cacheops.h>

#include <hp300/dev/intiovar.h>
#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>

/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/PAGE_SIZE+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/PAGE_SIZE) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/PAGE_SIZE+1)
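
/*
 * Worked example (illustrative only; the actual values depend on the
 * kernel configuration): with MAXPHYS = 64KB and PAGE_SIZE = 4KB, an
 * unaligned, maximally-scattered 64KB buffer touches 17 pages, so
 * DMAMAXIO = 65536/4096 + 1 = 17 chain elements always suffice.
 */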

struct dma_chain {
	int	dc_count;
	char	*dc_addr;
};

struct dma_channel {
	struct	dmaqueue *dm_job;	/* current job */
	struct	dmadevice *dm_hwaddr;	/* registers if DMA_C */
	struct	dmaBdevice *dm_Bhwaddr;	/* registers if not DMA_C */
	char	dm_flags;		/* misc. flags */
	u_short	dm_cmd;			/* DMA controller command */
	int	dm_cur;			/* current segment */
	int	dm_last;		/* last segment */
	struct	dma_chain dm_chain[DMAMAXIO]; /* all segments */
};

struct dma_softc {
	struct	device sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;

	struct	dmareg *sc_dmareg;	/* pointer to our hardware */
	struct	dma_channel sc_chan[NDMACHAN];	/* 2 channels */
	TAILQ_HEAD(, dmaqueue) sc_queue;	/* job queue */
	struct	callout sc_debug_ch;
	char	sc_type;		/* A, B, or C */
	int	sc_ipl;			/* our interrupt level */
	void	*sc_ih;			/* interrupt cookie */
};

/* types */
#define	DMA_B	0
#define	DMA_C	1

/* flags */
#define	DMAF_PCFLUSH	0x01
#define	DMAF_VCFLUSH	0x02
#define	DMAF_NOINTR	0x04

int	dmamatch(struct device *, struct cfdata *, void *);
void	dmaattach(struct device *, struct device *, void *);

CFATTACH_DECL(dma, sizeof(struct dma_softc),
    dmamatch, dmaattach, NULL, NULL);

int	dmaintr __P((void *));

#ifdef DEBUG
int	dmadebug = 0;
#define	DDB_WORD	0x01	/* same as DMAGO_WORD */
#define	DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04
#define	DDB_IO		0x08

void	dmatimeout __P((void *));
int	dmatimo[NDMACHAN];

long	dmahits[NDMACHAN];
long	dmamisses[NDMACHAN];
long	dmabyte[NDMACHAN];
long	dmaword[NDMACHAN];
long	dmalword[NDMACHAN];
#endif

static struct dma_softc *dma_softc;

int
dmamatch(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct intio_attach_args *ia = aux;
	static int dmafound = 0;	/* can only have one */

	if (strcmp("dma", ia->ia_modname) != 0 || dmafound)
		return (0);

	dmafound = 1;
	return (1);
}

void
dmaattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct dma_softc *sc = (struct dma_softc *)self;
	struct intio_attach_args *ia = aux;
	struct dma_channel *dc;
	struct dmareg *dma;
	int i;
	char rev;

	/* There's just one. */
	dma_softc = sc;

	sc->sc_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bst, ia->ia_iobase, INTIO_DEVSIZE, 0,
	    &sc->sc_bsh)) {
		printf("%s: can't map registers\n", sc->sc_dev.dv_xname);
		return;
	}

	dma = (struct dmareg *)bus_space_vaddr(sc->sc_bst, sc->sc_bsh);
	sc->sc_dmareg = dma;

	/*
	 * Determine the DMA type.  A DMA_A or DMA_B will fail the
	 * following probe.
	 *
	 * XXX Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * splbio works out to ipl 3).
	 */
	if (badbaddr((char *)&dma->dma_id[2])) {
		rev = 'B';
#if !defined(HP320)
		panic("dmaattach: DMA card requires hp320 support");
#endif
	} else
		rev = dma->dma_id[2];

	sc->sc_type = (rev == 'B') ? DMA_B : DMA_C;

	TAILQ_INIT(&sc->sc_queue);
	callout_init(&sc->sc_debug_ch);

	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		dc->dm_job = NULL;
		switch (i) {
		case 0:
			dc->dm_hwaddr = &dma->dma_chan0;
			dc->dm_Bhwaddr = &dma->dma_Bchan0;
			break;

		case 1:
			dc->dm_hwaddr = &dma->dma_chan1;
			dc->dm_Bhwaddr = &dma->dma_Bchan1;
			break;

		default:
			panic("dmaattach: more than 2 channels?");
			/* NOTREACHED */
		}
	}

#ifdef DEBUG
	/* make sure timeout is really not needed */
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
#endif

	printf(": 98620%c, 2 channels, %d-bit DMA\n",
	    rev, (rev == 'B') ? 16 : 32);

	/*
	 * Defer hooking up our interrupt until the first
	 * DMA-using controller has hooked up theirs.
	 */
	sc->sc_ih = NULL;
}

/*
 * Compute the ipl and (re)establish the interrupt handler
 * for the DMA controller.
 */
void
dmacomputeipl()
{
	struct dma_softc *sc = dma_softc;

	if (sc->sc_ih != NULL)
		intr_disestablish(sc->sc_ih);

	/*
	 * Our interrupt level must be as high as the highest
	 * device using DMA (i.e. splbio).
	 */
	sc->sc_ipl = PSLTOIPL(hp300_ipls[HP300_IPL_BIO]);
	sc->sc_ih = intr_establish(dmaintr, sc, sc->sc_ipl, IPL_BIO);
}
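
/*
 * A minimal sketch, excluded from the build, of the call pattern the
 * comments above imply: a DMA-using controller establishes its own
 * interrupt handler first and then asks us to recompute ours.  The
 * "xxc" names are hypothetical and purely for illustration.
 */
#if 0
int	xxcintr(void *);

void
xxcattach(struct xxc_softc *xsc, int ipl)
{

	xsc->sc_ih = intr_establish(xxcintr, xsc, ipl, IPL_BIO);

	/* Now the DMA controller can (re)compute its own ipl. */
	dmacomputeipl();
}
#endif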

int
dmareq(dq)
	struct dmaqueue *dq;
{
	struct dma_softc *sc = dma_softc;
	int i, chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

	chan = dq->dq_chan;
	for (i = NDMACHAN - 1; i >= 0; i--) {
		/*
		 * Can we use this channel?
		 */
		if ((chan & (1 << i)) == 0)
			continue;

		/*
		 * We can use it; is it busy?
		 */
		if (sc->sc_chan[i].dm_job != NULL)
			continue;

		/*
		 * Not busy; give the caller this channel.
		 */
		sc->sc_chan[i].dm_job = dq;
		dq->dq_chan = i;
		splx(s);
		return (1);
	}

	/*
	 * Couldn't get a channel now; put this in the queue.
	 */
	TAILQ_INSERT_TAIL(&sc->sc_queue, dq, dq_list);
	splx(s);
	return (0);
}
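
/*
 * A minimal, disabled sketch of the job protocol as a caller would use
 * it (the "xx" names are hypothetical): on entry dq_chan is a bitmask
 * of acceptable channels and is overwritten with the channel number
 * granted; if dmareq() returns 0, the dq_start callback fires later,
 * from dmafree(), once a channel frees up.  dq_done is invoked from
 * dmastop() when the transfer chain completes, and dmafree() releases
 * the channel.  dmareq/dmago/dmafree are declared in dmavar.h.
 */
#if 0
struct xx_softc {
	struct	dmaqueue sc_dq;
	char	*sc_buf;
	int	sc_count;
};

void
xxstart(void *arg)
{
	struct xx_softc *xsc = arg;

	/* We own channel xsc->sc_dq.dq_chan; kick off the transfer. */
	dmago(xsc->sc_dq.dq_chan, xsc->sc_buf, xsc->sc_count,
	    DMAGO_READ | DMAGO_WORD);
}

void
xxdone(void *arg)
{
	struct xx_softc *xsc = arg;

	/* Transfer finished; give the channel back. */
	dmafree(&xsc->sc_dq);
}

void
xxqueue(struct xx_softc *xsc)
{

	xsc->sc_dq.dq_softc = xsc;
	xsc->sc_dq.dq_start = xxstart;
	xsc->sc_dq.dq_done = xxdone;
	xsc->sc_dq.dq_chan = (1 << 0) | (1 << 1);  /* either channel */
	if (dmareq(&xsc->sc_dq))
		xxstart(xsc);	/* got a channel right away */
}
#endif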

void
dmafree(dq)
	struct dmaqueue *dq;
{
	int unit = dq->dq_chan;
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	struct dmaqueue *dn;
	int chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

#ifdef DEBUG
	dmatimo[unit] = 0;
#endif

	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * Channel is now free.  Look for another job to run on this
	 * channel.
	 */
	dc->dm_job = NULL;
	chan = 1 << unit;
	for (dn = sc->sc_queue.tqh_first; dn != NULL;
	    dn = dn->dq_list.tqe_next) {
		if (dn->dq_chan & chan) {
			/* Found one... */
			TAILQ_REMOVE(&sc->sc_queue, dn, dq_list);
			dc->dm_job = dn;
			dn->dq_chan = dq->dq_chan;
			splx(s);

			/* Start the initiator. */
			(*dn->dq_start)(dn->dq_softc);
			return;
		}
	}
	splx(s);
}

void
dmago(unit, addr, count, flags)
	int unit;
	char *addr;
	int count;
	int flags;
{
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	char *dmaend = NULL;
	int seg, tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");

#if defined(HP320)
	if (sc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %p, %x, %x)\n",
		    unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain
	 */
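	/*
	 * Illustrative example (hypothetical addresses, 4KB pages): a
	 * 7KB word-mode transfer starting 2KB into a page covers three
	 * pages, giving elements of 0x800, 0x1000, and 0x400 bytes.  If
	 * kvtop() maps the first two pages to contiguous physical
	 * memory but not the third, the loop below merges the second
	 * element into the first (a "hit"), leaving two chain entries.
	 * Note that dc_count ends up in transfer units, i.e. halved for
	 * word mode and quartered for longword mode.
	 */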
	for (seg = 0; count > 0; seg++) {
		dc->dm_chain[seg].dc_addr = (char *) kvtop(addr);
#if defined(M68040)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP((paddr_t)dc->dm_chain[seg].dc_addr);
#endif
		if (count < (tcount = PAGE_SIZE - ((int)addr & PGOFSET)))
			tcount = count;
		dc->dm_chain[seg].dc_count = tcount;
		addr += tcount;
		count -= tcount;
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;

		/*
		 * Try to compact the DMA transfer if the pages are adjacent.
		 * Note: this will never happen on the first iteration.
		 */
		if (dc->dm_chain[seg].dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (sc->sc_type != DMA_B ||
			dc->dm_chain[seg - 1].dc_count + tcount <= 65536)
#endif
		    ) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			dmaend += dc->dm_chain[seg].dc_count;
			dc->dm_chain[--seg].dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dc->dm_chain[seg].dc_addr +
			    dc->dm_chain[seg].dc_count;
			dc->dm_chain[seg].dc_count = tcount;
		}
	}
	dc->dm_cur = 0;
	dc->dm_last = --seg;
	dc->dm_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->dm_cmd = DMA_ENAB | DMA_IPL(sc->sc_ipl) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->dm_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->dm_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->dm_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->dm_cmd |= DMA_PRI;

#if defined(M68040)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_PAC)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_VAC)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_VCFLUSH;
#endif

	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->dm_cur == dc->dm_last)
			dc->dm_cmd &= ~DMA_ENAB;
		else
			dc->dm_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO) {
		if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
		    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD))) {
			printf("dmago: cmd %x, flags %x\n",
			    dc->dm_cmd, dc->dm_flags);
			for (seg = 0; seg <= dc->dm_last; seg++)
				printf("  %d: %d@%p\n", seg,
				    dc->dm_chain[seg].dc_count,
				    dc->dm_chain[seg].dc_addr);
		}
	}
	dmatimo[unit] = 1;
#endif
	DMA_ARM(sc, dc);
}
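
/*
 * For illustration: a caller wanting a 32-bit read into memory with no
 * completion interrupt would pass DMAGO_READ | DMAGO_LWORD | DMAGO_NOINT,
 * which yields dm_cmd = DMA_ENAB | DMA_IPL(ipl) | DMA_START | DMA_LWORD
 * (DMA_WRT is set only for device writes, i.e. when DMAGO_READ is clear),
 * with DMA_ENAB cleared again on the chain's final segment.
 */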

void
dmastop(unit)
	int unit;
{
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the intr if there's
	 * nothing on the queue.
	 */
	if (dc->dm_job != NULL)
		(*dc->dm_job->dq_done)(dc->dm_job->dq_softc);
}

int
dmaintr(arg)
	void *arg;
{
	struct dma_softc *sc = arg;
	struct dma_channel *dc;
	int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
			    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD)))
				printf("dmaintr: flags %x unit %d stat %x next %d\n",
				    dc->dm_flags, i, stat, dc->dm_cur + 1);
		}
		if (stat & DMA_ARMED)
			printf("dma channel %d: intr when armed\n", i);
#endif
		/*
		 * Load the next segment, or finish up if we're done.
		 */
		dc->dm_cur++;
		if (dc->dm_cur <= dc->dm_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * If we're the last segment, disable the
			 * completion interrupt, if necessary.
			 */
			if (dc->dm_cur == dc->dm_last &&
			    (dc->dm_flags & DMAF_NOINTR))
				dc->dm_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(sc, dc);
		} else
			dmastop(i);
	}
	return (found);
}

#ifdef DEBUG
void
dmatimeout(arg)
	void *arg;
{
	int i, s;
	struct dma_softc *sc = arg;

	for (i = 0; i < NDMACHAN; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("dma channel %d timeout #%d\n",
				    i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
}
#endif