/*	$NetBSD: dma.c,v 1.25.12.2 2002/11/11 21:58:11 nathanw Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)dma.c	8.1 (Berkeley) 6/10/93
 */

/*
 * DMA driver
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dma.c,v 1.25.12.2 2002/11/11 21:58:11 nathanw Exp $");

#include <machine/hp300spu.h>	/* XXX param.h includes cpu.h */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/device.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/intr.h>
#include <m68k/cacheops.h>

#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>

/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/NBPG+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/NBPG) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/NBPG+1)
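/*
 * For example, assuming MAXPHYS is 64KB and NBPG is 4KB, that works
 * out to 16 + 1 = 17 chain elements.
 */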

struct dma_chain {
	int	dc_count;	/* segment transfer count */
	char	*dc_addr;	/* segment physical address */
};

struct dma_channel {
	struct dmaqueue *dm_job;		/* current job */
	struct dmadevice *dm_hwaddr;		/* registers if DMA_C */
	struct dmaBdevice *dm_Bhwaddr;		/* registers if not DMA_C */
	char	dm_flags;			/* misc. flags */
	u_short	dm_cmd;				/* DMA controller command */
	int	dm_cur;				/* current segment */
	int	dm_last;			/* last segment */
	struct dma_chain dm_chain[DMAMAXIO];	/* all segments */
};

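/*
 * There is only one DMA controller in the system, so a single
 * statically-allocated softc suffices; dmainit() fills it in.
 */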
struct dma_softc {
	struct dmareg *sc_dmareg;		/* pointer to our hardware */
	struct dma_channel sc_chan[NDMACHAN];	/* 2 channels */
	TAILQ_HEAD(, dmaqueue) sc_queue;	/* job queue */
	struct callout sc_debug_ch;		/* debug timeout callout */
	char	sc_type;			/* A, B, or C */
	int	sc_ipl;				/* our interrupt level */
	void	*sc_ih;				/* interrupt cookie */
} dma_softc;

/* types */
#define	DMA_B		0
#define	DMA_C		1
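/*
 * The 98620B is the 16-bit variant of the controller; anything that
 * passes the ID probe in dmainit() is a 98620C, which can also do
 * 32-bit (longword) transfers.
 */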

/* flags */
#define	DMAF_PCFLUSH	0x01	/* flush physical cache when done */
#define	DMAF_VCFLUSH	0x02	/* flush virtual cache when done */
#define	DMAF_NOINTR	0x04	/* skip completion interrupt if possible */

int	dmaintr __P((void *));

#ifdef DEBUG
int	dmadebug = 0;
#define DDB_WORD	0x01	/* same as DMAGO_WORD */
#define DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04
#define DDB_IO		0x08

void	dmatimeout __P((void *));
int	dmatimo[NDMACHAN];	/* nonzero while a transfer is outstanding */

long	dmahits[NDMACHAN];	/* adjacent segments coalesced */
long	dmamisses[NDMACHAN];	/* segments that could not be coalesced */
long	dmabyte[NDMACHAN];	/* byte-mode transfers */
long	dmaword[NDMACHAN];	/* word-mode transfers */
long	dmalword[NDMACHAN];	/* longword-mode transfers */
#endif

/*
 * Initialize the DMA engine, called by dioattach()
 */
void
dmainit()
{
	struct dma_softc *sc = &dma_softc;
	struct dmareg *dma;
	struct dma_channel *dc;
	int i;
	char rev;

	/* There's just one. */
	sc->sc_dmareg = (struct dmareg *)DMA_BASE;
	dma = sc->sc_dmareg;

	/*
	 * Determine the DMA type.  A DMA_A or DMA_B will fail the
	 * following probe.
	 *
	 * XXX Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * splbio works out to ipl 3).
	 */
	if (badbaddr((char *)&dma->dma_id[2])) {
		rev = 'B';
#if !defined(HP320)
		panic("dmainit: DMA card requires hp320 support");
#endif
	} else
		rev = dma->dma_id[2];

	sc->sc_type = (rev == 'B') ? DMA_B : DMA_C;

	TAILQ_INIT(&sc->sc_queue);
	callout_init(&sc->sc_debug_ch);

	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		dc->dm_job = NULL;
		switch (i) {
		case 0:
			dc->dm_hwaddr = &dma->dma_chan0;
			dc->dm_Bhwaddr = &dma->dma_Bchan0;
			break;

		case 1:
			dc->dm_hwaddr = &dma->dma_chan1;
			dc->dm_Bhwaddr = &dma->dma_Bchan1;
			break;

		default:
			panic("dmainit: more than 2 channels?");
			/* NOTREACHED */
		}
	}

#ifdef DEBUG
	/* make sure timeout is really not needed */
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
#endif

	printf("98620%c, 2 channels, %d bit DMA\n",
	    rev, (rev == 'B') ? 16 : 32);

	/*
	 * Defer hooking up our interrupt until the first
	 * DMA-using controller has hooked up theirs.
	 */
	sc->sc_ih = NULL;
}

/*
 * Compute the ipl and (re)establish the interrupt handler
 * for the DMA controller.
 */
void
dmacomputeipl()
{
	struct dma_softc *sc = &dma_softc;

	if (sc->sc_ih != NULL)
		intr_disestablish(sc->sc_ih);

	/*
	 * Our interrupt level must be as high as the highest
	 * device using DMA (i.e. splbio).
	 */
	sc->sc_ipl = PSLTOIPL(hp300_ipls[HP300_IPL_BIO]);
	sc->sc_ih = intr_establish(dmaintr, sc, sc->sc_ipl, IPL_BIO);
}

int
dmareq(dq)
	struct dmaqueue *dq;
{
	struct dma_softc *sc = &dma_softc;
	int i, chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

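	/*
	 * On entry, dq->dq_chan is a bitmask of the channels the caller
	 * is willing to use; on success it is overwritten with the
	 * number of the channel actually granted.
	 */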
	chan = dq->dq_chan;
	for (i = NDMACHAN - 1; i >= 0; i--) {
		/*
		 * Can we use this channel?
		 */
		if ((chan & (1 << i)) == 0)
			continue;

		/*
		 * We can use it; is it busy?
		 */
		if (sc->sc_chan[i].dm_job != NULL)
			continue;

		/*
		 * Not busy; give the caller this channel.
		 */
		sc->sc_chan[i].dm_job = dq;
		dq->dq_chan = i;
		splx(s);
		return (1);
	}

	/*
	 * Couldn't get a channel now; put this in the queue.
	 */
	TAILQ_INSERT_TAIL(&sc->sc_queue, dq, dq_list);
	splx(s);
	return (0);
}
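
/*
 * Note that a caller that got 0 back has been queued; its dq_start
 * routine will be invoked from dmafree() once a channel it can use
 * frees up.
 */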

void
dmafree(dq)
	struct dmaqueue *dq;
{
	int unit = dq->dq_chan;
	struct dma_softc *sc = &dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	struct dmaqueue *dn;
	int chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

#ifdef DEBUG
	dmatimo[unit] = 0;
#endif

	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * Channel is now free.  Look for another job to run on this
	 * channel.
	 */
	dc->dm_job = NULL;
	chan = 1 << unit;
	for (dn = sc->sc_queue.tqh_first; dn != NULL;
	    dn = dn->dq_list.tqe_next) {
		if (dn->dq_chan & chan) {
			/* Found one... */
			TAILQ_REMOVE(&sc->sc_queue, dn, dq_list);
			dc->dm_job = dn;
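			/*
			 * Hand the queued job the number of the channel
			 * just freed (dq->dq_chan still holds it).
			 */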
			dn->dq_chan = dq->dq_chan;
			splx(s);

			/* Start the initiator. */
			(*dn->dq_start)(dn->dq_softc);
			return;
		}
	}
	splx(s);
}

void
dmago(unit, addr, count, flags)
	int unit;
	char *addr;
	int count;
	int flags;
{
	struct dma_softc *sc = &dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	char *dmaend = NULL;
	int seg, tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");

#if defined(HP320)
	if (sc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %p, %x, %x)\n",
		    unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain
	 */
	for (seg = 0; count > 0; seg++) {
		dc->dm_chain[seg].dc_addr = (char *) kvtop(addr);
#if defined(M68040)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP((paddr_t)dc->dm_chain[seg].dc_addr);
#endif
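		/*
		 * tcount is the number of bytes from addr to the end of
		 * the current page, clamped to the bytes remaining.
		 */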
		if (count < (tcount = NBPG - ((int)addr & PGOFSET)))
			tcount = count;
		dc->dm_chain[seg].dc_count = tcount;
		addr += tcount;
		count -= tcount;
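		/*
		 * The controller counts in transfer units, so convert the
		 * byte count into words or longwords as appropriate.
		 */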
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;

		/*
		 * Try to compact the DMA transfer if the pages are adjacent.
		 * Note: this will never happen on the first iteration.
		 */
		if (dc->dm_chain[seg].dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (sc->sc_type != DMA_B ||
			dc->dm_chain[seg - 1].dc_count + tcount <= 65536)
#endif
		    ) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			dmaend += dc->dm_chain[seg].dc_count;
			dc->dm_chain[--seg].dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dc->dm_chain[seg].dc_addr +
			    dc->dm_chain[seg].dc_count;
			dc->dm_chain[seg].dc_count = tcount;
		}
	}
	dc->dm_cur = 0;
	dc->dm_last = --seg;	/* seg is one past the final element here */
	dc->dm_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->dm_cmd = DMA_ENAB | DMA_IPL(sc->sc_ipl) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->dm_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->dm_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->dm_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->dm_cmd |= DMA_PRI;

#if defined(M68040)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_PAC)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_VAC)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_VCFLUSH;
#endif

	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->dm_cur == dc->dm_last)
			dc->dm_cmd &= ~DMA_ENAB;
		else
			dc->dm_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO) {
		if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
		    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD))) {
			printf("dmago: cmd %x, flags %x\n",
			    dc->dm_cmd, dc->dm_flags);
			for (seg = 0; seg <= dc->dm_last; seg++)
				printf(" %d: %d@%p\n", seg,
				    dc->dm_chain[seg].dc_count,
				    dc->dm_chain[seg].dc_addr);
		}
	}
	dmatimo[unit] = 1;
#endif
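	/*
	 * DMA_ARM() (see dmareg.h) loads the current chain segment into
	 * the channel's hardware registers and writes the command word,
	 * starting the transfer.
	 */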
	DMA_ARM(sc, dc);
}

void
dmastop(unit)
	int unit;
{
	struct dma_softc *sc = &dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the intr if there's
	 * nothing on the queue.
	 */
	if (dc->dm_job != NULL)
		(*dc->dm_job->dq_done)(dc->dm_job->dq_softc);
}

int
dmaintr(arg)
	void *arg;
{
	struct dma_softc *sc = arg;
	struct dma_channel *dc;
	int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
			    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD)))
				printf("dmaintr: flags %x unit %d stat %x next %d\n",
				    dc->dm_flags, i, stat, dc->dm_cur + 1);
		}
		if (stat & DMA_ARMED)
			printf("dma channel %d: intr when armed\n", i);
#endif
		/*
		 * Load the next segment, or finish up if we're done.
		 */
		dc->dm_cur++;
		if (dc->dm_cur <= dc->dm_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * If we're the last segment, disable the
			 * completion interrupt, if necessary.
			 */
			if (dc->dm_cur == dc->dm_last &&
			    (dc->dm_flags & DMAF_NOINTR))
				dc->dm_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(sc, dc);
		} else
			dmastop(i);
	}
	return (found);
}

#ifdef DEBUG
void
dmatimeout(arg)
	void *arg;
{
	int i, s;
	struct dma_softc *sc = arg;

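	/*
	 * dmatimo[i] is set to 1 each time a segment is started and
	 * cleared on completion; if it is still nonzero here, the
	 * channel has been busy for an entire 30 second interval.
	 */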
	for (i = 0; i < NDMACHAN; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("dma channel %d timeout #%d\n",
				    i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
}
#endif