/*	$NetBSD: dma.c,v 1.21 1998/05/23 20:51:09 is Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)dma.c	8.1 (Berkeley) 6/10/93
 */

/*
 * DMA driver
 */

#include "opt_m68kcpu.h"
#include <machine/hp300spu.h>	/* XXX param.h includes cpu.h */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/device.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>

/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/NBPG+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/NBPG) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/NBPG+1)
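/*
 * Illustrative arithmetic (the MAXPHYS and NBPG values below are
 * platform-dependent assumptions, not guaranteed): with MAXPHYS of
 * 64KB and 4KB pages, DMAMAXIO works out to 64KB/4KB + 1 = 17 chain
 * elements per channel.
 */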

struct dma_chain {
	int	dc_count;	/* transfer count (scaled for word/lword DMA) */
	char	*dc_addr;	/* physical address of segment */
};

struct dma_channel {
	struct	dmaqueue *dm_job;		/* current job */
	struct	dmadevice *dm_hwaddr;		/* registers if DMA_C */
	struct	dmaBdevice *dm_Bhwaddr;		/* registers if not DMA_C */
	char	dm_flags;			/* misc. flags */
	u_short	dm_cmd;				/* DMA controller command */
	int	dm_cur;				/* current segment */
	int	dm_last;			/* last segment */
	struct	dma_chain dm_chain[DMAMAXIO];	/* all segments */
};

struct dma_softc {
	struct	dmareg *sc_dmareg;		/* pointer to our hardware */
	struct	dma_channel sc_chan[NDMACHAN];	/* 2 channels */
	TAILQ_HEAD(, dmaqueue) sc_queue;	/* job queue */
	char	sc_type;			/* A, B, or C */
	int	sc_ipl;				/* our interrupt level */
	void	*sc_ih;				/* interrupt cookie */
} dma_softc;

/* types */
#define	DMA_B	0
#define	DMA_C	1

/* flags */
#define	DMAF_PCFLUSH	0x01	/* flush physical cache when DMA completes */
#define	DMAF_VCFLUSH	0x02	/* flush virtual cache when DMA completes */
#define	DMAF_NOINTR	0x04	/* skip completion interrupt on last segment */

int	dmaintr __P((void *));

#ifdef DEBUG
int	dmadebug = 0;
#define DDB_WORD	0x01	/* same as DMAGO_WORD */
#define DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04
#define DDB_IO		0x08

void	dmatimeout __P((void *));
int	dmatimo[NDMACHAN];	/* watchdog: nonzero while a transfer is up */

long	dmahits[NDMACHAN];	/* chain segments merged with predecessor */
long	dmamisses[NDMACHAN];	/* chain segments needing a new element */
long	dmabyte[NDMACHAN];	/* byte-wide transfers started */
long	dmaword[NDMACHAN];	/* word-wide transfers started */
long	dmalword[NDMACHAN];	/* longword-wide transfers started */
#endif

/*
 * Initialize the DMA engine, called by dioattach()
 */
void
dmainit()
{
	struct dma_softc *sc = &dma_softc;
	struct dmareg *dma;
	struct dma_channel *dc;
	int i;
	char rev;

	/* There's just one. */
	sc->sc_dmareg = (struct dmareg *)DMA_BASE;
	dma = sc->sc_dmareg;

	/*
	 * Determine the DMA type.  A DMA_A or DMA_B will fail the
	 * following probe.
	 *
	 * XXX Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * splbio works out to ipl 3).
	 */
	if (badbaddr((char *)&dma->dma_id[2])) {
		rev = 'B';
#if !defined(HP320)
		panic("dmainit: DMA card requires hp320 support");
#endif
	} else
		rev = dma->dma_id[2];

	sc->sc_type = (rev == 'B') ? DMA_B : DMA_C;

	TAILQ_INIT(&sc->sc_queue);

	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		dc->dm_job = NULL;
		switch (i) {
		case 0:
			dc->dm_hwaddr = &dma->dma_chan0;
			dc->dm_Bhwaddr = &dma->dma_Bchan0;
			break;

		case 1:
			dc->dm_hwaddr = &dma->dma_chan1;
			dc->dm_Bhwaddr = &dma->dma_Bchan1;
			break;

		default:
			panic("dmainit: more than 2 channels?");
			/* NOTREACHED */
		}
	}

#ifdef DEBUG
	/* make sure timeout is really not needed */
	timeout(dmatimeout, sc, 30 * hz);
#endif

	printf("98620%c, 2 channels, %d bit DMA\n",
	    rev, (rev == 'B') ? 16 : 32);

	/*
	 * Defer hooking up our interrupt until the first
	 * DMA-using controller has hooked up theirs.
	 */
	sc->sc_ih = NULL;
}

/*
 * Compute the ipl and (re)establish the interrupt handler
 * for the DMA controller.
 */
void
dmacomputeipl()
{
	struct dma_softc *sc = &dma_softc;

	if (sc->sc_ih != NULL)
		intr_disestablish(sc->sc_ih);

	/*
	 * Our interrupt level must be as high as the highest
	 * device using DMA (i.e. splbio).
	 */
	sc->sc_ipl = PSLTOIPL(hp300_bioipl);
	sc->sc_ih = intr_establish(dmaintr, sc, sc->sc_ipl, IPL_BIO);
}
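
/*
 * Controllers that use DMA re-invoke dmacomputeipl() as they hook up
 * their own interrupts (see the deferred hookup in dmainit()), so the
 * DMA handler is always (re)established at a level at least as high
 * as that of every client device.
 */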

int
dmareq(dq)
	struct dmaqueue *dq;
{
	struct dma_softc *sc = &dma_softc;
	int i, chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

	chan = dq->dq_chan;
	for (i = NDMACHAN - 1; i >= 0; i--) {
		/*
		 * Can we use this channel?
		 */
		if ((chan & (1 << i)) == 0)
			continue;

		/*
		 * We can use it; is it busy?
		 */
		if (sc->sc_chan[i].dm_job != NULL)
			continue;

		/*
		 * Not busy; give the caller this channel.
		 */
		sc->sc_chan[i].dm_job = dq;
		dq->dq_chan = i;
		splx(s);
		return (1);
	}

	/*
	 * Couldn't get a channel now; put this in the queue.
	 */
	TAILQ_INSERT_TAIL(&sc->sc_queue, dq, dq_list);
	splx(s);
	return (0);
}
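
/*
 * Sketch of a hypothetical caller (not taken from an actual driver;
 * the dmaqueue field names follow their usage in this file, and the
 * DMA0/DMA1 channel-mask macros are assumed to come from dmavar.h):
 *
 *	sc->sc_dq.dq_softc = sc;
 *	sc->sc_dq.dq_start = xxdmastart;	hypothetical callbacks
 *	sc->sc_dq.dq_done  = xxdmadone;
 *	sc->sc_dq.dq_chan  = DMA0 | DMA1;	mask: any channel will do
 *	if (dmareq(&sc->sc_dq))
 *		xxdmastart(sc);			granted immediately;
 *						otherwise dq_start fires
 *						later from dmafree()
 *
 * Note that dq_chan is a request mask going in, and holds the assigned
 * channel number once a channel is granted.
 */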

void
dmafree(dq)
	struct dmaqueue *dq;
{
	int unit = dq->dq_chan;
	struct dma_softc *sc = &dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	struct dmaqueue *dn;
	int chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

#ifdef DEBUG
	dmatimo[unit] = 0;
#endif

	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * Channel is now free.  Look for another job to run on this
	 * channel.
	 */
	dc->dm_job = NULL;
	chan = 1 << unit;
	for (dn = sc->sc_queue.tqh_first; dn != NULL;
	    dn = dn->dq_list.tqe_next) {
		if (dn->dq_chan & chan) {
			/* Found one... */
			TAILQ_REMOVE(&sc->sc_queue, dn, dq_list);
			dc->dm_job = dn;
			/*
			 * Pass on our channel number; dq_chan is a
			 * request mask while queued but holds the
			 * assigned channel once granted (see dmareq()).
			 */
			dn->dq_chan = dq->dq_chan;
			splx(s);

			/* Start the initiator. */
			(*dn->dq_start)(dn->dq_softc);
			return;
		}
	}
	splx(s);
}

void
dmago(unit, addr, count, flags)
	int unit;
	char *addr;
	int count;
	int flags;
{
	struct dma_softc *sc = &dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	char *dmaend = NULL;
	int seg, tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");

#if defined(HP320)
	if (sc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %p, %x, %x)\n",
		    unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain
	 */
	for (seg = 0; count > 0; seg++) {
		dc->dm_chain[seg].dc_addr = (char *) kvtop(addr);
#if defined(M68040)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP((vm_offset_t)dc->dm_chain[seg].dc_addr);
#endif
		if (count < (tcount = NBPG - ((int)addr & PGOFSET)))
			tcount = count;
		dc->dm_chain[seg].dc_count = tcount;
		addr += tcount;
		count -= tcount;
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;

		/*
		 * Try to compact the DMA transfer if the pages are adjacent.
		 * Note: this will never happen on the first iteration.
		 */
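		/*
		 * Worked example (addresses purely illustrative): if
		 * the previous segment ends at physical 0x2000 and
		 * kvtop() of this page also yields 0x2000, the pages
		 * are physically contiguous, so the (possibly word-
		 * scaled) tcount is folded into the previous chain
		 * element rather than consuming a new one.
		 */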
		if (dc->dm_chain[seg].dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (sc->sc_type != DMA_B ||
			dc->dm_chain[seg - 1].dc_count + tcount <= 65536)
#endif
		    ) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			dmaend += dc->dm_chain[seg].dc_count;
			dc->dm_chain[--seg].dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dc->dm_chain[seg].dc_addr +
			    dc->dm_chain[seg].dc_count;
			dc->dm_chain[seg].dc_count = tcount;
		}
	}
	dc->dm_cur = 0;
	dc->dm_last = --seg;
	dc->dm_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->dm_cmd = DMA_ENAB | DMA_IPL(sc->sc_ipl) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->dm_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->dm_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->dm_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->dm_cmd |= DMA_PRI;

#if defined(M68040)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_PAC)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_VAC)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_VCFLUSH;
#endif

	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->dm_cur == dc->dm_last)
			dc->dm_cmd &= ~DMA_ENAB;
		else
			dc->dm_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO) {
		if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
		    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD))) {
			printf("dmago: cmd %x, flags %x\n",
			    dc->dm_cmd, dc->dm_flags);
			for (seg = 0; seg <= dc->dm_last; seg++)
				printf("  %d: %d@%p\n", seg,
				    dc->dm_chain[seg].dc_count,
				    dc->dm_chain[seg].dc_addr);
		}
	}
	dmatimo[unit] = 1;
#endif
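	/*
	 * Kick off the first segment.  DMA_ARM is defined in dmareg.h;
	 * presumably it loads dm_chain[dm_cur] and dm_cmd into the
	 * hardware, since dmaintr() re-arms the same way for each
	 * subsequent segment.
	 */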
	DMA_ARM(sc, dc);
}

void
dmastop(unit)
	int unit;
{
	struct dma_softc *sc = &dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the intr if there's
	 * nothing on the queue.
	 */
	if (dc->dm_job != NULL)
		(*dc->dm_job->dq_done)(dc->dm_job->dq_softc);
}

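/*
 * DMA completion interrupt.  Returns nonzero if any channel had an
 * interrupt pending, so the dispatcher can tell whether the interrupt
 * was ours (following the usual NetBSD handler convention).
 */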
int
dmaintr(arg)
	void *arg;
{
	struct dma_softc *sc = arg;
	struct dma_channel *dc;
	int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
			    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD)))
				printf("dmaintr: flags %x unit %d stat %x next %d\n",
				    dc->dm_flags, i, stat, dc->dm_cur + 1);
		}
		if (stat & DMA_ARMED)
			printf("dma channel %d: intr when armed\n", i);
#endif
		/*
		 * Load the next segment, or finish up if we're done.
		 */
		dc->dm_cur++;
		if (dc->dm_cur <= dc->dm_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * If we're the last segment, disable the
			 * completion interrupt, if necessary.
			 */
			if (dc->dm_cur == dc->dm_last &&
			    (dc->dm_flags & DMAF_NOINTR))
				dc->dm_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(sc, dc);
		} else
			dmastop(i);
	}
	return (found);
}

#ifdef DEBUG
void
dmatimeout(arg)
	void *arg;
{
	int i, s;
	struct dma_softc *sc = arg;

	for (i = 0; i < NDMACHAN; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("dma channel %d timeout #%d\n",
				    i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	timeout(dmatimeout, sc, 30 * hz);
}
#endif