dma.c revision 1.11 1 /* $NetBSD: dma.c,v 1.11 1997/01/30 09:04:33 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 1995, 1996, 1997
5 * Jason R. Thorpe. All rights reserved.
6 * Copyright (c) 1982, 1990, 1993
7 * The Regents of the University of California. All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * @(#)dma.c 8.1 (Berkeley) 6/10/93
38 */
39
40 /*
41 * DMA driver
42 */
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/time.h>
47 #include <sys/kernel.h>
48 #include <sys/proc.h>
49 #include <sys/device.h>
50
51 #include <machine/cpu.h>
52
53 #include <hp300/dev/dmareg.h>
54 #include <hp300/dev/dmavar.h>
55
56 #include <hp300/hp300/isr.h>
57
58 extern u_int kvtop();
59 extern void PCIA();
60
/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/NBPG+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/NBPG) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/NBPG+1)

/*
 * One physically-contiguous segment of a DMA transfer chain.
 */
struct dma_chain {
	int	dc_count;	/* transfer count; units depend on width flags (see dmago()) */
	char	*dc_addr;	/* physical start address of the segment */
};
73
/*
 * Per-channel software state for one of the 98620's two DMA channels.
 */
struct dma_channel {
	struct	dmaqueue *dm_job;		/* current job */
	struct	dma_softc *dm_softc;		/* pointer back to softc */
	struct	dmadevice *dm_hwaddr;		/* registers if DMA_C */
	struct	dmaBdevice *dm_Bhwaddr;		/* registers if not DMA_C */
	char	dm_flags;			/* misc. flags (DMAF_*) */
	u_short	dm_cmd;				/* DMA controller command */
	int	dm_cur;				/* current segment */
	int	dm_last;			/* last segment */
	struct	dma_chain dm_chain[DMAMAXIO];	/* all segments */
};
85
/*
 * Software state for the (single) 98620 DMA controller; there is
 * exactly one instance, Dma_softc, initialized by dmainit().
 */
struct dma_softc {
	char	*sc_xname;			/* XXX external name */
	struct	dmareg *sc_dmareg;		/* pointer to our hardware */
	struct	dma_channel sc_chan[NDMACHAN];	/* 2 channels */
	TAILQ_HEAD(, dmaqueue) sc_queue;	/* job queue */
	char	sc_type;			/* A, B, or C */
	int	sc_ipl;				/* our interrupt level */
	void	*sc_ih;				/* interrupt cookie */
} Dma_softc;

/* types (sc_type) */
#define	DMA_B	0
#define DMA_C	1

/* flags (dm_flags) */
#define DMAF_PCFLUSH	0x01	/* flush physical/external cache when done */
#define DMAF_VCFLUSH	0x02	/* flush virtual cache (VAC) when done */
#define DMAF_NOINTR	0x04	/* suppress completion intr on last segment */
104
int	dmaintr __P((void *));

#ifdef DEBUG
int	dmadebug = 0;
#define DDB_WORD	0x01	/* same as DMAGO_WORD */
#define DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04	/* trace function entry */
#define DDB_IO		0x08	/* dump per-segment I/O information */

void	dmatimeout __P((void *));
int	dmatimo[NDMACHAN];	/* per-channel watchdog, polled by dmatimeout() */

long	dmahits[NDMACHAN];	/* segments merged with their predecessor */
long	dmamisses[NDMACHAN];	/* segments that could not be merged */
long	dmabyte[NDMACHAN];	/* byte-mode transfers started */
long	dmaword[NDMACHAN];	/* word-mode transfers started */
long	dmalword[NDMACHAN];	/* longword-mode transfers started */
#endif
123
/*
 * One-time initialization of the (single) 98620 DMA controller:
 * probe for the controller revision, initialize the job queue and
 * both channel structures, and announce the device.  Interrupt
 * hookup is deferred until dmacomputeipl() is first called.
 */
void
dmainit()
{
	struct dma_softc *sc = &Dma_softc;
	struct dmareg *dma;
	struct dma_channel *dc;
	int i;
	char rev;

	/* There's just one. */
	sc->sc_dmareg = (struct dmareg *)DMA_BASE;
	dma = sc->sc_dmareg;
	sc->sc_xname = "dma0";

	/*
	 * Determine the DMA type.  A DMA_A or DMA_B will fail the
	 * following probe.
	 *
	 * XXX Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * splbio works out to ipl 3).
	 */
	if (badbaddr((char *)&dma->dma_id[2])) {
		rev = 'B';
#if !defined(HP320)
		panic("dmainit: DMA card requires hp320 support");
#endif
	} else
		rev = dma->dma_id[2];

	sc->sc_type = (rev == 'B') ? DMA_B : DMA_C;

	TAILQ_INIT(&sc->sc_queue);

	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		dc->dm_softc = sc;
		dc->dm_job = NULL;
		switch (i) {
		case 0:
			dc->dm_hwaddr = &dma->dma_chan0;
			dc->dm_Bhwaddr = &dma->dma_Bchan0;
			break;

		case 1:
			dc->dm_hwaddr = &dma->dma_chan1;
			dc->dm_Bhwaddr = &dma->dma_Bchan1;
			break;

		default:
			panic("dmainit: more than 2 channels?");
			/* NOTREACHED */
		}
	}

#ifdef DEBUG
	/* make sure timeout is really not needed */
	timeout(dmatimeout, sc, 30 * hz);
#endif

	printf("%s: 98620%c, 2 channels, %d bit\n", sc->sc_xname,
	    rev, (rev == 'B') ? 16 : 32);

	/*
	 * Defer hooking up our interrupt until the first
	 * DMA-using controller has hooked up theirs.
	 */
	sc->sc_ih = NULL;
}
193
/*
 * Compute the ipl and (re)establish the interrupt handler
 * for the DMA controller.  Called as DMA-using controllers
 * attach, so our handler always runs at least as high as the
 * highest client device.
 */
void
dmacomputeipl()
{
	struct dma_softc *sc = &Dma_softc;

	/* Unhook any previously-established handler before relinking. */
	if (sc->sc_ih != NULL)
		isrunlink(sc->sc_ih);

	/*
	 * Our interrupt level must be as high as the highest
	 * device using DMA (i.e. splbio).
	 */
	sc->sc_ipl = PSLTOIPL(hp300_bioipl);
	sc->sc_ih = isrlink(dmaintr, sc, sc->sc_ipl, ISRPRI_BIO);
}
213
214 int
215 dmareq(dq)
216 struct dmaqueue *dq;
217 {
218 struct dma_softc *sc = &Dma_softc;
219 int i, chan, s;
220
221 #if 1
222 s = splhigh(); /* XXXthorpej */
223 #else
224 s = splbio();
225 #endif
226
227 chan = dq->dq_chan;
228 for (i = NDMACHAN - 1; i >= 0; i--) {
229 /*
230 * Can we use this channel?
231 */
232 if ((chan & (1 << i)) == 0)
233 continue;
234
235 /*
236 * We can use it; is it busy?
237 */
238 if (sc->sc_chan[i].dm_job != NULL)
239 continue;
240
241 /*
242 * Not busy; give the caller this channel.
243 */
244 sc->sc_chan[i].dm_job = dq;
245 dq->dq_chan = i;
246 splx(s);
247 return (1);
248 }
249
250 /*
251 * Couldn't get a channel now; put this in the queue.
252 */
253 TAILQ_INSERT_TAIL(&sc->sc_queue, dq, dq_list);
254 splx(s);
255 return (0);
256 }
257
/*
 * Release the DMA channel held by job `dq': clear the channel's
 * hardware state, perform any cache flushing deferred from
 * dmastop(), then hand the channel to the first queued job that
 * can use it, starting that job via its dq_start routine.
 */
void
dmafree(dq)
	struct dmaqueue *dq;
{
	int unit = dq->dq_chan;
	struct dma_softc *sc = &Dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	struct dmaqueue *dn;
	int chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

#ifdef DEBUG
	dmatimo[unit] = 0;	/* disarm the watchdog for this channel */
#endif

	DMA_CLEAR(dc);
#if defined(HP360) || defined(HP370) || defined(HP380)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif
	/*
	 * Channel is now free.  Look for another job to run on this
	 * channel.
	 */
	dc->dm_job = NULL;
	chan = 1 << unit;
	for (dn = sc->sc_queue.tqh_first; dn != NULL;
	    dn = dn->dq_list.tqe_next) {
		if (dn->dq_chan & chan) {
			/* Found one... */
			TAILQ_REMOVE(&sc->sc_queue, dn, dq_list);
			dc->dm_job = dn;
			/* Hand it the channel number we just vacated. */
			dn->dq_chan = dq->dq_chan;
			splx(s);

			/* Start the initiator. */
			(*dn->dq_start)(dn->dq_softc);
			return;
		}
	}
	splx(s);
}
325
/*
 * Build the segment chain for a transfer of `count' bytes starting
 * at kernel virtual address `addr', and start it on channel `unit'.
 * `flags' is a mask of DMAGO_* values selecting transfer width
 * (byte/word/lword), direction (DMAGO_READ), priority (DMAGO_PRI)
 * and completion-interrupt suppression (DMAGO_NOINT).  The caller
 * must already own the channel (via dmareq()).
 */
void
dmago(unit, addr, count, flags)
	int unit;
	register char *addr;
	register int count;
	register int flags;
{
	struct dma_softc *sc = &Dma_softc;
	register struct dma_channel *dc = &sc->sc_chan[unit];
	register char *dmaend = NULL;
	register int seg, tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");
#if defined(HP320)
	/* the 98620B is a 16-bit card; it cannot do longword DMA */
	if (sc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif
#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %x, %x, %x)\n",
		    unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain, one page (or page fragment) at a time.
	 * Note: dc_count temporarily holds the segment's BYTE count
	 * (used for the adjacency bookkeeping below) and is then
	 * replaced by the width-shifted transfer count.
	 */
	for (seg = 0; count > 0; seg++) {
		dc->dm_chain[seg].dc_addr = (char *) kvtop(addr);
#if defined(HP380)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP(dc->dm_chain[seg].dc_addr);
#endif
		/* tcount = bytes remaining in this page, capped by count */
		if (count < (tcount = NBPG - ((int)addr & PGOFSET)))
			tcount = count;
		dc->dm_chain[seg].dc_count = tcount;
		addr += tcount;
		count -= tcount;
		/* convert byte count to hardware transfer units */
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;

		/*
		 * Try to compact the DMA transfer if the pages are adjacent.
		 * Note: this will never happen on the first iteration.
		 */
		if (dc->dm_chain[seg].dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (sc->sc_type != DMA_B ||
			dc->dm_chain[seg - 1].dc_count + tcount <= 65536)
#endif
		    ) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			/* merge into the previous segment */
			dmaend += dc->dm_chain[seg].dc_count;
			dc->dm_chain[--seg].dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			/* dc_count is still the byte count here */
			dmaend = dc->dm_chain[seg].dc_addr +
			    dc->dm_chain[seg].dc_count;
			dc->dm_chain[seg].dc_count = tcount;
		}
	}
	dc->dm_cur = 0;
	dc->dm_last = --seg;
	dc->dm_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->dm_cmd = DMA_ENAB | DMA_IPL(sc->sc_ipl) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->dm_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->dm_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->dm_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->dm_cmd |= DMA_PRI;
#if defined(HP380)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP360) || defined(HP370)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP320) || defined(HP350)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_VCFLUSH;
#endif
	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->dm_cur == dc->dm_last)
			dc->dm_cmd &= ~DMA_ENAB;
		else
			dc->dm_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO) {
		if ((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD) ||
		    (dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD)) {
			printf("dmago: cmd %x, flags %x\n",
			    dc->dm_cmd, dc->dm_flags);
			for (seg = 0; seg <= dc->dm_last; seg++)
				printf(" %d: %d@%x\n", seg,
				    dc->dm_chain[seg].dc_count,
				    dc->dm_chain[seg].dc_addr);
		}
	}
	dmatimo[unit] = 1;	/* arm the watchdog */
#endif
	DMA_ARM(dc);
}
466
467 void
468 dmastop(unit)
469 register int unit;
470 {
471 struct dma_softc *sc = &Dma_softc;
472 register struct dma_channel *dc = &sc->sc_chan[unit];
473 struct dmaqueue *dq;
474
475 #ifdef DEBUG
476 if (dmadebug & DDB_FOLLOW)
477 printf("dmastop(%d)\n", unit);
478 dmatimo[unit] = 0;
479 #endif
480 DMA_CLEAR(dc);
481 #if defined(HP360) || defined(HP370) || defined(HP380)
482 if (dc->dm_flags & DMAF_PCFLUSH) {
483 PCIA();
484 dc->dm_flags &= ~DMAF_PCFLUSH;
485 }
486 #endif
487 #if defined(HP320) || defined(HP350)
488 if (dc->dm_flags & DMAF_VCFLUSH) {
489 /*
490 * 320/350s have VACs that may also need flushing.
491 * In our case we only flush the supervisor side
492 * because we know that if we are DMAing to user
493 * space, the physical pages will also be mapped
494 * in kernel space (via vmapbuf) and hence cache-
495 * inhibited by the pmap module due to the multiple
496 * mapping.
497 */
498 DCIS();
499 dc->dm_flags &= ~DMAF_VCFLUSH;
500 }
501 #endif
502 /*
503 * We may get this interrupt after a device service routine
504 * has freed the dma channel. So, ignore the intr if there's
505 * nothing on the queue.
506 */
507 if (dc->dm_job != NULL)
508 (*dc->dm_job->dq_done)(dc->dm_job->dq_softc);
509 }
510
/*
 * Interrupt handler for the DMA controller.  Polls both channels
 * for a pending completion interrupt; for each one found, either
 * arms the next segment of its chain or, when the chain is
 * exhausted, finishes the job via dmastop().  Returns non-zero if
 * any interrupt was serviced (for the shared-interrupt dispatcher).
 */
int
dmaintr(arg)
	void *arg;
{
	struct dma_softc *sc = arg;
	register struct dma_channel *dc;
	register int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if ((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD) ||
			    (dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD))
				printf("dmaintr: flags %x unit %d stat %x next %d\n",
				    dc->dm_flags, i, stat, dc->dm_cur + 1);
		}
		if (stat & DMA_ARMED)
			printf("%s, chan %d: intr when armed\n",
			    sc->sc_xname, i);
#endif
		/*
		 * Load the next segment, or finish up if we're done.
		 */
		dc->dm_cur++;
		if (dc->dm_cur <= dc->dm_last) {
#ifdef DEBUG
			dmatimo[i] = 1;	/* re-arm the watchdog */
#endif
			/*
			 * If we're the last segment, disable the
			 * completion interrupt, if necessary.
			 */
			if (dc->dm_cur == dc->dm_last &&
			    (dc->dm_flags & DMAF_NOINTR))
				dc->dm_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(dc);
		} else
			dmastop(i);
	}
	return(found);
}
563
#ifdef DEBUG
/*
 * Debugging watchdog, rescheduled every 30 seconds.  Any channel
 * whose dmatimo counter was armed (by dmago()/dmaintr()) and has
 * not been cleared since the previous pass is reported as a
 * possibly hung transfer.
 */
void
dmatimeout(arg)
	void *arg;
{
	struct dma_softc *sc = arg;
	register int chan, s;

	for (chan = 0; chan < NDMACHAN; chan++) {
		s = splbio();
		if (dmatimo[chan] != 0) {
			if (dmatimo[chan] > 1)
				printf("%s: chan %d timeout #%d\n",
				    sc->sc_xname, chan, dmatimo[chan]-1);
			dmatimo[chan]++;
		}
		splx(s);
	}

	/* Reschedule ourselves. */
	timeout(dmatimeout, sc, 30 * hz);
}
#endif
585