/*	$NetBSD: dma.c,v 1.15 1997/04/02 22:37:27 scottr Exp $	*/

/*
 * Copyright (c) 1995, 1996, 1997
 *	Jason R. Thorpe.  All rights reserved.
 * Copyright (c) 1982, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)dma.c	8.1 (Berkeley) 6/10/93
 */

/*
 * DMA driver
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/device.h>

#include <machine/frame.h>
#include <machine/cpu.h>

#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>

#include <hp300/hp300/isr.h>

/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/NBPG+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/NBPG) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/NBPG+1)
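
/*
 * Worked example (a sketch, assuming the common hp300 values of
 * MAXPHYS = 64KB and NBPG = 4KB): a worst-case 64KB transfer that
 * starts mid-page touches a partial first page, 15 full pages, and
 * a partial last page, none physically contiguous, so it needs
 * DMAMAXIO = 64/4 + 1 = 17 chain elements.
 */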

struct dma_chain {
	int	dc_count;
	char	*dc_addr;
};

struct dma_channel {
	struct	dmaqueue *dm_job;		/* current job */
	struct	dma_softc *dm_softc;		/* pointer back to softc */
	struct	dmadevice *dm_hwaddr;		/* registers if DMA_C */
	struct	dmaBdevice *dm_Bhwaddr;		/* registers if not DMA_C */
	char	dm_flags;			/* misc. flags */
	u_short	dm_cmd;				/* DMA controller command */
	int	dm_cur;				/* current segment */
	int	dm_last;			/* last segment */
	struct	dma_chain dm_chain[DMAMAXIO];	/* all segments */
};

struct dma_softc {
	char	*sc_xname;			/* XXX external name */
	struct	dmareg *sc_dmareg;		/* pointer to our hardware */
	struct	dma_channel sc_chan[NDMACHAN];	/* 2 channels */
	TAILQ_HEAD(, dmaqueue) sc_queue;	/* job queue */
	char	sc_type;			/* A, B, or C */
	int	sc_ipl;				/* our interrupt level */
	void	*sc_ih;				/* interrupt cookie */
} Dma_softc;

/* types */
#define	DMA_B		0
#define	DMA_C		1

/* flags */
#define	DMAF_PCFLUSH	0x01
#define	DMAF_VCFLUSH	0x02
#define	DMAF_NOINTR	0x04

int	dmaintr __P((void *));

#ifdef DEBUG
int	dmadebug = 0;
#define	DDB_WORD	0x01	/* same as DMAGO_WORD */
#define	DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04
#define	DDB_IO		0x08

void	dmatimeout __P((void *));
int	dmatimo[NDMACHAN];

long	dmahits[NDMACHAN];
long	dmamisses[NDMACHAN];
long	dmabyte[NDMACHAN];
long	dmaword[NDMACHAN];
long	dmalword[NDMACHAN];
#endif

void
dmainit()
{
	struct dma_softc *sc = &Dma_softc;
	struct dmareg *dma;
	struct dma_channel *dc;
	int i;
	char rev;

	/* There's just one. */
	sc->sc_dmareg = (struct dmareg *)DMA_BASE;
	dma = sc->sc_dmareg;
	sc->sc_xname = "dma0";

	/*
	 * Determine the DMA type.  A DMA_A or DMA_B will fail the
	 * following probe.
	 *
	 * XXX Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * splbio works out to ipl 3).
	 */
	if (badbaddr((char *)&dma->dma_id[2])) {
		rev = 'B';
#if !defined(HP320)
		panic("dmainit: DMA card requires hp320 support");
#endif
	} else
		rev = dma->dma_id[2];

	sc->sc_type = (rev == 'B') ? DMA_B : DMA_C;

	TAILQ_INIT(&sc->sc_queue);

	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		dc->dm_softc = sc;
		dc->dm_job = NULL;
		switch (i) {
		case 0:
			dc->dm_hwaddr = &dma->dma_chan0;
			dc->dm_Bhwaddr = &dma->dma_Bchan0;
			break;

		case 1:
			dc->dm_hwaddr = &dma->dma_chan1;
			dc->dm_Bhwaddr = &dma->dma_Bchan1;
			break;

		default:
			panic("dmainit: more than 2 channels?");
			/* NOTREACHED */
		}
	}

#ifdef DEBUG
	/* make sure timeout is really not needed */
	timeout(dmatimeout, sc, 30 * hz);
#endif

	printf("%s: 98620%c, 2 channels, %d bit\n", sc->sc_xname,
	    rev, (rev == 'B') ? 16 : 32);

	/*
	 * Defer hooking up our interrupt until the first
	 * DMA-using controller has hooked up theirs.
	 */
	sc->sc_ih = NULL;
}

/*
 * Compute the ipl and (re)establish the interrupt handler
 * for the DMA controller.
 */
void
dmacomputeipl()
{
	struct dma_softc *sc = &Dma_softc;

	if (sc->sc_ih != NULL)
		isrunlink(sc->sc_ih);

	/*
	 * Our interrupt level must be as high as the highest
	 * device using DMA (i.e. splbio).
	 */
	sc->sc_ipl = PSLTOIPL(hp300_bioipl);
	sc->sc_ih = isrlink(dmaintr, sc, sc->sc_ipl, ISRPRI_BIO);
}
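
/*
 * For example (illustrative only; the actual mask encodings live in
 * machine/psl.h): if the highest-level DMA-using device interrupts at
 * processor level 4, hp300_bioipl carries that level in its PSL
 * interrupt-mask field, PSLTOIPL() extracts the bare level, and the
 * handler above is (re)established at ipl 4.
 */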

int
dmareq(dq)
	struct dmaqueue *dq;
{
	struct dma_softc *sc = &Dma_softc;
	int i, chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

	chan = dq->dq_chan;
	for (i = NDMACHAN - 1; i >= 0; i--) {
		/*
		 * Can we use this channel?
		 */
		if ((chan & (1 << i)) == 0)
			continue;

		/*
		 * We can use it; is it busy?
		 */
		if (sc->sc_chan[i].dm_job != NULL)
			continue;

		/*
		 * Not busy; give the caller this channel.
		 */
		sc->sc_chan[i].dm_job = dq;
		dq->dq_chan = i;
		splx(s);
		return (1);
	}

	/*
	 * Couldn't get a channel now; put this in the queue.
	 */
	TAILQ_INSERT_TAIL(&sc->sc_queue, dq, dq_list);
	splx(s);
	return (0);
}
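
/*
 * Usage sketch for dmareq() (hypothetical caller; the xx names and
 * softc layout are illustrative, not from a real driver).  On input
 * dq_chan is a bit mask of acceptable channels; on success it is
 * overwritten with the granted channel number:
 *
 *	sc->sc_dq.dq_softc = sc;
 *	sc->sc_dq.dq_start = xxstart;	// called when a queued job wins a channel
 *	sc->sc_dq.dq_done  = xxdone;	// called from dmastop() on completion
 *	sc->sc_dq.dq_chan  = (1 << 0) | (1 << 1);	// either channel will do
 *	if (dmareq(&sc->sc_dq))
 *		xxstart(sc);		// channel granted immediately
 *	// else the job is queued; dmafree() fires dq_start when a channel frees
 */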

void
dmafree(dq)
	struct dmaqueue *dq;
{
	int unit = dq->dq_chan;
	struct dma_softc *sc = &Dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	struct dmaqueue *dn;
	int chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

#ifdef DEBUG
	dmatimo[unit] = 0;
#endif

	DMA_CLEAR(dc);
#if defined(HP340) || defined(HP360) || defined(HP370) || defined(HP380)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif
	/*
	 * Channel is now free.  Look for another job to run on this
	 * channel.
	 */
	dc->dm_job = NULL;
	chan = 1 << unit;
	for (dn = sc->sc_queue.tqh_first; dn != NULL;
	    dn = dn->dq_list.tqe_next) {
		if (dn->dq_chan & chan) {
			/* Found one... */
			TAILQ_REMOVE(&sc->sc_queue, dn, dq_list);
			dc->dm_job = dn;
			dn->dq_chan = dq->dq_chan;
			splx(s);

			/* Start the initiator. */
			(*dn->dq_start)(dn->dq_softc);
			return;
		}
	}
	splx(s);
}
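
/*
 * Completion-side sketch (hypothetical driver code, continuing the
 * dmareq() sketch above): the dq_done callback runs from dmastop() at
 * interrupt time; it typically harvests device status and then
 * releases the channel, which may immediately hand the channel to the
 * next queued job via that job's dq_start:
 *
 *	void
 *	xxdone(arg)
 *		void *arg;
 *	{
 *		struct xx_softc *sc = arg;
 *
 *		// ... read device status, finish up the transfer ...
 *		dmafree(&sc->sc_dq);	// may start another initiator
 *	}
 */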

void
dmago(unit, addr, count, flags)
	int unit;
	char *addr;
	int count;
	int flags;
{
	struct dma_softc *sc = &Dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	char *dmaend = NULL;
	int seg, tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");
#if defined(HP320)
	if (sc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif
#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %p, %x, %x)\n",
		    unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain
	 */
	for (seg = 0; count > 0; seg++) {
		dc->dm_chain[seg].dc_addr = (char *) kvtop(addr);
#if defined(HP380)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP((vm_offset_t)dc->dm_chain[seg].dc_addr);
#endif
		if (count < (tcount = NBPG - ((int)addr & PGOFSET)))
			tcount = count;
		dc->dm_chain[seg].dc_count = tcount;
		addr += tcount;
		count -= tcount;
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;

		/*
		 * Try to compact the DMA transfer if the pages are adjacent.
		 * Note: this will never happen on the first iteration.
		 */
		if (dc->dm_chain[seg].dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (sc->sc_type != DMA_B ||
			dc->dm_chain[seg - 1].dc_count + tcount <= 65536)
#endif
		) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			dmaend += dc->dm_chain[seg].dc_count;
			dc->dm_chain[--seg].dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dc->dm_chain[seg].dc_addr +
			    dc->dm_chain[seg].dc_count;
			dc->dm_chain[seg].dc_count = tcount;
		}
	}
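
	/*
	 * Worked example of the compaction (a sketch, assuming NBPG = 4KB
	 * and a byte-mode transfer): a page-aligned 12KB buffer whose
	 * first two pages are physically contiguous at 0x10000 but whose
	 * third page sits at 0x30000 collapses from three single-page
	 * elements into two:
	 *
	 *	dm_chain[0] = { dc_addr 0x10000, dc_count 8192 }
	 *	dm_chain[1] = { dc_addr 0x30000, dc_count 4096 }
	 *
	 * For word/lword transfers the stored counts are pre-shifted into
	 * transfer units, per the tcount adjustment above; dmaend is
	 * always advanced in bytes.
	 */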
	dc->dm_cur = 0;
	dc->dm_last = --seg;
	dc->dm_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->dm_cmd = DMA_ENAB | DMA_IPL(sc->sc_ipl) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->dm_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->dm_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->dm_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->dm_cmd |= DMA_PRI;
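
	/*
	 * For example (a sketch; the resulting bit pattern depends on the
	 * DMA_* encodings in dmareg.h): a 32-bit read at ipl 4 with no
	 * special priority yields
	 *
	 *	dm_cmd = DMA_ENAB | DMA_IPL(4) | DMA_START | DMA_LWORD
	 *
	 * i.e. completion interrupt enabled, device-to-memory (no
	 * DMA_WRT), longword transfers.
	 */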
#if defined(HP380)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP340) || defined(HP360) || defined(HP370)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP320) || defined(HP350)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_VCFLUSH;
#endif
	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->dm_cur == dc->dm_last)
			dc->dm_cmd &= ~DMA_ENAB;
		else
			dc->dm_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO) {
		if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
		    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD))) {
			printf("dmago: cmd %x, flags %x\n",
			    dc->dm_cmd, dc->dm_flags);
			for (seg = 0; seg <= dc->dm_last; seg++)
				printf("  %d: %d@%p\n", seg,
				    dc->dm_chain[seg].dc_count,
				    dc->dm_chain[seg].dc_addr);
		}
	}
	dmatimo[unit] = 1;
#endif
	DMA_ARM(dc);
}
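
/*
 * Putting it together (hypothetical dq_start callback, continuing the
 * sketch from dmareq() above): once a channel is granted, the driver
 * programs its device and kicks off the transfer on that channel:
 *
 *	void
 *	xxstart(arg)
 *		void *arg;
 *	{
 *		struct xx_softc *sc = arg;
 *
 *		// ... set the device up to transfer ...
 *		dmago(sc->sc_dq.dq_chan, sc->sc_buf, sc->sc_count,
 *		    DMAGO_WORD | DMAGO_READ);	// 16-bit, device-to-memory
 *	}
 */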

void
dmastop(unit)
	int unit;
{
	struct dma_softc *sc = &Dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);
#if defined(HP340) || defined(HP360) || defined(HP370) || defined(HP380)
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif
	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the intr if there's
	 * no job on the channel.
	 */
	if (dc->dm_job != NULL)
		(*dc->dm_job->dq_done)(dc->dm_job->dq_softc);
}

int
dmaintr(arg)
	void *arg;
{
	struct dma_softc *sc = arg;
	struct dma_channel *dc;
	int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
			    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD)))
				printf("dmaintr: flags %x unit %d stat %x next %d\n",
				    dc->dm_flags, i, stat, dc->dm_cur + 1);
		}
		if (stat & DMA_ARMED)
			printf("%s, chan %d: intr when armed\n",
			    sc->sc_xname, i);
#endif
		/*
		 * Load the next segment, or finish up if we're done.
		 */
		dc->dm_cur++;
		if (dc->dm_cur <= dc->dm_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * If we're the last segment, disable the
			 * completion interrupt, if necessary.
			 */
			if (dc->dm_cur == dc->dm_last &&
			    (dc->dm_flags & DMAF_NOINTR))
				dc->dm_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(dc);
		} else
			dmastop(i);
	}
	return (found);
}
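
/*
 * Continuing the worked example from dmago() (a two-segment chain):
 * the first interrupt arrives when segment 0 completes; dm_cur
 * advances to 1 == dm_last, so the channel is cleared and re-armed
 * for segment 1 (with DMA_ENAB stripped first if DMAGO_NOINT was
 * requested).  The second interrupt, if enabled, finds dm_cur past
 * dm_last and finishes up via dmastop().
 */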

#ifdef DEBUG
void
dmatimeout(arg)
	void *arg;
{
	int i, s;
	struct dma_softc *sc = arg;

	for (i = 0; i < NDMACHAN; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("%s: chan %d timeout #%d\n",
				    sc->sc_xname, i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	timeout(dmatimeout, sc, 30 * hz);
}
#endif