/*	$NetBSD: dma.c,v 1.23 1998/08/20 08:33:41 kleink Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1990, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the University of
 *        California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)dma.c       8.1 (Berkeley) 6/10/93
 */

/*
 * DMA driver
 */

#include <machine/hp300spu.h>   /* XXX param.h includes cpu.h */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/device.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>

/*
 * The largest single request will be MAXPHYS bytes, which will require
 * at most MAXPHYS/NBPG+1 chain elements to describe it: MAXPHYS/NBPG
 * elements if none of the buffer pages are physically contiguous, plus
 * one more if the buffer is not page aligned.
 */
#define DMAMAXIO        (MAXPHYS/NBPG+1)
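
/*
 * Worked example (values illustrative; MAXPHYS and NBPG come from the
 * kernel configuration): with MAXPHYS = 64KB and NBPG = 4KB, a worst-case
 * transfer touches 16 discontiguous pages plus one extra element for a
 * non-page-aligned start, so DMAMAXIO = 64*1024/4096 + 1 = 17 elements.
 */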

struct dma_chain {
        int     dc_count;
        char    *dc_addr;
};

struct dma_channel {
        struct  dmaqueue *dm_job;               /* current job */
        struct  dmadevice *dm_hwaddr;           /* registers if DMA_C */
        struct  dmaBdevice *dm_Bhwaddr;         /* registers if not DMA_C */
        char    dm_flags;                       /* misc. flags */
        u_short dm_cmd;                         /* DMA controller command */
        int     dm_cur;                         /* current segment */
        int     dm_last;                        /* last segment */
        struct  dma_chain dm_chain[DMAMAXIO];   /* all segments */
};

struct dma_softc {
        struct  dmareg *sc_dmareg;              /* pointer to our hardware */
        struct  dma_channel sc_chan[NDMACHAN];  /* 2 channels */
        TAILQ_HEAD(, dmaqueue) sc_queue;        /* job queue */
        char    sc_type;                        /* A, B, or C */
        int     sc_ipl;                         /* our interrupt level */
        void    *sc_ih;                         /* interrupt cookie */
} dma_softc;

/* types */
#define DMA_B   0
#define DMA_C   1

/* flags */
#define DMAF_PCFLUSH    0x01
#define DMAF_VCFLUSH    0x02
#define DMAF_NOINTR     0x04

int     dmaintr __P((void *));

#ifdef DEBUG
int     dmadebug = 0;
#define DDB_WORD        0x01    /* same as DMAGO_WORD */
#define DDB_LWORD       0x02    /* same as DMAGO_LWORD */
#define DDB_FOLLOW      0x04
#define DDB_IO          0x08

void    dmatimeout __P((void *));
int     dmatimo[NDMACHAN];

long    dmahits[NDMACHAN];
long    dmamisses[NDMACHAN];
long    dmabyte[NDMACHAN];
long    dmaword[NDMACHAN];
long    dmalword[NDMACHAN];
#endif

/*
 * Initialize the DMA engine, called by dioattach()
 */
void
dmainit()
{
        struct dma_softc *sc = &dma_softc;
        struct dmareg *dma;
        struct dma_channel *dc;
        int i;
        char rev;

        /* There's just one. */
        sc->sc_dmareg = (struct dmareg *)DMA_BASE;
        dma = sc->sc_dmareg;

        /*
         * Determine the DMA type.  A DMA_A or DMA_B will fail the
         * following probe.
         *
         * XXX Don't know how to easily differentiate the A and B cards,
         * so we just hope nobody has an A card (A cards will work if
         * splbio works out to ipl 3).
         */
        if (badbaddr((char *)&dma->dma_id[2])) {
                rev = 'B';
#if !defined(HP320)
                panic("dmainit: DMA card requires hp320 support");
#endif
        } else
                rev = dma->dma_id[2];

        sc->sc_type = (rev == 'B') ? DMA_B : DMA_C;

        TAILQ_INIT(&sc->sc_queue);

        for (i = 0; i < NDMACHAN; i++) {
                dc = &sc->sc_chan[i];
                dc->dm_job = NULL;
                switch (i) {
                case 0:
                        dc->dm_hwaddr = &dma->dma_chan0;
                        dc->dm_Bhwaddr = &dma->dma_Bchan0;
                        break;

                case 1:
                        dc->dm_hwaddr = &dma->dma_chan1;
                        dc->dm_Bhwaddr = &dma->dma_Bchan1;
                        break;

                default:
                        panic("dmainit: more than 2 channels?");
                        /* NOTREACHED */
                }
        }

#ifdef DEBUG
        /* make sure timeout is really not needed */
        timeout(dmatimeout, sc, 30 * hz);
#endif

        printf("98620%c, 2 channels, %d bit DMA\n",
            rev, (rev == 'B') ? 16 : 32);

        /*
         * Defer hooking up our interrupt until the first
         * DMA-using controller has hooked up theirs.
         */
        sc->sc_ih = NULL;
}

/*
 * Compute the ipl and (re)establish the interrupt handler
 * for the DMA controller.
 */
void
dmacomputeipl()
{
        struct dma_softc *sc = &dma_softc;

        if (sc->sc_ih != NULL)
                intr_disestablish(sc->sc_ih);

        /*
         * Our interrupt level must be as high as the highest
         * device using DMA (i.e. splbio).
         */
        sc->sc_ipl = PSLTOIPL(hp300_bioipl);
        sc->sc_ih = intr_establish(dmaintr, sc, sc->sc_ipl, IPL_BIO);
}

int
dmareq(dq)
        struct dmaqueue *dq;
{
        struct dma_softc *sc = &dma_softc;
        int i, chan, s;

#if 1
        s = splhigh();  /* XXXthorpej */
#else
        s = splbio();
#endif

        chan = dq->dq_chan;
        for (i = NDMACHAN - 1; i >= 0; i--) {
                /*
                 * Can we use this channel?
                 */
                if ((chan & (1 << i)) == 0)
                        continue;

                /*
                 * We can use it; is it busy?
                 */
                if (sc->sc_chan[i].dm_job != NULL)
                        continue;

                /*
                 * Not busy; give the caller this channel.
                 */
                sc->sc_chan[i].dm_job = dq;
                dq->dq_chan = i;
                splx(s);
                return (1);
        }

        /*
         * Couldn't get a channel now; put this in the queue.
         */
        TAILQ_INSERT_TAIL(&sc->sc_queue, dq, dq_list);
        splx(s);
        return (0);
}
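
/*
 * Usage sketch for the channel request protocol; "xx_softc", "xxstart"
 * and "xxdone" below are hypothetical names, and DMA0/DMA1 are assumed
 * to be the channel-mask bits from dmavar.h.  A controller fills in a
 * struct dmaqueue with dq_chan set to a bitmask of acceptable channels;
 * dmareq() either grants a channel now (returns 1, with dq_chan
 * rewritten to the channel number) or queues the request (returns 0)
 * and invokes dq_start when a channel later frees up:
 *
 *      struct xx_softc *sc = ...;
 *
 *      sc->sc_dq.dq_softc = sc;
 *      sc->sc_dq.dq_start = xxstart;
 *      sc->sc_dq.dq_done = xxdone;
 *      sc->sc_dq.dq_chan = DMA0 | DMA1;
 *      if (dmareq(&sc->sc_dq))
 *              xxstart(sc);
 */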

void
dmafree(dq)
        struct dmaqueue *dq;
{
        int unit = dq->dq_chan;
        struct dma_softc *sc = &dma_softc;
        struct dma_channel *dc = &sc->sc_chan[unit];
        struct dmaqueue *dn;
        int chan, s;

#if 1
        s = splhigh();  /* XXXthorpej */
#else
        s = splbio();
#endif

#ifdef DEBUG
        dmatimo[unit] = 0;
#endif

        DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
        /*
         * XXX we may not always go through the flush code in dmastop()
         */
        if (dc->dm_flags & DMAF_PCFLUSH) {
                PCIA();
                dc->dm_flags &= ~DMAF_PCFLUSH;
        }
#endif

#if defined(CACHE_HAVE_VAC)
        if (dc->dm_flags & DMAF_VCFLUSH) {
                /*
                 * 320/350s have VACs that may also need flushing.
                 * In our case we only flush the supervisor side
                 * because we know that if we are DMAing to user
                 * space, the physical pages will also be mapped
                 * in kernel space (via vmapbuf) and hence cache-
                 * inhibited by the pmap module due to the multiple
                 * mapping.
                 */
                DCIS();
                dc->dm_flags &= ~DMAF_VCFLUSH;
        }
#endif

        /*
         * Channel is now free.  Look for another job to run on this
         * channel.
         */
        dc->dm_job = NULL;
        chan = 1 << unit;
        for (dn = sc->sc_queue.tqh_first; dn != NULL;
            dn = dn->dq_list.tqe_next) {
                if (dn->dq_chan & chan) {
                        /* Found one... */
                        TAILQ_REMOVE(&sc->sc_queue, dn, dq_list);
                        dc->dm_job = dn;
                        dn->dq_chan = dq->dq_chan;
                        splx(s);

                        /* Start the initiator. */
                        (*dn->dq_start)(dn->dq_softc);
                        return;
                }
        }
        splx(s);
}

void
dmago(unit, addr, count, flags)
        int unit;
        char *addr;
        int count;
        int flags;
{
        struct dma_softc *sc = &dma_softc;
        struct dma_channel *dc = &sc->sc_chan[unit];
        char *dmaend = NULL;
        int seg, tcount;

        if (count > MAXPHYS)
                panic("dmago: count > MAXPHYS");

#if defined(HP320)
        if (sc->sc_type == DMA_B && (flags & DMAGO_LWORD))
                panic("dmago: no can do 32-bit DMA");
#endif

#ifdef DEBUG
        if (dmadebug & DDB_FOLLOW)
                printf("dmago(%d, %p, %x, %x)\n",
                    unit, addr, count, flags);
        if (flags & DMAGO_LWORD)
                dmalword[unit]++;
        else if (flags & DMAGO_WORD)
                dmaword[unit]++;
        else
                dmabyte[unit]++;
#endif
        /*
         * Build the DMA chain
         */
        for (seg = 0; count > 0; seg++) {
                dc->dm_chain[seg].dc_addr = (char *) kvtop(addr);
#if defined(M68040)
                /*
                 * Push back dirty cache lines
                 */
                if (mmutype == MMU_68040)
                        DCFP((paddr_t)dc->dm_chain[seg].dc_addr);
#endif
                if (count < (tcount = NBPG - ((int)addr & PGOFSET)))
                        tcount = count;
                dc->dm_chain[seg].dc_count = tcount;
                addr += tcount;
                count -= tcount;
                if (flags & DMAGO_LWORD)
                        tcount >>= 2;
                else if (flags & DMAGO_WORD)
                        tcount >>= 1;

                /*
                 * Try to compact the DMA transfer if the pages are adjacent.
                 * Note: this will never happen on the first iteration.
                 */
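                /*
                 * Example (illustrative only): if the previous element
                 * ends at physical address P and this page also starts
                 * at P, the new element is folded into the previous one
                 * below (dc_count grows, seg backs up), so a physically
                 * contiguous buffer collapses into a single chain element.
                 */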
                if (dc->dm_chain[seg].dc_addr == dmaend
#if defined(HP320)
                    /* only 16-bit count on 98620B */
                    && (sc->sc_type != DMA_B ||
                        dc->dm_chain[seg - 1].dc_count + tcount <= 65536)
#endif
                    ) {
#ifdef DEBUG
                        dmahits[unit]++;
#endif
                        dmaend += dc->dm_chain[seg].dc_count;
                        dc->dm_chain[--seg].dc_count += tcount;
                } else {
#ifdef DEBUG
                        dmamisses[unit]++;
#endif
                        dmaend = dc->dm_chain[seg].dc_addr +
                            dc->dm_chain[seg].dc_count;
                        dc->dm_chain[seg].dc_count = tcount;
                }
        }
        dc->dm_cur = 0;
        dc->dm_last = --seg;
        dc->dm_flags = 0;
        /*
         * Set up the command word based on flags
         */
        dc->dm_cmd = DMA_ENAB | DMA_IPL(sc->sc_ipl) | DMA_START;
        if ((flags & DMAGO_READ) == 0)
                dc->dm_cmd |= DMA_WRT;
        if (flags & DMAGO_LWORD)
                dc->dm_cmd |= DMA_LWORD;
        else if (flags & DMAGO_WORD)
                dc->dm_cmd |= DMA_WORD;
        if (flags & DMAGO_PRI)
                dc->dm_cmd |= DMA_PRI;

#if defined(M68040)
        /*
         * On the 68040 we need to flush (push) the data cache before a
         * DMA (already done above) and flush again after DMA completes.
         * In theory we should only need to flush prior to a write DMA
         * and purge after a read DMA but if the entire page is not
         * involved in the DMA we might purge some valid data.
         */
        if (mmutype == MMU_68040 && (flags & DMAGO_READ))
                dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_PAC)
        /*
         * Remember if we need to flush external physical cache when
         * DMA is done.  We only do this if we are reading (writing memory).
         */
        if (ectype == EC_PHYS && (flags & DMAGO_READ))
                dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_VAC)
        if (ectype == EC_VIRT && (flags & DMAGO_READ))
                dc->dm_flags |= DMAF_VCFLUSH;
#endif

        /*
         * Remember if we can skip the dma completion interrupt on
         * the last segment in the chain.
         */
        if (flags & DMAGO_NOINT) {
                if (dc->dm_cur == dc->dm_last)
                        dc->dm_cmd &= ~DMA_ENAB;
                else
                        dc->dm_flags |= DMAF_NOINTR;
        }
#ifdef DEBUG
        if (dmadebug & DDB_IO) {
                if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
                    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD))) {
                        printf("dmago: cmd %x, flags %x\n",
                            dc->dm_cmd, dc->dm_flags);
                        for (seg = 0; seg <= dc->dm_last; seg++)
                                printf(" %d: %d@%p\n", seg,
                                    dc->dm_chain[seg].dc_count,
                                    dc->dm_chain[seg].dc_addr);
                }
        }
        dmatimo[unit] = 1;
#endif
        DMA_ARM(sc, dc);
}
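
/*
 * Example invocation (hypothetical; "addr" and "count" stand in for a
 * real caller's current transfer): start a 32-bit-wide read of "count"
 * bytes into kernel virtual address "addr" on the channel granted by
 * dmareq() above:
 *
 *      dmago(sc->sc_dq.dq_chan, addr, count, DMAGO_LWORD | DMAGO_READ);
 *
 * The caller's dq_done routine runs from dmastop() once the final chain
 * segment completes, unless DMAGO_NOINT suppressed the last interrupt.
 */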

void
dmastop(unit)
        int unit;
{
        struct dma_softc *sc = &dma_softc;
        struct dma_channel *dc = &sc->sc_chan[unit];

#ifdef DEBUG
        if (dmadebug & DDB_FOLLOW)
                printf("dmastop(%d)\n", unit);
        dmatimo[unit] = 0;
#endif
        DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
        if (dc->dm_flags & DMAF_PCFLUSH) {
                PCIA();
                dc->dm_flags &= ~DMAF_PCFLUSH;
        }
#endif

#if defined(CACHE_HAVE_VAC)
        if (dc->dm_flags & DMAF_VCFLUSH) {
                /*
                 * 320/350s have VACs that may also need flushing.
                 * In our case we only flush the supervisor side
                 * because we know that if we are DMAing to user
                 * space, the physical pages will also be mapped
                 * in kernel space (via vmapbuf) and hence cache-
                 * inhibited by the pmap module due to the multiple
                 * mapping.
                 */
                DCIS();
                dc->dm_flags &= ~DMAF_VCFLUSH;
        }
#endif

        /*
         * We may get this interrupt after a device service routine
         * has freed the dma channel.  So, ignore the intr if there's
         * nothing on the queue.
         */
        if (dc->dm_job != NULL)
                (*dc->dm_job->dq_done)(dc->dm_job->dq_softc);
}

int
dmaintr(arg)
        void *arg;
{
        struct dma_softc *sc = arg;
        struct dma_channel *dc;
        int i, stat;
        int found = 0;

#ifdef DEBUG
        if (dmadebug & DDB_FOLLOW)
                printf("dmaintr\n");
#endif
        for (i = 0; i < NDMACHAN; i++) {
                dc = &sc->sc_chan[i];
                stat = DMA_STAT(dc);
                if ((stat & DMA_INTR) == 0)
                        continue;
                found++;
#ifdef DEBUG
                if (dmadebug & DDB_IO) {
                        if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
                            ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD)))
                                printf("dmaintr: flags %x unit %d stat %x next %d\n",
                                    dc->dm_flags, i, stat, dc->dm_cur + 1);
                }
                if (stat & DMA_ARMED)
                        printf("dma channel %d: intr when armed\n", i);
#endif
                /*
                 * Load the next segment, or finish up if we're done.
                 */
                dc->dm_cur++;
                if (dc->dm_cur <= dc->dm_last) {
#ifdef DEBUG
                        dmatimo[i] = 1;
#endif
                        /*
                         * If this is the last segment, disable the
                         * completion interrupt if necessary.
                         */
                        if (dc->dm_cur == dc->dm_last &&
                            (dc->dm_flags & DMAF_NOINTR))
                                dc->dm_cmd &= ~DMA_ENAB;
                        DMA_CLEAR(dc);
                        DMA_ARM(sc, dc);
                } else
                        dmastop(i);
        }
        return (found);
}

#ifdef DEBUG
void
dmatimeout(arg)
        void *arg;
{
        int i, s;
        struct dma_softc *sc = arg;

        for (i = 0; i < NDMACHAN; i++) {
                s = splbio();
                if (dmatimo[i]) {
                        if (dmatimo[i] > 1)
                                printf("dma channel %d timeout #%d\n",
                                    i, dmatimo[i]-1);
                        dmatimo[i]++;
                }
                splx(s);
        }
        timeout(dmatimeout, sc, 30 * hz);
}
#endif