dma.c revision 1.26 1 /* $NetBSD: dma.c,v 1.26 2002/03/15 05:55:35 gmcgarry Exp $ */
2
3 /*-
4 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Copyright (c) 1982, 1990, 1993
41 * The Regents of the University of California. All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. All advertising materials mentioning features or use of this software
52 * must display the following acknowledgement:
53 * This product includes software developed by the University of
54 * California, Berkeley and its contributors.
55 * 4. Neither the name of the University nor the names of its contributors
56 * may be used to endorse or promote products derived from this software
57 * without specific prior written permission.
58 *
59 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
60 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
61 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
62 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
63 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
64 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
65 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
66 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
67 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
68 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69 * SUCH DAMAGE.
70 *
71 * @(#)dma.c 8.1 (Berkeley) 6/10/93
72 */
73
74 /*
75 * DMA driver
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: dma.c,v 1.26 2002/03/15 05:55:35 gmcgarry Exp $");
80
81 #include <machine/hp300spu.h> /* XXX param.h includes cpu.h */
82
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/callout.h>
86 #include <sys/time.h>
87 #include <sys/kernel.h>
88 #include <sys/proc.h>
89 #include <sys/device.h>
90
91 #include <machine/frame.h>
92 #include <machine/cpu.h>
93 #include <machine/intr.h>
94
95 #include <hp300/dev/dmareg.h>
96 #include <hp300/dev/dmavar.h>
97
98 /*
99 * The largest single request will be MAXPHYS bytes which will require
100 * at most MAXPHYS/NBPG+1 chain elements to describe, i.e. if none of
101 * the buffer pages are physically contiguous (MAXPHYS/NBPG) and the
102 * buffer is not page aligned (+1).
103 */
104 #define DMAMAXIO (MAXPHYS/NBPG+1)
105
/*
 * One physically-contiguous segment of a DMA transfer.  Built by
 * dmago(); dc_count is expressed in transfer-width units (bytes,
 * words, or longwords depending on the DMAGO_* flags).
 */
struct dma_chain {
	int	dc_count;	/* transfer count, in width units */
	char	*dc_addr;	/* physical start address of segment */
};
110
/*
 * Per-channel software state.  While a transfer is in flight,
 * dm_chain[dm_cur .. dm_last] holds its segments; dm_job is non-NULL
 * for as long as the channel is allocated to a job.
 */
struct dma_channel {
	struct dmaqueue *dm_job;		/* current job */
	struct dmadevice *dm_hwaddr;	/* registers if DMA_C */
	struct dmaBdevice *dm_Bhwaddr;	/* registers if not DMA_C */
	char	dm_flags;		/* misc. flags (DMAF_*) */
	u_short	dm_cmd;			/* DMA controller command */
	int	dm_cur;			/* current segment */
	int	dm_last;		/* last segment */
	struct dma_chain dm_chain[DMAMAXIO];	/* all segments */
};
121
/*
 * Software state for the DMA controller.  There is exactly one,
 * the file-scope instance `dma_softc' defined here.
 */
struct dma_softc {
	struct dmareg *sc_dmareg;		/* pointer to our hardware */
	struct dma_channel sc_chan[NDMACHAN];	/* 2 channels */
	TAILQ_HEAD(, dmaqueue) sc_queue;	/* job queue */
	struct callout sc_debug_ch;		/* DEBUG watchdog (dmatimeout) */
	char	sc_type;			/* A, B, or C */
	int	sc_ipl;				/* our interrupt level */
	void	*sc_ih;				/* interrupt cookie */
} dma_softc;
131
132 /* types */
133 #define DMA_B 0
134 #define DMA_C 1
135
136 /* flags */
137 #define DMAF_PCFLUSH 0x01
138 #define DMAF_VCFLUSH 0x02
139 #define DMAF_NOINTR 0x04
140
141 int dmaintr __P((void *));
142
143 #ifdef DEBUG
144 int dmadebug = 0;
145 #define DDB_WORD 0x01 /* same as DMAGO_WORD */
146 #define DDB_LWORD 0x02 /* same as DMAGO_LWORD */
147 #define DDB_FOLLOW 0x04
148 #define DDB_IO 0x08
149
150 void dmatimeout __P((void *));
151 int dmatimo[NDMACHAN];
152
153 long dmahits[NDMACHAN];
154 long dmamisses[NDMACHAN];
155 long dmabyte[NDMACHAN];
156 long dmaword[NDMACHAN];
157 long dmalword[NDMACHAN];
158 #endif
159
160 /*
161 * Initialize the DMA engine, called by dioattach()
162 */
163 void
164 dmainit()
165 {
166 struct dma_softc *sc = &dma_softc;
167 struct dmareg *dma;
168 struct dma_channel *dc;
169 int i;
170 char rev;
171
172 /* There's just one. */
173 sc->sc_dmareg = (struct dmareg *)DMA_BASE;
174 dma = sc->sc_dmareg;
175
176 /*
177 * Determine the DMA type. A DMA_A or DMA_B will fail the
178 * following probe.
179 *
180 * XXX Don't know how to easily differentiate the A and B cards,
181 * so we just hope nobody has an A card (A cards will work if
182 * splbio works out to ipl 3).
183 */
184 if (badbaddr((char *)&dma->dma_id[2])) {
185 rev = 'B';
186 #if !defined(HP320)
187 panic("dmainit: DMA card requires hp320 support");
188 #endif
189 } else
190 rev = dma->dma_id[2];
191
192 sc->sc_type = (rev == 'B') ? DMA_B : DMA_C;
193
194 TAILQ_INIT(&sc->sc_queue);
195 callout_init(&sc->sc_debug_ch);
196
197 for (i = 0; i < NDMACHAN; i++) {
198 dc = &sc->sc_chan[i];
199 dc->dm_job = NULL;
200 switch (i) {
201 case 0:
202 dc->dm_hwaddr = &dma->dma_chan0;
203 dc->dm_Bhwaddr = &dma->dma_Bchan0;
204 break;
205
206 case 1:
207 dc->dm_hwaddr = &dma->dma_chan1;
208 dc->dm_Bhwaddr = &dma->dma_Bchan1;
209 break;
210
211 default:
212 panic("dmainit: more than 2 channels?");
213 /* NOTREACHED */
214 }
215 }
216
217 #ifdef DEBUG
218 /* make sure timeout is really not needed */
219 callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
220 #endif
221
222 printf("98620%c, 2 channels, %d bit DMA\n",
223 rev, (rev == 'B') ? 16 : 32);
224
225 /*
226 * Defer hooking up our interrupt until the first
227 * DMA-using controller has hooked up theirs.
228 */
229 sc->sc_ih = NULL;
230 }
231
/*
 * Compute the ipl and (re)establish the interrupt handler
 * for the DMA controller.  Safe to call repeatedly: any handler
 * established by a previous call is torn down first.
 */
void
dmacomputeipl()
{
	struct dma_softc *sc = &dma_softc;

	if (sc->sc_ih != NULL)
		intr_disestablish(sc->sc_ih);

	/*
	 * Our interrupt level must be as high as the highest
	 * device using DMA (i.e. splbio).
	 */
	sc->sc_ipl = PSLTOIPL(hp300_ipls[HP300_IPL_BIO]);
	sc->sc_ih = intr_establish(dmaintr, sc, sc->sc_ipl, IPL_BIO);
}
251
252 int
253 dmareq(dq)
254 struct dmaqueue *dq;
255 {
256 struct dma_softc *sc = &dma_softc;
257 int i, chan, s;
258
259 #if 1
260 s = splhigh(); /* XXXthorpej */
261 #else
262 s = splbio();
263 #endif
264
265 chan = dq->dq_chan;
266 for (i = NDMACHAN - 1; i >= 0; i--) {
267 /*
268 * Can we use this channel?
269 */
270 if ((chan & (1 << i)) == 0)
271 continue;
272
273 /*
274 * We can use it; is it busy?
275 */
276 if (sc->sc_chan[i].dm_job != NULL)
277 continue;
278
279 /*
280 * Not busy; give the caller this channel.
281 */
282 sc->sc_chan[i].dm_job = dq;
283 dq->dq_chan = i;
284 splx(s);
285 return (1);
286 }
287
288 /*
289 * Couldn't get a channel now; put this in the queue.
290 */
291 TAILQ_INSERT_TAIL(&sc->sc_queue, dq, dq_list);
292 splx(s);
293 return (0);
294 }
295
296 void
297 dmafree(dq)
298 struct dmaqueue *dq;
299 {
300 int unit = dq->dq_chan;
301 struct dma_softc *sc = &dma_softc;
302 struct dma_channel *dc = &sc->sc_chan[unit];
303 struct dmaqueue *dn;
304 int chan, s;
305
306 #if 1
307 s = splhigh(); /* XXXthorpej */
308 #else
309 s = splbio();
310 #endif
311
312 #ifdef DEBUG
313 dmatimo[unit] = 0;
314 #endif
315
316 DMA_CLEAR(dc);
317
318 #if defined(CACHE_HAVE_PAC) || defined(M68040)
319 /*
320 * XXX we may not always go thru the flush code in dmastop()
321 */
322 if (dc->dm_flags & DMAF_PCFLUSH) {
323 PCIA();
324 dc->dm_flags &= ~DMAF_PCFLUSH;
325 }
326 #endif
327
328 #if defined(CACHE_HAVE_VAC)
329 if (dc->dm_flags & DMAF_VCFLUSH) {
330 /*
331 * 320/350s have VACs that may also need flushing.
332 * In our case we only flush the supervisor side
333 * because we know that if we are DMAing to user
334 * space, the physical pages will also be mapped
335 * in kernel space (via vmapbuf) and hence cache-
336 * inhibited by the pmap module due to the multiple
337 * mapping.
338 */
339 DCIS();
340 dc->dm_flags &= ~DMAF_VCFLUSH;
341 }
342 #endif
343
344 /*
345 * Channel is now free. Look for another job to run on this
346 * channel.
347 */
348 dc->dm_job = NULL;
349 chan = 1 << unit;
350 for (dn = sc->sc_queue.tqh_first; dn != NULL;
351 dn = dn->dq_list.tqe_next) {
352 if (dn->dq_chan & chan) {
353 /* Found one... */
354 TAILQ_REMOVE(&sc->sc_queue, dn, dq_list);
355 dc->dm_job = dn;
356 dn->dq_chan = dq->dq_chan;
357 splx(s);
358
359 /* Start the initiator. */
360 (*dn->dq_start)(dn->dq_softc);
361 return;
362 }
363 }
364 splx(s);
365 }
366
/*
 * Start a DMA transfer of `count' bytes at kernel virtual address
 * `addr' on channel `unit'.  `flags' is a mask of DMAGO_* values
 * selecting transfer width (byte/word/lword), direction (DMAGO_READ),
 * priority (DMAGO_PRI) and completion-interrupt suppression
 * (DMAGO_NOINT).  The channel must already be owned (via dmareq()).
 */
void
dmago(unit, addr, count, flags)
	int unit;
	char *addr;
	int count;
	int flags;
{
	struct dma_softc *sc = &dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	char *dmaend = NULL;	/* physical end of the previous segment */
	int seg, tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");

#if defined(HP320)
	/* The 98620B cannot do 32-bit-wide transfers. */
	if (sc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %p, %x, %x)\n",
		       unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain: one dma_chain element per physically
	 * contiguous run of pages (see compaction below).
	 */
	for (seg = 0; count > 0; seg++) {
		dc->dm_chain[seg].dc_addr = (char *) kvtop(addr);
#if defined(M68040)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP((paddr_t)dc->dm_chain[seg].dc_addr);
#endif
		/* Clamp this segment to the end of the current page. */
		if (count < (tcount = NBPG - ((int)addr & PGOFSET)))
			tcount = count;
		dc->dm_chain[seg].dc_count = tcount;
		addr += tcount;
		count -= tcount;
		/* Convert the byte count into transfer-width units. */
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;

		/*
		 * Try to compact the DMA transfer if the pages are adjacent.
		 * Note: this will never happen on the first iteration.
		 */
		if (dc->dm_chain[seg].dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (sc->sc_type != DMA_B ||
			dc->dm_chain[seg - 1].dc_count + tcount <= 65536)
#endif
		) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			/*
			 * Merge into the previous segment.  dc_count here
			 * is still the byte count set above, used to advance
			 * the physical-end pointer; the shifted tcount is
			 * what accumulates into the previous element.
			 */
			dmaend += dc->dm_chain[seg].dc_count;
			dc->dm_chain[--seg].dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			/* New segment; record its physical end. */
			dmaend = dc->dm_chain[seg].dc_addr +
			    dc->dm_chain[seg].dc_count;
			dc->dm_chain[seg].dc_count = tcount;
		}
	}
	dc->dm_cur = 0;
	dc->dm_last = --seg;
	dc->dm_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->dm_cmd = DMA_ENAB | DMA_IPL(sc->sc_ipl) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->dm_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->dm_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->dm_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->dm_cmd |= DMA_PRI;

#if defined(M68040)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_PAC)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done. We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_VAC)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_VCFLUSH;
#endif

	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->dm_cur == dc->dm_last)
			dc->dm_cmd &= ~DMA_ENAB;
		else
			dc->dm_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO) {
		if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
		    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD))) {
			printf("dmago: cmd %x, flags %x\n",
			       dc->dm_cmd, dc->dm_flags);
			for (seg = 0; seg <= dc->dm_last; seg++)
				printf(" %d: %d@%p\n", seg,
				    dc->dm_chain[seg].dc_count,
				    dc->dm_chain[seg].dc_addr);
		}
	}
	dmatimo[unit] = 1;
#endif
	/* Load the first segment and start the channel. */
	DMA_ARM(sc, dc);
}
513
/*
 * Finish the transfer on channel `unit': disarm the channel, perform
 * any cache flushes requested by dmago(), and invoke the job's
 * completion callback.  Called from dmaintr() when the last segment
 * of the chain completes.
 */
void
dmastop(unit)
	int unit;
{
	struct dma_softc *sc = &dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	/* Shut the channel down before touching the caches. */
	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	/* Deferred physical-cache flush requested by dmago(). */
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel. So, ignore the intr if there's
	 * nothing on the queue.
	 */
	if (dc->dm_job != NULL)
		(*dc->dm_job->dq_done)(dc->dm_job->dq_softc);
}
559
/*
 * DMA completion interrupt handler.  Scan all channels; for each one
 * with a pending interrupt, either arm the next segment of its chain
 * or, when the chain is exhausted, finish the job via dmastop().
 * Returns nonzero if any channel claimed the interrupt.
 */
int
dmaintr(arg)
	void *arg;
{
	struct dma_softc *sc = arg;
	struct dma_channel *dc;
	int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
			    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD)))
				printf("dmaintr: flags %x unit %d stat %x next %d\n",
				       dc->dm_flags, i, stat, dc->dm_cur + 1);
		}
		if (stat & DMA_ARMED)
			printf("dma channel %d: intr when armed\n", i);
#endif
		/*
		 * Load the next segment, or finish up if we're done.
		 */
		dc->dm_cur++;
		if (dc->dm_cur <= dc->dm_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * If we're the last segment, disable the
			 * completion interrupt, if necessary.
			 */
			if (dc->dm_cur == dc->dm_last &&
			    (dc->dm_flags & DMAF_NOINTR))
				dc->dm_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(sc, dc);
		} else
			dmastop(i);
	}
	return(found);
}
611
#ifdef DEBUG
/*
 * DEBUG watchdog: every 30 seconds, scan the per-channel timeout
 * counters and complain about any channel whose transfer has been
 * outstanding since a previous scan.  Reschedules itself.
 */
void
dmatimeout(arg)
	void *arg;
{
	struct dma_softc *sc = arg;
	int chan, s;

	for (chan = 0; chan < NDMACHAN; chan++) {
		s = splbio();
		if (dmatimo[chan] != 0) {
			/* A count above 1 means an earlier scan saw it too. */
			if (dmatimo[chan] > 1)
				printf("dma channel %d timeout #%d\n",
				    chan, dmatimo[chan]-1);
			dmatimo[chan]++;
		}
		splx(s);
	}
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
}
#endif
633