/*	$NetBSD: dma.c,v 1.39 2008/03/29 06:47:07 tsutsui Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)dma.c	8.1 (Berkeley) 6/10/93
 */

/*
 * DMA driver
 */
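
/*
 * Overview of the interface exported to the DMA-capable controller
 * drivers (see dmavar.h): a driver fills in a struct dmaqueue, calls
 * dmareq() to claim a channel, dmago() from its dq_start routine to
 * begin a transfer, and dmafree() from its dq_done routine to release
 * the channel.  A hypothetical caller might look like this (sketch
 * only; the real initiators live in the individual controller drivers,
 * and the mydrv_* names are made up for illustration):
 *
 *	dq->dq_chan = (1 << 0) | (1 << 1);  /# mask of usable channels #/
 *	dq->dq_softc = sc;
 *	dq->dq_start = mydrv_start;	/# called when channel granted #/
 *	dq->dq_done = mydrv_done;	/# called when transfer is done #/
 *	if (dmareq(dq))
 *		mydrv_start(sc);	/# granted now; dq_chan is the
 *					   channel number #/
 *	/# dq_start calls dmago(dq->dq_chan, addr, count, flags);
 *	   dq_done calls dmafree(dq). #/
 */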

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dma.c,v 1.39 2008/03/29 06:47:07 tsutsui Exp $");

#include <machine/hp300spu.h>	/* XXX param.h includes cpu.h */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <m68k/cacheops.h>

#include <hp300/dev/intiovar.h>
#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>

/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/PAGE_SIZE+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/PAGE_SIZE) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/PAGE_SIZE+1)
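
/*
 * For example, with the common values MAXPHYS = 64KB and PAGE_SIZE = 4KB
 * this works out to 16 + 1 = 17 chain elements per channel.
 */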
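/*
 * One element of the software scatter chain: a physically contiguous
 * run of the transfer.  dc_addr is a physical address; dc_count ends
 * up in units of the transfer width (bytes, words, or longwords).
 */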
struct dma_chain {
	int	dc_count;
	char	*dc_addr;
};

struct dma_channel {
	struct	dmaqueue *dm_job;	/* current job */
	struct	dmadevice *dm_hwaddr;	/* registers if DMA_C */
	struct	dmaBdevice *dm_Bhwaddr;	/* registers if not DMA_C */
	char	dm_flags;		/* misc. flags */
	u_short	dm_cmd;			/* DMA controller command */
	int	dm_cur;			/* current segment */
	int	dm_last;		/* last segment */
	struct	dma_chain dm_chain[DMAMAXIO]; /* all segments */
};

struct dma_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;

	struct	dmareg *sc_dmareg;	/* pointer to our hardware */
	struct	dma_channel sc_chan[NDMACHAN]; /* 2 channels */
	TAILQ_HEAD(, dmaqueue) sc_queue; /* job queue */
	struct	callout sc_debug_ch;
	char	sc_type;		/* A, B, or C */
	int	sc_ipl;			/* our interrupt level */
	void	*sc_ih;			/* interrupt cookie */
};

/* types */
#define	DMA_B	0
#define	DMA_C	1

/* flags */
#define	DMAF_PCFLUSH	0x01
#define	DMAF_VCFLUSH	0x02
#define	DMAF_NOINTR	0x04

static int	dmamatch(device_t, cfdata_t, void *);
static void	dmaattach(device_t, device_t, void *);

CFATTACH_DECL_NEW(dma, sizeof(struct dma_softc),
    dmamatch, dmaattach, NULL, NULL);

static int	dmaintr(void *);

#ifdef DEBUG
int	dmadebug = 0;
#define	DDB_WORD	0x01	/* same as DMAGO_WORD */
#define	DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04
#define	DDB_IO		0x08

static void	dmatimeout(void *);
int	dmatimo[NDMACHAN];

long	dmahits[NDMACHAN];
long	dmamisses[NDMACHAN];
long	dmabyte[NDMACHAN];
long	dmaword[NDMACHAN];
long	dmalword[NDMACHAN];
#endif

static struct dma_softc *dma_softc;

static int
dmamatch(device_t parent, cfdata_t cf, void *aux)
{
	struct intio_attach_args *ia = aux;
	static int dmafound = 0;	/* can only have one */

	if (strcmp("dma", ia->ia_modname) != 0 || dmafound)
		return 0;

	dmafound = 1;
	return 1;
}

static void
dmaattach(device_t parent, device_t self, void *aux)
{
	struct dma_softc *sc = device_private(self);
	struct intio_attach_args *ia = aux;
	struct dma_channel *dc;
	struct dmareg *dma;
	int i;
	char rev;

	sc->sc_dev = self;

	/* There's just one. */
	dma_softc = sc;

	sc->sc_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bst, ia->ia_iobase, INTIO_DEVSIZE, 0,
	    &sc->sc_bsh)) {
		aprint_error(": can't map registers\n");
		return;
	}

	dma = bus_space_vaddr(sc->sc_bst, sc->sc_bsh);
	sc->sc_dmareg = dma;

	/*
	 * Determine the DMA type.  A DMA_A or DMA_B will fail the
	 * following probe.
	 *
	 * XXX Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * splbio works out to ipl 3).
	 */
	if (hp300_bus_space_probe(sc->sc_bst, sc->sc_bsh, DMA_ID2, 1) == 0) {
		rev = 'B';
#if !defined(HP320)
		aprint_normal("\n");
		panic("%s: DMA card requires hp320 support", __func__);
#endif
	} else
		rev = dma->dma_id[2];

	sc->sc_type = (rev == 'B') ? DMA_B : DMA_C;

	TAILQ_INIT(&sc->sc_queue);
	callout_init(&sc->sc_debug_ch, 0);

	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		dc->dm_job = NULL;
		switch (i) {
		case 0:
			dc->dm_hwaddr = &dma->dma_chan0;
			dc->dm_Bhwaddr = &dma->dma_Bchan0;
			break;

		case 1:
			dc->dm_hwaddr = &dma->dma_chan1;
			dc->dm_Bhwaddr = &dma->dma_Bchan1;
			break;

		default:
			aprint_normal("\n");
			panic("%s: more than 2 channels?", __func__);
			/* NOTREACHED */
		}
	}

#ifdef DEBUG
	/* make sure timeout is really not needed */
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
#endif

	aprint_normal(": 98620%c, 2 channels, %d-bit DMA\n",
	    rev, (rev == 'B') ? 16 : 32);

	/*
	 * Defer hooking up our interrupt until the first
	 * DMA-using controller has hooked up theirs.
	 */
	sc->sc_ih = NULL;
}

/*
 * Compute the ipl and (re)establish the interrupt handler
 * for the DMA controller.
 */
void
dmacomputeipl(void)
{
	struct dma_softc *sc = dma_softc;

	if (sc->sc_ih != NULL)
		intr_disestablish(sc->sc_ih);

	/*
	 * Our interrupt level must be as high as the highest
	 * device using DMA (i.e. splbio).
	 */
	sc->sc_ipl = PSLTOIPL(hp300_ipl2psl[IPL_BIO]);
	sc->sc_ih = intr_establish(dmaintr, sc, sc->sc_ipl, IPL_BIO);
}

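/*
 * Request a DMA channel.  On entry dq->dq_chan is a bit mask of the
 * channels the caller can use.  If one of them is idle it is claimed,
 * dq->dq_chan is rewritten to that channel's number, and 1 is returned.
 * Otherwise the request is queued and 0 is returned; dq->dq_start will
 * be called from dmafree() once a suitable channel frees up.
 */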
int
dmareq(struct dmaqueue *dq)
{
	struct dma_softc *sc = dma_softc;
	int i, chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

	chan = dq->dq_chan;
	for (i = NDMACHAN - 1; i >= 0; i--) {
		/*
		 * Can we use this channel?
		 */
		if ((chan & (1 << i)) == 0)
			continue;

		/*
		 * We can use it; is it busy?
		 */
		if (sc->sc_chan[i].dm_job != NULL)
			continue;

		/*
		 * Not busy; give the caller this channel.
		 */
		sc->sc_chan[i].dm_job = dq;
		dq->dq_chan = i;
		splx(s);
		return 1;
	}

	/*
	 * Couldn't get a channel now; put this in the queue.
	 */
	TAILQ_INSERT_TAIL(&sc->sc_queue, dq, dq_list);
	splx(s);
	return 0;
}

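/*
 * Release the channel held by dq: clear the hardware, perform any cache
 * flushing still recorded in dm_flags, and hand the channel to the first
 * queued job whose channel mask accepts it, calling that job's dq_start
 * routine.
 */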
void
dmafree(struct dmaqueue *dq)
{
	int unit = dq->dq_chan;
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	struct dmaqueue *dn;
	int chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

#ifdef DEBUG
	dmatimo[unit] = 0;
#endif

	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * Channel is now free.  Look for another job to run on this
	 * channel.
	 */
	dc->dm_job = NULL;
	chan = 1 << unit;
	for (dn = TAILQ_FIRST(&sc->sc_queue); dn != NULL;
	    dn = TAILQ_NEXT(dn, dq_list)) {
		if (dn->dq_chan & chan) {
			/* Found one... */
			TAILQ_REMOVE(&sc->sc_queue, dn, dq_list);
			dc->dm_job = dn;
			dn->dq_chan = dq->dq_chan;
			splx(s);

			/* Start the initiator. */
			(*dn->dq_start)(dn->dq_softc);
			return;
		}
	}
	splx(s);
}

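/*
 * Start a transfer of count bytes at kernel virtual address addr on the
 * given channel.  The buffer is broken into a chain of physically
 * contiguous segments (adjacent runs are merged where possible) and the
 * first segment is armed; dmaintr() arms each subsequent segment as the
 * previous one completes.
 */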
void
dmago(int unit, char *addr, int count, int flags)
{
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	char *dmaend = NULL;
	int seg, tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");

#if defined(HP320)
	if (sc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: 98620B can't do 32-bit DMA");
#endif

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %p, %x, %x)\n",
		    unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain
	 */
	for (seg = 0; count > 0; seg++) {
		dc->dm_chain[seg].dc_addr = (char *) kvtop(addr);
#if defined(M68040)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP((paddr_t)dc->dm_chain[seg].dc_addr);
#endif
		if (count < (tcount = PAGE_SIZE - ((int)addr & PGOFSET)))
			tcount = count;
		dc->dm_chain[seg].dc_count = tcount;
		addr += tcount;
		count -= tcount;
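		/*
		 * The hardware transfer count is in units of the
		 * transfer width, so convert the byte count to words
		 * or longwords as appropriate.
		 */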
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;

		/*
		 * Try to compact the DMA transfer if the pages are adjacent.
		 * Note: this will never happen on the first iteration.
		 */
		if (dc->dm_chain[seg].dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (sc->sc_type != DMA_B ||
			dc->dm_chain[seg - 1].dc_count + tcount <= 65536)
#endif
		    ) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			dmaend += dc->dm_chain[seg].dc_count;
			dc->dm_chain[--seg].dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dc->dm_chain[seg].dc_addr +
			    dc->dm_chain[seg].dc_count;
			dc->dm_chain[seg].dc_count = tcount;
		}
	}
	dc->dm_cur = 0;
	dc->dm_last = --seg;
	dc->dm_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->dm_cmd = DMA_ENAB | DMA_IPL(sc->sc_ipl) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->dm_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->dm_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->dm_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->dm_cmd |= DMA_PRI;

#if defined(M68040)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_PAC)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_VAC)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_VCFLUSH;
#endif

	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->dm_cur == dc->dm_last)
			dc->dm_cmd &= ~DMA_ENAB;
		else
			dc->dm_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO) {
		if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
		    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD))) {
			printf("dmago: cmd %x, flags %x\n",
			    dc->dm_cmd, dc->dm_flags);
			for (seg = 0; seg <= dc->dm_last; seg++)
				printf(" %d: %d@%p\n", seg,
				    dc->dm_chain[seg].dc_count,
				    dc->dm_chain[seg].dc_addr);
		}
	}
	dmatimo[unit] = 1;
#endif
	DMA_ARM(sc, dc);
}

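/*
 * Finish the transfer on the given channel: clear the hardware, flush
 * caches as recorded in dm_flags, and call the owning job's dq_done
 * routine (which normally calls dmafree()).
 */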
void
dmastop(int unit)
{
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the intr if there's
	 * nothing on the queue.
	 */
	if (dc->dm_job != NULL)
		(*dc->dm_job->dq_done)(dc->dm_job->dq_softc);
}

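/*
 * Interrupt handler shared by both channels: for each channel showing
 * an interrupt, either arm the next segment of its chain or, if the
 * chain is exhausted, finish up via dmastop().
 */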
static int
dmaintr(void *arg)
{
	struct dma_softc *sc = arg;
	struct dma_channel *dc;
	int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
			    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD)))
				printf("dmaintr: flags %x unit %d stat %x "
				    "next %d\n",
				    dc->dm_flags, i, stat, dc->dm_cur + 1);
		}
		if (stat & DMA_ARMED)
			printf("dma channel %d: intr when armed\n", i);
#endif
		/*
		 * Load the next segment, or finish up if we're done.
		 */
		dc->dm_cur++;
		if (dc->dm_cur <= dc->dm_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * If we're the last segment, disable the
			 * completion interrupt, if necessary.
			 */
			if (dc->dm_cur == dc->dm_last &&
			    (dc->dm_flags & DMAF_NOINTR))
				dc->dm_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(sc, dc);
		} else
			dmastop(i);
	}
	return found;
}

#ifdef DEBUG
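/*
 * Debug watchdog, rescheduled every 30 seconds: dmatimo[i] is set to 1
 * when a segment is armed and cleared when the channel stops, so a
 * channel whose counter keeps climbing here was armed but never
 * completed, and is reported as a timeout.
 */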
static void
dmatimeout(void *arg)
{
	int i, s;
	struct dma_softc *sc = arg;

	for (i = 0; i < NDMACHAN; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("dma channel %d timeout #%d\n",
				    i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
}
#endif