/*	$NetBSD: dma.c,v 1.39.4.3 2010/08/11 22:52:01 yamt Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)dma.c	8.1 (Berkeley) 6/10/93
 */

/*
 * DMA driver
 */

#include "opt_m68k_arch.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dma.c,v 1.39.4.3 2010/08/11 22:52:01 yamt Exp $");

#include <machine/hp300spu.h>	/* XXX param.h includes cpu.h */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <m68k/cacheops.h>

#include <hp300/dev/intiovar.h>
#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>

/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/PAGE_SIZE+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/PAGE_SIZE) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/PAGE_SIZE+1)
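/*
 * For example, assuming MAXPHYS is 64KB and PAGE_SIZE is 4KB, a
 * worst-case request (unaligned start, no two pages physically
 * contiguous) touches 17 pages and so needs 64/4 + 1 = 17 chain
 * elements.
 */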

struct dma_chain {
	int	dc_count;
	char	*dc_addr;
};

struct dma_channel {
	struct	dmaqueue *dm_job;		/* current job */
	struct	dmadevice *dm_hwaddr;		/* registers if DMA_C */
	struct	dmaBdevice *dm_Bhwaddr;		/* registers if not DMA_C */
	char	dm_flags;			/* misc. flags */
	u_short	dm_cmd;				/* DMA controller command */
	int	dm_cur;				/* current segment */
	int	dm_last;			/* last segment */
	struct	dma_chain dm_chain[DMAMAXIO];	/* all segments */
};

struct dma_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;

	struct dmareg *sc_dmareg;		/* pointer to our hardware */
	struct dma_channel sc_chan[NDMACHAN];	/* 2 channels */
	TAILQ_HEAD(, dmaqueue) sc_queue;	/* job queue */
	struct callout sc_debug_ch;
	char	sc_type;			/* A, B, or C */
	int	sc_ipl;				/* our interrupt level */
	void	*sc_ih;				/* interrupt cookie */
};

/* types */
#define	DMA_B	0
#define	DMA_C	1

/* flags */
#define	DMAF_PCFLUSH	0x01		/* flush physical cache when done */
#define	DMAF_VCFLUSH	0x02		/* flush virtual cache when done */
#define	DMAF_NOINTR	0x04		/* skip completion intr on last seg */

static int	dmamatch(device_t, cfdata_t, void *);
static void	dmaattach(device_t, device_t, void *);

CFATTACH_DECL_NEW(dma, sizeof(struct dma_softc),
    dmamatch, dmaattach, NULL, NULL);

static int	dmaintr(void *);

#ifdef DEBUG
int	dmadebug = 0;
#define DDB_WORD	0x01	/* same as DMAGO_WORD */
#define DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04
#define DDB_IO		0x08

static void	dmatimeout(void *);
int	dmatimo[NDMACHAN];

long	dmahits[NDMACHAN];
long	dmamisses[NDMACHAN];
long	dmabyte[NDMACHAN];
long	dmaword[NDMACHAN];
long	dmalword[NDMACHAN];
#endif

static struct dma_softc *dma_softc;

static int
dmamatch(device_t parent, cfdata_t cf, void *aux)
{
	struct intio_attach_args *ia = aux;
	static int dmafound = 0;	/* can only have one */

	if (strcmp("dma", ia->ia_modname) != 0 || dmafound)
		return 0;

	dmafound = 1;
	return 1;
}

static void
dmaattach(device_t parent, device_t self, void *aux)
{
	struct dma_softc *sc = device_private(self);
	struct intio_attach_args *ia = aux;
	struct dma_channel *dc;
	struct dmareg *dma;
	int i;
	char rev;

	sc->sc_dev = self;

	/* There's just one. */
	dma_softc = sc;

	sc->sc_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bst, ia->ia_iobase, INTIO_DEVSIZE, 0,
	    &sc->sc_bsh)) {
		aprint_error(": can't map registers\n");
		return;
	}

	dma = bus_space_vaddr(sc->sc_bst, sc->sc_bsh);
	sc->sc_dmareg = dma;

	/*
	 * Determine the DMA type.  A DMA_A or DMA_B will fail the
	 * following probe.
	 *
	 * XXX Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * splbio works out to ipl 3).
	 */
	if (hp300_bus_space_probe(sc->sc_bst, sc->sc_bsh, DMA_ID2, 1) == 0) {
		rev = 'B';
#if !defined(HP320)
		aprint_normal("\n");
		panic("%s: DMA card requires hp320 support", __func__);
#endif
	} else
		rev = dma->dma_id[2];

	sc->sc_type = (rev == 'B') ? DMA_B : DMA_C;

	TAILQ_INIT(&sc->sc_queue);
	callout_init(&sc->sc_debug_ch, 0);

	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		dc->dm_job = NULL;
		switch (i) {
		case 0:
			dc->dm_hwaddr = &dma->dma_chan0;
			dc->dm_Bhwaddr = &dma->dma_Bchan0;
			break;

		case 1:
			dc->dm_hwaddr = &dma->dma_chan1;
			dc->dm_Bhwaddr = &dma->dma_Bchan1;
			break;

		default:
			aprint_normal("\n");
			panic("%s: more than 2 channels?", __func__);
			/* NOTREACHED */
		}
	}

#ifdef DEBUG
	/* make sure timeout is really not needed */
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
#endif

	aprint_normal(": 98620%c, 2 channels, %d-bit DMA\n",
	    rev, (rev == 'B') ? 16 : 32);

	/*
	 * Defer hooking up our interrupt until the first
	 * DMA-using controller has hooked up theirs.
	 */
	sc->sc_ih = NULL;
}

/*
 * Compute the ipl and (re)establish the interrupt handler
 * for the DMA controller.
 */
void
dmacomputeipl(void)
{
	struct dma_softc *sc = dma_softc;

	if (sc->sc_ih != NULL)
		intr_disestablish(sc->sc_ih);

	/*
	 * Our interrupt level must be as high as the highest
	 * device using DMA (i.e. splbio).
	 */
	sc->sc_ipl = PSLTOIPL(ipl2psl_table[IPL_VM]);
	sc->sc_ih = intr_establish(dmaintr, sc, sc->sc_ipl, IPL_VM);
}

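/*
 * Request a DMA channel for the job `dq'.  On entry dq->dq_chan is a
 * bitmask of acceptable channels.  If one of them is idle it is claimed,
 * dq->dq_chan is overwritten with the winning channel number, and 1 is
 * returned.  Otherwise the job is queued and 0 is returned; the caller's
 * dq_start routine will be invoked from dmafree() once a channel frees up.
 *
 * An illustrative caller (xxstart/xxdone are hypothetical callbacks):
 *
 *	dq->dq_chan = (1 << 0) | (1 << 1);	(either channel acceptable)
 *	dq->dq_softc = sc;
 *	dq->dq_start = xxstart;			(channel granted later)
 *	dq->dq_done = xxdone;			(transfer completed)
 *	if (dmareq(dq))
 *		xxstart(sc);			(channel granted immediately)
 */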
int
dmareq(struct dmaqueue *dq)
{
	struct dma_softc *sc = dma_softc;
	int i, chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

	chan = dq->dq_chan;
	for (i = NDMACHAN - 1; i >= 0; i--) {
		/*
		 * Can we use this channel?
		 */
		if ((chan & (1 << i)) == 0)
			continue;

		/*
		 * We can use it; is it busy?
		 */
		if (sc->sc_chan[i].dm_job != NULL)
			continue;

		/*
		 * Not busy; give the caller this channel.
		 */
		sc->sc_chan[i].dm_job = dq;
		dq->dq_chan = i;
		splx(s);
		return 1;
	}

	/*
	 * Couldn't get a channel now; put this in the queue.
	 */
	TAILQ_INSERT_TAIL(&sc->sc_queue, dq, dq_list);
	splx(s);
	return 0;
}

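/*
 * Release the channel held by `dq', flushing any caches the transfer
 * may have left stale.  If another job is queued for this channel,
 * hand the channel over and fire that job's dq_start routine.
 */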
void
dmafree(struct dmaqueue *dq)
{
	int unit = dq->dq_chan;
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	struct dmaqueue *dn;
	int chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

#ifdef DEBUG
	dmatimo[unit] = 0;
#endif

	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * Channel is now free.  Look for another job to run on this
	 * channel.
	 */
	dc->dm_job = NULL;
	chan = 1 << unit;
	for (dn = TAILQ_FIRST(&sc->sc_queue); dn != NULL;
	    dn = TAILQ_NEXT(dn, dq_list)) {
		if (dn->dq_chan & chan) {
			/* Found one... */
			TAILQ_REMOVE(&sc->sc_queue, dn, dq_list);
			dc->dm_job = dn;
			dn->dq_chan = dq->dq_chan;
			splx(s);

			/* Start the initiator. */
			(*dn->dq_start)(dn->dq_softc);
			return;
		}
	}
	splx(s);
}

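/*
 * Arm channel `unit' for a transfer of `count' bytes at kernel virtual
 * address `addr'.  The buffer is translated a page at a time into a
 * chain of physical segments, with physically contiguous pages merged
 * into a single segment; the chain is then walked by dmaintr() one
 * segment per completion interrupt.
 */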
void
dmago(int unit, char *addr, int count, int flags)
{
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	char *dmaend = NULL;
	int seg, tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");

#if defined(HP320)
	if (sc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %p, %x, %x)\n",
		    unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain
	 */
	for (seg = 0; count > 0; seg++) {
		dc->dm_chain[seg].dc_addr = (char *) kvtop(addr);
#if defined(M68040)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP((paddr_t)dc->dm_chain[seg].dc_addr);
#endif
		if (count < (tcount = PAGE_SIZE - ((int)addr & PGOFSET)))
			tcount = count;
		dc->dm_chain[seg].dc_count = tcount;
		addr += tcount;
		count -= tcount;
		/*
		 * Scale the byte count down to the transfer width;
		 * the controller counts transfers, not bytes.
		 */
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;

		/*
		 * Try to compact the DMA transfer if the pages are adjacent.
		 * Note: this will never happen on the first iteration.
		 */
		if (dc->dm_chain[seg].dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (sc->sc_type != DMA_B ||
			dc->dm_chain[seg - 1].dc_count + tcount <= 65536)
#endif
		    ) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			dmaend += dc->dm_chain[seg].dc_count;
			dc->dm_chain[--seg].dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dc->dm_chain[seg].dc_addr +
			    dc->dm_chain[seg].dc_count;
			dc->dm_chain[seg].dc_count = tcount;
		}
	}
	dc->dm_cur = 0;
	dc->dm_last = --seg;
	dc->dm_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->dm_cmd = DMA_ENAB | DMA_IPL(sc->sc_ipl) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->dm_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->dm_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->dm_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->dm_cmd |= DMA_PRI;

#if defined(M68040)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_PAC)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_VAC)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_VCFLUSH;
#endif

	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->dm_cur == dc->dm_last)
			dc->dm_cmd &= ~DMA_ENAB;
		else
			dc->dm_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO) {
		if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
		    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD))) {
			printf("dmago: cmd %x, flags %x\n",
			    dc->dm_cmd, dc->dm_flags);
			for (seg = 0; seg <= dc->dm_last; seg++)
				printf(" %d: %d@%p\n", seg,
				    dc->dm_chain[seg].dc_count,
				    dc->dm_chain[seg].dc_addr);
		}
	}
	dmatimo[unit] = 1;
#endif
	DMA_ARM(sc, dc);
}

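/*
 * Finish up channel `unit' after its final segment: clear the hardware,
 * flush caches if the transfer required it, and notify the owning job
 * through its dq_done callback.
 */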
void
dmastop(int unit)
{
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the intr if there's
	 * nothing on the queue.
	 */
	if (dc->dm_job != NULL)
		(*dc->dm_job->dq_done)(dc->dm_job->dq_softc);
}

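/*
 * Interrupt handler: check both channels for a completion.  A channel
 * with segments remaining is re-armed with the next one; a channel on
 * its last segment is wound down via dmastop().  Returns nonzero if
 * any channel claimed the interrupt.
 */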
static int
dmaintr(void *arg)
{
	struct dma_softc *sc = arg;
	struct dma_channel *dc;
	int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
			    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD)))
				printf("dmaintr: flags %x unit %d stat %x "
				    "next %d\n",
				    dc->dm_flags, i, stat, dc->dm_cur + 1);
		}
		if (stat & DMA_ARMED)
			printf("dma channel %d: intr when armed\n", i);
#endif
		/*
		 * Load the next segment, or finish up if we're done.
		 */
		dc->dm_cur++;
		if (dc->dm_cur <= dc->dm_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * If we're the last segment, disable the
			 * completion interrupt, if necessary.
			 */
			if (dc->dm_cur == dc->dm_last &&
			    (dc->dm_flags & DMAF_NOINTR))
				dc->dm_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(sc, dc);
		} else
			dmastop(i);
	}
	return found;
}

#ifdef DEBUG
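/*
 * Watchdog for the DEBUG sanity check: dmatimo[unit] is set to 1
 * whenever a channel is armed and cleared on completion.  If
 * successive 30-second polls find it still set, the transfer is
 * presumed stuck and a timeout message is printed.
 */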
static void
dmatimeout(void *arg)
{
	int i, s;
	struct dma_softc *sc = arg;

	for (i = 0; i < NDMACHAN; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("dma channel %d timeout #%d\n",
				    i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
}
#endif