/*	$NetBSD: dma.c,v 1.6 1995/12/02 02:46:45 thorpej Exp $	*/

/*
 * Copyright (c) 1995 Jason R. Thorpe.
 * Copyright (c) 1982, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)dma.c	8.1 (Berkeley) 6/10/93
 */

/*
 * DMA driver
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <machine/cpu.h>

#include <hp300/dev/device.h>
#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>

#include <hp300/hp300/isr.h>

extern void isrlink();
extern void _insque();
extern void _remque();
extern u_int kvtop();
extern void PCIA();

/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/NBPG+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/NBPG) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/NBPG+1)

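/*
 * For illustration only (the actual values are configuration-dependent):
 * with MAXPHYS of 64k and NBPG of 4096, the worst case works out to
 * 64*1024/4096 + 1 == 17 chain elements.
 */
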
struct dma_chain {
	int	dc_count;	/* transfer count (in transfer-size units) */
	char	*dc_addr;	/* physical address of the segment */
};

struct dma_channel {
	struct	dma_softc *dm_softc;	/* pointer back to softc */
	struct	dmadevice *dm_hwaddr;	/* registers if DMA_C */
	struct	dmaBdevice *dm_Bhwaddr;	/* registers if not DMA_C */
	char	dm_flags;		/* misc. flags */
	u_short	dm_cmd;			/* DMA controller command */
	struct	dma_chain *dm_cur;	/* current segment */
	struct	dma_chain *dm_last;	/* last segment */
	struct	dma_chain dm_chain[DMAMAXIO];	/* all segments */
};

struct dma_softc {
	char	*sc_xname;		/* XXX external name */
	struct	dmareg *sc_dmareg;	/* pointer to our hardware */
	struct	dma_channel sc_chan[NDMACHAN];	/* 2 channels */
	char	sc_type;		/* A, B, or C */
} Dma_softc;

/* types */
#define	DMA_B	0
#define	DMA_C	1

/* flags */
#define	DMAF_PCFLUSH	0x01
#define	DMAF_VCFLUSH	0x02
#define	DMAF_NOINTR	0x04

/*
 * Per-channel device queues; the extra entry is the queue of
 * requests waiting for any acceptable channel.
 */
struct	devqueue dmachan[NDMACHAN + 1];
int	dmaintr();

#ifdef DEBUG
int	dmadebug = 0;
#define	DDB_WORD	0x01	/* same as DMAGO_WORD */
#define	DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04
#define	DDB_IO		0x08

void	dmatimeout __P((void *));
int	dmatimo[NDMACHAN];

long	dmahits[NDMACHAN];
long	dmamisses[NDMACHAN];
long	dmabyte[NDMACHAN];
long	dmaword[NDMACHAN];
long	dmalword[NDMACHAN];
#endif

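/*
 * Typical use by a device driver, as inferred from the routines
 * below (an illustrative sketch, not taken from a real caller):
 *
 *	dq->dq_ctlr = <mask of acceptable channels>;
 *	if (dmareq(dq) == 0)
 *		return;			<- d_start called by dmafree()
 *	dmago(dq->dq_ctlr, addr, count, DMAGO_READ|DMAGO_WORD);
 *	...				<- d_done called from dmastop()
 *	dmafree(dq);
 */
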
void
dmainit()
{
	struct dma_softc *sc = &Dma_softc;
	struct dmareg *dma;
	struct dma_channel *dc;
	int i;
	char rev;

	/* There's just one. */
	sc->sc_dmareg = (struct dmareg *)DMA_BASE;
	dma = sc->sc_dmareg;
	sc->sc_xname = "dma0";

	/*
	 * Determine the DMA type.  A DMA_A or DMA_B will fail the
	 * following probe.
	 *
	 * XXX Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * DMAINTLVL is set to 3).
	 */
	if (badbaddr((char *)&dma->dma_id[2])) {
		rev = 'B';
#if !defined(HP320)
		panic("dmainit: DMA card requires hp320 support");
#endif
	} else
		rev = dma->dma_id[2];

	sc->sc_type = (rev == 'B') ? DMA_B : DMA_C;

	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		dc->dm_softc = sc;
		switch (i) {
		case 0:
			dc->dm_hwaddr = &dma->dma_chan0;
			dc->dm_Bhwaddr = &dma->dma_Bchan0;
			break;

		case 1:
			dc->dm_hwaddr = &dma->dma_chan1;
			dc->dm_Bhwaddr = &dma->dma_Bchan1;
			break;

		default:
			panic("dmainit: more than 2 channels?");
			/* NOTREACHED */
		}
		dmachan[i].dq_forw = dmachan[i].dq_back = &dmachan[i];
	}
	/* i == NDMACHAN here; this initializes the wait queue. */
	dmachan[i].dq_forw = dmachan[i].dq_back = &dmachan[i];
#ifdef DEBUG
	/* make sure timeout is really not needed */
	timeout(dmatimeout, sc, 30 * hz);
#endif

	printf("%s: 98620%c, 2 channels, %d bit\n", sc->sc_xname,
	    rev, (rev == 'B') ? 16 : 32);
}

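/*
 * Request a DMA channel.  On entry dq->dq_ctlr holds a bit mask of
 * acceptable channels.  If one of them is free, the request is queued
 * on that channel, dq->dq_ctlr is rewritten to the channel number, and
 * 1 is returned.  Otherwise the request is appended to the wait queue
 * and 0 is returned; dmafree() will call the requester's d_start
 * routine when a suitable channel is released.
 */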
int
dmareq(dq)
	register struct devqueue *dq;
{
	register int i;
	register int chan;
	register int s = splbio();

	chan = dq->dq_ctlr;
	i = NDMACHAN;
	while (--i >= 0) {
		if ((chan & (1 << i)) == 0)
			continue;
		if (dmachan[i].dq_forw != &dmachan[i])
			continue;
		insque(dq, &dmachan[i]);
		dq->dq_ctlr = i;
		splx(s);
		return(1);
	}
	insque(dq, dmachan[NDMACHAN].dq_back);
	splx(s);
	return(0);
}

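/*
 * Release the channel held by dq: clear the channel, perform any
 * cache flushing deferred from dmastop(), then hand the channel to
 * the first waiter (if any) whose channel mask includes it and kick
 * off that waiter's d_start routine.
 */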
void
dmafree(dq)
	register struct devqueue *dq;
{
	int unit = dq->dq_ctlr;
	struct dma_softc *sc = &Dma_softc;
	register struct dma_channel *dc = &sc->sc_chan[unit];
	register struct devqueue *dn;
	register int chan, s;

	s = splbio();
#ifdef DEBUG
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);
#if defined(HP360) || defined(HP370) || defined(HP380)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif
	remque(dq);
	chan = 1 << unit;
	for (dn = dmachan[NDMACHAN].dq_forw;
	    dn != &dmachan[NDMACHAN]; dn = dn->dq_forw) {
		if (dn->dq_ctlr & chan) {
			remque((caddr_t)dn);
			insque((caddr_t)dn, (caddr_t)dq->dq_back);
			splx(s);
			dn->dq_ctlr = dq->dq_ctlr;
			(dn->dq_driver->d_start)(dn->dq_unit);
			return;
		}
	}
	splx(s);
}

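/*
 * Start a transfer of `count' bytes at kernel virtual address `addr'
 * on channel `unit'.  The buffer is translated into a chain of
 * physical segments (at most one per page, coalescing contiguous
 * pages), a command word is built from `flags', and the channel is
 * armed with the first segment; dmaintr() advances through the rest.
 */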
void
dmago(unit, addr, count, flags)
	int unit;
	register char *addr;
	register int count;
	register int flags;
{
	struct dma_softc *sc = &Dma_softc;
	register struct dma_channel *dc = &sc->sc_chan[unit];
	register struct dma_chain *dcp;
	register char *dmaend = NULL;
	register int tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");
#if defined(HP320)
	if (sc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif
#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %x, %x, %x)\n",
		    unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain: one element per physical page, merging
	 * an element into the previous one whenever the two are
	 * physically contiguous.
	 */
	for (dcp = dc->dm_chain; count > 0; dcp++) {
		dcp->dc_addr = (char *) kvtop(addr);
#if defined(HP380)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP(dcp->dc_addr);
#endif
		if (count < (tcount = NBPG - ((int)addr & PGOFSET)))
			tcount = count;
		dcp->dc_count = tcount;
		addr += tcount;
		count -= tcount;
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;
		if (dcp->dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (sc->sc_type != DMA_B ||
			(dcp-1)->dc_count + tcount <= 65536)
#endif
		) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			dmaend += dcp->dc_count;
			(--dcp)->dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dcp->dc_addr + dcp->dc_count;
			dcp->dc_count = tcount;
		}
	}
	dc->dm_cur = dc->dm_chain;
	dc->dm_last = --dcp;
	dc->dm_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->dm_cmd = DMA_ENAB | DMA_IPL(DMAINTLVL) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->dm_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->dm_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->dm_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->dm_cmd |= DMA_PRI;
#if defined(HP380)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP360) || defined(HP370)
	/*
	 * Remember if we need to flush the external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP320) || defined(HP350)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_VCFLUSH;
#endif
	/*
	 * Remember if we can skip the DMA completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->dm_cur == dc->dm_last)
			dc->dm_cmd &= ~DMA_ENAB;
		else
			dc->dm_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO)
		if (((dmadebug & DDB_WORD) && (dc->dm_cmd & DMA_WORD)) ||
		    ((dmadebug & DDB_LWORD) && (dc->dm_cmd & DMA_LWORD))) {
			printf("dmago: cmd %x, flags %x\n",
			    dc->dm_cmd, dc->dm_flags);
			for (dcp = dc->dm_chain; dcp <= dc->dm_last; dcp++)
				printf("  %d: %d@%x\n", dcp-dc->dm_chain,
				    dcp->dc_count, dcp->dc_addr);
		}
	dmatimo[unit] = 1;
#endif
	DMA_ARM(dc);
}

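/*
 * Terminate a transfer on `unit': clear the channel, perform any
 * needed cache flushing, and notify the owning device via its d_done
 * routine.  Called from dmaintr() when the final chain segment
 * completes.
 */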
void
dmastop(unit)
	register int unit;
{
	struct dma_softc *sc = &Dma_softc;
	register struct dma_channel *dc = &sc->sc_chan[unit];
	register struct devqueue *dq;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);
#if defined(HP360) || defined(HP370) || defined(HP380)
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif
	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the intr if there's
	 * nothing on the queue.
	 */
	dq = dmachan[unit].dq_forw;
	if (dq != &dmachan[unit])
		(dq->dq_driver->d_done)(dq->dq_unit);
}

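/*
 * DMA completion interrupt.  Check both channels; for each one that
 * is interrupting, either arm the next segment of its chain or, if
 * the chain is exhausted, finish up via dmastop().  Returns the
 * number of interrupting channels found (0 if the interrupt was not
 * ours).
 */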
int
dmaintr()
{
	struct dma_softc *sc = &Dma_softc;
	register struct dma_channel *dc;
	register int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if (((dmadebug & DDB_WORD) && (dc->dm_cmd & DMA_WORD)) ||
			    ((dmadebug & DDB_LWORD) && (dc->dm_cmd & DMA_LWORD)))
				printf("dmaintr: unit %d stat %x next %d\n",
				    i, stat, (dc->dm_cur-dc->dm_chain)+1);
		}
		if (stat & DMA_ARMED)
			printf("%s, chan %d: intr when armed\n",
			    sc->sc_xname, i);
#endif
		if (++dc->dm_cur <= dc->dm_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * Arm the next segment.  If it is the last one
			 * and the caller asked to skip the completion
			 * interrupt, disable the interrupt enable bit.
			 */
			if (dc->dm_cur == dc->dm_last &&
			    (dc->dm_flags & DMAF_NOINTR))
				dc->dm_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(dc);
		} else
			dmastop(i);
	}
	return(found);
}

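/*
 * Debug watchdog, rescheduled every 30 seconds: complain about any
 * channel whose transfer has been pending since the previous scan.
 * dmatimo[i] is set to 1 when a segment is started and cleared when
 * the channel is stopped or freed.
 */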
#ifdef DEBUG
void
dmatimeout(arg)
	void *arg;
{
	register int i, s;
	struct dma_softc *sc = arg;

	for (i = 0; i < NDMACHAN; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("%s: chan %d timeout #%d\n",
				    sc->sc_xname, i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	timeout(dmatimeout, sc, 30 * hz);
}
#endif