/*
 * Copyright (c) 1982, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)dma.c	7.5 (Berkeley) 5/4/91
 */

/*
 * DMA driver
 */

40 1.1 cgd #include "param.h"
41 1.1 cgd #include "systm.h"
42 1.1 cgd #include "time.h"
43 1.1 cgd #include "kernel.h"
44 1.1 cgd #include "proc.h"
45 1.1 cgd
46 1.1 cgd #include "dmareg.h"
47 1.1 cgd #include "dmavar.h"
48 1.1 cgd #include "device.h"
49 1.1 cgd
50 1.1 cgd #include "../include/cpu.h"
51 1.1 cgd #include "../hp300/isr.h"
52 1.1 cgd
53 1.1 cgd extern void isrlink();
54 1.1 cgd extern void _insque();
55 1.1 cgd extern void _remque();
56 1.1 cgd extern void timeout();
57 1.1 cgd extern u_int kvtop();
58 1.1 cgd extern void PCIA();
59 1.1 cgd
60 1.1 cgd /*
61 1.1 cgd * The largest single request will be MAXPHYS bytes which will require
62 1.1 cgd * at most MAXPHYS/NBPG+1 chain elements to describe, i.e. if none of
63 1.1 cgd * the buffer pages are physically contiguous (MAXPHYS/NBPG) and the
64 1.1 cgd * buffer is not page aligned (+1).
65 1.1 cgd */
66 1.1 cgd #define DMAMAXIO (MAXPHYS/NBPG+1)
67 1.1 cgd
68 1.1 cgd struct dma_chain {
69 1.1 cgd int dc_count;
70 1.1 cgd char *dc_addr;
71 1.1 cgd };
72 1.1 cgd
73 1.1 cgd struct dma_softc {
74 1.1 cgd struct dmadevice *sc_hwaddr;
75 1.1 cgd struct dmaBdevice *sc_Bhwaddr;
76 1.1 cgd char sc_type;
77 1.1 cgd char sc_flags;
78 1.1 cgd u_short sc_cmd;
79 1.1 cgd struct dma_chain *sc_cur;
80 1.1 cgd struct dma_chain *sc_last;
81 1.1 cgd struct dma_chain sc_chain[DMAMAXIO];
82 1.1 cgd } dma_softc[NDMA];
83 1.1 cgd
84 1.1 cgd /* types */
85 1.1 cgd #define DMA_B 0
86 1.1 cgd #define DMA_C 1
87 1.1 cgd
88 1.1 cgd /* flags */
89 1.1 cgd #define DMAF_PCFLUSH 0x01
90 1.1 cgd #define DMAF_VCFLUSH 0x02
91 1.1 cgd #define DMAF_NOINTR 0x04
92 1.1 cgd
93 1.1 cgd struct devqueue dmachan[NDMA + 1];
94 1.1 cgd int dmaintr();
95 1.1 cgd
96 1.1 cgd #ifdef DEBUG
97 1.1 cgd int dmadebug = 0;
98 1.1 cgd #define DDB_WORD 0x01 /* same as DMAGO_WORD */
99 1.1 cgd #define DDB_LWORD 0x02 /* same as DMAGO_LWORD */
100 1.1 cgd #define DDB_FOLLOW 0x04
101 1.1 cgd #define DDB_IO 0x08
102 1.1 cgd
103 1.1 cgd void dmatimeout();
104 1.1 cgd int dmatimo[NDMA];
105 1.1 cgd
106 1.1 cgd long dmahits[NDMA];
107 1.1 cgd long dmamisses[NDMA];
108 1.1 cgd long dmabyte[NDMA];
109 1.1 cgd long dmaword[NDMA];
110 1.1 cgd long dmalword[NDMA];
111 1.1 cgd #endif
112 1.1 cgd
113 1.1 cgd void
114 1.1 cgd dmainit()
115 1.1 cgd {
116 1.1 cgd register struct dmareg *dma = (struct dmareg *)DMA_BASE;
117 1.1 cgd register struct dma_softc *dc;
118 1.1 cgd register int i;
119 1.1 cgd char rev;
120 1.1 cgd
121 1.1 cgd /*
122 1.1 cgd * Determine the DMA type.
123 1.1 cgd * Don't know how to easily differentiate the A and B cards,
124 1.1 cgd * so we just hope nobody has an A card (A cards will work if
125 1.1 cgd * DMAINTLVL is set to 3).
126 1.1 cgd */
127 1.1 cgd if (!badbaddr((char *)&dma->dma_id[2]))
128 1.1 cgd rev = dma->dma_id[2];
129 1.1 cgd else {
130 1.1 cgd rev = 'B';
131 1.1 cgd #if !defined(HP320)
132 1.1 cgd panic("dmainit: DMA card requires hp320 support");
133 1.1 cgd #endif
134 1.1 cgd }
135 1.1 cgd
136 1.1 cgd dc = &dma_softc[0];
137 1.1 cgd for (i = 0; i < NDMA; i++) {
138 1.1 cgd dc->sc_hwaddr = (i & 1) ? &dma->dma_chan1 : &dma->dma_chan0;
139 1.1 cgd dc->sc_Bhwaddr = (i & 1) ? &dma->dma_Bchan1 : &dma->dma_Bchan0;
140 1.1 cgd dc->sc_type = rev == 'B' ? DMA_B : DMA_C;
141 1.1 cgd dc++;
142 1.1 cgd dmachan[i].dq_forw = dmachan[i].dq_back = &dmachan[i];
143 1.1 cgd }
144 1.1 cgd dmachan[i].dq_forw = dmachan[i].dq_back = &dmachan[i];
145 1.1 cgd #ifdef DEBUG
146 1.1 cgd /* make sure timeout is really not needed */
147 1.1 cgd timeout(dmatimeout, 0, 30 * hz);
148 1.1 cgd #endif
149 1.1 cgd
150 1.1 cgd printf("dma: 98620%c with 2 channels, %d bit DMA\n",
151 1.1 cgd rev, rev == 'B' ? 16 : 32);
152 1.1 cgd }
153 1.1 cgd
154 1.1 cgd int
155 1.1 cgd dmareq(dq)
156 1.1 cgd register struct devqueue *dq;
157 1.1 cgd {
158 1.1 cgd register int i;
159 1.1 cgd register int chan;
160 1.1 cgd register int s = splbio();
161 1.1 cgd
162 1.1 cgd chan = dq->dq_ctlr;
163 1.1 cgd i = NDMA;
164 1.1 cgd while (--i >= 0) {
165 1.1 cgd if ((chan & (1 << i)) == 0)
166 1.1 cgd continue;
167 1.1 cgd if (dmachan[i].dq_forw != &dmachan[i])
168 1.1 cgd continue;
169 1.1 cgd insque(dq, &dmachan[i]);
170 1.1 cgd dq->dq_ctlr = i;
171 1.1 cgd splx(s);
172 1.1 cgd return(1);
173 1.1 cgd }
174 1.1 cgd insque(dq, dmachan[NDMA].dq_back);
175 1.1 cgd splx(s);
176 1.1 cgd return(0);
177 1.1 cgd }
178 1.1 cgd
179 1.1 cgd void
180 1.1 cgd dmafree(dq)
181 1.1 cgd register struct devqueue *dq;
182 1.1 cgd {
183 1.1 cgd int unit = dq->dq_ctlr;
184 1.1 cgd register struct dma_softc *dc = &dma_softc[unit];
185 1.1 cgd register struct devqueue *dn;
186 1.1 cgd register int chan, s;
187 1.1 cgd
188 1.1 cgd s = splbio();
189 1.1 cgd #ifdef DEBUG
190 1.1 cgd dmatimo[unit] = 0;
191 1.1 cgd #endif
192 1.1 cgd DMA_CLEAR(dc);
193 1.1 cgd /*
194 1.1 cgd * XXX we may not always go thru the flush code in dmastop()
195 1.1 cgd */
196 1.1 cgd #if defined(HP360) || defined(HP370)
197 1.1 cgd if (dc->sc_flags & DMAF_PCFLUSH) {
198 1.1 cgd PCIA();
199 1.1 cgd dc->sc_flags &= ~DMAF_PCFLUSH;
200 1.1 cgd }
201 1.1 cgd #endif
202 1.1 cgd #if defined(HP320) || defined(HP350)
203 1.1 cgd if (dc->sc_flags & DMAF_VCFLUSH) {
204 1.1 cgd /*
205 1.1 cgd * 320/350s have VACs that may also need flushing.
206 1.1 cgd * In our case we only flush the supervisor side
207 1.1 cgd * because we know that if we are DMAing to user
208 1.1 cgd * space, the physical pages will also be mapped
209 1.1 cgd * in kernel space (via vmapbuf) and hence cache-
210 1.1 cgd * inhibited by the pmap module due to the multiple
211 1.1 cgd * mapping.
212 1.1 cgd */
213 1.1 cgd DCIS();
214 1.1 cgd dc->sc_flags &= ~DMAF_VCFLUSH;
215 1.1 cgd }
216 1.1 cgd #endif
217 1.1 cgd remque(dq);
218 1.1 cgd chan = 1 << unit;
219 1.1 cgd for (dn = dmachan[NDMA].dq_forw;
220 1.1 cgd dn != &dmachan[NDMA]; dn = dn->dq_forw) {
221 1.1 cgd if (dn->dq_ctlr & chan) {
222 1.1 cgd remque((caddr_t)dn);
223 1.1 cgd insque((caddr_t)dn, (caddr_t)dq->dq_back);
224 1.1 cgd splx(s);
225 1.1 cgd dn->dq_ctlr = dq->dq_ctlr;
226 1.1 cgd (dn->dq_driver->d_start)(dn->dq_unit);
227 1.1 cgd return;
228 1.1 cgd }
229 1.1 cgd }
230 1.1 cgd splx(s);
231 1.1 cgd }
232 1.1 cgd
233 1.1 cgd void
234 1.1 cgd dmago(unit, addr, count, flags)
235 1.1 cgd int unit;
236 1.1 cgd register char *addr;
237 1.1 cgd register int count;
238 1.1 cgd register int flags;
239 1.1 cgd {
240 1.1 cgd register struct dma_softc *dc = &dma_softc[unit];
241 1.1 cgd register struct dma_chain *dcp;
242 1.1 cgd register char *dmaend = NULL;
243 1.1 cgd register int tcount;
244 1.1 cgd
245 1.1 cgd if (count > MAXPHYS)
246 1.1 cgd panic("dmago: count > MAXPHYS");
247 1.1 cgd #if defined(HP320)
248 1.1 cgd if (dc->sc_type == DMA_B && (flags & DMAGO_LWORD))
249 1.1 cgd panic("dmago: no can do 32-bit DMA");
250 1.1 cgd #endif
251 1.1 cgd #ifdef DEBUG
252 1.1 cgd if (dmadebug & DDB_FOLLOW)
253 1.1 cgd printf("dmago(%d, %x, %x, %x)\n",
254 1.1 cgd unit, addr, count, flags);
255 1.1 cgd if (flags & DMAGO_LWORD)
256 1.1 cgd dmalword[unit]++;
257 1.1 cgd else if (flags & DMAGO_WORD)
258 1.1 cgd dmaword[unit]++;
259 1.1 cgd else
260 1.1 cgd dmabyte[unit]++;
261 1.1 cgd #endif
262 1.1 cgd /*
263 1.1 cgd * Build the DMA chain
264 1.1 cgd */
265 1.1 cgd for (dcp = dc->sc_chain; count > 0; dcp++) {
266 1.1 cgd dcp->dc_addr = (char *) kvtop(addr);
267 1.1 cgd if (count < (tcount = NBPG - ((int)addr & PGOFSET)))
268 1.1 cgd tcount = count;
269 1.1 cgd dcp->dc_count = tcount;
270 1.1 cgd addr += tcount;
271 1.1 cgd count -= tcount;
272 1.1 cgd if (flags & DMAGO_LWORD)
273 1.1 cgd tcount >>= 2;
274 1.1 cgd else if (flags & DMAGO_WORD)
275 1.1 cgd tcount >>= 1;
276 1.1 cgd if (dcp->dc_addr == dmaend
277 1.1 cgd #if defined(HP320)
278 1.1 cgd /* only 16-bit count on 98620B */
279 1.1 cgd && (dc->sc_type != DMA_B ||
280 1.1 cgd (dcp-1)->dc_count + tcount <= 65536)
281 1.1 cgd #endif
282 1.1 cgd ) {
283 1.1 cgd #ifdef DEBUG
284 1.1 cgd dmahits[unit]++;
285 1.1 cgd #endif
286 1.1 cgd dmaend += dcp->dc_count;
287 1.1 cgd (--dcp)->dc_count += tcount;
288 1.1 cgd } else {
289 1.1 cgd #ifdef DEBUG
290 1.1 cgd dmamisses[unit]++;
291 1.1 cgd #endif
292 1.1 cgd dmaend = dcp->dc_addr + dcp->dc_count;
293 1.1 cgd dcp->dc_count = tcount;
294 1.1 cgd }
295 1.1 cgd }
296 1.1 cgd dc->sc_cur = dc->sc_chain;
297 1.1 cgd dc->sc_last = --dcp;
298 1.1 cgd dc->sc_flags = 0;
299 1.1 cgd /*
300 1.1 cgd * Set up the command word based on flags
301 1.1 cgd */
302 1.1 cgd dc->sc_cmd = DMA_ENAB | DMA_IPL(DMAINTLVL) | DMA_START;
303 1.1 cgd if ((flags & DMAGO_READ) == 0)
304 1.1 cgd dc->sc_cmd |= DMA_WRT;
305 1.1 cgd if (flags & DMAGO_LWORD)
306 1.1 cgd dc->sc_cmd |= DMA_LWORD;
307 1.1 cgd else if (flags & DMAGO_WORD)
308 1.1 cgd dc->sc_cmd |= DMA_WORD;
309 1.1 cgd if (flags & DMAGO_PRI)
310 1.1 cgd dc->sc_cmd |= DMA_PRI;
311 1.1 cgd #if defined(HP360) || defined(HP370)
312 1.1 cgd /*
313 1.1 cgd * Remember if we need to flush external physical cache when
314 1.1 cgd * DMA is done. We only do this if we are reading (writing memory).
315 1.1 cgd */
316 1.1 cgd if (ectype == EC_PHYS && (flags & DMAGO_READ))
317 1.1 cgd dc->sc_flags |= DMAF_PCFLUSH;
318 1.1 cgd #endif
319 1.1 cgd #if defined(HP320) || defined(HP350)
320 1.1 cgd if (ectype == EC_VIRT && (flags & DMAGO_READ))
321 1.1 cgd dc->sc_flags |= DMAF_VCFLUSH;
322 1.1 cgd #endif
323 1.1 cgd /*
324 1.1 cgd * Remember if we can skip the dma completion interrupt on
325 1.1 cgd * the last segment in the chain.
326 1.1 cgd */
327 1.1 cgd if (flags & DMAGO_NOINT) {
328 1.1 cgd if (dc->sc_cur == dc->sc_last)
329 1.1 cgd dc->sc_cmd &= ~DMA_ENAB;
330 1.1 cgd else
331 1.1 cgd dc->sc_flags |= DMAF_NOINTR;
332 1.1 cgd }
333 1.1 cgd #ifdef DEBUG
334 1.1 cgd if (dmadebug & DDB_IO)
335 1.1 cgd if ((dmadebug&DDB_WORD) && (dc->sc_cmd&DMA_WORD) ||
336 1.1 cgd (dmadebug&DDB_LWORD) && (dc->sc_cmd&DMA_LWORD)) {
337 1.1 cgd printf("dmago: cmd %x, flags %x\n",
338 1.1 cgd dc->sc_cmd, dc->sc_flags);
339 1.1 cgd for (dcp = dc->sc_chain; dcp <= dc->sc_last; dcp++)
340 1.1 cgd printf(" %d: %d@%x\n", dcp-dc->sc_chain,
341 1.1 cgd dcp->dc_count, dcp->dc_addr);
342 1.1 cgd }
343 1.1 cgd dmatimo[unit] = 1;
344 1.1 cgd #endif
345 1.1 cgd DMA_ARM(dc);
346 1.1 cgd }
347 1.1 cgd
348 1.1 cgd void
349 1.1 cgd dmastop(unit)
350 1.1 cgd register int unit;
351 1.1 cgd {
352 1.1 cgd register struct dma_softc *dc = &dma_softc[unit];
353 1.1 cgd register struct devqueue *dq;
354 1.1 cgd
355 1.1 cgd #ifdef DEBUG
356 1.1 cgd if (dmadebug & DDB_FOLLOW)
357 1.1 cgd printf("dmastop(%d)\n", unit);
358 1.1 cgd dmatimo[unit] = 0;
359 1.1 cgd #endif
360 1.1 cgd DMA_CLEAR(dc);
361 1.1 cgd #if defined(HP360) || defined(HP370)
362 1.1 cgd if (dc->sc_flags & DMAF_PCFLUSH) {
363 1.1 cgd PCIA();
364 1.1 cgd dc->sc_flags &= ~DMAF_PCFLUSH;
365 1.1 cgd }
366 1.1 cgd #endif
367 1.1 cgd #if defined(HP320) || defined(HP350)
368 1.1 cgd if (dc->sc_flags & DMAF_VCFLUSH) {
369 1.1 cgd /*
370 1.1 cgd * 320/350s have VACs that may also need flushing.
371 1.1 cgd * In our case we only flush the supervisor side
372 1.1 cgd * because we know that if we are DMAing to user
373 1.1 cgd * space, the physical pages will also be mapped
374 1.1 cgd * in kernel space (via vmapbuf) and hence cache-
375 1.1 cgd * inhibited by the pmap module due to the multiple
376 1.1 cgd * mapping.
377 1.1 cgd */
378 1.1 cgd DCIS();
379 1.1 cgd dc->sc_flags &= ~DMAF_VCFLUSH;
380 1.1 cgd }
381 1.1 cgd #endif
382 1.1 cgd /*
383 1.1 cgd * We may get this interrupt after a device service routine
384 1.1 cgd * has freed the dma channel. So, ignore the intr if there's
385 1.1 cgd * nothing on the queue.
386 1.1 cgd */
387 1.1 cgd dq = dmachan[unit].dq_forw;
388 1.1 cgd if (dq != &dmachan[unit])
389 1.1 cgd (dq->dq_driver->d_done)(dq->dq_unit);
390 1.1 cgd }
391 1.1 cgd
392 1.1 cgd int
393 1.1 cgd dmaintr()
394 1.1 cgd {
395 1.1 cgd register struct dma_softc *dc;
396 1.1 cgd register int i, stat;
397 1.1 cgd int found = 0;
398 1.1 cgd
399 1.1 cgd #ifdef DEBUG
400 1.1 cgd if (dmadebug & DDB_FOLLOW)
401 1.1 cgd printf("dmaintr\n");
402 1.1 cgd #endif
403 1.1 cgd for (i = 0, dc = dma_softc; i < NDMA; i++, dc++) {
404 1.1 cgd stat = DMA_STAT(dc);
405 1.1 cgd if ((stat & DMA_INTR) == 0)
406 1.1 cgd continue;
407 1.1 cgd found++;
408 1.1 cgd #ifdef DEBUG
409 1.1 cgd if (dmadebug & DDB_IO) {
410 1.1 cgd if ((dmadebug&DDB_WORD) && (dc->sc_cmd&DMA_WORD) ||
411 1.1 cgd (dmadebug&DDB_LWORD) && (dc->sc_cmd&DMA_LWORD))
412 1.1 cgd printf("dmaintr: unit %d stat %x next %d\n",
413 1.1 cgd i, stat, (dc->sc_cur-dc->sc_chain)+1);
414 1.1 cgd }
415 1.1 cgd if (stat & DMA_ARMED)
416 1.1 cgd printf("dma%d: intr when armed\n", i);
417 1.1 cgd #endif
418 1.1 cgd if (++dc->sc_cur <= dc->sc_last) {
419 1.1 cgd #ifdef DEBUG
420 1.1 cgd dmatimo[i] = 1;
421 1.1 cgd #endif
422 1.1 cgd /*
423 1.1 cgd * Last chain segment, disable DMA interrupt.
424 1.1 cgd */
425 1.1 cgd if (dc->sc_cur == dc->sc_last &&
426 1.1 cgd (dc->sc_flags & DMAF_NOINTR))
427 1.1 cgd dc->sc_cmd &= ~DMA_ENAB;
428 1.1 cgd DMA_CLEAR(dc);
429 1.1 cgd DMA_ARM(dc);
430 1.1 cgd } else
431 1.1 cgd dmastop(i);
432 1.1 cgd }
433 1.1 cgd return(found);
434 1.1 cgd }
435 1.1 cgd
#ifdef DEBUG
/*
 * Debug watchdog: self-rearming timeout that fires every 30 seconds.
 * dmatimo[i] is set to 1 when a transfer is started/continued and
 * cleared on completion; if it is still set on a later tick we report
 * (but do not recover from) an apparently stuck channel.
 */
void
dmatimeout()
{
	register int i, s;

	for (i = 0; i < NDMA; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("dma%d: timeout #%d\n",
				       i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	timeout(dmatimeout, (caddr_t)0, 30 * hz);
}
#endif