/*	$NetBSD: dma.c,v 1.5 1994/10/26 07:23:40 cgd Exp $	*/

/*
 * Copyright (c) 1982, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)dma.c	8.1 (Berkeley) 6/10/93
 */

/*
 * DMA driver
 */

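/*
 * The hardware is the HP 98620 two-channel DMA controller.  As far as
 * this driver is concerned there are two flavors: the 98620B, which is
 * limited to 16-bit transfers and a 16-bit transfer count, and anything
 * newer, which is treated as a 98620C with 32-bit support (see the type
 * probe in dmainit() below).
 */
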
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <machine/cpu.h>

#include <hp300/dev/device.h>
#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>

#include <hp300/hp300/isr.h>

extern void isrlink();
extern void _insque();
extern void _remque();
extern u_int kvtop();
extern void PCIA();

/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/NBPG+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/NBPG) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/NBPG+1)

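/*
 * One physically contiguous segment of a transfer.  dc_addr is a
 * physical address; the dc_count that dmago() finally leaves here is
 * in transfer units (bytes, words or longwords, depending on the
 * DMAGO_* flags).
 */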
struct	dma_chain {
	int	dc_count;
	char	*dc_addr;
};

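/*
 * Per-channel software state: the channel's register addresses in both
 * the 98620B and 98620C layouts, the card type, cache-flush flags, the
 * command word to load when (re)arming, and the segment chain together
 * with cursors to the current and last segments.
 */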
struct	dma_softc {
	struct	dmadevice *sc_hwaddr;
	struct	dmaBdevice *sc_Bhwaddr;
	char	sc_type;
	char	sc_flags;
	u_short	sc_cmd;
	struct	dma_chain *sc_cur;
	struct	dma_chain *sc_last;
	struct	dma_chain sc_chain[DMAMAXIO];
} dma_softc[NDMA];

/* types */
#define	DMA_B	0
#define DMA_C	1

/* flags */
#define DMAF_PCFLUSH	0x01
#define DMAF_VCFLUSH	0x02
#define DMAF_NOINTR	0x04

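/*
 * dmachan[0..NDMA-1] are the queues of devices that own or are waiting
 * for the corresponding channel; dmachan[NDMA] is the overflow queue of
 * requests that could not be given a channel right away (see dmareq()
 * and dmafree()).
 */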
struct	devqueue dmachan[NDMA + 1];
int	dmaintr();

#ifdef DEBUG
int	dmadebug = 0;
#define DDB_WORD	0x01	/* same as DMAGO_WORD */
#define DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04
#define DDB_IO		0x08

void	dmatimeout __P((void *));
int	dmatimo[NDMA];

long	dmahits[NDMA];
long	dmamisses[NDMA];
long	dmabyte[NDMA];
long	dmaword[NDMA];
long	dmalword[NDMA];
#endif

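/*
 * Probe the DMA card, decide whether it is a 98620B or something newer,
 * and initialize the per-channel software state and request queues.
 */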
void
dmainit()
{
	register struct dmareg *dma = (struct dmareg *)DMA_BASE;
	register struct dma_softc *dc;
	register int i;
	char rev;

	/*
	 * Determine the DMA type.
	 * Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * DMAINTLVL is set to 3).
	 */
	if (!badbaddr((char *)&dma->dma_id[2]))
		rev = dma->dma_id[2];
	else {
		rev = 'B';
#if !defined(HP320)
		panic("dmainit: DMA card requires hp320 support");
#endif
	}

	dc = &dma_softc[0];
	for (i = 0; i < NDMA; i++) {
		dc->sc_hwaddr = (i & 1) ? &dma->dma_chan1 : &dma->dma_chan0;
		dc->sc_Bhwaddr = (i & 1) ? &dma->dma_Bchan1 : &dma->dma_Bchan0;
		dc->sc_type = rev == 'B' ? DMA_B : DMA_C;
		dc++;
		dmachan[i].dq_forw = dmachan[i].dq_back = &dmachan[i];
	}
	dmachan[i].dq_forw = dmachan[i].dq_back = &dmachan[i];
#ifdef DEBUG
	/* make sure timeout is really not needed */
	timeout(dmatimeout, (void *)0, 30 * hz);
#endif

	printf("dma: 98620%c with 2 channels, %d bit DMA\n",
	       rev, rev == 'B' ? 16 : 32);
}

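/*
 * Request a DMA channel for the device described by dq.  On entry
 * dq->dq_ctlr is a bitmask of acceptable channels; if one of them is
 * free, dq is queued on it, dq->dq_ctlr is rewritten with the channel
 * number and 1 is returned.  Otherwise the request goes on the overflow
 * queue and 0 is returned, and the device's d_start routine will be
 * called from dmafree() once a suitable channel is released.
 *
 * A rough usage sketch from a hypothetical driver (sc_dq and xxstart
 * are illustrative names, not taken from any real driver):
 *
 *	sc->sc_dq.dq_ctlr = (1 << 0) | (1 << 1);
 *	if (dmareq(&sc->sc_dq))
 *		xxstart(unit);
 *	...
 *	dmago(sc->sc_dq.dq_ctlr, addr, count, DMAGO_READ | DMAGO_WORD);
 *	...
 *	dmafree(&sc->sc_dq);
 *
 * dmareq() returning 0 means xxstart() will be invoked later through
 * d_start; d_done is invoked from dmastop() when the transfer finishes.
 */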
int
dmareq(dq)
	register struct devqueue *dq;
{
	register int i;
	register int chan;
	register int s = splbio();

	chan = dq->dq_ctlr;
	i = NDMA;
	while (--i >= 0) {
		if ((chan & (1 << i)) == 0)
			continue;
		if (dmachan[i].dq_forw != &dmachan[i])
			continue;
		insque(dq, &dmachan[i]);
		dq->dq_ctlr = i;
		splx(s);
		return(1);
	}
	insque(dq, dmachan[NDMA].dq_back);
	splx(s);
	return(0);
}

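/*
 * Release the DMA channel held by dq: finish any cache flushing noted
 * in sc_flags, take dq off the channel's queue, and if a request on the
 * overflow queue will accept this channel, hand it over and kick that
 * device's d_start routine.
 */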
void
dmafree(dq)
	register struct devqueue *dq;
{
	int unit = dq->dq_ctlr;
	register struct dma_softc *dc = &dma_softc[unit];
	register struct devqueue *dn;
	register int chan, s;

	s = splbio();
#ifdef DEBUG
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);
#if defined(HP360) || defined(HP370) || defined(HP380)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->sc_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->sc_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->sc_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->sc_flags &= ~DMAF_VCFLUSH;
	}
#endif
	remque(dq);
	chan = 1 << unit;
	for (dn = dmachan[NDMA].dq_forw;
	     dn != &dmachan[NDMA]; dn = dn->dq_forw) {
		if (dn->dq_ctlr & chan) {
			remque((caddr_t)dn);
			insque((caddr_t)dn, (caddr_t)dq->dq_back);
			splx(s);
			dn->dq_ctlr = dq->dq_ctlr;
			(dn->dq_driver->d_start)(dn->dq_unit);
			return;
		}
	}
	splx(s);
}

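/*
 * Start a transfer of count bytes at kernel virtual address addr on
 * channel unit.  The buffer is carved into a chain of physically
 * contiguous segments and the hardware is armed for the first one;
 * dmaintr() walks the remainder of the chain.
 */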
void
dmago(unit, addr, count, flags)
	int unit;
	register char *addr;
	register int count;
	register int flags;
{
	register struct dma_softc *dc = &dma_softc[unit];
	register struct dma_chain *dcp;
	register char *dmaend = NULL;
	register int tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");
#if defined(HP320)
	if (dc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif
#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %x, %x, %x)\n",
		       unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain
	 */
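	/*
	 * Each segment covers at most one page.  Segments that turn out
	 * to be physically contiguous with their predecessor are merged
	 * into it (subject to the 16-bit count limit on the 98620B).
	 * Note that dmaend tracks the running physical end address in
	 * bytes, while the dc_count finally left in each element is in
	 * transfer units (bytes, words or longwords per the flags).
	 */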
	for (dcp = dc->sc_chain; count > 0; dcp++) {
		dcp->dc_addr = (char *) kvtop(addr);
#if defined(HP380)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP(dcp->dc_addr);
#endif
		if (count < (tcount = NBPG - ((int)addr & PGOFSET)))
			tcount = count;
		dcp->dc_count = tcount;
		addr += tcount;
		count -= tcount;
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;
		if (dcp->dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (dc->sc_type != DMA_B ||
			(dcp-1)->dc_count + tcount <= 65536)
#endif
		) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			dmaend += dcp->dc_count;
			(--dcp)->dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dcp->dc_addr + dcp->dc_count;
			dcp->dc_count = tcount;
		}
	}
	dc->sc_cur = dc->sc_chain;
	dc->sc_last = --dcp;
	dc->sc_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->sc_cmd = DMA_ENAB | DMA_IPL(DMAINTLVL) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->sc_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->sc_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->sc_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->sc_cmd |= DMA_PRI;
#if defined(HP380)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->sc_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP360) || defined(HP370)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->sc_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP320) || defined(HP350)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->sc_flags |= DMAF_VCFLUSH;
#endif
	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->sc_cur == dc->sc_last)
			dc->sc_cmd &= ~DMA_ENAB;
		else
			dc->sc_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO)
		if (((dmadebug&DDB_WORD) && (dc->sc_cmd&DMA_WORD)) ||
		    ((dmadebug&DDB_LWORD) && (dc->sc_cmd&DMA_LWORD))) {
			printf("dmago: cmd %x, flags %x\n",
			       dc->sc_cmd, dc->sc_flags);
			for (dcp = dc->sc_chain; dcp <= dc->sc_last; dcp++)
				printf("  %d: %d@%x\n", dcp-dc->sc_chain,
				       dcp->dc_count, dcp->dc_addr);
		}
	dmatimo[unit] = 1;
#endif
	DMA_ARM(dc);
}

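/*
 * Transfer complete on a channel (called from dmaintr() when the last
 * chain segment finishes): shut the channel down, do any deferred
 * cache flushing, and notify the owning device, if it still holds the
 * channel, through its d_done routine.
 */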
void
dmastop(unit)
	register int unit;
{
	register struct dma_softc *dc = &dma_softc[unit];
	register struct devqueue *dq;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);
#if defined(HP360) || defined(HP370) || defined(HP380)
	if (dc->sc_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->sc_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->sc_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->sc_flags &= ~DMAF_VCFLUSH;
	}
#endif
	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the intr if there's
	 * nothing on the queue.
	 */
	dq = dmachan[unit].dq_forw;
	if (dq != &dmachan[unit])
		(dq->dq_driver->d_done)(dq->dq_unit);
}

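/*
 * DMA interrupt handler: for every channel with an interrupt pending,
 * either rearm the hardware for the next chain segment or finish up
 * through dmastop().  Returns the number of channels serviced, so the
 * caller can tell whether the interrupt was ours.
 */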
int
dmaintr()
{
	register struct dma_softc *dc;
	register int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0, dc = dma_softc; i < NDMA; i++, dc++) {
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if (((dmadebug&DDB_WORD) && (dc->sc_cmd&DMA_WORD)) ||
			    ((dmadebug&DDB_LWORD) && (dc->sc_cmd&DMA_LWORD)))
				printf("dmaintr: unit %d stat %x next %d\n",
				       i, stat, (dc->sc_cur-dc->sc_chain)+1);
		}
		if (stat & DMA_ARMED)
			printf("dma%d: intr when armed\n", i);
#endif
		if (++dc->sc_cur <= dc->sc_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * If this is the last chain segment and the caller
			 * asked to skip the completion interrupt
			 * (DMAF_NOINTR), disable the DMA interrupt.
			 */
			if (dc->sc_cur == dc->sc_last &&
			    (dc->sc_flags & DMAF_NOINTR))
				dc->sc_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(dc);
		} else
			dmastop(i);
	}
	return(found);
}

#ifdef DEBUG
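/*
 * Debug scaffolding: dmatimo[unit] is set when a channel is armed and
 * cleared when it completes; bump it here every 30 seconds and complain
 * about any channel that stays armed across a full interval without an
 * interrupt.
 */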
void
dmatimeout(arg)
	void *arg;
{
	register int i, s;

	for (i = 0; i < NDMA; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("dma%d: timeout #%d\n",
				       i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	timeout(dmatimeout, (void *)0, 30 * hz);
}
#endif