      1 /*	$NetBSD: if_dmc.c,v 1.2 2001/11/13 07:11:24 lukem Exp $	*/
      2 /*
      3  * Copyright (c) 1982, 1986 Regents of the University of California.
      4  * All rights reserved.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  * 3. All advertising materials mentioning features or use of this software
     15  *    must display the following acknowledgement:
     16  *	This product includes software developed by the University of
     17  *	California, Berkeley and its contributors.
     18  * 4. Neither the name of the University nor the names of its contributors
     19  *    may be used to endorse or promote products derived from this software
     20  *    without specific prior written permission.
     21  *
     22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     32  * SUCH DAMAGE.
     33  *
     34  *	@(#)if_dmc.c	7.10 (Berkeley) 12/16/90
     35  */
     36 
     37 /*
     38  * DMC11 device driver, internet version
     39  *
     40  *	Bill Nesheim
     41  *	Cornell University
     42  *
     43  *	Lou Salkind
     44  *	New York University
     45  */
     46 
     47 #include <sys/cdefs.h>
     48 __KERNEL_RCSID(0, "$NetBSD: if_dmc.c,v 1.2 2001/11/13 07:11:24 lukem Exp $");
     49 
     50 #undef DMCDEBUG	/* for base table dump on fatal error */
     51 
     52 #include "opt_inet.h"
     53 
     54 #include <sys/param.h>
     55 #include <sys/systm.h>
     56 #include <sys/mbuf.h>
     57 #include <sys/ioctl.h>
     58 #include <sys/socket.h>
     59 #include <sys/syslog.h>
     60 #include <sys/device.h>
     61 
     62 #include <net/if.h>
     63 #include <net/netisr.h>
     64 
     65 #ifdef	INET
     66 #include <netinet/in.h>
     67 #include <netinet/in_var.h>
     68 #endif
     69 
     70 #include <machine/bus.h>
     71 
     72 #include <dev/qbus/ubareg.h>
     73 #include <dev/qbus/ubavar.h>
     74 #include <dev/qbus/if_uba.h>
     75 
     76 #include <dev/qbus/if_dmcreg.h>
     77 
     78 
     79 /*
     80  * Output timeout value, in seconds; should depend on the line speed.
     81  */
     82 static int dmc_timeout = 20;
     83 
     84 #define NRCV 7			/* number of receive buffers */
     85 #define NXMT 3			/* number of transmit buffers */
     86 #define NCMDS	(NRCV+NXMT+4)	/* size of command queue */
     87 
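/*
 * Device register access macros.  These assume a "struct dmc_softc *sc"
 * is in scope and simply wrap bus_space accesses to the DMC CSRs through
 * the bus space tag and handle saved in the softc.
 */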
     88 #define DMC_WBYTE(csr, val) \
     89 	bus_space_write_1(sc->sc_iot, sc->sc_ioh, csr, val)
     90 #define DMC_WWORD(csr, val) \
     91 	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
     92 #define DMC_RBYTE(csr) \
     93 	bus_space_read_1(sc->sc_iot, sc->sc_ioh, csr)
     94 #define DMC_RWORD(csr) \
     95 	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)
     96 
     97 
     98 #ifdef DMCDEBUG
     99 #define printd if(dmcdebug)printf
    100 int dmcdebug = 0;
    101 #endif
    102 
    103 /* error reporting intervals */
    104 #define DMC_RPNBFS	50	/* report every 50th "no buffers" error */
    105 #define DMC_RPDSC	1	/* report every disconnect */
    106 #define DMC_RPTMO	10	/* report every 10th timeout */
    107 #define DMC_RPDCK	10	/* report every 10th data check error */
    108 
    109 struct  dmc_command {
    110 	char	qp_cmd;		/* command */
    111 	short	qp_ubaddr;	/* buffer address */
    112 	short	qp_cc;		/* character count || XMEM */
    113 	struct	dmc_command *qp_next;	/* next command on queue */
    114 };
    115 
    116 struct dmcbufs {
    117 	int	ubinfo;		/* from uballoc */
    118 	short	cc;		/* buffer size */
    119 	short	flags;		/* access control */
    120 };
    121 #define	DBUF_OURS	0	/* buffer is available */
    122 #define	DBUF_DMCS	1	/* buffer handed to the DMC */
    123 #define	DBUF_XMIT	4	/* transmit buffer */
    124 #define	DBUF_RCV	8	/* receive buffer */
    125 
    126 
    127 /*
    128  * DMC software status per interface.
    129  *
    130  * Each interface is referenced by a network interface structure,
    131  * sc_if, which the routing code uses to locate the interface.
    132  * This structure contains the output queue for the interface, its address, ...
    133  * We also have, for each interface, a set of UBA interface
    134  * structures which contain information about the UNIBUS resources
    135  * held by the interface:
    136  * map registers, buffered data paths, etc.  Information is cached in this
    137  * structure for use by the if_uba.c routines in running the interface
    138  * efficiently.
    139  */
    140 struct dmc_softc {
    141 	struct	device sc_dev;		/* Configuration common part */
    142 	struct	ifnet sc_if;		/* network-visible interface */
    143 	short	sc_oused;		/* output buffers currently in use */
    144 	short	sc_iused;		/* input buffers given to DMC */
    145 	short	sc_flag;		/* flags */
    146 	struct	ubinfo sc_ui;		/* UBA mapping info for base table */
    147 	int	sc_errors[4];		/* non-fatal error counters */
    148 	bus_space_tag_t sc_iot;
    149 	bus_addr_t sc_ioh;
    150 	bus_dma_tag_t sc_dmat;
    151 	struct	evcnt sc_rintrcnt;	/* Interrupt counting */
    152 	struct	evcnt sc_tintrcnt;	/* Interrupt counting */
    153 #define sc_datck sc_errors[0]
    154 #define sc_timeo sc_errors[1]
    155 #define sc_nobuf sc_errors[2]
    156 #define sc_disc  sc_errors[3]
    157 	struct	dmcbufs sc_rbufs[NRCV];	/* receive buffer info */
    158 	struct	dmcbufs sc_xbufs[NXMT];	/* transmit buffer info */
    159 	struct	ifubinfo sc_ifuba;	/* UNIBUS resources */
    160 	struct	ifrw sc_ifr[NRCV];	/* UNIBUS receive buffer maps */
    161 	struct	ifxmt sc_ifw[NXMT];	/* UNIBUS transmit buffer maps */
    162 	/* command queue stuff */
    163 	struct	dmc_command sc_cmdbuf[NCMDS];
    164 	struct	dmc_command *sc_qhead;	/* head of command queue */
    165 	struct	dmc_command *sc_qtail;	/* tail of command queue */
    166 	struct	dmc_command *sc_qactive;	/* command in progress */
    167 	struct	dmc_command *sc_qfreeh;	/* head of list of free cmd buffers */
    168 	struct	dmc_command *sc_qfreet;	/* tail of list of free cmd buffers */
    169 	/* end command queue stuff */
    170 	struct dmc_base {
    171 		short	d_base[128];		/* DMC base table */
    172 	} dmc_base;
    173 };
    174 
    175 static  int dmcmatch(struct device *, struct cfdata *, void *);
    176 static  void dmcattach(struct device *, struct device *, void *);
    177 static  int dmcinit(struct ifnet *);
    178 static  void dmcrint(void *);
    179 static  void dmcxint(void *);
    180 static  void dmcdown(struct dmc_softc *sc);
    181 static  void dmcrestart(struct dmc_softc *);
    182 static  void dmcload(struct dmc_softc *, int, u_short, u_short);
    183 static  void dmcstart(struct ifnet *);
    184 static  void dmctimeout(struct ifnet *);
    185 static  int dmcioctl(struct ifnet *, u_long, caddr_t);
    186 static  int dmcoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
    187 	struct rtentry *);
    188 static  void dmcreset(struct device *);
    189 
    190 struct	cfattach dmc_ca = {
    191 	sizeof(struct dmc_softc), dmcmatch, dmcattach
    192 };
    193 
    194 /* flags */
    195 #define DMC_RUNNING	0x01		/* device initialized */
    196 #define DMC_BMAPPED	0x02		/* base table mapped */
    197 #define DMC_RESTART	0x04		/* software restart in progress */
    198 #define DMC_ONLINE	0x08		/* device running (had a RDYO) */
    199 
    200 
    201 /* queue manipulation macros */
    202 #define	QUEUE_AT_HEAD(qp, head, tail) \
    203 	(qp)->qp_next = (head); \
    204 	(head) = (qp); \
    205 	if ((tail) == (struct dmc_command *) 0) \
    206 		(tail) = (head)
    207 
    208 #define QUEUE_AT_TAIL(qp, head, tail) \
    209 	if ((tail)) \
    210 		(tail)->qp_next = (qp); \
    211 	else \
    212 		(head) = (qp); \
    213 	(qp)->qp_next = (struct dmc_command *) 0; \
    214 	(tail) = (qp)
    215 
    216 #define DEQUEUE(head, tail) \
    217 	(head) = (head)->qp_next;\
    218 	if ((head) == (struct dmc_command *) 0)\
    219 		(tail) = (head)
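
/*
 * Note that these macros expand to multiple statements and are not wrapped
 * in do { ... } while (0); they must only be used as full statements or
 * inside a braced block, as they are everywhere below.
 */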
    220 
    221 int
    222 dmcmatch(struct device *parent, struct cfdata *cf, void *aux)
    223 {
    224 	struct uba_attach_args *ua = aux;
    225 	struct dmc_softc ssc;
    226 	struct dmc_softc *sc = &ssc;
    227 	int i;
    228 
    229 	sc->sc_iot = ua->ua_iot;
    230 	sc->sc_ioh = ua->ua_ioh;
    231 
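	/*
	 * Master clear the device and wait for its microcode to set RUN.
	 * Requesting an input transfer with interrupts enabled then makes
	 * the device interrupt, which the bus autoconfiguration code uses
	 * to find its vector; finally clear the device again to leave it
	 * quiet.
	 */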
    232 	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
    233 	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
    234 		;
    235 	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
    236 		printf("dmcmatch: can't start device\n");
    237 		return (0);
    238 	}
    239 	DMC_WBYTE(DMC_BSEL0, DMC_RQI|DMC_IEI);
    240 	/* let's be paranoid */
    241 	DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) | DMC_RQI|DMC_IEI);
    242 	DELAY(1000000);
    243 	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
    244 	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
    245 		;
    246 	return (1);
    247 }
    248 
    249 /*
    250  * Interface exists: make available by filling in network interface
    251  * record.  System will initialize the interface when it is ready
    252  * to accept packets.
    253  */
    254 void
    255 dmcattach(struct device *parent, struct device *self, void *aux)
    256 {
    257 	struct uba_attach_args *ua = aux;
    258 	struct dmc_softc *sc = (struct dmc_softc *)self;
    259 
    260 	sc->sc_iot = ua->ua_iot;
    261 	sc->sc_ioh = ua->ua_ioh;
    262 	sc->sc_dmat = ua->ua_dmat;
    263 
    264 	strcpy(sc->sc_if.if_xname, sc->sc_dev.dv_xname);
    265 	sc->sc_if.if_mtu = DMCMTU;
    266 	sc->sc_if.if_init = dmcinit;
    267 	sc->sc_if.if_output = dmcoutput;
    268 	sc->sc_if.if_ioctl = dmcioctl;
    269 	sc->sc_if.if_watchdog = dmctimeout;
    270 	sc->sc_if.if_flags = IFF_POINTOPOINT;
    271 	sc->sc_if.if_softc = sc;
    272 
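	/*
	 * The DMC11 uses two interrupt vectors: the first fires when the
	 * command (input) port is ready for another transfer request, the
	 * second when an output transfer or control report completes.
	 */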
    273 	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, dmcrint, sc,
    274 	    &sc->sc_rintrcnt);
    275 	uba_intr_establish(ua->ua_icookie, ua->ua_cvec+4, dmcxint, sc,
    276 	    &sc->sc_tintrcnt);
    277 	uba_reset_establish(dmcreset, &sc->sc_dev);
    278 	evcnt_attach_dynamic(&sc->sc_rintrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
    279 	    sc->sc_dev.dv_xname, "intr");
    280 	evcnt_attach_dynamic(&sc->sc_tintrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
    281 	    sc->sc_dev.dv_xname, "intr");
    282 
    283 	if_attach(&sc->sc_if);
    284 }
    285 
    286 /*
    287  * Reset of interface after UNIBUS reset.
    288  * If interface is on specified UBA, reset its state.
    289  */
    290 void
    291 dmcreset(struct device *dev)
    292 {
    293 	struct dmc_softc *sc = (struct dmc_softc *)dev;
    294 
    295 	sc->sc_flag = 0;
    296 	sc->sc_if.if_flags &= ~IFF_RUNNING;
    297 	dmcinit(&sc->sc_if);
    298 }
    299 
    300 /*
    301  * Initialization of interface; reinitialize UNIBUS usage.
    302  */
    303 int
    304 dmcinit(struct ifnet *ifp)
    305 {
    306 	struct dmc_softc *sc = ifp->if_softc;
    307 	struct ifrw *ifrw;
    308 	struct ifxmt *ifxp;
    309 	struct dmcbufs *rp;
    310 	struct dmc_command *qp;
    311 	struct ifaddr *ifa;
    312 	struct cfdata *ui = sc->sc_dev.dv_cfdata;
    313 	int base;
    314 	int s;
    315 
    316 	/*
    317 	 * Check to see that an address has been set
    318 	 * (both local and destination for an address family).
    319 	 */
    320 	TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list)
    321 		if (ifa->ifa_addr->sa_family && ifa->ifa_dstaddr->sa_family)
    322 			break;
    323 	if (ifa == (struct ifaddr *) 0)
    324 		return 0;
    325 
    326 	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
    327 		printf("dmcinit: DMC not running\n");
    328 		ifp->if_flags &= ~IFF_UP;
    329 		return 0;
    330 	}
    331 	/* map base table */
    332 	if ((sc->sc_flag & DMC_BMAPPED) == 0) {
    333 		sc->sc_ui.ui_size = sizeof(struct dmc_base);
    334 		sc->sc_ui.ui_vaddr = (caddr_t)&sc->dmc_base;
    335 		uballoc((void *)sc->sc_dev.dv_parent, &sc->sc_ui, 0);
    336 		sc->sc_flag |= DMC_BMAPPED;
    337 	}
    338 	/* initialize UNIBUS resources */
    339 	sc->sc_iused = sc->sc_oused = 0;
    340 	if ((ifp->if_flags & IFF_RUNNING) == 0) {
    341 		if (if_ubaminit(&sc->sc_ifuba, (void *)sc->sc_dev.dv_parent,
    342 		    sizeof(struct dmc_header) + DMCMTU,
    343 		    sc->sc_ifr, NRCV, sc->sc_ifw, NXMT) == 0) {
    344 			printf("%s: can't allocate uba resources\n",
    345 			    sc->sc_dev.dv_xname);
    346 			ifp->if_flags &= ~IFF_UP;
    347 			return 0;
    348 		}
    349 		ifp->if_flags |= IFF_RUNNING;
    350 	}
    351 	sc->sc_flag &= ~DMC_ONLINE;
    352 	sc->sc_flag |= DMC_RUNNING;
    353 	/*
    354 	 * Limit packets enqueued until we see if we're on the air.
    355 	 */
    356 	ifp->if_snd.ifq_maxlen = 3;
    357 
    358 	/* initialize buffer pool */
    359 	/* receives */
    360 	ifrw = &sc->sc_ifr[0];
    361 	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
    362 		rp->ubinfo = ifrw->ifrw_info;
    363 		rp->cc = DMCMTU + sizeof (struct dmc_header);
    364 		rp->flags = DBUF_OURS|DBUF_RCV;
    365 		ifrw++;
    366 	}
    367 	/* transmits */
    368 	ifxp = &sc->sc_ifw[0];
    369 	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
    370 		rp->ubinfo = ifxp->ifw_info;
    371 		rp->cc = 0;
    372 		rp->flags = DBUF_OURS|DBUF_XMIT;
    373 		ifxp++;
    374 	}
    375 
    376 	/* set up command queues */
    377 	sc->sc_qfreeh = sc->sc_qfreet
    378 		 = sc->sc_qhead = sc->sc_qtail = sc->sc_qactive =
    379 		(struct dmc_command *)0;
    380 	/* set up free command buffer list */
    381 	for (qp = &sc->sc_cmdbuf[0]; qp < &sc->sc_cmdbuf[NCMDS]; qp++) {
    382 		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
    383 	}
    384 
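	/*
	 * Hand the DMC the UNIBUS address of the base table.  Buffer and
	 * table addresses are always passed as a 16-bit low word plus the
	 * extended-memory bits of the 18-bit UNIBUS address, which is what
	 * the (addr >> 2) & DMC_XMEM expressions below construct.
	 */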
    385 	/* base in */
    386 	base = sc->sc_ui.ui_baddr;
    387 	dmcload(sc, DMC_BASEI, (u_short)base, (base>>2) & DMC_XMEM);
    388 	/* cf_flags selects the line mode: full duplex DDCMP, maintenance */
    389 	/* mode, or DDCMP half duplex as primary or secondary station */
    390 	if (ui->cf_flags == 0)
    391 		/* use DDCMP mode in full duplex */
    392 		dmcload(sc, DMC_CNTLI, 0, 0);
    393 	else if (ui->cf_flags == 1)
    394 		/* use MAINTENANCE mode */
    395 		dmcload(sc, DMC_CNTLI, 0, DMC_MAINT);
    396 	else if (ui->cf_flags == 2)
    397 		/* use DDCMP half duplex as primary station */
    398 		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX);
    399 	else if (ui->cf_flags == 3)
    400 		/* use DDCMP half duplex as secondary station */
    401 		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX | DMC_SEC);
    402 
    403 	/* enable operation done interrupts */
    404 	while ((DMC_RBYTE(DMC_BSEL2) & DMC_IEO) == 0)
    405 		DMC_WBYTE(DMC_BSEL2, DMC_RBYTE(DMC_BSEL2) | DMC_IEO);
    406 	s = splnet();
    407 	/* queue first NRCV buffers for DMC to fill */
    408 	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
    409 		rp->flags |= DBUF_DMCS;
    410 		dmcload(sc, DMC_READ, rp->ubinfo,
    411 			(((rp->ubinfo>>2)&DMC_XMEM) | rp->cc));
    412 		sc->sc_iused++;
    413 	}
    414 	splx(s);
    415 	return 0;
    416 }
    417 
    418 /*
    419  * Start output on interface.  Get another datagram
    420  * to send from the interface queue and map it to
    421  * the interface before starting output.
    422  *
    423  * Must be called at splnet or from the device interrupt handler.
    424  */
    425 void
    426 dmcstart(struct ifnet *ifp)
    427 {
    428 	struct dmc_softc *sc = ifp->if_softc;
    429 	struct mbuf *m;
    430 	struct dmcbufs *rp;
    431 	int n;
    432 
    433 	/*
    434 	 * Dequeue up to NXMT requests and map them to the UNIBUS.
    435 	 * If no more requests, or no dmc buffers available, just return.
    436 	 */
    437 	n = 0;
    438 	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++ ) {
    439 		/* find an available buffer */
    440 		if ((rp->flags & DBUF_DMCS) == 0) {
    441 			IF_DEQUEUE(&sc->sc_if.if_snd, m);
    442 			if (m == 0)
    443 				return;
    444 			/* mark it dmcs */
    445 			rp->flags |= (DBUF_DMCS);
    446 			/*
    447 			 * Have request mapped to UNIBUS for transmission
    448 			 * and start the output.
    449 			 */
    450 			rp->cc = if_ubaput(&sc->sc_ifuba, &sc->sc_ifw[n], m);
    451 			rp->cc &= DMC_CCOUNT;
    452 			if (++sc->sc_oused == 1)
    453 				sc->sc_if.if_timer = dmc_timeout;
    454 			dmcload(sc, DMC_WRITE, rp->ubinfo,
    455 				rp->cc | ((rp->ubinfo>>2)&DMC_XMEM));
    456 		}
    457 		n++;
    458 	}
    459 }
    460 
    461 /*
    462  * Utility routine to load the DMC device registers.
    463  */
    464 void
    465 dmcload(struct dmc_softc *sc, int type, u_short w0, u_short w1)
    466 {
    467 	struct dmc_command *qp;
    468 	int sps;
    469 
    470 	sps = splnet();
    471 
    472 	/* grab a command buffer from the free list */
    473 	if ((qp = sc->sc_qfreeh) == (struct dmc_command *)0)
    474 		panic("dmc command queue overflow");
    475 	DEQUEUE(sc->sc_qfreeh, sc->sc_qfreet);
    476 
    477 	/* fill in requested info */
    478 	qp->qp_cmd = (type | DMC_RQI);
    479 	qp->qp_ubaddr = w0;
    480 	qp->qp_cc = w1;
    481 
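	/*
	 * If a command is already being handed to the device, queue this
	 * one; READ (receive buffer) requests go to the head of the queue,
	 * presumably so the device is kept supplied with receive buffers
	 * ahead of pending transmits.
	 */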
    482 	if (sc->sc_qactive) {	/* command in progress */
    483 		if (type == DMC_READ) {
    484 			QUEUE_AT_HEAD(qp, sc->sc_qhead, sc->sc_qtail);
    485 		} else {
    486 			QUEUE_AT_TAIL(qp, sc->sc_qhead, sc->sc_qtail);
    487 		}
    488 	} else {	/* command port free */
    489 		sc->sc_qactive = qp;
    490 		DMC_WBYTE(DMC_BSEL0, qp->qp_cmd);
    491 		dmcrint(sc);
    492 	}
    493 	splx(sps);
    494 }
    495 
    496 /*
    497  * DMC interface receiver interrupt.
    498  * Ready to accept another command,
    499  * pull one off the command queue.
    500  */
    501 void
    502 dmcrint(void *arg)
    503 {
    504 	struct dmc_softc *sc = arg;
    505 	struct dmc_command *qp;
    506 	int n;
    507 
    508 	if ((qp = sc->sc_qactive) == (struct dmc_command *) 0) {
    509 		printf("%s: dmcrint no command\n", sc->sc_dev.dv_xname);
    510 		return;
    511 	}
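	/*
	 * Command port handshake: dmcload has set RQI (and the command
	 * code) in BSEL0.  When the device raises RDYI we supply the two
	 * parameter words through SEL4/SEL6 and drop RQI, which tells the
	 * device to take the command.
	 */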
    512 	while (DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) {
    513 		DMC_WWORD(DMC_SEL4, qp->qp_ubaddr);
    514 		DMC_WWORD(DMC_SEL6, qp->qp_cc);
    515 		DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) & ~(DMC_IEI|DMC_RQI));
    516 		/* free command buffer */
    517 		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
    518 		while (DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) {
    519 			/*
    520 			 * Can't check for RDYO here 'cause
    521 			 * this routine isn't reentrant!
    522 			 */
    523 			DELAY(5);
    524 		}
    525 		/* move on to next command */
    526 		if ((sc->sc_qactive = sc->sc_qhead) == (struct dmc_command *)0)
    527 			break;		/* all done */
    528 		/* more commands to do, start the next one */
    529 		qp = sc->sc_qactive;
    530 		DEQUEUE(sc->sc_qhead, sc->sc_qtail);
    531 		DMC_WBYTE(DMC_BSEL0, qp->qp_cmd);
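		/*
		 * Give the device a bounded number of polls to raise either
		 * RDYI (ready for this command's parameters) or RDYO (an
		 * output transfer completed) before going back around.
		 */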
    532 		n = RDYSCAN;
    533 		while (n-- > 0)
    534 			if ((DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) ||
    535 			    (DMC_RBYTE(DMC_BSEL2) & DMC_RDYO))
    536 				break;
    537 	}
    538 	if (sc->sc_qactive) {
    539 		DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) | (DMC_IEI|DMC_RQI));
    540 		/* VMS does it twice !*$%@# */
    541 		DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) | (DMC_IEI|DMC_RQI));
    542 	}
    543 
    544 }
    545 
    546 /*
    547  * DMC interface transmitter interrupt.
    548  * A transfer may have completed, check for errors.
    549  * If it was a read, notify appropriate protocol.
    550  * If it was a write, pull the next one off the queue.
    551  */
    552 void
    553 dmcxint(void *a)
    554 {
    555 	struct dmc_softc *sc = a;
    556 
    557 	struct ifnet *ifp;
    558 	struct mbuf *m;
    559 	struct ifqueue *inq;
    560 	int arg, pkaddr, cmd, len, s;
    561 	struct ifrw *ifrw;
    562 	struct dmcbufs *rp;
    563 	struct ifxmt *ifxp;
    564 	struct dmc_header *dh;
    565 	char buf[64];
    566 
    567 	ifp = &sc->sc_if;
    568 
    569 	while (DMC_RBYTE(DMC_BSEL2) & DMC_RDYO) {
    570 
    571 		cmd = DMC_RBYTE(DMC_BSEL2) & 0xff;
    572 		arg = DMC_RWORD(DMC_SEL6) & 0xffff;
    573 		/* reconstruct UNIBUS address of buffer returned to us */
    574 		pkaddr = ((arg&DMC_XMEM)<<2) | (DMC_RWORD(DMC_SEL4) & 0xffff);
    575 		/* release port */
    576 		DMC_WBYTE(DMC_BSEL2, DMC_RBYTE(DMC_BSEL2) & ~DMC_RDYO);
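		/*
		 * The low bits of BSEL2 say which kind of transfer finished:
		 * a receive buffer (DMC_OUR), a transmit buffer (DMC_OUX),
		 * or a control/status report (DMC_CNTLO).
		 */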
    577 		switch (cmd & 07) {
    578 
    579 		case DMC_OUR:
    580 			/*
    581 			 * A read has completed.
    582 			 * Pass packet to type specific
    583 			 * higher-level input routine.
    584 			 */
    585 			ifp->if_ipackets++;
    586 			/* find location in dmcuba struct */
    587 			ifrw = &sc->sc_ifr[0];
    588 			for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
    589 				if (rp->ubinfo == pkaddr)
    590 					break;
    591 				ifrw++;
    592 			}
    593 			if (rp >= &sc->sc_rbufs[NRCV])
    594 				panic("dmc rcv");
    595 			if ((rp->flags & DBUF_DMCS) == 0)
    596 				printf("%s: done unalloc rbuf\n",
    597 				    sc->sc_dev.dv_xname);
    598 
    599 			len = (arg & DMC_CCOUNT) - sizeof (struct dmc_header);
    600 			if (len < 0 || len > DMCMTU) {
    601 				ifp->if_ierrors++;
    602 #ifdef DMCDEBUG
    603 				printd("%s: bad rcv pkt addr 0x%x len 0x%x\n",
    604 				    sc->sc_dev.dv_xname, pkaddr, len);
    605 #endif
    606 				goto setup;
    607 			}
    608 			/*
    609 			 * Pick up the packet type from the DMC header (it
    610 			 * arrives in network byte order) and recycle the
    611 			 * buffer right away if the packet carries no data.
    612 			 */
    613 			dh = (struct dmc_header *)ifrw->ifrw_addr;
    614 			dh->dmc_type = ntohs((u_short)dh->dmc_type);
    615 			if (len == 0)
    616 				goto setup;
    617 
    618 			/*
    619 			 * Pull the packet off the interface into an mbuf
    620 			 * chain and trim the DMC header from the front
    621 			 * before handing it to the protocol input queue.
    622 			 * If no mbufs are available, just recycle the
    623 			 * receive buffer.
    624 			 */
    625 			m = if_ubaget(&sc->sc_ifuba, ifrw, ifp, len);
    626 			if (m == 0)
    627 				goto setup;
    628 			/* Shave off dmc_header */
    629 			m_adj(m, sizeof(struct dmc_header));
    630 			switch (dh->dmc_type) {
    631 
    632 #ifdef INET
    633 			case DMC_IPTYPE:
    634 				schednetisr(NETISR_IP);
    635 				inq = &ipintrq;
    636 				break;
    637 #endif
    638 			default:
    639 				m_freem(m);
    640 				goto setup;
    641 			}
    642 
    643 			s = splnet();
    644 			if (IF_QFULL(inq)) {
    645 				IF_DROP(inq);
    646 				m_freem(m);
    647 			} else
    648 				IF_ENQUEUE(inq, m);
    649 			splx(s);
    650 
    651 	setup:
    652 			/* is this needed? */
    653 			rp->ubinfo = ifrw->ifrw_info;
    654 
    655 			dmcload(sc, DMC_READ, rp->ubinfo,
    656 			    ((rp->ubinfo >> 2) & DMC_XMEM) | rp->cc);
    657 			break;
    658 
    659 		case DMC_OUX:
    660 			/*
    661 			 * A write has completed, start another
    662 			 * transfer if there is more data to send.
    663 			 */
    664 			ifp->if_opackets++;
    665 			/* find associated dmcbuf structure */
    666 			ifxp = &sc->sc_ifw[0];
    667 			for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
    668 				if (rp->ubinfo == pkaddr)
    669 					break;
    670 				ifxp++;
    671 			}
    672 			if (rp >= &sc->sc_xbufs[NXMT]) {
    673 				printf("%s: bad packet address 0x%x\n",
    674 				    sc->sc_dev.dv_xname, pkaddr);
    675 				break;
    676 			}
    677 			if ((rp->flags & DBUF_DMCS) == 0)
    678 				printf("%s: unallocated packet 0x%x\n",
    679 				    sc->sc_dev.dv_xname, pkaddr);
    680 			/* mark buffer free */
    681 			if_ubaend(&sc->sc_ifuba, ifxp);
    682 			rp->flags &= ~DBUF_DMCS;
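			/*
			 * Re-arm the watchdog while transmits are still
			 * outstanding and cancel it once the last one has
			 * completed.
			 */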
    683 			if (--sc->sc_oused == 0)
    684 				sc->sc_if.if_timer = 0;
    685 			else
    686 				sc->sc_if.if_timer = dmc_timeout;
    687 			if ((sc->sc_flag & DMC_ONLINE) == 0) {
    688 				extern int ifqmaxlen;
    689 
    690 				/*
    691 				 * We're on the air.
    692 				 * Open the queue to the usual value.
    693 				 */
    694 				sc->sc_flag |= DMC_ONLINE;
    695 				ifp->if_snd.ifq_maxlen = ifqmaxlen;
    696 			}
    697 			break;
    698 
    699 		case DMC_CNTLO:
    700 			arg &= DMC_CNTMASK;
    701 			if (arg & DMC_FATAL) {
    702 				if (arg != DMC_START) {
    703 					bitmask_snprintf(arg, CNTLO_BITS,
    704 					    buf, sizeof(buf));
    705 					log(LOG_ERR,
    706 					    "%s: fatal error, flags=%s\n",
    707 					    sc->sc_dev.dv_xname, buf);
    708 				}
    709 				dmcrestart(sc);
    710 				break;
    711 			}
    712 			/* ACCUMULATE STATISTICS */
    713 			switch(arg) {
    714 			case DMC_NOBUFS:
    715 				ifp->if_ierrors++;
    716 				if ((sc->sc_nobuf++ % DMC_RPNBFS) == 0)
    717 					goto report;
    718 				break;
    719 			case DMC_DISCONN:
    720 				if ((sc->sc_disc++ % DMC_RPDSC) == 0)
    721 					goto report;
    722 				break;
    723 			case DMC_TIMEOUT:
    724 				if ((sc->sc_timeo++ % DMC_RPTMO) == 0)
    725 					goto report;
    726 				break;
    727 			case DMC_DATACK:
    728 				ifp->if_oerrors++;
    729 				if ((sc->sc_datck++ % DMC_RPDCK) == 0)
    730 					goto report;
    731 				break;
    732 			default:
    733 				goto report;
    734 			}
    735 			break;
    736 		report:
    737 #ifdef DMCDEBUG
    738 			bitmask_snprintf(arg, CNTLO_BITS, buf, sizeof(buf));
    739 			printd("%s: soft error, flags=%s\n",
    740 			    sc->sc_dev.dv_xname, buf);
    741 #endif
    742 			if ((sc->sc_flag & DMC_RESTART) == 0) {
    743 				/*
    744 				 * kill off the dmc to get things
    745 				 * going again by generating a
    746 				 * procedure error
    747 				 */
    748 				sc->sc_flag |= DMC_RESTART;
    749 				arg = sc->sc_ui.ui_baddr;
    750 				dmcload(sc, DMC_BASEI, arg, (arg>>2)&DMC_XMEM);
    751 			}
    752 			break;
    753 
    754 		default:
    755 			printf("%s: bad control %o\n",
    756 			    sc->sc_dev.dv_xname, cmd);
    757 			break;
    758 		}
    759 	}
    760 	dmcstart(ifp);
    761 }
    762 
    763 /*
    764  * DMC output routine.
    765  * Encapsulate a packet of type family for the dmc.
    766  * A dmc_header carrying the packet type is prepended; the packet is
    767  * then queued on the interface output queue and output is started.
    768  */
    769 int
    770 dmcoutput(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
    771     struct rtentry *rt)
    772 {
    773 	int type, error, s;
    774 	struct mbuf *m = m0;
    775 	struct dmc_header *dh;
    776 
    777 	if ((ifp->if_flags & IFF_UP) == 0) {
    778 		error = ENETDOWN;
    779 		goto bad;
    780 	}
    781 
    782 	switch (dst->sa_family) {
    783 #ifdef	INET
    784 	case AF_INET:
    785 		type = DMC_IPTYPE;
    786 		break;
    787 #endif
    788 
    789 	case AF_UNSPEC:
    790 		dh = (struct dmc_header *)dst->sa_data;
    791 		type = dh->dmc_type;
    792 		break;
    793 
    794 	default:
    795 		printf("%s: can't handle af%d\n", ifp->if_xname,
    796 			dst->sa_family);
    797 		error = EAFNOSUPPORT;
    798 		goto bad;
    799 	}
    800 
    801 	/*
    802 	 * Add local network header
    803 	 * (there is space for a uba on a vax to step on)
    804 	 */
    805 	M_PREPEND(m, sizeof(struct dmc_header), M_DONTWAIT);
    806 	if (m == 0) {
    807 		error = ENOBUFS;
    808 		goto bad;
    809 	}
    810 	dh = mtod(m, struct dmc_header *);
    811 	dh->dmc_type = htons((u_short)type);
    812 
    813 	/*
    814 	 * Queue message on interface, and start output if interface
    815 	 * not yet active.
    816 	 */
    817 	s = splnet();
    818 	if (IF_QFULL(&ifp->if_snd)) {
    819 		IF_DROP(&ifp->if_snd);
    820 		m_freem(m);
    821 		splx(s);
    822 		return (ENOBUFS);
    823 	}
    824 	IF_ENQUEUE(&ifp->if_snd, m);
    825 	dmcstart(ifp);
    826 	splx(s);
    827 	return (0);
    828 
    829 bad:
    830 	m_freem(m0);
    831 	return (error);
    832 }
    833 
    834 
    835 /*
    836  * Process an ioctl request.
    837  */
    838 /* ARGSUSED */
    839 int
    840 dmcioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
    841 {
    842 	int s = splnet(), error = 0;
    843 	struct dmc_softc *sc = ifp->if_softc;
    844 
    845 	switch (cmd) {
    846 
    847 	case SIOCSIFADDR:
    848 		ifp->if_flags |= IFF_UP;
    849 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    850 			dmcinit(ifp);
    851 		break;
    852 
    853 	case SIOCSIFDSTADDR:
    854 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    855 			dmcinit(ifp);
    856 		break;
    857 
    858 	case SIOCSIFFLAGS:
    859 		if ((ifp->if_flags & IFF_UP) == 0 &&
    860 		    sc->sc_flag & DMC_RUNNING)
    861 			dmcdown(sc);
    862 		else if (ifp->if_flags & IFF_UP &&
    863 		    (sc->sc_flag & DMC_RUNNING) == 0)
    864 			dmcrestart(sc);
    865 		break;
    866 
    867 	default:
    868 		error = EINVAL;
    869 	}
    870 	splx(s);
    871 	return (error);
    872 }
    873 
    874 /*
    875  * Restart after a fatal error.
    876  * Clear device and reinitialize.
    877  */
    878 void
    879 dmcrestart(struct dmc_softc *sc)
    880 {
    881 	int s, i;
    882 
    883 #ifdef DMCDEBUG
    884 	/* dump base table */
    885 	printf("%s base table:\n", sc->sc_dev.dv_xname);
    886 	for (i = 0; i < sizeof(struct dmc_base) / sizeof(short); i++)
    887 		printf("%o\n", sc->dmc_base.d_base[i]);
    888 #endif
    889 
    890 	dmcdown(sc);
    891 
    892 	/*
    893 	 * Let the DMR finish the MCLR.	 At 1 Mbit, it should do so
    894 	 * in about a max of 6.4 milliseconds with diagnostics enabled.
    895 	 */
    896 	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
    897 		;
    898 	/* Did the timer expire or did the DMR finish? */
    899 	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
    900 		log(LOG_ERR, "%s: M820 Test Failed\n", sc->sc_dev.dv_xname);
    901 		return;
    902 	}
    903 
    904 	/* restart DMC */
    905 	dmcinit(&sc->sc_if);
    906 	sc->sc_flag &= ~DMC_RESTART;
    907 	s = splnet();
    908 	dmcstart(&sc->sc_if);
    909 	splx(s);
    910 	sc->sc_if.if_collisions++;	/* why not? */
    911 }
    912 
    913 /*
    914  * Reset a device and mark down.
    915  * Flush output queue and drop queue limit.
    916  */
    917 void
    918 dmcdown(struct dmc_softc *sc)
    919 {
    920 	struct ifxmt *ifxp;
    921 
    922 	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
    923 	sc->sc_flag &= ~(DMC_RUNNING | DMC_ONLINE);
    924 
    925 	for (ifxp = sc->sc_ifw; ifxp < &sc->sc_ifw[NXMT]; ifxp++) {
    926 #ifdef notyet
    927 		if (ifxp->ifw_xtofree) {
    928 			(void) m_freem(ifxp->ifw_xtofree);
    929 			ifxp->ifw_xtofree = 0;
    930 		}
    931 #endif
    932 	}
    933 	IF_PURGE(&sc->sc_if.if_snd);
    934 }
    935 
    936 /*
    937  * Watchdog timeout to see that transmitted packets don't
    938  * lose interrupts.  The device has to be online (the first
    939  * transmission may block until the other side comes up).
    940  */
    941 void
    942 dmctimeout(struct ifnet *ifp)
    943 {
    944 	struct dmc_softc *sc = ifp->if_softc;
    945 	char buf1[64], buf2[64];
    946 
    947 	if (sc->sc_flag & DMC_ONLINE) {
    948 		bitmask_snprintf(DMC_RBYTE(DMC_BSEL0) & 0xff, DMC0BITS,
    949 		    buf1, sizeof(buf1));
    950 		bitmask_snprintf(DMC_RBYTE(DMC_BSEL2) & 0xff, DMC2BITS,
    951 		    buf2, sizeof(buf2));
    952 		log(LOG_ERR, "%s: output timeout, bsel0=%s bsel2=%s\n",
    953 		    sc->sc_dev.dv_xname, buf1, buf2);
    954 		dmcrestart(sc);
    955 	}
    956 }
    957