/*      $NetBSD: if_qe.c,v 1.41 2000/06/04 06:17:03 matt Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Luleå, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEQNA/DELQA ethernet cards.
 * Things still left to do:
 *	Have a timeout check for hung transmit logic.
 *	Handle ubaresets; this does not work at all right now.
 *	Fix ALLMULTI reception. But someone must tell me how...
 *	Collect statistics.
 */

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <machine/bus.h>

#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_qereg.h>

#include "ioconf.h"

#define RXDESCS	30	/* # of receive descriptors */
#define TXDESCS	60	/* # of transmit descriptors */

/*
 * Structure containing the elements that must be in DMA-safe memory.
 */
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	u_int8_t	qc_setup[128];		/* Setup packet layout */
};
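
/*
 * Note that each ring has one descriptor more than the driver uses:
 * qeattach() turns the extra one into a permanent QE_CHAIN entry that
 * points back to the start of the ring, so the device follows the
 * descriptor list as a loop.
 *
 * A sketch of the 128-byte setup "packet" layout, as deduced from
 * qe_setup() below (the DEQNA manual is the authoritative reference):
 * the buffer is two 64-byte halves, each an 8-column byte matrix in
 * which column j (j = 1..7) can hold one target ethernet address,
 * with byte i of that address at offset i * 8 + j.
 */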

struct	qe_softc {
	struct device	sc_dev;		/* Configuration common part	*/
	struct evcnt	sc_intrcnt;	/* Interrupt counting		*/
	struct ethercom sc_ec;		/* Ethernet common part		*/
#define sc_if	sc_ec.ec_if		/* network-visible interface	*/
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct qe_cdata *sc_qedata;	/* Descriptor struct		*/
	struct qe_cdata *sc_pqedata;	/* Unibus address of above	*/
	bus_dmamap_t	sc_cmap;	/* Map for control structures	*/
	struct mbuf*	sc_txmbuf[TXDESCS];
	struct mbuf*	sc_rxmbuf[RXDESCS];
	bus_dmamap_t	sc_xmtmap[TXDESCS];
	bus_dmamap_t	sc_rcvmap[RXDESCS];
	int		sc_intvec;	/* Interrupt vector		*/
	int		sc_nexttx;
	int		sc_inq;
	int		sc_lastack;
	int		sc_nextrx;
	int		sc_setup;	/* Setup packet in queue	*/
};
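
/*
 * Transmit bookkeeping: sc_nexttx is the next free descriptor slot,
 * sc_lastack is the oldest descriptor not yet acknowledged by the
 * device, and sc_inq counts the descriptors currently owned by the
 * device; together they describe the in-flight window of the
 * (circular) transmit ring.
 */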

static	int	qematch __P((struct device *, struct cfdata *, void *));
static	void	qeattach __P((struct device *, struct device *, void *));
static	void	qeinit __P((struct qe_softc *));
static	void	qestart __P((struct ifnet *));
static	void	qeintr __P((void *));
static	int	qeioctl __P((struct ifnet *, u_long, caddr_t));
static	int	qe_add_rxbuf __P((struct qe_softc *, int));
static	void	qe_setup __P((struct qe_softc *));
static	void	qetimeout __P((struct ifnet *));

struct	cfattach qe_ca = {
	sizeof(struct qe_softc), qematch, qeattach
};

#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

#define	LOWORD(x)	((int)(x) & 0xffff)
#define	HIWORD(x)	(((int)(x) >> 16) & 0x3f)

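/*
 * Qbus DMA addresses are 22 bits wide, so a descriptor carries them
 * as two words: LOWORD() is the low 16 bits and HIWORD() the
 * remaining six (hence the 0x3f mask).  The unused high bits of the
 * second word hold the descriptor flags (QE_VALID, QE_CHAIN etc.)
 * that are or'ed in below.
 */
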
/*
 * Check for a present DEQNA. Done by sending a fake setup packet
 * and waiting for an interrupt.
 */
int
qematch(parent, cf, aux)
	struct	device *parent;
	struct	cfdata *cf;
	void	*aux;
{
	bus_dmamap_t	cmap;
	struct	qe_softc ssc;
	struct	qe_softc *sc = &ssc;
	struct	uba_attach_args *ua = aux;
	struct	uba_softc *ubasc = (struct uba_softc *)parent;

#define	PROBESIZE	(sizeof(struct qe_ring) * 4 + 128)
	struct	qe_ring ring[15]; /* For diag purposes only */
	struct	qe_ring *rp;
	int error;

	bzero(sc, sizeof(struct qe_softc));
	bzero(ring, PROBESIZE);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	ubasc->uh_lastiv -= 4;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, ubasc->uh_lastiv);

	/*
	 * Map the ring area. Actually this is done only to be able to
	 * send and receive an internal packet; some junk is looped back
	 * so that the DEQNA has a reason to interrupt.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, PROBESIZE, 1, PROBESIZE, 0,
	    BUS_DMA_NOWAIT, &cmap))) {
		printf("qematch: bus_dmamap_create failed = %d\n", error);
		return 0;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, cmap, ring, PROBESIZE, 0,
	    BUS_DMA_NOWAIT))) {
		printf("qematch: bus_dmamap_load failed = %d\n", error);
		bus_dmamap_destroy(sc->sc_dmat, cmap);
		return 0;
	}

	/*
	 * Init a simple "fake" receive and transmit descriptor that
	 * points to some unused area. Send a fake setup packet.
	 */
	rp = (void *)cmap->dm_segs[0].ds_addr;
	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
	ring[0].qe_addr_lo = LOWORD(&rp[4]);
	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
	ring[0].qe_buf_len = 128;

	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
	ring[2].qe_addr_lo = LOWORD(&rp[4]);
	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
	ring[2].qe_buf_len = 128;

	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	DELAY(1000);

	/*
	 * Start the interface and wait for the packet.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
	DELAY(10000);

	/*
	 * All done with the bus resources.
	 */
	bus_dmamap_unload(sc->sc_dmat, cmap);
	bus_dmamap_destroy(sc->sc_dmat, cmap);
	return 1;
}

/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
void
qeattach(parent, self, aux)
	struct	device *parent, *self;
	void	*aux;
{
	struct	uba_attach_args *ua = aux;
	struct	uba_softc *ubasc = (struct uba_softc *)parent;
	struct	qe_softc *sc = (struct qe_softc *)self;
	struct	ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct	qe_ring *rp;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	bus_dma_segment_t seg;
	int i, rseg, error;

	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct qe_cdata), NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct qe_cdata), (caddr_t *)&sc->sc_qedata,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct qe_cdata), 1,
	    sizeof(struct qe_cdata), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cmap)) != 0) {
		printf(": unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap,
	    sc->sc_qedata, sizeof(struct qe_cdata), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Zero the newly allocated memory.
	 */
	bzero(sc->sc_qedata, sizeof(struct qe_cdata));
	/*
	 * Create the transmit descriptor DMA maps. We take advantage
	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors up front,
	 * so that we do not have to do it each time we send a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_cmap->dm_segs[0].ds_addr;

	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;

	/*
	 * Get the vector that was set at match time, and remember it.
	 */
	sc->sc_intvec = ubasc->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);

	/*
	 * Read out the ethernet address and report which type of card
	 * this is.
	 */
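	/*
	 * The station address is presumably read from the address ROM,
	 * which shows up one byte at a time in the low half of the six
	 * low-numbered CSR words (hence the i * 2 word offsets below).
	 */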
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;

	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	printf("\n%s: %s, hardware address %s\n", sc->sc_dev.dv_xname,
		QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa":"deqna",
		ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
		sc, &sc->sc_intrcnt);
	evcnt_attach(&sc->sc_dev, "intr", &sc->sc_intrcnt);

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB, sizeof(struct ether_header));
#endif
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_qedata,
	    sizeof(struct qe_cdata));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * Initialization of interface.
 */
void
qeinit(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;

	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = 0;
		}
		qc->qc_xmit[i].qe_addr_hi = 0; /* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}

	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;

	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);
}

/*
 * Start output on interface.
 */
void
qestart(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t	buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, error;
	short orword;

	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splimp();
	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IF_DEQUEUE(&sc->sc_if.if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count the number of mbufs in the chain.
		 * We always do DMA directly from the mbufs, which is why
		 * the transmit ring is really big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (i >= TXDESCS)
			panic("qestart");

		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			IF_PREPEND(&sc->sc_if.if_snd, m);
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/*
		 * m now points to an mbuf chain that can be loaded.
		 * Loop over it and set up a descriptor for each buffer.
		 */
		totlen = 0;
		for (m0 = m; m0; m0 = m0->m_next) {
			error = bus_dmamap_load(sc->sc_dmat, sc->sc_xmtmap[idx],
			    mtod(m0, void *), m0->m_len, 0, 0);
			buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
			len = m0->m_len;
			if (len == 0)
				continue;

			totlen += len;
			/* Word alignment calc */
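			/*
			 * The descriptor hands the buffer to the chip in
			 * 16-bit words, hence the negative word count and
			 * the rounding up of odd lengths below.  Our
			 * reading of QE_ODDBEGIN/QE_ODDEND is that they
			 * mark buffers that begin/end on an odd byte
			 * address; the DEQNA manual is the authoritative
			 * reference for these flags.
			 */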
			orword = 0;
			if (totlen == m->m_pkthdr.len) {
				if (totlen < ETHER_MIN_LEN)
					len += (ETHER_MIN_LEN - totlen);
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
		}
#ifdef DIAGNOSTIC
		if (totlen != m->m_pkthdr.len)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}

static void
qeintr(arg)
	void *arg;
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct ether_header *eh;
	struct mbuf *m;
	int csr, status1, status2, len;

	csr = QE_RCSR(QE_CSR_CSR);

	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);

	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;
			m = sc->sc_rxmbuf[sc->sc_nextrx];
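			/*
			 * The received byte count appears to be stored
			 * relative to the 60-byte minimum frame size,
			 * hence the + 60 below -- an assumption drawn
			 * from this code; see the DEQNA manual for the
			 * exact meaning of the RBL field.
			 */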
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
			if (ifp->if_bpf) {
				bpf_mtap(ifp->if_bpf, m);
				if ((ifp->if_flags & IFF_PROMISC) != 0 &&
				    bcmp(LLADDR(ifp->if_sadl), eh->ether_dhost,
				    ETHER_ADDR_LEN) != 0 &&
				    ((eh->ether_dhost[0] & 1) == 0)) {
					m_freem(m);
					continue;
				}
			}
#endif
			/*
			 * ALLMULTI means PROMISC in this driver.
			 */
			if ((ifp->if_flags & IFF_ALLMULTI) &&
			    ((eh->ether_dhost[0] & 1) == 0) &&
			    bcmp(LLADDR(ifp->if_sadl), eh->ether_dhost,
			    ETHER_ADDR_LEN)) {
				m_freem(m);
				continue;
			}
			(*ifp->if_input)(ifp, m);
		}

	if (csr & QE_XMIT_INT) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = 0;
			}
		}
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp); /* Put more packets in the queue */
	}
	/*
	 * How can the receive list become invalid???
	 * Verified that it happens anyway.
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}

/*
 * Process an ioctl request.
 */
int
qeioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If the interface is marked down and it is
			 * running, stop it by disabling the receive
			 * mechanism.
			 */
			QE_WCSR(QE_CSR_CSR,
			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If the interface is marked up and it is stopped,
			 * then start it.
			 */
			qeinit(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Send a new setup packet to match any new changes
			 * (like IFF_PROMISC etc).
			 */
			qe_setup(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		error = (cmd == SIOCADDMULTI) ?
			ether_addmulti(ifr, &sc->sc_ec):
			ether_delmulti(ifr, &sc->sc_ec);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			qe_setup(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
qe_add_rxbuf(sc, i)
	struct qe_softc *sc;
	int i;
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    sc->sc_dev.dv_xname, i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
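	/* Buffer lengths are apparently given to the chip as negative
	   word counts. */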
	rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;

	return (0);
}

/*
 * Create a setup packet and put it in the queue for sending.
 */
void
qe_setup(sc)
	struct qe_softc *sc;
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	int i, j, k, idx, s;

	s = splimp();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */

	/*
	 * Multicast handling. The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
	j = 3; k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1; k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA put into ALLMULTI mode???
	 * Until someone tells me, fall back to PROMISC when there are
	 * more than 12 ethernet addresses.
	 */
	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
		qc->qc_xmit[idx].qe_buf_len = -65;
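	/*
	 * The -65 makes the setup transfer one word longer than the
	 * actual 128-byte buffer; our understanding is that the device
	 * encodes mode bits such as promiscuous reception in the length
	 * of the setup frame, so the extra word is what flips the mode.
	 * This is an assumption drawn from this code -- see the DEQNA
	 * manual for the exact encoding.
	 */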

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
qetimeout(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	printf("%s: xmit logic died, resetting...\n", sc->sc_dev.dv_xname);
	/*
	 * Reset the interface to get it going again.
	 * Would it work to just restart the transmit logic?
	 */
	qeinit(sc);
}