/*	$NetBSD: if_qe.c,v 1.69 2009/03/18 16:00:20 cegger Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Lule}, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Lule}, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEQNA/DELQA Ethernet cards.
 * Things that still need to be done:
 *	Handle ubaresets. Does not work at all right now.
 *	Fix ALLMULTI reception. But someone must tell me how...
 *	Collect statistics.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_qe.c,v 1.69 2009/03/18 16:00:20 cegger Exp $");

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <sys/bus.h>

#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_qereg.h>

#include "ioconf.h"

#define RXDESCS	30	/* # of receive descriptors */
#define TXDESCS	60	/* # transmit descs */
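
/*
 * The transmit ring is larger than the receive ring because packets
 * are DMAed directly from their mbuf chains, one descriptor per mbuf.
 */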

/*
 * Structure containing the elements that must be in DMA-safe memory.
 */
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	u_int8_t	qc_setup[128];		/* Setup packet layout */
};
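
/*
 * Each ring has one extra descriptor: qeattach() initializes it as a
 * chain descriptor (QE_CHAIN) pointing back to the start of the ring,
 * so the hardware wraps around by itself.
 */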

struct	qe_softc {
	device_t	sc_dev;		/* Configuration common part	*/
	struct uba_softc *sc_uh;	/* our parent */
	struct evcnt	sc_intrcnt;	/* Interrupt counting		*/
	struct ethercom sc_ec;		/* Ethernet common part		*/
#define sc_if	sc_ec.ec_if		/* network-visible interface	*/
	bus_space_tag_t sc_iot;
	bus_addr_t	sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct qe_cdata *sc_qedata;	/* Descriptor struct		*/
	struct qe_cdata *sc_pqedata;	/* Unibus address of above	*/
	struct mbuf*	sc_txmbuf[TXDESCS];
	struct mbuf*	sc_rxmbuf[RXDESCS];
	bus_dmamap_t	sc_xmtmap[TXDESCS];
	bus_dmamap_t	sc_rcvmap[RXDESCS];
	bus_dmamap_t	sc_nulldmamap;	/* ethernet padding buffer	*/
	struct ubinfo	sc_ui;
	int		sc_intvec;	/* Interrupt vector		*/
	int		sc_nexttx;	/* Next free transmit descriptor */
	int		sc_inq;		/* Descriptors handed to the chip */
	int		sc_lastack;	/* Next transmit desc to reclaim */
	int		sc_nextrx;	/* Next receive desc to check	*/
	int		sc_setup;	/* Setup packet in queue	*/
};

static	int	qematch(device_t, cfdata_t, void *);
static	void	qeattach(device_t, device_t, void *);
static	void	qeinit(struct qe_softc *);
static	void	qestart(struct ifnet *);
static	void	qeintr(void *);
static	int	qeioctl(struct ifnet *, u_long, void *);
static	int	qe_add_rxbuf(struct qe_softc *, int);
static	void	qe_setup(struct qe_softc *);
static	void	qetimeout(struct ifnet *);

CFATTACH_DECL_NEW(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);

#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

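/*
 * Unibus/Q-bus addresses are 22 bits wide; the descriptors and the
 * list address CSRs take them split into a low 16-bit word and a
 * high 6-bit word.
 */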
#define	LOWORD(x)	((int)(x) & 0xffff)
#define	HIWORD(x)	(((int)(x) >> 16) & 0x3f)

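/*
 * Frames shorter than the Ethernet minimum (less CRC, which the chip
 * appends) must be padded; qestart() chains in a zero-filled pad
 * buffer for that.
 */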
#define	ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)

/*
 * Check for a present DEQNA. Done by sending a fake setup packet
 * and waiting for an interrupt.
 */
int
qematch(device_t parent, cfdata_t cf, void *aux)
{
	struct	qe_softc ssc;
	struct	qe_softc *sc = &ssc;
	struct	uba_attach_args *ua = aux;
	struct	uba_softc *uh = device_private(parent);
	struct ubinfo ui;

#define	PROBESIZE	4096
	struct qe_ring *ring;
	struct	qe_ring *rp;
	int error;

	ring = malloc(PROBESIZE, M_TEMP, M_WAITOK|M_ZERO);
	memset(sc, 0, sizeof(*sc));
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

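	/*
	 * Grab a fresh interrupt vector; the uba framework hands them
	 * out downwards from uh_lastiv.
	 */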
	uh->uh_lastiv -= 4;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, uh->uh_lastiv);

	/*
	 * Map the ring area. Actually this is done only to be able to
	 * send and receive an internal packet; some junk is looped back
	 * so that the DEQNA has a reason to interrupt.
	 */
	ui.ui_size = PROBESIZE;
	ui.ui_vaddr = (void *)&ring[0];
	if ((error = uballoc(uh, &ui, UBA_CANTWAIT))) {
		free(ring, M_TEMP);
		return 0;
	}

	/*
	 * Init a simple "fake" receive and transmit descriptor that
	 * points to some unused area. Send a fake setup packet.
	 */
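	/*
	 * Buffer lengths in the descriptors are given as negative word
	 * (16-bit) counts, hence the -64 and -(1500/2) below.
	 */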
	rp = (void *)ui.ui_baddr;
	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
	ring[0].qe_addr_lo = LOWORD(&rp[4]);
	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
	ring[0].qe_buf_len = -64;

	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
	ring[2].qe_addr_lo = LOWORD(&rp[4]);
	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
	ring[2].qe_buf_len = -(1500/2);

	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	DELAY(1000);

	/*
	 * Start the interface and wait for the packet.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
	DELAY(10000);

	/*
	 * All done with the bus resources.
	 */
	ubfree(uh, &ui);
	free(ring, M_TEMP);
	return 1;
}

/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
void
qeattach(device_t parent, device_t self, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct qe_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_if;
	struct qe_ring *rp;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	int i, error;
	char *nullbuf;

	sc->sc_dev = self;
	sc->sc_uh = device_private(parent);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */

	sc->sc_ui.ui_size = sizeof(struct qe_cdata) + ETHER_PAD_LEN;
	if ((error = ubmemalloc(sc->sc_uh, &sc->sc_ui, 0))) {
		aprint_error(": unable to ubmemalloc(), error = %d\n", error);
		return;
	}
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
	sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;

	/*
	 * Zero the newly allocated memory.
	 */
	memset(sc->sc_qedata, 0, sizeof(struct qe_cdata) + ETHER_PAD_LEN);
	nullbuf = ((char*)sc->sc_qedata) + sizeof(struct qe_cdata);
	/*
	 * Create the transmit descriptor DMA maps.  We take advantage
	 * of the fact that the Qbus address space is big, and allocate
	 * map registers for all transmit descriptors up front, so that
	 * we do not have to do it each time we send a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			aprint_error(
			    ": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			aprint_error(
			    ": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			aprint_error(
			    ": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		aprint_error(
		    ": unable to create pad buffer DMA map, error = %d\n",
		    error);
		goto fail_6;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(
		    ": unable to load pad buffer DMA map, error = %d\n",
		    error);
		goto fail_7;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */

	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;

	/*
	 * Get the vector that was set at match time, and remember it.
	 */
	sc->sc_intvec = sc->sc_uh->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);

	/*
	 * Read out ethernet address and tell which type this card is.
	 */
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;

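	/*
	 * Writing the low bit of the vector register and reading it back
	 * apparently tells a DELQA (where the bit sticks) from a DEQNA
	 * (where it reads back as zero).
	 */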
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	aprint_normal(": %s, hardware address %s\n",
		QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa":"deqna",
		ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
		sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
		device_xname(sc->sc_dev), "intr");

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
}

/*
 * Initialization of interface.
 */
void
qeinit(struct qe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;

	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
		qc->qc_xmit[i].qe_addr_hi = 0; /* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}

	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;

	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);
}

/*
 * Start output on interface.
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t	buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, buflen, error;
	short orword, csr;

	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splnet();
	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			goto out;
		/*
		 * Count the number of mbufs in the chain.
		 * We always DMA directly from the mbufs, which is why the
		 * transmit ring is so big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (m->m_pkthdr.len < ETHER_PAD_LEN) {
			buflen = ETHER_PAD_LEN;
			i++;
		} else
			buflen = m->m_pkthdr.len;
		if (i >= TXDESCS)
			panic("qestart");

		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/*
		 * m now points to an mbuf chain that can be loaded.
		 * Loop over the chain and build one transmit descriptor
		 * per mbuf; a short packet gets an extra descriptor that
		 * points at the zero-filled pad buffer.
		 */
		totlen = 0;
		for (m0 = m; ; m0 = m0->m_next) {
			if (m0) {
				if (m0->m_len == 0)
					continue;
				error = bus_dmamap_load(sc->sc_dmat,
				    sc->sc_xmtmap[idx], mtod(m0, void *),
				    m0->m_len, 0, 0);
				buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
				len = m0->m_len;
			} else if (totlen < ETHER_PAD_LEN) {
				buffer = sc->sc_nulldmamap->dm_segs[0].ds_addr;
				len = ETHER_PAD_LEN - totlen;
			} else {
				break;
			}

			totlen += len;
			/*
			 * The chip transfers 16-bit words; flag descriptors
			 * whose buffers begin or end on an odd byte.
			 */
			orword = 0;
			if (totlen == buflen) {
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
			if (m0 == NULL)
				break;
		}
#ifdef DIAGNOSTIC
		if (totlen != buflen)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		csr = QE_RCSR(QE_CSR_CSR);
		if (csr & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}

static void
qeintr(void *arg)
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, status1, status2, len;

	csr = QE_RCSR(QE_CSR_CSR);

	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);

	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;

			m = sc->sc_rxmbuf[sc->sc_nextrx];
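			/*
			 * The status words report the frame length less
			 * 60 bytes, so add those back in.
			 */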
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif
			if ((status1 & QE_ESETUP) == 0)
				(*ifp->if_input)(ifp, m);
			else
				m_freem(m);
		}

	if (csr & (QE_XMIT_INT|QE_XL_INVALID)) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			if (sc->sc_txmbuf[idx] == NULL ||
			    sc->sc_txmbuf[idx]->m_pkthdr.len < ETHER_PAD_LEN)
				bus_dmamap_unload(sc->sc_dmat,
				    sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = NULL;
			}
		}
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp); /* Put more in the queue */
	}
	/*
	 * How can the receive list get invalid???
	 * Verified that it happens anyway.
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}

/*
 * Process an ioctl request.
 */
int
qeioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		switch(ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* XXX re-use ether_ioctl() */
		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If the interface is marked down and it is running,
			 * stop it (by disabling the receive mechanism).
			 */
			QE_WCSR(QE_CSR_CSR,
			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If the interface is marked up and it is stopped,
			 * then start it.
			 */
			qeinit(sc);
			break;
		case IFF_UP|IFF_RUNNING:
			/*
			 * Send a new setup packet to match any new changes
			 * (like IFF_PROMISC etc).
			 */
			qe_setup(sc);
			break;
		case 0:
			break;
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				qe_setup(sc);
			error = 0;
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
qe_add_rxbuf(struct qe_softc *sc, int i)
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    device_xname(sc->sc_dev), i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
	rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;

	return (0);
}

/*
 * Create a setup packet and put it in the queue for sending.
 */
void
qe_setup(struct qe_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	int i, j, k, idx, s;

	s = splnet();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
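	/*
	 * The setup packet is an 8-column by 16-row byte array: byte i
	 * of an Ethernet address goes into row i, one address per column.
	 * Column 1 of the first 64-byte half holds our own address; all
	 * other slots start out as the broadcast address (0xff) and are
	 * overwritten with multicast addresses below as needed.
	 */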
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
	memcpy(enaddr, CLLADDR(ifp->if_sadl), sizeof(enaddr));
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */

	/*
	 * Multicast handling. The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
	j = 3; k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1; k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA turned into ALLMULTI mode???
	 * Until someone tells me, fall back to PROMISC when there are
	 * more than 12 ethernet addresses.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;
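	/*
	 * A setup packet length of 65 words rather than 64 apparently
	 * puts the chip into promiscuous reception.
	 */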
	if (ifp->if_flags & IFF_PROMISC)
		qc->qc_xmit[idx].qe_buf_len = -65;

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
qetimeout(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	aprint_error_dev(sc->sc_dev, "xmit logic died, resetting...\n");
	/*
	 * Reset the interface to get it going again.
	 * Would it work to just restart the transmit logic?
	 */
	qeinit(sc);
}