if_qe.c revision 1.82
      1 /*      $NetBSD: if_qe.c,v 1.82 2024/03/25 05:37:45 mrg Exp $ */
      2 /*
       3  * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
      4  *
      5  * Redistribution and use in source and binary forms, with or without
      6  * modification, are permitted provided that the following conditions
      7  * are met:
      8  * 1. Redistributions of source code must retain the above copyright
      9  *    notice, this list of conditions and the following disclaimer.
     10  * 2. Redistributions in binary form must reproduce the above copyright
     11  *    notice, this list of conditions and the following disclaimer in the
     12  *    documentation and/or other materials provided with the distribution.
     13  *
     14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     15  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     16  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     17  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     18  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     19  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     20  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     21  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     23  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     24  */
     25 
     26 /*
     27  * Driver for DEQNA/DELQA ethernet cards.
      28  * Things that still need to be done:
      29  *	Handle ubaresets; this does not work at all right now.
      30  *	Fix ALLMULTI reception, but someone must tell me how...
     31  *	Collect statistics.
     32  */
     33 
     34 #include <sys/cdefs.h>
     35 __KERNEL_RCSID(0, "$NetBSD: if_qe.c,v 1.82 2024/03/25 05:37:45 mrg Exp $");
     36 
     37 #include "opt_inet.h"
     38 
     39 #include <sys/param.h>
     40 #include <sys/mbuf.h>
     41 #include <sys/socket.h>
     42 #include <sys/device.h>
     43 #include <sys/systm.h>
     44 #include <sys/sockio.h>
     45 
     46 #include <net/if.h>
     47 #include <net/if_ether.h>
     48 #include <net/if_dl.h>
     49 #include <net/bpf.h>
     50 
     51 #include <netinet/in.h>
     52 #include <netinet/if_inarp.h>
     53 
     54 #include <sys/bus.h>
     55 
     56 #include <dev/qbus/ubavar.h>
     57 #include <dev/qbus/if_qereg.h>
     58 
     59 #include "ioconf.h"
     60 
     61 #define RXDESCS	30	/* # of receive descriptors */
      62 #define TXDESCS	60	/* # of transmit descriptors */
     63 
     64 /*
     65  * Structure containing the elements that must be in DMA-safe memory.
     66  */
     67 struct qe_cdata {
     68 	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
     69 	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
     70 	uint8_t	qc_setup[128];		/* Setup packet layout */
     71 };
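         /*
          * The extra (+1) entry in each ring is the chain descriptor that
          * links the list back to its start; see qeattach().
          */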
     72 
     73 struct	qe_softc {
     74 	device_t	sc_dev;		/* Configuration common part	*/
     75 	struct uba_softc *sc_uh;	/* our parent */
     76 	struct evcnt	sc_intrcnt;	/* Interrupt counting		*/
     77 	struct ethercom sc_ec;		/* Ethernet common part		*/
     78 #define sc_if	sc_ec.ec_if		/* network-visible interface	*/
     79 	bus_space_tag_t sc_iot;
     80 	bus_addr_t	sc_ioh;
     81 	bus_dma_tag_t	sc_dmat;
     82 	struct qe_cdata *sc_qedata;	/* Descriptor struct		*/
     83 	struct qe_cdata *sc_pqedata;	/* Unibus address of above	*/
     84 	struct mbuf*	sc_txmbuf[TXDESCS];
     85 	struct mbuf*	sc_rxmbuf[RXDESCS];
     86 	bus_dmamap_t	sc_xmtmap[TXDESCS];
     87 	bus_dmamap_t	sc_rcvmap[RXDESCS];
     88 	bus_dmamap_t	sc_nulldmamap;	/* ethernet padding buffer	*/
     89 	struct ubinfo	sc_ui;
     90 	int		sc_intvec;	/* Interrupt vector		*/
     91 	int		sc_nexttx;
     92 	int		sc_inq;
     93 	int		sc_lastack;
     94 	int		sc_nextrx;
     95 	int		sc_setup;	/* Setup packet in queue	*/
     96 };
     97 
     98 static	int	qematch(device_t, cfdata_t, void *);
     99 static	void	qeattach(device_t, device_t, void *);
    100 static	int	qeinit(struct ifnet *);
    101 static	void	qestart(struct ifnet *);
    102 static	void	qeintr(void *);
    103 static	int	qeioctl(struct ifnet *, u_long, void *);
    104 static	int	qe_add_rxbuf(struct qe_softc *, int);
    105 static	void	qe_setup(struct qe_softc *);
    106 static	void	qetimeout(struct ifnet *);
    107 
    108 CFATTACH_DECL_NEW(qe, sizeof(struct qe_softc),
    109     qematch, qeattach, NULL, NULL);
    110 
    111 #define	QE_WCSR(csr, val) \
    112 	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
    113 #define	QE_RCSR(csr) \
    114 	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)
    115 
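         /*
          * Qbus DMA addresses are at most 22 bits wide; the descriptors
          * carry them as a 16-bit low word plus a 6-bit high part, which
          * is what LOWORD()/HIWORD() extract below.
          */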
    116 #define	LOWORD(x)	((int)(x) & 0xffff)
    117 #define	HIWORD(x)	(((int)(x) >> 16) & 0x3f)
    118 
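         /*
          * Minimum ethernet frame length minus the CRC (which the hardware
          * presumably appends); short packets are padded to this length in
          * qestart() from the zeroed pad buffer set up in qeattach().
          */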
    119 #define	ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
    120 
    121 /*
     122  * Check for a present DEQNA. Done by sending a fake setup packet
     123  * and waiting for an interrupt.
    124  */
    125 int
    126 qematch(device_t parent, cfdata_t cf, void *aux)
    127 {
    128 	struct	qe_softc ssc;
    129 	struct	qe_softc *sc = &ssc;
    130 	struct	uba_attach_args *ua = aux;
    131 	struct	uba_softc *uh = device_private(parent);
    132 	struct ubinfo ui;
    133 
    134 #define	PROBESIZE	4096
    135 	struct qe_ring *ring;
    136 	struct	qe_ring *rp;
    137 	int error, match;
    138 
    139 	ring = malloc(PROBESIZE, M_TEMP, M_WAITOK | M_ZERO);
    140 	memset(sc, 0, sizeof(*sc));
    141 	sc->sc_iot = ua->ua_iot;
    142 	sc->sc_ioh = ua->ua_ioh;
    143 	sc->sc_dmat = ua->ua_dmat;
    144 
    145 	uh->uh_lastiv -= 4;
    146 	QE_WCSR(QE_CSR_CSR, QE_RESET);
    147 	QE_WCSR(QE_CSR_VECTOR, uh->uh_lastiv);
    148 
    149 	/*
    150 	 * Map the ring area. Actually this is done only to be able to
     151 	 * send and receive an internal packet; some junk is looped back
    152 	 * so that the DEQNA has a reason to interrupt.
    153 	 */
    154 	ui.ui_size = PROBESIZE;
    155 	ui.ui_vaddr = (void *)&ring[0];
    156 	if ((error = uballoc(uh, &ui, UBA_CANTWAIT))) {
    157 		match = 0;
    158 		goto out0;
    159 	}
    160 
    161 	/*
     162 	 * Init simple "fake" receive and transmit descriptors that
     163 	 * point to some unused area. Send a fake setup packet.
    164 	 */
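         	/*
         	 * Note that qe_buf_len is always the negative of the buffer
         	 * size in 16-bit words (e.g. -64 for the 128-byte setup frame
         	 * below); the DEQNA apparently expects lengths in this form.
         	 */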
    165 	rp = (void *)ui.ui_baddr;
    166 	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
    167 	ring[0].qe_addr_lo = LOWORD(&rp[4]);
    168 	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
    169 	ring[0].qe_buf_len = -64;
    170 
    171 	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
    172 	ring[2].qe_addr_lo = LOWORD(&rp[4]);
    173 	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
    174 	ring[2].qe_buf_len = -(1500/2);
    175 
    176 	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
    177 	DELAY(1000);
    178 
    179 	/*
    180 	 * Start the interface and wait for the packet.
    181 	 */
    182 	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT);
    183 	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
    184 	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
    185 	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
    186 	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
    187 	DELAY(10000);
    188 
    189 	match = 1;
    190 
    191 	/*
    192 	 * All done with the bus resources.
    193 	 */
    194 	ubfree(uh, &ui);
    195 out0:	free(ring, M_TEMP);
    196 	return match;
    197 }
    198 
    199 /*
    200  * Interface exists: make available by filling in network interface
    201  * record.  System will initialize the interface when it is ready
    202  * to accept packets.
    203  */
    204 void
    205 qeattach(device_t parent, device_t self, void *aux)
    206 {
    207 	struct uba_attach_args *ua = aux;
    208 	struct qe_softc *sc = device_private(self);
    209 	struct ifnet *ifp = &sc->sc_if;
    210 	struct qe_ring *rp;
    211 	uint8_t enaddr[ETHER_ADDR_LEN];
    212 	int i, error;
    213 	char *nullbuf;
    214 
    215 	sc->sc_dev = self;
    216 	sc->sc_uh = device_private(parent);
    217 	sc->sc_iot = ua->ua_iot;
    218 	sc->sc_ioh = ua->ua_ioh;
    219 	sc->sc_dmat = ua->ua_dmat;
    220 
    221 	/*
     222 	 * Allocate DMA-safe memory for the descriptors and the setup packet.
    223 	 */
    224 
    225 	sc->sc_ui.ui_size = sizeof(struct qe_cdata) + ETHER_PAD_LEN;
    226 	if ((error = ubmemalloc(sc->sc_uh, &sc->sc_ui, 0))) {
    227 		aprint_error(": unable to ubmemalloc(), error = %d\n", error);
    228 		return;
    229 	}
    230 	sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
    231 	sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;
    232 
    233 	/*
    234 	 * Zero the newly allocated memory.
    235 	 */
    236 	memset(sc->sc_qedata, 0, sizeof(struct qe_cdata) + ETHER_PAD_LEN);
    237 	nullbuf = ((char*)sc->sc_qedata) + sizeof(struct qe_cdata);
    238 	/*
     239 	 * Create the transmit descriptor DMA maps. We take advantage
     240 	 * of the fact that the Qbus address space is big and therefore
     241 	 * allocate map registers for all transmit descriptors up front,
     242 	 * so that we do not have to do this each time we send a packet.
    243 	 */
    244 	for (i = 0; i < TXDESCS; i++) {
    245 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
    246 		    1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
    247 		    &sc->sc_xmtmap[i]))) {
    248 			aprint_error(
    249 			    ": unable to create tx DMA map %d, error = %d\n",
    250 			    i, error);
    251 			goto fail_4;
    252 		}
    253 	}
    254 
    255 	/*
    256 	 * Create receive buffer DMA maps.
    257 	 */
    258 	for (i = 0; i < RXDESCS; i++) {
    259 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
    260 		    MCLBYTES, 0, BUS_DMA_NOWAIT,
    261 		    &sc->sc_rcvmap[i]))) {
    262 			aprint_error(
    263 			    ": unable to create rx DMA map %d, error = %d\n",
    264 			    i, error);
    265 			goto fail_5;
    266 		}
    267 	}
    268 	/*
    269 	 * Pre-allocate the receive buffers.
    270 	 */
    271 	for (i = 0; i < RXDESCS; i++) {
    272 		if ((error = qe_add_rxbuf(sc, i)) != 0) {
    273 			aprint_error(
    274 			    ": unable to allocate or map rx buffer %d,"
    275 			    " error = %d\n", i, error);
    276 			goto fail_6;
    277 		}
    278 	}
    279 
    280 	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
    281 	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT,&sc->sc_nulldmamap)) != 0) {
    282 		aprint_error(
    283 		    ": unable to create pad buffer DMA map, error = %d\n",
    284 		    error);
    285 		goto fail_6;
    286 	}
    287 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
    288 	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
    289 		aprint_error(
    290 		    ": unable to load pad buffer DMA map, error = %d\n",
    291 		    error);
    292 		goto fail_7;
    293 	}
    294 	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
    295 	    BUS_DMASYNC_PREWRITE);
    296 
    297 	/*
    298 	 * Create ring loops of the buffer chains.
    299 	 * This is only done once.
    300 	 */
    301 
    302 	rp = sc->sc_qedata->qc_recv;
    303 	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
    304 	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
    305 	    QE_VALID | QE_CHAIN;
    306 	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;
    307 
    308 	rp = sc->sc_qedata->qc_xmit;
    309 	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
    310 	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
    311 	    QE_VALID | QE_CHAIN;
    312 	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;
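         	/*
         	 * The extra descriptor at the end of each list has QE_CHAIN
         	 * set and points back to entry 0, so the chip follows the
         	 * chain and wraps around the ring by itself.
         	 */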
    313 
    314 	/*
     315 	 * Get the vector that was set at match time, and remember it.
    316 	 */
    317 	sc->sc_intvec = sc->sc_uh->uh_lastiv;
    318 	QE_WCSR(QE_CSR_CSR, QE_RESET);
    319 	DELAY(1000);
    320 	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
    321 
    322 	/*
     323 	 * Read out the ethernet address and report which type of card this is.
    324 	 */
    325 	for (i = 0; i < 6; i++)
    326 		enaddr[i] = QE_RCSR(i * 2) & 0xff;
    327 
    328 	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
    329 	aprint_normal(": %s, hardware address %s\n",
    330 		QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa":"deqna",
    331 		ether_sprintf(enaddr));
    332 
    333 	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */
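         	/*
         	 * Bit 0 of the vector register is apparently writable only on
         	 * a DELQA; reading it back after setting it is how the card
         	 * type was told apart above.
         	 */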
    334 
    335 	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
    336 		sc, &sc->sc_intrcnt);
    337 	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
    338 		device_xname(sc->sc_dev), "intr");
    339 
    340 	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
    341 	ifp->if_softc = sc;
    342 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    343 	ifp->if_start = qestart;
    344 	ifp->if_init = qeinit;
    345 	ifp->if_ioctl = qeioctl;
    346 	ifp->if_watchdog = qetimeout;
    347 	IFQ_SET_READY(&ifp->if_snd);
    348 
    349 	/*
    350 	 * Attach the interface.
    351 	 */
    352 	if_attach(ifp);
    353 	ether_ifattach(ifp, enaddr);
    354 
    355 	return;
    356 
    357 	/*
    358 	 * Free any resources we've allocated during the failed attach
    359 	 * attempt.  Do this in reverse order and fall through.
    360 	 */
    361  fail_7:
    362 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
    363  fail_6:
    364 	for (i = 0; i < RXDESCS; i++) {
    365 		if (sc->sc_rxmbuf[i] != NULL) {
    366 			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
    367 			m_freem(sc->sc_rxmbuf[i]);
    368 		}
    369 	}
    370  fail_5:
    371 	for (i = 0; i < RXDESCS; i++) {
    372 		if (sc->sc_rcvmap[i] != NULL)
    373 			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
    374 	}
    375  fail_4:
    376 	for (i = 0; i < TXDESCS; i++) {
    377 		if (sc->sc_xmtmap[i] != NULL)
    378 			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
    379 	}
    380 }
    381 
    382 /*
    383  * Initialization of interface.
    384  */
    385 int
    386 qeinit(struct ifnet *ifp)
    387 {
    388 	struct qe_softc *sc = ifp->if_softc;
    389 	struct qe_cdata *qc = sc->sc_qedata;
    390 	int i;
    391 
    392 
    393 	/*
    394 	 * Reset the interface.
    395 	 */
    396 	QE_WCSR(QE_CSR_CSR, QE_RESET);
    397 	DELAY(1000);
    398 	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
    399 	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);
    400 
    401 	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
    402 	/*
    403 	 * Release and init transmit descriptors.
    404 	 */
    405 	for (i = 0; i < TXDESCS; i++) {
    406 		if (sc->sc_txmbuf[i]) {
    407 			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
    408 			m_freem(sc->sc_txmbuf[i]);
    409 			sc->sc_txmbuf[i] = 0;
    410 		}
    411 		qc->qc_xmit[i].qe_addr_hi = 0; /* Clear valid bit */
    412 		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
    413 	}
    414 
    415 	/*
    416 	 * Init receive descriptors.
    417 	 */
    418 	for (i = 0; i < RXDESCS; i++)
    419 		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
    420 	sc->sc_nextrx = 0;
    421 
    422 	/*
    423 	 * Write the descriptor addresses to the device.
    424 	 * Receiving packets will be enabled in the interrupt routine.
    425 	 */
    426 	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT);
    427 	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
    428 	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));
    429 
    430 	ifp->if_flags |= IFF_RUNNING;
    431 	ifp->if_flags &= ~IFF_OACTIVE;
    432 
    433 	/*
    434 	 * Send a setup frame.
    435 	 * This will start the transmit machinery as well.
    436 	 */
    437 	qe_setup(sc);
    438 
    439 	return 0;
    440 }
    441 
    442 /*
    443  * Start output on interface.
    444  */
    445 void
    446 qestart(struct ifnet *ifp)
    447 {
    448 	struct qe_softc *sc = ifp->if_softc;
    449 	struct qe_cdata *qc = sc->sc_qedata;
    450 	paddr_t	buffer;
    451 	struct mbuf *m, *m0;
    452 	int idx, len, s, i, totlen, buflen;
    453 	short orword, csr;
    454 
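         	/*
         	 * QE_RCV_ENABLE is only set from qeintr() once the interface
         	 * has been initialized; if it is still clear we are not
         	 * running yet, so do nothing.
         	 */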
    455 	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
    456 		return;
    457 
    458 	s = splnet();
    459 	while (sc->sc_inq < (TXDESCS - 1)) {
    460 
    461 		if (sc->sc_setup) {
    462 			qe_setup(sc);
    463 			continue;
    464 		}
    465 		idx = sc->sc_nexttx;
    466 		IFQ_POLL(&ifp->if_snd, m);
    467 		if (m == 0)
    468 			goto out;
    469 		/*
     470 		 * Count the number of mbufs in the chain.
     471 		 * We always DMA directly from the mbufs, which is why the
     472 		 * transmit ring is so big.
    473 		 */
    474 		for (m0 = m, i = 0; m0; m0 = m0->m_next)
    475 			if (m0->m_len)
    476 				i++;
    477 		if (m->m_pkthdr.len < ETHER_PAD_LEN) {
    478 			buflen = ETHER_PAD_LEN;
    479 			i++;
    480 		} else
    481 			buflen = m->m_pkthdr.len;
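         		/*
         		 * Runts get an extra descriptor (hence the i++ above)
         		 * pointing at the zeroed pad buffer, so they go out
         		 * padded to ETHER_PAD_LEN bytes.
         		 */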
    482 		if (i >= TXDESCS)
    483 			panic("qestart");
    484 
    485 		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
    486 			ifp->if_flags |= IFF_OACTIVE;
    487 			goto out;
    488 		}
    489 
    490 		IFQ_DEQUEUE(&ifp->if_snd, m);
    491 
    492 		bpf_mtap(ifp, m, BPF_D_OUT);
    493 		/*
     494 		 * m now points to an mbuf chain that can be loaded.
     495 		 * Loop over it and set up the descriptors.
    496 		 */
    497 		totlen = 0;
    498 		for (m0 = m; ; m0 = m0->m_next) {
    499 			if (m0) {
    500 				if (m0->m_len == 0)
    501 					continue;
    502 				bus_dmamap_load(sc->sc_dmat,
    503 				    sc->sc_xmtmap[idx], mtod(m0, void *),
    504 				    m0->m_len, 0, 0);
    505 				buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
    506 				len = m0->m_len;
    507 			} else if (totlen < ETHER_PAD_LEN) {
    508 				buffer = sc->sc_nulldmamap->dm_segs[0].ds_addr;
    509 				len = ETHER_PAD_LEN - totlen;
    510 			} else {
    511 				break;
    512 			}
    513 
    514 			totlen += len;
     515 			/* Word alignment: flag odd start/end addresses, round len up */
    516 			orword = 0;
    517 			if (totlen == buflen) {
    518 				orword |= QE_EOMSG;
    519 				sc->sc_txmbuf[idx] = m;
    520 			}
    521 			if ((buffer & 1) || (len & 1))
    522 				len += 2;
    523 			if (buffer & 1)
    524 				orword |= QE_ODDBEGIN;
    525 			if ((buffer + len) & 1)
    526 				orword |= QE_ODDEND;
    527 			qc->qc_xmit[idx].qe_buf_len = -(len/2);
    528 			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
    529 			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
    530 			qc->qc_xmit[idx].qe_flag =
    531 			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
    532 			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
    533 			if (++idx == TXDESCS)
    534 				idx = 0;
    535 			sc->sc_inq++;
    536 			if (m0 == NULL)
    537 				break;
    538 		}
    539 #ifdef DIAGNOSTIC
    540 		if (totlen != buflen)
    541 			panic("qestart: len fault");
    542 #endif
    543 
    544 		/*
    545 		 * Kick off the transmit logic, if it is stopped.
    546 		 */
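         		/*
         		 * QE_XL_INVALID in the CSR means the chip has no valid
         		 * transmit list; reload XMTL/XMTH with the first of the
         		 * descriptors just set up to restart it.
         		 */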
    547 		csr = QE_RCSR(QE_CSR_CSR);
    548 		if (csr & QE_XL_INVALID) {
    549 			QE_WCSR(QE_CSR_XMTL,
    550 			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
    551 			QE_WCSR(QE_CSR_XMTH,
    552 			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
    553 		}
    554 		sc->sc_nexttx = idx;
    555 	}
    556 	if (sc->sc_inq == (TXDESCS - 1))
    557 		ifp->if_flags |= IFF_OACTIVE;
    558 
    559 out:	if (sc->sc_inq)
    560 		ifp->if_timer = 5; /* If transmit logic dies */
    561 	splx(s);
    562 }
    563 
    564 static void
    565 qeintr(void *arg)
    566 {
    567 	struct qe_softc *sc = arg;
    568 	struct qe_cdata *qc = sc->sc_qedata;
    569 	struct ifnet *ifp = &sc->sc_if;
    570 	struct mbuf *m;
    571 	int csr, status1, status2, len;
    572 
    573 	csr = QE_RCSR(QE_CSR_CSR);
    574 
    575 	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
    576 	    QE_RCV_INT | QE_ILOOP);
    577 
    578 	if (csr & QE_RCV_INT)
    579 		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
    580 			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
    581 			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;
    582 
    583 			m = sc->sc_rxmbuf[sc->sc_nextrx];
    584 			len = ((status1 & QE_RBL_HI) |
    585 			    (status2 & QE_RBL_LO)) + 60;
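         			/*
         			 * The received byte count is split between the
         			 * two status words and is apparently reported
         			 * less the 60-byte minimum, hence the +60.
         			 */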
    586 			qe_add_rxbuf(sc, sc->sc_nextrx);
    587 			m_set_rcvif(m, ifp);
    588 			m->m_pkthdr.len = m->m_len = len;
    589 			if (++sc->sc_nextrx == RXDESCS)
    590 				sc->sc_nextrx = 0;
    591 			if ((status1 & QE_ESETUP) == 0)
    592 				if_percpuq_enqueue(ifp->if_percpuq, m);
    593 			else
    594 				m_freem(m);
    595 		}
    596 
    597 	if (csr & (QE_XMIT_INT | QE_XL_INVALID)) {
    598 		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
    599 			int idx = sc->sc_lastack;
    600 
    601 			sc->sc_inq--;
    602 			if (++sc->sc_lastack == TXDESCS)
    603 				sc->sc_lastack = 0;
    604 
    605 			/* XXX collect statistics */
    606 			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
    607 			qc->qc_xmit[idx].qe_status1 =
    608 			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;
    609 
    610 			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
    611 				continue;
    612 			if (sc->sc_txmbuf[idx] == NULL ||
    613 			    sc->sc_txmbuf[idx]->m_pkthdr.len < ETHER_PAD_LEN)
    614 				bus_dmamap_unload(sc->sc_dmat,
    615 				    sc->sc_xmtmap[idx]);
    616 			if (sc->sc_txmbuf[idx]) {
    617 				m_freem(sc->sc_txmbuf[idx]);
    618 				sc->sc_txmbuf[idx] = NULL;
    619 			}
    620 		}
    621 		ifp->if_timer = 0;
    622 		ifp->if_flags &= ~IFF_OACTIVE;
     623 		qestart(ifp); /* Put more packets in the queue */
    624 	}
    625 	/*
     626 	 * How can the receive list become invalid???
     627 	 * It has been verified to happen anyway, so recover from it here.
    628 	 */
    629 	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
    630 	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
    631 		QE_WCSR(QE_CSR_RCLL,
    632 		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
    633 		QE_WCSR(QE_CSR_RCLH,
    634 		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
    635 	}
    636 }
    637 
    638 /*
    639  * Process an ioctl request.
    640  */
    641 int
    642 qeioctl(struct ifnet *ifp, u_long cmd, void *data)
    643 {
    644 	struct qe_softc *sc = ifp->if_softc;
    645 	struct ifaddr *ifa = (struct ifaddr *)data;
    646 	int s = splnet(), error = 0;
    647 
    648 	switch (cmd) {
    649 
    650 	case SIOCINITIFADDR:
    651 		ifp->if_flags |= IFF_UP;
    652 		switch (ifa->ifa_addr->sa_family) {
    653 #ifdef INET
    654 		case AF_INET:
    655 			qeinit(ifp);
    656 			arp_ifinit(ifp, ifa);
    657 			break;
    658 #endif
    659 		}
    660 		break;
    661 
    662 	case SIOCSIFFLAGS:
    663 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
    664 			break;
    665 		/* XXX re-use ether_ioctl() */
    666 		switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
    667 		case IFF_RUNNING:
    668 			/*
     669 			 * If the interface is marked down and it is running,
     670 			 * stop it (by disabling the receive mechanism).
    671 			 */
    672 			QE_WCSR(QE_CSR_CSR,
    673 			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
    674 			ifp->if_flags &= ~IFF_RUNNING;
    675 			break;
    676 		case IFF_UP:
    677 			/*
     678 			 * If the interface is marked up and it is stopped,
     679 			 * then start it.
    680 			 */
    681 			qeinit(ifp);
    682 			break;
    683 		case IFF_UP | IFF_RUNNING:
    684 			/*
     685 			 * Send a new setup packet to reflect any changes
     686 			 * (like IFF_PROMISC etc).
    687 			 */
    688 			qe_setup(sc);
    689 			break;
    690 		case 0:
    691 			break;
    692 		}
    693 		break;
    694 
    695 	case SIOCADDMULTI:
    696 	case SIOCDELMULTI:
    697 		/*
    698 		 * Update our multicast list.
    699 		 */
    700 		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
    701 			/*
    702 			 * Multicast list has changed; set the hardware filter
    703 			 * accordingly.
    704 			 */
    705 			if (ifp->if_flags & IFF_RUNNING)
    706 				qe_setup(sc);
    707 			error = 0;
    708 		}
    709 		break;
    710 
    711 	default:
    712 		error = ether_ioctl(ifp, cmd, data);
    713 	}
    714 	splx(s);
    715 	return error;
    716 }
    717 
    718 /*
    719  * Add a receive buffer to the indicated descriptor.
    720  */
    721 int
    722 qe_add_rxbuf(struct qe_softc *sc, int i)
    723 {
    724 	struct mbuf *m;
    725 	struct qe_ring *rp;
    726 	vaddr_t addr;
    727 	int error;
    728 
    729 	MGETHDR(m, M_DONTWAIT, MT_DATA);
    730 	if (m == NULL)
    731 		return ENOBUFS;
    732 
    733 	MCLGET(m, M_DONTWAIT);
    734 	if ((m->m_flags & M_EXT) == 0) {
    735 		m_freem(m);
    736 		return ENOBUFS;
    737 	}
    738 
    739 	if (sc->sc_rxmbuf[i] != NULL)
    740 		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
    741 
    742 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
    743 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
    744 	if (error)
    745 		panic("%s: can't load rx DMA map %d, error = %d",
    746 		    device_xname(sc->sc_dev), i, error);
    747 	sc->sc_rxmbuf[i] = m;
    748 
    749 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
    750 	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
    751 
    752 	/*
     753 	 * We know that the mbuf cluster is page aligned. Shift the data
     754 	 * by two bytes so that the IP header ends up longword aligned.
    755 	 */
    756 	m->m_data += 2;
    757 	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
    758 	rp = &sc->sc_qedata->qc_recv[i];
    759 	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
    760 	rp->qe_addr_lo = LOWORD(addr);
    761 	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
    762 	rp->qe_buf_len = -(m->m_ext.ext_size - 2) / 2;
    763 
    764 	return 0;
    765 }
    766 
    767 /*
     768  * Create a setup packet and put it in the queue for sending.
    769  */
    770 void
    771 qe_setup(struct qe_softc *sc)
    772 {
    773 	struct ethercom *ec = &sc->sc_ec;
    774 	struct ether_multi *enm;
    775 	struct ether_multistep step;
    776 	struct qe_cdata *qc = sc->sc_qedata;
    777 	struct ifnet *ifp = &sc->sc_if;
    778 	uint8_t enaddr[ETHER_ADDR_LEN];
    779 	int i, j, k, idx, s;
    780 
    781 	s = splnet();
    782 	if (sc->sc_inq == (TXDESCS - 1)) {
    783 		sc->sc_setup = 1;
    784 		splx(s);
    785 		return;
    786 	}
    787 	sc->sc_setup = 0;
    788 	/*
    789 	 * Init the setup packet with valid info.
    790 	 */
    791 	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
    792 	memcpy(enaddr, CLLADDR(ifp->if_sadl), sizeof(enaddr));
    793 	for (i = 0; i < ETHER_ADDR_LEN; i++)
    794 		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */
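         	/*
         	 * The setup frame is laid out column-wise: byte i of each
         	 * address goes at offset i * 8 plus a column number (plus 64
         	 * for the second half).  Columns left as all ones match the
         	 * broadcast address.
         	 */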
    795 
    796 	/*
    797 	 * Multicast handling. The DEQNA can handle up to 12 direct
    798 	 * ethernet addresses.
    799 	 */
    800 	j = 3; k = 0;
    801 	ifp->if_flags &= ~IFF_ALLMULTI;
    802 	ETHER_LOCK(ec);
    803 	ETHER_FIRST_MULTI(step, ec, enm);
    804 	while (enm != NULL) {
    805 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
    806 			ifp->if_flags |= IFF_ALLMULTI;
    807 			break;
    808 		}
    809 		for (i = 0; i < ETHER_ADDR_LEN; i++)
    810 			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
    811 		j++;
    812 		if (j == 8) {
    813 			j = 1; k += 64;
    814 		}
    815 		if (k > 64) {
    816 			ifp->if_flags |= IFF_ALLMULTI;
    817 			break;
    818 		}
    819 		ETHER_NEXT_MULTI(step, enm);
    820 	}
    821 	ETHER_UNLOCK(ec);
    822 	idx = sc->sc_nexttx;
    823 	qc->qc_xmit[idx].qe_buf_len = -64;
    824 
    825 	/*
     826 	 * How is the DEQNA turned into ALLMULTI mode???
     827 	 * Until someone tells me, fall back to PROMISC when there are
     828 	 * more than 12 ethernet addresses.
    829 	 */
    830 	if (ifp->if_flags & IFF_ALLMULTI)
    831 		ifp->if_flags |= IFF_PROMISC;
    832 	else if (ifp->if_pcount == 0)
    833 		ifp->if_flags &= ~IFF_PROMISC;
    834 	if (ifp->if_flags & IFF_PROMISC)
    835 		qc->qc_xmit[idx].qe_buf_len = -65;
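         	/*
         	 * A normal setup frame is 64 words (-64); the extra word
         	 * (-65) evidently tells the chip to enable promiscuous
         	 * reception.
         	 */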
    836 
    837 	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
    838 	qc->qc_xmit[idx].qe_addr_hi =
    839 	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
    840 	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
    841 	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;
    842 
    843 	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
    844 		QE_WCSR(QE_CSR_XMTL,
    845 		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
    846 		QE_WCSR(QE_CSR_XMTH,
    847 		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
    848 	}
    849 
    850 	sc->sc_inq++;
    851 	if (++sc->sc_nexttx == TXDESCS)
    852 		sc->sc_nexttx = 0;
    853 	splx(s);
    854 }
    855 
    856 /*
    857  * Check for dead transmit logic. Not uncommon.
    858  */
    859 void
    860 qetimeout(struct ifnet *ifp)
    861 {
    862 	struct qe_softc *sc = ifp->if_softc;
    863 
    864 	if (sc->sc_inq == 0)
    865 		return;
    866 
    867 	aprint_error_dev(sc->sc_dev, "xmit logic died, resetting...\n");
    868 	/*
     869 	 * Do a reset of the interface to get it going again.
     870 	 * Would it work to just restart the transmit logic?
    871 	 */
    872 	qeinit(ifp);
    873 }
    874