/*	$NetBSD: if_qe.c,v 1.46.2.3 2002/06/20 03:46:23 nathanw Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Luleå, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEQNA/DELQA ethernet cards.
 * Things that are still to do:
 *	Handle ubaresets. Does not work at all right now.
 *	Fix ALLMULTI reception. But someone must tell me how...
 *	Collect statistics.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_qe.c,v 1.46.2.3 2002/06/20 03:46:23 nathanw Exp $");

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <machine/bus.h>

#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_qereg.h>

#include "ioconf.h"

#define RXDESCS	30	/* # of receive descriptors */
#define TXDESCS	60	/* # transmit descs */

/*
 * Structure containing the elements that must be in DMA-safe memory.
 */
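/*
 * Each ring below has one extra slot; qeattach() turns that last slot into
 * a chain descriptor (QE_CHAIN) pointing back to the first entry, so the
 * device sees each descriptor list as a ring.
 */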
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	u_int8_t	qc_setup[128];		/* Setup packet layout */
};

struct	qe_softc {
	struct device	sc_dev;		/* Configuration common part	*/
	struct evcnt	sc_intrcnt;	/* Interrupt counting		*/
	struct ethercom sc_ec;		/* Ethernet common part		*/
#define sc_if	sc_ec.ec_if		/* network-visible interface	*/
	bus_space_tag_t sc_iot;
	bus_addr_t	sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct qe_cdata *sc_qedata;	/* Descriptor struct		*/
	struct qe_cdata *sc_pqedata;	/* Unibus address of above	*/
	struct mbuf*	sc_txmbuf[TXDESCS];
	struct mbuf*	sc_rxmbuf[RXDESCS];
	bus_dmamap_t	sc_xmtmap[TXDESCS];
	bus_dmamap_t	sc_rcvmap[RXDESCS];
	struct ubinfo	sc_ui;
	int		sc_intvec;	/* Interrupt vector		*/
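	/*
	 * Ring bookkeeping: sc_nexttx is the next free transmit slot,
	 * sc_inq the number of descriptors currently handed to the chip,
	 * sc_lastack the oldest transmit slot not yet acknowledged and
	 * sc_nextrx the next receive slot to look at.
	 */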
	int		sc_nexttx;
	int		sc_inq;
	int		sc_lastack;
	int		sc_nextrx;
	int		sc_setup;	/* Setup packet in queue	*/
};

static	int	qematch(struct device *, struct cfdata *, void *);
static	void	qeattach(struct device *, struct device *, void *);
static	void	qeinit(struct qe_softc *);
static	void	qestart(struct ifnet *);
static	void	qeintr(void *);
static	int	qeioctl(struct ifnet *, u_long, caddr_t);
static	int	qe_add_rxbuf(struct qe_softc *, int);
static	void	qe_setup(struct qe_softc *);
static	void	qetimeout(struct ifnet *);

struct	cfattach qe_ca = {
	sizeof(struct qe_softc), qematch, qeattach
};

#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

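/*
 * The Q-bus uses 22-bit DMA addresses; LOWORD/HIWORD split such an address
 * into the low 16 bits and the high 6 bits, which is the form the card
 * wants in its descriptor words and in the XMTL/XMTH and RCLL/RCLH CSRs.
 */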
#define	LOWORD(x)	((int)(x) & 0xffff)
#define	HIWORD(x)	(((int)(x) >> 16) & 0x3f)

/*
 * Check for a present DEQNA. Done by sending a fake setup packet
 * and waiting for an interrupt.
 */
int
qematch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct	qe_softc ssc;
	struct	qe_softc *sc = &ssc;
	struct	uba_attach_args *ua = aux;
	struct	uba_softc *ubasc = (struct uba_softc *)parent;
	struct ubinfo ui;

#define	PROBESIZE	4096
	struct qe_ring *ring;
	struct	qe_ring *rp;
	int error;

	ring = malloc(PROBESIZE, M_TEMP, M_WAITOK);
	bzero(sc, sizeof(struct qe_softc));
	bzero(ring, PROBESIZE);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

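	/*
	 * Grab the next free interrupt vector from the bus adapter and
	 * program it into the card while the card is held in reset.
	 */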
	ubasc->uh_lastiv -= 4;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, ubasc->uh_lastiv);

	/*
	 * Map the ring area. Actually this is done only to be able to
	 * send and receive an internal packet; some junk is looped back
	 * so that the DEQNA has a reason to interrupt.
	 */
	ui.ui_size = PROBESIZE;
	ui.ui_vaddr = (caddr_t)&ring[0];
	if ((error = uballoc((void *)parent, &ui, UBA_CANTWAIT))) {
		free(ring, M_TEMP);
		return 0;
	}

	/*
	 * Init a simple "fake" receive and transmit descriptor that
	 * points to some unused area. Send a fake setup packet.
	 */
	rp = (void *)ui.ui_baddr;
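	/*
	 * Buffer lengths in the descriptors are negative word counts, so
	 * -64 below describes the 128-byte setup buffer and -(1500/2) a
	 * 1500-byte receive buffer.
	 */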
	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
	ring[0].qe_addr_lo = LOWORD(&rp[4]);
	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
	ring[0].qe_buf_len = -64;

	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
	ring[2].qe_addr_lo = LOWORD(&rp[4]);
	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
	ring[2].qe_buf_len = -(1500/2);

	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	DELAY(1000);

	/*
	 * Start the interface and wait for the packet.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
	DELAY(10000);

	/*
	 * All done with the bus resources.
	 */
	ubfree((void *)parent, &ui);
	free(ring, M_TEMP);
	return 1;
}

/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
void
qeattach(struct device *parent, struct device *self, void *aux)
{
	struct	uba_attach_args *ua = aux;
	struct	uba_softc *ubasc = (struct uba_softc *)parent;
	struct	qe_softc *sc = (struct qe_softc *)self;
	struct	ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct	qe_ring *rp;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	int i, error;

	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */

	sc->sc_ui.ui_size = sizeof(struct qe_cdata);
	if ((error = ubmemalloc((struct uba_softc *)parent, &sc->sc_ui, 0))) {
		printf(": unable to ubmemalloc(), error = %d\n", error);
		return;
	}
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
	sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;

	/*
	 * Zero the newly allocated memory.
	 */
	bzero(sc->sc_qedata, sizeof(struct qe_cdata));
	/*
	 * Create the transmit descriptor DMA maps. We take advantage
	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors also,
	 * so that we can avoid this each time we send a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */

	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;

	/*
	 * Get the vector that was set at match time, and remember it.
	 */
	sc->sc_intvec = ubasc->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);

	/*
	 * Read out the ethernet address and determine which type of card
	 * this is.
	 */
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;

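	/*
	 * Writing the low bit of the vector register and reading it back
	 * appears to be how a DELQA is told apart from a DEQNA: the bit
	 * sticks on a DELQA and reads back as zero on a DEQNA.
	 */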
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	printf("\n%s: %s, hardware address %s\n", sc->sc_dev.dv_xname,
		QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa":"deqna",
		ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
		sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
		sc->sc_dev.dv_xname, "intr");

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
}

/*
 * Initialization of interface.
 */
void
qeinit(struct qe_softc *sc)
{
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;


	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = 0;
		}
		qc->qc_xmit[i].qe_addr_hi = 0; /* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}


	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;

	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);

}

/*
 * Start output on interface.
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t	buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, error;
	short orword, csr;

	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splnet();
	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IFQ_POLL(&ifp->if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count the number of mbufs in the chain.
		 * We always DMA directly from the mbufs, which is why the
		 * transmit ring is so big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (i >= TXDESCS)
			panic("qestart");

		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/*
		 * m now points to an mbuf chain that can be loaded.
		 * Loop over the chain and set up a descriptor for each segment.
		 */
		totlen = 0;
		for (m0 = m; m0; m0 = m0->m_next) {
			error = bus_dmamap_load(sc->sc_dmat, sc->sc_xmtmap[idx],
			    mtod(m0, void *), m0->m_len, 0, 0);
			buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
			len = m0->m_len;
			if (len == 0)
				continue;

			totlen += len;
			/* Word alignment calc */
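			/*
			 * The chip apparently transfers whole words from
			 * even addresses only, so an odd buffer start or
			 * end is handled by rounding the length up to a
			 * word boundary and setting QE_ODDBEGIN/QE_ODDEND;
			 * the length itself is stored as a negative word
			 * count.
			 */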
			orword = 0;
			if (totlen == m->m_pkthdr.len) {
				if (totlen < ETHER_MIN_LEN)
					len += (ETHER_MIN_LEN - totlen);
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
		}
#ifdef DIAGNOSTIC
		if (totlen != m->m_pkthdr.len)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		csr = QE_RCSR(QE_CSR_CSR);
		if (csr & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}

static void
qeintr(void *arg)
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, status1, status2, len;

	csr = QE_RCSR(QE_CSR_CSR);

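	/*
	 * Writing the interrupt bits back presumably acknowledges them
	 * (write-one-to-clear) while keeping the receiver and interrupt
	 * enables set.
	 */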
	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);

	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;

			m = sc->sc_rxmbuf[sc->sc_nextrx];
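			/*
			 * The received byte count is split across the two
			 * status words and is apparently reported less the
			 * first 60 bytes of the frame, hence the +60 below.
			 */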
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif
			if ((status1 & QE_ESETUP) == 0)
				(*ifp->if_input)(ifp, m);
			else
				m_freem(m);
		}

	if (csr & (QE_XMIT_INT|QE_XL_INVALID)) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = 0;
			}
		}
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp); /* Put more packets in the queue */
	}
	/*
	 * How can the receive list become invalid?
	 * It has been verified to happen anyway.
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}

/*
 * Process an ioctl request.
 */
int
qeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch(ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running,
			 * stop it (by disabling the receive mechanism).
			 */
			QE_WCSR(QE_CSR_CSR,
			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Send a new setup packet to match any new changes.
			 * (Like IFF_PROMISC etc)
			 */
			qe_setup(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		error = (cmd == SIOCADDMULTI) ?
			ether_addmulti(ifr, &sc->sc_ec):
			ether_delmulti(ifr, &sc->sc_ec);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			qe_setup(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;

	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
qe_add_rxbuf(struct qe_softc *sc, int i)
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
	rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;

	return (0);
}

/*
 * Create a setup packet and put it in the queue for sending.
 */
void
qe_setup(struct qe_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	int i, j, k, idx, s;

	s = splnet();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */

	/*
	 * Multicast handling. The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
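	/*
	 * The 128-byte setup buffer is filled column-wise: each address
	 * occupies one byte per 8-byte row.  Column 1 of the first 64-byte
	 * half carries our own station address; the loop below fills up to
	 * 12 further columns with multicast addresses, and anything left
	 * at 0xff matches broadcast.
	 */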
	j = 3; k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1; k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA turned into ALLMULTI mode???
	 * Until someone tells me, fall back to PROMISC when there are more
	 * than 12 ethernet addresses.
	 */
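	/*
	 * A setup frame one word longer than normal (65 words instead of
	 * 64) is, as far as I can tell, what switches the device into
	 * promiscuous reception; see the qe_buf_len assignment below.
	 */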
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;
	if (ifp->if_flags & IFF_PROMISC)
		qc->qc_xmit[idx].qe_buf_len = -65;

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
qetimeout(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	printf("%s: xmit logic died, resetting...\n", sc->sc_dev.dv_xname);
	/*
	 * Reset the interface to get it going again.
	 * Would it work to just restart the transmit logic?
	 */
	qeinit(sc);
}