Home | History | Annotate | Line # | Download | only in qbus
if_qe.c revision 1.62.16.1
      1  1.62.16.1      matt /*      $NetBSD: if_qe.c,v 1.62.16.1 2007/11/06 23:29:59 matt Exp $ */
      2        1.1     ragge /*
      3       1.37     ragge  * Copyright (c) 1999 Ludd, University of Lule}, Sweden. All rights reserved.
      4        1.1     ragge  *
      5        1.1     ragge  * Redistribution and use in source and binary forms, with or without
      6        1.1     ragge  * modification, are permitted provided that the following conditions
      7        1.1     ragge  * are met:
      8        1.1     ragge  * 1. Redistributions of source code must retain the above copyright
      9        1.1     ragge  *    notice, this list of conditions and the following disclaimer.
     10        1.1     ragge  * 2. Redistributions in binary form must reproduce the above copyright
     11        1.1     ragge  *    notice, this list of conditions and the following disclaimer in the
     12        1.1     ragge  *    documentation and/or other materials provided with the distribution.
     13        1.1     ragge  * 3. All advertising materials mentioning features or use of this software
     14        1.1     ragge  *    must display the following acknowledgement:
     15       1.59    simonb  *      This product includes software developed at Ludd, University of
     16       1.37     ragge  *      Lule}, Sweden and its contributors.
     17       1.37     ragge  * 4. The name of the author may not be used to endorse or promote products
     18       1.37     ragge  *    derived from this software without specific prior written permission
     19       1.37     ragge  *
     20       1.37     ragge  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     21       1.37     ragge  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     22       1.37     ragge  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     23       1.37     ragge  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     24       1.37     ragge  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     25       1.37     ragge  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     26       1.37     ragge  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     27       1.37     ragge  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     28       1.37     ragge  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     29       1.37     ragge  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     30        1.1     ragge  */
     31        1.1     ragge 
     32        1.1     ragge /*
     33       1.37     ragge  * Driver for DEQNA/DELQA ethernet cards.
     34       1.37     ragge  * Things that is still to do:
     35       1.37     ragge  *	Handle ubaresets. Does not work at all right now.
     36       1.37     ragge  *	Fix ALLMULTI reception. But someone must tell me how...
     37       1.37     ragge  *	Collect statistics.
     38        1.1     ragge  */
     39       1.49     lukem 
     40       1.49     lukem #include <sys/cdefs.h>
     41  1.62.16.1      matt __KERNEL_RCSID(0, "$NetBSD: if_qe.c,v 1.62.16.1 2007/11/06 23:29:59 matt Exp $");
     42       1.22     ragge 
     43       1.27  jonathan #include "opt_inet.h"
     44       1.22     ragge #include "bpfilter.h"
     45       1.22     ragge 
     46        1.9   mycroft #include <sys/param.h>
     47        1.9   mycroft #include <sys/mbuf.h>
     48        1.9   mycroft #include <sys/socket.h>
     49        1.9   mycroft #include <sys/device.h>
     50       1.37     ragge #include <sys/systm.h>
     51       1.37     ragge #include <sys/sockio.h>
     52        1.9   mycroft 
     53        1.9   mycroft #include <net/if.h>
     54       1.20        is #include <net/if_ether.h>
     55       1.21     ragge #include <net/if_dl.h>
     56        1.1     ragge 
     57        1.9   mycroft #include <netinet/in.h>
     58       1.20        is #include <netinet/if_inarp.h>
     59       1.22     ragge 
     60       1.22     ragge #if NBPFILTER > 0
     61       1.22     ragge #include <net/bpf.h>
     62       1.22     ragge #include <net/bpfdesc.h>
     63       1.22     ragge #endif
     64       1.22     ragge 
     65  1.62.16.1      matt #include <sys/bus.h>
     66        1.1     ragge 
     67       1.37     ragge #include <dev/qbus/ubavar.h>
     68       1.37     ragge #include <dev/qbus/if_qereg.h>
     69        1.1     ragge 
     70       1.37     ragge #include "ioconf.h"
     71       1.37     ragge 
     72       1.37     ragge #define RXDESCS	30	/* # of receive descriptors */
     73       1.37     ragge #define TXDESCS	60	/* # transmit descs */
     74        1.6       jtc 
     75        1.1     ragge /*
     76       1.37     ragge  * Structure containing the elements that must be in DMA-safe memory.
     77        1.1     ragge  */
     78       1.37     ragge struct qe_cdata {
     79       1.37     ragge 	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
     80       1.37     ragge 	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
     81       1.37     ragge 	u_int8_t	qc_setup[128];		/* Setup packet layout */
     82       1.37     ragge };
     83       1.37     ragge 
/* Per-device driver state. */
struct	qe_softc {
	struct device	sc_dev;		/* Configuration common part	*/
	struct evcnt	sc_intrcnt;	/* Interrupt counting		*/
	struct ethercom sc_ec;		/* Ethernet common part		*/
#define sc_if	sc_ec.ec_if		/* network-visible interface	*/
	bus_space_tag_t sc_iot;		/* bus space tag for CSR access	*/
	bus_addr_t	sc_ioh;		/* CSR handle (passed to bus_space_*) */
	bus_dma_tag_t	sc_dmat;	/* DMA tag for map create/load	*/
	struct qe_cdata *sc_qedata;	/* Descriptor struct		*/
	struct qe_cdata *sc_pqedata;	/* Unibus address of above	*/
	struct mbuf*	sc_txmbuf[TXDESCS];	/* mbufs queued for tx	*/
	struct mbuf*	sc_rxmbuf[RXDESCS];	/* mbufs posted for rx	*/
	bus_dmamap_t	sc_xmtmap[TXDESCS];	/* one DMA map per tx desc */
	bus_dmamap_t	sc_rcvmap[RXDESCS];	/* one DMA map per rx desc */
	bus_dmamap_t	sc_nulldmamap;	/* ethernet padding buffer	*/
	struct ubinfo	sc_ui;		/* Unibus mapping of sc_qedata	*/
	int		sc_intvec;	/* Interrupt vector		*/
	int		sc_nexttx;	/* next free tx descriptor	*/
	int		sc_inq;		/* # of tx descriptors in use	*/
	int		sc_lastack;	/* tx reclaim index - presumably
					 * the oldest unacked descriptor;
					 * consumed in qeintr() */
	int		sc_nextrx;	/* next rx descriptor to poll	*/
	int		sc_setup;	/* Setup packet in queue	*/
};
    107        1.1     ragge 
/* Autoconfiguration and network-interface entry points. */
static	int	qematch(struct device *, struct cfdata *, void *);
static	void	qeattach(struct device *, struct device *, void *);
static	void	qeinit(struct qe_softc *);
static	void	qestart(struct ifnet *);
static	void	qeintr(void *);
static	int	qeioctl(struct ifnet *, u_long, void *);
static	int	qe_add_rxbuf(struct qe_softc *, int);
static	void	qe_setup(struct qe_softc *);
static	void	qetimeout(struct ifnet *);

CFATTACH_DECL(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);

/* 16-bit CSR access shorthand; both macros expect a local 'sc' in scope. */
#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

/* Split a DMA address into the chip's low-word/high-word register format
 * (the high word keeps only 6 bits - a 22-bit Qbus address). */
#define	LOWORD(x)	((int)(x) & 0xffff)
#define	HIWORD(x)	(((int)(x) >> 16) & 0x3f)

/* Minimum frame length less CRC; short tx frames are padded up to this. */
#define	ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
    130       1.57    bouyer 
    131        1.1     ragge /*
    132       1.37     ragge  * Check for present DEQNA. Done by sending a fake setup packet
    133       1.37     ragge  * and wait for interrupt.
    134        1.1     ragge  */
    135        1.7     ragge int
    136       1.46     ragge qematch(struct device *parent, struct cfdata *cf, void *aux)
    137        1.7     ragge {
    138       1.37     ragge 	struct	qe_softc ssc;
    139       1.37     ragge 	struct	qe_softc *sc = &ssc;
    140        1.7     ragge 	struct	uba_attach_args *ua = aux;
    141        1.7     ragge 	struct	uba_softc *ubasc = (struct uba_softc *)parent;
    142       1.48     ragge 	struct ubinfo ui;
    143       1.37     ragge 
    144       1.51     ragge #define	PROBESIZE	4096
    145       1.51     ragge 	struct qe_ring *ring;
    146       1.21     ragge 	struct	qe_ring *rp;
    147       1.37     ragge 	int error;
    148        1.1     ragge 
    149       1.51     ragge 	ring = malloc(PROBESIZE, M_TEMP, M_WAITOK);
    150       1.37     ragge 	bzero(sc, sizeof(struct qe_softc));
    151       1.37     ragge 	bzero(ring, PROBESIZE);
    152       1.37     ragge 	sc->sc_iot = ua->ua_iot;
    153       1.37     ragge 	sc->sc_ioh = ua->ua_ioh;
    154       1.37     ragge 	sc->sc_dmat = ua->ua_dmat;
    155        1.7     ragge 
    156       1.37     ragge 	ubasc->uh_lastiv -= 4;
    157       1.37     ragge 	QE_WCSR(QE_CSR_CSR, QE_RESET);
    158       1.37     ragge 	QE_WCSR(QE_CSR_VECTOR, ubasc->uh_lastiv);
    159        1.1     ragge 
    160        1.1     ragge 	/*
    161       1.59    simonb 	 * Map the ring area. Actually this is done only to be able to
    162       1.37     ragge 	 * send and receive a internal packet; some junk is loopbacked
    163       1.37     ragge 	 * so that the DEQNA has a reason to interrupt.
    164        1.1     ragge 	 */
    165       1.48     ragge 	ui.ui_size = PROBESIZE;
    166       1.62  christos 	ui.ui_vaddr = (void *)&ring[0];
    167       1.48     ragge 	if ((error = uballoc((void *)parent, &ui, UBA_CANTWAIT)))
    168       1.37     ragge 		return 0;
    169        1.1     ragge 
    170        1.1     ragge 	/*
    171       1.37     ragge 	 * Init a simple "fake" receive and transmit descriptor that
    172       1.37     ragge 	 * points to some unused area. Send a fake setup packet.
    173        1.1     ragge 	 */
    174       1.48     ragge 	rp = (void *)ui.ui_baddr;
    175       1.37     ragge 	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
    176       1.37     ragge 	ring[0].qe_addr_lo = LOWORD(&rp[4]);
    177       1.37     ragge 	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
    178       1.51     ragge 	ring[0].qe_buf_len = -64;
    179        1.1     ragge 
    180       1.37     ragge 	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
    181       1.37     ragge 	ring[2].qe_addr_lo = LOWORD(&rp[4]);
    182       1.37     ragge 	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
    183       1.51     ragge 	ring[2].qe_buf_len = -(1500/2);
    184        1.1     ragge 
    185       1.37     ragge 	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
    186       1.37     ragge 	DELAY(1000);
    187        1.1     ragge 
    188        1.1     ragge 	/*
    189        1.1     ragge 	 * Start the interface and wait for the packet.
    190        1.1     ragge 	 */
    191       1.37     ragge 	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
    192       1.37     ragge 	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
    193       1.37     ragge 	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
    194       1.37     ragge 	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
    195       1.37     ragge 	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
    196        1.1     ragge 	DELAY(10000);
    197       1.37     ragge 
    198        1.1     ragge 	/*
    199        1.1     ragge 	 * All done with the bus resources.
    200        1.1     ragge 	 */
    201       1.48     ragge 	ubfree((void *)parent, &ui);
    202       1.51     ragge 	free(ring, M_TEMP);
    203        1.7     ragge 	return 1;
    204        1.1     ragge }
    205        1.1     ragge 
    206        1.1     ragge /*
    207        1.1     ragge  * Interface exists: make available by filling in network interface
    208        1.1     ragge  * record.  System will initialize the interface when it is ready
    209        1.1     ragge  * to accept packets.
    210        1.1     ragge  */
    211        1.7     ragge void
    212       1.46     ragge qeattach(struct device *parent, struct device *self, void *aux)
    213        1.7     ragge {
    214        1.7     ragge 	struct	uba_attach_args *ua = aux;
    215       1.37     ragge 	struct	uba_softc *ubasc = (struct uba_softc *)parent;
    216       1.61   thorpej 	struct	qe_softc *sc = device_private(self);
    217       1.37     ragge 	struct	ifnet *ifp = (struct ifnet *)&sc->sc_if;
    218       1.37     ragge 	struct	qe_ring *rp;
    219       1.37     ragge 	u_int8_t enaddr[ETHER_ADDR_LEN];
    220       1.48     ragge 	int i, error;
    221       1.57    bouyer 	char *nullbuf;
    222       1.37     ragge 
    223       1.37     ragge 	sc->sc_iot = ua->ua_iot;
    224       1.37     ragge 	sc->sc_ioh = ua->ua_ioh;
    225       1.37     ragge 	sc->sc_dmat = ua->ua_dmat;
    226       1.37     ragge 
    227       1.59    simonb 	/*
    228       1.59    simonb 	 * Allocate DMA safe memory for descriptors and setup memory.
    229       1.59    simonb 	 */
    230       1.37     ragge 
    231       1.57    bouyer 	sc->sc_ui.ui_size = sizeof(struct qe_cdata) + ETHER_PAD_LEN;
    232       1.48     ragge 	if ((error = ubmemalloc((struct uba_softc *)parent, &sc->sc_ui, 0))) {
    233       1.48     ragge 		printf(": unable to ubmemalloc(), error = %d\n", error);
    234       1.48     ragge 		return;
    235       1.37     ragge 	}
    236       1.48     ragge 	sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
    237       1.48     ragge 	sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;
    238       1.37     ragge 
    239       1.37     ragge 	/*
    240       1.37     ragge 	 * Zero the newly allocated memory.
    241       1.37     ragge 	 */
    242       1.57    bouyer 	bzero(sc->sc_qedata, sizeof(struct qe_cdata) + ETHER_PAD_LEN);
    243       1.57    bouyer 	nullbuf = ((char*)sc->sc_qedata) + sizeof(struct qe_cdata);
    244       1.37     ragge 	/*
    245       1.37     ragge 	 * Create the transmit descriptor DMA maps. We take advantage
    246       1.59    simonb 	 * of the fact that the Qbus address space is big, and therefore
    247       1.37     ragge 	 * allocate map registers for all transmit descriptors also,
    248       1.37     ragge 	 * so that we can avoid this each time we send a packet.
    249       1.37     ragge 	 */
    250       1.37     ragge 	for (i = 0; i < TXDESCS; i++) {
    251       1.37     ragge 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
    252       1.37     ragge 		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
    253       1.37     ragge 		    &sc->sc_xmtmap[i]))) {
    254       1.37     ragge 			printf(": unable to create tx DMA map %d, error = %d\n",
    255       1.37     ragge 			    i, error);
    256       1.37     ragge 			goto fail_4;
    257       1.37     ragge 		}
    258       1.37     ragge 	}
    259       1.37     ragge 
    260       1.37     ragge 	/*
    261       1.37     ragge 	 * Create receive buffer DMA maps.
    262       1.37     ragge 	 */
    263       1.37     ragge 	for (i = 0; i < RXDESCS; i++) {
    264       1.37     ragge 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
    265       1.37     ragge 		    MCLBYTES, 0, BUS_DMA_NOWAIT,
    266       1.37     ragge 		    &sc->sc_rcvmap[i]))) {
    267       1.37     ragge 			printf(": unable to create rx DMA map %d, error = %d\n",
    268       1.37     ragge 			    i, error);
    269       1.37     ragge 			goto fail_5;
    270       1.37     ragge 		}
    271       1.37     ragge 	}
    272       1.37     ragge 	/*
    273       1.37     ragge 	 * Pre-allocate the receive buffers.
    274       1.37     ragge 	 */
    275       1.37     ragge 	for (i = 0; i < RXDESCS; i++) {
    276       1.37     ragge 		if ((error = qe_add_rxbuf(sc, i)) != 0) {
    277       1.37     ragge 			printf(": unable to allocate or map rx buffer %d\n,"
    278       1.37     ragge 			    " error = %d\n", i, error);
    279       1.37     ragge 			goto fail_6;
    280       1.37     ragge 		}
    281       1.37     ragge 	}
    282        1.1     ragge 
    283       1.57    bouyer 	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
    284       1.57    bouyer 	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT,&sc->sc_nulldmamap)) != 0) {
    285       1.57    bouyer 		printf("%s: unable to create pad buffer DMA map, "
    286       1.57    bouyer 		    "error = %d\n", sc->sc_dev.dv_xname, error);
    287       1.57    bouyer 		goto fail_6;
    288       1.57    bouyer 	}
    289       1.57    bouyer 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
    290       1.57    bouyer 	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
    291       1.57    bouyer 		printf("%s: unable to load pad buffer DMA map, "
    292       1.57    bouyer 		    "error = %d\n", sc->sc_dev.dv_xname, error);
    293       1.57    bouyer 		goto fail_7;
    294       1.57    bouyer 	}
    295       1.57    bouyer 	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
    296       1.57    bouyer 	    BUS_DMASYNC_PREWRITE);
    297       1.57    bouyer 
    298        1.1     ragge 	/*
    299       1.37     ragge 	 * Create ring loops of the buffer chains.
    300       1.37     ragge 	 * This is only done once.
    301        1.1     ragge 	 */
    302       1.37     ragge 
    303       1.37     ragge 	rp = sc->sc_qedata->qc_recv;
    304       1.37     ragge 	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
    305       1.37     ragge 	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
    306       1.37     ragge 	    QE_VALID | QE_CHAIN;
    307       1.37     ragge 	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;
    308       1.37     ragge 
    309       1.37     ragge 	rp = sc->sc_qedata->qc_xmit;
    310       1.37     ragge 	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
    311       1.37     ragge 	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
    312       1.37     ragge 	    QE_VALID | QE_CHAIN;
    313       1.37     ragge 	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;
    314        1.1     ragge 
    315        1.1     ragge 	/*
    316       1.37     ragge 	 * Get the vector that were set at match time, and remember it.
    317        1.1     ragge 	 */
    318       1.37     ragge 	sc->sc_intvec = ubasc->uh_lastiv;
    319       1.37     ragge 	QE_WCSR(QE_CSR_CSR, QE_RESET);
    320       1.37     ragge 	DELAY(1000);
    321       1.37     ragge 	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
    322        1.1     ragge 
    323        1.1     ragge 	/*
    324       1.37     ragge 	 * Read out ethernet address and tell which type this card is.
    325        1.1     ragge 	 */
    326       1.37     ragge 	for (i = 0; i < 6; i++)
    327       1.37     ragge 		enaddr[i] = QE_RCSR(i * 2) & 0xff;
    328        1.1     ragge 
    329       1.37     ragge 	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
    330       1.37     ragge 	printf("\n%s: %s, hardware address %s\n", sc->sc_dev.dv_xname,
    331       1.37     ragge 		QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa":"deqna",
    332       1.37     ragge 		ether_sprintf(enaddr));
    333       1.37     ragge 
    334       1.37     ragge 	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */
    335       1.37     ragge 
    336       1.41      matt 	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
    337       1.41      matt 		sc, &sc->sc_intrcnt);
    338       1.42      matt 	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
    339       1.42      matt 		sc->sc_dev.dv_xname, "intr");
    340       1.39      matt 
    341       1.37     ragge 	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
    342       1.37     ragge 	ifp->if_softc = sc;
    343       1.37     ragge 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    344        1.1     ragge 	ifp->if_start = qestart;
    345        1.1     ragge 	ifp->if_ioctl = qeioctl;
    346       1.38     ragge 	ifp->if_watchdog = qetimeout;
    347       1.45   thorpej 	IFQ_SET_READY(&ifp->if_snd);
    348       1.37     ragge 
    349       1.37     ragge 	/*
    350       1.37     ragge 	 * Attach the interface.
    351       1.37     ragge 	 */
    352        1.1     ragge 	if_attach(ifp);
    353       1.37     ragge 	ether_ifattach(ifp, enaddr);
    354       1.22     ragge 
    355       1.37     ragge 	return;
    356        1.1     ragge 
    357       1.37     ragge 	/*
    358       1.37     ragge 	 * Free any resources we've allocated during the failed attach
    359       1.37     ragge 	 * attempt.  Do this in reverse order and fall through.
    360       1.37     ragge 	 */
    361       1.57    bouyer  fail_7:
    362       1.57    bouyer 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
    363       1.37     ragge  fail_6:
    364       1.37     ragge 	for (i = 0; i < RXDESCS; i++) {
    365       1.37     ragge 		if (sc->sc_rxmbuf[i] != NULL) {
    366       1.57    bouyer 			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
    367       1.37     ragge 			m_freem(sc->sc_rxmbuf[i]);
    368       1.37     ragge 		}
    369       1.37     ragge 	}
    370       1.37     ragge  fail_5:
    371       1.37     ragge 	for (i = 0; i < RXDESCS; i++) {
    372       1.37     ragge 		if (sc->sc_xmtmap[i] != NULL)
    373       1.37     ragge 			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
    374       1.37     ragge 	}
    375       1.37     ragge  fail_4:
    376       1.37     ragge 	for (i = 0; i < TXDESCS; i++) {
    377       1.37     ragge 		if (sc->sc_rcvmap[i] != NULL)
    378       1.37     ragge 			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
    379       1.37     ragge 	}
    380        1.1     ragge }
    381        1.1     ragge 
    382        1.1     ragge /*
    383        1.1     ragge  * Initialization of interface.
    384        1.1     ragge  */
    385        1.7     ragge void
    386       1.46     ragge qeinit(struct qe_softc *sc)
    387        1.1     ragge {
    388       1.37     ragge 	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
    389       1.37     ragge 	struct qe_cdata *qc = sc->sc_qedata;
    390        1.4     ragge 	int i;
    391        1.1     ragge 
    392        1.1     ragge 
    393       1.37     ragge 	/*
    394       1.37     ragge 	 * Reset the interface.
    395       1.37     ragge 	 */
    396       1.37     ragge 	QE_WCSR(QE_CSR_CSR, QE_RESET);
    397       1.37     ragge 	DELAY(1000);
    398       1.37     ragge 	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
    399       1.37     ragge 	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);
    400       1.37     ragge 
    401       1.37     ragge 	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
    402       1.37     ragge 	/*
    403       1.37     ragge 	 * Release and init transmit descriptors.
    404       1.37     ragge 	 */
    405       1.37     ragge 	for (i = 0; i < TXDESCS; i++) {
    406       1.37     ragge 		if (sc->sc_txmbuf[i]) {
    407       1.37     ragge 			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
    408       1.37     ragge 			m_freem(sc->sc_txmbuf[i]);
    409       1.37     ragge 			sc->sc_txmbuf[i] = 0;
    410        1.1     ragge 		}
    411       1.37     ragge 		qc->qc_xmit[i].qe_addr_hi = 0; /* Clear valid bit */
    412       1.37     ragge 		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
    413        1.1     ragge 	}
    414       1.37     ragge 
    415       1.37     ragge 
    416       1.37     ragge 	/*
    417       1.37     ragge 	 * Init receive descriptors.
    418       1.37     ragge 	 */
    419       1.37     ragge 	for (i = 0; i < RXDESCS; i++)
    420       1.37     ragge 		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
    421       1.37     ragge 	sc->sc_nextrx = 0;
    422       1.37     ragge 
    423       1.37     ragge 	/*
    424       1.37     ragge 	 * Write the descriptor addresses to the device.
    425       1.37     ragge 	 * Receiving packets will be enabled in the interrupt routine.
    426       1.37     ragge 	 */
    427       1.37     ragge 	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
    428       1.37     ragge 	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
    429       1.37     ragge 	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));
    430       1.37     ragge 
    431       1.37     ragge 	ifp->if_flags |= IFF_RUNNING;
    432       1.37     ragge 	ifp->if_flags &= ~IFF_OACTIVE;
    433       1.37     ragge 
    434        1.1     ragge 	/*
    435       1.37     ragge 	 * Send a setup frame.
    436       1.37     ragge 	 * This will start the transmit machinery as well.
    437        1.1     ragge 	 */
    438       1.37     ragge 	qe_setup(sc);
    439       1.37     ragge 
    440        1.1     ragge }
    441        1.1     ragge 
    442        1.1     ragge /*
    443        1.1     ragge  * Start output on interface.
    444        1.1     ragge  */
/*
 * Start output on interface.
 * Drains the interface send queue into the transmit descriptor ring,
 * DMA-mapping mbuf chains in place.  Frames shorter than ETHER_PAD_LEN
 * get an extra descriptor pointing at the shared zero pad buffer.
 * Called at splnet or with the queue locked by the network stack.
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t	buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, buflen, error;
	short orword, csr;

	/* Do nothing until the receiver has been enabled (see qeintr). */
	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splnet();
	while (sc->sc_inq < (TXDESCS - 1)) {

		/* A pending setup frame takes priority over data. */
		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IFQ_POLL(&ifp->if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count number of mbufs in chain.
		 * Always do DMA directly from mbufs, therefore the transmit
		 * ring is really big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (m->m_pkthdr.len < ETHER_PAD_LEN) {
			/* One extra descriptor for the pad buffer. */
			buflen = ETHER_PAD_LEN;
			i++;
		} else
			buflen = m->m_pkthdr.len;
		if (i >= TXDESCS)
			panic("qestart");

		/* Not enough free descriptors; stall until qeintr reclaims. */
		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/*
		 * m now points to a mbuf chain that can be loaded.
		 * Loop around and set it.
		 * After the chain is exhausted (m0 == NULL) one more pass
		 * may emit the pad-buffer descriptor for short frames.
		 */
		totlen = 0;
		for (m0 = m; ; m0 = m0->m_next) {
			if (m0) {
				if (m0->m_len == 0)
					continue;
				/* NOTE(review): 'error' is assigned but never
				 * checked; a bus_dmamap_load() failure would
				 * queue a stale descriptor - confirm intent. */
				error = bus_dmamap_load(sc->sc_dmat,
				    sc->sc_xmtmap[idx], mtod(m0, void *),
				    m0->m_len, 0, 0);
				buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
				len = m0->m_len;
			} else if (totlen < ETHER_PAD_LEN) {
				buffer = sc->sc_nulldmamap->dm_segs[0].ds_addr;
				len = ETHER_PAD_LEN - totlen;
			} else {
				break;
			}

			totlen += len;
			/* Word alignment calc */
			orword = 0;
			if (totlen == buflen) {
				/* Last descriptor: remember the mbuf so the
				 * interrupt handler can free it. */
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
			/* Chip wants the negated word count. */
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			/* Setting QE_VALID last hands the descriptor over. */
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
			if (m0 == NULL)
				break;
		}
#ifdef DIAGNOSTIC
		if (totlen != buflen)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		csr = QE_RCSR(QE_CSR_CSR);
		if (csr & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}
    566        1.1     ragge 
    567       1.39      matt static void
    568       1.46     ragge qeintr(void *arg)
    569        1.1     ragge {
    570       1.39      matt 	struct qe_softc *sc = arg;
    571       1.37     ragge 	struct qe_cdata *qc = sc->sc_qedata;
    572       1.37     ragge 	struct ifnet *ifp = &sc->sc_if;
    573       1.37     ragge 	struct mbuf *m;
    574       1.37     ragge 	int csr, status1, status2, len;
    575        1.1     ragge 
    576       1.37     ragge 	csr = QE_RCSR(QE_CSR_CSR);
    577        1.1     ragge 
    578       1.37     ragge 	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
    579       1.37     ragge 	    QE_RCV_INT | QE_ILOOP);
    580        1.1     ragge 
    581       1.37     ragge 	if (csr & QE_RCV_INT)
    582       1.37     ragge 		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
    583       1.37     ragge 			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
    584       1.37     ragge 			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;
    585       1.46     ragge 
    586       1.37     ragge 			m = sc->sc_rxmbuf[sc->sc_nextrx];
    587       1.37     ragge 			len = ((status1 & QE_RBL_HI) |
    588       1.37     ragge 			    (status2 & QE_RBL_LO)) + 60;
    589       1.37     ragge 			qe_add_rxbuf(sc, sc->sc_nextrx);
    590       1.37     ragge 			m->m_pkthdr.rcvif = ifp;
    591       1.37     ragge 			m->m_pkthdr.len = m->m_len = len;
    592       1.37     ragge 			if (++sc->sc_nextrx == RXDESCS)
    593       1.37     ragge 				sc->sc_nextrx = 0;
    594       1.37     ragge #if NBPFILTER > 0
    595       1.43   thorpej 			if (ifp->if_bpf)
    596       1.37     ragge 				bpf_mtap(ifp->if_bpf, m);
    597       1.37     ragge #endif
    598       1.46     ragge 			if ((status1 & QE_ESETUP) == 0)
    599       1.46     ragge 				(*ifp->if_input)(ifp, m);
    600       1.46     ragge 			else
    601       1.46     ragge 				m_freem(m);
    602        1.1     ragge 		}
    603       1.37     ragge 
    604       1.46     ragge 	if (csr & (QE_XMIT_INT|QE_XL_INVALID)) {
    605       1.37     ragge 		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
    606       1.37     ragge 			int idx = sc->sc_lastack;
    607       1.37     ragge 
    608       1.37     ragge 			sc->sc_inq--;
    609       1.37     ragge 			if (++sc->sc_lastack == TXDESCS)
    610       1.37     ragge 				sc->sc_lastack = 0;
    611       1.37     ragge 
    612       1.37     ragge 			/* XXX collect statistics */
    613       1.37     ragge 			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
    614       1.37     ragge 			qc->qc_xmit[idx].qe_status1 =
    615       1.37     ragge 			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;
    616       1.37     ragge 
    617       1.37     ragge 			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
    618       1.37     ragge 				continue;
    619       1.57    bouyer 			if (sc->sc_txmbuf[idx] == NULL ||
    620       1.57    bouyer 			    sc->sc_txmbuf[idx]->m_pkthdr.len < ETHER_PAD_LEN)
    621       1.57    bouyer 				bus_dmamap_unload(sc->sc_dmat,
    622       1.57    bouyer 				    sc->sc_xmtmap[idx]);
    623       1.37     ragge 			if (sc->sc_txmbuf[idx]) {
    624       1.37     ragge 				m_freem(sc->sc_txmbuf[idx]);
    625       1.57    bouyer 				sc->sc_txmbuf[idx] = NULL;
    626       1.37     ragge 			}
    627       1.37     ragge 		}
    628       1.38     ragge 		ifp->if_timer = 0;
    629       1.37     ragge 		ifp->if_flags &= ~IFF_OACTIVE;
    630       1.37     ragge 		qestart(ifp); /* Put in more in queue */
    631        1.1     ragge 	}
    632       1.37     ragge 	/*
    633       1.37     ragge 	 * How can the receive list get invalid???
    634       1.37     ragge 	 * Verified that it happens anyway.
    635        1.1     ragge 	 */
    636       1.37     ragge 	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
    637       1.37     ragge 	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
    638       1.37     ragge 		QE_WCSR(QE_CSR_RCLL,
    639       1.37     ragge 		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
    640       1.37     ragge 		QE_WCSR(QE_CSR_RCLH,
    641       1.37     ragge 		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
    642        1.1     ragge 	}
    643        1.1     ragge }
    644        1.1     ragge 
    645        1.1     ragge /*
    646        1.1     ragge  * Process an ioctl request.
    647        1.1     ragge  */
    648        1.7     ragge int
    649       1.62  christos qeioctl(struct ifnet *ifp, u_long cmd, void *data)
    650        1.1     ragge {
    651       1.14   thorpej 	struct qe_softc *sc = ifp->if_softc;
    652        1.1     ragge 	struct ifaddr *ifa = (struct ifaddr *)data;
    653        1.8   mycroft 	int s = splnet(), error = 0;
    654        1.1     ragge 
    655        1.1     ragge 	switch (cmd) {
    656        1.1     ragge 
    657        1.1     ragge 	case SIOCSIFADDR:
    658        1.1     ragge 		ifp->if_flags |= IFF_UP;
    659        1.1     ragge 		switch(ifa->ifa_addr->sa_family) {
    660        1.1     ragge #ifdef INET
    661        1.1     ragge 		case AF_INET:
    662       1.37     ragge 			qeinit(sc);
    663       1.20        is 			arp_ifinit(ifp, ifa);
    664        1.1     ragge 			break;
    665        1.1     ragge #endif
    666        1.1     ragge 		}
    667        1.1     ragge 		break;
    668        1.1     ragge 
    669        1.1     ragge 	case SIOCSIFFLAGS:
    670        1.1     ragge 		if ((ifp->if_flags & IFF_UP) == 0 &&
    671       1.37     ragge 		    (ifp->if_flags & IFF_RUNNING) != 0) {
    672       1.37     ragge 			/*
    673       1.37     ragge 			 * If interface is marked down and it is running,
    674       1.37     ragge 			 * stop it. (by disabling receive mechanism).
    675       1.37     ragge 			 */
    676       1.37     ragge 			QE_WCSR(QE_CSR_CSR,
    677       1.37     ragge 			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
    678       1.37     ragge 			ifp->if_flags &= ~IFF_RUNNING;
    679       1.37     ragge 		} else if ((ifp->if_flags & IFF_UP) != 0 &&
    680       1.37     ragge 			   (ifp->if_flags & IFF_RUNNING) == 0) {
    681       1.37     ragge 			/*
    682       1.37     ragge 			 * If interface it marked up and it is stopped, then
    683       1.37     ragge 			 * start it.
    684       1.37     ragge 			 */
    685       1.19     ragge 			qeinit(sc);
    686       1.37     ragge 		} else if ((ifp->if_flags & IFF_UP) != 0) {
    687       1.37     ragge 			/*
    688       1.37     ragge 			 * Send a new setup packet to match any new changes.
    689       1.37     ragge 			 * (Like IFF_PROMISC etc)
    690       1.37     ragge 			 */
    691       1.37     ragge 			qe_setup(sc);
    692       1.37     ragge 		}
    693        1.1     ragge 		break;
    694        1.1     ragge 
    695       1.22     ragge 	case SIOCADDMULTI:
    696       1.22     ragge 	case SIOCDELMULTI:
    697       1.22     ragge 		/*
    698       1.22     ragge 		 * Update our multicast list.
    699       1.22     ragge 		 */
    700  1.62.16.1      matt 		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
    701       1.22     ragge 			/*
    702       1.22     ragge 			 * Multicast list has changed; set the hardware filter
    703       1.22     ragge 			 * accordingly.
    704       1.22     ragge 			 */
    705       1.58   thorpej 			if (ifp->if_flags & IFF_RUNNING)
    706       1.58   thorpej 				qe_setup(sc);
    707       1.22     ragge 			error = 0;
    708       1.22     ragge 		}
    709       1.22     ragge 		break;
    710       1.22     ragge 
    711        1.1     ragge 	default:
    712        1.1     ragge 		error = EINVAL;
    713        1.1     ragge 
    714        1.1     ragge 	}
    715        1.1     ragge 	splx(s);
    716        1.1     ragge 	return (error);
    717        1.1     ragge }
    718        1.1     ragge 
    719        1.1     ragge /*
    720       1.37     ragge  * Add a receive buffer to the indicated descriptor.
    721        1.1     ragge  */
    722       1.37     ragge int
    723       1.59    simonb qe_add_rxbuf(struct qe_softc *sc, int i)
    724        1.1     ragge {
    725       1.37     ragge 	struct mbuf *m;
    726       1.37     ragge 	struct qe_ring *rp;
    727       1.37     ragge 	vaddr_t addr;
    728       1.37     ragge 	int error;
    729       1.37     ragge 
    730       1.37     ragge 	MGETHDR(m, M_DONTWAIT, MT_DATA);
    731       1.37     ragge 	if (m == NULL)
    732       1.37     ragge 		return (ENOBUFS);
    733       1.37     ragge 
    734       1.37     ragge 	MCLGET(m, M_DONTWAIT);
    735       1.37     ragge 	if ((m->m_flags & M_EXT) == 0) {
    736       1.37     ragge 		m_freem(m);
    737       1.37     ragge 		return (ENOBUFS);
    738       1.37     ragge 	}
    739       1.37     ragge 
    740       1.37     ragge 	if (sc->sc_rxmbuf[i] != NULL)
    741       1.37     ragge 		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
    742        1.1     ragge 
    743       1.37     ragge 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
    744       1.37     ragge 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
    745       1.37     ragge 	if (error)
    746       1.52    provos 		panic("%s: can't load rx DMA map %d, error = %d",
    747       1.37     ragge 		    sc->sc_dev.dv_xname, i, error);
    748       1.37     ragge 	sc->sc_rxmbuf[i] = m;
    749        1.1     ragge 
    750       1.37     ragge 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
    751       1.37     ragge 	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
    752        1.1     ragge 
    753        1.1     ragge 	/*
    754       1.37     ragge 	 * We know that the mbuf cluster is page aligned. Also, be sure
    755       1.37     ragge 	 * that the IP header will be longword aligned.
    756        1.1     ragge 	 */
    757       1.37     ragge 	m->m_data += 2;
    758       1.37     ragge 	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
    759       1.37     ragge 	rp = &sc->sc_qedata->qc_recv[i];
    760       1.37     ragge 	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
    761       1.37     ragge 	rp->qe_addr_lo = LOWORD(addr);
    762       1.37     ragge 	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
    763       1.37     ragge 	rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;
    764        1.1     ragge 
    765       1.37     ragge 	return (0);
    766        1.1     ragge }
    767       1.37     ragge 
    768        1.1     ragge /*
    769       1.37     ragge  * Create a setup packet and put in queue for sending.
    770        1.1     ragge  */
    771        1.7     ragge void
    772       1.46     ragge qe_setup(struct qe_softc *sc)
    773        1.1     ragge {
    774       1.37     ragge 	struct ether_multi *enm;
    775       1.37     ragge 	struct ether_multistep step;
    776       1.37     ragge 	struct qe_cdata *qc = sc->sc_qedata;
    777       1.37     ragge 	struct ifnet *ifp = &sc->sc_if;
    778       1.37     ragge 	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
    779       1.37     ragge 	int i, j, k, idx, s;
    780       1.37     ragge 
    781       1.47   thorpej 	s = splnet();
    782       1.37     ragge 	if (sc->sc_inq == (TXDESCS - 1)) {
    783       1.37     ragge 		sc->sc_setup = 1;
    784       1.37     ragge 		splx(s);
    785       1.37     ragge 		return;
    786       1.37     ragge 	}
    787       1.37     ragge 	sc->sc_setup = 0;
    788        1.1     ragge 	/*
    789       1.37     ragge 	 * Init the setup packet with valid info.
    790        1.1     ragge 	 */
    791       1.37     ragge 	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
    792       1.37     ragge 	for (i = 0; i < ETHER_ADDR_LEN; i++)
    793       1.37     ragge 		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */
    794       1.37     ragge 
    795        1.1     ragge 	/*
    796       1.59    simonb 	 * Multicast handling. The DEQNA can handle up to 12 direct
    797       1.37     ragge 	 * ethernet addresses.
    798        1.1     ragge 	 */
    799       1.37     ragge 	j = 3; k = 0;
    800       1.37     ragge 	ifp->if_flags &= ~IFF_ALLMULTI;
    801       1.37     ragge 	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
    802       1.37     ragge 	while (enm != NULL) {
    803       1.50       wiz 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
    804       1.37     ragge 			ifp->if_flags |= IFF_ALLMULTI;
    805       1.37     ragge 			break;
    806       1.37     ragge 		}
    807       1.37     ragge 		for (i = 0; i < ETHER_ADDR_LEN; i++)
    808       1.37     ragge 			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
    809       1.37     ragge 		j++;
    810       1.37     ragge 		if (j == 8) {
    811       1.37     ragge 			j = 1; k += 64;
    812       1.37     ragge 		}
    813       1.37     ragge 		if (k > 64) {
    814       1.37     ragge 			ifp->if_flags |= IFF_ALLMULTI;
    815       1.37     ragge 			break;
    816       1.22     ragge 		}
    817       1.37     ragge 		ETHER_NEXT_MULTI(step, enm);
    818       1.22     ragge 	}
    819       1.37     ragge 	idx = sc->sc_nexttx;
    820       1.37     ragge 	qc->qc_xmit[idx].qe_buf_len = -64;
    821        1.1     ragge 
    822        1.1     ragge 	/*
    823       1.37     ragge 	 * How is the DEQNA turned in ALLMULTI mode???
    824       1.37     ragge 	 * Until someone tells me, fall back to PROMISC when more than
    825       1.37     ragge 	 * 12 ethernet addresses.
    826        1.1     ragge 	 */
    827       1.43   thorpej 	if (ifp->if_flags & IFF_ALLMULTI)
    828       1.43   thorpej 		ifp->if_flags |= IFF_PROMISC;
    829       1.43   thorpej 	else if (ifp->if_pcount == 0)
    830       1.43   thorpej 		ifp->if_flags &= ~IFF_PROMISC;
    831       1.43   thorpej 	if (ifp->if_flags & IFF_PROMISC)
    832       1.37     ragge 		qc->qc_xmit[idx].qe_buf_len = -65;
    833        1.1     ragge 
    834       1.37     ragge 	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
    835       1.37     ragge 	qc->qc_xmit[idx].qe_addr_hi =
    836       1.37     ragge 	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
    837       1.37     ragge 	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
    838       1.37     ragge 	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;
    839        1.1     ragge 
    840       1.37     ragge 	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
    841       1.37     ragge 		QE_WCSR(QE_CSR_XMTL,
    842       1.37     ragge 		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
    843       1.37     ragge 		QE_WCSR(QE_CSR_XMTH,
    844       1.37     ragge 		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
    845       1.22     ragge 	}
    846        1.1     ragge 
    847       1.37     ragge 	sc->sc_inq++;
    848       1.37     ragge 	if (++sc->sc_nexttx == TXDESCS)
    849       1.37     ragge 		sc->sc_nexttx = 0;
    850       1.37     ragge 	splx(s);
    851       1.38     ragge }
    852       1.38     ragge 
    853       1.38     ragge /*
    854       1.38     ragge  * Check for dead transmit logic. Not uncommon.
    855       1.38     ragge  */
    856       1.38     ragge void
    857       1.46     ragge qetimeout(struct ifnet *ifp)
    858       1.38     ragge {
    859       1.38     ragge 	struct qe_softc *sc = ifp->if_softc;
    860       1.38     ragge 
    861       1.38     ragge 	if (sc->sc_inq == 0)
    862       1.38     ragge 		return;
    863       1.38     ragge 
    864       1.38     ragge 	printf("%s: xmit logic died, resetting...\n", sc->sc_dev.dv_xname);
    865       1.38     ragge 	/*
    866       1.38     ragge 	 * Do a reset of interface, to get it going again.
    867       1.38     ragge 	 * Will it work by just restart the transmit logic?
    868       1.38     ragge 	 */
    869       1.38     ragge 	qeinit(sc);
    870        1.1     ragge }
    871