      1 /*      $NetBSD: sgec.c,v 1.38 2010/04/05 07:19:36 joerg Exp $ */
      2 /*
      3  * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
      4  *
      5  * Redistribution and use in source and binary forms, with or without
      6  * modification, are permitted provided that the following conditions
      7  * are met:
      8  * 1. Redistributions of source code must retain the above copyright
      9  *    notice, this list of conditions and the following disclaimer.
     10  * 2. Redistributions in binary form must reproduce the above copyright
     11  *    notice, this list of conditions and the following disclaimer in the
     12  *    documentation and/or other materials provided with the distribution.
     13  * 3. All advertising materials mentioning features or use of this software
     14  *    must display the following acknowledgement:
     15  *      This product includes software developed at Ludd, University of
     16  *      Luleå, Sweden and its contributors.
     17  * 4. The name of the author may not be used to endorse or promote products
     18  *    derived from this software without specific prior written permission
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Driver for the SGEC (Second Generation Ethernet Controller), sitting
     34  * on for example the VAX 4000/300 (KA670).
     35  *
     36  * The SGEC looks like a mixture of the DEQNA and the TULIP. Fun toy.
     37  *
     38  * Even though the chip is capable of using virtual addresses (it can
     39  * read the System Page Table directly), this driver does not do so;
     40  * there is no benefit to doing that in NetBSD today.
     41  *
     42  * Things still left to do:
     43  *	Collect statistics.
     44  *	Use imperfect filtering when there are many multicast addresses (sketch below).
     45  */
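
/*
 * Illustrative sketch only (not part of the driver): a common way to do
 * imperfect multicast filtering on Tulip-like chips is to hash each
 * multicast address with the Ethernet CRC-32 and set the corresponding bit
 * in a hash table loaded through the setup frame.  The SGEC-specific
 * hash/setup-frame layout is not implemented here, so the helper below only
 * shows the generic CRC-32 hash step; the function name and the 512-entry
 * table size are assumptions, and the code is deliberately not compiled in.
 */
#if 0
static u_int
ze_mchash(const uint8_t *addr)
{
	uint32_t crc = 0xffffffff;
	int i, bit;

	/* Standard Ethernet CRC-32, reflected polynomial 0xedb88320. */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		crc ^= addr[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	/* Use the low-order bits of the CRC as the hash table index. */
	return crc & 511;
}
#endif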
     46 
     47 #include <sys/cdefs.h>
     48 __KERNEL_RCSID(0, "$NetBSD: sgec.c,v 1.38 2010/04/05 07:19:36 joerg Exp $");
     49 
     50 #include "opt_inet.h"
     51 
     52 #include <sys/param.h>
     53 #include <sys/mbuf.h>
     54 #include <sys/socket.h>
     55 #include <sys/device.h>
     56 #include <sys/systm.h>
     57 #include <sys/sockio.h>
     58 
     59 #include <uvm/uvm_extern.h>
     60 
     61 #include <net/if.h>
     62 #include <net/if_ether.h>
     63 #include <net/if_dl.h>
     64 
     65 #include <netinet/in.h>
     66 #include <netinet/if_inarp.h>
     67 
     68 #include <net/bpf.h>
     69 #include <net/bpfdesc.h>
     70 
     71 #include <sys/bus.h>
     72 
     73 #include <dev/ic/sgecreg.h>
     74 #include <dev/ic/sgecvar.h>
     75 
     76 static	void	zeinit(struct ze_softc *);
     77 static	void	zestart(struct ifnet *);
     78 static	int	zeioctl(struct ifnet *, u_long, void *);
     79 static	int	ze_add_rxbuf(struct ze_softc *, int);
     80 static	void	ze_setup(struct ze_softc *);
     81 static	void	zetimeout(struct ifnet *);
     82 static	bool	zereset(struct ze_softc *);
     83 
     84 #define	ZE_WCSR(csr, val) \
     85 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, csr, val)
     86 #define	ZE_RCSR(csr) \
     87 	bus_space_read_4(sc->sc_iot, sc->sc_ioh, csr)
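
/*
 * Both macros expand to bus_space accesses through the softc's bus tag and
 * handle, so they can only be used where a "struct ze_softc *sc" is in
 * scope, as in:
 *
 *	csr = ZE_RCSR(ZE_CSR5);
 *	ZE_WCSR(ZE_CSR5, csr);
 */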
     88 
     89 /*
     90  * Interface exists: make available by filling in network interface
     91  * record.  System will initialize the interface when it is ready
     92  * to accept packets.
     93  */
     94 void
     95 sgec_attach(struct ze_softc *sc)
     96 {
     97 	struct ifnet *ifp = &sc->sc_if;
     98 	struct ze_tdes *tp;
     99 	struct ze_rdes *rp;
    100 	bus_dma_segment_t seg;
    101 	int i, rseg, error;
    102 
    103         /*
    104          * Allocate DMA safe memory for descriptors and setup memory.
    105          */
    106 	error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct ze_cdata),
    107 	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
    108 	if (error) {
    109 		aprint_error(": unable to allocate control data, error = %d\n",
    110 		    error);
    111 		goto fail_0;
    112 	}
    113 
    114 	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(struct ze_cdata),
    115 	    (void **)&sc->sc_zedata, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
    116 	if (error) {
    117 		aprint_error(
    118 		    ": unable to map control data, error = %d\n", error);
    119 		goto fail_1;
    120 	}
    121 
    122 	error = bus_dmamap_create(sc->sc_dmat, sizeof(struct ze_cdata), 1,
    123 	    sizeof(struct ze_cdata), 0, BUS_DMA_NOWAIT, &sc->sc_cmap);
    124 	if (error) {
    125 		aprint_error(
    126 		    ": unable to create control data DMA map, error = %d\n",
    127 		    error);
    128 		goto fail_2;
    129 	}
    130 
    131 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap, sc->sc_zedata,
    132 	    sizeof(struct ze_cdata), NULL, BUS_DMA_NOWAIT);
    133 	if (error) {
    134 		aprint_error(
    135 		    ": unable to load control data DMA map, error = %d\n",
    136 		    error);
    137 		goto fail_3;
    138 	}
    139 
    140 	/*
    141 	 * Zero the newly allocated memory.
    142 	 */
    143 	memset(sc->sc_zedata, 0, sizeof(struct ze_cdata));
    144 
    145 	/*
    146 	 * Create the transmit descriptor DMA maps.
    147 	 */
    148 	for (i = 0; error == 0 && i < TXDESCS; i++) {
    149 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
    150 		    TXDESCS - 1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
    151 		    &sc->sc_xmtmap[i]);
    152 	}
    153 	if (error) {
    154 		aprint_error(": unable to create tx DMA map %d, error = %d\n",
    155 		    i, error);
    156 		goto fail_4;
    157 	}
    158 
    159 	/*
    160 	 * Create receive buffer DMA maps.
    161 	 */
    162 	for (i = 0; error == 0 && i < RXDESCS; i++) {
    163 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
    164 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rcvmap[i]);
    165 	}
    166 	if (error) {
    167 		aprint_error(": unable to create rx DMA map %d, error = %d\n",
    168 		    i, error);
    169 		goto fail_5;
    170 	}
    171 
    172 	/*
    173 	 * Pre-allocate the receive buffers.
    174 	 */
    175 	for (i = 0; error == 0 && i < RXDESCS; i++) {
    176 		error = ze_add_rxbuf(sc, i);
    177 	}
    178 
    179 	if (error) {
    180 		aprint_error(
    181 		    ": unable to allocate or map rx buffer %d, error = %d\n",
    182 		    i, error);
    183 		goto fail_6;
    184 	}
    185 
    186 	/* Event counters, for vmstat -i. */
    188 	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
    189 	    device_xname(sc->sc_dev), "intr");
    190 	evcnt_attach_dynamic(&sc->sc_rxintrcnt, EVCNT_TYPE_INTR,
    191 	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "rx intr");
    192 	evcnt_attach_dynamic(&sc->sc_txintrcnt, EVCNT_TYPE_INTR,
    193 	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx intr");
    194 	evcnt_attach_dynamic(&sc->sc_txdraincnt, EVCNT_TYPE_INTR,
    195 	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx drain");
    196 	evcnt_attach_dynamic(&sc->sc_nobufintrcnt, EVCNT_TYPE_INTR,
    197 	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "nobuf intr");
    198 	evcnt_attach_dynamic(&sc->sc_nointrcnt, EVCNT_TYPE_INTR,
    199 	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "no intr");
    200 
    201 	/*
    202 	 * Create ring loops of the buffer chains.
    203 	 * This is only done once.
    204 	 */
    205 	sc->sc_pzedata = (struct ze_cdata *)sc->sc_cmap->dm_segs[0].ds_addr;
    206 
    207 	rp = sc->sc_zedata->zc_recv;
    208 	rp[RXDESCS].ze_framelen = ZE_FRAMELEN_OW;
    209 	rp[RXDESCS].ze_rdes1 = ZE_RDES1_CA;
    210 	rp[RXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_recv;
    211 
    212 	tp = sc->sc_zedata->zc_xmit;
    213 	tp[TXDESCS].ze_tdr = ZE_TDR_OW;
    214 	tp[TXDESCS].ze_tdes1 = ZE_TDES1_CA;
    215 	tp[TXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_xmit;
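
	/*
	 * The extra descriptor appended to each list above is a chain-address
	 * descriptor: its CA bit is set and its buffer address points back at
	 * the physical start of the list, so the chip wraps from the last
	 * real descriptor to the first, turning both lists into rings.
	 */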
    216 
    217 	if (zereset(sc))
    218 		return;
    219 
    220 	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
    221 	ifp->if_softc = sc;
    222 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    223 	ifp->if_start = zestart;
    224 	ifp->if_ioctl = zeioctl;
    225 	ifp->if_watchdog = zetimeout;
    226 	IFQ_SET_READY(&ifp->if_snd);
    227 
    228 	/*
    229 	 * Attach the interface.
    230 	 */
    231 	if_attach(ifp);
    232 	ether_ifattach(ifp, sc->sc_enaddr);
    233 
    234 	aprint_normal("\n");
    235 	aprint_normal_dev(sc->sc_dev, "hardware address %s\n",
    236 	    ether_sprintf(sc->sc_enaddr));
    237 	return;
    238 
    239 	/*
    240 	 * Free any resources we've allocated during the failed attach
    241 	 * attempt.  Do this in reverse order and fall through.
    242 	 */
    243  fail_6:
    244 	for (i = 0; i < RXDESCS; i++) {
    245 		if (sc->sc_rxmbuf[i] != NULL) {
    246 			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
    247 			m_freem(sc->sc_rxmbuf[i]);
    248 		}
    249 	}
    250  fail_5:
    251 	for (i = 0; i < RXDESCS; i++) {
    252 		if (sc->sc_rcvmap[i] != NULL)
    253 			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
    254 	}
    255  fail_4:
    256 	for (i = 0; i < TXDESCS; i++) {
    257 		if (sc->sc_xmtmap[i] != NULL)
    258 			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
    259 	}
    260 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap);
    261  fail_3:
    262 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap);
    263  fail_2:
    264 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_zedata,
    265 	    sizeof(struct ze_cdata));
    266  fail_1:
    267 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
    268  fail_0:
    269 	return;
    270 }
    271 
    272 /*
    273  * Initialization of interface.
    274  */
    275 void
    276 zeinit(struct ze_softc *sc)
    277 {
    278 	struct ifnet *ifp = &sc->sc_if;
    279 	struct ze_cdata *zc = sc->sc_zedata;
    280 	int i;
    281 
    282 	/*
    283 	 * Reset the interface.
    284 	 */
    285 	if (zereset(sc))
    286 		return;
    287 
    288 	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = sc->sc_txcnt = 0;
    289 	/*
    290 	 * Release and init transmit descriptors.
    291 	 */
    292 	for (i = 0; i < TXDESCS; i++) {
    293 		if (sc->sc_xmtmap[i]->dm_nsegs > 0)
    294 			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
    295 		if (sc->sc_txmbuf[i]) {
    296 			m_freem(sc->sc_txmbuf[i]);
    297 			sc->sc_txmbuf[i] = 0;
    298 		}
    299 		zc->zc_xmit[i].ze_tdr = 0; /* Clear valid bit */
    300 	}
    301 
    302 
    303 	/*
    304 	 * Init receive descriptors.
    305 	 */
    306 	for (i = 0; i < RXDESCS; i++)
    307 		zc->zc_recv[i].ze_framelen = ZE_FRAMELEN_OW;
    308 	sc->sc_nextrx = 0;
    309 
    310 	ZE_WCSR(ZE_CSR6, ZE_NICSR6_IE|ZE_NICSR6_BL_8|ZE_NICSR6_ST|
    311 	    ZE_NICSR6_SR|ZE_NICSR6_DC);
    312 
    313 	ifp->if_flags |= IFF_RUNNING;
    314 	ifp->if_flags &= ~IFF_OACTIVE;
    315 
    316 	/*
    317 	 * Send a setup frame.
    318 	 * This will start the transmit machinery as well.
    319 	 */
    320 	ze_setup(sc);
    321 
    322 }
    323 
    324 /*
    325  * Start output on interface.
    326  */
    327 void
    328 zestart(struct ifnet *ifp)
    329 {
    330 	struct ze_softc *sc = ifp->if_softc;
    331 	struct ze_cdata *zc = sc->sc_zedata;
    332 	paddr_t	buffer;
    333 	struct mbuf *m;
    334 	int nexttx, starttx;
    335 	int len, i, totlen, error;
    336 	int old_inq = sc->sc_inq;
    337 	uint16_t orword, tdr = 0;
    338 	bus_dmamap_t map;
    339 
    340 	while (sc->sc_inq < (TXDESCS - 1)) {
    341 
    342 		if (sc->sc_setup) {
    343 			ze_setup(sc);
    344 			continue;
    345 		}
    346 		nexttx = sc->sc_nexttx;
    347 		IFQ_POLL(&sc->sc_if.if_snd, m);
    348 		if (m == 0)
    349 			goto out;
    350 		/*
    351 		 * Map the mbuf chain for DMA.  We always DMA directly from
    352 		 * the mbufs, which is why the transmit ring is so large
    353 		 * (a packet may use one descriptor per mbuf).
    354 		 */
    355 		map = sc->sc_xmtmap[nexttx];
    356 		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
    357 		    BUS_DMA_WRITE);
    358 		if (error) {
    359 			aprint_error_dev(sc->sc_dev,
    360 			    "zestart: load_mbuf failed: %d", error);
    361 			goto out;
    362 		}
    363 
    364 		if (map->dm_nsegs >= TXDESCS)
    365 			panic("zestart"); /* XXX */
    366 
    367 		if ((map->dm_nsegs + sc->sc_inq) >= (TXDESCS - 1)) {
    368 			bus_dmamap_unload(sc->sc_dmat, map);
    369 			ifp->if_flags |= IFF_OACTIVE;
    370 			goto out;
    371 		}
    372 
    373 		/*
    374 		 * m now points to an mbuf chain that has been loaded into the
    375 		 * DMA map.  Walk its segments and fill in the descriptors.
    376 		 */
    377 		totlen = 0;
    378 		orword = ZE_TDES1_FS;
    379 		starttx = nexttx;
    380 		for (i = 0; i < map->dm_nsegs; i++) {
    381 			buffer = map->dm_segs[i].ds_addr;
    382 			len = map->dm_segs[i].ds_len;
    383 
    384 			KASSERT(len > 0);
    385 
    386 			totlen += len;
    387 			/* Last segment of the packet? */
    388 			if (totlen == m->m_pkthdr.len) {
    389 				sc->sc_txcnt += map->dm_nsegs;
    390 				if (sc->sc_txcnt >= TXDESCS * 3 / 4) {
    391 					orword |= ZE_TDES1_IC;
    392 					sc->sc_txcnt = 0;
    393 				}
    394 				orword |= ZE_TDES1_LS;
    395 				sc->sc_txmbuf[nexttx] = m;
    396 			}
    397 			zc->zc_xmit[nexttx].ze_bufsize = len;
    398 			zc->zc_xmit[nexttx].ze_bufaddr = (char *)buffer;
    399 			zc->zc_xmit[nexttx].ze_tdes1 = orword;
    400 			zc->zc_xmit[nexttx].ze_tdr = tdr;
    401 
    402 			if (++nexttx == TXDESCS)
    403 				nexttx = 0;
    404 			orword = 0;
    405 			tdr = ZE_TDR_OW;
    406 		}
    407 
    408 		sc->sc_inq += map->dm_nsegs;
    409 
    410 		IFQ_DEQUEUE(&ifp->if_snd, m);
    411 #ifdef DIAGNOSTIC
    412 		if (totlen != m->m_pkthdr.len)
    413 			panic("zestart: len fault");
    414 #endif
    415 		/*
    416 		 * Turn ownership of the packet over to the device.
    417 		 */
    418 		zc->zc_xmit[starttx].ze_tdr = ZE_TDR_OW;
    419 
    420 		/*
    421 		 * Kick off the transmit logic, if it is stopped.
    422 		 */
    423 		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
    424 			ZE_WCSR(ZE_CSR1, -1);
    425 		sc->sc_nexttx = nexttx;
    426 	}
    427 	if (sc->sc_inq == (TXDESCS - 1))
    428 		ifp->if_flags |= IFF_OACTIVE;
    429 
    430 out:	if (old_inq < sc->sc_inq)
    431 		ifp->if_timer = 5; /* If transmit logic dies */
    432 }
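
/*
 * Ring accounting in zestart(): sc_inq counts the descriptors currently
 * handed to the chip.  The loop above stops one short of a completely full
 * ring (TXDESCS - 1); once that limit is hit, IFF_OACTIVE is set and
 * transmission is resumed from sgec_intr() as descriptors are reclaimed.
 * The watchdog timer is armed whenever new work was queued, so a wedged
 * transmitter is eventually caught by zetimeout().
 */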
    433 
    434 int
    435 sgec_intr(struct ze_softc *sc)
    436 {
    437 	struct ze_cdata *zc = sc->sc_zedata;
    438 	struct ifnet *ifp = &sc->sc_if;
    439 	struct mbuf *m;
    440 	int csr, len;
    441 
    442 	csr = ZE_RCSR(ZE_CSR5);
    443 	if ((csr & ZE_NICSR5_IS) == 0) { /* Not our interrupt */
    444 		sc->sc_nointrcnt.ev_count++;
    445 		return 0;
    446 	}
    447 	ZE_WCSR(ZE_CSR5, csr);
    448 
    449 	if (csr & ZE_NICSR5_RU)
    450 		sc->sc_nobufintrcnt.ev_count++;
    451 
    452 	if (csr & ZE_NICSR5_RI) {
    453 		sc->sc_rxintrcnt.ev_count++;
    454 		while ((zc->zc_recv[sc->sc_nextrx].ze_framelen &
    455 		    ZE_FRAMELEN_OW) == 0) {
    456 
    457 			ifp->if_ipackets++;
    458 			m = sc->sc_rxmbuf[sc->sc_nextrx];
    459 			len = zc->zc_recv[sc->sc_nextrx].ze_framelen;
    460 			ze_add_rxbuf(sc, sc->sc_nextrx);
    461 			if (++sc->sc_nextrx == RXDESCS)
    462 				sc->sc_nextrx = 0;
    463 			if (len < ETHER_MIN_LEN) {
    464 				ifp->if_ierrors++;
    465 				m_freem(m);
    466 			} else {
    467 				m->m_pkthdr.rcvif = ifp;
    468 				m->m_pkthdr.len = m->m_len =
    469 				    len - ETHER_CRC_LEN;
    470 				bpf_mtap(ifp, m);
    471 				(*ifp->if_input)(ifp, m);
    472 			}
    473 		}
    474 	}
    475 
    476 	if (csr & ZE_NICSR5_TI)
    477 		sc->sc_txintrcnt.ev_count++;
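
	/*
	 * Reclaim transmitted descriptors: walk from the last acknowledged
	 * descriptor (sc_lastack) towards sc_nexttx, skipping completed setup
	 * frames, and for every packet whose descriptors have all been handed
	 * back by the chip, unload the DMA map and free the mbuf chain.  Stop
	 * at the first descriptor the chip still owns.
	 */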
    478 	if (sc->sc_lastack != sc->sc_nexttx) {
    479 		int lastack;
    480 		for (lastack = sc->sc_lastack; lastack != sc->sc_nexttx; ) {
    481 			bus_dmamap_t map;
    482 			int nlastack;
    483 
    484 			if ((zc->zc_xmit[lastack].ze_tdr & ZE_TDR_OW) != 0)
    485 				break;
    486 
    487 			if ((zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_DT) ==
    488 			    ZE_TDES1_DT_SETUP) {
    489 				if (++lastack == TXDESCS)
    490 					lastack = 0;
    491 				sc->sc_inq--;
    492 				continue;
    493 			}
    494 
    495 			KASSERT(zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_FS);
    496 			map = sc->sc_xmtmap[lastack];
    497 			KASSERT(map->dm_nsegs > 0);
    498 			nlastack = (lastack + map->dm_nsegs - 1) % TXDESCS;
    499 			if (zc->zc_xmit[nlastack].ze_tdr & ZE_TDR_OW)
    500 				break;
    501 			lastack = nlastack;
    502 			if (sc->sc_txcnt > map->dm_nsegs)
    503 			    sc->sc_txcnt -= map->dm_nsegs;
    504 			else
    505 			    sc->sc_txcnt = 0;
    506 			sc->sc_inq -= map->dm_nsegs;
    507 			KASSERT(zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_LS);
    508 			ifp->if_opackets++;
    509 			bus_dmamap_unload(sc->sc_dmat, map);
    510 			KASSERT(sc->sc_txmbuf[lastack]);
    511 			bpf_mtap(ifp, sc->sc_txmbuf[lastack]);
    512 			m_freem(sc->sc_txmbuf[lastack]);
    513 			sc->sc_txmbuf[lastack] = 0;
    514 			if (++lastack == TXDESCS)
    515 				lastack = 0;
    516 		}
    517 		if (lastack != sc->sc_lastack) {
    518 			sc->sc_txdraincnt.ev_count++;
    519 			sc->sc_lastack = lastack;
    520 			if (sc->sc_inq == 0)
    521 				ifp->if_timer = 0;
    522 			ifp->if_flags &= ~IFF_OACTIVE;
    523 			zestart(ifp); /* Put in more in queue */
    524 		}
    525 	}
    526 	return 1;
    527 }
    528 
    529 /*
    530  * Process an ioctl request.
    531  */
    532 int
    533 zeioctl(struct ifnet *ifp, u_long cmd, void *data)
    534 {
    535 	struct ze_softc *sc = ifp->if_softc;
    536 	struct ifaddr *ifa = data;
    537 	int s = splnet(), error = 0;
    538 
    539 	switch (cmd) {
    540 
    541 	case SIOCINITIFADDR:
    542 		ifp->if_flags |= IFF_UP;
    543 		switch(ifa->ifa_addr->sa_family) {
    544 #ifdef INET
    545 		case AF_INET:
    546 			zeinit(sc);
    547 			arp_ifinit(ifp, ifa);
    548 			break;
    549 #endif
    550 		}
    551 		break;
    552 
    553 	case SIOCSIFFLAGS:
    554 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
    555 			break;
    556 		/* XXX re-use ether_ioctl() */
    557 		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
    558 		case IFF_RUNNING:
    559 			/*
    560 			 * If the interface is marked down and it is running,
    561 			 * stop it by disabling the transmit and receive logic.
    562 			 */
    563 			ZE_WCSR(ZE_CSR6, ZE_RCSR(ZE_CSR6) &
    564 			    ~(ZE_NICSR6_ST|ZE_NICSR6_SR));
    565 			ifp->if_flags &= ~IFF_RUNNING;
    566 			break;
    567 		case IFF_UP:
    568 			/*
    569 			 * If the interface is marked up and it is stopped,
    570 			 * start it.
    571 			 */
    572 			zeinit(sc);
    573 			break;
    574 		case IFF_UP|IFF_RUNNING:
    575 			/*
    576 			 * Send a new setup packet to reflect any changed
    577 			 * flags, such as IFF_PROMISC.
    578 			 */
    579 			ze_setup(sc);
    580 			break;
    581 		case 0:
    582 			break;
    583 		}
    584 		break;
    585 
    586 	case SIOCADDMULTI:
    587 	case SIOCDELMULTI:
    588 		/*
    589 		 * Update our multicast list.
    590 		 */
    591 		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
    592 			/*
    593 			 * Multicast list has changed; set the hardware filter
    594 			 * accordingly.
    595 			 */
    596 			if (ifp->if_flags & IFF_RUNNING)
    597 				ze_setup(sc);
    598 			error = 0;
    599 		}
    600 		break;
    601 
    602 	default:
    603 		error = ether_ioctl(ifp, cmd, data);
    604 
    605 	}
    606 	splx(s);
    607 	return (error);
    608 }
    609 
    610 /*
    611  * Add a receive buffer to the indicated descriptor.
    612  */
    613 int
    614 ze_add_rxbuf(struct ze_softc *sc, int i)
    615 {
    616 	struct mbuf *m;
    617 	struct ze_rdes *rp;
    618 	int error;
    619 
    620 	MGETHDR(m, M_DONTWAIT, MT_DATA);
    621 	if (m == NULL)
    622 		return (ENOBUFS);
    623 
    624 	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
    625 	MCLGET(m, M_DONTWAIT);
    626 	if ((m->m_flags & M_EXT) == 0) {
    627 		m_freem(m);
    628 		return (ENOBUFS);
    629 	}
    630 
    631 	if (sc->sc_rxmbuf[i] != NULL)
    632 		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
    633 
    634 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
    635 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
    636 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
    637 	if (error)
    638 		panic("%s: can't load rx DMA map %d, error = %d",
    639 		    device_xname(sc->sc_dev), i, error);
    640 	sc->sc_rxmbuf[i] = m;
    641 
    642 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
    643 	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
    644 
    645 	/*
    646 	 * The mbuf cluster is page aligned.  Offset the data by 2 bytes so
    647 	 * the IP header, following the 14-byte Ethernet header, is longword aligned.
    648 	 */
    649 	m->m_data += 2;
    650 	rp = &sc->sc_zedata->zc_recv[i];
    651 	rp->ze_bufsize = (m->m_ext.ext_size - 2);
    652 	rp->ze_bufaddr = (char *)sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
    653 	rp->ze_framelen = ZE_FRAMELEN_OW;
    654 
    655 	return (0);
    656 }
    657 
    658 /*
    659  * Create a setup packet and put it in the queue for sending.
    660  */
    661 void
    662 ze_setup(struct ze_softc *sc)
    663 {
    664 	struct ether_multi *enm;
    665 	struct ether_multistep step;
    666 	struct ze_cdata *zc = sc->sc_zedata;
    667 	struct ifnet *ifp = &sc->sc_if;
    668 	const u_int8_t *enaddr = CLLADDR(ifp->if_sadl);
    669 	int j, idx, reg;
    670 
    671 	if (sc->sc_inq == (TXDESCS - 1)) {
    672 		sc->sc_setup = 1;
    673 		return;
    674 	}
    675 	sc->sc_setup = 0;
    676 	/*
    677 	 * Init the setup packet with valid info.
    678 	 */
    679 	memset(zc->zc_setup, 0xff, sizeof(zc->zc_setup)); /* Broadcast */
    680 	memcpy(zc->zc_setup, enaddr, ETHER_ADDR_LEN);
    681 
    682 	/*
    683 	 * Multicast handling.  The SGEC can filter on up to 16 perfect
    684 	 * (direct) Ethernet addresses.
    685 	 */
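	/*
	 * Layout of the 128-byte setup frame as it is filled in here: 16
	 * slots of 8 bytes each, with an address in the first 6 bytes of a
	 * slot.  Slot 0 gets our own station address, slot 1 keeps the
	 * all-ones (broadcast) pattern from the memset above, and multicast
	 * addresses are copied into the slots starting at offset 16.  If a
	 * range of addresses is requested, or more addresses arrive than
	 * fit, we fall back to ALLMULTI below.
	 */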
    686 	j = 16;
    687 	ifp->if_flags &= ~IFF_ALLMULTI;
    688 	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
    689 	while (enm != NULL) {
    690 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
    691 			ifp->if_flags |= IFF_ALLMULTI;
    692 			break;
    693 		}
    694 		memcpy(&zc->zc_setup[j], enm->enm_addrlo, ETHER_ADDR_LEN);
    695 		j += 8;
    696 		ETHER_NEXT_MULTI(step, enm);
    697 		if ((enm != NULL) && (j == 128)) {
    698 			ifp->if_flags |= IFF_ALLMULTI;
    699 			break;
    700 		}
    701 	}
    702 
    703 	/*
    704 	 * ALLMULTI implies PROMISC in this driver.
    705 	 */
    706 	if (ifp->if_flags & IFF_ALLMULTI)
    707 		ifp->if_flags |= IFF_PROMISC;
    708 	else if (ifp->if_pcount == 0)
    709 		ifp->if_flags &= ~IFF_PROMISC;
    710 
    711 	/*
    712 	 * Fiddle with the receive logic.
    713 	 */
    714 	reg = ZE_RCSR(ZE_CSR6);
    715 	DELAY(10);
    716 	ZE_WCSR(ZE_CSR6, reg & ~ZE_NICSR6_SR); /* Stop rx */
    717 	reg &= ~ZE_NICSR6_AF;
    718 	if (ifp->if_flags & IFF_PROMISC)
    719 		reg |= ZE_NICSR6_AF_PROM;
    720 	else if (ifp->if_flags & IFF_ALLMULTI)
    721 		reg |= ZE_NICSR6_AF_ALLM;
    722 	DELAY(10);
    723 	ZE_WCSR(ZE_CSR6, reg);
    724 	/*
    725 	 * Only send a setup packet if needed.
    726 	 */
    727 	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) == 0) {
    728 		idx = sc->sc_nexttx;
    729 		zc->zc_xmit[idx].ze_tdes1 = ZE_TDES1_DT_SETUP;
    730 		zc->zc_xmit[idx].ze_bufsize = 128;
    731 		zc->zc_xmit[idx].ze_bufaddr = sc->sc_pzedata->zc_setup;
    732 		zc->zc_xmit[idx].ze_tdr = ZE_TDR_OW;
    733 
    734 		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
    735 			ZE_WCSR(ZE_CSR1, -1);
    736 
    737 		sc->sc_inq++;
    738 		if (++sc->sc_nexttx == TXDESCS)
    739 			sc->sc_nexttx = 0;
    740 	}
    741 }
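
/*
 * Note that ze_setup() may be called while the transmit ring is full; in
 * that case it only records the request in sc_setup and returns, and
 * zestart() retries the setup frame first thing once descriptors become
 * available again.
 */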
    742 
    743 /*
    744  * Check for dead transmit logic.
    745  */
    746 void
    747 zetimeout(struct ifnet *ifp)
    748 {
    749 	struct ze_softc *sc = ifp->if_softc;
    750 
    751 	if (sc->sc_inq == 0)
    752 		return;
    753 
    754 	aprint_error_dev(sc->sc_dev, "xmit logic died, resetting...\n");
    755 	/*
    756 	 * Reset the whole interface to get it going again.
    757 	 * Would just restarting the transmit logic be enough?
    758 	 */
    759 	zeinit(sc);
    760 }
    761 
    762 /*
    763  * Reset chip:
    764  * Set/reset the reset flag.
    765  *  Write interrupt vector.
    766  *  Write ring buffer addresses.
    767  *  Write SBR.
    768  */
    769 bool
    770 zereset(struct ze_softc *sc)
    771 {
    772 	int reg, i;
    773 
    774 	ZE_WCSR(ZE_CSR6, ZE_NICSR6_RE);
    775 	DELAY(50000);
    776 	if (ZE_RCSR(ZE_CSR6) & ZE_NICSR5_SF) {
    777 		aprint_error_dev(sc->sc_dev, "selftest failed\n");
    778 		return true;
    779 	}
    780 
    781 	/*
    782 	 * Get the vector that was set at match time, and remember it.
    783 	 * WHICH VECTOR TO USE? Take one unused. XXX
    784 	 * The odd vector-setting procedure below comes from the programmer's manual.
    785 	 */
    786 	reg = ZE_NICSR0_IPL14 | sc->sc_intvec | 0x1fff0003; /* SYNC/ASYNC??? */
    787 	i = 10;
    788 	do {
    789 		if (i-- == 0) {
    790 			aprint_error_dev(sc->sc_dev,
    791 			    "failing SGEC CSR0 init\n");
    792 			return true;
    793 		}
    794 		ZE_WCSR(ZE_CSR0, reg);
    795 	} while (ZE_RCSR(ZE_CSR0) != reg);
    796 
    797 	ZE_WCSR(ZE_CSR3, (vaddr_t)sc->sc_pzedata->zc_recv);
    798 	ZE_WCSR(ZE_CSR4, (vaddr_t)sc->sc_pzedata->zc_xmit);
    799 	return false;
    800 }
    801
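
/*
 * Illustrative sketch only: the glue a bus front-end is expected to provide
 * around sgec_attach() and sgec_intr().  The attach-args structure and its
 * "za_*" members below are hypothetical -- the real front-ends live in the
 * machine-dependent if_ze attachments -- but the softc fields being filled
 * in are the ones this file relies on.  The station address (sc_enaddr) is
 * expected to have been read from the ROM before sgec_attach() is called.
 * Not compiled in.
 */
#if 0
struct ze_attach_args {			/* hypothetical */
	bus_space_tag_t		za_iot;
	bus_space_handle_t	za_ioh;
	bus_dma_tag_t		za_dmat;
	int			za_intvec;
};

static void
zeattach(device_t parent, device_t self, void *aux)
{
	struct ze_softc *sc = device_private(self);
	struct ze_attach_args *za = aux;

	sc->sc_dev = self;
	sc->sc_iot = za->za_iot;	/* mapped SGEC CSRs */
	sc->sc_ioh = za->za_ioh;
	sc->sc_dmat = za->za_dmat;	/* bus DMA tag */
	sc->sc_intvec = za->za_intvec;	/* vector programmed by zereset() */
	/* sc->sc_enaddr must already hold the station address here. */

	sgec_attach(sc);
}

static void
zeintr(void *arg)
{
	struct ze_softc *sc = arg;

	(void)sgec_intr(sc);
}
#endif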