/* $NetBSD: i82596.c,v 1.1.4.2 2004/08/03 10:46:15 skrll Exp $ */

/*
 * Copyright (c) 2003 Jochen Kunz.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Jochen Kunz may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOCHEN KUNZ
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL JOCHEN KUNZ
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the Intel i82596 10MBit/s Ethernet chip.
 * It operates the i82596 in 32-Bit Linear Mode, as opposed to the old
 * i82586 ie(4) driver (src/sys/dev/ic/i82586.c), which degrades the i82596
 * to i82586 compatibility mode.
 * Documentation about this chip can be found at http://www.openpa.net/,
 * file names 29021806.pdf and 29021906.pdf.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i82596.c,v 1.1.4.2 2004/08/03 10:46:15 skrll Exp $");

/* autoconfig and device stuff */
#include <sys/param.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <machine/iomod.h>
#include <machine/autoconf.h>
#include "locators.h"
#include "ioconf.h"

/* bus_space / bus_dma etc. */
#include <machine/bus.h>
#include <machine/intr.h>

/* general system data and functions */
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/ioccom.h>
#include <sys/types.h>

/* tsleep / sleep / wakeup */
#include <sys/proc.h>
/* hz for above */
#include <sys/kernel.h>

/* network stuff */
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <sys/socket.h>
#include <sys/mbuf.h>

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/ic/i82596reg.h>
#include <dev/ic/i82596var.h>



/* Supported chip variants */
char *i82596_typenames[] = { "unknown", "DX/SX", "CA" };



/* media change and status callback */
static int iee_mediachange(struct ifnet *);
static void iee_mediastatus(struct ifnet *, struct ifmediareq *);

/* interface routines to upper protocols */
static void iee_start(struct ifnet *);			/* initiate output */
static int iee_ioctl(struct ifnet *, u_long, caddr_t);	/* ioctl routine */
static int iee_init(struct ifnet *);			/* init routine */
static void iee_stop(struct ifnet *, int);		/* stop routine */
static void iee_watchdog(struct ifnet *);		/* timer routine */
static void iee_drain(struct ifnet *);			/* release resources */

/* internal helper functions */
static void iee_cb_setup(struct iee_softc *, u_int32_t);

/*
Things an MD frontend has to provide:

The functions via function pointers in the softc:
        int (*sc_iee_cmd)(struct iee_softc *sc, u_int32_t cmd);
        int (*sc_iee_reset)(struct iee_softc *sc);
        void (*sc_mediastatus)(struct ifnet *, struct ifmediareq *);
        int (*sc_mediachange)(struct ifnet *);

sc_iee_cmd(): send a command to the i82596 by writing the cmd parameter
	to the SCB cmd word and issuing a Channel Attention.
sc_iee_reset(): initiate a reset, supply the address of the SCP to the
	chip, wait for the chip to initialize and ACK interrupts that
	this may have caused by calling (sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
These functions must carefully bus_dmamap_sync() all data they have touched!

sc_mediastatus() and sc_mediachange() are just MD hooks to the
corresponding MI functions. The MD frontend may set these pointers to
NULL when they are not needed.

sc->sc_type has to be set to I82596_UNKNOWN, I82596_DX or I82596_CA.
This is only used for printing the correct chip type at attach time. The
MI backend doesn't distinguish different chip types when programming
the chip.

sc->sc_flags has to be set to 0 on little endian hardware and to
IEE_NEED_SWAP on big endian hardware, when endianness conversion is not
done by the bus attachment. Usually you need to set IEE_NEED_SWAP
when IEE_SYSBUS_BE is set in the sysbus byte.

sc->sc_cl_align must be set to 1 or to the cache line size. When set to
1 no special alignment of DMA descriptors is done. If sc->sc_cl_align != 1
it forces alignment of the data structures in the shared memory to a multiple
of sc->sc_cl_align. This is needed on archs like hp700 that have non DMA
I/O coherent caches and are unable to map the shared memory uncacheable.
(At least pre-PA7100LC CPUs are unable to map memory uncacheable.)

sc->sc_cl_align MUST BE INITIALIZED BEFORE THE FOLLOWING MACROS ARE USED:
SC_* IEE_*_SZ IEE_*_OFF IEE_SHMEM_MAX (shell style glob(3) pattern)

The MD frontend has to allocate a piece of DMA memory of at least
IEE_SHMEM_MAX bytes. All communication with the chip is done via
this shared memory. If possible map this memory non-cacheable on
archs with non DMA I/O coherent caches. The base of the memory needs
to be aligned to an even address if sc->sc_cl_align == 1 and aligned
to a cache line if sc->sc_cl_align != 1.

An interrupt with iee_intr() as handler must be established.

Call void iee_attach(struct iee_softc *sc, u_int8_t *ether_address,
int *media, int nmedia, int defmedia); when everything is set up. The
first parameter is a pointer to the MI softc, ether_address is an array
that contains the ethernet address. media is an array of the media types
provided by the hardware. The members of this array are supplied to
ifmedia_add() in sequence. nmedia is the count of elements in media.
defmedia is the default media that is set via ifmedia_set().
nmedia and defmedia are ignored when media == NULL.

The MD frontend may call iee_detach() to detach the device.

See sys/arch/hp700/gsc/if_iee.c for an example.
*/
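
/*
 * For illustration only: a minimal sketch of an MD frontend attach path
 * following the rules above. The helper names my_iee_cmd(), my_iee_reset()
 * and my_intr_establish() are hypothetical, and the sc_flags / sc_cl_align
 * values are assumptions about the bus; only the softc members, macros and
 * MI functions named in this file are taken from the driver. Error
 * handling is omitted.
 *
 *	void
 *	my_iee_attach_sketch(struct iee_softc *sc, u_int8_t *eaddr)
 *	{
 *		bus_dma_segment_t seg;
 *		caddr_t kva;
 *		int rsegs;
 *
 *		sc->sc_iee_cmd = my_iee_cmd;
 *		sc->sc_iee_reset = my_iee_reset;
 *		sc->sc_mediastatus = NULL;	(optional hooks)
 *		sc->sc_mediachange = NULL;
 *		sc->sc_type = I82596_CA;
 *		sc->sc_flags = IEE_NEED_SWAP;	(big endian bus assumed)
 *		sc->sc_cl_align = 32;		(cache line size assumed)
 *
 *		(sc_cl_align is set, so IEE_SHMEM_MAX may be used now)
 *		bus_dmamem_alloc(sc->sc_dmat, IEE_SHMEM_MAX, PAGE_SIZE, 0,
 *		    &seg, 1, &rsegs, BUS_DMA_NOWAIT);
 *		bus_dmamem_map(sc->sc_dmat, &seg, rsegs, IEE_SHMEM_MAX,
 *		    &kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
 *		bus_dmamap_create(sc->sc_dmat, IEE_SHMEM_MAX, 1,
 *		    IEE_SHMEM_MAX, 0, BUS_DMA_NOWAIT, &sc->sc_shmem_map);
 *		bus_dmamap_load(sc->sc_dmat, sc->sc_shmem_map, kva,
 *		    IEE_SHMEM_MAX, NULL, BUS_DMA_NOWAIT);
 *		(stash kva wherever i82596var.h's SC_* macros expect it)
 *
 *		my_intr_establish(IPL_NET, iee_intr, sc);
 *		iee_attach(sc, eaddr, NULL, 0, 0);
 *	}
 */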


/*
How frame reception is done:
Each Receive Frame Descriptor has one associated Receive Buffer Descriptor.
Each RBD points to the data area of a mbuf cluster. The RFDs are linked
together in a circular list. sc->sc_rx_done is the count of RFDs in the
list already processed, i.e. the index of the RFD to check first for a
new frame at the next RX interrupt. Upon successful reception of
a frame the mbuf cluster is handed to upper protocol layers, a new mbuf
cluster is allocated and the RFD / RBD are reinitialized accordingly.

When an RFD list overrun occurs, the whole RFD and RBD lists are
reinitialized and frame reception is started again.
*/
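
/*
 * In outline, the per-slot work of the loop in iee_intr() below reduces
 * to the following (a condensed sketch of the code that follows, not
 * additional API; DMA syncs, BPF tap and error handling omitted):
 *
 *	rx_map = sc->sc_rx_map[sc->sc_rx_done];
 *	rx_mbuf = sc->sc_rx_mbuf[sc->sc_rx_done];
 *	MGETHDR(new_mbuf, M_DONTWAIT, MT_DATA);
 *	MCLGET(new_mbuf, M_DONTWAIT);
 *	bus_dmamap_unload(sc->sc_dmat, rx_map);
 *	bus_dmamap_load(sc->sc_dmat, rx_map, new_mbuf->m_ext.ext_buf,
 *	    new_mbuf->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
 *	(*ifp->if_input)(ifp, rx_mbuf);
 *	sc->sc_rx_mbuf[sc->sc_rx_done] = new_mbuf;
 *	sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
 */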
int
iee_intr(void *intarg)
{
	struct iee_softc *sc = intarg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct iee_rfd *rfd;
	struct iee_rbd *rbd;
	bus_dmamap_t rx_map;
	struct mbuf *rx_mbuf;
	struct mbuf *new_mbuf;
	int scb_status;
	int scb_cmd;
	int n;

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
		return(1);
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_POSTREAD);
	scb_status = SC_SCB->scb_status;
	scb_cmd = SC_SCB->scb_cmd;
	n = 0;
	rfd = SC_RFD(sc->sc_rx_done);
	while ((scb_status & IEE_SCB_STAT_FR) != 0
	    && (rfd->rfd_status & IEE_RFD_B) == 0 && rfd->rfd_status != 0) {
		/* At least one packet was received. */
		n = 1;
		rbd = SC_RBD(sc->sc_rx_done);
		rx_map = sc->sc_rx_map[sc->sc_rx_done];
		rx_mbuf = sc->sc_rx_mbuf[sc->sc_rx_done];
		SC_RBD((sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD)->rbd_size
		    &= ~IEE_RBD_EL;
		if ((rfd->rfd_status & IEE_RFD_OK) == 0
		    || (rbd->rbd_count & IEE_RBD_EOF) == 0
		    || (rbd->rbd_count & IEE_RBD_F) == 0) {
			/* Receive error, skip frame and reuse buffer. */
			printf("%s: iee_intr: receive error %d, rfd_status="
			    "0x%.4x, rbd_count=0x%.4x\n", sc->sc_dev.dv_xname,
			    ++sc->sc_rx_err, rfd->rfd_status, rbd->rbd_count);
			rfd->rfd_status = 0;
			rbd->rbd_count = 0;
			rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
			sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
			/*
			 * Advance rfd, else the while condition re-tests the
			 * RFD just cleared and the loop exits early.
			 */
			rfd = SC_RFD(sc->sc_rx_done);
			continue;
		}
		rfd->rfd_status = 0;
		bus_dmamap_sync(sc->sc_dmat, rx_map, 0, rx_mbuf->m_ext.ext_size,
		    BUS_DMASYNC_POSTREAD);
		rx_mbuf->m_pkthdr.len = rx_mbuf->m_len =
		    rbd->rbd_count & IEE_RBD_COUNT;
		rx_mbuf->m_pkthdr.rcvif = ifp;
		MGETHDR(new_mbuf, M_DONTWAIT, MT_DATA);
		if (new_mbuf == NULL) {
			printf("%s: iee_intr: can't allocate mbuf\n",
			    sc->sc_dev.dv_xname);
			break;
		}
		MCLAIM(new_mbuf, &sc->sc_ethercom.ec_rx_mowner);
		MCLGET(new_mbuf, M_DONTWAIT);
		if ((new_mbuf->m_flags & M_EXT) == 0) {
			printf("%s: iee_intr: can't alloc mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			m_freem(new_mbuf);
			break;
		}
		bus_dmamap_unload(sc->sc_dmat, rx_map);
		if (bus_dmamap_load(sc->sc_dmat, rx_map,
		    new_mbuf->m_ext.ext_buf, new_mbuf->m_ext.ext_size,
		    NULL, BUS_DMA_READ | BUS_DMA_NOWAIT) != 0)
			panic("%s: iee_intr: can't load RX DMA map\n",
			    sc->sc_dev.dv_xname);
		bus_dmamap_sync(sc->sc_dmat, rx_map, 0,
		    new_mbuf->m_ext.ext_size, BUS_DMASYNC_PREREAD);
#if NBPFILTER > 0
		if (ifp->if_bpf != 0)
			bpf_mtap(ifp->if_bpf, rx_mbuf);
#endif /* NBPFILTER > 0 */
		(*ifp->if_input)(ifp, rx_mbuf);
		ifp->if_ipackets++;
		sc->sc_rx_mbuf[sc->sc_rx_done] = new_mbuf;
		rbd->rbd_count = 0;
		rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
		rbd->rbd_rb_addr = rx_map->dm_segs[0].ds_addr;
		sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
		rfd = SC_RFD(sc->sc_rx_done);
	}
	if ((scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR1
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR2
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR3) {
		/* Receive Overrun, reinit receive ring buffer. */
		for (n = 0 ; n < IEE_NRFD ; n++) {
			SC_RFD(n)->rfd_cmd = IEE_RFD_SF;
			SC_RFD(n)->rfd_link_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF
			    + IEE_RFD_SZ * ((n + 1) % IEE_NRFD));
			SC_RBD(n)->rbd_next_rbd = IEE_PHYS_SHMEM(IEE_RBD_OFF
			    + IEE_RBD_SZ * ((n + 1) % IEE_NRFD));
			SC_RBD(n)->rbd_size = IEE_RBD_EL |
			    sc->sc_rx_map[n]->dm_segs[0].ds_len;
			SC_RBD(n)->rbd_rb_addr =
			    sc->sc_rx_map[n]->dm_segs[0].ds_addr;
		}
		SC_RFD(0)->rfd_rbd_addr = IEE_PHYS_SHMEM(IEE_RBD_OFF);
		sc->sc_rx_done = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_RFD_OFF,
		    IEE_RFD_LIST_SZ + IEE_RBD_LIST_SZ, BUS_DMASYNC_PREWRITE);
		(sc->sc_iee_cmd)(sc, IEE_SCB_RUC_ST);
		printf("%s: iee_intr: receive ring buffer overrun\n",
		    sc->sc_dev.dv_xname);
	} else
		if (n != 0)
			bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
			    IEE_RFD_OFF, IEE_RFD_LIST_SZ + IEE_RBD_LIST_SZ,
			    BUS_DMASYNC_PREWRITE);

	if (sc->sc_next_cb != 0 && (scb_status & IEE_SCB_CUS_ACT) == 0) {
		/* CMD list finished */
		ifp->if_timer = 0;
		if (sc->sc_next_tbd != 0) {
			/* A TX CMD list finished, cleanup */
			for (n = 0 ; n < sc->sc_next_cb ; n++) {
				m_freem(sc->sc_tx_mbuf[n]);
				sc->sc_tx_mbuf[n] = NULL;
				bus_dmamap_unload(sc->sc_dmat,sc->sc_tx_map[n]);
				if ((SC_CB(n)->cb_status & IEE_CB_COL) != 0 &&
				    (SC_CB(n)->cb_status & IEE_CB_MAXCOL) == 0)
					sc->sc_tx_col += 16;
				else
					sc->sc_tx_col += SC_CB(n)->cb_status
					    & IEE_CB_MAXCOL;
			}
			sc->sc_next_tbd = 0;
			ifp->if_flags &= ~IFF_OACTIVE;
		}
		for (n = 0 ; n < sc->sc_next_cb ; n++) {
			/* Check if a CMD failed, but ignore TX errors. */
			if ((SC_CB(n)->cb_cmd & IEE_CB_CMD) != IEE_CB_CMD_TR
			    && ((SC_CB(n)->cb_status & IEE_CB_C) == 0
			    || (SC_CB(n)->cb_status & IEE_CB_OK) == 0))
				printf("%s: iee_intr: scb_status=0x%x "
				    "scb_cmd=0x%x failed command %d: "
				    "cb_status[%d]=0x%.4x cb_cmd[%d]=0x%.4x\n",
				    sc->sc_dev.dv_xname, scb_status, scb_cmd,
				    ++sc->sc_cmd_err, n, SC_CB(n)->cb_status,
				    n, SC_CB(n)->cb_cmd);
		}
		sc->sc_next_cb = 0;
		if ((sc->sc_flags & IEE_WANT_MCAST) != 0) {
			iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
			(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
		} else
			/* Try to get deferred packets going. */
			iee_start(ifp);
	}
	if (IEE_SWAP(SC_SCB->scb_crc_err) != sc->sc_crc_err) {
		sc->sc_crc_err = IEE_SWAP(SC_SCB->scb_crc_err);
		printf("%s: iee_intr: crc_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_crc_err);
	}
	if (IEE_SWAP(SC_SCB->scb_align_err) != sc->sc_align_err) {
		sc->sc_align_err = IEE_SWAP(SC_SCB->scb_align_err);
		printf("%s: iee_intr: align_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_align_err);
	}
	if (IEE_SWAP(SC_SCB->scb_resource_err) != sc->sc_resource_err) {
		sc->sc_resource_err = IEE_SWAP(SC_SCB->scb_resource_err);
		printf("%s: iee_intr: resource_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_resource_err);
	}
	if (IEE_SWAP(SC_SCB->scb_overrun_err) != sc->sc_overrun_err) {
		sc->sc_overrun_err = IEE_SWAP(SC_SCB->scb_overrun_err);
		printf("%s: iee_intr: overrun_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_overrun_err);
	}
	if (IEE_SWAP(SC_SCB->scb_rcvcdt_err) != sc->sc_rcvcdt_err) {
		sc->sc_rcvcdt_err = IEE_SWAP(SC_SCB->scb_rcvcdt_err);
		printf("%s: iee_intr: rcvcdt_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_rcvcdt_err);
	}
	if (IEE_SWAP(SC_SCB->scb_short_fr_err) != sc->sc_short_fr_err) {
		sc->sc_short_fr_err = IEE_SWAP(SC_SCB->scb_short_fr_err);
		printf("%s: iee_intr: short_fr_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_short_fr_err);
	}
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
	return(1);
}



/*
How Command Block List Processing is done.

A running CBL is never manipulated. If there is a CBL already running,
further CMDs are deferred until the current list is done. A new list is
set up when the old one has finished.
This eases programming. To manipulate a running CBL it would be necessary
to suspend the Command Unit to avoid race conditions. After a suspend
is sent we have to wait for an interrupt that ACKs the suspend. Then
we can manipulate the CBL and resume operation. I am not sure that this
is more efficient than the current, much simpler approach. => KISS
See i82596CA data sheet page 26.

A CBL is running or on the way to be set up when (sc->sc_next_cb != 0).

A CBL may consist of TX CMDs, and _only_ TX CMDs.
A TX CBL is running or on the way to be set up when
((sc->sc_next_cb != 0) && (sc->sc_next_tbd != 0)).

A CBL may consist of other non-TX CMDs like IAS or CONF, and _only_
non-TX CMDs.

This follows mostly from the way an Ethernet driver works and
because running CBLs are not manipulated when they are on the way. If
if_start() is called there will be TX CMDs enqueued so we have a running
CBL, and other CMDs from e.g. if_ioctl() will be deferred and vice versa.

The Multicast Setup Command is special. An MCS needs more space than
a single CB has. The actual space requirement depends on the length of the
multicast list. So we always defer MCS until other CBLs are finished,
then we set up a CONF CMD in the first CB. The CONF CMD is needed to
turn ALLMULTI on the hardware on or off. The MCS is the 2nd CB and may
use all the remaining space in the CBL and the Transmit Buffer Descriptor
List. (Therefore CBL and TBDL must be contiguous in physical and virtual
memory. This is guaranteed through the definitions of the list offsets
in i82596reg.h and because only a single DMA segment is used for all
lists.) When ALLMULTI is enabled via the CONF CMD, the MCS is run with
a multicast list length of 0, thus disabling the multicast filter.
A deferred MCS is signaled via ((sc->sc_flags & IEE_WANT_MCAST) != 0)
*/
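
/*
 * For illustration, a typical non-TX command list is issued the way
 * iee_init() below does it (a sketch of existing usage, not new API):
 * queue the CBs, mark the last one with IEE_CB_EL, then start the
 * Command Unit.
 *
 *	iee_cb_setup(sc, IEE_CB_CMD_IAS);
 *	iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL);
 *	(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
 */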
void
iee_cb_setup(struct iee_softc *sc, u_int32_t cmd)
{
	struct iee_cb *cb = SC_CB(sc->sc_next_cb);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multistep step;
	struct ether_multi *enm;

	memset(cb, 0, IEE_CB_SZ);
	cb->cb_cmd = cmd;
	switch (cmd & IEE_CB_CMD) {
	case IEE_CB_CMD_NOP:	/* NOP CMD */
		break;
	case IEE_CB_CMD_IAS:	/* Individual Address Setup */
		memcpy((void*)cb->cb_ind_addr, LLADDR(ifp->if_sadl),
		    ETHER_ADDR_LEN);
		break;
	case IEE_CB_CMD_CONF:	/* Configure */
		memcpy((void*)cb->cb_cf, sc->sc_cf, sc->sc_cf[0]
		    & IEE_CF_0_CNT_M);
		break;
	case IEE_CB_CMD_MCS:	/* Multicast Setup */
		if (sc->sc_next_cb != 0) {
			sc->sc_flags |= IEE_WANT_MCAST;
			return;
		}
		sc->sc_flags &= ~IEE_WANT_MCAST;
		if ((sc->sc_cf[8] & IEE_CF_8_PRM) != 0) {
			/* Need no multicast filter in promisc mode. */
			iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
			return;
		}
		/* Leave room for a CONF CMD to enable/disable ALLMULTI mode */
		cb = SC_CB(sc->sc_next_cb + 1);
		cb->cb_cmd = cmd;
		cb->cb_mcast.mc_size = 0;
		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0 || cb->cb_mcast.mc_size
			    * ETHER_ADDR_LEN + 2 * IEE_CB_SZ
			    > IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ) {
				cb->cb_mcast.mc_size = 0;
				break;
			}
			memcpy((void*) &cb->cb_mcast.mc_addrs[
			    cb->cb_mcast.mc_size * ETHER_ADDR_LEN],
			    enm->enm_addrlo, ETHER_ADDR_LEN);
			ETHER_NEXT_MULTI(step, enm);
			cb->cb_mcast.mc_size++;
		}
		if (cb->cb_mcast.mc_size == 0) {
			/* Can't do exact mcast filtering, do ALLMULTI mode. */
			ifp->if_flags |= IFF_ALLMULTI;
			sc->sc_cf[11] &= ~IEE_CF_11_MCALL;
		} else {
			/* disable ALLMULTI and load mcast list */
			ifp->if_flags &= ~IFF_ALLMULTI;
			sc->sc_cf[11] |= IEE_CF_11_MCALL;
			/* Mcast setup may need more than IEE_CB_SZ bytes. */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
			    IEE_CB_OFF, IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ,
			    BUS_DMASYNC_PREWRITE);
		}
		iee_cb_setup(sc, IEE_CB_CMD_CONF);
		break;
	case IEE_CB_CMD_TR:	/* Transmit */
		cb->cb_transmit.tx_tbd_addr = IEE_PHYS_SHMEM(IEE_TBD_OFF
		    + IEE_TBD_SZ * sc->sc_next_tbd);
		cb->cb_cmd |= IEE_CB_SF; /* Always use Flexible Mode. */
		break;
	case IEE_CB_CMD_TDR:	/* Time Domain Reflectometry */
		break;
	case IEE_CB_CMD_DUMP:	/* Dump */
		break;
	case IEE_CB_CMD_DIAG:	/* Diagnose */
		break;
	default:
		/* can't happen */
		break;
	}
	cb->cb_link_addr = IEE_PHYS_SHMEM(IEE_CB_OFF + IEE_CB_SZ *
	    (sc->sc_next_cb + 1));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_CB_OFF
	    + IEE_CB_SZ * sc->sc_next_cb, IEE_CB_SZ, BUS_DMASYNC_PREWRITE);
	sc->sc_next_cb++;
	ifp->if_timer = 5;
	return;
}



void
iee_attach(struct iee_softc *sc, u_int8_t *eth_addr, int *media, int nmedia,
    int defmedia)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int n;

	/* Set pointer to Intermediate System Configuration Pointer. */
	/* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
	SC_SCP->scp_iscp_addr = IEE_SWAP(IEE_PHYS_SHMEM(IEE_ISCP_OFF));
	/* Set pointer to System Control Block. */
	/* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
	SC_ISCP->iscp_scb_addr = IEE_SWAP(IEE_PHYS_SHMEM(IEE_SCB_OFF));
	/* Set pointer to Receive Frame Area. (physical address) */
	SC_SCB->scb_rfa_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF);
	/* Set pointer to Command Block. (physical address) */
	SC_SCB->scb_cmd_blk_addr = IEE_PHYS_SHMEM(IEE_CB_OFF);

	ifmedia_init(&sc->sc_ifmedia, 0, iee_mediachange, iee_mediastatus);
	if (media != NULL) {
		for (n = 0 ; n < nmedia ; n++)
			ifmedia_add(&sc->sc_ifmedia, media[n], 0, NULL);
		ifmedia_set(&sc->sc_ifmedia, defmedia);
	} else {
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE);
	}

	ifp->if_softc = sc;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = iee_start;	/* initiate output routine */
	ifp->if_ioctl = iee_ioctl;	/* ioctl routine */
	ifp->if_init = iee_init;	/* init routine */
	ifp->if_stop = iee_stop;	/* stop routine */
	ifp->if_watchdog = iee_watchdog;	/* timer routine */
	ifp->if_drain = iee_drain;	/* routine to release resources */
	IFQ_SET_READY(&ifp->if_snd);
	/* iee supports IEEE 802.1Q Virtual LANs, see vlan(4). */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	if_attach(ifp);
	ether_ifattach(ifp, eth_addr);

	aprint_normal(": Intel 82596%s address %s\n",
	    i82596_typenames[sc->sc_type], ether_sprintf(eth_addr));

	for (n = 0 ; n < IEE_NCB ; n++)
		sc->sc_tx_map[n] = NULL;
	for (n = 0 ; n < IEE_NRFD ; n++) {
		sc->sc_rx_mbuf[n] = NULL;
		sc->sc_rx_map[n] = NULL;
	}
	sc->sc_tx_timeout = 0;
	sc->sc_setup_timeout = 0;
	(sc->sc_iee_reset)(sc);
	return;
}



void
iee_detach(struct iee_softc *sc, int flags)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if ((ifp->if_flags & IFF_RUNNING) != 0)
		iee_stop(ifp, 1);
	ether_ifdetach(ifp);
	if_detach(ifp);
	return;
}



/* media change and status callback */
int
iee_mediachange(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;

	if (sc->sc_mediachange != NULL)
		return ((sc->sc_mediachange)(ifp));
	return(0);
}



void
iee_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmreq)
{
	struct iee_softc *sc = ifp->if_softc;

	if (sc->sc_mediastatus != NULL)
		(sc->sc_mediastatus)(ifp, ifmreq);
	return;
}



/* initiate output routine */
void
iee_start(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;
	struct mbuf *m = NULL;
	int t;
	int n;

	if (sc->sc_next_cb != 0)
		/* There is already a CMD running. Defer packet enqueueing. */
		return;
	for (t = 0 ; t < IEE_NCB ; t++) {
		IFQ_DEQUEUE(&ifp->if_snd, sc->sc_tx_mbuf[t]);
		if (sc->sc_tx_mbuf[t] == NULL)
			break;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
		    sc->sc_tx_mbuf[t], BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
			/*
			 * The packet needs more TBDs than we support.
			 * Copy the packet into a mbuf cluster to get it out.
			 */
			printf("%s: iee_start: failed to load DMA map\n",
			    sc->sc_dev.dv_xname);
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: iee_start: can't allocate mbuf\n",
				    sc->sc_dev.dv_xname);
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: iee_start: can't allocate mbuf "
				    "cluster\n", sc->sc_dev.dv_xname);
				m_freem(sc->sc_tx_mbuf[t]);
				m_freem(m);
				t--;
				continue;
			}
			m_copydata(sc->sc_tx_mbuf[t], 0,
			    sc->sc_tx_mbuf[t]->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m->m_len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m_freem(sc->sc_tx_mbuf[t]);
			sc->sc_tx_mbuf[t] = m;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
				printf("%s: iee_start: can't load TX DMA map\n",
				    sc->sc_dev.dv_xname);
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
		}
		for (n = 0 ; n < sc->sc_tx_map[t]->dm_nsegs ; n++) {
			SC_TBD(sc->sc_next_tbd + n)->tbd_tb_addr =
			    sc->sc_tx_map[t]->dm_segs[n].ds_addr;
			SC_TBD(sc->sc_next_tbd + n)->tbd_size =
			    sc->sc_tx_map[t]->dm_segs[n].ds_len;
			SC_TBD(sc->sc_next_tbd + n)->tbd_link_addr =
			    IEE_PHYS_SHMEM(IEE_TBD_OFF + IEE_TBD_SZ
			    * (sc->sc_next_tbd + n + 1));
		}
		SC_TBD(sc->sc_next_tbd + n - 1)->tbd_size |= IEE_CB_EL;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_map[t], 0,
		    sc->sc_tx_map[t]->dm_mapsize, BUS_DMASYNC_PREWRITE);
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			iee_cb_setup(sc, IEE_CB_CMD_TR | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
		else
			iee_cb_setup(sc, IEE_CB_CMD_TR);
		sc->sc_next_tbd += n;
#if NBPFILTER > 0
		/* Pass packet to bpf if someone listens. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, sc->sc_tx_mbuf[t]);
#endif
	}
	if (t == 0)
		/* No packets got set up for TX. */
		return;
	if (t == IEE_NCB)
		ifp->if_flags |= IFF_OACTIVE;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_CB_SZ,
	    IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ, BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
	return;
}



/* ioctl routine */
int
iee_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct iee_softc *sc = ifp->if_softc;
	int s;
	int err;

	s = splnet();
	if (cmd == SIOCSIFMEDIA || cmd == SIOCGIFMEDIA) {
		/* Don't return early here, or splnet() would leak. */
		err = ifmedia_ioctl(ifp, (struct ifreq *) data,
		    &sc->sc_ifmedia, cmd);
	} else {
		err = ether_ioctl(ifp, cmd, data);
		if (err == ENETRESET ||
		    ((ifp->if_flags & IFF_PROMISC) != 0
		    && (sc->sc_cf[8] & IEE_CF_8_PRM) == 0)
		    || ((ifp->if_flags & IFF_PROMISC) == 0
		    && (sc->sc_cf[8] & IEE_CF_8_PRM) != 0)) {
			/* Do multicast setup / toggle promisc mode. */
			if ((ifp->if_flags & IFF_PROMISC) != 0)
				sc->sc_cf[8] |= IEE_CF_8_PRM;
			else
				sc->sc_cf[8] &= ~IEE_CF_8_PRM;
			/* Put new multicast list into the hardware filter. */
			iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
			if ((sc->sc_flags & IEE_WANT_MCAST) == 0)
				/* Mcast setup is not deferred. */
				(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
			err = 0;
		}
	}
	splx(s);
	return(err);
}



/* init routine */
int
iee_init(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;
	int r;
	int t;
	int n;
	int err;

	sc->sc_next_cb = 0;
	sc->sc_next_tbd = 0;
	sc->sc_flags &= ~IEE_WANT_MCAST;
	sc->sc_rx_done = 0;
	SC_SCB->scb_crc_err = 0;
	SC_SCB->scb_align_err = 0;
	SC_SCB->scb_resource_err = 0;
	SC_SCB->scb_overrun_err = 0;
	SC_SCB->scb_rcvcdt_err = 0;
	SC_SCB->scb_short_fr_err = 0;
	sc->sc_crc_err = 0;
	sc->sc_align_err = 0;
	sc->sc_resource_err = 0;
	sc->sc_overrun_err = 0;
	sc->sc_rcvcdt_err = 0;
	sc->sc_short_fr_err = 0;
	sc->sc_tx_col = 0;
	sc->sc_rx_err = 0;
	sc->sc_cmd_err = 0;
	/* Create Transmit DMA maps. */
	for (t = 0 ; t < IEE_NCB ; t++) {
		if (sc->sc_tx_map[t] == NULL && bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, IEE_NTBD, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_tx_map[t]) != 0) {
			printf("%s: iee_init: can't create TX DMA map\n",
			    sc->sc_dev.dv_xname);
			for (n = 0 ; n < t ; n++)
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_tx_map[n]);
			return(ENOBUFS);
		}
	}
	/* Initialize Receive Frame and Receive Buffer Descriptors */
	err = 0;
	memset(SC_RFD(0), 0, IEE_RFD_LIST_SZ);
	memset(SC_RBD(0), 0, IEE_RBD_LIST_SZ);
	for (r = 0 ; r < IEE_NRFD ; r++) {
		SC_RFD(r)->rfd_cmd = IEE_RFD_SF;
		SC_RFD(r)->rfd_link_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF
		    + IEE_RFD_SZ * ((r + 1) % IEE_NRFD));

		SC_RBD(r)->rbd_next_rbd = IEE_PHYS_SHMEM(IEE_RBD_OFF
		    + IEE_RBD_SZ * ((r + 1) % IEE_NRFD));
		if (sc->sc_rx_mbuf[r] == NULL) {
			MGETHDR(sc->sc_rx_mbuf[r], M_DONTWAIT, MT_DATA);
			if (sc->sc_rx_mbuf[r] == NULL) {
				printf("%s: iee_init: can't allocate mbuf\n",
				    sc->sc_dev.dv_xname);
				err = 1;
				break;
			}
			MCLAIM(sc->sc_rx_mbuf[r],&sc->sc_ethercom.ec_rx_mowner);
			MCLGET(sc->sc_rx_mbuf[r], M_DONTWAIT);
			if ((sc->sc_rx_mbuf[r]->m_flags & M_EXT) == 0) {
				printf("%s: iee_init: can't allocate mbuf"
				    " cluster\n", sc->sc_dev.dv_xname);
				m_freem(sc->sc_rx_mbuf[r]);
				err = 1;
				break;
			}
		}
		if (sc->sc_rx_map[r] == NULL && bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rx_map[r]) != 0) {
			printf("%s: iee_init: can't create RX "
			    "DMA map\n", sc->sc_dev.dv_xname);
			m_freem(sc->sc_rx_mbuf[r]);
			err = 1;
			break;
		}
		if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_map[r],
		    sc->sc_rx_mbuf[r]->m_ext.ext_buf,
		    sc->sc_rx_mbuf[r]->m_ext.ext_size, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
			printf("%s: iee_init: can't load RX DMA map\n",
			    sc->sc_dev.dv_xname);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[r]);
			m_freem(sc->sc_rx_mbuf[r]);
			err = 1;
			break;
		}
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_map[r], 0,
		    sc->sc_rx_mbuf[r]->m_ext.ext_size, BUS_DMASYNC_PREREAD);
		SC_RBD(r)->rbd_size = sc->sc_rx_map[r]->dm_segs[0].ds_len;
		SC_RBD(r)->rbd_rb_addr = sc->sc_rx_map[r]->dm_segs[0].ds_addr;
	}
	SC_RFD(0)->rfd_rbd_addr = IEE_PHYS_SHMEM(IEE_RBD_OFF);
	if (err != 0) {
		for (n = 0 ; n < r; n++) {
			m_freem(sc->sc_rx_mbuf[n]);
			sc->sc_rx_mbuf[n] = NULL;
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
			sc->sc_rx_map[n] = NULL;
		}
		for (n = 0 ; n < t ; n++) {
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
			sc->sc_tx_map[n] = NULL;
		}
		return(ENOBUFS);
	}

	(sc->sc_iee_reset)(sc);
	iee_cb_setup(sc, IEE_CB_CMD_IAS);
	sc->sc_cf[0] = IEE_CF_0_DEF | IEE_CF_0_PREF;
	sc->sc_cf[1] = IEE_CF_1_DEF;
	sc->sc_cf[2] = IEE_CF_2_DEF;
	sc->sc_cf[3] = IEE_CF_3_ADDRLEN_DEF | IEE_CF_3_NSAI
	    | IEE_CF_3_PREAMLEN_DEF;
	sc->sc_cf[4] = IEE_CF_4_DEF;
	sc->sc_cf[5] = IEE_CF_5_DEF;
	sc->sc_cf[6] = IEE_CF_6_DEF;
	sc->sc_cf[7] = IEE_CF_7_DEF;
	sc->sc_cf[8] = IEE_CF_8_DEF;
	sc->sc_cf[9] = IEE_CF_9_DEF;
	sc->sc_cf[10] = IEE_CF_10_DEF;
	sc->sc_cf[11] = IEE_CF_11_DEF & ~IEE_CF_11_LNGFLD;
	sc->sc_cf[12] = IEE_CF_12_DEF;
	sc->sc_cf[13] = IEE_CF_13_DEF;
	iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL);
	SC_SCB->scb_rfa_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE | IEE_SCB_RUC_ST);
	/* Issue a Channel Attention to ACK interrupts we may have caused. */
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);

	/* Mark the interface as running and ready to RX/TX packets. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	return(0);
}



/* stop routine */
void
iee_stop(struct ifnet *ifp, int disable)
{
	struct iee_softc *sc = ifp->if_softc;
	int n;

	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_flags |= IFF_OACTIVE;
	ifp->if_timer = 0;
	/* Reset the chip to get it quiet. */
	(sc->sc_iee_reset)(ifp->if_softc);
	/* Issue a Channel Attention to ACK interrupts we may have caused. */
	(sc->sc_iee_cmd)(ifp->if_softc, IEE_SCB_ACK);
	/* Release any dynamically allocated resources. */
	for (n = 0 ; n < IEE_NCB ; n++) {
		if (sc->sc_tx_map[n] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
		sc->sc_tx_map[n] = NULL;
	}
	for (n = 0 ; n < IEE_NRFD ; n++) {
		if (sc->sc_rx_mbuf[n] != NULL)
			m_freem(sc->sc_rx_mbuf[n]);
		sc->sc_rx_mbuf[n] = NULL;
		if (sc->sc_rx_map[n] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
		}
		sc->sc_rx_map[n] = NULL;
	}
	return;
}



/* timer routine */
void
iee_watchdog(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;

	(sc->sc_iee_reset)(sc);
	if (sc->sc_next_tbd != 0)
		printf("%s: iee_watchdog: transmit timeout %d\n",
		    sc->sc_dev.dv_xname, ++sc->sc_tx_timeout);
	else
		printf("%s: iee_watchdog: setup timeout %d\n",
		    sc->sc_dev.dv_xname, ++sc->sc_setup_timeout);
	iee_init(ifp);
	return;
}



/* routine to release resources */
void
iee_drain(struct ifnet *ifp)
{
	iee_stop(ifp, 0);
	return;
}