/* $NetBSD: i82596.c,v 1.9 2005/06/02 14:41:26 he Exp $ */

/*
 * Copyright (c) 2003 Jochen Kunz.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Jochen Kunz may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOCHEN KUNZ
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL JOCHEN KUNZ
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the Intel i82596 10Mbit/s Ethernet chip.
 * It operates the i82596 in 32-Bit Linear Mode, as opposed to the old
 * i82586 ie(4) driver (src/sys/dev/ic/i82586.c), which degrades the i82596
 * to i82586 compatibility mode.
 * Documentation about this chip can be found at http://www.openpa.net/,
 * file names 29021806.pdf and 29021906.pdf.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i82596.c,v 1.9 2005/06/02 14:41:26 he Exp $");

/* autoconfig and device stuff */
#include <sys/param.h>
#include <sys/device.h>
#include <sys/conf.h>
#include "locators.h"
#include "ioconf.h"

/* bus_space / bus_dma etc. */
#include <machine/bus.h>
#include <machine/intr.h>

/* general system data and functions */
#include <sys/systm.h>
#include <sys/ioctl.h>

/* tsleep / sleep / wakeup */
#include <sys/proc.h>
/* hz for above */
#include <sys/kernel.h>

/* network stuff */
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <sys/socket.h>
#include <sys/mbuf.h>

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/ic/i82596reg.h>
#include <dev/ic/i82596var.h>



/* Supported chip variants */
const char *i82596_typenames[] = { "unknown", "DX/SX", "CA" };



/* media change and status callback */
static int iee_mediachange(struct ifnet *);
static void iee_mediastatus(struct ifnet *, struct ifmediareq *);

/* interface routines to upper protocols */
static void iee_start(struct ifnet *);			/* initiate output */
static int iee_ioctl(struct ifnet *, u_long, caddr_t);	/* ioctl routine */
static int iee_init(struct ifnet *);			/* init routine */
static void iee_stop(struct ifnet *, int);		/* stop routine */
static void iee_watchdog(struct ifnet *);		/* timer routine */
static void iee_drain(struct ifnet *);			/* release resources */

/* internal helper functions */
static void iee_cb_setup(struct iee_softc *, uint32_t);

/*
Things a MD frontend has to provide:

The functions via function pointers in the softc:
        int (*sc_iee_cmd)(struct iee_softc *sc, uint32_t cmd);
        int (*sc_iee_reset)(struct iee_softc *sc);
        void (*sc_mediastatus)(struct ifnet *, struct ifmediareq *);
        int (*sc_mediachange)(struct ifnet *);

sc_iee_cmd(): send a command to the i82596 by writing the cmd parameter
	to the SCB cmd word and issuing a Channel Attention.
sc_iee_reset(): initiate a reset, supply the address of the SCP to the
	chip, wait for the chip to initialize and ACK interrupts that
	this may have caused by calling (sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
These functions must carefully bus_dmamap_sync() all data they touch!

sc_mediastatus() and sc_mediachange() are just MD hooks to the
corresponding MI functions. The MD frontend may set these pointers to
NULL when they are not needed.

sc->sc_type has to be set to I82596_UNKNOWN or I82596_DX or I82596_CA.
This is for printing out the correct chip type at attach time only. The
MI backend doesn't distinguish different chip types when programming
the chip.

sc->sc_flags has to be set to 0 on little endian hardware and to
IEE_NEED_SWAP on big endian hardware, when endianness conversion is not
done by the bus attachment. Usually you need to set IEE_NEED_SWAP
when IEE_SYSBUS_BE is set in the sysbus byte.

sc->sc_cl_align must be set to 1 or to the cache line size. When set to
1 no special alignment of DMA descriptors is done. If sc->sc_cl_align != 1
it forces alignment of the data structures in the shared memory to a
multiple of sc->sc_cl_align. This is needed on archs like hp700 that have
caches that are not DMA I/O coherent and are unable to map the shared
memory uncacheable. (At least pre-PA7100LC CPUs are unable to map memory
uncacheable.)

sc->sc_cl_align MUST BE INITIALIZED BEFORE THE FOLLOWING MACROS ARE USED:
SC_* IEE_*_SZ IEE_*_OFF IEE_SHMEM_MAX (shell style glob(3) pattern)

The MD frontend has to allocate a piece of DMA memory at least
IEE_SHMEM_MAX bytes in size. All communication with the chip is done via
this shared memory. If possible map this memory uncacheable on archs
with caches that are not DMA I/O coherent. The base of the memory needs
to be aligned to an even address if sc->sc_cl_align == 1 and aligned to
a cache line if sc->sc_cl_align != 1.

An interrupt with iee_intr() as handler must be established.

Call void iee_attach(struct iee_softc *sc, uint8_t *ether_address,
int *media, int nmedia, int defmedia); when everything is set up. First
parameter is a pointer to the MI softc, ether_address is an array that
contains the Ethernet address. media is an array of the media types
provided by the hardware. The members of this array are supplied to
ifmedia_add() in sequence. nmedia is the count of elements in media.
defmedia is the default media that is set via ifmedia_set().
nmedia and defmedia are ignored when media == NULL.

The MD frontend may call iee_detach() to detach the device.

See sys/arch/hp700/gsc/if_iee.c for an example; an illustrative sketch
follows this comment.
*/
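
/*
 * Illustrative only: a minimal MD frontend sketch following the contract
 * above.  It is not compiled in; the iee_md_*() names and the way the
 * chip is reset and the Channel Attention is raised are assumptions of
 * this sketch.  See sys/arch/hp700/gsc/if_iee.c for a real frontend.
 */
#if 0
static int
iee_md_cmd(struct iee_softc *sc, uint32_t cmd)
{
	SC_SCB->scb_cmd = cmd;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_PREWRITE);
	/* Raise a Channel Attention via the bus specific register here. */
	return(0);
}

static int
iee_md_reset(struct iee_softc *sc)
{
	/* Reset the chip via the bus specific register, supply the */
	/* physical address of the SCP, wait for initialization and */
	/* then ACK the interrupts this may have caused: */
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
	return(0);
}

static void
iee_md_attach(struct iee_softc *sc, uint8_t *eaddr)
{
	sc->sc_iee_cmd = iee_md_cmd;
	sc->sc_iee_reset = iee_md_reset;
	sc->sc_mediastatus = NULL;	/* no MD media hooks needed */
	sc->sc_mediachange = NULL;
	sc->sc_type = I82596_DX;
	sc->sc_flags = 0;		/* little endian hardware */
	sc->sc_cl_align = 1;		/* DMA I/O coherent caches */
	/* Allocate and map >= IEE_SHMEM_MAX bytes of DMA memory here */
	/* and establish iee_intr() as interrupt handler before this: */
	iee_attach(sc, eaddr, NULL, 0, 0);
}
#endif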


/*
How frame reception is done:
Each Receive Frame Descriptor has one associated Receive Buffer
Descriptor. Each RBD points to the data area of an mbuf cluster. The
RFDs are linked together in a circular list. sc->sc_rx_done is the count
of RFDs in the list already processed / the number of the RFD that has
to be checked for a new frame first at the next RX interrupt. Upon
successful reception of a frame the mbuf cluster is handed to the upper
protocol layers, a new mbuf cluster is allocated and the RFD / RBD are
reinitialized accordingly. The recycled RBD is marked with IEE_RBD_EL
(end of list) and that mark is cleared on its predecessor, so the chip
never runs past the buffer that is currently being refilled.

When an RFD list overrun occurred the whole RFD and RBD lists are
reinitialized and frame reception is started again.
*/
int
iee_intr(void *intarg)
{
	struct iee_softc *sc = intarg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct iee_rfd *rfd;
	struct iee_rbd *rbd;
	bus_dmamap_t rx_map;
	struct mbuf *rx_mbuf;
	struct mbuf *new_mbuf;
	int scb_status;
	int scb_cmd;
	int n, col;

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
		return(1);
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_POSTREAD);
	scb_status = SC_SCB->scb_status;
	scb_cmd = SC_SCB->scb_cmd;
	rfd = SC_RFD(sc->sc_rx_done);
	while ((rfd->rfd_status & IEE_RFD_C) != 0) {
		/* At least one packet was received. */
		rbd = SC_RBD(sc->sc_rx_done);
		rx_map = sc->sc_rx_map[sc->sc_rx_done];
		rx_mbuf = sc->sc_rx_mbuf[sc->sc_rx_done];
		SC_RBD((sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD)->rbd_size
		    &= ~IEE_RBD_EL;
		if ((rfd->rfd_status & IEE_RFD_OK) == 0
		    || (rbd->rbd_count & IEE_RBD_EOF) == 0
		    || (rbd->rbd_count & IEE_RBD_F) == 0) {
			/* Receive error, skip frame and reuse buffer. */
			rfd->rfd_status = 0;
			rbd->rbd_count = 0;
			rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
			printf("%s: iee_intr: receive error %d, rfd_status="
			    "0x%.4x, rbd_count=0x%.4x\n", sc->sc_dev.dv_xname,
			    ++sc->sc_rx_err, rfd->rfd_status, rbd->rbd_count);
			sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
			continue;
		}
		rfd->rfd_status = 0;
		bus_dmamap_sync(sc->sc_dmat, rx_map, 0, rx_mbuf->m_ext.ext_size,
		    BUS_DMASYNC_POSTREAD);
		rx_mbuf->m_pkthdr.len = rx_mbuf->m_len =
		    rbd->rbd_count & IEE_RBD_COUNT;
		rx_mbuf->m_pkthdr.rcvif = ifp;
		MGETHDR(new_mbuf, M_DONTWAIT, MT_DATA);
		if (new_mbuf == NULL) {
			printf("%s: iee_intr: can't allocate mbuf\n",
			    sc->sc_dev.dv_xname);
			break;
		}
		MCLAIM(new_mbuf, &sc->sc_ethercom.ec_rx_mowner);
		MCLGET(new_mbuf, M_DONTWAIT);
		if ((new_mbuf->m_flags & M_EXT) == 0) {
			printf("%s: iee_intr: can't alloc mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			m_freem(new_mbuf);
			break;
		}
		bus_dmamap_unload(sc->sc_dmat, rx_map);
		if (bus_dmamap_load(sc->sc_dmat, rx_map,
		    new_mbuf->m_ext.ext_buf, new_mbuf->m_ext.ext_size,
		    NULL, BUS_DMA_READ | BUS_DMA_NOWAIT) != 0)
			panic("%s: iee_intr: can't load RX DMA map\n",
			    sc->sc_dev.dv_xname);
		bus_dmamap_sync(sc->sc_dmat, rx_map, 0,
		    new_mbuf->m_ext.ext_size, BUS_DMASYNC_PREREAD);
#if NBPFILTER > 0
		if (ifp->if_bpf != 0)
			bpf_mtap(ifp->if_bpf, rx_mbuf);
#endif /* NBPFILTER > 0 */
		(*ifp->if_input)(ifp, rx_mbuf);
		ifp->if_ipackets++;
		sc->sc_rx_mbuf[sc->sc_rx_done] = new_mbuf;
		rbd->rbd_count = 0;
		rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
		rbd->rbd_rb_addr = rx_map->dm_segs[0].ds_addr;
		sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
		rfd = SC_RFD(sc->sc_rx_done);
	}
	if ((scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR1
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR2
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR3) {
		/* Receive Overrun, reinit receive ring buffer. */
		for (n = 0 ; n < IEE_NRFD ; n++) {
			SC_RFD(n)->rfd_cmd = IEE_RFD_SF;
			SC_RFD(n)->rfd_link_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF
			    + IEE_RFD_SZ * ((n + 1) % IEE_NRFD));
			SC_RBD(n)->rbd_next_rbd = IEE_PHYS_SHMEM(IEE_RBD_OFF
			    + IEE_RBD_SZ * ((n + 1) % IEE_NRFD));
			SC_RBD(n)->rbd_size = IEE_RBD_EL |
			    sc->sc_rx_map[n]->dm_segs[0].ds_len;
			SC_RBD(n)->rbd_rb_addr =
			    sc->sc_rx_map[n]->dm_segs[0].ds_addr;
		}
		SC_RFD(0)->rfd_rbd_addr = IEE_PHYS_SHMEM(IEE_RBD_OFF);
		sc->sc_rx_done = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_RFD_OFF,
		    IEE_RFD_LIST_SZ + IEE_RBD_LIST_SZ, BUS_DMASYNC_PREWRITE);
		(sc->sc_iee_cmd)(sc, IEE_SCB_RUC_ST);
		printf("%s: iee_intr: receive ring buffer overrun\n",
		    sc->sc_dev.dv_xname);
	}

	if (sc->sc_next_cb != 0
	    && (SC_CB(sc->sc_next_cb - 1)->cb_status & IEE_CB_C) != 0) {
		/* CMD list finished */
		ifp->if_timer = 0;
		if (sc->sc_next_tbd != 0) {
			/* A TX CMD list finished, clean up. */
			for (n = 0 ; n < sc->sc_next_cb ; n++) {
				m_freem(sc->sc_tx_mbuf[n]);
				sc->sc_tx_mbuf[n] = NULL;
				bus_dmamap_unload(sc->sc_dmat,sc->sc_tx_map[n]);
				if ((SC_CB(n)->cb_status & IEE_CB_COL) != 0 &&
				    (SC_CB(n)->cb_status & IEE_CB_MAXCOL) == 0)
					col = 16;
				else
					col = SC_CB(n)->cb_status
					    & IEE_CB_MAXCOL;
				sc->sc_tx_col += col;
				if ((SC_CB(n)->cb_status & IEE_CB_OK) != 0) {
					ifp->if_opackets++;
					ifp->if_collisions += col;
				}
			}
			sc->sc_next_tbd = 0;
			ifp->if_flags &= ~IFF_OACTIVE;
		}
		for (n = 0 ; n < sc->sc_next_cb ; n++) {
			/* Check if a CMD failed, but ignore TX errors. */
			if ((SC_CB(n)->cb_cmd & IEE_CB_CMD) != IEE_CB_CMD_TR
			    && ((SC_CB(n)->cb_status & IEE_CB_OK) == 0))
				printf("%s: iee_intr: scb_status=0x%x "
				    "scb_cmd=0x%x failed command %d: "
				    "cb_status[%d]=0x%.4x cb_cmd[%d]=0x%.4x\n",
				    sc->sc_dev.dv_xname, scb_status, scb_cmd,
				    ++sc->sc_cmd_err, n, SC_CB(n)->cb_status,
				    n, SC_CB(n)->cb_cmd);
		}
		sc->sc_next_cb = 0;
		if ((sc->sc_flags & IEE_WANT_MCAST) != 0) {
			iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
			(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
		} else
			/* Try to get deferred packets going. */
			iee_start(ifp);
	}
	if (IEE_SWAP(SC_SCB->scb_crc_err) != sc->sc_crc_err) {
		sc->sc_crc_err = IEE_SWAP(SC_SCB->scb_crc_err);
		printf("%s: iee_intr: crc_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_crc_err);
	}
	if (IEE_SWAP(SC_SCB->scb_align_err) != sc->sc_align_err) {
		sc->sc_align_err = IEE_SWAP(SC_SCB->scb_align_err);
		printf("%s: iee_intr: align_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_align_err);
	}
	if (IEE_SWAP(SC_SCB->scb_resource_err) != sc->sc_resource_err) {
		sc->sc_resource_err = IEE_SWAP(SC_SCB->scb_resource_err);
		printf("%s: iee_intr: resource_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_resource_err);
	}
	if (IEE_SWAP(SC_SCB->scb_overrun_err) != sc->sc_overrun_err) {
		sc->sc_overrun_err = IEE_SWAP(SC_SCB->scb_overrun_err);
		printf("%s: iee_intr: overrun_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_overrun_err);
	}
	if (IEE_SWAP(SC_SCB->scb_rcvcdt_err) != sc->sc_rcvcdt_err) {
		sc->sc_rcvcdt_err = IEE_SWAP(SC_SCB->scb_rcvcdt_err);
		printf("%s: iee_intr: rcvcdt_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_rcvcdt_err);
	}
	if (IEE_SWAP(SC_SCB->scb_short_fr_err) != sc->sc_short_fr_err) {
		sc->sc_short_fr_err = IEE_SWAP(SC_SCB->scb_short_fr_err);
		printf("%s: iee_intr: short_fr_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_short_fr_err);
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
	return(1);
}



/*
How Command Block List Processing is done.

A running CBL is never manipulated. If there is a CBL already running,
further CMDs are deferred until the current list is done. A new list is
set up when the old one has finished.
This eases programming. To manipulate a running CBL it is necessary to
suspend the Command Unit to avoid race conditions. After a suspend
is sent we have to wait for an interrupt that ACKs the suspend. Then
we can manipulate the CBL and resume operation. I am not sure that this
is more effective than the current, much simpler approach. => KISS
See i82596CA data sheet page 26.

A CBL is running or on the way to be set up when (sc->sc_next_cb != 0).

A CBL may consist of TX CMDs, and _only_ TX CMDs.
A TX CBL is running or on the way to be set up when
((sc->sc_next_cb != 0) && (sc->sc_next_tbd != 0)).

A CBL may consist of other non-TX CMDs like IAS or CONF, and _only_
non-TX CMDs.

This comes mostly from the way an Ethernet driver works and because
running CBLs are not manipulated while they are on the way. If
if_start() is called there will be TX CMDs enqueued so we have a running
CBL and other CMDs from e.g. if_ioctl() will be deferred and vice versa.

The Multicast Setup Command is special. An MCS needs more space than
a single CB has. The actual space requirement depends on the length of
the multicast list. So we always defer an MCS until other CBLs are
finished, then we set up a CONF CMD in the first CB. The CONF CMD is
needed to turn ALLMULTI on or off in the hardware. The MCS is the 2nd
CB and may use all the remaining space in the CBL and the Transmit
Buffer Descriptor List. (Therefore CBL and TBDL must be contiguous in
physical and virtual memory. This is guaranteed through the definitions
of the list offsets in i82596reg.h and because only a single DMA segment
is used for all lists.) When ALLMULTI is enabled via the CONF CMD, the
MCS is run with a multicast list length of 0, thus disabling the
multicast filter. A deferred MCS is signaled via
((sc->sc_flags & IEE_WANT_MCAST) != 0).
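
As a worked example of the bounds check in iee_cb_setup() (derived from
that check; the concrete numbers depend on the list sizes defined in
i82596reg.h): the CONF CMD and the MCS CB itself occupy two CBs, so at
most

	(IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ - 2 * IEE_CB_SZ) / ETHER_ADDR_LEN

multicast addresses fit into an MCS. When the list would grow beyond
that bound (or a multicast address range is requested) the driver falls
back to ALLMULTI mode instead.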
*/
void
iee_cb_setup(struct iee_softc *sc, uint32_t cmd)
{
	struct iee_cb *cb = SC_CB(sc->sc_next_cb);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multistep step;
	struct ether_multi *enm;

	memset(cb, 0, IEE_CB_SZ);
	cb->cb_cmd = cmd;
	switch (cmd & IEE_CB_CMD) {
	case IEE_CB_CMD_NOP:	/* NOP CMD */
		break;
	case IEE_CB_CMD_IAS:	/* Individual Address Setup */
		memcpy(__UNVOLATILE(cb->cb_ind_addr), LLADDR(ifp->if_sadl),
		    ETHER_ADDR_LEN);
		break;
	case IEE_CB_CMD_CONF:	/* Configure */
		memcpy(__UNVOLATILE(cb->cb_cf), sc->sc_cf, sc->sc_cf[0]
		    & IEE_CF_0_CNT_M);
		break;
	case IEE_CB_CMD_MCS:	/* Multicast Setup */
		if (sc->sc_next_cb != 0) {
			sc->sc_flags |= IEE_WANT_MCAST;
			return;
		}
		sc->sc_flags &= ~IEE_WANT_MCAST;
		if ((sc->sc_cf[8] & IEE_CF_8_PRM) != 0) {
			/* Need no multicast filter in promisc mode. */
			iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
			return;
		}
		/* Leave room for a CONF CMD to enable/disable ALLMULTI mode */
		cb = SC_CB(sc->sc_next_cb + 1);
		cb->cb_cmd = cmd;
		cb->cb_mcast.mc_size = 0;
		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0 || cb->cb_mcast.mc_size
			    * ETHER_ADDR_LEN + 2 * IEE_CB_SZ
			    > IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ) {
				cb->cb_mcast.mc_size = 0;
				break;
			}
			memcpy(__UNVOLATILE(&cb->cb_mcast.mc_addrs[
			    cb->cb_mcast.mc_size * ETHER_ADDR_LEN]),
			    enm->enm_addrlo, ETHER_ADDR_LEN);
			ETHER_NEXT_MULTI(step, enm);
			cb->cb_mcast.mc_size++;
		}
		if (cb->cb_mcast.mc_size == 0) {
			/* Can't do exact mcast filtering, do ALLMULTI mode. */
			ifp->if_flags |= IFF_ALLMULTI;
			sc->sc_cf[11] &= ~IEE_CF_11_MCALL;
		} else {
			/* disable ALLMULTI and load mcast list */
			ifp->if_flags &= ~IFF_ALLMULTI;
			sc->sc_cf[11] |= IEE_CF_11_MCALL;
			/* Mcast setup may need more than IEE_CB_SZ bytes. */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
			    IEE_CB_OFF, IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ,
			    BUS_DMASYNC_PREWRITE);
		}
		iee_cb_setup(sc, IEE_CB_CMD_CONF);
		break;
	case IEE_CB_CMD_TR:	/* Transmit */
		cb->cb_transmit.tx_tbd_addr = IEE_PHYS_SHMEM(IEE_TBD_OFF
		    + IEE_TBD_SZ * sc->sc_next_tbd);
		cb->cb_cmd |= IEE_CB_SF; /* Always use Flexible Mode. */
		break;
	case IEE_CB_CMD_TDR:	/* Time Domain Reflectometry */
		break;
	case IEE_CB_CMD_DUMP:	/* Dump */
		break;
	case IEE_CB_CMD_DIAG:	/* Diagnose */
		break;
	default:
		/* can't happen */
		break;
	}
	cb->cb_link_addr = IEE_PHYS_SHMEM(IEE_CB_OFF + IEE_CB_SZ *
	    (sc->sc_next_cb + 1));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_CB_OFF
	    + IEE_CB_SZ * sc->sc_next_cb, IEE_CB_SZ, BUS_DMASYNC_PREWRITE);
	sc->sc_next_cb++;
	ifp->if_timer = 5;
	return;
}



void
iee_attach(struct iee_softc *sc, uint8_t *eth_addr, int *media, int nmedia,
    int defmedia)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int n;

	/* Set pointer to Intermediate System Configuration Pointer. */
	/* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
	SC_SCP->scp_iscp_addr = IEE_SWAP(IEE_PHYS_SHMEM(IEE_ISCP_OFF));
	/* Set pointer to System Control Block. */
	/* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
	SC_ISCP->iscp_scb_addr = IEE_SWAP(IEE_PHYS_SHMEM(IEE_SCB_OFF));
	/* Set pointer to Receive Frame Area. (physical address) */
	SC_SCB->scb_rfa_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF);
	/* Set pointer to Command Block. (physical address) */
	SC_SCB->scb_cmd_blk_addr = IEE_PHYS_SHMEM(IEE_CB_OFF);

	ifmedia_init(&sc->sc_ifmedia, 0, iee_mediachange, iee_mediastatus);
	if (media != NULL) {
		for (n = 0 ; n < nmedia ; n++)
			ifmedia_add(&sc->sc_ifmedia, media[n], 0, NULL);
		ifmedia_set(&sc->sc_ifmedia, defmedia);
	} else {
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE);
	}

	ifp->if_softc = sc;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = iee_start;	/* initiate output routine */
	ifp->if_ioctl = iee_ioctl;	/* ioctl routine */
	ifp->if_init = iee_init;	/* init routine */
	ifp->if_stop = iee_stop;	/* stop routine */
	ifp->if_watchdog = iee_watchdog;	/* timer routine */
	ifp->if_drain = iee_drain;	/* routine to release resources */
	IFQ_SET_READY(&ifp->if_snd);
	/* iee supports IEEE 802.1Q Virtual LANs, see vlan(4). */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	if_attach(ifp);
	ether_ifattach(ifp, eth_addr);

	aprint_normal(": Intel 82596%s address %s\n",
	    i82596_typenames[sc->sc_type], ether_sprintf(eth_addr));

	for (n = 0 ; n < IEE_NCB ; n++)
		sc->sc_tx_map[n] = NULL;
	for (n = 0 ; n < IEE_NRFD ; n++) {
		sc->sc_rx_mbuf[n] = NULL;
		sc->sc_rx_map[n] = NULL;
	}
	sc->sc_tx_timeout = 0;
	sc->sc_setup_timeout = 0;
	(sc->sc_iee_reset)(sc);
	return;
}



void
iee_detach(struct iee_softc *sc, int flags)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if ((ifp->if_flags & IFF_RUNNING) != 0)
		iee_stop(ifp, 1);
	ether_ifdetach(ifp);
	if_detach(ifp);
	return;
}



/* media change and status callback */
int
iee_mediachange(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;

	if (sc->sc_mediachange != NULL)
		return ((sc->sc_mediachange)(ifp));
	return(0);
}



void
iee_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmreq)
{
	struct iee_softc *sc = ifp->if_softc;

	if (sc->sc_mediastatus != NULL)
		(sc->sc_mediastatus)(ifp, ifmreq);
	return;
}



/* initiate output routine */
void
iee_start(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;
	struct mbuf *m = NULL;
	int t;
	int n;

	if (sc->sc_next_cb != 0)
		/* There is already a CMD running. Defer packet enqueueing. */
		return;
	for (t = 0 ; t < IEE_NCB ; t++) {
		IFQ_DEQUEUE(&ifp->if_snd, sc->sc_tx_mbuf[t]);
		if (sc->sc_tx_mbuf[t] == NULL)
			break;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
		    sc->sc_tx_mbuf[t], BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
			/*
			 * The packet needs more TBDs than we support.
			 * Copy the packet into an mbuf cluster to get it out.
			 */
			printf("%s: iee_start: failed to load DMA map\n",
			    sc->sc_dev.dv_xname);
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: iee_start: can't allocate mbuf\n",
				    sc->sc_dev.dv_xname);
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: iee_start: can't allocate mbuf "
				    "cluster\n", sc->sc_dev.dv_xname);
				m_freem(sc->sc_tx_mbuf[t]);
				m_freem(m);
				t--;
				continue;
			}
			m_copydata(sc->sc_tx_mbuf[t], 0,
			    sc->sc_tx_mbuf[t]->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m->m_len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m_freem(sc->sc_tx_mbuf[t]);
			sc->sc_tx_mbuf[t] = m;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
				printf("%s: iee_start: can't load TX DMA map\n",
				    sc->sc_dev.dv_xname);
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
		}
		for (n = 0 ; n < sc->sc_tx_map[t]->dm_nsegs ; n++) {
			SC_TBD(sc->sc_next_tbd + n)->tbd_tb_addr =
			    sc->sc_tx_map[t]->dm_segs[n].ds_addr;
			SC_TBD(sc->sc_next_tbd + n)->tbd_size =
			    sc->sc_tx_map[t]->dm_segs[n].ds_len;
			SC_TBD(sc->sc_next_tbd + n)->tbd_link_addr =
			    IEE_PHYS_SHMEM(IEE_TBD_OFF + IEE_TBD_SZ
			    * (sc->sc_next_tbd + n + 1));
		}
		SC_TBD(sc->sc_next_tbd + n - 1)->tbd_size |= IEE_CB_EL;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_map[t], 0,
		    sc->sc_tx_map[t]->dm_mapsize, BUS_DMASYNC_PREWRITE);
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			iee_cb_setup(sc, IEE_CB_CMD_TR | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
		else
			iee_cb_setup(sc, IEE_CB_CMD_TR);
		sc->sc_next_tbd += n;
#if NBPFILTER > 0
		/* Pass packet to bpf if someone listens. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, sc->sc_tx_mbuf[t]);
#endif
	}
	if (t == 0)
		/* No packets got set up for TX. */
		return;
	if (t == IEE_NCB)
		ifp->if_flags |= IFF_OACTIVE;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_CB_OFF,
	    IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ, BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
	return;
}



/* ioctl routine */
int
iee_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct iee_softc *sc = ifp->if_softc;
	int s;
	int err;

	s = splnet();
	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		err = ifmedia_ioctl(ifp, (struct ifreq *) data,
		    &sc->sc_ifmedia, cmd);
		break;

	default:
		err = ether_ioctl(ifp, cmd, data);
		if (err == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING) {
				iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S |
				    IEE_CB_EL | IEE_CB_I);
				if ((sc->sc_flags & IEE_WANT_MCAST) == 0)
					(*sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
			}
			err = 0;
		}
		break;
	}
	splx(s);
	return(err);
}



/* init routine */
int
iee_init(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;
	int r;
	int t;
	int n;
	int err;

	sc->sc_next_cb = 0;
	sc->sc_next_tbd = 0;
	sc->sc_flags &= ~IEE_WANT_MCAST;
	sc->sc_rx_done = 0;
	SC_SCB->scb_crc_err = 0;
	SC_SCB->scb_align_err = 0;
	SC_SCB->scb_resource_err = 0;
	SC_SCB->scb_overrun_err = 0;
	SC_SCB->scb_rcvcdt_err = 0;
	SC_SCB->scb_short_fr_err = 0;
	sc->sc_crc_err = 0;
	sc->sc_align_err = 0;
	sc->sc_resource_err = 0;
	sc->sc_overrun_err = 0;
	sc->sc_rcvcdt_err = 0;
	sc->sc_short_fr_err = 0;
	sc->sc_tx_col = 0;
	sc->sc_rx_err = 0;
	sc->sc_cmd_err = 0;
	/* Create Transmit DMA maps. */
	for (t = 0 ; t < IEE_NCB ; t++) {
		if (sc->sc_tx_map[t] == NULL && bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, IEE_NTBD, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_tx_map[t]) != 0) {
			printf("%s: iee_init: can't create TX DMA map\n",
			    sc->sc_dev.dv_xname);
			for (n = 0 ; n < t ; n++)
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_tx_map[n]);
			return(ENOBUFS);
		}
	}
	/* Initialize Receive Frame and Receive Buffer Descriptors */
	err = 0;
	memset(SC_RFD(0), 0, IEE_RFD_LIST_SZ);
	memset(SC_RBD(0), 0, IEE_RBD_LIST_SZ);
	for (r = 0 ; r < IEE_NRFD ; r++) {
		SC_RFD(r)->rfd_cmd = IEE_RFD_SF;
		SC_RFD(r)->rfd_link_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF
		    + IEE_RFD_SZ * ((r + 1) % IEE_NRFD));

		SC_RBD(r)->rbd_next_rbd = IEE_PHYS_SHMEM(IEE_RBD_OFF
		    + IEE_RBD_SZ * ((r + 1) % IEE_NRFD));
		if (sc->sc_rx_mbuf[r] == NULL) {
			MGETHDR(sc->sc_rx_mbuf[r], M_DONTWAIT, MT_DATA);
			if (sc->sc_rx_mbuf[r] == NULL) {
				printf("%s: iee_init: can't allocate mbuf\n",
				    sc->sc_dev.dv_xname);
				err = 1;
				break;
			}
			MCLAIM(sc->sc_rx_mbuf[r],&sc->sc_ethercom.ec_rx_mowner);
			MCLGET(sc->sc_rx_mbuf[r], M_DONTWAIT);
			if ((sc->sc_rx_mbuf[r]->m_flags & M_EXT) == 0) {
				printf("%s: iee_init: can't allocate mbuf"
				    " cluster\n", sc->sc_dev.dv_xname);
				m_freem(sc->sc_rx_mbuf[r]);
				err = 1;
				break;
			}
		}
		if (sc->sc_rx_map[r] == NULL && bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rx_map[r]) != 0) {
			printf("%s: iee_init: can't create RX "
			    "DMA map\n", sc->sc_dev.dv_xname);
			m_freem(sc->sc_rx_mbuf[r]);
			err = 1;
			break;
		}
		if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_map[r],
		    sc->sc_rx_mbuf[r]->m_ext.ext_buf,
		    sc->sc_rx_mbuf[r]->m_ext.ext_size, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
			printf("%s: iee_init: can't load RX DMA map\n",
			    sc->sc_dev.dv_xname);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[r]);
			m_freem(sc->sc_rx_mbuf[r]);
			err = 1;
			break;
		}
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_map[r], 0,
		    sc->sc_rx_mbuf[r]->m_ext.ext_size, BUS_DMASYNC_PREREAD);
		SC_RBD(r)->rbd_size = sc->sc_rx_map[r]->dm_segs[0].ds_len;
		SC_RBD(r)->rbd_rb_addr = sc->sc_rx_map[r]->dm_segs[0].ds_addr;
	}
	SC_RFD(0)->rfd_rbd_addr = IEE_PHYS_SHMEM(IEE_RBD_OFF);
	if (err != 0) {
		for (n = 0 ; n < r ; n++) {
			m_freem(sc->sc_rx_mbuf[n]);
			sc->sc_rx_mbuf[n] = NULL;
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
			sc->sc_rx_map[n] = NULL;
		}
		for (n = 0 ; n < t ; n++) {
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
			sc->sc_tx_map[n] = NULL;
		}
		return(ENOBUFS);
	}

	(sc->sc_iee_reset)(sc);
	iee_cb_setup(sc, IEE_CB_CMD_IAS);
	sc->sc_cf[0] = IEE_CF_0_DEF | IEE_CF_0_PREF;
	sc->sc_cf[1] = IEE_CF_1_DEF;
	sc->sc_cf[2] = IEE_CF_2_DEF;
	sc->sc_cf[3] = IEE_CF_3_ADDRLEN_DEF | IEE_CF_3_NSAI
	    | IEE_CF_3_PREAMLEN_DEF;
	sc->sc_cf[4] = IEE_CF_4_DEF;
	sc->sc_cf[5] = IEE_CF_5_DEF;
	sc->sc_cf[6] = IEE_CF_6_DEF;
	sc->sc_cf[7] = IEE_CF_7_DEF;
	sc->sc_cf[8] = IEE_CF_8_DEF;
	sc->sc_cf[9] = IEE_CF_9_DEF;
	sc->sc_cf[10] = IEE_CF_10_DEF;
	sc->sc_cf[11] = IEE_CF_11_DEF & ~IEE_CF_11_LNGFLD;
	sc->sc_cf[12] = IEE_CF_12_DEF;
	sc->sc_cf[13] = IEE_CF_13_DEF;
	iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL);
	SC_SCB->scb_rfa_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE | IEE_SCB_RUC_ST);
	/* Issue a Channel Attention to ACK interrupts we may have caused. */
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);

	/* Mark the interface as running and ready to RX/TX packets. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	return(0);
}



/* stop routine */
void
iee_stop(struct ifnet *ifp, int disable)
{
	struct iee_softc *sc = ifp->if_softc;
	int n;

	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_flags |= IFF_OACTIVE;
	ifp->if_timer = 0;
	/* Reset the chip to get it quiet. */
	(sc->sc_iee_reset)(ifp->if_softc);
	/* Issue a Channel Attention to ACK interrupts we may have caused. */
	(sc->sc_iee_cmd)(ifp->if_softc, IEE_SCB_ACK);
	/* Release any dynamically allocated resources. */
	for (n = 0 ; n < IEE_NCB ; n++) {
		if (sc->sc_tx_map[n] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
		sc->sc_tx_map[n] = NULL;
	}
	for (n = 0 ; n < IEE_NRFD ; n++) {
		if (sc->sc_rx_mbuf[n] != NULL)
			m_freem(sc->sc_rx_mbuf[n]);
		sc->sc_rx_mbuf[n] = NULL;
		if (sc->sc_rx_map[n] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
		}
		sc->sc_rx_map[n] = NULL;
	}
	return;
}



/* timer routine */
void
iee_watchdog(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;

	(sc->sc_iee_reset)(sc);
	if (sc->sc_next_tbd != 0)
		printf("%s: iee_watchdog: transmit timeout %d\n",
		    sc->sc_dev.dv_xname, ++sc->sc_tx_timeout);
	else
		printf("%s: iee_watchdog: setup timeout %d\n",
		    sc->sc_dev.dv_xname, ++sc->sc_setup_timeout);
	iee_init(ifp);
	return;
}



/* routine to release resources */
void
iee_drain(struct ifnet *ifp)
{
	iee_stop(ifp, 0);
	return;
}